diff --git "a/5893.jsonl" "b/5893.jsonl" new file mode 100644--- /dev/null +++ "b/5893.jsonl" @@ -0,0 +1,651 @@ +{"seq_id":"214334273","text":"#!/usr/bin/env python3\n\nimport sys\nfrom lxml import etree\nfrom os.path import basename\nfrom osm.street import Street\nfrom osm.attraction import Attraction\n\ndef main(fname):\n osm = etree.parse(fname)\n\n for cls in [Street, Attraction]:\n objs = {}\n for el in osm.xpath(cls.XPath):\n o = cls(el)\n try:\n objs[o].update(o)\n except KeyError:\n objs[o] = o\n\n print(' {0} '.format(cls.Name.upper()).center(60, '='))\n for n,o in sorted(objs.items()):\n print(o)\n print()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Usage: python3 {0} \".format(basename(sys.argv[0])))\n else:\n main(sys.argv[1])\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"319776576","text":"# -*- coding: utf-8 -*-\n\n# * Copyright (c) 2009-2018. Authors: see NOTICE file.\n# *\n# * Licensed under the Apache License, Version 2.0 (the \"License\");\n# * you may not use this file except in compliance with the License.\n# * You may obtain a copy of the License at\n# *\n# * http://www.apache.org/licenses/LICENSE-2.0\n# *\n# * Unless required by applicable law or agreed to in writing, software\n# * distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\nimport os\n\n__author__ = \"Rubens Ulysse \"\n__contributors__ = [\"Marée Raphaël \", \"Mormont Romain \"]\n__copyright__ = \"Copyright 2010-2018 University of Liège, Belgium, http://www.cytomine.be/\"\n\nfrom cytomine.cytomine import Cytomine\nfrom cytomine.models.collection import Collection\nfrom cytomine.models.model import Model\n\nimport numpy as np\n\n\nclass ImageGroup(Model):\n def __init__(self, name=None, id_project=None, **attributes):\n super(ImageGroup, self).__init__()\n self.name = name\n self.project = id_project\n self.populate(attributes)\n\n def characteristics(self):\n uri = \"imagegroup/{}/characteristics.json\".format(self.id)\n return Cytomine.get_instance().get(uri)\n\n def download(self, dest_pattern=\"{name}\", override=True, parent=False):\n \"\"\"\n Download the original image.\n\n Parameters\n ----------\n dest_pattern : str, optional\n Destination path for the downloaded image. 
\"{X}\" patterns are replaced by the value of X attribute\n if it exists.\n override : bool, optional\n True if a file with same name can be overrided by the new file.\n parent : bool, optional\n True to download image parent if the image is a part of a multidimensional file.\n\n Returns\n -------\n downloaded : bool\n True if everything happens correctly, False otherwise.\n \"\"\"\n if self.id is None:\n raise ValueError(\"Cannot download file with no ID.\")\n\n pattern = re.compile(\"{(.*?)}\")\n dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], \"_\")), dest_pattern)\n parameters = {\"parent\": parent}\n\n destination = os.path.dirname(dest_pattern)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n return Cytomine.get_instance().download_file(\"{}/{}/download\".format(self.callback_identifier, self.id),\n dest_pattern, override, parameters)\n\n\nclass ImageGroupCollection(Collection):\n def __init__(self, filters=None, max=0, offset=0, **parameters):\n super(ImageGroupCollection, self).__init__(ImageGroup, filters, max, offset)\n self._allowed_filters = [\"project\"]\n self.set_parameters(parameters)\n\n\nclass ImageGroupHDF5(Model):\n def __init__(self, id_group=None, filename=None, **attributes):\n super(ImageGroupHDF5, self).__init__()\n self.group = id_group\n self.groupName = None\n self.filename = filename\n self.progress = None\n self.status = None\n self.populate(attributes)\n\n @property\n def callback_identifier(self):\n return \"imagegroupHDF5\"\n\n def pixel(self, x, y):\n uri = \"imagegroupHDF5/{}/{}/{}/pixel.json\".format(self.id, x, y)\n return np.asarray([[Cytomine.get_instance().get(uri)[\"spectra\"]]])\n\n def rectangle(self, x, y, width, height):\n uri = \"imagegroupHDF5/{}/{}/{}/{}/{}/rectangle.json\".format(self.id, x, y, width, height)\n collection = Cytomine.get_instance().get(uri)[\"collection\"]\n spectrum = np.array([data[\"spectra\"] for data in collection])\n spectrum = np.expand_dims(spectrum, axis=1)\n _, _, depth = spectrum.shape\n return spectrum.reshape((width, height, depth))\n\n\n# class ImageGroupHDF5Collection(Collection):\n# def __init__(self, filters=None, max=0, offset=0, **parameters):\n# super(ImageGroupHDF5Collection, self).__init__(ImageGroupHDF5, filters, max, offset)\n# self._allowed_filters = [\"project\"]\n# self.set_parameters(parameters)\n#\n# @property\n# def callback_identifier(self):\n# return \"imagegroupHDF5\"\n\n\nclass ImageSequence(Model):\n def __init__(self, id_image_group=None, id_image_instance=None, z_stack=None,\n slice=None, time=None, channel=None, **attributes):\n super(ImageSequence, self).__init__()\n self.imageGroup = id_image_group\n self.image = id_image_instance\n self.zStack = z_stack\n self.slice = slice\n self.time = time\n self.channel = channel\n self.model = None\n self.populate(attributes)\n\n def __str__(self):\n return \"[{}] {} : Group {} - Image {} {}/{}/{}/{} \".format(self.callback_identifier,\n self.id, self.imageGroup, self.image,\n self.channel, self.zStack, self.slice, self.time)\n\n\nclass ImageSequenceCollection(Collection):\n def __init__(self, filters=None, max=0, offset=0, **parameters):\n super(ImageSequenceCollection, self).__init__(ImageSequence, filters, max, offset)\n self._allowed_filters = [\"imagegroup\", \"imageinstance\"]\n 
self.set_parameters(parameters)\n","sub_path":"cytomine/models/imagegroup.py","file_name":"imagegroup.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421235469","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 26 10:01:38 2018\n\n@author: ptbngoc\n\"\"\"\n\nimport numpy as np\nimport string\nfrom nltk import sent_tokenize\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n#load data\nfilename = \"plain_text.txt\"\nfile = open(filename, 'rt')\ntext = file.read()\nfile.close()\n#split into words\ntokens = word_tokenize(text)\n#convert to lower case\ntokens = [w.lower() for w in tokens]\n#remove punctuation from each word\ntable = str.maketrans('','',string.punctuation)\nstripped = [w.translate(table) for w in tokens]\n#remove remaining tokens that are not alphabetic\nwords = [word for word in stripped if word.isalpha()]\n#filter out stop words\nstop_words = set(stopwords.words('english'))\nwords = [w for w in words if not w in stop_words]\nprint(words[:100])\n\n#stemming of words\nporter = PorterStemmer()\nstemmed = [porter.stem(word) for word in tokens]\nprint(stemmed[:100])","sub_path":"cleaning_text_new.py","file_name":"cleaning_text_new.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"500006331","text":"from django.test import TestCase\nfrom ..models import Expense\n\n\n# Create your tests here.\nclass ExpenseTest(TestCase):\n \"\"\" Test module for Expense model \"\"\"\n\n def setUp(self):\n Expense.objects.create(name='one', description='Desciption')\n Expense.objects.create(name='two', description='Another')\n\n def test_get_expense(self):\n first = Expense.objects.get(name='one')\n second = Expense.objects.get(name='two')\n self.assertEqual(first.get_expense(),\n 'one = description: Desciption')\n self.assertEqual(second.get_expense(),\n 'two = description: Another')\n","sub_path":"expenses/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"134656352","text":"from migen import *\nfrom migen.build.platforms import ___your_platform___ as my_platfrom\n\nclass Blinker(Module):\n\n def __init__(self, signal_out, counter_width = 26):\n \"\"\"\n desc: A Module that blinks the signal_out every 2**counter_width cycles;\n arg signal_out: the signal to write the ;\n kwarg counter_width: the number of bits in the counter, by default 26;\n \"\"\"\n self.signal_out = signal_out # the led to output to\n\n self.counter = Signal(counter_width) # make a 26 bit \"counter\"\n self.sync += self.counter.eq(self.counter + 1) # have the counter increment on every clock cycle\n\n self.comb += self.signal_out.eq(self.counter[counter_width - 1]) # have the signal_out's value always be equal to bit 25 of the counter\n\n\n\nplat = my_platfrom.Platform()\nmy_led = plat.request(\"user_led\")\nmy_blinker = Blinker(my_led, counter_width = 20)\n\nplat.build(my_blinker) # transpile to verilog and build for the Platform\n","sub_path":"examples/basic/blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"555707403","text":"\nimport pandas as pd\nimport 
matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport numpy as np\nimport shutil\n\nimport imageio\nimport os\nimport natsort\n\nfrom datetime import datetime, date, time\nimport scipy.io\nfrom scipy.io import loadmat\n\nreactor = \"MSDR\"\n\n\n###################### DELTE AFTERWARDS ########################\nbeta_directory = \"./Apollo/\" + reactor\n\nprint(\"Searching directory for .out files...\")\nnames = []\nfor root, dirs, files in os.walk(beta_directory):\n for file in files:\n if file.endswith(\".out\"):\n print(os.path.join(root, file))\n names.append(os.path.join(root, file))\n\n###################### DELTE AFTERWARDS ########################\n\n\nocurrences_fine = open(names[0], 'r').read().count(\n \" k-eff = \")\nprint(\"There are {} transport calculation results.\".format(ocurrences_fine))\n\n\ntext_path = (\"./\" + reactor + \"/k_eff_\" + reactor + \".txt\")\nif os.path.exists(text_path):\n os.remove(text_path)\n \noutfile_msdr = open(text_path, \"a\")\n\nwith open(names[0], 'r') as f:\n textfile_temp = f.read()\n for i in range(1, ocurrences_fine+1):\n outfile_msdr.write(textfile_temp.split(' k-eff = ')[i].split(\n \"Time=\")[0])\n f.close()\noutfile_msdr.close()\n\n\n\ntext_path2 = (\"./\" + reactor + \"/k_times_\" + reactor + \".txt\")\nif os.path.exists(text_path2):\n os.remove(text_path2)\n \noutfile_msdr = open(text_path2, \"a\")\nwith open(names[0], 'r') as f:\n textfile_temp = f.read()\n for i in range(1, ocurrences_fine+1):\n outfile_msdr.write(textfile_temp.split('Time=')[i].split(\n \"d Nominal conditions\")[0])\n f.close()\noutfile_msdr.close()\n\n\ntext_path3 = (\"./\" + reactor + \"/k_burnups_\" + reactor + \".txt\")\nif os.path.exists(text_path3):\n os.remove(text_path3)\n \noutfile_msdr = open(text_path3, \"a\")\nwith open(names[0], 'r') as f:\n textfile_temp = f.read()\n for i in range(1, ocurrences_fine+1):\n outfile_msdr.write(textfile_temp.split(', Burnup =')[i].split(\n \"GWd/MTIHM\")[0])\n f.close()\noutfile_msdr.close()\n\n\n\ndf_keff = pd.read_csv(text_path, header=None, delimiter=r\"\\s+\", index_col=False)\ndf_time = pd.read_csv(text_path2, header=None, delimiter=r\"\\s+\", index_col=False)\ndf_burnup = pd.read_csv(text_path3, header=None, delimiter=r\"\\s+\", index_col=False)\n\ndf_keff = df_keff.T\ndf_time = df_time.T\ndf_burnup = df_burnup.T\n\ndf_ktime = pd.concat([df_keff, df_time], axis=1, sort=False)\ndf_kburn = pd.concat([df_keff, df_burnup], axis=1, sort=False)\n\n\ndf_ktime.columns = ['K_eff', 'Time']\ndf_kburn.columns = ['K_eff', 'Burnup (GWd/MTIHM)']\n\nax = df_ktime.plot(\"Time\", \"K_eff\", figsize=(7, 5), title=\"K_eff vs Time\")\nax.set_ylabel(\"K-eff\")\nax.grid()\nplt.savefig(\"./\" + reactor + \"/K_time.png\")\n\nax = df_kburn.plot(\"Burnup (GWd/MTIHM)\", \"K_eff\", figsize=(7, 5), title=\"K_eff vs Burnup\")\nax.set_ylabel(\"K-eff\")\nax.grid()\nplt.savefig(\"./\" + reactor + \"/K_burnup.png\")\n\n","sub_path":"SCALE_beta/Other/Python_Utilities/k_burnup.py","file_name":"k_burnup.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518348994","text":"from sqlalchemy.exc import InvalidRequestError, IntegrityError\nfrom ..app import db\nfrom ..model.user import User\nfrom ..util.logger import get_logger as log\n\ndef FindOneOrCreate(ID=None, userName=None):\n query = User.query\n\n if ID is not None:\n query = query.filter_by(id=ID)\n if userName is not None:\n query = query.filter_by(UserName=userName)\n\n query = 
query.one_or_none()\n\n if query:\n query = query.__dict__\n \n query.pop(\"_sa_instance_state\")\n\n return query\n else:\n if Create(userName) == 200:\n query = User.query.filter_by(UserName=userName).one_or_none()\n query = query.__dict__\n \n\n return query\n\ndef Create(userName):\n createUser = User(UserName=userName)\n\n try:\n db.session.add(createUser)\n db.session.commit()\n\n return 200\n \n\n except InvalidRequestError:\n log().error(\"Unable to create data\")\n return 500\n\n","sub_path":"backend/app/controller/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359297832","text":"#!/usr/bin/env python3\nimport glob\nimport os\n\ncurr_dir = os.getcwd()\npages = []\npage_count = 0\n\nfiles = []\n\nfor file in glob.glob(\"**/*.html\"):\n file_path = os.path.join(curr_dir, file)\n files.append(file_path)\n\nfor file in files:\n lines = []\n seo_title = \"\"\n new_line = \"\"\n line_num = 0\n\n with open(file, \"r+\") as file_content:\n lines = file_content.readlines()\n for num, line in enumerate(lines):\n if line.startswith(\"title:\"):\n line_num = num + 1\n seo_title = line.replace(\"title:\", \"seo_title:\")\n\n lines.insert(line_num, seo_title)\n\n with open(file, \"w\") as file_content:\n file_content.writelines(lines)\n\n","sub_path":"create_seo_title.py","file_name":"create_seo_title.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348777991","text":"class Planet(object):\n def __init__(self, name, diameter, distance, moons):\n self.name = name\n self.diameter = diameter\n self.distance = distance\n self.moons = moons\n def __str__(self):\n return f'''Name: {self.name},\nDiameter: {self.diameter} miles,\nDistance from Sun: {self.distance} miles,\nMoons: {self.moons}'''\n ","sub_path":"planet.py","file_name":"planet.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"382632300","text":"#Importing libraries\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport string\nimport re\nimport numpy as np\nimport random\nfrom collections import Counter\n\n\nstop = set(stopwords.words('english'))\nexclude = set(string.punctuation)\nlemma = WordNetLemmatizer()\n \n# Cleaning the text sentences so that punctuation marks, stop words & digits are removed\ndef clean(doc):\n doc=doc.replace('
',' ') \n stop_free = \" \".join([i for i in doc.lower().split() if i not in stop])\n punc_free = ''.join(ch for ch in stop_free if ch not in exclude)\n normalized = \" \".join(lemma.lemmatize(word) for word in punc_free.split())\n processed = re.sub(r\"\\d+\",\"\",normalized)\n y = processed.split()\n for word in y:\n if len(word)<=2:\n del y[y.index(word)]\n word.lower()\n return y\n\n\n#this method returns important words after applying bigram from a sentence as list\ndef getwords(sentence):\n w = sentence.split(\" \")\n w= w + [w[i]+' '+w[i+1] for i in range(len(w)-1)]\n w= list(set(w))\n return w\n\n\npath = \"movie.txt\"\n\ntrain_clean_sentences = []\ny_train=np.array([])\ntest_clean_sentences = []\ny_test=np.array([])\nfp = open(path,'r')\n\nds=[]\nfor row in fp:\n ds.append([row[:-2],int(row[-2])])\n\nfor i in range(len(ds)):\n cleaned= clean(ds[i][0])\n cleaned = ' '.join(cleaned)\n ds[i][0]=cleaned\n\nrandom.shuffle(ds)\n\nposlines=[]\nneglines=[]\nfor i in ds:\n if i[1]==1:\n poslines.append(i[0])\n else:\n neglines.append(i[0])\n\npossplit=int(len(poslines)*0.6)\nnegsplit=int(len(neglines)*0.6)\n\ntrain_clean_sentences= [(x,1) for x in poslines[:possplit]] + [(x,0) for x in neglines[:negsplit]]\n#y_train= [1]*possplit + [0]*negsplit\ny_train=np.append(y_train,[[1]*possplit + [0]*negsplit])\n\ntest_clean_sentences= [(x,1) for x in poslines[possplit:]] + [(x,0) for x in neglines[negsplit:]]\n#y_test= [1]*(len(poslines)-possplit) + [0]*(len(neglines)-negsplit)\ny_test=np.append(y_test,[[1]*(len(poslines)-possplit) + [0]*(len(neglines)-negsplit)])\n\nposwords={}\nnegwords={}\n\n\nfor line,label in train_clean_sentences:\n words= getwords(line)\n for word in words:\n if label==1: poswords[word]= poswords.get(word, 0) + 1\n if label==0: negwords[word]= negwords.get(word, 0) + 1\n\nposwords = { k : v for k,v in poswords.items() if (v>=10 & v<=2000)}\nnegwords = { k : v for k,v in negwords.items() if (v>=10 & v<=2000)}\n\npredicted_labels_NB=np.array([])\n\nfor testline,testlabel in test_clean_sentences:\n testwords= getwords(testline)\n totpos, totneg= 0.0, 0.0\n for word in testwords: \n a= poswords.get(word,0.0)# + 1.0\n b= negwords.get(word,0.0)# + 1.0 \n if ((a!=0.0)|(b!=0.0)):\n totpos+= a/(a+b)\n totneg+= b/(a+b) \n if (totpos>totneg):\n predicted_labels_NB=np.append(predicted_labels_NB,1)\n else:\n predicted_labels_NB=np.append(predicted_labels_NB,0)\n\nprint (\"\\n----------------PREDICTIONS BY NAIVE-BAYES------------------\")\n\nap_pp=0\nap_pn=0\nan_pp=0\nan_pn=0\nacc=0\n\nfor i in range(len(y_test)):\n if predicted_labels_NB[i]==y_test[i]:\n acc+=1\n if(y_test[i]==1):\n ap_pp+=1\n else:\n an_pn+=1\n else:\n if(y_test[i]==1):\n ap_pn+=1\n else:\n an_pp+=1\npct= 100/len(y_test)\naccuracy= acc* pct\nprint(' Pos Neg')\nprint(' ',ap_pp*pct,' ',an_pp*pct)\nprint(' ',ap_pn*pct,' ',an_pn*pct)\nprint('Accuracy: ', accuracy,'%')\n","sub_path":"movie_Naive-Bayes.py","file_name":"movie_Naive-Bayes.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"556562052","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/5/26 下午4:51\n# @Author : czw@rich-f.com\n# @Site : www.rich-f.com\n# @File : forms.py\n# @Software: 数据交换管理平台\n# @Function:数据服务目录系统模块-表单\n\n\nfrom flask_wtf import Form\nfrom wtforms import StringField, SelectField, BooleanField, PasswordField, RadioField, SelectMultipleField, SubmitField, \\\n TextAreaField, \\\n validators\nfrom wtforms.ext.sqlalchemy.fields import 
QuerySelectMultipleField\nfrom richdataxweb.database import db\nfrom wtforms.validators import ValidationError, DataRequired, Email, Length, EqualTo, Regexp\nfrom .models import ApiLocal, Apiapplication, Api\nfrom ..sysadmin.models import SysOrg\nimport re\nfrom richdataxweb.dbsrc.models import DBType\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom richdataxweb.service_directory.models import CentralNodeIPPort\n\n\nclass SelectFieldx(SelectField):\n def pre_validate(self, form):\n pass\n\n\nclass SelectMultipleFieldx(SelectMultipleField):\n def pre_validate(self, form):\n pass\n\n\nclass ApiForm(Form):\n api_name = StringField('远程机构名', validators=[DataRequired(message='远程机构名不能为空'), Length(min=1, max=255)])\n api_address = StringField('资源地址', validators=[DataRequired(message='资源地址不能为空'), Length(min=1, max=255)])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(ApiForm, self).__init__(*args, **kwargs)\n\n id = 0\n\n def setId(self, id):\n ApiForm.id = id\n\n def validate(self):\n initial_validation = super(ApiForm, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n t = SysOrg.query.filter_by(org_name=self.api_name.data).first()\n\n py_file = __import__(\"model_\" + 'api')\n Api = getattr(py_file, 'Api')\n url = get_url()\n db1 = create_engine(url)\n DBsession = sessionmaker(db1)\n dbsession = DBsession()\n\n try:\n t = dbsession.query(Api).get(ApiForm.id)\n initial_name = t.api_name\n if initial_name != self.api_name.data:\n t1 = dbsession.query(Api).filter_by(api_name=self.api_name.data).first()\n if t1:\n self.api_name.errors.append('该远程机构名已存在')\n errors += 1\n except:\n t = dbsession.query(Api).filter_by(api_name=self.api_name.data).first()\n if t:\n self.api_name.errors.append('该远程机构名已存在')\n errors += 1\n if errors > 0:\n return False\n else:\n return True\n\n\ndef get_url():\n initial_db = CentralNodeIPPort.query.first()\n\n if initial_db.db_type == 'mysql':\n url = \"mysql+pymysql://\" + initial_db.user + \":\" + initial_db.passwd + \"@\" + initial_db.central_node_ip + \":\" + initial_db.central_node_port + \"/\" + initial_db._db + \"?charset=utf8\"\n elif initial_db.db_type == 'oracle':\n url = \"oracle+cx_oracle://\" + initial_db.user + \":\" + initial_db.passwd + \"@\" + initial_db.central_node_ip + \":\" + initial_db.central_node_port + \"/\" + initial_db._db + \"?charset=utf8\"\n elif initial_db.db_type == 'sqlserver':\n url = \"sqlserver+pymssql://\" + initial_db.user + \":\" + initial_db.passwd + \"@\" + initial_db.central_node_ip + \":\" + initial_db.central_node_port + \"/\" + initial_db._db + \"?charset=utf8\"\n else:\n return None # 跳到报错页面or弹框提示错误\n return url\n\n\nclass ApiLocalForm(Form):\n api_id = StringField('资源标识符', validators=[DataRequired(message='资源标识符不能为空'), Length(min=1, max=25)])\n api_name = StringField('资源名称', validators=[DataRequired(message='资源名称不能为空'), Length(min=1, max=255)])\n src_type = SelectField('资源类型', choices=[('1', '文件'), ('2', '数据库')], default='1')\n opt_type = SelectField('资源操作类型',\n choices=[('1', '数据库表增加'), ('2', '数据库表修改'), ('3', '数据库表删除'), ('4', '数据库表查询'), ('5', '文件传输')],\n default='1')\n config = StringField('资源配置信息')\n update_date = StringField('修改时间')\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(ApiLocalForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n initial_validation = super(ApiLocalForm, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n t = 
ApiLocal.query.filter_by(api_id=self.api_id.data).first()\n if t:\n self.api_id.errors.append('该资源标识符已存在')\n errors += 1\n if errors > 0:\n return False\n else:\n return True\n\n\nclass ApiLocalRegForm(Form):\n org_id = SelectMultipleFieldx('有效机构')\n validity_s = StringField('有效期-始', validators=[DataRequired(message='开始有效期不能为空')])\n validity_e = StringField('有效期-终')\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(ApiLocalRegForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n initial_validation = super(ApiLocalRegForm, self).validate()\n if not initial_validation:\n return False\n else:\n return True\n\n\nclass EditApiLocalForm(Form):\n api_id = StringField('资源标识符', validators=[DataRequired(message='资源标识符不能为空'), Length(min=1, max=40)])\n api_name = StringField('资源名称', validators=[DataRequired(message='资源名称不能为空'), Length(min=1, max=255)])\n src_type = SelectField('资源类型', choices=[('1', '文件'), ('2', '数据库')])\n opt_type = SelectField('资源操作类型',\n choices=[('1', '数据库表增加'), ('2', '数据库表修改'), ('3', '数据库表删除'), ('4', '数据库表查询'), ('5', '文件传输')])\n config = StringField('资源配置信息')\n update_date = StringField('修改时间')\n\n # update_by = StringField('修改人')\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(EditApiLocalForm, self).__init__(*args, **kwargs)\n\n id = 0\n\n def setId(self, id):\n EditApiLocalForm.id = id\n\n def validate(self):\n initial_validation = super(EditApiLocalForm, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n\n try:\n api_local = ApiLocal.query.get(EditApiLocalForm.id)\n initial_name = api_local.api_name\n if initial_name != self.api_name.data:\n p = ApiLocal.query.filter_by(api_name=self.api_name.data).first()\n if p:\n self.api_name.errors.append('该资源名称已存在')\n errors += 1\n except:\n p = ApiLocal.query.filter_by(api_name=self.api_name.data).first()\n if p:\n self.api_name.errors.append('该资源名称已存在')\n errors += 1\n if errors > 0:\n return False\n else:\n return True\n\n\nclass ApiapplicationForm(Form):\n application = StringField('申请单位', validators=[DataRequired(message='申请单位不能为空'), Length(min=1, max=255)])\n mobile = StringField('联系电话', validators=[DataRequired(message='手机号码不能为空'), Length(min=1, max=255)])\n mail = StringField('邮箱', validators=[Email(message='请输入正确的邮箱地址'), Length(min=6, max=40)])\n app_introductions = TextAreaField('申请说明', validators=[DataRequired(message='申请说明不能为空'), Length(min=1, max=255)])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(ApiapplicationForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n initial_validation = super(ApiapplicationForm, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n # t = Apiapplication.query.filter_by(mobile=self.mobile.data).first()\n pattern = re.compile(r\"^(1[358][0-9]{9})$\") # 设置联系号码正则验证\n if len(self.mobile.data) != 0:\n if pattern.match(self.mobile.data):\n pass\n else:\n self.mobile.errors.append('请输入正确手机号码')\n errors += 1\n if errors > 0:\n return False\n else:\n return True\n\n\nclass FormCentralIPPort(Form):\n central_node_ip = StringField('远程节点IP:', validators=[DataRequired(message='远程节点IP不能为空'), Length(min=1, max=64)])\n central_node_port = StringField('端口号:', validators=[DataRequired(message='端口号不能为空'), Length(min=1, max=64)])\n user = StringField('用户名:', validators=[DataRequired(message='用户名不能为空'), Length(min=1, max=25)])\n passwd = StringField('密码:', validators=[DataRequired(message='密码不能为空'), Length(min=1, max=25)])\n db_type = 
SelectField('数据库类型:')\n db = StringField('数据库链接名:', validators=[DataRequired(message='数据库名称不能为空'), Length(min=1, max=25)])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(FormCentralIPPort, self).__init__(*args, **kwargs)\n\n self.db_type.choices = [(a.db_type, a.db_type) for a in DBType.query.all()]\n\n def validate(self):\n initial_validation = super(FormCentralIPPort, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n\n if errors > 0:\n return False\n else:\n return True\n\n\nclass ApiRemoteForm(Form):\n table_name = StringField('资源数据表', validators=[DataRequired(message='资源数据表不能为空'), Length(min=1, max=255)])\n api_remote_name = StringField('资源名称', validators=[DataRequired(message='资源名称不能为空'), Length(min=1, max=255)])\n opt_type = SelectField('资源类型', choices=[('数据库', '数据库'), ('文件', '文件')], default='数据库')\n mobile = StringField('手机号码')\n tel = StringField('电话号码')\n mail = StringField('邮箱地址')\n note = TextAreaField('备注说明')\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create instance.\"\"\"\n super(ApiRemoteForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n initial_validation = super(ApiRemoteForm, self).validate()\n errors = 0\n if not initial_validation:\n errors += 1\n\n # pattern = re.compile(r\"^(1[358][0-9]{9})$\") # 设置联系号码正则验证\n # if len(self.mobile.data) != 0:\n # if pattern.match(self.mobile.data):\n # pass\n # else:\n # self.mobile.errors.append('请输入正确手机号码')\n # errors += 1\n\n if errors > 0:\n return False\n else:\n return True\n","sub_path":"richdataxweb/service_directory/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"128914068","text":"from django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.views.generic import DeleteView\n\nfrom trix.trix_core.models import Course, User\n\n\nclass RemoveCourseAdminView(LoginRequiredMixin, DeleteView):\n model = Course\n template_name = \"trix_course/remove_course_admin.django.html\"\n user_id = None\n\n def get(self, request, **kwargs):\n self.user_id = kwargs['user_id']\n course = get_object_or_404(Course, id=kwargs['pk'])\n if request.user.is_course_owner(course):\n return super(RemoveCourseAdminView, self).get(request, **kwargs)\n else:\n raise PermissionDenied\n\n def get_context_data(self, **kwargs):\n context = super(RemoveCourseAdminView, self).get_context_data(**kwargs)\n context['admin_user'] = User.objects.get(id=self.user_id)\n return context\n\n def delete(self, request, *args, **kwargs):\n '''\n Removes a single given admin from the course.\n '''\n course_id = kwargs['pk']\n user_id = kwargs['user_id']\n course = Course.objects.get(id=course_id)\n admin_user = User.objects.get(id=user_id)\n\n # Check if we only want to remove as an owner\n if request.POST.get('owner'):\n course.owner.remove(admin_user)\n else:\n course.admins.remove(admin_user)\n if admin_user in course.owner.all():\n course.owner.remove(admin_user)\n\n return redirect('trix_course_admin', course_id=course_id)\n","sub_path":"trix/trix_course/views/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260681099","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 31 09:14:10 
2019\r\nhttps://github.com/jaimeps/adaboost-implementation/blob/master/adaboost.py\r\n@author: sprawesh\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport sklearn.metrics as sm\r\n\r\nplt.rcParams['font.family'] = 'Arial'\r\nplt.rcParams['axes.linewidth'] = 0.5\r\n\r\ndef error_rate(y_true, y_pred):\r\n return 1 - sm.accuracy_score(y_true, y_pred)\r\n\r\n#load data\r\ndf = pd.read_csv('E:\\\\fall-19\\\\ime692\\\\python-codes\\\\spam\\\\Spam.txt')\r\ndf.head()\r\nvar = list(df.columns)\r\nvar.remove('test')\r\nvar.remove('spam')\r\n\r\n#feature names\r\nfeatures = []\r\ndef feature_names(var):\r\n for item in var:\r\n i = item.rindex('_', 0, len(item))\r\n si = item[i:len(item)]\r\n features.append(si)\r\n\r\nfeature_names(var)\r\n#remove test and spam from columns\r\n#preparing response and predictor for train and test\r\nX, y = df[var].values, df['spam'].values\r\n\r\n#split X, and y for training and test\r\nX_train = X[df.test.values == 0]\r\nX_test = X[df.test.values == 1]\r\n\r\ny_train = y[df.test.values == 0]\r\ny_test = y[df.test.values == 1]\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n#random forest\r\ndef rf_predict_test(X_train, y_train, X_test, y_test):\r\n err_test = []\r\n for i in range(500, 501):\r\n rf_clf = RandomForestClassifier(n_estimators=i+1, random_state=10).fit(X_train, y_train) \r\n y_test_hat = rf_clf.predict(X_test)\r\n rf_error_rate = error_rate(y_test, y_test_hat)\r\n err_test.append((i, rf_error_rate)) \r\n if (i%20 == 0):\r\n print(f'Random Forest Test Error Rate: {rf_error_rate*100:.1f}%', \"index = \", i)\r\n return err_test\r\n\r\ndum = rf_predict_test(X_train, y_train, X_test, y_test)\r\n#convert tuple list into numpy array\r\ndt = np.dtype('int,float')\r\nxerr = np.array(dum, dtype=dt)\r\n \r\n \r\n#xerr['f0'][1:10]\r\n#xerr['f1'][1:10]\r\n\r\n#bagging\r\ndef bag_predict_test(X_train, y_train, X_test, y_test):\r\n err_test = []\r\n for i in range(500):\r\n rf_clf = RandomForestClassifier(n_estimators=i+1, random_state=10, max_features=None).fit(X_train, y_train) \r\n y_test_hat = rf_clf.predict(X_test)\r\n rf_error_rate = error_rate(y_test, y_test_hat)\r\n err_test.append((i, rf_error_rate)) \r\n if (i%20 == 0):\r\n print(f'Random Forest Test Error Rate: {rf_error_rate*100:.1f}%', \"index = \", i)\r\n return err_test \r\n\r\n\r\n#plot OOB results in the sample plot created earlier\r\nberr = np.array(bag_predict_test(X_train, y_train, X_test, y_test), dtype=dt)\r\n\r\n\r\n#adaboost implementation\r\n#SAMME is Adaboost for binary classification\r\ndef ada_predict_test(X_train, y_train, X_test, y_test, depth):\r\n err_test = []\r\n for i in range(300):\r\n ad_clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=depth), n_estimators=i+1, algorithm=\"SAMME\").fit(X_train, y_train)\r\n y_test_hat = ad_clf.predict(X_test)\r\n ad_error_rate = error_rate(y_test, y_test_hat)\r\n err_test.append((i, ad_error_rate))\r\n if (i%20 == 0):\r\n print(f'Boosting Test Error Rate: {ad_error_rate*100:.1f}%', \"index = \", i)\r\n return err_test\r\n\r\n\r\n#aerr = np.array(ada_te, dtype=dt)\r\n\r\n\r\nfor i in range(8):\r\n f_name = \"boost_test\"\r\n ada_te = ada_predict_test(X_train, y_train, X_test, y_test, i+1)\r\n ada_tr = ada_predict_test(X_train, y_train, X_train, y_train, i+1)\r\n aet = np.array(ada_te, dtype=dt)\r\n atr = np.array(ada_tr, dtype=dt)\r\n y = 
pd.DataFrame(aet)\r\n z = pd.DataFrame(atr)\r\n f_name = f_name + str(i+1) + \".csv\"\r\n y.to_csv('E:\\\\fall-19\\\\ime692\\\\python-codes\\\\spam\\\\'+f_name)\r\n z.to_csv('E:\\\\fall-19\\\\ime692\\\\python-codes\\\\spam\\\\'+\"boost_train\"+str(i+1) + \".csv\")\r\n\r\n\r\n\r\n#plot the results for random forest\r\nfig, ax = plt.subplots(figsize=(2.8, 2.8), dpi=110)\r\nfig.subplots_adjust(left=0, right=1, top=1, bottom=0)\r\n#ax.scatter(xerr['f0'], xerr['f1'], c='#0000FF', s=1)\r\nax.plot(xerr['f0'], xerr['f1'], c='#0000FF', linewidth = 0.6, label = 'Random Forest')\r\nax.plot(berr['f0'], berr['f1'], c='#00000F', linewidth = 0.6, label = 'Bagging') \r\n#ax.plot(aerr['f0'], aerr['f1'], c='#a000FE', linewidth = 0.6, label = 'Boosting') \r\nax.axhline(y=0.047, c='#0000FF', linewidth=0.6, linestyle='--') # green '#00FF00'\r\nax.axhline(y=0.055, c='#00000F', linewidth=0.6, linestyle='--')\r\nax.axhline(y=0.081, c='#a0000F', linewidth=0.6, linestyle='--') \r\n \r\nfor i in ax.get_yticklabels() + ax.get_xticklabels():\r\n i.set_fontsize(8)\r\nax.set_xlabel('Number of Trees', color='#ab00FF', fontsize=8)\r\nax.set_ylabel('Test error', color='#ab00FF', fontsize=8)\r\n#legend should come after giving label? \r\nplt.legend(loc='upper right', bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), fontsize = 8) \r\nplt.title('Spam Data', fontsize=10) \r\nplt.show()\r\n\r\n#variable importance plot\r\nrf_cx = RandomForestClassifier(n_estimators=100, random_state=10).fit(X_train, y_train)\r\n\r\ndef plot_relative_importance(importance):\r\n max_importance = np.max(importance)\r\n relative_importance = sorted(zip(100*importance/max_importance, features))\r\n yticks = np.arange(len(relative_importance))\r\n yticklabels = [ri[1] for ri in relative_importance][::-1]\r\n bars_sizes = [ri[0] for ri in relative_importance][::-1]\r\n \r\n fig, ax = plt.subplots(figsize=(4.3, 6.5), dpi=150)\r\n bars = ax.barh(yticks, bars_sizes, height=0.8, color='red')\r\n plt.setp(ax, yticks=yticks, yticklabels=yticklabels)\r\n ax.set_xlim([0, 100])\r\n ax.set_ylim([-0.5, 57])\r\n for e in ax.get_yticklabels()+ax.get_xticklabels():\r\n e.set_fontsize(6)\r\n ax.tick_params(left=False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n _ = ax.set_xlabel('Relative Importance', fontsize=7)\r\n\r\n\r\nplot_relative_importance(rf_cx.feature_importances_)\r\n\r\n\r\n \r\n","sub_path":"7th_Semester/IME692/lectures/l13/spam2.py","file_name":"spam2.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175457177","text":"import os\nimport traceback\nfrom os.path import join \n\nfrom flask import jsonify, make_response, request\nfrom werkzeug.utils import secure_filename\n\nfrom app.models import CheckIn, Flight, Ticket, User\nfrom app.routes import app, db\n\n\n@app.route('/flight/status/', methods=['GET'])\ndef get_flight_details(flight_id):\n auth_token = request.headers.get('Authorization')\n\n if auth_token:\n msg, user_id = User.decode_auth_token(auth_token)\n if user_id is not None:\n flight = Flight.query.filter(Flight.id == int(flight_id)).first()\n if flight is not None:\n response = {'status': 'OK', 'flight_details': str(flight)}\n return make_response(jsonify(response)), 200\n else:\n response = {'status': 'fail', 'msg': 'Flight not found'}\n return make_response(jsonify(response)), 404\n else:\n response = {'status': 'fail', 'msg': msg}\n return make_response(jsonify(response)), 401\n else:\n response = {'status': 
'fail', 'msg': 'Please provide a valid token.'}\n return make_response(jsonify(response)), 401\n\n\n@app.route('/flight/all/', methods=['GET'])\ndef get_flights():\n auth_token = request.headers.get('Authorization')\n\n if auth_token:\n msg, user_id = User.decode_auth_token(auth_token)\n if user_id is not None:\n flights = Flight.query.all()\n\n flights = [str(flight) for flight in flights]\n response = {'status': 'OK', 'data': flights}\n return make_response(jsonify(response)), 200\n else:\n response = {'status': 'fail', 'msg': msg}\n return make_response(jsonify(response)), 401\n else:\n response = {'status': 'fail', 'msg': 'Please provide a valid token.'}\n return make_response(jsonify(response)), 401\n\n\n@app.route('/flight/check_in/', methods=['POST'])\ndef check_in(ticket_id):\n auth_token = request.headers.get('Authorization')\n data = request.get_json()\n\n if auth_token:\n msg, user_id = User.decode_auth_token(auth_token)\n if user_id is not None:\n ticket = Ticket.query.filter(Ticket.id == ticket_id).first()\n if ticket is not None:\n try:\n check_in = CheckIn(\n ticket_id=ticket_id,\n passport_number=data.get('passport_number'),\n carry_on=data.get('carry_on'))\n CheckIn.save(check_in)\n response = {\n 'status': 'OK',\n 'msg': 'Checked In!',\n 'CheckIn ID': int(check_in.id)\n }\n return make_response(jsonify(response)), 201\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n print(e)\n response = {\n 'status': 'error',\n 'msg': 'Something went wrong. Please try again.'\n }\n return make_response(jsonify(response)), 400\n else:\n response = {'status': 'error', 'msg': 'Ticket not found.'}\n return make_response(jsonify(response)), 404\n else:\n response = {'status': 'error', 'msg': msg}\n return make_response(jsonify(response)), 401\n else:\n response = {'status': 'OK', 'msg': 'Please provide a valid token.'}\n return make_response(jsonify(response)), 401\n\n\n@app.route('/flight/check_in/upload_photo/', methods=['PUT'])\ndef handle_upload(checkIn_id):\n def validate_img():\n img = request.files.get('image')\n if img is not None:\n if img.filename != '':\n return img, True\n else:\n return 'No image selected.', False\n else:\n return 'No image uploaded.', False\n\n auth_token = request.headers.get('Authorization')\n\n if auth_token:\n msg, user_id = User.decode_auth_token(auth_token)\n if user_id is not None:\n check_in = CheckIn.query.filter(CheckIn.id == checkIn_id).first()\n if check_in is not None:\n a, b = validate_img()\n if b:\n file_name = secure_filename(a.filename)\n check_in.image_url = file_name\n\n os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)\n a.save(join(app.config['UPLOAD_FOLDER'], file_name))\n CheckIn.save(check_in)\n\n response = {'status': 'OK', 'msg': 'Image uploaded!'}\n return make_response(jsonify(response)), 200\n else:\n response = {'status': 'error', 'msg': a}\n return make_response(jsonify(response)), 400\n else:\n response = {'status': 'error', 'msg': 'Please Check In!'}\n return make_response(jsonify(response)), 404\n else:\n response = {'status': 'error', 'msg': msg}\n return make_response(jsonify(response)), 401\n else:\n response = {'status': 'OK', 'msg': 'Please provide a valid token.'}\n return make_response(jsonify(response)), 401\n","sub_path":"app/routes/checkin_controllers.py","file_name":"checkin_controllers.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311637834","text":"__author__ = 'Evan'\n#---------- Import 
libraries ---------\nimport pygame\nimport time\n#------------ Constants -------------\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\nSCREEN_WIDTH = 500\nSCREEN_HEIGHT = 400\nsize = [SCREEN_WIDTH, SCREEN_HEIGHT]\nscreen = pygame.display.set_mode(size)\n\nplayer1_image = pygame.image.load(\"D:\\\\Users\\\\Evan\\\\Documents\\\\Pygame Projects\\\\Python Tutorials\\\\practice game\\\\player1.png\").convert()\nplayer1_image.set_colorkey(WHITE)\nplayer2_image = pygame.image.load(\"D:\\\\Users\\\\Evan\\\\Documents\\\\Pygame Projects\\\\Python Tutorials\\\\practice game\\\\player2.png\").convert()\nplayer2_image.set_colorkey(WHITE)\nscore_image = pygame.image.load(\"D:\\\\Users\\\\Evan\\\\Documents\\\\Pygame Projects\\\\Python Tutorials\\\\practice game\\\\score.png\").convert()\nscore_image.set_colorkey(WHITE)\nscore_blue = pygame.image.load(\"D:\\\\Users\\\\Evan\\\\Documents\\\\Pygame Projects\\\\Python Tutorials\\\\practice game\\\\scoreBlue.png\").convert()\nscore_blue.set_colorkey(WHITE)\nscore_red = pygame.image.load(\"D:\\\\Users\\\\Evan\\\\Documents\\\\Pygame Projects\\\\Python Tutorials\\\\practice game\\\\scoreRed.png\").convert()\nscore_red.set_colorkey(WHITE)\n\npygame.key.set_repeat()\n\n# ----------- Functions -----------\n\ndef text_objects(text, font, colour):\n text_surface = font.render(text, True, colour)\n return text_surface, text_surface.get_rect()\n\n\ndef display_message(text, size, colour):\n large_text = pygame.font.Font('freesansbold.ttf', size)\n text_surf, text_rect = text_objects(text, large_text, colour)\n text_rect.center = ((SCREEN_WIDTH/2),(SCREEN_HEIGHT/2))\n screen.blit(text_surf, text_rect)\n\n pygame.display.update()\n\n time.sleep(2)\n\n\n# ------------ Classes ----------------\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, image_name):\n super().__init__()\n self.image = image_name\n self.rect = self.image.get_rect()\n self.rect.size = (56, 56)\n self.start_pos = [50, 150]\n self.x_coord = self.start_pos[0]\n self.y_coord = self.start_pos[1]\n\n def pinned(self, pin_start, pin_length):\n if pin_length - pin_start > 3:\n return True\n else:\n return False\n\n def reset_position(self):\n self.x_coord = self.start_pos[0]\n self.y_coord = self.start_pos[1]\n\n def update(self):\n self.rect[0] = self.x_coord\n self.rect[1] = self.y_coord\n\n\nclass Scoreboard(pygame.sprite.Sprite):\n\n def __init__(self, position):\n super().__init__()\n self.score_list = [0, 0, 0]\n self.image = score_image\n self.rect = self.image.get_rect()\n self.rect[0] = position[0]\n self.rect[1] = position[1]\n self.blue_tally = 0\n self.red_tally = 0\n def draw_score(self, winner):\n if winner == \"player1\":\n self.image = score_blue\n elif winner == \"player2\":\n self.image = score_red\n\n def update_score(self, winner):\n if winner == \"player1\":\n if self.score_list[0] == 0:\n self.score_list[0] = 1\n elif self.score_list[1] == 0:\n self.score_list[1] = 1\n elif self.score_list[2] == 0:\n self.score_list[2] = 1\n\n elif winner == \"player2\":\n if self.score_list[0] == 0:\n self.score_list[0] = 2\n elif self.score_list[1] == 0:\n self.score_list[1] = 2\n elif self.score_list[2] == 0:\n self.score_list[2] = 2\n\n def check_winner(self):\n if self.score_list[1] != 0:\n for i in range(len(self.score_list)):\n if self.score_list[i] == 1:\n self.blue_tally += 1\n if self.blue_tally == 2:\n return 1\n elif self.score_list[i] == 2:\n self.red_tally += 1\n if self.red_tally == 2:\n return 2\n\n\nclass Game():\n \"\"\" This class 
represents an instance of the game. If we need to\n reset the game we'd just need to create a new instance of this\n class. \"\"\"\n def __init__(self):\n \"\"\" Constructor. Create all our attributes and initialize\n the game. \"\"\"\n #Sprite Lists\n self.all_sprites_list = pygame.sprite.Group()\n\n self.score_board1 = Scoreboard([195, 10])\n self.score_board2 = Scoreboard([235, 10])\n self.score_board3 = Scoreboard([275, 10])\n self.scoreboard_list = [self.score_board1, self.score_board2, self.score_board3]\n\n self.player1 = Player(player1_image)\n self.player2 = Player(player2_image)\n self.player2.x_coord = 370\n\n self.all_sprites_list.add(self.player1)\n self.all_sprites_list.add(self.player2)\n self.all_sprites_list.add(self.score_board1)\n self.all_sprites_list.add(self.score_board2)\n self.all_sprites_list.add(self.score_board3)\n\n self.game_over = False\n self.round_number = 0\n self.winner = None\n\n self.p1_x_speed = 0\n self.p1_y_speed = 0\n self.p2_x_speed = 0\n self.p2_y_speed = 0\n\n self.p1_pin_start_time = 0\n self.p1_pin_duration = 0\n self.p1_pin_pressed = False\n self.p2_pin_start_time = 0\n self.p2_pin_duration = 0\n self.p2_pin_pressed = False\n self.collided = False\n\n def process_events(self):\n \"\"\" Process all of the events. Return a \"True\" if we need\n to close the window. \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n elif event.type == pygame.KEYDOWN:\n # ----------- player 1 controls ---------------\n if event.key == pygame.K_a:\n self.p1_x_speed = -5\n elif event.key == pygame.K_d:\n self.p1_x_speed = 5\n elif event.key == pygame.K_s:\n self.p1_y_speed = 5\n elif event.key == pygame.K_w:\n self.p1_y_speed = -5\n elif event.key == pygame.K_SPACE:\n if self.collided:\n self.p1_pin_start_time = time.time()\n self.p1_pin_pressed = True\n # ----------- player 2 controls ---------------\n\n if event.key == pygame.K_LEFT:\n self.p2_x_speed = -5\n elif event.key == pygame.K_RIGHT:\n self.p2_x_speed = 5\n elif event.key == pygame.K_DOWN:\n self.p2_y_speed = 5\n elif event.key == pygame.K_UP:\n self.p2_y_speed = -5\n elif event.key == pygame.K_RSHIFT:\n if self.collided:\n self.p2_pin_start_time = time.time()\n self.p2_pin_pressed = True\n\n elif event.type == pygame.KEYUP:\n # ----------- player 1 controls ---------------\n if event.key == pygame.K_a or event.key == pygame.K_d:\n self.p1_x_speed = 0\n if event.key == pygame.K_w or event.key == pygame.K_s:\n self.p1_y_speed = 0\n if event.key == pygame.K_SPACE:\n self.p1_pin_start_time = 0\n self.p1_pin_pressed = False\n\n # ----------- player 2 controls ---------------\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n self.p2_x_speed = 0\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n self.p2_y_speed = 0\n if event.key == pygame.K_SPACE:\n self.p2_pin_start_time = 0\n self.p2_pin_pressed = False\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.game_over:\n main()\n def run_logic(self):\n \"\"\"\n This method is run each time through the frame. 
It\n updates positions and checks for collisions.\n \"\"\"\n if not self.game_over:\n self.player1.x_coord += self.p1_x_speed\n self.player1.y_coord += self.p1_y_speed\n self.player2.x_coord += self.p2_x_speed\n self.player2.y_coord += self.p2_y_speed\n # ------Player 1------\n if self.player1.x_coord > SCREEN_WIDTH - 80:\n self.player1.x_coord = SCREEN_WIDTH - 80\n elif self.player1.x_coord < 0:\n self.player1.x_coord = 0\n if self.player1.y_coord > SCREEN_HEIGHT - 80:\n self.player1.y_coord = SCREEN_HEIGHT - 80\n elif self.player1.y_coord < 0:\n self.player1.y_coord = 0\n # ----Player 2 ------\n if self.player2.x_coord > SCREEN_WIDTH - 80:\n self.player2.x_coord = SCREEN_WIDTH - 80\n elif self.player2.x_coord < 0:\n self.player2.x_coord = 0\n if self.player2.y_coord > SCREEN_HEIGHT - 80:\n self.player2.y_coord = SCREEN_HEIGHT - 80\n elif self.player2.y_coord < 0:\n self.player2.y_coord = 0\n\n self.collided = pygame.sprite.collide_circle(self.player1, self.player2)\n\n if self.p1_pin_pressed and self.collided:\n self.p1_pin_duration = time.time()\n else:\n self.p1_pin_duration = time.time()\n self.p1_pin_start_time = self.p1_pin_duration\n self.p1_pin_pressed = False\n\n if self.p2_pin_pressed and self.collided:\n self.p2_pin_duration = time.time()\n else:\n self.p2_pin_duration = time.time()\n self.p2_pin_start_time = self.p1_pin_duration\n self.p2_pin_pressed = False\n\n if self.player1.pinned(self.p1_pin_start_time, self.p1_pin_duration):\n self.player1.reset_position()\n self.player2.reset_position()\n self.player2.x_coord = 370\n self.score_board1.update_score(\"player1\")\n self.round_number += 1\n if self.round_number == 1:\n self.score_board1.draw_score(\"player1\")\n elif self.round_number == 2:\n self.score_board2.draw_score(\"player1\")\n elif self.round_number == 3:\n self.score_board3.draw_score(\"player1\")\n\n elif self.player2.pinned(self.p2_pin_start_time, self.p2_pin_duration):\n self.player1.reset_position()\n self.player2.reset_position()\n self.player2.x_coord = 370\n self.score_board1.update_score(\"player2\")\n self.round_number += 1\n print(self.round_number)\n if self.round_number == 1:\n self.score_board1.draw_score(\"player2\")\n elif self.round_number == 2:\n self.score_board2.draw_score(\"player2\")\n elif self.round_number == 3:\n self.score_board3.draw_score(\"player2\")\n\n print(self.score_board1.score_list)\n self.all_sprites_list.update()\n\n def display_frame(self, screen):\n \"\"\" Display everything to the screen for the game. \"\"\"\n screen.fill(WHITE)\n if not self.game_over:\n if self.player1.pinned(self.p1_pin_start_time, self.p1_pin_duration) and self.score_board1.check_winner() == 1:\n display_message(\"Blue Wins\", 65, BLUE)\n self.game_over = True\n elif self.player2.pinned(self.p2_pin_start_time, self.p2_pin_duration) and self.score_board1.check_winner() == 2:\n display_message(\"Red Wins\", 65, RED)\n self.game_over = True\n self.all_sprites_list.draw(screen)\n pygame.display.flip()\n else:\n display_message(\"Click to Restart\", 40, BLACK)\n# ------------ Main function -----------\ndef main():\n \"\"\" Main program function. 
\"\"\"\n # Initialize Pygame and set up the window\n pygame.init()\n pygame.display.set_caption(\"Practice Game\")\n pygame.mouse.set_visible(False)\n\n done = False\n clock = pygame.time.Clock()\n\n # Create our objects and set the data\n\n # Create an instance of the Game class\n game = Game()\n # --------- Main game loop ----------\n while not done:\n done = game.process_events()\n game.process_events()\n game.run_logic()\n\n game.display_frame(screen)\n\n clock.tick(60)\nif __name__ == \"__main__\":\n main()\n\npygame.quit()\nquit()\n","sub_path":"Practice game.py","file_name":"Practice game.py","file_ext":"py","file_size_in_byte":12663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"252837300","text":"import os\r\nimport discord\r\nfrom dotenv import load_dotenv\r\nfrom discord.ext import commands\r\nfrom pydrive.auth import GoogleAuth\r\nfrom pydrive.drive import GoogleDrive\r\n\r\ngauth = GoogleAuth()\r\ndrive = GoogleDrive(gauth)\r\n\r\n\r\nload_dotenv()\r\nTOKEN = os.getenv('DISCORD_TOKEN')\r\n\r\nbot = commands.Bot(command_prefix = '!')\r\n\r\n@bot.event\r\nasync def on_ready(): # event handler (create another @client.event to check for a new event)\r\n print(f'{bot.user} has connected to Discord!')\r\n general = bot.get_channel(208052054734667777)\r\n await general.send(f'{bot.user} has connected to Discord!')\r\n\r\n@bot.command(name = 'pull')\r\nasync def pull_img(ctx, img):\r\n file_list = drive.ListFile({'q': f\"title contains '{img}' and trashed=false\"}).GetList()\r\n file_id = file_list[0]['id'] # get the file ID\r\n file = drive.CreateFile({'id': file_id})\r\n file_name = f'{img}.png'\r\n file.GetContentFile(file_name)\r\n await ctx.send(file=discord.File(file_name))\r\n\r\n@bot.command(name = 'add')\r\nasync def add_image(ctx, img):\r\n await save(img)\r\n\r\n\r\n@bot.event\r\nasync def on_error(event, *args, **kwargs):\r\n with open('err.log', 'a') as f:\r\n if event == 'on_message':\r\n f.write(f'Unhandled message: {args[0]}\\n')\r\n else:\r\n raise\r\n\r\nbot.run(TOKEN)","sub_path":"Pix_Bot.py","file_name":"Pix_Bot.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247291090","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor : Terry \n\"\"\"\nfrom __future__ import division # Correct Python 2 division\n\nimport numpy as np\n\nimport config_global as sp\n\n\nfrom threading import Thread\n\nimport time\n\n\nclass LiveData(Thread):\n\n\tdef __init__(self, nChans=224, dSource=None, dSourceSampleRate=None):\n\t\tThread.__init__(self)\n\t\tself.startTime = time.time()\n\t\t# self.bufferChunkSize = bufferChunkSize\n\n\t\tif dSource:\n\t\t\tself.dataSource = dSource\t\t\t\n\t\t\tself.timeBetweenSamples = 1 / self.dSourceSampleRate\n\t\t\tself.maxIndex = 999999999999\n\n\t\telse:\n\t\t\tself.dataSource = sp.dat['SigPy']['dataFilt'] \t\t\t\n\t\t\tself.dSourceSampleRate = 1 / sp.dat['SigPy']['timeBetweenSamples']\n\t\t\tself.timeBetweenSamples = sp.dat['SigPy']['timeBetweenSamples']\n\t\t\tself.maxIndex = self.dataSource.shape[1]\n\t\t\tself.bufferedChunk = self.dataSource[:,0]\n\n\n\t\tself.nChans = self.dataSource.shape[0]\n\n\n\t\tself.stampToIndexMultiplier = 1 / self.dSourceSampleRate\n\t\tself.priorBufferedChunk = np.copy(self.bufferedChunk).reshape(-1,1)\n\n\t\t# set time to sleep for roughly as long as it takes to build up the buffer\n\t\t# self.timeToSleep = self.bufferChunkSize / self.dSourceSampleRate 
\n\t\tself.timeToSleep = 0.1\n\t\t\n\t\tself.lastIndex = 0\n\t\tself.bufferedChunk = np.zeros(shape=(self.nChans,1))\n\n\t\tself.newChunk = False\n\t\tself.shouldStop = False\n\n\t\tself.lastCaptureTime = time.time()\n\n\n\tdef run(self):\n\t\t''' Start pulling in live data (or simulation of live data) '''\n\t\tprint(\"Starting thread for live data capture:\")\n\t\tself.lastCaptureTime = time.time()\n\n\t\twhile True:\n\n\t\t\t# Get time since capture began\n\t\t\tif (time.time() - self.lastCaptureTime) >= self.timeBetweenSamples :\n\n\t\t\t\tself.lastIndex += 1 \t\t\t\t\n\n\t\t\t\t#Add frame to buffer\n\n\t\t\t\tself.bufferedChunk = np.hstack((self.bufferedChunk, self.dataSource[:,self.lastIndex].reshape(-1,1))) #, axis=1)\n\n\t\t\t\t# Update capture time\n\t\t\t\tself.lastCaptureTime = self.lastCaptureTime + self.timeBetweenSamples\n\n\t\t\tif self.lastIndex >= self.maxIndex :\n\t\t\t\tprint(\"Live capture thread terminating, reached last index.\")\n\t\t\t\treturn\n\n\t\t\tif self.shouldStop :\n\t\t\t\tprint(\"Live capture thread terminating, signaled to stop by another thread.\")\n\t\t\t\treturn\n\n\t\t\t# nSamplesBuffered = self.currIndex - self.lastIndexPush\n\t\t\t# print(\"lastIndex: \", self.lastIndex)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"signal_processing/livedata.py","file_name":"livedata.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"188336951","text":"#!/usr/local/Cellar/python3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport time\nimport queue\n\n\ndef getProxy():\n \"\"\"\n 下载代理的ip\n \"\"\"\n api_url = '请填写代理IP的提取链接,于IP提取网站上生成'\n result = requests.get(api_url)\n proxy_json = result.json()\n proxy = proxy_json['result']\n\n for proxy_ip in proxy:\n extract_ip = proxy_ip['ip:port']\n url_queue.put(extract_ip)\n\n\ndef verification():\n \"\"\"\n 检测代理的ip是否可用\n \"\"\"\n success_count = 0\n failure_count = 0\n run_time = time.strftime(\"%Y%m%d\", time.localtime())\n path = '/Users/wangjiacan/Desktop/shawn/爬取资料/ip/ip_' + run_time + '.txt'\n\n url = 'https://httpbin.org/get?show_env=1'\n\n headers = {'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Mobile Safari/537.36'}\n\n for i in range(url_queue.qsize()):\n ip = url_queue.get()\n try:\n proxies = {\n \"http\": \"http://\" + ip,\n \"https\": \"http://\" + ip\n }\n r = requests.get(url, headers=headers, proxies=proxies, timeout=20)\n r.raise_for_status()\n\n with open(path, 'a') as f:\n f.write(ip + \"\\n\")\n f.close()\n success_count += 1\n print(ip + \"提取成功\")\n except Exception:\n failure_count += 1\n print(ip + \"无效\")\n\n print(\"代理ip成功:爬取成功{}条数据,爬取失败{}条数据\".format(success_count, failure_count))\n\n\nif __name__ == '__main__':\n url_queue = queue.Queue()\n print(\"代理ip提取开始...\")\n getProxy()\n print(\"代理ip下载完成,开始检测ip可用性\")\n verification()\n print(\"代理ip提取结束...\")\n","sub_path":"getpoxy.py","file_name":"getpoxy.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328279993","text":"\"\"\"\nThis module provides a remote procedure call (RPC) mechanism over sockets\nbetween conventional computers (PCs) running Python. It strives to be\ntransparent and uses ``artiq.management.pyon`` internally so that e.g. 
Numpy\narrays can be easily used.\n\n\"\"\"\n\nimport socket\nimport asyncio\nimport traceback\n\nfrom artiq.management import pyon\n\n\nclass RemoteError(Exception):\n \"\"\"Raised when a RPC failed or raised an exception on the remote (server)\n side.\n\n \"\"\"\n pass\n\n\nclass IncompatibleServer(Exception):\n \"\"\"Raised by the client when attempting to connect to a server that does\n not have the expected type.\n\n \"\"\"\n pass\n\n\n_init_string = b\"ARTIQ pc_rpc\\n\"\n\n\nclass Client:\n \"\"\"This class proxies the methods available on the server so that they\n can be used as if they were local methods.\n\n For example, if the server provides method ``foo``, and ``c`` is a local\n ``Client`` object, then the method can be called as: ::\n\n result = c.foo(param1, param2)\n\n The parameters and the result are automatically transferred with the\n server.\n\n Only methods are supported. Attributes must be accessed by providing and\n using \"get\" and/or \"set\" methods on the server side.\n\n At object initialization, the connection to the remote server is\n automatically attempted. The user must call ``close_rpc`` to\n free resources properly after initialization completes successfully.\n\n :param host: Identifier of the server. The string can represent a\n hostname or a IPv4 or IPv6 address (see\n ``socket.create_connection`` in the Python standard library).\n :param port: TCP port to use.\n :param expected_id_type: Server type to expect. ``IncompatibleServer`` is\n raised when the types do not match. Use ``None`` to accept any server\n type.\n\n \"\"\"\n def __init__(self, host, port, expected_id_type):\n self.socket = socket.create_connection((host, port))\n self.socket.sendall(_init_string)\n self._identify(expected_id_type)\n\n def get_rpc_id(self):\n \"\"\"Returns a dictionary containing the identification information of\n the server.\n\n \"\"\"\n return self._server_identification\n\n def close_rpc(self):\n \"\"\"Closes the connection to the RPC server.\n\n No further method calls should be done after this method is called.\n\n \"\"\"\n self.socket.close()\n\n def _send_recv(self, obj):\n line = pyon.encode(obj) + \"\\n\"\n self.socket.sendall(line.encode())\n\n buf = self.socket.recv(4096).decode()\n while \"\\n\" not in buf:\n more = self.socket.recv(4096)\n if not more:\n break\n buf += more.decode()\n obj = pyon.decode(buf)\n\n return obj\n\n def _identify(self, expected_id_type):\n obj = {\"action\": \"identify\"}\n self._server_identification = self._send_recv(obj)\n if (expected_id_type is not None\n and self._server_identification[\"type\"] != expected_id_type):\n raise IncompatibleServer\n\n def _do_rpc(self, name, args, kwargs):\n obj = {\"action\": \"call\", \"name\": name, \"args\": args, \"kwargs\": kwargs}\n obj = self._send_recv(obj)\n if obj[\"result\"] == \"ok\":\n return obj[\"ret\"]\n elif obj[\"result\"] == \"error\":\n raise RemoteError(obj[\"message\"] + \"\\n\" + obj[\"traceback\"])\n else:\n raise ValueError\n\n def __getattr__(self, name):\n def proxy(*args, **kwargs):\n return self._do_rpc(name, args, kwargs)\n return proxy\n\n\nclass Server:\n \"\"\"This class creates a TCP server that handles requests coming from\n ``Client`` objects.\n\n The server is designed using ``asyncio`` so that it can easily support\n multiple connections without the locking issues that arise in\n multi-threaded applications. 
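A minimal,\n    illustrative setup (the ``Echo`` target, port number and event loop here are\n    placeholders, not part of this module) could be: ::\n\n        class Echo:\n            def echo(self, x):\n                return x\n\n        loop = asyncio.get_event_loop()\n        server = Server(Echo(), \"echo\")\n        loop.run_until_complete(server.start(\"::1\", 3251))\n\n    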
Multiple connection support is useful even in\n simple cases: it allows new connections to be be accepted even when the\n previous client failed to properly shut down its connection.\n\n :param target: Object providing the RPC methods to be exposed to the\n client.\n :param id_type: A string identifying the server type. Clients use it to\n verify that they are connected to the proper server.\n :param id_parameters: An optional human-readable string giving more\n information about the parameters of the server.\n\n \"\"\"\n def __init__(self, target, id_type, id_parameters=None):\n self.target = target\n self.id_type = id_type\n self.id_parameters = id_parameters\n self._client_tasks = set()\n\n @asyncio.coroutine\n def start(self, host, port):\n \"\"\"Starts the server.\n\n The user must call ``stop`` to free resources properly after this\n method completes successfully.\n\n This method is a `coroutine`.\n\n :param host: Bind address of the server (see ``asyncio.start_server``\n from the Python standard library).\n :param port: TCP port to bind to.\n\n \"\"\"\n self.server = yield from asyncio.start_server(self._handle_connection,\n host, port)\n\n @asyncio.coroutine\n def stop(self):\n \"\"\"Stops the server.\n\n \"\"\"\n for task in self._client_tasks:\n task.cancel()\n self.server.close()\n yield from self.server.wait_closed()\n del self.server\n\n def _client_done(self, task):\n self._client_tasks.remove(task)\n\n def _handle_connection(self, reader, writer):\n task = asyncio.Task(self._handle_connection_task(reader, writer))\n self._client_tasks.add(task)\n task.add_done_callback(self._client_done)\n\n @asyncio.coroutine\n def _handle_connection_task(self, reader, writer):\n try:\n line = yield from reader.readline()\n if line != _init_string:\n return\n while True:\n line = yield from reader.readline()\n if not line:\n break\n obj = pyon.decode(line.decode())\n action = obj[\"action\"]\n if action == \"call\":\n try:\n method = getattr(self.target, obj[\"name\"])\n ret = method(*obj[\"args\"], **obj[\"kwargs\"])\n obj = {\"result\": \"ok\", \"ret\": ret}\n except Exception as e:\n obj = {\"result\": \"error\",\n \"message\": type(e).__name__ + \": \" + str(e),\n \"traceback\": traceback.format_exc()}\n line = pyon.encode(obj) + \"\\n\"\n writer.write(line.encode())\n elif action == \"identify\":\n obj = {\"type\": self.id_type}\n if self.id_parameters is not None:\n obj[\"parameters\"] = self.id_parameters\n line = pyon.encode(obj) + \"\\n\"\n writer.write(line.encode())\n finally:\n writer.close()\n\n\ndef simple_server_loop(target, id_type, host, port, id_parameters=None):\n \"\"\"Runs a server until an exception is raised (e.g. 
the user hits Ctrl-C).\n\n See ``Server`` for a description of the parameters.\n\n \"\"\"\n loop = asyncio.get_event_loop()\n try:\n server = Server(target, id_type, id_parameters)\n loop.run_until_complete(server.start(host, port))\n try:\n loop.run_forever()\n finally:\n loop.run_until_complete(server.stop())\n finally:\n loop.close()\n","sub_path":"artiq/management/pc_rpc.py","file_name":"pc_rpc.py","file_ext":"py","file_size_in_byte":7608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"559966794","text":"from pyswip import Prolog, Query, Variable, Functor, call\nimport tempfile\nimport re\nimport os\nfrom ast import literal_eval\n\nclass PrologConnector:\n\n def __init__(self):\n self.prolog = Prolog()\n\n @staticmethod\n def create_temp_file(code) -> str: # create temp file for Prolog code\n\n if 'connect_files' not in os.listdir(): # custom dir\n try:\n os.mkdir('connect_files')\n except PermissionError:\n raise PermissionError(\"Don't have permission for create a dir\")\n\n with tempfile.NamedTemporaryFile('w', delete=False, suffix='.pro', dir='connect_files') as file:\n file.write(code)\n return file.name[file.name.rfind('\\\\') + 1::]\n\n def consult_code(self, code: str, delete=True): # consult Prolog code via temp file\n self.consult_file('connect_files/' +\n self.create_temp_file(code), delete)\n\n def consult_file(self, file_name: str, delete=False):\n self.prolog.consult(file_name)\n if delete:\n os.remove(file_name)\n\n # warning! can be broken if maxresult = -1\n def get_n_ans(self, instructions: str, maxresult=1, **kwargs) -> [dict]:\n # query for prolog\n return self.prolog.query(instructions, maxresult=maxresult, **kwargs)\n\n def get_n_ans_new(self, instructions: str, maxresults=-1, solves=True) -> list:\n ' functors and items of predicates, variables'\n terms, vars, statements = self.parse_ins(instructions)\n vars_ans = [] if solves else {i[0]: []\n for i in vars} # list/dict of variable values\n statements_ans = {} # list of statements\n if terms:\n q = Query(*terms) # make query\n while q.nextSolution() and maxresults: # find solutions\n maxresults -= 1\n if solves:\n # append values\n vars_ans.append({k: v.value for k, v in vars})\n else:\n for k, v in vars:\n if v.value not in vars_ans[k]:\n vars_ans[k].append(v.value)\n q.closeQuery()\n if statements:\n for statement in statements:\n statements_ans.update({statement[1]: call(statement[0])})\n return vars_ans, statements_ans\n\n @staticmethod\n def parse_ins(instruction) -> list and list and list:\n if instruction[-1] != ';':\n instruction += ';'\n terms = [] # if need var(s)\n vars = []\n statements = [] # if need True or False\n pnames = re.compile(r'\\[.+\\]|[\\w\\d]+') # find names(vars|lists|strings|ints) in atoms\n plist = re.compile(r'\\[.+\\]') # find list\n # find predirects\n for pred, atoms in re.findall(r'([^\\(\\)\\,\\s]+|\\S)(\\([^\\)]+\\))\\;', instruction):\n names = pnames.findall(atoms)\n items = []\n there_is_var = False\n for atom in names:\n atom = atom.strip()\n\n if atom[0].isupper(): # check for var\n any_var = Variable() # link to Prologs var\n items.append(any_var)\n vars.append((atom, any_var))\n there_is_var = True\n\n elif atom.isdigit(): # check for int\n items.append(int(atom))\n\n elif plist.search(atom): # check for list\n items.append(literal_eval(atom))\n\n else:\n try: # check for float\n items.append(float(atom))\n except ValueError:\n items.append(atom)\n if there_is_var:\n terms.append(Functor(pred, 
len(names))(*items))\n else:\n statements.append(\n (Functor(pred, len(names))(*items), pred + atoms))\n return terms, vars, statements\n\n def make_req(self, command: str, solves=False, **kwargs): # with custom parameters\n 'for all solves of only 1 request(may be unused)'\n a = self.get_n_ans_new(command, solves=solves, **kwargs)\n # getting only 1 result\n if a[0]:\n return a[0]\n elif a[1]:\n for i in a[1].values():\n return i\n else:\n return None\n\n def assert_code(self, ins: str):\n \"\"\"\n for assertion facts(the same as consult_code)\n \"\"\"\n for i in ins.split(';'):\n self.prolog.assertz(i)\n\n def retract_code(self, ins: str, all=False):\n g = ins.split(';')\n if all:\n for i in g:\n self.prolog.retractall(i)\n else:\n for i in g:\n self.prolog.retract(i)\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"288613827","text":"from dll_stack import Stack\nfrom dll_queue import Queue\n# import sys\n# sys.path.append('../queue_and_stack')\n\n\nclass BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n if value < self.value:\n if not self.left:\n self.left = BinarySearchTree(value)\n else:\n self.left.insert(value)\n else:\n if not self.right:\n self.right = BinarySearchTree(value)\n else:\n self.right.insert(value)\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n if target == self.value:\n return True\n elif target > self.value:\n return self.right.contains(target) if self.right else False\n else:\n return self.left.contains(target) if self.left else False\n\n # Return the maximum value found in the tree\n def get_max(self):\n # return self.right.get_max() if self.right else self.value\n return self.value if not self.right else self.right.get_max()\n\n # Call the function `cb` on the value of each node\n # You may use a recursive or iterative approach\n\n def for_each(self, cb):\n cb(self.value)\n self.left and self.left.for_each(cb)\n self.right and self.right.for_each(cb)\n\n # DAY 2 Project -----------------------\n\n # Print all the values in order from low to high\n # Hint: Use a recursive, depth first traversal\n def in_order_print(self, node):\n node.left and self.in_order_print(node.left)\n print(node.value)\n node.right and self.in_order_print(node.right)\n\n # Print the value of every node, starting with the given node,\n # in an iterative breadth first traversal\n def bft_print(self, node):\n to_print = Queue()\n to_print.enqueue(node)\n while to_print.len() > 0:\n dequeued_node = to_print.dequeue()\n print(dequeued_node.value)\n dequeued_node.left and to_print.enqueue(dequeued_node.left)\n dequeued_node.right and to_print.enqueue(dequeued_node.right)\n\n # Print the value of every node, starting with the given node,\n # in an iterative depth first traversal\n def dft_print(self, node):\n to_print = Stack()\n to_print.push(node)\n while to_print.len() > 0:\n popped_node = to_print.pop()\n print(popped_node.value)\n popped_node.right and to_print.push(popped_node.right)\n popped_node.left and to_print.push(popped_node.left)\n\n # STRETCH Goals -------------------------\n # Note: Research may be required\n\n # Print In-order recursive DFT\n def pre_order_dft(self, node):\n print(node.value)\n node.left and self.pre_order_dft(node.left)\n 
node.right and self.pre_order_dft(node.right)\n\n # Print Post-order recursive DFT\n\n def post_order_dft(self, node):\n node.left and self.post_order_dft(node.left)\n node.right and self.post_order_dft(node.right)\n print(node.value)\n","sub_path":"binary_search_tree/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"569585534","text":"#!/Users/rlaney/.virtualenvs/NetEngineerONE/bin/python\n\nfrom __future__ import absolute_import, division, print_function\n\nimport itertools\nimport netmiko\ntry:\n import simplejson as json # Prefer simplejson because of SPEED!\nexcept ImportError:\n import json\nimport mytools\nimport os\nimport sys\nimport signal\n#from trigger.netdevices import NetDevices\n#from trigger.conf import settings\n#from trigger.netdevices.loader import BaseLoader\n#from trigger.exceptions import LoaderFailed\n\n\nsignal.signal(signal.SIGPIPE, signal.SIG_DFL) # IOError: Broken pipe\nsignal.signal(signal.SIGINT, signal.SIG_DFL) # KeyboardInterrupt: Ctrl-C\n\n\n#if len(sys.argv) < 3:\n# print('Usage: cmdrunner.py commands.txt devices.json')\n# exit()\n\nnetmiko_exceptions = (netmiko.ssh_exception.NetMikoTimeoutException,\n netmiko.ssh_exception.NetMikoAuthenticationException)\n\n#data_source = 'het-devices.json'\n\nwith open('het-devices.json', 'r') as contents:\n devices = json.load(contents)\n for d in devices:\n if not d['operatingSystem']:\n sys.exit()\n else:\n print(d['nodeName']['ip'])\n\n#def load_data_source(self, data_source, **kwargs):\n# try:\n# return self.get_data(data_source)\n# except Exception as err:\n# raise LoaderFailed(\"Tried %r; and failed: %r\" % (data_source, err))\n\n\n#with open('het-devices.json') as device_file:\n# all_devices = json.load(device_file)\n# devices = json.dumps(all_devices, indent=4, sort_keys=True)\n\n\nprint('~'*79)\nsys.exit()\n\n\nios_devices = [d for d in devices if d['os'] == 'IOS']\nprint(len(ios_devices))\ndevice_list = []\nfor device in ios_devices:\n device_list.append(device['host', 'name'])\n print(device_list.sort())\n\nprint('~'*79)\nsys.exit()\n\nrouters = [d for d in devices if d['deviceType'] == 'ROUTER']\nprint(len(routers))\nfor router in routers:\n router_list = []\n router_list.append(router['nodeName'])\n print(sorted(router_list))\n\nprint('~'*79)\n\nswitches = [d for d in devices if d['deviceType'] == 'SWITCH']\nprint(len(switches))\nfor switch in switches:\n switch_list = []\n switch_list.append(switch)\n print(sorted(switch_list))\n\nprint('~'*79)\n\nsys.exit()\n\nwith open('router_cmds.txt') as rtr_file:\n router_commands = rtr_file.readlines()\n\nprint(router_commands)\nprint('~'*79)\n\nwith open('switch_cmds.txt') as swi_file:\n switch_commands = swi_file.readlines()\n\nprint(switch_commands)\nprint('~'*79)\n\nusername, password = mytools.get_creds()\n\nfor device in devices:\n try:\n print('~'*79)\n print('Connecting to ', device['nodeName'])\n connection = netmiko.ConnectHandler(ip=device['ip'], device_type=device['platform'],\n username=username, password=password)\n print(connection.send_command('show clock'))\n connection.disconnect()\n except netmiko_exceptions as e:\n print('Failed on ', device['nodeName'], e)\n","sub_path":"python/trigger/cmdrunner/het-cmdrunner.py","file_name":"het-cmdrunner.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
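The het-cmdrunner script above reads each device record with keys such as 'operatingSystem', 'nodeName', 'ip', 'platform', 'os' and 'deviceType' (its accesses are not fully consistent with one another). A het-devices.json entry compatible with most of those lookups might look like the sketch below; every value is invented for illustration (192.0.2.0/24 is a documentation address range), not taken from any real inventory.

    device = {
        'nodeName': 'rtr-lab-01',   # label used in the log messages
        'ip': '192.0.2.10',         # address handed to netmiko.ConnectHandler
        'platform': 'cisco_ios',    # used as the netmiko device_type
        'os': 'IOS',
        'operatingSystem': 'IOS',
        'deviceType': 'ROUTER',
    }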
+{"seq_id":"559715415","text":"import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\n\nengine = pyttsx3.init('sapi5')\n# Getting the voices already present in the system!\nvoices = engine.getProperty('voices')\n\n# setting voice to the first voice in the system!\nengine.setProperty('voice', voices[0].id)\n\n# Speak some text input!\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n\ndef wishMe():\n hour = int(datetime.datetime.now().hour)\n if hour >= 0 and hour < 12:\n speak(\"Good Morning!\")\n elif hour >= 12 and hour < 18:\n speak(\"Good afternoon!\")\n else:\n speak(\"Good evening!\")\n \n speak(\"I am Jarvis! Please tell me how can I help you?\")\n\n# takes speech command!\ndef takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n \n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio,language='en-in')\n print(f\"User said: {query}\\n\")\n except Exception as e:\n print(\"Say that again\")\n return \"None\"\n return query\n\nif __name__ == \"__main__\":\n wishMe()\n while True:\n query = takeCommand().lower()\n\n if 'wikipedia' in query:\n speak('Searching wikipedia...')\n query = query.replace('wikipedia','')\n results = wikipedia.summary(query, sentences=2)\n speak(\"According to wikipedia\")\n speak(results)\n elif 'your name' in query:\n speak('My name is Jarvis')\n\n elif 'open youtube' in query:\n webbrowser.open(\"youtube.com\")\n \n elif 'open google' in query:\n webbrowser.open(\"google.com\")\n \n elif 'the time' in query:\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n speak(f\"Sir, The time is {strTime}\")\n\n elif 'open code' in query:\n vscode = \"C:\\\\Users\\\\Rupesh\\\\AppData\\\\Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Visual Studio Code\"\n os.startfile(vscode)\n","sub_path":"jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"400757336","text":"import os\nimport sys\nimport lmdb # install lmdb by \"pip install lmdb\"\nimport cv2\nimport numpy as np\nimport argparse\nimport json\nimport cv2\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--image_dir', help='path to dataset')\nparser.add_argument('--label', help='path to label file')\nparser.add_argument('--output', help='path to output lmdb')\n\nopt = parser.parse_args()\nimage_dir = opt.image_dir\nlabel_file = opt.label\noutput_dir = opt.output\n\nprint('image_dir', image_dir)\nprint('label file', label_file)\nprint('output_dir', output_dir)\n\ndef checkImageIsValid(imageBin):\n if imageBin is None:\n return False\n imageBuf = np.fromstring(imageBin, dtype=np.uint8)\n img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)\n imgH, imgW = img.shape[0], img.shape[1]\n if imgH * imgW == 0:\n return False\n return True\n\n\ndef writeCache(env, cache):\n with env.begin(write=True) as txn:\n for k, v in cache.items():\n txn.put(k.encode(), v)\n\n\ndef createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):\n \"\"\"\n Create LMDB dataset for CRNN training.\n\n ARGS:\n outputPath : LMDB output path\n imagePathList : list of image path\n labelList : list of corresponding groundtruth texts\n lexiconList : (optional) list of lexicon lists\n checkValid : if true, check the validity of every image\n \"\"\"\n assert(len(imagePathList) == 
len(labelList))\n nSamples = len(imagePathList)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n label = labelList[i]\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n if checkValid:\n if not checkImageIsValid(imageBin):\n print('%s is not a valid image' % imagePath)\n continue\n\n imageKey = 'image-%09d' % cnt\n labelKey = 'label-%09d' % cnt\n cache[imageKey] = imageBin\n cache[labelKey] = label.encode('utf-8')\n if lexiconList:\n lexiconKey = 'lexicon-%09d' % cnt\n cache[lexiconKey] = ' '.join(lexiconList[i])\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt-1\n cache['num-samples'] = str(nSamples).encode('utf-8')\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)\n\n\nif __name__ == '__main__':\n imagePathList = []\n labelList = []\n\n # Create cinnamon data\n # with open(label_file, 'r') as file:\n # json_data = json.load(file)\n #\n # for key, value in json_data.items():\n # img_name = key.split('.')[0]\n # image_path = os.path.join(image_dir, key)\n # label = value\n #\n # # imagePathList.append(os.path.join(image_dir, img_name + '_gray.png'))\n # # imagePathList.append(os.path.join(image_dir, img_name + '_crop_thread_0.png'))\n # # imagePathList.append(os.path.join(image_dir, img_name + '_thread_10.png'))\n # imagePathList.append(os.path.join(image_dir, img_name + '_crop_thread_20.png'))\n # # imagePathList.append(os.path.join(image_dir, img_name + '_crop_thread_otsu.png'))\n # # labelList.append(label)\n # # labelList.append(label)\n # # labelList.append(label)\n # labelList.append(label)\n # # labelList.append(label)\n\n # Create IAM data\n img_dir_2 = '/Users/thinhvu/PycharmProjects/ocr_data/IAM_Handwriting_DB/sentences'\n label_file_2 = '/Users/thinhvu/PycharmProjects/ocr_data/IAM_Handwriting_DB/ascii/sentences.txt'\n with open(label_file_2, 'r') as file:\n for line in file:\n if not line.startswith('#'):\n line_arr = line.strip().split(' ')\n if line_arr[2] == 'ok':\n graylevel = int(line_arr[3])\n\n img_name = line_arr[0]\n img_name_parts = img_name.split('-')\n img_path = os.path.join(img_dir_2, img_name_parts[0],\n img_name_parts[0] + '-' + img_name_parts[1], img_name + '_binary.png')\n\n # save_img_path = os.path.join(img_dir_2, img_name_parts[0],\n # img_name_parts[0] + '-' + img_name_parts[1], img_name + '_binary.png')\n #\n # img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n # img = cv2.threshold(img, graylevel, 255, cv2.THRESH_BINARY)[1]\n # cv2.imwrite(save_img_path, img)\n\n if os.path.isfile(img_path):\n imagePathList.append(img_path)\n text = line_arr[-1].replace('|', ' ')\n labelList.append(text)\n\n # print(imagePathList)\n # print(labelList)\n createDataset(output_dir, imagePathList, labelList)\n","sub_path":"tool/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"459611889","text":"from os import walk\nfrom lxml import html\nfrom lxml import etree\nfrom common import *\nfrom Entity import *\nimport time\nimport datetime as datetime\nimport random\nimport re\nfrom Logger import Logger\n\n\nlpCorr = []\nchsCorr = []\nmtCorr = []\n\ndef fillLPCorr(ligaProCorr):\n ligaProCorr.append(['Бурдин А', 'Алексей Бурдин'])\n 
ligaProCorr.append(['Заикин А', 'Алан Заикин'])\n ligaProCorr.append(['Карпенко В', 'Вячеслав Карпенко'])\n ligaProCorr.append(['Попов Д', 'Дмитрий Попов'])\n ligaProCorr.append(['Терехов А', 'Антон Терехов'])\n ligaProCorr.append(['Егоров Н', 'Николай Егоров'])\n ligaProCorr.append(['Анохин И', 'Илья Анохин'])\n ligaProCorr.append(['Королев С', 'Семен Королев'])\n ligaProCorr.append(['Анисимов А', 'Антон Анисимов'])\n ligaProCorr.append(['Семин А', 'Артем Семин'])\n ligaProCorr.append(['Виноградов А', 'Алексей Виноградов'])\n ligaProCorr.append(['Макаров А', 'Александр Макаров'])\n ligaProCorr.append(['Меркушев С', 'Станислав Меркушев'])\n ligaProCorr.append(['Морозов А', 'Александр Морозов'])\n ligaProCorr.append(['Ануфриев В', 'Владимир Ануфриев'])\n ligaProCorr.append(['Маслов Д', 'Даниил Маслов'])\n ligaProCorr.append(['Федоров Д', 'Дмитрий Федоров'])\n ligaProCorr.append(['Голубева А', 'Анастасия Голубева'])\n ligaProCorr.append(['Лебедева В', 'Виктория Лебедева'])\n ligaProCorr.append(['Свиридов А', 'Алексей Свиридов'])\n ligaProCorr.append(['Крылов А', 'Александр Крылов'])\n ligaProCorr.append(['Морозова В', 'Валерия Морозова'])\n ligaProCorr.append(['Резниченко А', 'Александр Резниченко'])\n ligaProCorr.append(['Беспалова Е', 'Екатерина Беспалова'])\n ligaProCorr.append(['Булхак А', 'Антон Булхак'])\n ligaProCorr.append(['Фомина А', 'Анастасия Фомина'])\n ligaProCorr.append(['Кутузова А', 'Алина Кутузова'])\n ligaProCorr.append(['Воронов А', 'Александр Воронов'])\n\ndef fillChSCorr(chsCorr):\n chsCorr.append(['Мего П', 'Павол Мего'])\n\ndef fillMTCorr(mtCorr):\n mtCorr.append(['Млинарж А', 'Алексей Млинарж'])\n mtCorr.append(['Млинарж', 'Алексей Млинарж'])\n mtCorr.append(['Алексей Алексей Млинарж', 'Алексей Млинарж'])\n mtCorr.append(['Иванов Н', 'Никита Иванов'])\n\nfillLPCorr(lpCorr)\nfillChSCorr(chsCorr)\nfillMTCorr(mtCorr)\n\nclass BKFonResultsPreparator:\n\n @staticmethod\n def run(logger):\n print('BKFonResultsPreparator')\n logger.print('BKFonResultsPreparator')\n\n playersDict = GlobalPlayersDict(\"filtered\")\n\n corrections = readCorrectionsList('data/bkfon/corrections.txt')\n wrongLines = []\n matches = BKFonResultsPreparator.getMatches(corrections, wrongLines, logger)\n logger.print(len(matches))\n players = BKFonResultsPreparator.getMatchesPlayers(matches)\n\n m = dict()\n w = dict()\n mw = dict()\n\n multiple = dict()\n unknown = dict()\n\n for player in players:\n\n id = playersDict.getId(player)\n if len(id) == 1:\n if id[0][0] == 'm':\n updateDict(m, player)\n else:\n updateDict(w, player)\n elif len(id) == 0:\n updateDict(unknown, player)\n else:\n fl_mw = ''\n for e in id:\n fl_mw += e[0]\n fl_mw = ''.join(sorted(set(list(fl_mw))))\n if fl_mw == 'm':\n updateDict(m, player)\n elif fl_mw == 'w':\n updateDict(w, player)\n else:\n updateDict(mw, player)\n if not (fl_mw + ' ' + player in multiple):\n multiple[fl_mw + ' ' + player] = 0\n multiple[fl_mw + ' ' + player] += 1\n\n playersMW = dict()\n playersMatches = dict()\n for player in players:\n playersMW[player] = [0, 0, 0]\n for match in matches:\n if len(match.names[0]) == 1:\n pl1 = match.names[0][0]\n pl2 = match.names[1][0]\n if pl1 in m:\n playersMW[pl2][0] += 1\n elif pl1 in w:\n playersMW[pl2][1] += 1\n if pl2 in m:\n playersMW[pl1][0] += 1\n elif pl2 in w:\n playersMW[pl1][1] += 1\n playersMW[pl1][2] += 1\n playersMW[pl2][2] += 1\n if not (pl1 in playersMatches):\n playersMatches[pl1] = []\n playersMatches[pl1].append(match.toStr())\n if not (pl2 in playersMatches):\n playersMatches[pl2] = []\n 
playersMatches[pl2].append(match.toStr())\n\n prefix = 'prepared_data/bkfon/'\n\n with open(prefix + 'bkfon_players_x_mw.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(playersMW.items(), key=lambda x: -x[1][2]):\n # for e in sorted(playersMW.items(), key = lambda x: (x[1][0] + 1) / (x[1][2] + 2)):\n if (e[0] in unknown) and e[1][2] > 0:\n logger.print(e)\n fout.write(e[0] + '\\t' + str(e[1]) + '\\t' + '\\t'.join(playersMatches[e[0]]) + '\\n')\n\n with open(prefix + 'bkfon_players_men.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(m.keys()):\n fout.write(e + '\\t' + ';'.join(playersDict.getId(e)) + '\\n')\n with open(prefix + 'bkfon_players_women.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(w.keys()):\n fout.write(e + '\\t' + ';'.join(playersDict.getId(e)) + '\\n')\n with open(prefix + 'bkfon_players_mw.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(mw.keys()):\n fout.write(e + '\\t' + ';'.join(playersDict.getId(e)) + '\\n')\n\n multiple = dict()\n unknown = dict()\n\n with open(prefix + 'all_results.txt', 'w', encoding='utf-8') as fout, open(prefix + 'players_collisions.txt',\n 'w', encoding='utf-8') as fout1:\n fout.write('\\t'.join(\n ['date', 'time', 'compName', 'id1', 'id2', 'setsScore', 'pointsScore', 'name1', 'name2',\n 'matchId']) + '\\n')\n for match in matches:\n flError = 0\n if match.flError == 0:\n #print(match.toStr())\n ids = [[], []]\n for i in range(2):\n for player in match.names[i]:\n\n id = playersDict.getId(player)\n\n if len(id) == 1:\n ids[i].append(id[0])\n elif len(id) == 0:\n flError = 'unknown ' + player\n if not (player in unknown):\n unknown[player] = 0\n unknown[player] += 1\n else:\n flError = 'multiple ' + player\n fl_mw = ''\n for e in id:\n fl_mw += e[0]\n fl_mw = ''.join(sorted(set(list(fl_mw))))\n if not (fl_mw + ' ' + player in multiple):\n multiple[fl_mw + ' ' + player] = 0\n multiple[fl_mw + ' ' + player] += 1\n fout1.write('MANY ' + player + ' ' + str(id) + ' ' + match.toStr() + '\\n')\n #print('MANY ' + player + ' ' + str(id) + ' ' + match.toStr())\n\n if flError == 0:\n resTokens = match.toArr()\n resTokens.append(resTokens[3])\n resTokens.append(resTokens[4])\n resTokens[3] = ';'.join(ids[0])\n resTokens[4] = ';'.join(ids[1])\n resTokens.append(match.matchId)\n fout.write('\\t'.join(resTokens) + '\\n')\n if (match.flError != 0 or flError != 0) and match.compName.lower().replace('-', '').find('лига про') != -1:\n logger.print('LIGA PRO error ' + str(match.flError) + ' ' + str(flError) + ' ' + match.toStr())\n\n with open(prefix + 'bkfon_players_multiple.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(multiple.items(), key=lambda x: -x[1]):\n fout.write(e[0] + '\\t' + str(e[1]) + '\\n')\n with open(prefix + 'bkfon_players_unknown.txt', 'w', encoding='utf-8') as fout:\n for e in sorted(unknown.items(), key=lambda x: -x[1]):\n fout.write(e[0] + '\\t' + str(e[1]) + '\\n')\n\n @staticmethod\n def process(filename, matches, matchesHashes, corrections, logger):\n with open(filename, 'r', encoding='utf-8') as fin:\n for line in fin:\n dt, matchTime, compName, matchId, names1, names2, setsScore, pointsScore = line.rstrip('\\n').split('\\t')\n names = [names1, names2]\n\n tcorr = corrections.copy()\n if compName.replace('Жен. ', '').find('Лига Про. Москва') != -1:\n tcorr += lpCorr\n if compName.replace('Жен. ', '').find('Челленджер серия') != -1:\n tcorr += chsCorr\n if compName.replace('Жен. 
', '').find('Мастер-Тур') != -1:\n tcorr += mtCorr\n\n #names = re.sub(' +', ' ', names.replace(u'\\xa0', ' '))\n #names = names.strip().split(' - ')\n\n for k, v in tcorr:\n if k.find(';') != -1:\n if k.split(';')[0] == dt:\n names = [e.replace(k.split(';')[1], v) for e in names]\n else:\n names = [e.replace(k, v) for e in names]\n #print(names)\n\n #if (names[0] + names[1]).find('Харимото') != -1:\n # print(filename, line)\n names = [names[0].split(';'), names[1].split(';')]\n if pointsScore != 'отмена' and pointsScore != 'прерван' and len(setsScore) > 0:\n match = Match(dt,\n names,\n names=names,\n setsScore=setsScore,\n pointsScore=pointsScore,\n time=matchTime,\n compName=compName,\n matchId=matchId)\n try:\n mHash = calcHash([match.date, match.time] + match.names[0] + match.names[1] + match.sets)\n except:\n logger.print(line)\n raise\n if mHash not in matchesHashes:\n matches.append(match)\n matchesHashes[mHash] = [len(matches) - 1, filename + '\\t' + line.rstrip()]\n else:\n matches[matchesHashes[mHash][0]] = match\n logger.print(matchesHashes[mHash])\n logger.print(filename + '\\t' + line.rstrip())\n logger.print()\n\n\n @staticmethod\n def getMatches(corrections, wrongLines, logger):\n matches = list()\n matchesHashes = dict()\n for f in walk('data/bkfon/results_parsed'):\n for ff in f[2]:\n BKFonResultsPreparator.process('data/bkfon/results_parsed' + '/' + ff,\n matches, matchesHashes, corrections, logger)\n # if ff.find('new') != -1:\n # processNew(ff, matches, corrections)\n # else:\n # processOld(ff, matches, corrections)\n return matches\n\n @staticmethod\n def getMatchesPlayers(matches):\n res = dict()\n for match in matches:\n for i in range(2):\n for player in match.names[i]:\n updateDict(res, player)\n return res\n\n\ndef main():\n BKFonResultsPreparator.run(logger=Logger('BKFonResultsPreparator.txt'))\n\nif __name__ == \"__main__\":\n main()","sub_path":"BKFonResultsPreparator.py","file_name":"BKFonResultsPreparator.py","file_ext":"py","file_size_in_byte":13120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"581725152","text":"n, W = map(lambda x: int(x), input().split())\r\nresult_price = 0\r\nsubjects = []\r\n\r\nfor i in range(n):\r\n c, w = map(lambda x: int(x), input().split())\r\n subjects.append([c, w])\r\n\r\nsubjects.sort(key=lambda x: -(x[0] / x[1]))\r\n\r\n\r\nfor subject in subjects:\r\n if subject[1] <= W:\r\n result_price += subject[0]\r\n W -= subject[1]\r\n\r\n elif 0 < W < subject[1]:\r\n result_price += subject[0] * (W / subject[1])\r\n W = 0\r\n\r\n if W == 0:\r\n break\r\n\r\nprint(result_price)\r\n","sub_path":"MyBag.py","file_name":"MyBag.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76754024","text":"class ActionRemapper:\n def __init__(self, rom, ale):\n # Save the environment name\n self.rom = rom\n\n # Save the ALE environment\n self.ale = ale\n\n # Initialize the action remap\n self.action_remap = self._get_action_remap()\n\n # Print the action remap\n print('Action remapping is used: {}'.format(self.action_remap))\n\n def act(self, action):\n return self.ale.act(action)\n\n def game_over(self):\n return self.ale.game_over()\n\n def getEpisodeFrameNumber(self):\n return self.ale.getEpisodeFrameNumber()\n\n def getMinimalActionSet(self):\n # Get action set (e.g., [0, 1, 2, 3, 4, 6, 7, 11, 12] in beam_rider)\n legal_actions = self.ale.getMinimalActionSet()\n\n # Get original action 
targets (e.g., {0:0, 1:1, 2:7, 3:8})\n original_actions = [original for _, original\n in self.action_remap.items()]\n\n # Map the index to the legal actions and return (e.g., [0, 1, 11, 12])\n return [legal_actions[idx] for idx in original_actions]\n\n def getScreenDims(self):\n return self.ale.getScreenDims()\n\n def getScreenGrayscale(self):\n return self.ale.getScreenGrayscale()\n\n def lives(self):\n return self.ale.lives()\n\n def reset_game(self):\n return self.ale.reset_game()\n\n def _get_action_remap(self):\n # Determine environment ID\n if self.rom == 'asteroids':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE\n 2: 2, # UP\n 3: 3, # RIGHT\n 4: 4, # LEFT\n 5: 5, # DOWN\n 6: 6, # UPRIGHT\n 7: 7, # UPLEFT\n }\n elif self.rom == 'beam_rider':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE\n 2: 3, # RIGHT\n 3: 4, # LEFT\n }\n elif self.rom == 'bowling':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE\n 2: 2, # UP\n 3: 3, # DOWN\n }\n elif self.rom == 'breakout':\n return self._identity_action_remap()\n elif self.rom == 'enduro':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE (Accelerate)\n 2: 2, # RIGHT\n 3: 3, # LEFT\n 4: 4, # DOWN (Decelerate)\n }\n elif self.rom == 'freeway':\n return self._identity_action_remap()\n elif self.rom == 'kung_fu_master':\n return self._identity_action_remap()\n elif self.rom == 'ms_pacman':\n return {\n 0: 0, # NOOP\n 1: 1, # UP\n 2: 2, # RIGHT\n 3: 3, # LEFT\n 4: 4, # DOWN\n }\n elif self.rom == 'pong':\n return {\n 0: 0, # NOOP\n 1: 2, # RIGHT\n 2: 3, # LEFT\n }\n elif self.rom == 'qbert':\n return {\n 0: 0, # NOOP\n 1: 2, # UP\n 2: 3, # RIGHT\n 3: 4, # LEFT\n 4: 5, # DOWN\n }\n elif self.rom == 'seaquest':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE\n 2: 2, # UP\n 3: 3, # RIGHT\n 4: 4, # LEFT\n 5: 5, # DOWN\n }\n elif self.rom == 'skiing':\n return self._identity_action_remap()\n elif self.rom == 'space_invaders':\n return {\n 0: 0, # NOOP\n 1: 1, # FIRE\n 2: 2, # RIGHT\n 3: 3, # LEFT\n }\n else:\n raise ValueError('Unknown environment ID: {}'.format(self.rom))\n\n def _identity_action_remap(self):\n # Get action set\n legal_actions = self.ale.getMinimalActionSet()\n\n # Get the number of actions\n num_actions = len(legal_actions)\n\n action_remap = {}\n\n for i in range(num_actions):\n action_remap[i] = i\n\n return action_remap\n","sub_path":"option_critic/action_remapper.py","file_name":"action_remapper.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408203392","text":"#! 
python3\r\n# -*- coding:utf-8 -*-\r\n'''\r\nTitle: 功能操作类\r\nDescription: 计日工签证中中,发起签证,启用签证以及签发审批单基本操作\r\n@author: Xushenwei\r\n@update: 2017年12月13日\r\n'''\r\nimport sys, configparser, autoit, pymysql, operator\r\nfrom time import sleep\r\nsys.path.insert(0,r'D:\\AutomatedTestScripts\\IM\\Functions\\basic_functions')\r\nfrom autoit_function import MouseControl, WinControl, ProcessControl\r\nimport screen_point\r\n\r\n\r\ndef add_clicks(self, x, y, z, step):\r\n\tm1 = MouseControl()\r\n\tfor i in range(step):\r\n\t\tm1.click(self.IMWin, '', x, y)\r\n\t\tsleep(0.5)\r\n\t\ty += z \r\n\tm1.click(self.IMWin, '', 890, 770)\r\n\tsleep(1)\r\n\t\t\r\ndef sigh_forms(self, x, y, z, step):\r\n\tm1 = MouseControl()\r\n\tm1.click(self.IMWin, '', x, y)\r\n\tsleep(0.5)\r\n\tfor i in range(step):\r\n\t\tm1.click(self.IMWin, '', x, y)\r\n\t\tsleep(1.5)\r\n\t\tm1.click(self.IMWin, '', 890, 610)# 确定\r\n\t\tsleep(1.5)\r\n\t\tm1.click(self.IMWin, '', x, y)\r\n\t\tsleep(1.5)\r\n\t\tm1.click(self.IMWin, '', 890, 660)# 确定\r\n\t\ty += z\r\n\t\tsleep(1.5)\r\n\r\n\r\nclass DayworkVisaFunctions():\r\n\r\n\tdef __init__(self):\r\n\t\t\"\"\"读取配置文件\"\"\"\r\n\t\tconf = configparser.ConfigParser()\r\n\t\tconf.read(r\"D:\\AutomatedTestScripts\\IM\\ConfigurationFiles\\MainConfig.ini\")\r\n\t\tself.IMWin = conf.get('parameters', 'win_IM')\r\n\t\tconf.read(r\"D:\\AutomatedTestScripts\\IM\\ConfigurationFiles\\MessagesConfig.ini\")\r\n\t\tself.message1 = conf.get('计日工签证Messages', '计日工签证名称')\r\n\t\tself.message2 = conf.get('计日工签证Messages', '桩号')\r\n\t\tself.message3 = conf.get('计日工签证Messages', '签证说明')\r\n\t\tself.message4 = conf.get('计日工签证Messages', '签证申请编号')\r\n\t\tself.message5 = conf.get('计日工签证Messages', '变更工程量')\r\n\t\tself.message11 = conf.get('合同管理Messages', '合同编号')\r\n\t\tconf.read(r\"D:\\AutomatedTestScripts\\IM\\ConfigurationFiles\\DatabaseConfig.ini\")\r\n\t\tself.host = conf.get('logindatabase', 'host')\r\n\t\tself.user = conf.get('logindatabase', 'user')\r\n\t\tself.password = conf.get('logindatabase', 'password')\r\n\t\tself.db = conf.get('logindatabase', 'db')\r\n\t\tself.port = 3306\r\n\r\n\tdef day_count_visa(self):\r\n\t\t\"\"\"计日工签证\"\"\"\r\n\t\tif WinControl(self.IMWin).exists():\r\n\t\t\tm1 = MouseControl()\r\n\t\t\tsleep(2)\r\n\t\t\t# 计日工签证\r\n\t\t\tm1.click(self.IMWin, '', screen_point.计日工签证[0], screen_point.计日工签证[1])\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 选择项目工程\r\n\t\t\tm1.click(self.IMWin, '', 42, 183)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 发起签证\r\n\t\t\tm1.click(self.IMWin, '', 460, 180)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 计日工签证名称\r\n\t\t\tm1.click(self.IMWin, '', 810, 435)\r\n\t\t\tautoit.send(self.message1)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 桩号\r\n\t\t\tm1.click(self.IMWin, '', 810, 485)\r\n\t\t\tautoit.send(self.message2)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 签证说明\r\n\t\t\tm1.click(self.IMWin, '', 810, 550)\r\n\t\t\tautoit.send(self.message3)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 签证申请编号\r\n\t\t\tm1.click(self.IMWin, '', 1240, 435)\r\n\t\t\tautoit.send(self.message4)\r\n\t\t\tsleep(2)\r\n\t\t\t# 发起\r\n\t\t\tm1.click(self.IMWin, '', 890, 660, clicks=2)\r\n\t\t\tsleep(2)\r\n\t\t\t# 启用签证\r\n\t\t\tm1.click(self.IMWin, '', 460, 180)\r\n\t\t\tsleep(2)\r\n\t\t\t# 计日工签证名称\r\n\t\t\tm1.click(self.IMWin, '', 820, 245)\r\n\t\t\tautoit.send(self.message1)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 桩号\r\n\t\t\tm1.click(self.IMWin, '', 820, 295)\r\n\t\t\tautoit.send(self.message2)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 签证单编号\r\n\t\t\tm1.click(self.IMWin, '', 1220, 245)\r\n\t\t\tautoit.send(self.message4)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 选择清单\r\n\t\t\tm1.click(self.IMWin, '', 
675, 350)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 选择104\r\n\t\t\tm1.click(self.IMWin, '', 567, 453)\r\n\t\t\tsleep(0.5)# 确定\r\n\t\t\tm1.click(self.IMWin, '', 890, 770)\r\n\t\t\tsleep(2)\r\n\t\t\t# 签证工作量\r\n\t\t\tm1.click(self.IMWin, '', 1280, 470)\r\n\t\t\tautoit.send(self.message5)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 确定\r\n\t\t\tm1.click(self.IMWin, '', 890, 820)\r\n\t\t\tsleep(2)\r\n\t\t\t# 审批计日工启用签证单\r\n\t\t\tsigh_forms(self, 1695, 315, 40, 1)\r\n\t\t\tsleep(0.5)\r\n\t\t\t# 签发变更令\r\n\t\t\tm1.click(self.IMWin, '', 560, 180)\r\n\t\t\tsleep(1.5)\r\n\t\t\t# 审批计日工审批单\r\n\t\t\tsigh_forms(self, 1695, 355, 40, 1)\r\n\t\t\tsleep(2)\r\n\r\n\tdef check_begin_daywork_message(self):\r\n\t\t\"\"\"检验发起签证信息正确\"\"\"\r\n\t\t# 登录数据库\r\n\t\tself.connect = pymysql.connect(self.host, self.user, self.password, self.db, self.port, charset = 'utf8'\\\r\n\t\t\t, cursorclass = pymysql.cursors.DictCursor)\r\n\t\ttry:\r\n\t\t\t# 使用cursor()方法获取操作游标\r\n\t\t\twith self.connect.cursor() as cursor:\r\n\t\t\t\t# 通过合同编号来查询contractId\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_contract WHERE contractNum = %s\", self.message11)\r\n\t\t\t\tresults0 = cursor.fetchone()\r\n\t\t\t\tcontractId1 = results0['id']\r\n\t\t\t\t# 通过contractId来查询发起签证信息\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_daywork_request WHERE contractId = %s\", contractId1)\r\n\t\t\t\t# 获取查询结果\r\n\t\t\t\tresults = cursor.fetchone()\r\n\t\texcept:\r\n\t\t\tprint(\"Error: unable to fetch data!\")\r\n\r\n\t\tfinally:\r\n\t\t\t# 查询完毕后必须关闭连接\r\n\t\t\tself.connect.close()\r\n\t\t#print(results)\r\n\t\t# 删除对比字典中不需要的元素\r\n\t\tdel results['id'], results['contractId'], results['createDate'], results['visaAmount'], results['templateDbId']\\\r\n\t\t, results['status'], results['launchedBy'], results['epid']\r\n\t\t#print(results)\r\n\t\t\r\n\t\t# 新建一个传参字典,以便于和从服务器获取的数据做比对\r\n\t\tdicta = {\r\n\t\t\t'requestNum' : self.message4,\r\n\t\t\t'requestName' : self.message1,\r\n\t\t\t'stakeMark' : self.message2,\r\n\t\t\t'remark' : self.message3,\r\n\t\t\t}\r\n\t\t# 用自己创建的字典和从数据库获得的字典相比较\r\n\t\ta = operator.eq(results, dicta)\r\n\t\tif a == True:\r\n\t\t\t#print(\"OK\")\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\traise Exception('数据传输不正确!')\r\n\r\n\tdef check_daywork_visa_message(self):\r\n\t\t\"\"\"检验签证工程量信息正确\"\"\"\r\n\t\t# 登录数据库\r\n\t\tself.connect = pymysql.connect(self.host, self.user, self.password, self.db, self.port, charset = 'utf8'\\\r\n\t\t\t, cursorclass = pymysql.cursors.DictCursor)\r\n\t\ttry:\r\n\t\t\t# 使用cursor()方法获取操作游标\r\n\t\t\twith self.connect.cursor() as cursor:\r\n\t\t\t\t# 通过合同编号来查询contractId\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_contract WHERE contractNum = %s\", self.message11)\r\n\t\t\t\tresults0 = cursor.fetchone()\r\n\t\t\t\tcontractId1 = results0['id']\r\n\t\t\t\t#print(contractId1)\r\n\t\t\t\t# 通过contractId来查询resultId\t\t\t\t\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_daywork_request WHERE contractId = %s\", contractId1)\r\n\t\t\t\tresults1 = cursor.fetchone()\r\n\t\t\t\trequestId1 = results1['id']\r\n\t\t\t\t#print(requestId1)\r\n\t\t\t\t# 通过contractId来查询resultId\t\t\t\t\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_daywork_visa_list WHERE requestId = %s\", requestId1)\r\n\t\t\t\tresults2 = cursor.fetchone()\r\n\t\t\t\t#print(results2)\r\n\t\t\t\t# 验证变更工程量是否正确\r\n\t\t\t\tif results2['visaQuantity'] == float(self.message5):\r\n\t\t\t\t\t#print(\"OK\")\r\n\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\traise Exception(\"信息不匹配!\")\r\n\t\tfinally:\r\n\t\t\t# 查询完毕后必须关闭连接\r\n\t\t\tself.connect.close()\r\n\r\n\r\n\tdef 
check_change_list_message(self):\r\n\t\t\"\"\"检验计日工签证表单是否全部已通过\"\"\"\r\n\t\t# 登录数据库\r\n\t\tself.connect = pymysql.connect(self.host, self.user, self.password, self.db, self.port, charset = 'utf8'\\\r\n\t\t\t, cursorclass = pymysql.cursors.DictCursor)\r\n\t\ttry:\r\n\t\t\t# 使用cursor()方法获取操作游标\r\n\t\t\twith self.connect.cursor() as cursor:\t\r\n\t\t\t\t# 通过合同编号来查询contractId\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_contract WHERE contractNum = %s\", self.message11)\r\n\t\t\t\tresults0 = cursor.fetchone()\r\n\t\t\t\tcontractId1 = results0['id']\r\n\t\t\t\t# 通过contractId来查询resultId\t\t\t\t\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_daywork_request WHERE contractId = %s\", contractId1)\r\n\t\t\t\tresults1 = cursor.fetchone()\r\n\t\t\t\trequestId1 = results1['id']\r\n\t\t\t\t# 通过resultId来查找instanceId\t\t\t\t\r\n\t\t\t\tcursor.execute(\"SELECT * FROM t_daywork_request_form WHERE requestId = %s\", requestId1)\r\n\t\t\t\tresults2 = cursor.fetchall()\r\n\t\t\t\t# 创建一个空列表,用以存放instanceId\r\n\t\t\t\tinstanceIdList = []\r\n\t\t\t\tfor dicta in results2:\r\n\t\t\t\t\tinstanceId = dicta['instanceId']\r\n\t\t\t\t\tinstanceIdList.append(instanceId)\r\n\t\t\t\tfor id1 in instanceIdList:\r\n\t\t\t\t\t# 通过instanceId来查找该ID下的status的值,如果等于2,则代表通过\t\t\t\t\t\r\n\t\t\t\t\tcursor.execute(\"SELECT * FROM t_process_instance WHERE id = %s\", id1)\r\n\t\t\t\t\tresults3 = cursor.fetchone()\r\n\t\t\t\t\tif results3['status'] == 2:\r\n\t\t\t\t\t\t#print(\"OK\")\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\traise Exception('工程变更表单中此表单没有通过!')\r\n\t\tfinally:\r\n\t\t\t# 查询完毕后必须关闭连接\r\n\t\t\tself.connect.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tDVF = DayworkVisaFunctions()\r\n\tDVF.day_count_visa()\r\n\tDVF.check_begin_daywork_message()\r\n\tDVF.check_daywork_visa_message()\r\n\tDVF.check_change_list_message()","sub_path":"IM/Functions/3/DayworkVisaFunctions.py","file_name":"DayworkVisaFunctions.py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"447770865","text":"from django.conf.urls import patterns, url\nfrom register import views\n\nurlpatterns = patterns('',\n url(r'^$', views.add_user, name='add_user'),\n url(r'^thanks', views.index, name='index'),\n url(r'^add_user', views.add_user, name='add_user'),\n url(r'^faq', views.faq, name='faq'),\n url(r'^contact', views.contact, name='contact'),\n url(r'^privacy', views.privacy, name='privacy'),\n\n )\n","sub_path":"register/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"466624561","text":"#! 
/usr/bin/env python3\n\nfrom urllib.request import urlopen\nfrom html.parser import HTMLParser\nimport re\n\n# Extract data from Time Table\nhtml = urlopen('http://www.artsandscience.utoronto.ca/ofr/timetable/winter/csc.html')\nhtmlData = html.read()\n\n# The regular expressions for all 6 fields\ncourse = re.compile('^CSC\\d{3}(Y|H)1$')\nsection = re.compile('^[FSY]{1}$')\nlecture = re.compile('^([TL]){1}[0123456789]{2}0{1}\\d{1}$')\ntime = re.compile('^[MWTRF]{1,5}\\d{0,2}[-]?\\d{0,2}$|Cancel|TBA')\ninstructor = re.compile('^\\w\\.|Tba|Staff')\nlocation = re.compile('^\\w{2} \\d{1,4}|SS')\n\n# Different cases for extracted data\nclass MyHTMLParser(HTMLParser):\n\n    # Variables to remember data fields and later print them at appropriate times.\n    courseMemory = ''\n    sectionMemory = ''\n    typeMemory = 'stringsAgainstIndexErrors!'\n    timeMemory = ''\n    locationMemory = ''\n    instructorMemory = ''\n\n    # Switches that are turned on and off, according to specific data that is read.\n    roomChangeMemory = False\n    roomSwitch = False\n\n    # This switch is for \"Y\" section courses, and is turned on after a course is read.\n    # This way, we know that the \"Y\" that is being read is not a waitlist \"Y\".\n    switch = False\n    fieldMemory = 0\n\n    # Takes in data from the HTML parser and sorts it accordingly.\n    def handle_data(self, data):\n\n        # Checks if the data is a course.\n        if course.match(data):\n            if self.fieldMemory == 4 and self.typeMemory[1] != '2':\n                print('<tr><td></td><td></td>'\n                      + '<td>'\n                      + self.typeMemory\n                      + '</td><td>'\n                      + self.timeMemory\n                      + '</td><td>'\n                      + self.locationMemory\n                      + '</td><td>'\n                      + self.instructorMemory\n                      + '</td></tr>')\n            self.switch = True\n            self.fieldMemory = 0\n            self.courseMemory = data\n\n        # Checks if the data is a section, either \"F\", \"S\" or \"Y\".\n        # The switch indicates that this field comes directly after the course field,\n        # to prevent waitlist fields from being entered.\n        elif section.match(data) and self.switch:\n            if data == \"F\":\n                print('<tr><td>'\n                      + self.courseMemory\n                      + '</td><td>'\n                      + \"Fall\"\n                      + '</td>' + ' \\n')\n            elif data == \"S\":\n                print('<tr><td>'\n                      + self.courseMemory\n                      + '</td><td>'\n                      + \"Spring\"\n                      + '</td>' + ' \\n')\n            elif data == \"Y\":\n                print('<tr><td>'\n                      + self.courseMemory\n                      + '</td><td>'\n                      + \"Full Year\"\n                      + '</td>' + ' \\n')\n            self.sectionMemory = data\n            self.fieldMemory = 1\n            self.switch = False\n\n        # Checks the data that has \"note: room change\".\n        elif \"change\" in data:\n            self.roomChangeMemory = True\n\n        # Checks if this is a lecture/tutorial, and remembers it.\n        elif lecture.match(data):\n            self.typeMemory = data\n            self.fieldMemory = 2\n            self.roomChangeMemory = False\n            self.roomSwitch = False\n\n        # Checks if data is a time.\n        elif time.match(data):\n            self.timeMemory = data\n            self.fieldMemory = 3\n            # For Tutorials. (Either 'L'ecture or 'T'utorial)\n            # Time is the last field for a tutorial section.\n            if self.typeMemory[0] == 'T': #and data!= 'Cancel':\n                print('<td>'\n                      + self.typeMemory\n                      + '</td><td>'\n                      + data\n                      + '</td></tr>')\n\n            # For cancelled courses. Currently enabled.\n            elif data == 'Cancel' and self.typeMemory[1] != '2':\n                print('<td>'\n                      + self.typeMemory + '</td><td>'\n                      + data\n                      + '</td></tr>')\n\n            if self.roomChangeMemory == True:\n                self.roomSwitch = True\n        # Checks if data is a location.\n        elif location.match(data):\n            self.locationMemory = data\n            if self.roomSwitch == True and self.typeMemory[1] != '2':\n                print('<tr><td></td><td></td><td>'\n                      + self.typeMemory\n                      + '</td><td>'\n                      + self.timeMemory\n                      + '</td><td>'\n                      + self.locationMemory\n                      + '</td><td>'\n                      + data\n                      + '</td></tr>')\n            self.roomChangeMemory = False\n            self.roomSwitch = False\n            self.fieldMemory = 4\n\n        # Checks if data is an instructor.\n        elif instructor.match(data):\n            if self.typeMemory[1] != '2':\n                print('</td>'\n                      + '</tr>')\n            self.fieldMemory = 5\n            self.instructorMemory = data\n\n\n# The html parser that parses the data.\nparser = MyHTMLParser(strict=False)\n\n# Everything is printed, and called in this block.\nprint('<!DOCTYPE html>')\nprint('<html>'\n\t+ '<head>'\n\t+ '</head>'\n\t+ '<body>'\n\t+ '<table>'\n\t+ '<thead>'\n\t+ '<tr>'\n\t+ '<th>Course</th><th>Section</th><th>Type</th><th>Time</th><th>Location</th><th>Instructor</th></tr></thead><tbody>')\nparser.feed(htmlData.decode())\nprint('</tbody></table></body></html>
')","sub_path":"timeTableRetriever.py","file_name":"timeTableRetriever.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"578288850","text":"# The numpy package is essential to this task, as it can create distributions\nimport numpy as np\n#This package is used for the plotting of the graph that we are to do.\nimport matplotlib.pyplot as plt\n\n#A function for finding the mean easier is defined.\ndef hatlambda(abba):\n return np.mean(abba)\n#Values mu and sigma are stored.\nmu = 0\nsigma = 1\n#A random seed for the distribution is chosen.\nnp.random.seed(4444)\n#The amount of repetitions that are to be done.\nrep = 1000\n#Two vectors are saved, one for sample mean and other for variance.\nvecEST1 = np.empty(rep)\nvecVAR = np.empty(rep)\n\n#The outer loop creates the normal distribution 1000 times and saves the\n# sample mean in the before created vector.\nfor i in range(rep):\n X = np.random.normal(mu,sigma,5)\n vecEST1[i] = hatlambda(X)\n #This loop determines the variance by using the built-in sum function\n #to sum over all realizations. Note it goes up to 4, because python is zero\n # indexed.\n for j in range(4):\n vecVAR[i] = sum((X - hatlambda(X)) ** 2) / 5\n# The horizontal lines are created in the next two lines, using plt.plot.\nplt.plot(range(rep), np.ones(rep) - 1.0, color = 'r')\nplt.plot(range(rep), np.ones(rep), color = 'b')\n\n# The data points are drawn, and given the form of x and o, to make it easier to spot.\nplt.plot(range(rep), vecEST1, 'x', alpha=0.3)\nplt.plot(range(rep), vecVAR, 'o', alpha=0.3)\n#The graph is shown at the end.\nplt.show()\n","sub_path":"MASD/Assignment 5/1cde.py","file_name":"1cde.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"263808727","text":"\"\"\"\nFile name :\nAuthor :\nEmail : aarc.88@gmail.com\nDate :\nLast edit :\nLanguage : Python 3.8 or >\nAeronautical Institute of Technology - Airbus Brazil\n\nDescription:\n - This function calculates the cabine dimensions\n - Reference PreSTO-Cabin - https://www.fzt.haw-hamburg.de/pers/Scholz/PreSTo/PreSTo-Cabin_Documentation_10-11-15.pdf\nInputs:\n -\nOutputs:\n -\nTODO's:\n -\n\n\"\"\"\n# =============================================================================\n# IMPORTS\n# =============================================================================\nimport math\nimport numpy as np\n# =============================================================================\n# CLASSES\n# =============================================================================\n\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\n\n\ndef fuselage_cross_section(fuselage):\n\n in_to_m = 0.0254\n\n # Seat dimensions economy class\n armrest_top = 22 # [inch]\n armrest_bottom = 7 # [inch]\n armrest_width = 2*in_to_m # armrest width\n armrest_top_height = armrest_top*in_to_m\n armrest_bottom_height = armrest_bottom*in_to_m\n\n seat_cushion_thickness_YC = 0.14 # [m] YC - economy class\n seat_cushion_width_YC = fuselage['seat_width']\n double_container = 'no'\n backrest_height = 0.59 # [m]\n floor_thickness = 0.117 # [m]\n aux = ((armrest_top_height-armrest_bottom_height) -\n seat_cushion_thickness_YC)/2\n seat_cushion_height = aux + armrest_bottom_height\n\n # Default values (95% american male)\n 
pax_distance_head_wall = 0.06 # [m]\n pax_distance_shoulder_wall = 0.04 # [m]\n pax_shoulder_breadth = 0.53 # [m]\n pax_eye_height = 0.87 # [m]\n pax_midshoulder_height = 0.70 # [m]\n\n delta_z_symmetry_inferior = -1\n delta_z_symmetry_superior = 2\n points_number = 20\n seat_delta_width_floor = 0.025\n\n iterations = 12\n if double_container == 'no':\n if fuselage['container_type'] == 'none':\n lowerdeck_width_top = 0\n lowerdeck_width_bottom = 0\n lowerdeck_height = 0\n elif fuselage['container_type'] == 'LD1':\n lowerdeck_width_bottom = 1.56\n lowerdeck_width_top = 2.44\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD11':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 3.28\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD2':\n lowerdeck_width_bottom = 1.19\n lowerdeck_width_top = 1.66\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD26':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.16\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD29':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.82\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD3':\n lowerdeck_width_bottom = 1.56\n lowerdeck_width_top = 2.11\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD3-45':\n lowerdeck_width_bottom = 1.56\n lowerdeck_width_top = 2.11\n lowerdeck_height = 1.19\n elif fuselage['container_type'] == 'LD3-45R':\n lowerdeck_width_bottom = 1.56\n lowerdeck_width_top = 1.66\n lowerdeck_height = 1.19\n elif fuselage['container_type'] == 'LD3-45W':\n lowerdeck_width_bottom = 1.43\n lowerdeck_width_top = 2.53\n lowerdeck_height = 1.14\n elif fuselage['container_type'] == 'LD39':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.82\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD4':\n lowerdeck_width_bottom = 2.44\n lowerdeck_width_top = 2.54\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD6':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.16\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD8':\n lowerdeck_width_bottom = 2.44\n lowerdeck_width_top = 3.28\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD9':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 3.28\n lowerdeck_height = 1.68\n else:\n if fuselage['container_type'] == 'None':\n lowerdeck_width_top = 0\n lowerdeck_width_bottom = 0\n lowerdeck_height = 0\n elif fuselage['container_type'] == 'LD1':\n lowerdeck_width_bottom = 3.22\n lowerdeck_width_top = 4.77\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD11':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 3.28\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD2':\n lowerdeck_width_bottom = 2.49\n lowerdeck_width_top = 3.22\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD26':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.16\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD29':\n lowerdeck_width_bottom = 3.18\n lowerdeck_width_top = 4.82\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD3':\n lowerdeck_width_bottom = 3.22\n lowerdeck_width_top = 4.11\n lowerdeck_height = 1.68\n elif fuselage['container_type'] == 'LD3-45':\n lowerdeck_width_bottom = 3.22\n lowerdeck_width_top = 4.11\n lowerdeck_height = 1.19\n elif fuselage['container_type'] == 'LD3-45R':\n lowerdeck_width_bottom = 1.56\n lowerdeck_width_top = 1.66\n lowerdeck_height = 1.19\n elif fuselage['container_type'] == 
'LD3-45W':\n            lowerdeck_width_bottom = 1.43\n            lowerdeck_width_top = 2.53\n            lowerdeck_height = 1.14\n        elif fuselage['container_type'] == 'LD39':\n            lowerdeck_width_bottom = 3.18\n            lowerdeck_width_top = 4.82\n            lowerdeck_height = 1.68\n        elif fuselage['container_type'] == 'LD4':\n            lowerdeck_width_bottom = 2.44\n            lowerdeck_width_top = 2.54\n            lowerdeck_height = 1.68\n        elif fuselage['container_type'] == 'LD6':\n            lowerdeck_width_bottom = 3.18\n            lowerdeck_width_top = 4.16\n            lowerdeck_height = 1.68\n        elif fuselage['container_type'] == 'LD8':\n            lowerdeck_width_bottom = 2.44\n            lowerdeck_width_top = 3.28\n            lowerdeck_height = 1.68\n        elif fuselage['container_type'] == 'LD9':\n            lowerdeck_width_bottom = 3.18\n            lowerdeck_width_top = 3.28\n            lowerdeck_height = 1.68\n\n    if fuselage['aisles_number'] == 1:\n        seats_number = max(fuselage['seat_abreast_number'], 2) # minimum number of seats abreast is 2\n        seats_number = min(seats_number, 6) # maximum number of seats abreast is 6\n    elif fuselage['aisles_number'] == 2:\n        seats_number = max(fuselage['seat_abreast_number'], 6) # minimum number of seats abreast is 6\n        seats_number = min(seats_number, 9) # maximum number of seats abreast is 9\n\n    fuselage['seat_abreast_number'] = seats_number\n\n    if fuselage['aisles_number'] == 1:\n        left_fuselage_seats = math.ceil(fuselage['seat_abreast_number']/2)\n        right_fuselage_seats = fuselage['seat_abreast_number'] - left_fuselage_seats\n    else:\n        if fuselage['seat_abreast_number'] == 6:\n            left_fuselage_seats = 2\n            right_fuselage_seats = 2\n            center_fuselage_seats = 2\n        elif fuselage['seat_abreast_number'] == 7:\n            left_fuselage_seats = 2\n            right_fuselage_seats = 2\n            center_fuselage_seats = 3\n        elif fuselage['seat_abreast_number'] == 8:\n            left_fuselage_seats = 3\n            right_fuselage_seats = 3\n            center_fuselage_seats = 2\n        elif fuselage['seat_abreast_number'] == 9:\n            left_fuselage_seats = 3\n            right_fuselage_seats = 3\n            center_fuselage_seats = 3\n\n    # Calculate the width coordinates for the various points\n    w0 = 0.5*fuselage['cabine_height']*fuselage['aisles_number']\n    w4 = fuselage['seat_abreast_number']*seat_cushion_width_YC + fuselage['aisles_number'] * \\\n        fuselage['aisle_width'] + (fuselage['seat_abreast_number'] -\n                                   fuselage['aisles_number'] + 1)*armrest_width\n    y_last_seat = 0.5*(w4 - seat_cushion_width_YC - 2*armrest_width)\n    w1 = 2*y_last_seat\n    w2 = 2*(pax_distance_head_wall + 0.084 + y_last_seat)\n    w3 = 2*(pax_distance_shoulder_wall + pax_shoulder_breadth/2 + y_last_seat)\n    w5 = w4\n    w6 = w4 - 2*seat_delta_width_floor - 2*armrest_width\n    w7 = lowerdeck_width_top\n    w8 = lowerdeck_width_top\n    w9 = lowerdeck_width_bottom\n\n    while iterations > 0:\n        iterations = iterations - 1\n\n        k = 0\n        k_minimum = points_number\n        result_z_symmetry_minimum = 1000\n        k_minimum2 = points_number\n        result_z_symmetry_minimum2 = 1000\n\n        while k <= points_number:\n            delta_z_symmetry = k * \\\n                (delta_z_symmetry_superior - delta_z_symmetry_inferior) / \\\n                points_number + delta_z_symmetry_inferior\n            h0 = fuselage['cabine_height'] - delta_z_symmetry\n            h1 = pax_eye_height + seat_cushion_height - \\\n                delta_z_symmetry + 0.126 + pax_distance_head_wall\n            h2 = pax_eye_height + seat_cushion_height - delta_z_symmetry\n            h3 = pax_midshoulder_height + seat_cushion_height - delta_z_symmetry\n            h4 = armrest_top_height - delta_z_symmetry\n            h5 = armrest_bottom_height - delta_z_symmetry\n            h6 = -delta_z_symmetry\n            h7 = -delta_z_symmetry - floor_thickness\n            h8 = -delta_z_symmetry - floor_thickness - lowerdeck_height + \\\n                (lowerdeck_width_top - lowerdeck_width_bottom)/2\n            h9 = 
-delta_z_symmetry - floor_thickness - lowerdeck_height\n\n            # Calculate semi width of the ellipse describing the fuselage\n            a0 = np.sqrt(((w0/2)**2 + h0**2)/(fuselage['height_to_width_ratio'])**2)\n            a1 = np.sqrt(((w1/2)**2 + h1**2)/(fuselage['height_to_width_ratio'])**2)\n            a2 = np.sqrt(((w2/2)**2 + h2**2)/(fuselage['height_to_width_ratio'])**2)\n            a3 = np.sqrt(((w3+0.04)/2)**2 +\n                         ((h3**2)/(fuselage['height_to_width_ratio']**2)))\n            a4 = np.sqrt(((w4+0.04)/2)**2 +\n                         ((h4**2)/(fuselage['height_to_width_ratio']**2)))\n            a5 = np.sqrt(((w5/2)**2 + h5**2)/(fuselage['height_to_width_ratio'])**2)\n            a6 = np.sqrt(((w6/2)**2 + h6**2)/(fuselage['height_to_width_ratio'])**2)\n            a7 = np.sqrt(((w7/2)**2 + h7**2)/(fuselage['height_to_width_ratio'])**2)\n            a8 = np.sqrt(((w8/2)**2 + h8**2)/(fuselage['height_to_width_ratio'])**2)\n            a9 = np.sqrt(((w9/2)**2 + h9**2)/(fuselage['height_to_width_ratio'])**2)\n\n            # Get the maximum value of these widths, so each point is inside\n            # the fuselage\n            array_widths = [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9]\n            maximum_width = max(array_widths)\n\n            # If the current width is one of the 2 smallest, then it has to be stored\n            if maximum_width < result_z_symmetry_minimum:\n                k_minimum2 = k_minimum\n                result_z_symmetry_minimum2 = result_z_symmetry_minimum\n                k_minimum = k\n                result_z_symmetry_minimum = maximum_width\n            elif maximum_width < result_z_symmetry_minimum2:\n                k_minimum2 = k\n                result_z_symmetry_minimum2 = maximum_width\n\n            k = k + 1\n\n        # Update the interval where delta_z_symmetry has to be\n        if k_minimum < k_minimum2:\n            if k_minimum > 0:\n                k = k_minimum - 1\n                delta_z_symmetry_inferior_new = k * \\\n                    (delta_z_symmetry_superior - delta_z_symmetry_inferior) / \\\n                    (points_number + delta_z_symmetry_inferior)\n            if k_minimum < points_number:\n                k = k_minimum2 + 1\n                delta_z_symmetry_superior_new = k * \\\n                    (delta_z_symmetry_superior - delta_z_symmetry_inferior) / \\\n                    (points_number + delta_z_symmetry_inferior)\n        else:\n            if k_minimum2 > 0:\n                k = k_minimum2 - 1\n                delta_z_symmetry_inferior_new = k * \\\n                    (delta_z_symmetry_superior - delta_z_symmetry_inferior) / \\\n                    (points_number + delta_z_symmetry_inferior)\n            if k_minimum < points_number:\n                k = k_minimum + 1\n                delta_z_symmetry_superior_new = k * \\\n                    (delta_z_symmetry_superior - delta_z_symmetry_inferior) / \\\n                    (points_number + delta_z_symmetry_inferior)\n\n        delta_z_symmetry_inferior = delta_z_symmetry_inferior_new\n        delta_z_symmetry_superior = delta_z_symmetry_superior_new\n        minimum_width = result_z_symmetry_minimum\n\n    # Update the fuselage equivalent diameter and fuselage and floor thickness\n    fuselage_equivalent_diameter = 2 * \\\n        minimum_width*np.sqrt(fuselage['height_to_width_ratio'])\n    fuselage_thickness = (0.084 + 0.045*fuselage_equivalent_diameter)/2\n    floor_thickness = 0.035 * \\\n        (fuselage_equivalent_diameter + fuselage_thickness)\n\n    besta = [delta_z_symmetry_inferior, delta_z_symmetry_superior]\n    fuselage_Dz_floor = min(besta)\n    fuselage_thickness_floor = floor_thickness\n    fuselage_outer_equivalent_diameter = fuselage_equivalent_diameter + 2*fuselage_thickness\n    fuselage_df = fuselage_outer_equivalent_diameter\n    fuselage_wf = fuselage_thickness\n\n    fuselage_rails_number_right = 2\n    fuselage_rails_number_left = 2\n    fuselage_dseat_seat_rail = 0.2\n\n    axis_x = 2*minimum_width\n    axis_y = axis_x*fuselage['height_to_width_ratio']\n\n    axis_x_exterior = axis_x + 2*fuselage_thickness\n    axis_y_exterior = axis_y + 2*fuselage_thickness\n\n    fuselage['width'] = axis_x_exterior\n    fuselage['height'] = axis_y_exterior\n\n    return 
fuselage\n# =============================================================================\n# MAIN\n# =============================================================================\n\n# =============================================================================\n# TEST\n# =============================================================================\n","sub_path":"aircraft_framework_win/framework_PhD/framework/Sizing/Geometry/fuselage_sizing.py","file_name":"fuselage_sizing.py","file_ext":"py","file_size_in_byte":14854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51140319","text":"from random_search import RandomAgent\nfrom bfs import BreadthFirstAgent\nfrom poker_game_example import PokerGame\nfrom greedy_search import GreedyAgent\n\nINIT_AGENT_STACK = 400\n\nrandomAgent = RandomAgent(\n    current_hand=None, stack=INIT_AGENT_STACK, action=None, action_value=None)\nbfsAgent = BreadthFirstAgent(\n    current_hand=None, stack=INIT_AGENT_STACK, action=None, action_value=None)\ngreedyAgent = GreedyAgent(\n    current_hand=None, stack=INIT_AGENT_STACK, action=None, action_value=None)\n\nprint('Result random agent')\nrandom_game = PokerGame(randomAgent)\nrandom_game.startGame()\nrandom_game.printResultingState()\n\nprint('Result breadth first agent')\nbreadth_game = PokerGame(bfsAgent)\nbreadth_game.startGame()\nbreadth_game.printResultingState()\n\nprint('Result greedy agent')\ngreedy_game = PokerGame(greedyAgent)\ngreedy_game.startGame()\ngreedy_game.printResultingState()\n","sub_path":"Lab2/lab2/Task2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"440919325","text":"#!/usr/bin/python\n#vim: set fileencoding:utf-8\n\nimport werobot\nrobot = werobot.WeRoBot(token='tokenhere') #token=\"tokenhere\"\n@robot.text\ndef echo(message):\n    a = '点击进入西电微信课堂'\n    if message.content == \"hello\" :\n        return a \n    else:\n        return '请输入hello'\nrobot.config['HOST'] = '0.0.0.0'\nrobot.config['PORT'] = 80\nrobot.run()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"327090536","text":"import os\r\nimport shutil\r\nimport sys\r\nimport win32api\r\n\r\nprojectPath=\"\"\r\njsFilePath=\"\";\r\ncompileParam=\"\"\r\njsFilePathNew=\"\"\r\noutLaya=False\r\nmyRoot=sys.path[0].replace(\"\\\\\",\"/\")+\"/\"\r\nprint(myRoot)\r\ncompileExe=myRoot+\"laya.js.exe\"\r\n\r\ndef getAbsPath(rpath):\r\n    return os.path.normpath(os.path.join(myRoot, rpath))\r\n\r\ndef copyFiles(sourceDir, targetDir): \r\n    if sourceDir.find(\".svn\") > 0: \r\n        return \r\n    for file in os.listdir(sourceDir): \r\n        sourceFile = os.path.join(sourceDir, file) \r\n        targetFile = os.path.join(targetDir, file) \r\n        if os.path.isfile(sourceFile): \r\n            if not os.path.exists(targetDir): \r\n                os.makedirs(targetDir) \r\n            if targetFile.find(\"max.js\")>0 or not os.path.exists(targetFile) or(os.path.exists(targetFile) and (os.path.getsize(targetFile) != os.path.getsize(sourceFile))): \r\n                open(targetFile, \"wb\").write(open(sourceFile, \"rb\").read()) \r\n        if os.path.isdir(sourceFile): \r\n            First_Directory = False \r\n            copyFiles(sourceFile, targetFile)\r\n    \r\ndef copyFileToTar(srcFile,tarFile):\r\n    print(\"copyFileToTar\",srcFile,tarFile);\r\n    if os.path.exists(srcFile):\r\n        pass;\r\n    else:\r\n        print(\"!exists:\",srcFile)\r\n        return;\r\n\r\n    if 
os.path.isdir(srcFile):\r\n        copyFiles(srcFile,tarFile);\r\n    else:\r\n        shutil.copyfile(srcFile,tarFile);\r\n\r\ndef sOpen(exe,param):\r\n    win32api.ShellExecute(0, 'open', exe,param,'',1)\r\n\r\ncompileParam=getAbsPath(\"../NLPPlatform.as3proj\")+\";iflash=false;chromerun=false;outlaya=false\";\r\n#sOpen(compileExe,compileParam);\r\nos.system(compileExe+\" \"+compileParam)\r\nprint(\"compile complete\");\r\ncopyFileToTar(getAbsPath(\"../bin/h5\"),getAbsPath(\"../../ElectronApp/NLPPlatform/h5/\"))\r\nprint(\"copy complete\")\r\nexePath=getAbsPath(\"../../Electron/EasyDesk.exe\")+\" \"+getAbsPath(\"../../ElectronApp/NLPPlatform\");\r\nprint(\"exe:\",exePath)\r\nos.system(exePath);\r\nprint(\"workDone\");\r\n","sub_path":"debugtoolplatform/NLPPlatform/pyTools/copyOutsweek.py","file_name":"copyOutsweek.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538784242","text":"import asyncio\nimport aiohttp\nimport json\nimport signal \nimport sys \nimport time \n\nloop= asyncio.get_event_loop()\nclient= aiohttp.ClientSession(loop= loop)\n\n\nasync def get_json(client, url):\n    async with client.get(url) as response:\n        assert response.status== 200\n        return await response.read()\n\n\nasync def get_raddit_top(subreddit, client):\n    print(\"\\nGetting reddit top for {}\\n\".format(subreddit))\n    data= await get_json(client, 'https://www.reddit.com/r/' + subreddit + '/top.json?sort=top&t=day&limit=5')\n\n    jdata= json.loads(data.decode('utf-8'))\n    for i in jdata['data']['children']:\n        time.sleep(.7)\n        score = i['data']['score']\n        title = i['data']['title']\n        link = i['data']['url']\n        print(subreddit+\">> \"+str(score) + ': ' + title + ' (' + link + ')')\n    print('DONE:', subreddit + '\\n')\n\n\ndef signal_handler(sig, frame):\n    print(\"\\nSTOPPING\\n\")\n    loop.stop()\n    client.close()\n    sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nasyncio.ensure_future(get_raddit_top(\"python\", client))\nasyncio.ensure_future(get_raddit_top(\"programming\", client))\nasyncio.ensure_future(get_raddit_top(\"compsci\", client))\n\nprint(\"Starting run forever\")\nloop.run_forever()\nprint(\"After forever\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"asynchronous.py","file_name":"asynchronous.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"473605568","text":"__author__ = 'jjanssen'\nimport config as cf\n\nsuffix_list = ['AG','srl','spa','SA','kft','sl','sas','ab','INC','llc','Games',\n               'ltd','Software','Apps','Media','Studios','Mobile','Studio',\n               'Technologies','Limited','Solutions','GmbH','Union','Labs','Bank',\n               'Entertainment','Digital','Technology','Co','Group','Interactive',\n               'App','Publishing','Productions','game','Pvt','Development','Corporation',\n               'Systems','Team','Tech','Company','Federal','Soft','Design','Corp','Services',\n               'Magazine','Network','Android','COLTD','Srl','International','Dev','A','Lab','web','Consulting','Pty']\n\nsuffix_list = [e.lower() for e in suffix_list]\n\n\ndef clean_punctuation(text):\n    result = []\n    if text is not None:\n        text_to_list = text.lower().split()\n        for text_element in text_to_list:\n            text_element = text_element.decode('utf-8')\n            result.append(text_element.translate(cf.removal_translate_table))\n        text = \" \".join(result)\n    return text\n\n\ndef clean_company_suffix(text):\n    result = []\n    if text is not None:\n        company_name_to_list = text.lower().split()\n        for name_element 
in company_name_to_list:\n            if name_element not in suffix_list:\n                result.append(name_element)\n        text = \" \".join(result)\n    return text\n\n\ndef clean_company(row):\n    if row['company'] is not None:\n        text = row['company']\n        text_cleaned_punctuation = clean_punctuation(text)\n        row['cleaned_company'] = clean_company_suffix(text_cleaned_punctuation)\n    else:\n        row['cleaned_company'] = row['company']\n    return row","sub_path":"exact-matching-improvement/lib/feature_company.py","file_name":"feature_company.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"32068296","text":"#coding:utf-8\n\n# core\nNLP_VERB = '/v' # verb\nNLP_VERB_NOUN = '/vn' # verbal noun\nNLP_VERB_S = '/vg' # short verb\nNLP_VERB_ADV = '/vd' # adverbial verb\n\nNLP_NOUN = '/n' # noun\nNLP_NAME = '/nr' # person name\nNLP_ORG = '/nt' # organization name\nNLP_LOCA = '/ns' # place name\nNLP_SITE = '/na' # scenic spot\nNLP_NOUN_SPE = '/nz' # proper noun\nNLP_NOUN_S = '/ng' # short noun\nNLP_NOUN_FOR = '/nx' # loanword\nNLP_STA = '/s' # location\n\nNLP_PREP = '/p' # preposition\n\nNLP_ADJ = '/a' # adjective\nNLP_ADJ_2 = '/z' # adjective\nNLP_ADJ_S = '/ag' # short adjective\nNLP_ADJ_S2 = '/bg' # short adjective\nNLP_ADJ_NOUN = '/an' # adjectival noun\nNLP_ADV = '/ad' # adverb\nNLP_ADV_2 = '/b' # adverb\nNLP_ADV_S = '/dg' # short adverb\n\nNLP_TIME = '/t' # time word\nNLP_TIME_S = '/tg' # short time word\nNLP_NUM = '/m' # numeral\nNLP_NUM_S = '/mg' # ordinal expressed with heavenly stems and earthly branches\nNLP_QUAN = '/q' # measure word\nNLP_REP = '/r' # pronoun\nNLP_ABB = '/j' # abbreviation\nNLP_SLA = '/l' # colloquialism or slang\nNLP_IDI = '/i' # idiom\nNLP_NEG = '/h' # negation word\n\n# omitted\nNLP_AUX = '/u' # auxiliary particle, e.g. 的\nNLP_MARK = '/w' # symbol\nNLP_PLU = '/k' # plural particle\nNLP_DIR = '/f' # direction word\nNLP_DEG = '/d' # degree adverb\nNLP_CONN = '/c' # conjunction\nNLP_TONE = '/y' # modal particle\nNLP_TONE_S = '/yg' # classical Chinese modal particle\nNLP_MIME = '/o' # onomatopoeia\nNLP_HREP = '/rg' # short personal pronoun\nNLP_TONE_2 = '/e' # sentence-initial particle\n\n","sub_path":"define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"434127949","text":"from __future__ import division\nfrom torchvision import transforms, utils\nimport argparse\nimport math\nimport numpy as np\nimport os\nimport socket\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\n\nproject_path = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(project_path)\nfrom data.cifar_loader import Cifar10Dataset, Cifar100Dataset\nfrom data.load_cifar10 import load_cifar10\nfrom data.load_cifar100 import load_cifar100\nfrom nets.nets_cosine import DenseNetCosine, WideResNetCosine\nfrom torch_model.model import Model, SGDNoWeightDecayLast\nimport helper.common_helper as com_help\nimport helper.ood_helper as ood_help\n\n\nparser = argparse.ArgumentParser(\n    description='Out-of-distribution detection, neural network training.'\n)\nparser.add_argument(\n    '--nn', default=\"dense-100\", type=str,\n    help='neural network name { dense-100 | wrn-28-10 }'\n)\nparser.add_argument(\n    '--tr_dset', default=\"cifar10\", type=str,\n    help='training (in-distribution) dataset { cifar10 | cifar100 }'\n)\n\n\ndataset_details = {\n    'cifar10': {\n        'n_class': 10,\n        'mean': [0.49137255, 0.48235294, 0.44666667],\n        'std': [0.24705882, 0.24352941, 0.26156863],\n        'dset': Cifar10Dataset,\n        'n_channel': 3,\n    },\n    'cifar100': {\n        'n_class': 100,\n        'mean': [0.5071, 0.4865, 0.4409],\n        'std': [0.2673, 0.2564, 0.2762],\n        'dset': Cifar100Dataset,\n        'n_channel': 3,\n    },\n}\n\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    assert args.nn is not None, \"Please specify '--nn'.\"\n    assert args.tr_dset is 
not None, \"Please specify '--tr_dset'.\"\n\n # General details\n gpu_amount = 1\n tr_dset_name = args.tr_dset\n network_name = args.nn\n gpus = range(0, gpu_amount)\n print(\"Running on: {}\".format(socket.gethostname()))\n\n # Dataset details\n n_class = dataset_details[tr_dset_name]['n_class']\n mean = dataset_details[tr_dset_name]['mean']\n std = dataset_details[tr_dset_name]['std']\n Dataset = dataset_details[tr_dset_name]['dset']\n n_ch = dataset_details[tr_dset_name]['n_channel']\n\n # OOD testing dataset\n _, _, cifar10_x, cifar10_y = load_cifar10()\n cifar10_x = (cifar10_x - mean) / std\n _, _, cifar100_x, cifar100_y = load_cifar100()\n cifar100_x = (cifar100_x - mean) / std\n datasets = ood_help.get_ood_dataset_cifar(mean, std)\n datasets['cifar10'] = cifar10_x\n datasets['cifar100'] = cifar100_x\n\n # Remove first 1,000 images because they are used as val sample for other method.\n te_datasets = {}\n for dset in datasets.keys():\n np.random.seed(123)\n tmp_data = np.random.permutation(datasets[dset])\n te_datasets[dset] = tmp_data[1000:]\n datasets = te_datasets\n\n # The Evaluation for 5 runs\n all_acc_tpr95s, all_aurocs, all_aupr_ins = [], [], []\n for run_id in range(1, 1+5):\n ckpt_path = os.path.join(project_path, 'ckpt', 'cos', network_name, tr_dset_name, 'run-{}'.format(run_id))\n\n # Transforms applied to testing image\n composed = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)\n ])\n te_dset = Dataset('test', composed)\n te_loader = torch.utils.data.DataLoader(\n te_dset, batch_size=128, shuffle=False, num_workers=4)\n\n if 'wrn' in network_name:\n _, n_layer, widen = network_name.split('-')\n net = WideResNetCosine(\n int(n_layer), n_class, widen_factor=int(widen), input_n_channel=n_ch,\n )\n w_decay = 5e-4\n epoch = 200\n elif 'dense' in network_name:\n _, n_layer = network_name.split('-')\n net = DenseNetCosine(\n int(n_layer), n_class, input_n_channel=n_ch,\n )\n w_decay = 1e-4\n epoch = 300\n\n params = net.parameters()\n criterion = nn.CrossEntropyLoss()\n optim_fn = SGDNoWeightDecayLast\n optimizer = optim_fn(\n params, lr=1, momentum=0.9, nesterov=True, weight_decay=w_decay,\n )\n\n model = Model(gpus, ckpt_path, net, optimizer, criterion)\n\n # OOD Evaluation\n keys_preds = ['scaled_cosine', 'softmax', 'scale', 'cosine_similarity']\n preds = com_help.get_predictions(model, datasets, keys_preds)\n \n if tr_dset_name in ['cifar10']:\n lbl = cifar10_y\n elif tr_dset_name in ['cifar100']:\n lbl = cifar100_y\n\n if tr_dset_name == 'cifar10':\n ood_datasets = [\n 'imnet_cropped', 'imnet_cropped_mod', 'imnet_resized', \n 'lsun_cropped', 'lsun_cropped_mod', 'lsun_resized', 'isun', 'svhn', 'food101',\n 'mnist', 'fmnist', 'notmnist', 'gaus_noise', 'unif_noise',\n ]\n elif tr_dset_name == 'cifar100':\n ood_datasets = [\n 'imnet_cropped', 'imnet_cropped_mod', 'imnet_resized',\n 'lsun_cropped', 'lsun_cropped_mod', 'lsun_resized', 'isun', 'svhn', 'food101', 'stl10',\n 'mnist', 'fmnist', 'notmnist', 'gaus_noise', 'unif_noise',\n ]\n\n acc_tpr_95s, aurocs, aupr_ins = com_help.ood_detection_eval(\n preds, tr_dset_name, 'cosine_similarity', ood_datasets, run_id\n )\n all_acc_tpr95s.append(acc_tpr_95s)\n all_aurocs.append(aurocs)\n all_aupr_ins.append(aupr_ins)\n\n # Summarize the result\n all_acc_tpr95s = np.stack(all_acc_tpr95s).T\n all_aurocs = np.stack(all_aurocs).T\n all_aupr_ins = np.stack(all_aupr_ins).T\n mean_acc_tpr95s = np.mean(all_acc_tpr95s, axis=1)\n mean_aurocs = np.mean(all_aurocs, axis=1)\n mean_aupr_ins = 
np.mean(all_aupr_ins, axis=1)\n    std_acc_tpr95s = np.std(all_acc_tpr95s, axis=1, ddof=1)\n    std_aurocs = np.std(all_aurocs, axis=1, ddof=1)\n    std_aupr_ins = np.std(all_aupr_ins, axis=1, ddof=1)\n    ood_names = []\n    translator = {\n        'imnet_cropped': 'TIN(c)',\n        'imnet_cropped_mod': 'TIN(c)*',\n        'imnet_resized': 'TIN(r)',\n        'lsun_cropped': 'LSUN(c)',\n        'lsun_cropped_mod': 'LSUN(c)*',\n        'lsun_resized': 'LSUN(r)',\n        'isun': 'iSUN',\n        'svhn': 'SVHN',\n        'food101': 'Food-101',\n        'stl10': 'STL-10',\n        'mnist': 'MNIST',\n        'fmnist': 'F-MNIST',\n        'notmnist': 'NotMNIST',\n        'gaus_noise': 'Gaussian',\n        'unif_noise': 'Uniform',\n    }\n    print('-------------------------------------------')\n    print('    OOD Detection Performance of 5 runs    ')\n    print('-------------------------------------------')\n    print('| Acc@TPR95 | AUROC | AUPR In |')\n    print('-------------------------------------------')\n    acc_tpr_95s, aurocs, aupr_ins = [], [], []\n    for i in range(len(mean_aurocs)):\n        print('| {:.2f}({:.2f}) | {:.2f}({:.2f}) | {:.2f}({:.2f}) | {}'.format(\n            mean_acc_tpr95s[i], std_acc_tpr95s[i], mean_aurocs[i], std_aurocs[i],\n            mean_aupr_ins[i], std_aupr_ins[i], translator[ood_datasets[i]]\n        ))\n\n    print('-------------------------------------------')\n","sub_path":"cosine_ood_detector.py","file_name":"cosine_ood_detector.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563328339","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns=[\n    #urls for pages\n    path('',views.new,name='home'),\n    path('home',views.new,name='home'),\n    path('Login', views.Login,name='Login'),\n    path('index', views.index,name='index'),\n    path('FileUpload', views.FileUpload,name='FileUpload'),\n    path('MainObjectives', views.MainObjectives,name='MainObjectives'),\n    path('DemoObjectives', views.DemoObjectives,name='DemoObjectives'),\n    path('PharmObjectives', views.PharmObjectives,name='PharmObjectives'),\n    path('OPObjectives', views.OPObjectives,name='OPObjectives'),\n    path('DischargeObjectives', views.DischargeObjectives,name='DischargeObjectives'),\n    path('ApptObjectives', views.ApptObjectives,name='ApptObjectives'),\n    path('BedObjectives', views.BedObjectives,name='BedObjectives'),\n    path('DocObjectives', views.DocObjectives,name='DocObjectives'),\n    path('MLCObjectives', views.MLCObjectives,name='MLCObjectives'),\n    path('SpecObjectives', views.SpecObjectives,name='SpecObjectives'),\n    path('WaitObjectives', views.WaitObjectives,name='WaitObjectives'),\n    path('WalkInObjectives', views.WalkInObjectives,name='WalkInObjectives'),\n    path('WardObjectives', views.WardObjectives,name='WardObjectives'),\n\n    path('NursingStationObjectives',views.NursingStationObjectives,name='NursingStationObjectives'),\n    path('OrdersPerPatient',views.OrdersPerPatient,name='OrdersPerPatient'),\n    path('TopMovableObjectives',views.TopMovableObjectives,name='TopMovableObjectives'),\n    path('DrugStockObjectives',views.DrugStockObjectives,name='DrugStockObjectives'),\n    path('Radiology',views.Radiology,name='Radiology'),\n    path('SurgeryObjectives',views.SurgeryObjectives,name='SurgeryObjectives'),\n\n\n    path('InsuranceObjectives',views.InsuranceObjectives,name='InsuranceObjectives'),\n    path('GenderObjectives',views.GenderObjectives,name='GenderObjectives'),\n    path('AgeObjectives',views.AgeObjectives,name='AgeObjectives'),\n    path('ApptDemoObjectives',views.ApptDemoObjectives,name='ApptDemoObjectives'),\n 
path('WalkinDemoObjectives',views.WalkinDemoObjectives,name='WalkinDemoObjectives'), \n path('AreaObjectives',views.AreaObjectives,name='AreaObjectives'),\n\n\n\n\n path('Login',views.Login,name='Login'),\n path('Logout',views.Logout,name='Logout'),\n\n #urls for Anisha's visualisations ########################################################################\n\n path('EachWardBed',views.EachWardBed,name='EachWardBed'),\n path('AllWardBed',views.AllWardBed,name='AllWardBed'),\n path('SpecificWardBed',views.SpecificWardBed,name='SpecificWardBed'),\n path('SpecDocBed',views.SpecDocBed,name='SpecDocBed'),\n path('SpecificSpecBed',views.SpecificSpecBed,name='SpecificSpecBed'),\n path('EachDoc',views.EachDoc,name='EachDoc'),\n path('EachSpec',views.EachSpec,name='EachSpec'),\n path('SpecificDocSpecificSpec',views.SpecificDocSpecificSpec,name='SpecificDocSpecificSpec'),\n path('EachWard',views.EachWard,name='EachWard'),\n path('SpecificWardDay',views.SpecificWardDay,name='SpecificWardDay'),\n path('SpecWardSpecSpecSpecDoc',views.SpecWardSpecSpecSpecDoc,name='SpecWardSpecSpecSpecDoc'),\n path('SpecificWardSpecificSpec',views.SpecificWardSpecificSpec,name='SpecificWardSpecificSpec'),\n\n path('PaymentsFilter',views.paymentsFilter,name='PaymentsFilter'),\n\n path('MLCWard',views.MLCWard,name='MLCWard'),\n path('MLCSpecialty',views.MLCSpecialty,name='MLCSpecialty'),\n\n path('DeptAppt',views.DeptAppt,name='DeptAppt'),\n path('DocAppt',views.DocAppt,name='DocAppt'),\n path('DocW',views.DocW,name='DocW'),\n path('DeptW',views.DeptW,name='DeptW'),\n\n\n\n #urls for Amala's visualisations ##########################################################################\n\n path('PharmacyNursingStation_priority', views.pharm_priority,name='PharmacyNursingStation_priority'), \n path('PharmacyNursingStation_station', views.pharm_station,name='PharmacyNursingStation_station'), \n path('PharmacyNursingStation_overall', views.pharm_overall,name='PharmacyNursingStation_overall'),\n\n path('TopMedicines_station', views.topmovable_station,name='TopMedicines_station'), \n path('TopMedicines_overall', views.topmovable_overall,name='TopMedicines_overall'),\n\n path('DrugStock_itemcat', views.drugstock_itemcat,name='DrugStock_itemcat'), \n path('DrugStock_overall', views.drugstock_overall,name='DrugStock_overall'),\n\n path('Surgery_surgeryname', views.surgeryname,name='Surgery_surgeryname'), \n path('Surgery_surgerydept', views.surgerydept,name='Surgery_surgerydept'),\n\n\n #all urls for varsha team ###################################################################################\n\n path('insurancedepartment',views.insurancedepartment,name='insurancedepartment'),\n path('insuranceward',views.insuranceward,name='insuranceward'),\n path('insurancearea',views.insurancearea,name='insurancearea'),\n\n\n path('genderdepartment',views.genderdepartment,name='genderdepartment'),\n path('genderward',views.genderward,name='genderward'),\n path('genderarea',views.genderarea,name='genderarea'),\n \n path('agedepartment',views.agedepartment,name='agedepartment'),\n path('ageward',views.ageward,name='ageward'),\n path('agearea',views.agearea,name='agearea'),\n\n path('appointmentdepartment',views.appointmentdepartment,name='appointmentdepartment'),\n path('appointmentarea',views.appointmentarea,name='appointmentarea'),\n\n path('walkindepartment',views.walkindepartment,name='walkindepartment'),\n 
path('walkinarea',views.walkinarea,name='walkinarea'),\n\n\n\n]","sub_path":"mainpage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"292053666","text":"#! /usr/bin/env python\n# From https://stackoverflow.com/questions/16198546/get-exit-code-and-stderr-from-subprocess-call/16198668\n\nimport subprocess\n\ncmd_args = ['ls', '-a']\n\npipes = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nstd_out, std_err = pipes.communicate(timeout=3)\n\nif pipes.returncode != 0:\n    # an error happened!\n    print(\"problem2\")\n    err_msg = \"%s. Code: %s\" % (std_err.strip(), pipes.returncode)\n    raise Exception(err_msg)\nelif len(std_err):\n    # return code is 0 (no error), but we may want to\n    # do something with the info on std_err\n    # i.e. logger.warning(std_err)\n    print(\"stderr: \" + str(std_err))\n\n# do whatever you want with std_out\nprint(\"stdout: \" + str(std_out))\n# i.e. json.loads(std_out)\n","sub_path":"python/pipetest.py","file_name":"pipetest.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569966739","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport sys\n\ninfile = sys.argv[1]\noutfile = sys.argv[2]\nwindow_size = int(sys.argv[3])\n\ndef main():\n    with open(infile, 'r') as f:\n        a = []\n        for line in f:\n            buffer = line.split()\n            if(buffer[0] == \"#\" or buffer[0] == \"@\" or buffer[0] == \"@TYPE\"):\n                continue\n            else:\n                a.append(buffer)\n\n    # convert to numpy array for easier manipulation later\n    # astype will convert the array of strings to floats\n    a = np.array(a)\n    a = a.astype(np.float)\n\n    # compute the length of array after using the window size\n    len_b = len(a) // window_size # Python gives the floor\n    if len_b == 0 or len_b < 0:\n        print(\"something wrong with the length of window size or array\")\n        return 1\n\n    # use row count to move along the original array, add window size to it every iteration\n    # create b as an empty array so that vstack can be used later\n    # this will not consider the edge effect at the end of the array!!!\n    row_count = 0\n    b = np.array([0, 0])\n    while len_b > 0:\n        b = np.vstack((b, [a[row_count, 0], np.mean(a[row_count:row_count+window_size, 1])]))\n        row_count += window_size\n        len_b = len_b - 1\n\n    b = np.delete(b, 0, 0)\n    fig = plt.figure(dpi=300)\n    ax = fig.add_subplot(111)\n    plt.plot(b[:,0], b[:,1])\n    plt.savefig(outfile, dpi=300)\n    return 0\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"plot_xvg_run_avg.py","file_name":"plot_xvg_run_avg.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490789077","text":"#!/usr/bin/env python\n# HW03_ex06\n# (1) Please comment your code.\n# (2) Please be thoughtful when naming your variables.\n# (3) Please remove development code before submitting.\n################################################################################\n# Exercise 1\n# When you submit only include your final function: compare\ndef compare(x,y):\n#comparing x and y to return 1 if x>y, 0 if they are equal and -1 if x<y\n\tif (x>y):\t\t\n#\t\tprint(\"1\")\n\t\treturn 1\n\tif (x==y):\n#\t\tprint(\"0\")\n\t\treturn 0\n\tif (x<y):\n\t\treturn -1\n# \t\t\t\tif len(filter_result1) > 0: # username (phone number) or student ID already taken\n# \t\t\t\t\treturn render(request, 'register.html', {'form': rf, 'msg': \"该手机号或学号已存在!\"})\n# \t\t\t\telse:\n# \t\t\t\t\tusername = 
rf.cleaned_data['username']\n# \t\t\t\t\tuserID = rf.cleaned_data['userID']\n# \t\t\t\t\tpassword1 = rf.cleaned_data['password1']\n# \t\t\t\t\tpassword2 = rf.cleaned_data['password2']\n# \t\t\t\tif password1 != password2:\n# \t\t\t\t\treturn render(request, 'register.html', {'form': rf, 'msg': \"输入密码不一致!\"})\n# \t\t\t\telse:\n# \t\t\t\t\tuser = models.NewUser(username=username, password=password1)\n# \t\t\t\t\tuser.set_password(password1) # important!!! hash the password, otherwise it is stored as plain text (also shown in admin) and login fails\n# \t\t\t\t\tuser.save()\n# \t\t\t\t\tnew_password = password1.encode('ascii')\n# \t\t\t\t\tu = models.UserProfile.objects.create(user=user, username=username,\n# \t\t\t\t\t                                      userID=userID, password=new_password,\n# \t\t\t\t\t                                      userphone=username) # create the matching user profile on successful registration\n# \t\t\t\t\tu.save()\n# \t\t\t\t\treturn redirect('/login', {'form': rf, 's': \"注册成功!请登录!\"})\n# \t\t\telse:\n# # \t\t\t\treturn render(request, 'register.html', {'form': rf, 'msg': \"等待正确输入!\"})\n\ndef register(request):\n    msg = \"\"\n    if request.POST:\n        rf = CaptchaTestForm(request.POST)\n        if rf.is_valid():\n            username = rf.cleaned_data['username']\n            filter_result1 = models.NewUser.objects.filter(username=username)\n            if len(filter_result1) > 0:\n                msg = \"该手机或学号已经存在\"\n                return render(request, 'register.html', {'rf': rf, 'msg': msg})\n            else:\n                username = rf.cleaned_data['username']\n                userID = rf.cleaned_data['userID']  # student ID\n                password1 = rf.cleaned_data['password1']  # password\n                checknum = createPhoneCode()  # generated verification code\n                user = models.NewUser(username=username, password=password1)\n                user.set_password(password1)  # important!!! hash the password, otherwise it is stored as plain text (also shown in admin) and login fails\n                user.save()\n                new_password = password1.encode('ascii')\n                u = models.UserProfile.objects.create(user=user, username=username,\n                                                      userID=userID, password=new_password,\n                                                      userphone=username, checknum=checknum)  # create the matching user profile on successful registration\n\n                u.save()\n                sms_log = send_sms(key, text + checknum, username)\n                print(sms_log)\n                return redirect('/login', {'rf': rf, 's': \"注册成功!请登录!\"})\n\n        else:\n            # return HttpResponse('么么哒!验证码是小写哟')\n            msg = \"你的验证码输入有误\"\n    else:\n        rf = CaptchaTestForm()\n    return render(request, 'register.html', {'rf': rf, 'msg': msg})\n\n\n@login_required\ndef index(request):\n    all_mails = models.Mail.objects.all()  # parcels shown on the main page\n    mail = []\n    for x in all_mails:\n        if x.Situation == 0:  # only show parcels that have not been picked up yet\n            mail.append(x)\n\n    return render(request, 'index.html', {'mails': mail})  # return a context dict\n\n\n@permission_required('main.success_check', login_url='/check/')  # redirects to the check page\ndef mailpage(request, mail_id):\n    mail = models.Mail.objects.get(pk=mail_id)\n    return render(request, 'mailpage.html', {'mail': mail})\n\n\n@permission_required('main.success_check', login_url='/check/')\ndef Mail(request):\n    if request.method == 'POST':\n        qf = MailForm(request.POST)\n        if qf.is_valid():\n            user = request.user\n            whereup = qf.cleaned_data['whereup']\n            wheredown = qf.cleaned_data['wheredown']\n            detail = qf.cleaned_data['detail']\n            models.Mail.objects.create(WhereUP=whereup, WhereDown=wheredown, Host_user=user, Detail=detail)\n            u = models.UserProfile.objects.get(user=user)\n            u.please_num += 1\n            # requested-parcel count +1\n            u.save()\n            return HttpResponseRedirect('/')\n        else:\n            p = '快递所在地与到件地都不能为空'\n            return render(request, 'new.html', {'form': qf, 'msg': p})\n\n    else:\n        return render(request, 'new.html')\n\n\n@permission_required('main.success_check', login_url='/check/')\ndef user(request):\n    user = request.user\n    all_mails = models.Mail.objects.all()\n    mails = []\n    print(\"1\")\n    for x in all_mails:\n        if x.Take_user == user or x.Host_user == user:\n            mails.append(x)\n 
print(\"2\")\n return render(request, 'user.html', {'mails': mails})\n\n\n@permission_required('main.success_check', login_url='/check/')\ndef doing(request):\n user = request.user\n all_mails = models.Mail.objects.all()\n mails = []\n for x in all_mails:\n if x.Take_user == user or x.Host_user == user:\n mails.append(x)\n return render(request, 'doing.html', {'mails': mails})\n\n@permission_required('main.success_check', login_url='/check/')\ndef take(request, mail_id):\n user = request.user # 从当前请求中得到用户名\n mail = models.Mail.objects.get(pk=mail_id)\n if mail.Situation < 1:\n mail.Situation = 1\n mail.Take_user = user # 这里存在一个问题是如果先前有一个物品已经有了Take_user 则会产生错误!\n mail.Take_time = timezone.now()\n mail.save()\n return HttpResponseRedirect('/doing/')\n\n\n@permission_required('main.success_check', login_url='/check/')\ndef get(request, mail_id):\n mail = models.Mail.objects.get(pk=mail_id)\n mail.Situation = 2 # 到达状态码数\n user = mail.Take_user\n u = models.UserProfile.objects.get(user=user)\n u.zhiyuan += 1\n u.do_num += 1\n u.save()\n mail.Receive_time = timezone.now()\n mail.save()\n return HttpResponseRedirect('/user/')\n\n\n@permission_required('main.success_check', login_url='/check/')\ndef quxiao(request, mail_id):\n models.Mail.objects.get(pk=mail_id).delete()\n user = request.user\n u = models.UserProfile.objects.get(user=user)\n u.please_num -= 1\n u.kind_num -= 5 # 信誉度扣除5分\n u.save()\n return HttpResponseRedirect('/user/')\n\n\ndef check(request):\n user = request.user\n uProfile = models.UserProfile.objects.get(user=user)\n if request.method == 'POST':\n form = CheckForm(request.POST, request.FILES)\n if form.is_valid():\n form_sms_num = form.cleaned_data['sms_check']\n if form_sms_num == uProfile.checknum:\n uProfile.myimage = form.cleaned_data['updatephoto']\n uProfile.situation = 1\n uProfile.save()\n messages.success(request, '提交认证成功!我们的工作人员将在四个小时内验证请求')\n return HttpResponseRedirect('/')\n else:\n messages.error(request, '貌似你输入的验证码和注册时候的验证码不符')\n else:\n form = CheckForm()\n return render(request, 'check.html', {'form': form})\n\ndef xieyi(request):\n return render(request,'xieyi.html')\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"243258339","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 29 05:53:22 2018\n@author: itwill03\n\"\"\"\n\n\nimport numpy as np\nimport os\nimport glob\nimport tensorflow as tf\nimport pandas as pd\n\ntf.set_random_seed(777) \n\n#train_info = pd.read_csv(\"/home/itwill03/sound/train.csv\",delimiter=',')\n#train_data = np.genfromtxt(\"/home/itwill03/sound/yy/feature_train.csv\", delimiter=',')\ntrain_info = pd.read_csv(\"C:\\data\\sound/train.csv\",delimiter=',')\ntrain_data = np.genfromtxt(\"C:\\data\\sound\\mel_train2.csv\", delimiter=',')\ntrain_data.shape #Out[16]: (50, 16384)\n\n#normalizaion\ntrain_data = librosa.util.normalize(train_data) #axix = ?, norm = ?>0>?\n\n#label set\nlabels = train_info['label']\ndf_label = pd.DataFrame(labels)\nl = train_info['label'].unique()\n\nfor i in range(len(l)):\n df_label[df_label==l[i]] = i\n \n#train data set \ntrain_data = pd.DataFrame(train_data)\ntrain_data['label']=df_label\ntrain_data = train_data.astype(np.float32)\ntrain_data.shape #Out[20]: (50, 16385)\n#np.savetxt(\"c:/data/sound/train_data.csv\",train_data, delimiter=\",\")\n\n#훈련세트, validation세트 나누기\nfrom sklearn.model_selection import 
train_test_split\ntrain_set, validate_set = train_test_split(train_data, test_size = 0.3)\ntrainData = train_set.values[:,0:16384] \ntrainLabel = train_set.values[:,-1]\nvalidateData = validate_set.values[:,0:16384]\nvalidataLabel = validate_set.values[:,-1]\n\n#print (trainData.shape,trainLabel.shape,validateData.shape,validataLabel.shape)\ntf.reset_default_graph()\n\n# build the TensorFlow model\nn_dim = 16384\nn_classes = 41\ntraining_epochs = 700\nlearning_rate = 0.0004\nbatch_size = 100\nsteps_for_validate = 5\nkeep_prob = tf.placeholder(tf.float32)\n\nX = tf.placeholder(tf.float32, [None, n_dim])\n#X_img = tf.reshape(X, [-1, 128, 128, 1])\nY = tf.placeholder(tf.int32, [None, 1])\nY_onehot=tf.reshape(tf.one_hot(Y, 41), [-1, 41])\np_keep_conv = tf.placeholder(tf.float32, name='p_keep_conv')\np_keep_hidden = tf.placeholder(tf.float32, name='p_keep_hidden')\n\n\n\n# img shape = (?, 128, 128, 1)\nc1 = tf.layers.conv2d(tf.reshape(X, [-1, 128, 128, 1]), 32, kernel_size=[3, 3], strides=(1, 1), \n                      padding='same', activation=tf.nn.elu, name=\"c1\")\nn1 = tf.layers.batch_normalization(c1) \np1 = tf.layers.max_pooling2d(inputs=n1, pool_size=[2, 2], strides=2) \np1 = tf.nn.dropout(p1, p_keep_conv)\n\n# img shape = (?, 64, 64, 32)\nc2 = tf.layers.conv2d(tf.reshape(p1, [-1, 64, 64, 32]), 64, kernel_size=[3, 3], strides=(1, 1), \n                      padding='same', activation=tf.nn.elu, name=\"c2\")\nn2 = tf.layers.batch_normalization(c2) \np2 = tf.layers.max_pooling2d(inputs=n2, pool_size=[2, 2], strides=2) #shape = (?, 32, 32, 64)\np2 = tf.nn.dropout(p2, p_keep_conv)\n\n# img shape = (?, 32, 32, 64)\nc3 = tf.layers.conv2d(tf.reshape(p2, [-1, 32, 32, 64]), 128, kernel_size=[3, 3], strides=(1, 1), \n                      padding='same', activation=tf.nn.elu, name=\"c3\")\nn3 = tf.layers.batch_normalization(c3) \np3 = tf.layers.max_pooling2d(inputs=n3, pool_size=[2, 2], strides=2) #shape = (?, 16, 16, 128)\np3 = tf.nn.dropout(p3, p_keep_conv)\n\nL4_flat = tf.reshape(p3, shape=[-1, 16*16*128]) \nW1 = tf.get_variable(\"W1\", shape=[16*16*128, 64], initializer=tf.contrib.layers.xavier_initializer())\nL5 = tf.nn.elu(tf.matmul(L4_flat, W1))\nn5 = tf.layers.batch_normalization(L5) \nL5 = tf.nn.dropout(n5, p_keep_hidden)\n\nW2 = tf.get_variable(\"W2\", shape=[64,41],initializer=tf.contrib.layers.xavier_initializer())\nb = tf.Variable(tf.random_normal([41]))\nlogits = tf.matmul(L5, W2) + b\n\n# define cost/loss & optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels= Y_onehot))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam optimizer\npredict_op = tf.argmax(logits, 1, name=\"pred\")\n\n# initialize\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nsaver = tf.train.Saver()\n\n\n# train my model\n\nprint('Learning started. 
It takes some time.')\nfor epoch in range(training_epochs):\n    avg_cost = 0\n    total_batch = int(len(trainData) / batch_size)\n    for i in range(total_batch):\n        batch_xs = trainData[i*batch_size:(i+1)*batch_size]\n        batch_ys = trainLabel[i*batch_size:(i+1)*batch_size].reshape(-1, 1)\n        feed_dict = {X: batch_xs, Y: batch_ys, p_keep_conv: .7, p_keep_hidden: .5}\n        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\n        avg_cost += c / total_batch\n    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))\n    if epoch % steps_for_validate == steps_for_validate-1:\n        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y_onehot, 1))\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n        x=np.random.choice(validataLabel.shape[0], 500, replace=False)\n        print('Accuracy:', sess.run(accuracy, feed_dict={\n            X: validateData[x], Y: validataLabel[x].reshape(-1, 1), p_keep_conv: 1, p_keep_hidden: 1}))\n        #save_path = saver.save(sess, '/home/paperspace/Downloads/optx/optx')\nprint('Finished!')\n\n\"\"\"\n#1.\ntraining_epochs = 700\nlearning_rate = 0.0004\nactivation = elu\nconv = kernel_size=[3, 3], strides=(1, 1)\n       kernel_size=[3, 3], strides=(1, 1)\n       kernel_size=[3, 3], strides=(1, 1)\npool = pool_size=[2, 2], strides=2\n       pool_size=[2, 2], strides=2\n       pool_size=[2, 2], strides=2\nflat = 16*16*128 -> 64\np_keep_conv: .7, p_keep_hidden: .5\n\ncost : still falling at around 0.06~0.08\nAccuracy: 59~63% \nconsider normalization in preprocessing.. librosa.util.normalize\nconsider a 14*2 window..\ntune the learning rate\n\"\"\"\n","sub_path":"x_ksw/New Project/PT자료/mel_cnn.py","file_name":"mel_cnn.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"90110097","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndef sinc(m):\n\th1=[]\n\tfor n in range(-m,m):\n\t\tx=np.sin(np.pi/4*n)/(np.pi*n)\n\t\th1=np.append(h1,x)\n\th1[m]=1/4\n\treturn h1\ndef rect_win(m):\n\th2=[]\n\tfor n in range(0,m-1):\n\t\tx=1\n\t\th2=np.append(h2,x)\n\treturn h2\ndef tri_win(m):\n\th3=[]\n\tfor n in range(0,m-1):\n\t\tx1=np.abs(n-(m-1)/2)\n\t\tx=1-((2*x1)/(m-1))\n\t\th3=np.append(h3,x)\n\treturn h3\ndef hann_win(m):\n\th4=[]\n\tfor n in range(0,m-1):\n\t\tx=0.5-0.5*np.cos(2*np.pi*n/(m-1))\n\t\th4=np.append(h4,x)\n\treturn h4\ndef hamm_win(m):\n\th5=[]\n\tfor n in range(0,m-1):\n\t\tx=0.54-0.46*np.cos(2*np.pi*n/(m-1))\n\t\th5=np.append(h5,x)\n\treturn h5\n\t\t\n\n","sub_path":"lab8_windows.py","file_name":"lab8_windows.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"460041699","text":"from apache_beam.transforms import DoFn\nfrom apache_beam.transforms import ParDo\nfrom apache_beam.transforms import PTransform\n\n__all__ = ['RestructDictFn', 'RestructDict']\n\n\nclass RestructDictFn(DoFn):\n    def __init__(self, mappings):\n        super().__init__()\n        self.mappings = mappings\n\n    def _get_data(self, d, path):\n        for k in path.split('.'):\n            d = d[k]\n        return d\n\n    def _set_data(self, d, path, value):\n        p = path.split('.')\n        for k in p[:-1]:\n            if k not in d:\n                d[k] = dict()\n            d = d[k]\n        d[p[-1]] = value\n\n    def process(self, x):\n        d = dict()\n        for org, exp in self.mappings.items():\n            if exp is None or exp is False:\n                continue\n            v = self._get_data(x, org)\n            if exp is True:\n                self._set_data(d, org, v)\n            elif type(exp) is str:\n                self._set_data(d, exp, v)\n            else:\n                raise Exception(f'unexpected filter type: {type(exp)}')\n        yield d\n\n\nclass RestructDict(PTransform):\n    def 
__init__(self, *args, **kwargs):\n super().__init__()\n self._fn = RestructDictFn(*args, **kwargs)\n\n def expand(self, pvalue):\n return pvalue | ParDo(self._fn)\n","sub_path":"python/beamx/transforms/restruct_dict.py","file_name":"restruct_dict.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173308797","text":"INF = float('inf')\n\nGRAPH = dict()\nGRAPH['start'] = {'a': 6, 'b': 2}\nGRAPH['a'] = {'fin': 1}\nGRAPH['b'] = {'a': 3, 'fin': 5}\nGRAPH['fin'] = {}\n\ncosts = {'a': 6, 'b': 2, 'fin': INF}\n\nparents = {'a': 'start', 'b': 'start', 'fin': None}\n\nprocessed = set()\n\n\ndef find_path():\n node, cost = find_lowest_cost_node()\n while node:\n neighbors = GRAPH[node]\n for neighbour, neighbour_cost in neighbors.items():\n new_cost = cost + neighbour_cost\n if new_cost < costs[neighbour]:\n costs[neighbour] = new_cost\n parents[neighbour] = node\n processed.add(node)\n node, cost = find_lowest_cost_node()\n\n\ndef find_lowest_cost_node() -> tuple:\n lowest_cost = INF\n lowest_cost_node = None\n for node, cost in costs.items():\n if cost < lowest_cost and node not in processed:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node, lowest_cost\n\n\ndef visualize_path():\n spam = []\n while True:\n for child, parent in parents.items():\n if parent == 'start' and not spam:\n spam.extend([parent, child])\n elif spam and (parent == spam[-1]):\n spam.append(child)\n if len(spam) == len(GRAPH):\n break\n return spam\n\n\nif __name__ == '__main__':\n find_path()\n print(visualize_path())\n","sub_path":"grokkaem_algo/7_algorithm_dijkstra.py","file_name":"7_algorithm_dijkstra.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"154995457","text":"from typing import Tuple, Callable, List\n\nimport pennylane as qml\nimport pennylane.numpy as np\n\n\ndef constructor(q_num: int) -> Tuple[Callable, int]:\n\t\"\"\"\n\tImplements circuit B from J. Romero, J. P. Olson, and A. Aspuru-Guzik, “Quantum autoencoders for efficient compression\n\tof quantum data,” Quantum Sci. Technol., vol. 2, no. 4, p. 045001, Dec. 
2017, doi: 10.1088/2058-9565/aa8072.\n\n\t:param q_num: number of qubits\n\t:return:\n\t\"\"\"\n\n\tdef circuit(inputs: np.tensor, weights: np.tensor) -> List[np.tensor]:\n\t\t# angle encoding of the input\n\t\tfor i in range(q_num):\n\t\t\tqml.RX(inputs[i], wires=i)\n\n\t\tidx = 0 # current index for the weights\n\n\t\t# layer of single qubit rotations\n\t\tfor i in range(q_num):\n\t\t\tqml.Rot(weights[idx], weights[idx + 1], weights[idx + 2], wires=i)\n\t\t\tidx += 3\n\n\t\t# layer of controlled single qubit rotations\n\t\tfor i in range(q_num):\n\t\t\tfor j in range(q_num):\n\t\t\t\tif i != j:\n\t\t\t\t\tqml.CRot(weights[idx], weights[idx + 1], weights[idx + 2], wires=[i, j])\n\t\t\t\t\tidx += 3\n\n\t\t# layer of single qubit rotations\n\t\tfor i in range(q_num):\n\t\t\tqml.Rot(weights[idx], weights[idx + 1], weights[idx + 2], wires=i)\n\t\t\tidx += 3\n\n\t\treturn [qml.expval(qml.PauliZ(wires=i)) for i in range(q_num)]\n\n\treturn circuit, (2 * q_num + q_num * (q_num - 1)) * 3\n","sub_path":"src/pl/QNN2.py","file_name":"QNN2.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450764775","text":"from sqlalchemy.orm import Session\nfrom dbUtils import DbUtil, User\n\n__author__ = 'WhiteBlue'\n\n\nclass UserDao(object):\n @staticmethod\n def get_all_user():\n # session = Session()\n session = DbUtil.get_session()\n users = session.query(User).order_by('id')\n session.close()\n return users\n","sub_path":"dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"143324505","text":"import time\n\nfrom .data_stream import DataStream\n\nimport middleware.glo as glo\n\nclass Subscriber(DataStream):\n \"\"\"\n Provides the same function as :py:class:`rospy.Subscriber`, which\n implements :py:class:`.DataStream`.\n \"\"\"\n\n def __init__(self, topic_name, message_type, monitored=True):\n \"\"\"\n :param str topic_name: resource name of topic, e.g. '/camera1/image'.\n :param class message_type: message class for serialization. Note that\n `header.stamp` must exist.\n :param monitored: whether to attach a :py:class:`.DataStreamMonitor` to\n this publisher.\n \"\"\"\n super(Subscriber, self).__init__()\n self._topic_name = topic_name\n self._message_type = message_type\n self._get_timestamp = self._generate_get_timestamp(message_type)\n self._ros_subscriber = glo.get().create_subscription(message_type, topic_name, self._on_data)\n glo.logger().debug(topic_name)\n if monitored:\n from middleware import DataStreamMonitor\n DataStreamMonitor(self, name=topic_name)\n\n\n\n def _on_data(self, data):\n self._commit({\n self._topic_name: data,\n '__timestamp': self._get_timestamp(data),\n })\n\n def unsubscribe(self):\n \"\"\"\n unsubscribe from topic. There will be no more data coming after this\n call. 
Additional calls to unsubscribe() have no effect.\n        \"\"\"\n        glo.get().destroy_subscription(self._ros_subscriber)\n\n\n","sub_path":"spin_camera/src/octopus-dependency/src/middleware/middleware/adapter/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"319262132","text":"# -*- coding: utf-8 -*-\n#qpy:webapp:goo.gl short-URL proxy generator (for use behind the firewall)\n#qpy://localhost:8080/\nfrom bottle import debug, run\nfrom bottle import route\nfrom bottle import error\nfrom bottle import redirect, abort\nfrom bottle import request, response\nfrom bottle import static_file\nfrom bottle import template\nfrom bottle import TEMPLATE_PATH\n#import httplib, urllib\nimport sys\nimport os.path\n_app_root = os.path.dirname(__file__)\nCUSTOM_TPL_PATH = os.path.abspath(\n    os.path.join(\n        _app_root\n        , \"views/\")\n    )\nTEMPLATE_PATH.insert(0, CUSTOM_TPL_PATH)\nsys.path.insert(0, os.path.join(_app_root, \"3party/\"))\nimport requests\n#print dir(requests)\n\n\n@route('/')\ndef index():\n    return template('main')\n\n@route('/goo', method='POST')\ndef goo():\n    uri = request.forms.get('uri')\n    return template('goo'\n        , result = curl_goo(uri)\n        )\n\ndef curl_goo(uri):\n    payload = {'uri': uri}\n    r = requests.post(\"http://api.zhgdg.org/v0/goo\", data=payload)\n    #print \"r.keys\\n\\t\", r.keys()\n    uris = r.text.split('\\n')\n    return uris\n\n\n@error(404)\ndef error404(error):\n    return template('404')\n\n@route('/favicon.ico')\ndef favicon():\n    abort(204)\n    \n@route('/static/<filepath:path>')\ndef server_static(filepath):\n    #print _app_root+'/static'\n    return static_file(filepath, root = _app_root+'/static')\n\nrun(host='localhost', port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"201418476","text":"\"\"\"\nInitialization script for the vault storage.\nMust be run once, after the vault container has started.\n\"\"\"\nfrom os import getenv\n\nimport hvac\nfrom hvac.exceptions import InvalidPath\n\nVAULT_URL = getenv(\"VAULT_URL\", \"http://127.0.0.1\") + \":8200\"\n\nif __name__ == '__main__':\n    client = hvac.Client(url=VAULT_URL)\n    if not client.sys.is_initialized():\n        result = client.sys.initialize(secret_shares=1, secret_threshold=1)\n\n        root_token = result['root_token']\n        key = result['keys'][0]\n\n        response = client.sys.submit_unseal_key(key=key)\n\n        # Root token: s.lWIQ2smClt5a7UBCOHylbmSN\n        # Unseal key: 144046b9f51d944f9b634d01b29dad74443a58a860e2f4eb6592d1ec071c764e\n        client.token = root_token\n        try:\n            client.secrets.kv.v2.configure(mount_point='kv')\n        except hvac.exceptions.InvalidPath:\n            client.sys.enable_secrets_engine('kv', path='kv')\n        write_response = client.secrets.kv.v2.create_or_update_secret(\n            mount_point='kv',\n            path='auth_service',\n            secret=dict(\n                postgres_user=\"auth\",\n                postgres_password=\"12345\",\n                service_secret_key='this-really-needs-to-be-changed',\n                google_client_id='1059758954803-q9trni3t7v09bcjqd9mc9g0kh9kkebmc.apps.googleusercontent.com',\n                google_client_secret='IsKneEoZTWYAKLoccl-dKwOR'\n            ),\n        )\n\n        print(\"Root token: \" + root_token)\n        print(\"Unseal key: \" + key)\n","sub_path":"initializers/vault_init.py","file_name":"vault_init.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"331843484","text":"#!/usr/bin/env python\n\nfrom 
apache_conf_parser.directives.simple_directive import SimpleDirective\n\n\nclass RewriteCond(SimpleDirective):\n    contexts = [\n        'server_config',\n        'virtual_host',\n        'directory',\n        '.htaccess'\n    ]\n    apache_module = 'mod_rewrite'\n    description = 'Defines a condition under which rewriting will take place'\n\n    match_regexp = r'\\s*RewriteCond\\s+(?P<test_string>[^ ]*)\\s+(?P<cond_pattern>[^ ]*)\\s*\\[?(?P<flags>[^] ]*)?\\]?$'\n\n    @property\n    def flags(self):\n        _flags = []\n        if self.matches is not None:\n            flag_group = self.matches.get('flags')\n            _flags += [f.strip() for f in flag_group.split(',')]\n        return _flags\n","sub_path":"apache_conf_parser/directives/rewrite_cond.py","file_name":"rewrite_cond.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218148210","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProfile: http://hl7.org/fhir/StructureDefinition/Linkage\nRelease: R5\nVersion: 5.0.0\nBuild ID: 2aecd53\nLast updated: 2023-03-26T15:21:02.749+11:00\n\"\"\"\nimport typing\n\nfrom pydantic import Field, root_validator\nfrom pydantic.error_wrappers import ErrorWrapper, ValidationError\nfrom pydantic.errors import MissingError, NoneIsNotAllowedError\n\nfrom . import backboneelement, domainresource, fhirtypes\n\n\nclass Linkage(domainresource.DomainResource):\n    \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n    Resource StructureDefinition, instead used to enable Extensibility feature\n    for FHIR Primitive Data Types.\n\n    Links records for 'same' item.\n    Identifies two or more records (resource instances) that refer to the same\n    real-world \"occurrence\".\n    \"\"\"\n\n    resource_type = Field(\"Linkage\", const=True)\n\n    active: bool = Field(\n        None,\n        alias=\"active\",\n        title=\"Whether this linkage assertion is active or not\",\n        description=(\n            \"Indicates whether the asserted set of linkages are considered to be \"\n            '\"in effect\".'\n        ),\n        # if property is element of this resource.\n        element_property=True,\n    )\n    active__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n        None, alias=\"_active\", title=\"Extension field for ``active``.\"\n    )\n\n    author: fhirtypes.ReferenceType = Field(\n        None,\n        alias=\"author\",\n        title=\"Who is responsible for linkages\",\n        description=(\n            \"Identifies the user or organization responsible for asserting the \"\n            \"linkages as well as the user or organization who establishes the \"\n            \"context in which the nature of each linkage is evaluated.\"\n        ),\n        # if property is element of this resource.\n        element_property=True,\n        # note: Listed Resource Type(s) should be allowed as Reference.\n        enum_reference_types=[\"Practitioner\", \"PractitionerRole\", \"Organization\"],\n    )\n\n    item: typing.List[fhirtypes.LinkageItemType] = Field(\n        ...,\n        alias=\"item\",\n        title=\"Item to be linked\",\n        description=(\n            \"Identifies which record considered as the reference to the same real-\"\n            \"world occurrence as well as how the items should be evaluated within \"\n            \"the collection of linked items.\"\n        ),\n        # if property is element of this resource.\n        element_property=True,\n    )\n\n    @classmethod\n    def elements_sequence(cls):\n        \"\"\"returning all elements names from\n        ``Linkage`` according specification,\n        with preserving original sequence order.\n        \"\"\"\n        return [\n            \"id\",\n            \"meta\",\n            \"implicitRules\",\n            \"language\",\n            \"text\",\n            \"contained\",\n            \"extension\",\n            \"modifierExtension\",\n            \"active\",\n            \"author\",\n            \"item\",\n        ]\n\n\nclass LinkageItem(backboneelement.BackboneElement):\n 
\"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n Item to be linked.\n Identifies which record considered as the reference to the same real-world\n occurrence as well as how the items should be evaluated within the\n collection of linked items.\n \"\"\"\n\n resource_type = Field(\"LinkageItem\", const=True)\n\n resource: fhirtypes.ReferenceType = Field(\n ...,\n alias=\"resource\",\n title=\"Resource being linked\",\n description=\"The resource instance being linked as part of the group.\",\n # if property is element of this resource.\n element_property=True,\n # note: Listed Resource Type(s) should be allowed as Reference.\n enum_reference_types=[\"Resource\"],\n )\n\n type: fhirtypes.Code = Field(\n None,\n alias=\"type\",\n title=\"source | alternate | historical\",\n description=(\n 'Distinguishes which item is \"source of truth\" (if any) and which items'\n \" are no longer considered to be current representations.\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n # note: Enum values can be used in validation,\n # but use in your own responsibilities, read official FHIR documentation.\n enum_values=[\"source\", \"alternate\", \"historical\"],\n )\n type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_type\", title=\"Extension field for ``type``.\"\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``LinkageItem`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\"id\", \"extension\", \"modifierExtension\", \"type\", \"resource\"]\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_required_primitive_elements_1283(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. 
In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n \"\"\"\n required_fields = [(\"type\", \"type__ext\")]\n _missing = object()\n\n def _fallback():\n return \"\"\n\n errors: typing.List[\"ErrorWrapper\"] = []\n for name, ext in required_fields:\n field = cls.__fields__[name]\n ext_field = cls.__fields__[ext]\n value = values.get(field.alias, _missing)\n if value not in (_missing, None):\n continue\n ext_value = values.get(ext_field.alias, _missing)\n missing_ext = True\n if ext_value not in (_missing, None):\n if isinstance(ext_value, dict):\n missing_ext = len(ext_value.get(\"extension\", [])) == 0\n elif (\n getattr(ext_value.__class__, \"get_resource_type\", _fallback)()\n == \"FHIRPrimitiveExtension\"\n ):\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n else:\n validate_pass = True\n for validator in ext_field.type_.__get_validators__():\n try:\n ext_value = validator(v=ext_value)\n except ValidationError as exc:\n errors.append(ErrorWrapper(exc, loc=ext_field.alias))\n validate_pass = False\n if not validate_pass:\n continue\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n if missing_ext:\n if value is _missing:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n else:\n errors.append(\n ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)\n )\n if len(errors) > 0:\n raise ValidationError(errors, cls) # type: ignore\n\n return values\n","sub_path":"fhir/resources/linkage.py","file_name":"linkage.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181884243","text":"import uuid\nfrom flask_restplus import Resource, reqparse, fields, inputs\nfrom flask import request, current_app\nfrom datetime import datetime\nfrom werkzeug.exceptions import BadRequest, NotFound, InternalServerError\nfrom sqlalchemy.exc import DBAPIError\n\nfrom app.extensions import api, db\nfrom app.api.utils.custom_reqparser import CustomReqparser\nfrom app.api.utils.resources_mixins import UserMixin\nfrom app.api.utils.access_decorators import requires_role_view_all, requires_role_edit_report, requires_role_mine_admin\n\nfrom app.api.mines.reports.models.mine_report import MineReport\nfrom app.api.mines.reports.models.mine_report_submission import MineReportSubmission\nfrom app.api.mines.reports.models.mine_report_comment import MineReportComment\n\nfrom app.api.mines.response_models import MINE_REPORT_COMMENT_MODEL\n\n\nclass MineReportCommentListResource(Resource, UserMixin):\n parser = CustomReqparser()\n parser.add_argument('report_comment', type=str, location='json')\n parser.add_argument('comment_visibility_ind', type=inputs.boolean, location='json')\n\n @api.doc(description='retrive a list of comments for all report submissions')\n @api.marshal_with(MINE_REPORT_COMMENT_MODEL, envelope='records', code=200)\n @requires_role_view_all\n def get(self, mine_guid, mine_report_guid):\n\n mine_report = MineReport.find_by_mine_report_guid(mine_report_guid)\n\n if not mine_report:\n raise NotFound('Mine report not found')\n\n mine_report_submissions = mine_report.mine_report_submissions\n\n if not mine_report_submissions:\n raise NotFound('No mine report submissions for this report')\n\n current_app.logger.info(f'Retrieving comments for 
{mine_report}')\n\n        comments = [\n            comment.__dict__ for submission in mine_report_submissions[:-1]\n            for comment in submission.comments\n        ]\n        for comment in comments:\n            comment['from_latest_submission'] = False\n        latest_comments = [\n            comment.__dict__ for submission in mine_report_submissions[-1:]\n            for comment in submission.comments\n        ]\n        for comment in latest_comments:\n            comment['from_latest_submission'] = True\n\n        return comments + latest_comments, 200\n\n    @api.expect(MINE_REPORT_COMMENT_MODEL)\n    @api.doc(description='creates a new comment for the report submission')\n    @api.marshal_with(MINE_REPORT_COMMENT_MODEL, code=201)\n    @requires_role_edit_report\n    def post(self, mine_guid, mine_report_guid):\n\n        mine_report_submission = MineReportSubmission.find_latest_by_mine_report_guid(\n            mine_report_guid)\n\n        if not mine_report_submission:\n            raise NotFound('Mine report submission not found')\n        # TODO: Do we want to create a submission if it doesn't exist?\n\n        data = self.parser.parse_args()\n\n        if not data['report_comment']:\n            raise BadRequest('Empty comment')\n\n        mine_report_comment_guid = uuid.uuid4()\n\n        mine_report_comment = MineReportComment.create(\n            mine_report_submission,\n            mine_report_comment_guid=mine_report_comment_guid,\n            report_comment=data['report_comment'],\n            comment_visibility_ind=data['comment_visibility_ind'],\n        )\n\n        current_app.logger.info(f'Creating comment {mine_report_comment}')\n\n        mine_report_comment.save()\n\n        return mine_report_comment, 201\n\n\nclass MineReportCommentResource(Resource, UserMixin):\n    parser = CustomReqparser()\n    parser.add_argument('report_comment', type=str, location='json')\n    parser.add_argument('comment_visibility_ind', type=inputs.boolean, location='json')\n\n    @api.expect(MINE_REPORT_COMMENT_MODEL)\n    @api.doc(description='update a comment')\n    @api.marshal_with(MINE_REPORT_COMMENT_MODEL, code=201)\n    @requires_role_edit_report\n    def put(self, mine_guid, mine_report_guid, mine_report_comment_guid=None):\n\n        data = self.parser.parse_args()\n        comment = MineReportComment.find_by_guid(mine_report_comment_guid)\n        if not comment:\n            raise NotFound(f'Mine report comment with guid \"{mine_report_comment_guid}\" not found.')\n\n        current_app.logger.info(f'Updating {comment} with {data}')\n        for key, value in data.items():\n            setattr(comment, key, value)\n\n        comment.save()\n\n        return comment, 201\n\n    @api.doc(description='Delete a mine report comment by guid',\n             params={'mine_report_comment_guid': 'guid of the comment to delete.'})\n    @requires_role_mine_admin\n    def delete(self, mine_guid, mine_report_guid, mine_report_comment_guid):\n        comment = MineReportComment.find_by_guid(mine_report_comment_guid)\n        if not comment:\n            raise NotFound(f'Mine report comment with guid \"{mine_report_comment_guid}\" not found.')\n\n        comment.deleted_ind = True\n        current_app.logger.info(f'Deleting {comment}')\n\n        comment.save()\n\n        return ('', 204)\n","sub_path":"services/core-api/app/api/mines/reports/resources/mine_report_comment.py","file_name":"mine_report_comment.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"491008772","text":"def sort_list(my_list, direction=None):\n    \"\"\"Bubble sort of the list my_list.\n    :param my_list: list of values\n    :param direction: (optional) sort direction. Possible values: 1, -1\n    \"\"\"\n    # create a new variable new_list inside the function - it is visible only in this function\n    # and is not visible in the body of the main program\n    new_list = []\n    for element in my_list:\n        new_list.append(element)\n\n    if direction:\n        # First we check whether anything at all was passed in the direction parameter (if direction), and only in that case do we sort.\n        # If nothing is passed, direction equals None, and for an unset value the if returns False.\n        for i in range(len(new_list)):\n            for j in range(i, len(new_list)):\n                if (direction == -1 and new_list[i] > new_list[j]) or (direction == 1 and new_list[i] < new_list[j]):\n                    # If direction == -1 and the current element is greater than the next one,\n                    # or direction == 1 and the current element is less than the next one,\n                    # then swap the elements.\n                    temp = new_list[i]\n                    new_list[i] = new_list[j]\n                    new_list[j] = temp\n\n    return new_list\n\nlist1 = [4, -2, 5, 3, -1 , 1, 0, 5, 2]\nprint(f'{list1} - value of list1 before the function call')\n\nsorted_list = sort_list(list1, 1) # the result of sort_list() is stored in the new variable sorted_list\n\nprint(f'{list1} - value of list1 after the function call')\nprint(f'{sorted_list} - value of the variable sorted_list')\n\n#https://pumpskill.ru/courses/bazovyy-kurs-python/lessons/osnovy-strukturnogo-programmirovaniya/chistye-i-gryaznye-funkcii/\n","sub_path":"module3/sort_list_Create_a_pure_function.py","file_name":"sort_list_Create_a_pure_function.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"333468111","text":"\"\"\"PyMultimethods: Multimethods for Python\n\nPyMultimethods provides a pythonic library for implementing multimethods (aka generic methods, generic functions, multiple dispatch). 
Multimethods are functions that exhibit polymorphic behaviour, where all of the function arguments are considered during dispatch.\n\"\"\"\n\nfrom distutils.core import setup\n\ndoclines = __doc__.split('\\n')\nsummary = doclines[0]\ndescription = '\\n'.join(doclines[2:])\n\nclassifiers = ['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ]\n\nsetup(name='PyMultimethods',\n version='0.2',\n description=summary,\n long_description=description,\n author='Nathan Davis',\n author_email='davisn90210@gmail.com',\n license='GNU AGPL, version 3',\n url='http://launchpad.net/pymultimethods',\n platforms=['any'],\n classifiers=classifiers,\n py_modules=['multidispatch'])\n","sub_path":"pypi_install_script/PyMultimethods-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"445165460","text":"# Owner(s): [\"oncall: distributed\"]\n\nimport copy\nimport sys\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom torch.distributed._composable import checkpoint, fully_shard\nfrom torch.distributed.fsdp.wrap import ModuleWrapPolicy\nfrom torch.testing._internal.common_distributed import skip_if_lt_x_gpu\nfrom torch.testing._internal.common_fsdp import FSDPTest\nfrom torch.testing._internal.common_utils import (\n TEST_WITH_DEV_DBG_ASAN,\n run_tests,\n)\n\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\n\nif TEST_WITH_DEV_DBG_ASAN:\n print(\n \"Skip dev-asan as torch + multiprocessing spawn have known issues\",\n file=sys.stderr,\n )\n sys.exit(0)\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.l1 = nn.Linear(100, 100)\n self.seq = nn.Sequential(\n nn.ReLU(),\n nn.Linear(100, 100),\n nn.ReLU(),\n )\n self.l2 = nn.Linear(100, 100)\n\n def forward(self, x):\n return self.l2(self.seq(self.l1(x)))\n\n\nclass TestFSDPCheckpoint(FSDPTest):\n @property\n def world_size(self) -> int:\n return 2\n\n def _test_wrap_same_submodule(self, use_reentrant, grad_to_none):\n LR = 0.01\n device = torch.device(\"cuda\")\n\n model = ToyModel().to(device)\n\n local_model = copy.deepcopy(model)\n local_optim = torch.optim.Adam(local_model.parameters(), lr=LR)\n\n combo_model = copy.deepcopy(model)\n combo_optim = torch.optim.Adam(combo_model.parameters(), lr=LR)\n\n # compose checkpoint and fully_shard\n combo_model.seq = checkpoint(\n combo_model.seq, use_reentrant=use_reentrant\n )\n combo_model.seq = fully_shard(\n combo_model.seq,\n policy=ModuleWrapPolicy({nn.Linear}),\n )\n\n x = torch.randn(2, 100, device=device)\n\n for _ in range(5):\n combo_loss = combo_model(x).sum()\n local_loss = local_model(x).sum()\n\n self.assertEqual(combo_loss, local_loss)\n\n combo_loss.backward()\n combo_optim.step()\n combo_optim.zero_grad(set_to_none=grad_to_none)\n\n local_loss.backward()\n local_optim.step()\n local_optim.zero_grad(set_to_none=grad_to_none)\n\n @skip_if_lt_x_gpu(2)\n def test_wrap_same_submodule(self):\n self.run_subtests(\n {\n \"use_reentrant\": [True, False],\n \"grad_to_none\": [True, False],\n },\n self._test_wrap_same_submodule,\n )\n\n\nif __name__ == \"__main__\":\n 
run_tests()\n","sub_path":"test/distributed/_composable/test_compose.py","file_name":"test_compose.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"439888017","text":"# This is the class you derive to create a plugin\nimport json\nimport uuid\nimport subprocess\nimport os\nimport signal\n\nfrom airflow.plugins_manager import AirflowPlugin\n\nfrom flask import Blueprint\nfrom flask_admin import BaseView, expose\nfrom flask_admin.base import MenuLink\n\n# Importing base classes that we need to derive\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.models import BaseOperator\nfrom airflow.models.baseoperator import BaseOperatorLink\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.executors.base_executor import BaseExecutor\nfrom airflow.utils.decorators import apply_defaults\nfrom flask_appbuilder import BaseView as AppBuilderBaseView\nfrom airflow.exceptions import AirflowException\nfrom datetime import datetime as dte, timedelta\n\n\nclass RDMS2RDMSOperator(BaseOperator):\n    template_fields = ('src_query_sql', 'tar_table', 'tar_columns')\n    ui_color = '#edd5f1'\n\n    @apply_defaults\n    def __init__(self,\n                 src_conn_id,\n                 src_query_sql,\n                 tar_conn_id,\n                 tar_table,\n                 tar_columns,\n                 tar_pre_sql,\n                 *args,\n                 **kwargs):\n        super().__init__(*args, **kwargs)\n\n        filter_start_date = dte.utcnow() - timedelta(days=5)\n        filter_start_date = filter_start_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n        self.src_conn_id = src_conn_id\n        self.src_query_sql = src_query_sql + \" WHERE create_date > '%s'\" % filter_start_date\n        self.tar_conn_id = tar_conn_id\n        self.tar_table = tar_table\n        self.tar_columns = tar_columns\n        self.tar_pre_sql = tar_pre_sql + \" WHERE create_date > '%s'\" % filter_start_date\n\n    def execute(self, context):\n        \"\"\"\n        Execute\n        \"\"\"\n        self.log.info('src_query_sql: %s', self.src_query_sql)\n        self.log.info('tar_pre_sql: %s', self.tar_pre_sql)\n\n        task_id = context['task_instance'].dag_id + \"#\" + context['task_instance'].task_id\n\n        self.hook = RDBMS2RDBMSHook(\n            task_id=task_id,\n            src_conn_id=self.src_conn_id,\n            src_query_sql=self.src_query_sql,\n            tar_conn_id=self.tar_conn_id,\n            tar_table=self.tar_table,\n            tar_columns=self.tar_columns,\n            tar_pre_sql=self.tar_pre_sql,\n        )\n        self.hook.execute(context=context)\n\n    def on_kill(self):\n        self.log.info('Sending SIGTERM signal to bash process group')\n        os.killpg(os.getpgid(self.hook.sp.pid), signal.SIGTERM)\n\n\nclass RDBMS2RDBMSHook(BaseHook):\n    \"\"\"\n    DataX executor\n    \"\"\"\n\n    def __init__(self,\n                 task_id,\n                 src_conn_id,\n                 src_query_sql,\n                 tar_conn_id,\n                 tar_table,\n                 tar_columns,\n                 tar_pre_sql):\n        self.task_id = task_id\n        self.src_conn = self.get_connection(src_conn_id)\n        self.src_query_sql = src_query_sql\n        self.tar_conn = self.get_connection(tar_conn_id)\n        self.tar_table = tar_table\n        self.tar_columns = tar_columns\n        self.tar_pre_sql = tar_pre_sql\n\n        self.log.info(\"Source connection: {}:{}/{}\".format(self.src_conn.host, self.src_conn.port, self.src_conn.schema))\n        self.log.info(\"Target connection: {}:{}/{}\".format(self.tar_conn.host, self.tar_conn.port, self.tar_conn.schema))\n\n    def Popen(self, cmd, **kwargs):\n        \"\"\"\n        Remote Popen\n\n        :param cmd: command to remotely execute\n        :param kwargs: extra arguments to Popen (see subprocess.Popen)\n        :return: handle to subprocess\n        \"\"\"\n        self.sp = subprocess.Popen(\n            cmd,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT,\n            **kwargs)\n\n        for line in iter(self.sp.stdout):\n            self.log.info(line.strip().decode('utf-8'))\n\n        self.sp.wait()\n\n        self.log.info(\"Command exited with return code %s\", self.sp.returncode)\n\n        if self.sp.returncode:\n            raise AirflowException(\"Execute command failed\")\n\n    def generate_setting(self):\n        \"\"\"\n        DataX speed and error-limit settings\n        \"\"\"\n        self.setting = {\n            \"speed\": {\n                \"byte\": 104857600\n            },\n            \"errorLimit\": {\n                \"record\": 0,\n                \"percentage\": 0.02\n            }\n        }\n        return self.setting\n\n    def generate_reader(self):\n        \"\"\"\n        datax reader\n        \"\"\"\n        conn = self.src_conn\n        conn_type = 'mysql'\n        reader_name = 'mysqlreader'\n        if(conn.conn_type == 'postgres'):\n            conn_type = 'postgresql'\n            reader_name = 'postgresqlreader'\n\n        self.src_jdbc_url = \"jdbc:\"+conn_type+\"://\"+conn.host.strip()+\":\" + str(conn.port) + \"/\" + conn.schema.strip()\n        self.reader = {\n            \"name\": reader_name,\n            \"parameter\": {\n                \"username\": conn.login.strip(),\n                \"password\": conn.password.strip(),\n                \"connection\": [\n                    {\n                        \"querySql\": [\n                            self.src_query_sql\n                        ],\n                        \"jdbcUrl\": [\n                            self.src_jdbc_url\n                        ]\n                    }\n                ]\n            }\n        }\n\n        return self.reader\n\n    def generate_writer(self):\n        conn = self.tar_conn\n        conn_type = 'mysql'\n        reader_name = 'mysqlreader'\n        if(conn.conn_type == 'postgres'):\n            conn_type = 'postgresql'\n            reader_name = 'postgresqlreader'\n\n        self.tar_jdbc_url = \"jdbc:\"+conn_type+\"://\"+conn.host.strip()+\":\" + str(conn.port) + \"/\" + conn.schema.strip()\n        self.writer = {\n            \"name\": \"postgresqlwriter\",\n            \"parameter\": {\n                \"username\": conn.login.strip(),\n                \"password\": conn.password.strip(),\n                \"column\": self.tar_columns,\n                \"preSql\": [\n                    self.tar_pre_sql\n                ],\n                \"connection\": [{\n                    \"jdbcUrl\": self.tar_jdbc_url,\n                    \"table\": [self.tar_table]\n                }]\n            }\n        }\n        return self.writer\n\n    def generate_config(self):\n        content = [{\n            \"reader\": self.generate_reader(),\n            \"writer\": self.generate_writer()\n        }]\n\n        job = {\n            \"setting\": self.generate_setting(),\n            \"content\": content\n        }\n\n        config = {\n            \"job\": job\n        }\n\n        self.target_json = json.dumps(config)\n\n        # write json to file\n        self.json_file = '/tmp/datax_json_'+self.task_id + uuid.uuid1().hex\n        # open a file\n        fo = open(self.json_file, \"w\")\n        fo.write(self.target_json)\n        fo.close()\n        self.log.info(\"write config json {}\".format(self.json_file))\n        return self.json_file\n\n    def execute(self, context):\n        self.generate_config()\n\n        # upload the file\n        datax_home = '/opt/datax/bin'\n        cmd = ['python', datax_home + '/datax.py', self.json_file]\n        self.Popen(cmd)\n        # delete the config file\n        os.remove(self.json_file)\n\n\n# Will show up under airflow.sensors.test_plugin.PluginSensorOperator\nclass PluginSensorOperator(BaseSensorOperator):\n    pass\n\n\n# Will show up under airflow.executors.test_plugin.PluginExecutor\nclass PluginExecutor(BaseExecutor):\n    pass\n\n\n# Will show up under airflow.macros.test_plugin.plugin_macro\n# and in templates through {{ macros.test_plugin.plugin_macro }}\ndef plugin_macro():\n    pass\n\n# Creating a flask admin BaseView\nclass TestView(BaseView):\n    @expose('/')\n    def test(self):\n        # in this example, put your test_plugin/test.html template at airflow/plugins/templates/test_plugin/test.html\n        return self.render(\"test_plugin/test.html\", content=\"Hello galaxy!\")\nv = TestView(category=\"Test Plugin\", name=\"Test View\")\n\n# Creating a flask blueprint to integrate the templates and static folder\nbp = Blueprint(\n    \"test_plugin\", __name__,\n    template_folder='templates', # registers airflow/plugins/templates as a Jinja template folder\n    static_folder='static',\n    
static_url_path='/static/test_plugin')\n\nml = MenuLink(\n category='Test Plugin',\n name='Test Menu Link',\n url='https://airflow.apache.org/')\n\n# Creating a flask appbuilder BaseView\nclass TestAppBuilderBaseView(AppBuilderBaseView):\n @expose(\"/\")\n def test(self):\n return self.render(\"test_plugin/test.html\", content=\"Hello galaxy!\")\n\nv_appbuilder_view = TestAppBuilderBaseView()\nv_appbuilder_package = {\"name\": \"Test View\",\n \"category\": \"Test Plugin\",\n \"view\": v_appbuilder_view}\n\n# Creating a flask appbuilder Menu Item\nappbuilder_mitem = {\"name\": \"Google\",\n \"category\": \"Search\",\n \"category_icon\": \"fa-th\",\n \"href\": \"https://www.google.com\"}\n\n\n# A global operator extra link that redirect you to\n# task logs stored in S3\nclass S3LogLink(BaseOperatorLink):\n name = 'S3'\n\n def get_link(self, operator, dttm):\n return 'https://s3.amazonaws.com/airflow-logs/{dag_id}/{task_id}/{execution_date}'.format(\n dag_id=operator.dag_id,\n task_id=operator.task_id,\n execution_date=dttm,\n )\n\n\n# Defining the plugin class\nclass DataXPlugin(AirflowPlugin):\n name = \"datax2\"\n operators = [RDMS2RDMSOperator]\n sensors = [PluginSensorOperator]\n hooks = [RDBMS2RDBMSHook]\n executors = [PluginExecutor]\n macros = [plugin_macro]\n # admin_views = [v]\n # flask_blueprints = [bp]\n # menu_links = [ml]\n # appbuilder_views = [v_appbuilder_package]\n # appbuilder_menu_items = [appbuilder_mitem]\n #global_operator_extra_links = [S3LogLink()]\n\n","sub_path":"plugins/datax2.py","file_name":"datax2.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"275838394","text":"from flask import Flask, jsonify,request\nimport numpy as np\nimport pandas as pd\nfrom flask_httpauth import HTTPBasicAuth\n\nimport json\nfrom pandas import json_normalize\n\napp = Flask(__name__)\nauth = HTTPBasicAuth()\n\n@auth.verify_password\ndef authenticate(username, password):\n if username and password:\n if username == 'admin' and password == 'admin':\n return True\n else:\n return False\n return False\n \n@app.route(\"/path\", methods=['POST'])\n@auth.login_required\ndef predict():\n request_data = request.get_json()\n\n sourcejobid = request.args.get('sourcejobid')\n targetjobid = request.args.get('targetjobid')\n flag = request.args.get('flag')\n if(flag==\"1\"):\n inpskills = request_data['Skills']\n df = pd.read_csv('jobroles.csv')\n df = df.astype('str')\n\n #Source and target grades\n dfseid= df.loc[df['JobID'] == sourcejobid]\n sourceGrade= list(set(dfseid[\"Grade\"]))[0]\n dftid= df.loc[df['JobID'] == targetjobid]\n targetGrade= list(set(dftid[\"Grade\"]))[0]\n\n dfseeid= df.loc[df['JobID'] == sourcejobid]\n ssector= list(set(dfseeid[\"Sector\"]))[0]\n dfsetid= df.loc[df['JobID'] == targetjobid]\n tsector= list(set(dfsetid[\"Sector\"]))[0]\n out=[]\n\n\n\n comp=[]\n sourcejobids= []\n sourcejobids.append(sourcejobid)\n\n dfseo= df.loc[df['JobID'] == sourcejobid]\n sjobtitle= str(list(set(dfseo[\"JobRole\"]))[0])\n resDicts=dict(JobId=sourcejobid, Position= sjobtitle, Sector= ssector, CommonSkills=\"0\", MatchSkillsScore= 0, MatchCompetencyScore= 00, MatchScore= 00)\n rs=[]\n rs.append(resDicts)\n out.append(dict(grade= sourceGrade,results= rs ))\n if(int(sourceGrade)=10):\n if(jid in list(set(dft[\"JobID\"]))):\n jobid=jid\n dfje= dfse.loc[dfse['Grade'] == str(k)]\n dfn= dfje.loc[dfje['JobID'] == str(jobid)]\n jobtitle= list(set(dfn[\"JobRole\"]))\n sector= 
list(set(dfn[\"Sector\"]))\n\n resDict=dict(JobId=jobid,Position= jobtitle[0], Sector= sector[0],CommonSkills=intersection_skills, MatchSkillsScore= match, MatchCompetencyScore= 0, MatchScore= common)\n result.append(resDict)\n print(result)\n \n elif(not result): \n if(jid in list(set(dft[\"JobID\"]))):\n jobid=jid\n dfje= dfse.loc[dfse['Grade'] == str(k)]\n dfn= dfje.loc[dfje['JobID'] == str(jobid)]\n jobtitle= list(set(dfn[\"JobRole\"]))\n sector= list(set(dfn[\"Sector\"]))\n resDict=dict(JobId=jid,Position= jobtitle[0], Sector= sector[0],CommonSkills=intersection_skills, MatchSkillsScore= match, MatchCompetencyScore= 0, MatchScore= common)\n result.append(resDict)\n print(result)\n else:\n if(jid in list(set(dft[\"JobID\"]))):\n jobid=jid\n dfje= dfse.loc[dfse['Grade'] == str(k)]\n dfn= dfje.loc[dfje['JobID'] == str(jobid)]\n jobtitle= list(set(dfn[\"JobRole\"]))\n sector= list(set(dfn[\"Sector\"]))\n resDict=dict(JobId=jid,Position= jobtitle[0], Sector= sector[0],CommonSkills=intersection_skills, MatchSkillsScore= match, MatchCompetencyScore= 0, MatchScore= common)\n result.append(resDict)\n print(result)\n out.append(dict(grade= k,results= sorted(result, key=lambda k: k[\"MatchScore\"],reverse=True)[0:3] ))\n \n sourcejobids= []\n for l in range(0,len(sorted(result, key=lambda k: k[\"MatchScore\"],reverse=True)[0:3])):\n print(out[-1][\"results\"][l]['JobId'])\n sourcejobids.append(out[-1][\"results\"][l]['JobId'])\n print(list(set(sourcejobids)))\n \n result1=[]\n pdfid= df.loc[df['JobID'] == str(sourcejobids[0])]\n pinpskills= pdfid[\"Skill\"]\n ndfid= df.loc[df['JobID'] == targetjobid]\n ninpskills= ndfid[\"Skill\"]\n\n skillslist1_set = set(pinpskills)\n intersection = skillslist1_set.intersection(ninpskills)\n\n intersection_skillst = list(intersection)\n\n commont = len(intersection_skillst)\n print(commont)\n matcht= commont / len(ninpskills)\n dft= df.loc[df['JobID'] == targetjobid]\n jobtitle= str(list(set(dft[\"JobRole\"]))[0])\n resDict1=dict(JobId=targetjobid, Position= jobtitle, CommonSkills=intersection_skillst, MatchSkillsScore= matcht, MatchCompetencyScore= 0, MatchScore= commont)\n result1.append(resDict1)\n\n dfe=df.loc[df['JobID'] == str(targetjobid)]\n out.append(dict(grade=list(set(dfe[\"Grade\"]))[0],results= result1))\n return jsonify(output=out)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423389061","text":"# Otra forma de leer archivos CSV con cabeceras, es utilizar el \n# objeto DictReader en vez de reader, y así acceder solo al \n# valor de las columnas deseadas, por su nombre:\n\nfrom csv import DictReader\n\nwith open('datos_meteorologicos.csv', 'r') as archivo:\n documento = DictReader(archivo, delimiter=';', quotechar='\"')\n for fila in documento: \n print(fila['ID'])\n","sub_path":"e_bahit_data_science/csv_files_DictReader.py","file_name":"csv_files_DictReader.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"446971116","text":"import os\nfrom pathlib import Path\nfrom typing import List\n\nfrom strictdoc.core.document_tree import DocumentTree\nfrom strictdoc.core.file_tree import Folder, FileFinder, File\nfrom strictdoc.core.source_tree import SourceTree\n\n\nclass SourceFile:\n def __init__(\n self,\n level,\n full_path,\n doctree_root_mount_path,\n 
in_doctree_source_file_rel_path,\n output_dir_full_path,\n output_file_full_path,\n ):\n assert isinstance(level, int)\n assert os.path.exists(full_path)\n\n self.level = level\n self.full_path = full_path\n self.doctree_root_mount_path = doctree_root_mount_path\n self.in_doctree_source_file_rel_path = in_doctree_source_file_rel_path\n self.output_dir_full_path = output_dir_full_path\n self.output_file_full_path = output_file_full_path\n self.path_depth_prefix = (\"../\" * (level + 2))[:-1]\n\n _, file_extension = os.path.splitext(in_doctree_source_file_rel_path)\n self.extension = file_extension\n\n self.traceability_info = None\n\n def __str__(self):\n return (\n \"SourceFile(\"\n \"level: {}, \"\n \"full_path: {}, \"\n \"doctree_root_mount_path: {}, \"\n \"in_doctree_source_file_rel_path: {}, \"\n \"output_path_dir_full_path: {}, \"\n \"output_path_file_full_path: {}\"\n \")\".format(\n self.level,\n self.full_path,\n self.doctree_root_mount_path,\n self.in_doctree_source_file_rel_path,\n self.output_dir_full_path,\n self.output_file_full_path,\n )\n )\n\n def is_python_file(self):\n return self.extension == \".py\"\n\n def is_c_file(self):\n return self.extension == \".c\"\n\n def is_cpp_file(self):\n return self.extension == \".cpp\"\n\n\nclass SourceFilesFinder:\n @staticmethod\n def find_source_files(\n output_html_root, document_tree: DocumentTree\n ) -> SourceTree:\n map_file_to_source = {}\n found_source_files: List[SourceFile] = []\n root_folder_or_file: Folder = document_tree.file_tree[\n 0\n ].root_folder_or_file\n assert os.path.abspath(root_folder_or_file.root_path)\n\n # TODO: Unify this on the FileTree class level.\n # Introduce #mount_directory method?\n doctree_root_abs_path = root_folder_or_file.root_path\n doctree_root_abs_path = (\n os.path.dirname(doctree_root_abs_path)\n if os.path.isfile(doctree_root_abs_path)\n else doctree_root_abs_path\n )\n doctree_root_mount_path = os.path.basename(doctree_root_abs_path)\n\n file_tree = FileFinder.find_files_with_extensions(\n doctree_root_abs_path, {\".py\", \".c\", \".cpp\"}\n )\n\n root_level = doctree_root_abs_path.count(os.sep)\n\n file: File\n for _, file, _ in file_tree.iterate():\n in_doctree_source_file_rel_path = os.path.relpath(\n file.root_path, doctree_root_abs_path\n )\n last_folder_in_path = os.path.relpath(\n file.get_folder_path(), doctree_root_abs_path\n )\n output_dir_full_path = os.path.join(\n output_html_root,\n \"_source_files\",\n doctree_root_mount_path,\n last_folder_in_path,\n )\n Path(output_dir_full_path).mkdir(parents=True, exist_ok=True)\n\n output_file_name = f\"{file.get_file_name()}.html\"\n output_file_full_path = os.path.join(\n output_dir_full_path, output_file_name\n )\n\n level = file.get_folder_path().count(os.sep) - root_level\n\n source_file = SourceFile(\n level,\n file.root_path,\n doctree_root_mount_path,\n in_doctree_source_file_rel_path,\n output_dir_full_path,\n output_file_full_path,\n )\n found_source_files.append(source_file)\n map_file_to_source[file] = source_file\n\n source_tree = SourceTree(\n file_tree, found_source_files, map_file_to_source\n )\n return source_tree\n","sub_path":"strictdoc/core/finders/source_files_finder.py","file_name":"source_files_finder.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"269084260","text":"from conans import ConanFile, tools, CMake\nfrom conans.errors import ConanInvalidConfiguration\nfrom conans.tools import Version\nimport 
os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibvaultConan(ConanFile):\n    name = \"libvault\"\n    license = \"MIT\"\n    url = \"https://github.com/conan-io/conan-center-index\"\n    homepage = \"https://github.com/abedra/libvault\"\n    description = \"A C++ library for Hashicorp Vault\"\n    topics = (\"vault\", \"libvault\", \"secrets\", \"passwords\")\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n    generators = \"cmake\", \"cmake_find_package\"\n    options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n    default_options = {\"shared\": False, \"fPIC\": True}\n\n    _cmake = None\n\n    @property\n    def _source_subfolder(self):\n        return \"source_subfolder\"\n\n    @property\n    def _build_subfolder(self):\n        return \"build_subfolder\"\n\n    @property\n    def _mac_os_minimum_required_version(self):\n        return \"10.15\"\n\n    def config_options(self):\n        if self.settings.os == \"Windows\":\n            del self.options.fPIC\n\n    def configure(self):\n        if self.options.shared:\n            del self.options.fPIC\n\n    def requirements(self):\n        self.requires(\"libcurl/7.80.0\")\n        self.requires(\"catch2/2.13.7\")\n\n    def validate(self):\n        compiler = str(self.settings.compiler)\n        compiler_version = Version(self.settings.compiler.version.value)\n\n        minimum_compiler_version = {\n            \"Visual Studio\": \"19\",\n            \"gcc\": \"8\",\n            \"clang\": \"7.0\",\n            \"apple-clang\": \"12\"\n        }\n\n        minimum_cpp_standard = 17\n\n        if compiler in minimum_compiler_version and \\\n                compiler_version < minimum_compiler_version[compiler]:\n            raise ConanInvalidConfiguration(\"{} requires a compiler that supports\"\n                                            \" at least C++{}. {} {} is not\"\n                                            \" supported.\"\n                                            .format(self.name, minimum_cpp_standard, compiler, compiler_version))\n\n        if compiler == \"clang\" and self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and self.settings.compiler.version == \"11\":\n            raise ConanInvalidConfiguration(\"clang 11 with libstdc++ is not supported due to old libstdc++ missing C++17 support\")\n\n        if tools.is_apple_os(self.settings.os):\n            os_version = self.settings.get_safe(\"os.version\")\n            if os_version and Version(os_version) < self._mac_os_minimum_required_version:\n                raise ConanInvalidConfiguration(\n                    \"macOS Mojave (10.14) and earlier cannot be built because the C++ standard library is too old.\")\n\n        if self.settings.compiler.get_safe(\"cppstd\"):\n            tools.check_min_cppstd(self, minimum_cpp_standard)\n\n    def source(self):\n        tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True, destination=self._source_subfolder)\n\n    def _configure_cmake(self):\n        if not self._cmake:\n            self._cmake = CMake(self)\n            self._cmake.definitions[\"ENABLE_TEST\"] = False\n            self._cmake.definitions[\"ENABLE_INTEGRATION_TEST\"] = False\n            self._cmake.definitions[\"ENABLE_COVERAGE\"] = False\n            self._cmake.definitions[\"LINK_CURL\"] = False\n            # Set `-mmacosx-version-min` to enable C++17 standard library support.\n            self._cmake.definitions['CMAKE_OSX_DEPLOYMENT_TARGET'] = self._mac_os_minimum_required_version\n            self._cmake.configure(build_folder=self._build_subfolder)\n        return self._cmake\n\n    def build(self):\n        for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n            tools.patch(**patch)\n        cmake = self._configure_cmake()\n        cmake.build()\n\n    def package(self):\n        self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n        cmake = self._configure_cmake()\n        cmake.install()\n\n        tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n        
tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.names[\"cmake_find_package\"] = \"libvault\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"libvault\"\n self.cpp_info.names[\"pkg_config\"] = \"vault\"\n","sub_path":"recipes/libvault/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"63673362","text":"\"\"\"\n------------------------------------------------------------------------\nLab 3, Task 1\n------------------------------------------------------------------------\nAuthor: Nicolas Mills\nID: 180856100\nEmail: mill6100@mylaurier.ca\n__updated__ = 2018-09-18\n------------------------------------------------------------------------\n\"\"\"\nMEMBERSHIP_DISCOUNT = 5 / 100 # Basic membership discount\n\nmembership_cost = float(input(\"Gym membership cost: $\"))\nnum_friends = int(input(\"Number of friends signed up: \"))\n\nif num_friends == 1:\n total_discount = MEMBERSHIP_DISCOUNT\nelif num_friends == 2:\n total_discount = 2 * MEMBERSHIP_DISCOUNT\nelif num_friends >= 3:\n total_discount = 3 * MEMBERSHIP_DISCOUNT\nelse:\n total_discount = 0\n \ntotal_cost = membership_cost - (membership_cost * total_discount)\n\nprint(\"Your membership cost is ${:,.2f}\".format(total_cost))","sub_path":"workspace/CP104/mill6100_l3/src/t01 - Copy.py","file_name":"t01 - Copy.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"306399169","text":"from livestreamer.compat import str, bytes\nfrom livestreamer.plugins import Plugin, PluginError, NoStreamsError\nfrom livestreamer.stream import RTMPStream\nfrom livestreamer.utils import urlget\n\nimport xml.dom.minidom, re\n\nclass UStreamTV(Plugin):\n AMFURL = \"http://cgw.ustream.tv/Viewer/getStream/1/{0}.amf\"\n SWFURL = \"http://cdn1.ustream.tv/swf/4/viewer.rsl.210.swf\"\n\n @classmethod\n def can_handle_url(self, url):\n return \"ustream.tv\" in url\n\n def _get_channel_id(self, url):\n data = urlget(url)\n\n match = re.search(b\"channelId=(\\d+)\", data)\n if match:\n return int(match.group(1))\n\n def _get_streams(self):\n def get_amf_value(data, key):\n pattern = (\"{0}\\x02..(.*?)\\x00\").format(key)\n match = re.search(bytes(pattern, \"ascii\"), data)\n if match:\n return str(match.group(1), \"ascii\")\n\n streams = {}\n channelid = self._get_channel_id(self.url)\n\n if not channelid:\n raise NoStreamsError(self.url)\n\n self.logger.debug(\"Fetching stream info\")\n data = urlget(self.AMFURL.format(channelid))\n\n playpath = get_amf_value(data, \"streamName\")\n cdnurl = get_amf_value(data, \"cdnUrl\")\n fmsurl = get_amf_value(data, \"fmsUrl\")\n\n if playpath:\n stream = RTMPStream(self.session, {\n \"rtmp\": (\"{0}/{1}\").format(cdnurl or fmsurl, playpath),\n \"pageUrl\": self.url,\n \"swfUrl\": self.SWFURL,\n \"live\": True\n })\n streams[\"live\"] = stream\n\n return streams\n\n__plugin__ = UStreamTV\n","sub_path":"src/livestreamer/plugins/ustreamtv.py","file_name":"ustreamtv.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"592036311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 7 18:05:51 2018\n\n@author: Think\n\"\"\"\n\nclass Solution:\n def numTrees(self, n):\n \"\"\"\n :type n: 
int\n :rtype: int\n catalan numbers 1, 1, 2, 5, 14, 42\n h[n] = h[1] * h[n-1] + h[2] *h[n-2] +...+h[n-1] * h[1]\n \"\"\"\n if n == 0:\n return 0\n ans = [0] * (n+1)\n ans[0] = 1\n for i in range(1,n+1):\n for j in range(i):\n ans[i] += ans[j] * ans[i-j-1]\n return ans[n]\n \nif __name__ == '__main__':\n n = 5\n result = Solution().numTrees(n)\n print(result)","sub_path":"96.Unique Binary Search Trees.py","file_name":"96.Unique Binary Search Trees.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"151167847","text":"import unittest\nfrom model import Model\nimport view\n\n\nclass TestModel(unittest.TestCase):\n def test_del(self):\n model = Model()\n for i in model.get_all():\n view.render_item(i[0], i[1], i[2], i[3])\n\n model.del_film('Inception')\n print(\"DELETED\\n\\n\\n\")\n for i in model.get_all():\n view.render_item(i[0], i[1], i[2], i[3])\n\n def test_find_by_name(self):\n print('test find by name Inception')\n model = Model()\n print(model.find_by_film('Inception'))\n\n def test_find_by_producer(self):\n print('test find by producer Cristofer Nolan')\n model = Model()\n print(model.find_by_producer('Cristofer Nolan'))\n\n def test_find_by_name_mask(self):\n print('Test by mask')\n model = Model()\n print(model.list_film_by_name_mask('*g*'))\n","sub_path":"test/ModelTest.py","file_name":"ModelTest.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"224909170","text":"import time\n\nfrom selenium import webdriver\n\ndr = webdriver.Chrome()\ndr.get(\"http://www.baidu.com\")\n\nsearch_text = dr.find_element_by_id(\"kw\")\nsearch_text.send_keys(\"selnium1\")\nsearch_text.submit()\n\ntime.sleep(2)\n\ndr.quit()\n","sub_path":"src/test/test_4.3.2.py","file_name":"test_4.3.2.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"617444806","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nfrom colorthief import ColorThief\nfrom urllib.parse import urlparse\nimport io\nimport os\nimport json\n\nfrom pymongo import MongoClient\nclient = MongoClient(\"mongsfd23@cluster0bbb-shard-00-00-ffddp.mongodb.net:27017,cluster0bbb-shard-00-01-ffddp.mongodb.net:27017,cluster0bbb-shard-00-02-ffddp.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0bbb-shard-0&authSource=admin\")\n\ndb = client.test\ngames = ['clash_royale', 'clash_of_clans', 'overwatch']\nuser_tags = db.usertags\n\nclass CustomContext(commands.Context):\n '''Custom Context class to provide utility.'''\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @property\n def session(self):\n '''Returns the bot's aiohttp client session'''\n return self.bot.session\n\n def delete(self):\n '''shortcut'''\n return self.message.delete()\n\n async def purge(self, *args, **kwargs):\n '''Shortcut to channel.purge'''\n await self.channel.purge(*args, **kwargs)\n\n @staticmethod\n def valid_image_url(url):\n '''Checks if a url leads to an image.'''\n types = ['.png', '.jpg', '.gif', '.webp']\n parsed = urlparse(url)\n if any(parsed.path.endswith(i) for i in types):\n return url.replace(parsed.query, 'size=128')\n return False\n\n async def get_dominant_color(self, url=None, quality=10):\n '''\n Returns the dominant color of an image from a url\n '''\n av = self.author.avatar_url\n url = self.valid_image_url(url or av)\n\n if not 
url:\n raise ValueError('Invalid image url passed.')\n try:\n async with self.session.get(url) as resp:\n image = await resp.read()\n except:\n return discord.Color.default()\n\n with io.BytesIO(image) as f:\n try:\n color = ColorThief(f).get_color(quality=quality)\n except:\n return discord.Color.dark_grey()\n \n return discord.Color.from_rgb(*color)\n\n def load_json(self, path=None):\n with open(path or 'data/stats.json') as f:\n return json.load(f)\n\n def save_json(self, data, path=None):\n with open(path or 'data/stats.json', 'w') as f:\n f.write(json.dumps(data, indent=4))\n\n \n def save_db (self, key, value):\n user_tags = db.usertags.update_one({\"user tags\" : \"here\"}, {'$set': {key : value}}, upsert=True)\n \n \n def save_tag(self, tag, game, id=None):\n id = id or self.author.id\n game = game.lower()\n mylist = []\n mylist.append(str(tag))\n #if not db.usertags.distinct(str(id)):\n #user_tags = db.usertags.update_one({\"user tags\" : \"here\"}, {'$set': {str(game) + '.' + str(id) : mylist}}, upsert=True)\n cursor = db.usertags.find({str(game) + '.' + str(id): {\"$exists\": True}}).limit(1)\n if cursor.count() > 0:\n ting = db.usertags.distinct(str(game) + '.' + str(id))\n ting.append(str(tag))\n thong = ting\n user_tags = db.usertags.update_one({\"user tags\": \"here\"}, {'$set': {str(game) + '.' + str(id): thong}},\n upsert=True)\n\n else:\n user_tags = db.usertags.update_one({\"user tags\": \"here\"}, {'$set': {str(game) + '.' + str(id): mylist}}, upsert=True)\n\n\n\n \n\n\n \n \n \n\n def add_tag(self, tag, game, id=None):\n id = id or self.author.id\n \n if db.usertags.find({ str(game) + '.' + str(id): { '$exists': True, '$ne': None } }) is None:\n user_tags = db.usertags.update_one({\"user tags\" : \"here\"}, {'$push': {str(game) + '.' + str(id) : str(tag)}}, upsert=True)\n\n else: \n pass\n\n def remove_tag(self, tag, game, id=None):\n id = id or self.author.id\n game = game.lower()\n\n the_tag = db.usertags.distinct(str(game) + '.' + str(id))\n the_tag.remove(str(tag))\n user_tags = db.usertags.update({\"user tags\" : \"here\"}, { '$set' : {str(game) + '.' + str(id) : the_tag} });\n\n\n def get_tag(self, game, id=None, *, index=0):\n id = id or self.author.id\n\n tag = db.usertags.distinct(str(game) + '.' 
+ str(id))\n \n return tag[index]\n\n\n @staticmethod\n def paginate(text: str):\n '''Simple generator that paginates text.'''\n last = 0\n pages = []\n for curr in range(0, len(text)):\n if curr % 1980 == 0:\n pages.append(text[last:curr])\n last = curr\n appd_index = curr\n if appd_index != len(text)-1:\n pages.append(text[last:curr])\n return list(filter(lambda a: a != '', pages))\n","sub_path":"ext/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"153311160","text":"import math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import integrate as intg\r\n\r\nN=101 # number of integrate points\r\n\r\ndef fun(theta,z):\r\n q=1 # for simplification, set charge density = 1\r\n R=1 # for simplification\r\n if (z==R) and (theta==0):\r\n return 0\r\n # avoid the effect of singularity\r\n # otherwise, the code will have problem!\r\n y1=R**2*q*(z-R*np.cos(theta))*np.sin(theta)\r\n y2=R**2+z**2-2*R*z*np.cos(theta)\r\n y2=y2*np.sqrt(y2)\r\n y=y1/y2\r\n return y\r\n# ignore the factor 1/(4*np.pi*epsilon_0)\r\n\r\ndef myint_linear(z,n): # z as a number\r\n theta=np.linspace(0,np.pi,n)\r\n intg=0.5*(fun(theta[0],z)+fun(theta[-1],z))\r\n for i in range(1,n-1):\r\n intg=intg+fun(theta[i],z)\r\n return intg*(theta[1]-theta[0])\r\n\r\ndef myint_simpson(z,n):\r\n theta=np.linspace(0,np.pi,n)\r\n intg=fun(theta[0],z)+fun(theta[-1],z)\r\n for i in range(1,n-1,2):\r\n intg=intg+4*fun(theta[i],z)\r\n for i in range(2,n-2,2):\r\n intg=intg+2*fun(theta[i],z)\r\n return intg*(theta[1]-theta[0])/3\r\n\r\n\r\nz0=np.linspace(0,3,301)\r\n\r\nintg_quad=[]\r\nfor i in range(len(z0)):\r\n intg_quad.append(intg.quad(lambda x : fun(x,z0[i]), 0, np.pi)[0])\r\n\r\nmy_linear=[]\r\nfor i in range(len(z0)):\r\n my_linear.append(myint_linear(z0[i],N))\r\n\r\nmy_simpson=[]\r\nfor i in range(len(z0)):\r\n my_simpson.append(myint_linear(z0[i],N))\r\n\r\n\r\nplt.plot(z0,intg_quad,'-',z0,my_linear,'--',z0,my_simpson,':')\r\nplt.legend(['integrate.quad','linear','simpson'],loc='best')\r\nplt.show()\r\n\r\n# There is not obvious different between Linear and Simpson integrate,\r\n# but the number of point make difference.\r\n\r\n# there is a singularity in the integrand.\r\n# In my integrator, this singularity is important; but quad do not care it.\r\n","sub_path":"HW1/JIAO Hao 1-4.py","file_name":"JIAO Hao 1-4.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"241321638","text":"from django.core.management.base import BaseCommand\n\nfrom core.tasks import fail_intentionally_task\nfrom logs.tasks import sync_interest_task, recompute_interest_by_batch_task, \\\n import_new_sushi_attempts_task, smart_interest_sync_task\nfrom sushi.tasks import fetch_new_sushi_data_task, retry_queued_attempts_task, \\\n fetch_new_sushi_data_for_credentials_task\nfrom publications.tasks import erms_sync_platforms_task\n\n\nclass Command(BaseCommand):\n\n help = 'Start the specified celery task'\n\n tasks = {\n 'fetch_new_sushi_data_task': fetch_new_sushi_data_task,\n 'fetch_new_sushi_data_for_credentials_task': fetch_new_sushi_data_for_credentials_task,\n 'sync_interest_task': sync_interest_task,\n 'recompute_interest_by_batch_task': recompute_interest_by_batch_task,\n 'retry_queued_attempts_task': retry_queued_attempts_task,\n 'import_new_sushi_attempts_task': import_new_sushi_attempts_task,\n 
'erms_sync_platforms_task': erms_sync_platforms_task,\n 'smart_interest_sync_task': smart_interest_sync_task,\n 'fail_intentionally_task': fail_intentionally_task,\n }\n\n def add_arguments(self, parser):\n parser.add_argument('task')\n parser.add_argument('params', nargs='*')\n\n def handle(self, *args, **options):\n task_name = options['task']\n args = options['params'] or ()\n task = self.tasks.get(task_name)\n if not task:\n self.stderr.write(self.style.ERROR(f'Cannot find task: {task_name}'))\n self.stderr.write(self.style.WARNING('Available tasks:'))\n for key in self.tasks.keys():\n self.stderr.write(self.style.WARNING(f' - {key}'))\n else:\n self.stderr.write(self.style.SUCCESS(f'Starting task: {task_name}'))\n handle = task.delay(*args)\n self.stderr.write(self.style.SUCCESS(f'Task handle: {handle}'))\n\n\n","sub_path":"apps/core/management/commands/start_celery_task.py","file_name":"start_celery_task.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543072890","text":"import pandas as pd\r\nimport numpy as np\r\nfrom math import pi\r\n\r\nimport squarify\r\nimport plotly.plotly as py\r\nimport plotly.graph_objs as go\r\n\r\n\r\nfrom bokeh.layouts import column,row\r\nfrom bokeh.models import Button,Select,RangeSlider,DataTable,TableColumn,RadioGroup,CheckboxGroup,Div,Markup\r\nfrom bokeh.plotting import figure, curdoc\r\nfrom bokeh.io import output_file, show ,output_notebook\r\nfrom bokeh.transform import cumsum,factor_cmap\r\nfrom bokeh.models import ColumnDataSource,FactorRange\r\nfrom bokeh.models.widgets import Paragraph\r\n\r\n# Create pandas Data Frame from CSV file\r\ndf = pd.read_csv('student-por.csv')\r\n\r\n##########################################################################################################\r\n\r\n# Create Pie Chart to display the Gender Frequency of the Students\r\npie_data = df.groupby('sex').size().reset_index(name='count')\r\nangle = pie_data['count']/pie_data['count'].sum() * 2*pi\r\ncolor = ['cyan','lightgreen']\r\nsex = ['M','F']\r\npie_chart = figure(title=\"Gender(Male Vs Female)\", toolbar_location='right',\r\n x_range=(-0.5, 1.0))\r\n \r\ncolumnData = ColumnDataSource(data= dict(angle=angle,color = color,sex=sex,data = pie_data))\r\n\r\ncheckbox_group = CheckboxGroup(\r\n labels=[\"GP\", \"MS\"], active=[0, 1])\r\n\r\ndef updatepie(attr,old,new): \r\n if len(new) == 1:\r\n print(new[0])\r\n if new[0] == 0:\r\n tempData = df[(df['school']== 'GP')]\r\n pie_data = tempData.groupby('sex').size().reset_index(name='count')\r\n angle = pie_data['count']/pie_data['count'].sum() * 2*pi\r\n columnData.data['angle'] = angle\r\n columnData.data['data'] = pie_data\r\n print(new[0])\r\n elif new[0] == 1:\r\n tempData = df[(df['school']== 'MS')]\r\n pie_data = tempData.groupby('sex').size().reset_index(name='count')\r\n angle = pie_data['count']/pie_data['count'].sum() * 2*pi\r\n columnData.data['angle'] = angle\r\n columnData.data['data'] = pie_data\r\n print(new[0])\r\n \r\n \r\n\r\ncheckbox_group.on_change('active', updatepie)\r\n\r\npie_chart.wedge(x=0, y=1, radius=0.4, start_angle=cumsum('angle', include_zero=True), \r\n end_angle=cumsum('angle'),\r\n line_color=\"white\", fill_color='color', legend='sex', source=columnData)\r\npie_chart.axis.axis_label=None\r\npie_chart.axis.visible=False\r\npie_chart.grid.grid_line_color = None\r\ncol2 = 
column(checkbox_group,pie_chart)\r\n##########################################################################################################\r\n\r\n# Create Stacked Bar Chart\r\ndataG3 = df.groupby(['G3','sex']).size().reset_index(name='counts')\r\ndfG3 = df.G3.unique().tolist()\r\ndfG3.sort()\r\n\r\n\r\nG3 = []\r\n\r\nfor val in dfG3:\r\n G3.append(str (val))\r\n \r\nmale = []\r\nfemale = []\r\n\r\nfor val in dfG3:\r\n females = dataG3[(dataG3['G3']== val) & (dataG3['sex']== 'F')]\r\n males = dataG3[(dataG3['G3']== val) & (dataG3['sex']== 'M')]\r\n if males.empty:\r\n male.append(0)\r\n else:\r\n male.append(males['counts'].iloc[0])\r\n if females.empty:\r\n female.append(0)\r\n else:\r\n female.append(females['counts'].iloc[0])\r\nsex = ['M', 'F']\r\ndataSource = {'Grade': G3,\r\n 'M': male,\r\n 'F': female \r\n }\r\npalette = [\"#c9d9d3\", \"#718dbf\"]\r\n\r\nx = [ (G, s) for G in G3 for s in sex ]\r\n\r\ncounts = sum(zip(dataSource['M'], dataSource['F']), ())\r\n\r\nsource = ColumnDataSource(data=dict(x=x, counts=counts))\r\n\r\nstacked_bar = figure(x_range=FactorRange(*x), plot_width=800, title=\"Grade Counts by Sex\",\r\n toolbar_location='right')\r\n\r\nstacked_bar.vbar(x='x', top='counts', width=1, source=source, line_color=\"white\",\r\n fill_color=factor_cmap('x', palette=palette, factors=sex, start=1, end=2))\r\n\r\nstacked_bar.y_range.start = 0\r\nstacked_bar.x_range.range_padding = 0.1\r\nstacked_bar.xaxis.major_label_orientation = 1\r\nstacked_bar.xgrid.grid_line_color = None\r\n##########################################################################################################\r\n\r\n#Create the bar chart for Alcohol Frquency range\r\ndalco = df.groupby('Dalc').size().reset_index(name='counts')\r\n\r\nx = dalco['Dalc'].apply(str).tolist()\r\ny = dalco['counts'].tolist()\r\n\r\nbar = figure(x_range=x, title=\"Alcohol consumption per day\",\r\n toolbar_location='right')\r\nbar.xaxis.axis_label = 'Quantity of Alcohol'\r\nbar.yaxis.axis_label = 'Count'\r\n\r\nbar.vbar(x=x, top=y, width=0.9)\r\n\r\nbar.xgrid.grid_line_color = None\r\nbar.y_range.start = 0\r\n##########################################################################################################\r\n\r\nN = df.shape[0]\r\nx = df['traveltime']\r\ny = df['G3']\r\nradii = np.random.random(size=N)\r\n\r\nsource = ColumnDataSource(data=dict(x=x, y=y,data=df))\r\n\r\ncolors = [\r\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\r\n]\r\n\r\nTOOLS=\"hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom\"\r\n\r\np = figure(tools=TOOLS, title=\"Grade Variation based on the Studytime , Travel time , Absences\")\r\n\r\np.scatter('x', 'y',source =source,line_width=3, line_alpha=0.6) \r\n\r\np.xaxis.axis_label = 'Grade'\r\np.yaxis.axis_label = 'traveltime'\r\n \r\nselect = Select(title=\"Select Field:\", \r\n value=\"traveltime\", \r\n options=[\"studytime\", \"traveltime\",\"absences\"])\r\n\r\ndef select_callback(attr,old,new):\r\n source.data['y'] = df[new]\r\n p.yaxis.axis_label = new\r\n\r\nselect.on_change('value',select_callback)\r\n##########################################################################################################\r\n\r\nsource1 = ColumnDataSource(data=dict())\r\n\r\nslider = RangeSlider(title=\"Grade\", start=0, end=20, value=(0, 20), step=1, format=\"0,0\")\r\nslider.on_change('value', lambda attr, old, new: update())\r\n\r\ndef update():\r\n current = df[(df['G3'] >= slider.value[0]) & (df['G3'] <= slider.value[1])].dropna()\r\n source1.data = {\r\n 
'school' : current.school,\r\n 'sex' : current.sex,\r\n 'age' : current.age,\r\n 'G3' : current.G3\r\n }\r\n\r\ncolumns = [\r\n TableColumn(field=\"school\", title=\"School Name\"),\r\n TableColumn(field=\"sex\", title=\"Gender\"),\r\n TableColumn(field=\"age\", title=\"Age (years)\"),\r\n TableColumn(field=\"G3\", title=\"Grade\")\r\n]\r\n\r\nupdate()\r\ndata_table = DataTable(source=source1, columns=columns,width=1000)\r\n##########################################################################################################\r\n#Create Chart for displaying the Frquency of Reason\r\n\r\nradio_group = RadioGroup(\r\n labels=[\"GP\", \"MS\"], active=0)\r\n\r\n\r\ntempData = df[(df['school']== 'GP')]\r\ndfReason = tempData.groupby('reason').size().reset_index(name='counts') \r\nx1 = dfReason['reason']\r\ny1 = dfReason['counts']\r\n\r\ndataSource = ColumnDataSource(data=dict(x1=x1,y1=y1,data=dfReason))\r\n\r\ndef make_plot(attr,old,new):\r\n if new == 0:\r\n tempData = df[(df['school']== 'GP')]\r\n dfReason = tempData.groupby('reason').size().reset_index(name='counts')\r\n dataSource.data['x1'] = dfReason['reason']\r\n dataSource.data['y1'] = dfReason['counts']\r\n dataSource.data['data'] = dfReason\r\n elif new == 1:\r\n tempData = df[(df['school']== 'MS')] \r\n dfReason = tempData.groupby('reason').size().reset_index(name='counts') \r\n dataSource.data['x1'] = dfReason['reason']\r\n dataSource.data['y1'] = dfReason['counts']\r\n dataSource.data['data'] = dfReason\r\n\r\nReason_bar = figure(x_range= x1, title=\"Reason Count\",\r\n toolbar_location='right')\r\nReason_bar.vbar(x='x1',top='y1', source = dataSource, width=0.9)\r\nReason_bar.xgrid.grid_line_color = None\r\nReason_bar.y_range.start = 0\r\nReason_bar.xaxis.axis_label = \"Reason\"\r\nReason_bar.yaxis.axis_label = \"Count\"\r\nradio_group.on_change('active', make_plot) \r\n\r\ncol1 = column(radio_group,Reason_bar)\r\n##########################################################################################################\r\ndiv = Div(text=\"\"\"Student Performance Analysis\"\"\",\r\nwidth=1000, height=40)\r\n\r\nmarkup = Markup( text=\"\"\"
Student Grade depends on Study Hours and Travel Time
\"\"\")\r\n\r\npara = Paragraph(text=\"\"\"Why Bokeh Library:\r\nWe have choosen Python Programming language because there are more interaction techniques are available with different libraries. From this We used Bokeh,Matplotlib and Seaborn which gives us the clear view for our idea. We started with R programming(R shiny) and some of the python libraries like Plotly,Dash etc . \r\nHere, we found a problem while executing the process. So we have gone through with Bokeh package with the Dashboard.\r\n\r\nINTERACTIVE FUNCTIONS:\r\nsliders\r\nRadiobutton\r\nzoom in and out\r\ndropdown list\r\nhovering\r\ncrosshair\"\"\",\r\nwidth=800, height=100)\r\n\r\nrow1 = row(div)\r\nrow2 = row(slider,data_table)\r\nrow3 = row(col2,stacked_bar)\r\nrow4 = row(bar,column(select,p))\r\nrow5 = row(para)\r\n##########################################################################################################\r\n\r\n# put the button and plot in a layout and add to the document\r\n\r\ncurdoc().add_root(column(row1,row2,row3,row4,row5))\r\ncurdoc().title = \"Student Performance Analysis\"","sub_path":"my-app.py","file_name":"my-app.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"4090439","text":"n = int(input('Enter a positive integer\\n> '))\r\n\r\nfactorial = 1\r\n\r\n#the trick is to start from n, then multiply by (n-1), and so on...\r\nwhile n > 1: #can also be >= 1\r\n factorial *= n\r\n n -= 1 #decrement n value by 1 for next multiplication\r\n\r\nprint('Factorial is:', factorial)\r\n","sub_path":"Recitation04/exercise6_W.py","file_name":"exercise6_W.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362512682","text":"import argparse\nimport sqlite3\n\n\ndef cli():\n parser = argparse.ArgumentParser()\n parser.add_argument('--player1')\n parser.add_argument('--player2')\n return parser.parse_args()\n\n\ndef random_deck(conn, owner):\n cur = conn.cursor()\n cur.execute('SELECT * FROM Decks WHERE owner = ? ORDER BY RANDOM() LIMIT 1', (owner, ))\n row = cur.fetchone()\n cur.close()\n return row\n\n\ndef find_match(conn, deck_row, other_owner):\n keyforge_id, expected_aember, aember_control, creature_control, artifact_control, deck_manipulation, aerc_score, sas_rating, raw_aember, owner, name, house1, house2, house3 = deck_row\n\n point_range = 1\n min_expected_aember = expected_aember - point_range\n max_expected_aember = expected_aember + point_range\n\n min_aember_control = aember_control - point_range\n max_aember_control = aember_control + point_range\n\n min_creature_control = creature_control - point_range\n max_creature_control = creature_control + point_range\n\n min_artifact_control = artifact_control - point_range\n max_artifact_control = artifact_control + point_range\n\n min_deck_manipulation = deck_manipulation - point_range\n max_deck_manipulation = deck_manipulation + point_range\n\n query = '''\n SELECT *\n FROM Decks\n WHERE (expected_aember >= ? AND expected_aember <= ?) AND\n (aember_control >= ? AND aember_control <= ?) AND\n (creature_control >= ? AND creature_control <= ?) AND\n (artifact_control >= ? AND artifact_control <= ?) AND\n (deck_manipulation >= ? AND deck_manipulation <= ?) AND\n owner = ? 
AND\n keyforge_id <> ?\n ORDER BY RANDOM()\n LIMIT 1\n '''\n query_args = (min_expected_aember, max_expected_aember,\n min_aember_control, max_aember_control,\n min_creature_control, max_creature_control,\n min_artifact_control, max_artifact_control,\n min_deck_manipulation, max_deck_manipulation,\n other_owner, keyforge_id, )\n\n cur = conn.cursor()\n cur.execute(query, query_args)\n row = cur.fetchone()\n cur.close()\n return row\n\n\ndef main(cli):\n if cli.player1 is None or cli.player2 is None:\n print('--player1 and --player2 options are required')\n return\n\n conn = sqlite3.connect('wildwormhole.db')\n while True:\n p1_deck = random_deck(conn, cli.player1)\n match_deck = find_match(conn, p1_deck, cli.player2)\n if match_deck is None:\n continue\n else:\n break\n conn.close()\n p1_deck_args = [cli.player1] + [i for i in p1_deck[10:]] + [i for i in p1_deck[6:8]]\n p2_deck_args = [cli.player2] + [i for i in match_deck[10:]] + [i for i in match_deck[6:8]]\n msg_template = '{0} playing {1} ({2} | {3} | {4}): AERC {5} | SAS {6}'\n print(msg_template.format(*p1_deck_args))\n print('VS')\n print(msg_template.format(*p2_deck_args))\n\n\nif __name__ == \"__main__\":\n main(cli())","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"172214970","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport urllib\nimport time\nfrom string import punctuation\n\ndef main():\n # 챗봇의 단어 리스트\n keywords = []\n\n # 사용자의 마지막 단어\n user_word = input()\n # user_word가 정상적인 단어라면,\n if iscurrect_user_word(user_word):\n #그 단어의 끝단어로 시작하는 리스트를 가져옴.\n keywords = startswith_endof_userword(user_word)\n #그 리스트에서 2글자 미만, 4글자 이상단어 삭제함\n keywords = word_2to3(keywords)\n else: \n keywords.append(\"그런 단어는 없단다 나의 승리란다\")\n print(keywords)\n\n# -----------------------------------------------------\n# 유저 단어의 끝 단어로 검색하는 함수\n# str(단어)를 입력받고\n# list(단어 명단)을 리턴한다.\n# -----------------------------------------------------\ndef startswith_endof_userword(text):\n page_num = 1\n keywords = []\n # 가져올 샘플단어 수 최소단위 100\n word_num = 100\n # 단어 검색 counter\n length = 0\n lose_game = False\n\n # 유저 단어맨 끝 글자를 유니코드로 변환\n user_word_uni = urllib.parse.quote(text[-1])\n # 검색 결과에 따라 엄청 다를 수가 있다. 
제일 많은 '가'가 11000개로 110페이지까지 있으니 넉넉하게 30페이지로 \n for i in range(0,(word_num//100)):\n url = \"https://www.wordrow.kr/%EC%8B%9C%EC%9E%91%ED%95%98%EB%8A%94-%EB%A7%90/\"+user_word_uni+\"/?%EC%AA%BD=\"+str(page_num+i)\n sourcecode = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(sourcecode, \"lxml\")\n\n # 시작하는 단어가 없을 경우 / 게임 패배 했을 경우\n for i in soup.find_all(\"span\", class_= \"h4 text-warning\"):\n keywords.append(\"졌다\")\n lose_game = True\n break\n \n if lose_game:\n lose_game = False\n break\n\n # 시작하는 단어가 있는 경우 단어들 리스트에 추가\n for i in soup.find_all(\"h3\", class_=\"card-caption\"):\n if length >= word_num:\n break\n # tag a = 단어 / 단어를 keywords에 추가\n keywords.append(i.find('a').get_text().strip())\n length += 1\n return keywords\n\n# -----------------------------------------------------\n# 유저 단어 검사 함수\n# str(단어)를 입력받고\n# True/False를 리턴한다.\n# -----------------------------------------------------\ndef iscurrect_user_word(text):\n \n # 자음을 입력했으면 빠이\n consonants = ['ㄱ','ㄴ','ㄷ','ㄹ','ㅁ','ㅂ','ㅅ','ㅇ','ㅈ','ㅊ','ㅋ','ㅌ','ㅍ','ㅎ']\n for i in text:\n if i in consonants:\n return False\n\n # 특수문자 있으면 빠이\n for i in text:\n if i in punctuation:\n return False\n\n # 한글자 단어면 빠이\n if len(text) < 2:\n return False\n \n # 검색 결과\n # 사용자 단어를 유니코드화 함\n try:\n user_word_uni = urllib.parse.quote(text)\n url = \"https://www.wordrow.kr/%EC%9D%98%EB%AF%B8/\"+user_word_uni\n sourcecode = urllib.request.urlopen(url).read()\n except :\n return False\n\n soup = BeautifulSoup(sourcecode, \"lxml\")\n\n # 검색했는데 없는 페이지 뜨면 빠이\n for i in soup.find_all(\"h2\", class_= \"card-caption\"):\n return False\n\n # 저것들이 다 아닌 괜찮은 단어면 단어면 ㅇㅋ\n return True\n\n# -----------------------------------------------------\n# 2글자 미만, 4글자 이상 글자 다 걸러냄\n# list(단어 명단)을 입력받고\n# list(단어 명단)을 리턴한다.\n# -----------------------------------------------------\ndef word_2to3(keywords):\n new_list = []\n for i in keywords:\n if len(i) > 3:\n continue\n elif 2 <= len(i) <= 3:\n new_list.append(i)\n else:\n continue\n\n return new_list\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"endtoend_Prac-14시25분.py","file_name":"endtoend_Prac-14시25분.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419074656","text":"# -*- coding: utf-8 -*-\n\nimport PyMcaQt as qt\n\nclass LabelEditAndButton(qt.QWidget) :\n\n def __init__(self,boolLabel=True,textLabel=\"text1\",booltextEdit=True,textEdit=\"text2\", boolButton=True,textButton=\"Browse...\", parent=None) :\n qt.QWidget.__init__(self, parent)\n self.textLabel=textLabel\n self.textEdit=textEdit\n self.textButton=textButton;\n self.boolLabel=boolLabel\n self.booltextEdit=booltextEdit\n self.boolButton=boolButton\n\n self._build()\n\n def _build(self) :\n self.layout=qt.QHBoxLayout()\n self.label =None\n self.lineEdit =None\n self.button =None\n if(self.boolLabel) :\n self.label=qt.QLabel(self.textLabel,self)\n self.layout.addWidget(self.label)\n if(self.booltextEdit) :\n self.lineEdit= qt.QLineEdit(self.textEdit,self)\n self.layout.addWidget(self.lineEdit)\n\n if(self.boolButton) :\n self.button=qt.QPushButton(self.textButton,self)\n self.layout.addWidget(self.button)\n self.connect(self.button,qt.SIGNAL(\"clicked()\"),self.buttonPushed)\n\n self.setLayout(self.layout)\n\n def changeLabel(self,textLabel):\n self.textLabel=textLabel\n self.label.setText(self.textLabel)\n\n def changeLineEdit(self,textLineEit):\n self.textEdit=textLineEit\n self.lineEdit.setText(self.textEdit)\n\n def valueLineEdit(self):\n 
self.textEdit=self.lineEdit.text()\n return self.textEdit\n\n def buttonPushed(self):\n self.resultFileName = str(qt.QFileDialog.getSaveFileName(self, \"Save 3D volume snapshots \", self.textEdit))\n self.lineEdit.setText(self.resultFileName)\n self.textEdit = self.resultFileName\n return self.textEdit\n","sub_path":"PINI/LabelEditAndButton.py","file_name":"LabelEditAndButton.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"101988835","text":"def dict_interdiff(d1, d2):\n '''\n d1, d2: dicts whose keys and values are integers\n Returns a tuple of dictionaries according to the instructions above\n '''\n intersect = {}\n diff = {}\n \n d1_keys = list(d1.keys())\n d2_keys = list(d2.keys())\n \n for d1_key in d1.keys():\n try:\n index1 = d1_keys.index(d1_key)\n index2 = d2_keys.index(d1_key)\n intersect[d1_key] = f(d1[d1_key], d2[d1_key])\n del d1_keys[index1]\n del d2_keys[index2]\n except ValueError:\n continue\n \n combined_keys = d1_keys + d2_keys\n combined_keys.sort()\n \n for key in combined_keys:\n if key in d1_keys:\n diff[key] = d1[key]\n else:\n diff[key] = d2[key]\n \n return (intersect, diff, )","sub_path":"Quiz/Problem 7.py","file_name":"Problem 7.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"57997970","text":"#########################################################\r\n# Utilities for the Unicon Data warehouse Importer\r\n#\r\n\r\nimport logging\r\nimport ConfigParser\r\nimport psycopg2\r\n\r\n\r\n#########################################################\r\n# read config file, return config dictionary\r\n#\r\ndef getConfig(filename):\r\n config = {}\r\n try:\r\n parser = ConfigParser.ConfigParser()\r\n parser.read(filename)\r\n itemList = parser.items('Main')\r\n for i in itemList:\r\n config[i[0]] = i[1]\r\n \r\n logging.debug(\"Loaded config: \\n\\n\" + str(config) + \"\\n\\n\")\r\n \r\n except Exception as err:\r\n logging.error('failed to load config ' + str(err))\r\n\r\n return config\r\n\r\n\r\n\r\n#########################################################\r\n# open and return the input file, in read mode\r\n#\r\ndef getFile(filename):\r\n try:\r\n f = open(filename, 'r')\r\n except Exception as ex:\r\n logging.error('Failed to open file: ' + filename + ' : ' + str(ex))\r\n return None\r\n return f\r\n\r\n#########################################################\r\n# get a Postgres db connection (works for Redshift)\r\n#\r\ndef getConnection(config):\r\n conn = None\r\n try:\r\n conn=psycopg2.connect(dbname=config['dbname'], host=config['host'], port=eval(config['port']), user=config['user'], password=config['password'])\r\n except Exception as err:\r\n logging.error('failed to get DB connection to: ' + config['dbname'] + \" ex: \" + str(err))\r\n\r\n return conn\r\n","sub_path":"uniDWUtils.py","file_name":"uniDWUtils.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"538304402","text":"from flask import flash, redirect, render_template, url_for\nfrom flask_login import login_required\nfrom retrotechclub import app, db\nfrom retrotechclub.models import GameMaster, GameImage\nfrom retrotechclub.forms import ImageForm\n\n\n@app.route('/games//')\n@login_required\ndef game_images(master_id, image_type):\n images = GameImage.query.filter_by(game_master_id=master_id,\n 
image_type=image_type)\n return render_template('game_images.html', images=images,\n image_type=image_type, master_id=master_id)\n\n\n@app.route('/games///add', methods=['POST', 'GET'])\n@login_required\ndef game_image_add(master_id, image_type):\n game_master = GameMaster.query.get_or_404(master_id)\n form = ImageForm()\n if form.validate_on_submit():\n image = GameImage(\n image_type,\n form.image.data.filename,\n form.caption.data,\n form.credit.data,\n master_id\n )\n image.save_images(form.image.data)\n db.session.add(image)\n db.session.commit()\n flash('New image added', 'alert-success')\n return render_template('game_image_add.html', game_master=game_master,\n image_type=image_type, form=form)\n\n\n@app.route('/games////edit',\n methods=['POST', 'GET'])\n@login_required\ndef game_image_edit(master_id, image_type, image_id):\n game_master = GameMaster.query.get_or_404(master_id)\n image = GameImage.query.get_or_404(image_id)\n form = ImageForm()\n if not form.is_submitted():\n form.caption.data = image.caption\n form.credit.data = image.credit\n if form.validate_on_submit():\n image.caption = form.caption.data\n image.credit = form.credit.data\n db.session.commit()\n flash('Image edited', 'alert-success')\n return render_template('game_image_edit.html', image=image,\n game_master=game_master, form=form)\n\n\n@app.route('/games////delete')\n@login_required\ndef game_image_delete(master_id, image_type, image_id):\n image = GameImage.query.get_or_404(image_id)\n image.delete_images()\n db.session.delete(image)\n db.session.commit()\n flash('Image deleted', 'alert-success')\n return redirect(url_for('game_images', master_id=master_id,\n image_type=image_type))\n","sub_path":"retrotechclub/views/image_views.py","file_name":"image_views.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"485476532","text":"from pypokerengine.players import BasePokerPlayer\nimport random as rand\nimport pprint\nfrom pypokerengine.players import BasePokerPlayer\nfrom pypokerengine.api.emulator import Emulator\nfrom pypokerengine.utils.game_state_utils import restore_game_state\nimport numpy as np\nfrom keras.layers import Input, Dense, Conv2D,concatenate,Flatten\nfrom keras.models import Model\nimport pandas as pd\n\nclass RandomPlayer(BasePokerPlayer):\n\n def __init__(self):\n self.vvh = 0\n\n def keras_model():\n\n input_cards = Input(shape=(4,13,4), name=\"cards_input\")\n input_actions = Input(shape=(2,5,4), name=\"actions_input\")\n input_position = Input(shape=(1,),name=\"position_input\")\n\n x1 = Conv2D(32,(2,2),activation='relu')(input_cards)\n x2 = Conv2D(32,(2,2),activation='relu')(input_actions)\n x3 = Dense(1,activation='relu')(input_position)\n\n d1 = Dense(128,activation='relu')(x1)\n d1 = Flatten()(d1)\n d2 = Dense(128,activation='relu')(x2)\n d2 = Flatten()(d2)\n x = concatenate([d1,d2,x3])\n x = Dense(128)(x)\n x = Dense(32)(x)\n out = Dense(3)(x)\n\n model = Model(inputs=[input_cards, input_actions,input_position], outputs=out)\n # if (self.vvh == 0):\n # model.load_weights('aaaaaa.h5', by_name=True)\n model.compile(optimizer='rmsprop', loss='mse')\n\n return model\n\n self.table = {}\n self.my_uuid = None\n self.e = 0.1\n self.experience_state = []\n self.experience_reward = []\n self.play = False\n self.model =keras_model()\n\n\n\n\n def declare_action(self, valid_actions, hole_card, round_state):\n\n for gghh in round_state[\"action_histories\"]:\n print(gghh)\n\n\n\n def 
getcardx(card):\n suit = card[0]\n if(suit == 'S'):\n return 0\n elif(suit == 'H'):\n return 1\n elif(suit=='D'):\n return 2\n elif(suit=='C'):\n return 3\n\n def getcardy(card):\n index = card[1]\n if(index=='A'):\n return 12\n elif(index=='K'):\n return 11\n elif(index=='Q'):\n return 10\n elif(index=='J'):\n return 9\n elif(index=='T'):\n return 8\n else:\n return int(index)-2\n\n def getstreetgrid(cards):\n grid = np.zeros((4,13))\n for card in cards:\n grid[getcardx(card),getcardy(card)] = 1\n return grid\n\n def converttoimagemeth(eff_stack,round_state,street):\n image = np.zeros((2,5))\n actions = round_state[\"action_histories\"][street]\n index = 0\n turns = 0\n\n\n for action in actions:\n #max of 12actions per street\n if ('amount' in action and turns < 5):\n # print(\"inside action\")\n # print(action['amount'])\n image[index,turns] = action['amount'] / eff_stack\n index += 1\n\n if(index%2 == 0):\n index=0\n turns +=1\n\n return image\n\n\n sb_cards = [ hole_card[0], hole_card[1] ]\n\n getcardimg = getstreetgrid(sb_cards)\n flop_cards_img = np.zeros((4,13))\n turn_cards_img = np.zeros((4,13))\n river_cards_img = np.zeros((4,13))\n flop = []\n turn = []\n river = []\n\n\n self.my_uuid = round_state['seats'][round_state['next_player']]['uuid']\n self.my_cards = hole_card\n self.community_card = round_state['community_card']\n\n starting_stack = 10000\n\n if (self.play == True):\n self.old_state = self.sb_features\n self.targetQ = self.allQ_sb\n self.oldAction = self.action_sb\n\n sb_position = 1\n\n flop_actions = np.zeros((2,5))\n turn_actions = np.zeros((2,5))\n river_actions = np.ones((2,5))\n\n preflop_actions = converttoimagemeth(starting_stack,round_state,'preflop')\n\n if (round_state['street'] == 'flop'):\n flop = round_state['community_card']\n flop_cards_img = getstreetgrid(flop)\n flop_actions = converttoimagemeth(starting_stack,round_state,'flop')\n\n if ( round_state['street'] == 'turn'):\n turn = round_state['community_card'][2]\n turn_cards_img = getstreetgrid([turn])\n turn_actions = converttoimagemeth(starting_stack,round_state,'turn')\n if (round_state['street'] == 'river'):\n river = round_state['community_card'][3]\n river_cards_img = getstreetgrid([river])\n river_actions = converttoimagemeth(starting_stack,round_state,'river')\n\n self.actions_feature = np.stack([preflop_actions,flop_actions,turn_actions,river_actions],axis=2).reshape((1,2,5,4))\n sb_cards_feature = np.stack([getcardimg,flop_cards_img,turn_cards_img,river_cards_img],\n axis=2).reshape((1,4,13,4))\n # print(\"action_feature\")\n # print(actions_feature.shape)\n # print(\"sb_cards_feature\")\n # print(sb_cards_feature.shape)\n self.sb_features = [sb_cards_feature,self.actions_feature,np.array([sb_position]).reshape((1,1))]\n # print(\"combine\")\n # print(self.sb_features.shape)\n #if round_state['seats'][round_state['big_blind_pos']]['uuid'] == self.my_uuid:\n y = 0.9\n max_replay_size = 40\n self.action_sb = 3\n #run model to choose action\n self.allQ_sb = self.model.predict(self.sb_features)\n self.action_sb = np.argmax(self.allQ_sb)\n reward_sb = 0\n if(self.play == True):\n reward_sb += y*np.max(self.allQ_sb)\n\n\n self.targetQ[0,self.action_sb] = reward_sb\n self.vvh = self.vvh + 1\n #new_name = 'my_model_weights'\n #model.fit(self.old_state,self.targetQ,verbose=0)\n self.experience_state.append(self.old_state)\n self.experience_reward.append(self.targetQ)\n if(len(self.experience_state) > max_replay_size):\n del self.experience_state[0]\n del self.experience_reward[0]\n #new_name = 
new_name + str(self.vvh) + '.h5'\n #self.model.save_weights(new_name)\n\n\n self.play = True\n\n e = 0.1\n\n for ve in range(len(self.experience_state)):\n #print(\"hello\")\n self.model.fit(self.experience_state[ve],self.experience_reward[ve],verbose = 0)\n #print(\"finish\")\n\n\n if(np.random.rand(1) < e):\n self.action_sb = np.random.randint(0,4)\n\n if (self.action_sb == 3 or len(valid_actions ) == 2):\n self.action_sb = 1\n\n if(self.action_sb == 0):\n #game_state,events = emulator.apply_action(game_state,'fold',0)\n call_action_info = valid_actions[0]\n action = call_action_info[\"action\"]\n return action\n\n if(self.action_sb == 1):\n call_action_info = valid_actions[1]\n action = call_action_info[\"action\"]\n return action\n\n\n if(self.action_sb == 2):\n call_action_info = valid_actions[2]\n action = call_action_info[\"action\"]\n return action\n\n\n\n def receive_game_start_message(self, game_info):\n\n pass\n\n def receive_round_start_message(self, round_count, hole_card, seats):\n pass\n\n def receive_street_start_message(self, street, round_state):\n pass\n\n def receive_game_update_message(self, action, round_state):\n pass\n\n def receive_round_result_message(self, winners, hand_info, round_state):\n print(\"winners\")\n for i in winners:\n\n print(i)\n\n pass\n\ndef setup_ai():\n return RandomPlayer()\n","sub_path":"randomplayer.py","file_name":"randomplayer.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622517967","text":"import itertools\n# count\n\n# cycle\n\n# repeat\n\n# natuals = itertools.count(1)\n# ns = itertools.takewhile(lambda x: x <= 10, natuals)\n# print(list(ns))\n\n# chain\n# for c in itertools.chain(\"ABC\", \"XYZ\"):\n# print(c)\n\n# groupby\n# for key, group in itertools.groupby(\"AAABBBCCAAA\"):\n# print(key, list(group))\n\n\n# def pi(N):\n# natuals = itertools.count(1, 2)\n# ns = itertools.takewhile(lambda x: x <= 2 * N - 1, natuals)\n# # N_ns = ns[:N]\n# final_ns = map(lambda x: (-1) ** ((x - 1) / 2 % 2) * 4 / x, ns)\n# return sum(final_ns)\n\ndef pi(N):\n numbers = itertools.count(1, 2)\n numbers = itertools.takewhile(lambda x: x <= 2 * N - 1, numbers)\n numbers2 = map(lambda x: (-1) ** ((x - 1) / 2 % 2) * 4 / x, numbers)\n return sum(numbers2)\n\n# 测试:\nprint(pi(10))\nprint(pi(100))\nprint(pi(1000))\nprint(pi(10000))\nassert 3.04 < pi(10) < 3.05\nassert 3.13 < pi(100) < 3.14\nassert 3.140 < pi(1000) < 3.141\nassert 3.1414 < pi(10000) < 3.1415\nprint('ok')","sub_path":"learn_itertools.py","file_name":"learn_itertools.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"582352051","text":"from tkinter import *\r\n\r\nfrom tkinter import ttk\r\n\r\nimport math\r\n\r\nfrom tkinter import messagebox as ms\r\n\r\n\r\n\r\nroot=Tk()\r\n\r\nroot.title(\"Factorial calculator\")\r\n\r\nresult=StringVar()\r\n\r\nfact=StringVar()\r\n\r\ndef factorial(event=None):\r\n\r\n try:\r\n\r\n n=int(fact.get())\r\n\r\n count=0\r\n\r\n while count==0:\r\n\r\n if n>=1000:\r\n\r\n return ms.showinfo('error',\"{} is too large a number!\".format(n))\r\n\r\n else:\r\n count=1\r\n\r\n x=math.factorial(n)\r\n\r\n return result.set(x)\r\n\r\n except:\r\n\r\n result.set('ERROR!!')\r\n\r\n fact.set('ERROR!!')\r\n\r\n\r\ndef reset(event2=None):\r\n\r\n fact.set(\"\")\r\n\r\n 
result.set(\"\")\r\n\r\nroot.geometry(\"550x350+100+100\")\r\n\r\nroot.bind('',reset)\r\n\r\nroot.bind('',factorial)\r\n\r\nlabel=Label(root,text='Enter number',bd=5,font='chiller 15 bold',bg='steel blue',width=20)\r\n\r\nlabel.place(relx=0.1,rely=0.1)\r\n\r\nlabel2=Label(root,text='Result',bd=5,font='chiller 15 bold',bg='steel blue',width=20)\r\n\r\nlabel2.place(relx=0.1,rely=0.3)\r\n\r\nent1=Entry(root,textvariable=result,bd=10,width=30,bg='steel blue',font='arial 10 bold')\r\n\r\nent1.place(relx=0.5,rely=0.3)\r\n\r\nent2=Entry(root,textvariable=fact,bd=10,width=30)\r\n\r\nent2.place(relx=0.5,rely=0.1)\r\n\r\nbtn=ttk.Button(root,text='calculate',command=factorial,width=70)\r\n\r\nbtn.place(relx=0.1,rely=0.6)\r\n\r\nbtn=ttk.Button(root,text='reset',command=reset,width=70)\r\n\r\nbtn.place(relx=0.1,rely=0.5)\r\n\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"hackerearth/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"603018380","text":"from linked_list import *\n\nclass HashTable:\n\n\t#attributes = list of mod 30 elements, store each value\n\tdef __init__(self):\n\t\tself.T = [None for i in range(30)]\n\t\t\n\t#insert string into hash table\n\tdef inserthash(self,strs):\n\t\tl = list(strs)\n\t\tn = len(l)\n\n\t\ta = 33 #prime number for min. collision\n\t\tp = ord(l[n-1])\n\t\tj = n-2\n\n\t\twhile j >= 0:\n\t\t\tp = p*a + ord(l[j])\n\t\t\tj = j-1\n\t\t\t\n\t\ti = p%30\n\n\n\t\tif self.T[i] == None:\n\t\t\tself.T[i] = LinkedList()\n\t\t\ttemp = self.T[i].head\n\t\t\tself.T[i].insertLL(strs, temp)\n\n\t\telse:\n\t\t\ttemp = self.T[i].head\n\t\t\t# while temp.next!=None:\n\t\t\t# \ttemp = temp.next\n\t\t\tself.T[i].insertLL(strs,temp)\n\n\tdef searchhash(self,strs):\n\t\tl = list(strs)\n\t\tn = len(l)\n\n\t\ta = 33 #prime number for min. collision\n\t\tp = ord(l[n-1])\n\t\tj = n-2\n\n\t\twhile j >= 0:\n\t\t\tp = p*a + ord(l[j])\n\t\t\tj = j-1\n\t\t\t\n\t\ti = p%30\n\n\n\t\ttemp = self.T[i]\n\t\tx = temp.searchLL(strs)\n\t\tif (x == \"Found\"):\n\t\t\treturn \"Found\"\n\n\t\treturn \"Not Found\"\n\n\tdef displayhash(self,strs):\n\t\tl = list(strs)\n\t\tn = len(l)\n\n\t\ta = 33 #prime number for min. 
collision\n\t\tp = ord(l[n-1])\n\t\tj = n-2\n\n\t\twhile j >= 0:\n\t\t\tp = p*a + ord(l[j])\n\t\t\tj = j-1\n\t\t\t\n\t\ti = p%30\n\n\t\tresult2 = []\n\n\t\tfor i in range(0,30):\n\t\t\tif self.T[i] != None:\n\t\t\t\ttemp = self.T[i]\n\t\t\t\tresult = list(temp.searchLLHash(strs))\n\t\t\t\tfor word in result:\n\t\t\t\t\tif word!=[]:\n\t\t\t\t\t\tresult2.append(word)\n\t\treturn result2\n\n\n\tdef keys(self):\n\t\tcount = 0;\n\t\tl = []\n\n\t\tfor i in range(0,30):\n\t\t\tif self.T[i] != None:\n\t\t\t\tl.append(i)\n\n\t\treturn l\n\n\n# def main():\n# \tH = HashTable()\n# \tH.inserthash(\"Please\")\n# \tH.inserthash(\"Testing\")\n# \tH.inserthash(\"Python\")\n# \tprint(H.keys())\n# \tH.searchhash(\"Please\")\n# \tH.searchhash(\"Test\")\n# \tH.searchhash(\"Testing\")\n\n# if __name__ == '__main__':\n# main()\n\n \n\n\n","sub_path":"3rd Semester/IT200 DSA/Lab 4/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"251428457","text":"\"\"\"\nDjango Model处理函数库。\n\"\"\"\nimport json\nimport logging\nimport datetime\nfrom django.db import models as basemodels\nfrom django.utils import timezone\nfrom zencore.errors import WrongParameterTypeError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef json_dumper(target):\n \"\"\"\n 数据对象json序列化分析器。\n \"\"\"\n if isinstance(target, basemodels.Model):\n data = {}\n data.update(target.__dict__)\n keys = list(data.keys())\n for key in keys:\n if key.startswith(\"_\"):\n del data[key]\n return data\n if isinstance(target, datetime.datetime):\n return timezone.localtime(target).isoformat()\n raise ValueError(\"{} is not json serializable.\")\n\n\ndef json_dumps(target):\n \"\"\"\n 数据对象json序列化。\n \"\"\"\n return json.dumps(target, default=json_dumper)\n\n\ndef make_search_dict(**kwargs):\n \"\"\"\n 文字模糊搜索字典。\n \"\"\"\n filters = {}\n for key, value in kwargs.items():\n if value:\n key = \"{}__icontains\".format(key)\n filters[key] = value\n return filters\n\n\ndef get_user_object(pk, user, model, *search_fields, user_field_name=\"user\"):\n \"\"\"获取用户所属对象。\n \"\"\"\n if isinstance(user, int):\n user_id = user\n elif isinstance(user, str):\n user_id = int(user)\n elif isinstance(user, basemodels.Model):\n user_id = user.pk\n else:\n return None\n queryset = model.objects.filter(**{user_field_name+\"__pk\":user_id})\n return get_object(pk, queryset, *search_fields)\n\n\ndef get_object(pk, queryset, *search_fields):\n \"\"\"获取模型对象。\n \"\"\"\n ok = False\n try:\n if issubclass(queryset, basemodels.Model):\n model = queryset\n queryset = model.objects\n ok = True\n except TypeError:\n pass\n try:\n if isinstance(queryset, basemodels.QuerySet):\n model = queryset.model\n ok = True\n except TypeError:\n pass\n if not ok:\n logger.error(\"Django model helper function [get_object]'s parameter queryset can be models.Model or models.QuerySet, but got: {}.\".format(queryset))\n raise WrongParameterTypeError()\n if isinstance(pk, model):\n return pk\n def get(name, value):\n \"\"\"从数据库中查取对象。\n \"\"\"\n condition = {name: value}\n try:\n return queryset.get(**condition)\n except model.DoesNotExist:\n return None\n search_fields = list(search_fields)\n search_fields.insert(0, \"pk\")\n for field_name in search_fields:\n if isinstance(pk, int):\n node = get(field_name, pk)\n if node:\n return node\n if isinstance(pk, str) and pk.isdigit():\n node = get(field_name, int(pk))\n if node:\n return node\n node = get(field_name, pk)\n if node:\n 
return node\n return None\n","sub_path":"src/zencore/django/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"302928671","text":"class MyCircularQueue:\n\tmyQueue = []\n\thead = tail = 0\n\n\tdef __init__(self, k:int):\n\t\tmyQueue = [-1] * k\n\t\thead = tail = 0\n\tdef enQueue(self, value: int) -> bool:\n\t\tglobal tail\n\t\tif(tail < len(myQueue) -1):\n\t\t\tmyQueue.append(value)\n\t\t\ttail += 1\n\t\telif(tail == len(myQueue) - 1):\n\t\t\ttail = 0\n\t\t\tif(myQueue[tail] == -1):\n\t\t\t\tmyQueue[tail] = value\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\treturn True\n\n\tdef deQueue(self) ->bool:\n\t\tglobal head\n\t\tmyQueue[head] = -1\n\t\thead += 1\n\n\tdef printQueue(self):\n\t\tprint(myQueue)\n\ndef main():\n\tobj = MyCircularQueue(10)\n\ttest1 = obj.enQueue(12).printQueue()\n\nmain()","sub_path":"myCircularQueue.py","file_name":"myCircularQueue.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"43046774","text":"# https://oj.leetcode.com/problems/letter-combinations-of-a-phone-number/\n\nclass Solution:\n # @return a list of strings, [s1, s2]\n def letterCombinations(self, digits):\n if len(digits) == 0:\n return ['']\n\n n = len(digits)\n # digit_to_letter is the map from digit to letters\n digit_to_letter = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']\n\n # using depth-first search to search solution\n self.path = []\n self.ans = []\n self.dfs(0, n, digits, digit_to_letter)\n\n return self.ans\n\n def dfs(self, k, n, digits, digit_to_letter):\n if k == n:\n self.ans.append(''.join(self.path))\n return\n\n for c in digit_to_letter[int(digits[k])]:\n self.path.append(c)\n self.dfs(k+1, n, digits, digit_to_letter)\n self.path.pop()\n","sub_path":"leetans/letterCombine.py","file_name":"letterCombine.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"109911111","text":"from django.shortcuts import render, redirect\nfrom .forms import InputForm, PredictForm\nfrom .GPModel.GPM_django_PF import runmodel, stabilityclass_latlon, stabilityclass_input\nfrom .GPModel.releaseprediction import RH_APIcall\nfrom .models import EntryRequest\n\ndef index(request):\n return render(request, 'dispersal/index.html')\ndef about(request):\n return render(request, 'dispersal/about.html')\ndef contact(request):\n return render(request, 'dispersal/contact.html')\ndef entries(request):\n context ={'database' : EntryRequest.objects.all()}\n return render(request, 'dispersal/database.html', context )\n\ndef run(request):\n template_name = 'dispersal/run.html'\n form = InputForm()\n return render(request, template_name, {'form': form})\n\ndef release(request):\n template_name = 'dispersal/prediction.html'\n form = PredictForm()\n return render(request, template_name, {'form': form})\n\ndef predictions(request):\n if request.method == 'POST':\n form = PredictForm(request.POST)\n if form.is_valid():\n lat = form.cleaned_data['lat']\n NS = form.cleaned_data['NS']\n lon = form.cleaned_data['lon']\n WE = form.cleaned_data['WE']\n lat=float(NS+lat)\n lon=float(WE+lon)\n prediction,city,country=RH_APIcall(lat,lon)\n return render(request, 'dispersal/prediction_results.html', {'context':prediction,'city':city,'country':country})\n else:\n return 
redirect('/dispersal/predictions')\n\ndef results(request):\n if request.method == 'POST':\n form = InputForm(request.POST)\n if form.is_valid():\n message=''\n graph='2D'\n lat = form.cleaned_data['lat']\n NS = form.cleaned_data['NS']\n lon = form.cleaned_data['lon']\n WE = form.cleaned_data['WE']\n location=str(NS+lat)+' , '+str(WE+lon)\n lat=float(NS+lat)\n lon=float(WE+lon)\n # print(request.POST)\n # print('/////////////')\n # print(request.POST.get('weathercheck'))\n try:\n stabilityclasses,wind,RH,I,R,clouds,UV,city, country =stabilityclass_latlon(lat,lon)\n except Exception as e:\n message='It seems like there has been an error regarding the API that gathers weather data.\\nYou can try again later or run the model inputting yourself the weather data.'\n return render(request,'dispersal/error.html',{'exception':message})\n\n if request.POST.get('weathercheck') == \"on\":\n try:\n I = int(form.cleaned_data['UV'])\n wind = float(form.cleaned_data['wind'])\n clouds = int(form.cleaned_data['cloudiness'])/100\n R = float(form.cleaned_data['rain'])\n stabilityclasses=stabilityclass_input(wind,clouds,I)\n except Exception as e:\n message='There has been an error with the given data. Please input the values correctly.'\n return render(request,'dispersal/error.html',{'exception':message})\n\n H = float(form.cleaned_data['height'])\n bushperc = int(form.cleaned_data['bushperc'])/100\n leafperc = float(form.cleaned_data['leafperc'])/100\n # Calculating the source strength based on percentage of infection\n # sporesinleaf = 8.29* 993 mm2 * 7111.37 = 58540948.1 spores in leaf\n # Q(spores/s) =58540948.1 * 0.0283 (% released)/3600 (s) = 460.2\n sporesinleaf = 460.2 * leafperc\n # Number of leaves in bush (approx 1m2)* % bush infected\n leafinbush= 900* bushperc\n Q= round(sporesinleaf*leafinbush,2)\n\n try:\n maxdistances=runmodel(graph,H,Q, float(wind),I,R,clouds,stabilityclasses)\n except Exception as e:\n message=\"There has been an error with running the model. 
Maintance might be needed.\"\n return render(request,'dispersal/error.html',{'exception':message})\n\n print(country,city,bushperc,leafperc,H,Q,stabilityclasses,R,RH,UV,wind,location,maxdistances['Day'][4])\n print(location)\n entry = EntryRequest(country=country, city=city,bushperc=bushperc*100,leafperc=leafperc*100,\n height=H,Q=Q,stability_class=stabilityclasses,rain=R,RH=RH,irradiance=UV,\n wind=wind,location=str(location),\n maxdis=max([str(maxdistances['Day'][4]),str(maxdistances['Night'][4])]))\n try:\n entry.save()\n except Exception as e:\n message = \"Entry could not be recorded\"\n\n context={'source':Q,'country':country,'city':city,\n 'rain': R,'RH': RH,'clouds':round(clouds*100,1),'Irradiance': I,\n 'wind': wind, 'bushperc': round(bushperc*100,0), 'leafperc': leafperc*100,\n 'X99d': maxdistances['Day'][3],'X99n': maxdistances['Night'][3],\n 'XminD': maxdistances['Day'][4],'XminN': maxdistances['Night'][4],\n 'X95d': maxdistances['Day'][0],'X75d': maxdistances['Day'][1],\n 'X50d': maxdistances['Day'][2],'X95n': maxdistances['Night'][0],\n 'X75n': maxdistances['Night'][1],'X50n': maxdistances['Night'][2],\n 'imgday': maxdistances['Day'][5].decode('utf-8'),'imgnight':maxdistances['Night'][5].decode('utf-8'),\n 'msgs': message}\n return render(request, 'dispersal/results.html', context)\n else:\n print('form no valid')\n form = InputForm()\n return redirect('/dispersal/run/')\n\n else:\n return redirect('/dispersal/run/')\n","sub_path":"dispersal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"301545364","text":"# Metrics and evaluation\ndef compute_avg_return(environment, policy, num_episodes=10):\n total_return = 0.0\n for _ in range(num_episodes):\n\n time_step = environment.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n total_return += episode_return\n\n tensor_avg_return = total_return / num_episodes\n num_avg_return = tensor_avg_return.numpy()[0]\n return num_avg_return\n\n\ndef win_rate(environment, policy, num_episodes=10):\n wins = 0\n for _ in range(num_episodes):\n time_step = environment.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n if episode_return >= 1:\n wins += 1\n return (wins / num_episodes) * 100\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"626751722","text":"\"\"\"Prints todos to various outputs and formats\"\"\"\nfrom typing import List\n\nfrom .todo import Todo\n\ntry:\n import rich\nexcept ImportError:\n rich = None\n\n\nclass ConsolePrinter:\n \"\"\"Prints todos to the console\"\"\"\n\n def __init__(self, todos: List[Todo]):\n self.todos = todos\n self.to_print = []\n\n def format(self):\n \"\"\"Formats the todos to be printable\"\"\"\n if not self.todos:\n print(\"No todos found.\")\n return\n longest_name = max(self.todos, key=lambda t: len(f\"{t.file_name}:{t.linepos} \"))\n padding_size = len(f\"{longest_name.file_name}:{longest_name.linepos} \")\n for todo in self.todos:\n associates = (\" (\" + \", \".join(todo.associates) + \")\") if todo.associates else \"\"\n 
current_name = len(todo.file_name + str(todo.linepos) + \" \")\n self.to_print.append(\n f\"{todo.file_name}:{todo.linepos} \"\n f\"{' '*(padding_size-current_name)}{todo.tag}\"\n f\"{associates}: {todo.text}\"\n )\n\n def print(self):\n \"\"\"Does the actual printing\"\"\"\n self.format()\n for todo in self.to_print:\n print(todo)\n\n\nclass ColoredConsolePrinter:\n \"\"\"Prints todos to the console\"\"\"\n\n def __init__(self, todos: List[Todo]):\n self.todos = todos\n self.to_print = []\n\n def format(self):\n \"\"\"Formats the todos to be printable\"\"\"\n if not self.todos:\n print(\"No todos found.\")\n return\n longest_name = max(self.todos, key=lambda t: len(f\"{t.file_name}:{t.linepos} \"))\n padding_size = len(f\"{longest_name.file_name}:{longest_name.linepos} \")\n for todo in self.todos:\n associates = (\" (\" + \", \".join(todo.associates) + \")\") if todo.associates else \"\"\n current_name = len(todo.file_name + str(todo.linepos) + \" \")\n if rich:\n self.to_print.append(\n f\"[bold yellow]{todo.file_name}:{todo.linepos}[/]\"\n f\"[bold green]{' '*(padding_size-current_name)}{todo.tag}[/]\"\n f\"[bold cyan]{associates}[/]: {todo.text}\"\n )\n else:\n self.to_print.append(\n f\"\\x1b[1;33m{todo.file_name}:{todo.linepos}\\x1b[0m \"\n f\"\\x1b[1;32m{' '*(padding_size-current_name)}{todo.tag}\\x1b[0m\"\n f\"\\x1b[1;36m{associates}\\x1b[0m: {todo.text}\"\n )\n\n def print(self):\n \"\"\"Does the actual printing\"\"\"\n self.format()\n for todo in self.to_print:\n if rich:\n rich.print(todo)\n else:\n print(todo)\n\n\nclass TextFilePrinter(ConsolePrinter):\n \"\"\"Prints todos to a text file\"\"\"\n\n def __init__(self, todos: List[Todo], file_name: str):\n super().__init__(todos)\n self.file_name = file_name or \"todo.txt\"\n\n def print(self):\n self.format()\n with open(self.file_name, \"w\", encoding=\"utf-8\") as file:\n for todo in self.to_print:\n file.write(todo + \"\\n\")\n if self.todos:\n print(f\"Successfully saved all todos to {self.file_name}\")\n\n\nclass MarkdownFilePrinter(TextFilePrinter):\n \"\"\"Prints todos to a markdown file\"\"\"\n\n def __init__(\n self,\n todos: List[Todo],\n file_name: str,\n ):\n super().__init__(todos, file_name or \"TODO.md\")\n\n def format(self):\n \"\"\"Formats the todos to be printable\"\"\"\n if not self.todos:\n print(\"No todos found.\")\n return\n self.to_print.append(\"# TODO.md\\n\")\n for todo in self.todos:\n associates = \", \".join(\"@\" + i for i in todo.associates) if todo.associates else \"\"\n self.to_print.append(f\"- {todo.text} #{todo.tag} {associates} ({todo.file_name}:{todo.linepos}) \")\n\n\nclass GithubFlavouredMarkdownFilePrinter(MarkdownFilePrinter):\n \"\"\"Prints todos to a markdown file\"\"\"\n\n def __init__(self, todos: List[Todo], file_name: str, repo: str, branch: str = None):\n super().__init__(todos, file_name or \"TODO.md\")\n self.repo = repo\n self.branch = branch or \"master\"\n\n def format(self):\n \"\"\"Formats the todos to be printable\"\"\"\n if not self.todos:\n print(\"No todos found.\")\n return\n self.to_print.append(\"# TODO.md\\n\")\n\n for todo in self.todos:\n associates = \", \".join(\"@\" + i for i in todo.associates) if todo.associates else \"\"\n if self.repo:\n filename = todo.file_name.replace(\"\\\\\", \"/\")\n file = f\"[{todo.file_name}:{todo.linepos}]({self.repo}/blob/{self.branch}/{filename}#L{todo.linepos})\"\n else:\n file = f\"{todo.file_name}:{todo.linepos}\"\n self.to_print.append(f\"- [ ] {todo.text} #{todo.tag} {associates} ({file}) 
\")\n","sub_path":"todot/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385964623","text":"import asyncio\nimport colorsys\nimport copy\nimport io\nimport json\nimport logging\nimport random\nimport re\nimport sys\n\nimport aiohttp\nimport aiomysql\nimport discord\nimport markovify\nfrom imgurpython import ImgurClient\nfrom imgurpython.helpers.error import *\nfrom PIL import Image\n\n# Data values for storage database:\n\nMESSAGE_FEATURE = 0\nGREENTEXT_BANNED = 1\nRAINBOW_ROLE = 2\n# bot_data vals: 0 == message_feature. 1 == banned channel for\n# greentext. 2 == role to rainbow colours\n\n\n# bad code, I need to change this. Checks if bot is blocked from channel\ndef test_for_blocked(thing, exceptions):\n for i in channelExcept:\n if str(thing) in i:\n return False\n return True\n\n\n# checks if any item of exceptions is in thing\ndef test_for_blocked2(thing, exceptions):\n for i in exceptions:\n if i in thing:\n return False\n return True\n\n\ndef strip_char(lst, chr):\n return [i.strip(chr) for i in lst]\n\n\n# open file, will return content of file. if string is provided, it is\n# appended to the end of the file\ndef append_and_reload(fname, string=False):\n file = open(fname, 'r+')\n array = file.readlines()\n if string and not str(string) in array:\n array.append(string)\n file.write('\\n{}'.format(string))\n done = True\n else:\n done = False\n file.close()\n return strip_char(array, '\\n'), done\n\n\n# opens json file and returns it as a dict. If string is provided, the\n# json File is replaced with string.\ndef save_and_open_json(fname, string=False):\n try:\n if string:\n with open(fname, 'w') as jsonFile:\n json.dump(string, jsonFile, indent=4,\n sort_keys=True, separators=(',', ':'))\n with open(fname) as jsonFile:\n return json.load(jsonFile)\n except:\n with open(fname, 'w') as jsonFile:\n json.dump({}, jsonFile, indent=4,\n sort_keys=True, separators=(',', ':'))\n with open(fname) as jsonFile:\n return json.load(jsonFile)\n\n return strip_char(array, '\\n'), done\n\n\n# adds periods to end of text\ndef get_periods(*text):\n text = list(text)\n regex = re.compile(\"^.+$(?(text) : I will convert this to greentext\",\n \"#!banGreenText : I will ban greentext on the channel it was sent in!\",\n \"#!markov (user) : I will generate a markov chained message based on this user (no user set will result in a random user being selected)\",\n \"#!sendMessage (optional: text): generate a text string.\",\n \"#!createMessage (message): add a message to the database, use {0}, {1}, {2} etc to substitute in the first, second, \\\n third @mention respectively, try to keep it to {0} and {1} only so that you dont get bugged messages\",\n \"#!setAvatar : I will set my avatar to an attached image (upload image and add this command to the text)\",\n \"#!meme : I'll find you a meme!\",\n \"#!userData : I'll PM you some stuff about yourself in the server you send it from.\"\n \"#!image (search tags) : I'll search imgur for an image with the specified tags! (Not yet IMPLEMENTED) (tell me if this works and this message is not removed!)\",\n \"#!rainbowRoles : I'll make your roles go rainbow (I must be have the permission to edit roles, and your role must not be above the role assigned to this bot)\",\n \"#!rainbowSpeed (1 <= speed <= 480) : I'll set the rainbow speed complete 1 loop in the number of seconds you specify! 
(will never be 1s per cycle due to\\\n ratelimit on editing roles.)\"\n]\n\n\nmarkov_commands = [\n 'count'\n]\n\n\n# assign these, so they can be tested for later (if for some reason a json\n# open fails)\nglobal dbInfo, ImgurInfo\n\ndbInfo = False\nimgurInfo = False\ndbConn = False\n\nbasedb = {\"host\": \"127.0.0.1\", \"user\": \"root\",\n \"password\": \"\", \"port\": 3306, \"dbtype\": \"mysql\"}\n\n# open database file\ntry:\n dbInfo = save_and_open_json(\"database.json\")\n print(dbInfo)\nexcept:\n save_and_open_json(\"database.json\", basedb)\n programFailed[\"state\"] = True\n programFailed[\"reason\"].append(\n \"Please place database connection info into the \\'database.json\\' file\")\n\n\nbaseImgur = {\"id\": None, \"secret\": None}\n\n# open imgur api json\ntry:\n imgurInfo = save_and_open_json(\"imgurInfo.json\")\n if (not imgurInfo[\"id\"]) or (not imgurInfo[\"secret\"]):\n raise Warning\n print(imgurInfo)\nexcept Warning:\n programFailed[\"state\"] = True\n programFailed[\"reason\"].append(\n \"Imgur file is invalid, please check there are values for both \\'id\\' and \\'secret\\'!\")\nexcept:\n save_and_open_json(\"imgurInfo.json\", baseImgur)\n programFailed[\"state\"] = True\n programFailed[\"reason\"].append(\n \"Please place imgur api keys into \\'imgurInfo.json\\' file\")\n\n\nclient = discord.Client()\nqueue = asyncio.Queue()\n\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(\n filename='discord.log', encoding='utf8', mode='w')\nhandler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\n\nasync def rainbow_colours():\n await client.wait_until_ready()\n global dbConn\n roleObjects = []\n rainbowSpeed = 0.1\n if dbConn:\n cur = await dbConn.cursor()\n await cur.execute(\"SELECT data FROM discordbot.tea_party_bot_data where type = {n};\".format(n=RAINBOW_ROLE))\n rainbowRoles = [i[0] for i in await cur.fetchall()]\n await cur.close()\n\n for i in client.servers:\n for c in i.roles:\n if c.id in rainbowRoles:\n roleObjects.append(c)\n\n print(\"\\nrainbowRoles = {}\".format(str(rainbowRoles)))\n print(\"roleObjects = {}\\n\".format(str(roleObjects)))\n while not client.is_closed:\n try:\n if not queue.empty():\n print(\"queue get\")\n for i in range(queue.qsize()):\n item = await queue.get()\n print(item)\n if item[0] == \"rainbowTime\":\n rainbowSpeed = item[1]\n elif item[1] == \"rainbowObjects\":\n roleObjects.append(item[1])\n if roleObjects:\n for i in range(360):\n r, g, b = [round(c * 255)\n for c in colorsys.hsv_to_rgb(i / 360, 1, 1)]\n for role in roleObjects:\n color = discord.colour\n color.value = r * 256 * 256 + g * 256 + b\n color.r = r\n color.g = g\n color.b = b\n await client.edit_role(role.server, role, colour=color)\n await asyncio.sleep(rainbowSpeed)\n except:\n print(\"exception in rainbow loop\")\n await asyncio.sleep(rainbowSpeed)\n\n@client.event\nasync def on_ready():\n await client.change_presence(game=discord.Game(name='#!cmds and some stuff'))\n global dbConn\n dbConn = await aiomysql.connect(host=dbInfo[\"host\"],\n port=dbInfo[\"port\"],\n user=dbInfo[\"user\"],\n password=dbInfo[\"password\"],\n db=dbInfo[\"dbtype\"],\n charset='utf8')\n print(dbConn)\n global imgurConn\n imgurConn = ImgurClient(imgurInfo[\"id\"], imgurInfo[\"secret\"])\n client.loop.create_task(rainbow_colours())\n print('connected to server as {}'.format(client.user))\n\n\n@client.event\nasync def on_message(message):\n global dbConn\n if (not 
message.author.bot) and dbConn and message.author != client.user:\n\n if not message.server:\n await client.send_message(message.channel, 'Nope. Message me on a real server please!')\n return\n\n userRegex = re.compile(\"\\<\\@\\d+\\>\")\n print('\\nmessage from {user}: {msg} on channel: {chn}\\n'.format(\n user=message.author, msg=message.content, chn=message.channel))\n if message.content:\n try:\n cur = await dbConn.cursor()\n await cur.execute(\"INSERT INTO discordbot.messages_tea_party (user, message, id, channel) VALUES ('{u}', '{m}', '{i}', '{c}');\".format(\n u=str(message.author.id).replace(\"'\", r\"\\'\"), m=str(message.clean_content).replace(\"'\", r\"\\'\"), i=message.id, c=message.server.id))\n await dbConn.commit()\n await cur.close()\n print('added message: {msg} from user: {usr}'.format(\n msg=message.clean_content, usr=message.author.id))\n except:\n print('MYSQL exception, attempting to reconnect!')\n dbConn.close()\n dbConn = await aiomysql.connect(host=dbInfo[\"host\"],\n port=dbInfo[\"port\"],\n user=dbInfo[\"user\"],\n password=dbInfo[\"password\"],\n db=dbInfo[\"dbtype\"],\n charset='utf8')\n return\n\n if message.content.lower().startswith('#!cmds'):\n await client.send_message(message.author, 'list of my commands: ```{}```'.format('\\n\\n'.join(cmdList)))\n\n if message.content.lower().startswith(\"#!rainbowspeed\"):\n try:\n speed = float(message.content.split()[-1])\n if not 1.0 <= speed <= 480:\n raise ValueError\n # turn into time per iteration\n await queue.put([\"rainbowTime\", speed / 360])\n print(speed)\n await client.send_message(message.channel, \"Set speed of rainbow to specified timing! New speed will apply when the current cycle finishes!\")\n except ValueError:\n await client.send_message(message.channel, \"Error setting time! Please specify a numerical value in the range 1 to 480!\")\n except IndexError:\n await client.send_message(message.channel, \"Error setting time! 
Please provide a number!\")\n except:\n await client.send_message(message.channel, \"Error setting time!\")\n\n if message.content.lower().startswith(\"#!rainbowroles\"):\n if message.author.top_role.position > message.server.me.top_role.position:\n await client.send_message(message.channel, \"Sorry, I cannot do rainbow text on a role higher than me!\")\n elif message.author.top_role.is_everyone:\n await client.send_message(message.channel, \"Sorry, I cannot do rainbow text on the @everyone role!\")\n else:\n cur = await dbConn.cursor()\n await cur.execute(\"INSERT INTO discordbot.tea_party_bot_data (type, data) VALUES ({n}, '{d}');\".format(n=RAINBOW_ROLE, d=message.author.top_role.id))\n await dbConn.commit()\n await cur.close()\n await queue.put([\"rainbowObjects\", message.author.top_role])\n\n if message.content.lower().startswith('#!userdata'):\n permissions = message.author.server_permissions\n top_role = message.author.top_role\n other_roles = message.author.roles\n\n formattedMsg = \"On guild: {guild},Your permissions are: \\n{perms}\\n\\n Your highest role is: \\n{top}\\n\\n Your list of roles is: \\n{roles}\\n\".format(\n guild=message.server.name,\n perms=permissions.value,\n top=\"Name: {n}, perms: {p}, id: {i}, colour: {c}\".format(\n n=top_role.name,\n p=top_role.permissions.value,\n i=top_role.id,\n c=top_role.colour),\n roles='\\n'.join([\n \"Name: {n}, perms: {p}, id: {i}, colour: {c}\".format(\n n=i.name,\n p=i.permissions.value,\n i=i.id,\n c=i.colour) for i in other_roles]))\n await client.send_message(message.author, formattedMsg)\n\n if message.content.lower().startswith('#!meme'):\n hold = await client.send_message(message.channel, \"Hang on a sec, fetching you a meme!\")\n await client.send_typing(message.channel)\n try:\n memes = imgurConn.memes_subgallery()\n meme = random.choice(memes)\n while meme.is_album:\n meme = random.choice(memes)\n if meme.animated:\n url = meme.gifv\n else:\n url = meme.link\n print(\"Grabbing memes: {}\".format(url))\n request = await aiohttp.request('get', url)\n raw = await request.read()\n imageData = io.BytesIO(raw)\n await client.send_file(message.channel, imageData, filename=url.split(\"/\")[-1], content=meme.title)\n except ImgurClientError as e:\n client.send_message(\n message.channel, \"Something went wrong with getting that meme, error: {}\".format(e))\n except ImgurClientRateLimitError:\n client.send_message(\n message.channel, \"I've ran out of imgur api requests, oh shit!\")\n except:\n client.send_message(\n message.channel, \"An unspecified error happened with the imgur api\")\n finally:\n await client.delete_message(hold)\n\n if message.content.lower().startswith(\"#!bangreentext\"):\n cur = await dbConn.cursor()\n await cur.execute(\"INSERT INTO discordbot.tea_party_bot_data (type, data) VALUES ({n}, '{d}');\".format(n=GREENTEXT_BANNED, d=str(message.channel.id)))\n await dbConn.commit()\n await cur.close()\n await client.send_message(message.channel, \"Banned this channel from greentext!\")\n\n if '>' in message.content:\n\n cur = await dbConn.cursor()\n await cur.execute(\"SELECT data FROM discordbot.tea_party_bot_data where type = {n};\".format(n=GREENTEXT_BANNED))\n banned = await cur.fetchall()\n await cur.close()\n\n bannedList = [i[0] for i in banned]\n\n if not str(message.channel.id) in bannedList:\n lines = None\n lines = [i for i in message.content.split('\\n') if i[0] == '>']\n if lines:\n await client.send_message(message.channel, '```css\\n{}\\n```'.format('\\n'.join(lines)))\n # greentext\n\n if 
message.content.lower().startswith('#!setavatar'):\n if message.attachments:\n try:\n url = message.attachments[0]['url']\n print('URL: {}'.format(url))\n imgType = url.split('.')[-1]\n request = await aiohttp.request('get', url)\n raw = await request.read()\n imageData = bytes(raw)\n pngImage = Image.open(io.BytesIO(imageData))\n pngImage.save(\"avatar_temp.png\")\n avatarImage = open(\"avatar_temp.png\", \"rb\")\n await client.edit_profile(avatar=avatarImage.read())\n avatarImage.close()\n await client.send_message(message.channel, \"Set my avatar to the uploaded image.\")\n except:\n await client.send_message(message.channel, \"Something went wrong with the image change. Check you uploaded an actual image file!\")\n else:\n await client.send_message(message.channel, \"No images uploaded, try again with an image in the message!\")\n\n if message.content.lower().startswith('#!createmessage'):\n cur = await dbConn.cursor()\n await cur.execute(\"INSERT INTO discordbot.tea_party_bot_data (type, data) VALUES ({n}, '{d}');\".format(n=MESSAGE_FEATURE, d=message.content[len('#!createmessage '):].replace(\"'\", r\"\\'\")))\n await dbConn.commit()\n await cur.close()\n await client.send_message(message.channel, 'Added new message to lolibot, example: {}'.format(message.content[len('#!createmessage '):].format('you', 'someone')))\n\n if message.content.lower().startswith('#!sendmessage'):\n cur = await dbConn.cursor()\n await cur.execute(\"SELECT data FROM discordbot.tea_party_bot_data where type = {n};\".format(n=MESSAGE_FEATURE))\n messages = await cur.fetchall()\n # print(messages)\n await cur.close()\n if messages and message.mentions:\n await client.send_message(message.channel, str(random.choice(messages)[0].format(\n message.author.mention, *[mention.mention for mention in message.mentions])))\n\n if message.content.lower().startswith(\"#!markov-stats\"):\n if not test_for_blocked2(message.content, markov_commands):\n data = message.content.split()\n if len(data) > 1:\n data = data[1:]\n content = {}\n counter = [0, 0, False]\n for i, c in enumerate(data):\n if c.lower() in markov_commands:\n if not counter[2]:\n counter[2] = True\n else:\n content[data[counter[0]]] = data[\n counter[0] + 1:counter[1] + 1]\n counter[0] = i\n counter[1] = i\n if counter[2]:\n content[data[counter[0]]] = data[\n counter[0] + 1:counter[1] + 1]\n for i, c in content.items():\n print('i: {i}, c: {c}'.format(i=i, c=c))\n if i == \"count\":\n for n in c:\n if userRegex.match(n):\n msgServer = message.server\n if msgServer:\n msgMember = str(msgServer.get_member(\n n.lstrip('<@').rstrip('>')))\n cur = await dbConn.cursor()\n await cur.execute(\"SELECT DISTINCT user FROM discordbot.messages_tea_party;\")\n users = [str(i[0]) for i in await cur.fetchall()]\n if str(n.lstrip('<@').rstrip('>')) in users:\n\n await cur.execute(\"SELECT * FROM discordbot.messages_tea_party where user = '{}';\".format(str(n.lstrip('<@').rstrip('>')).replace(\"'\", r\"\\'\")))\n result = await cur.fetchall()\n await client.send_message(message.channel, \"Number of messages collected for {usr}: {count}\".format(usr=msgMember, count=len(result)))\n await cur.close()\n\n if message.content.lower().startswith(\"#!markov\"):\n print('generating markov')\n for i in range(10):\n try:\n cur = await dbConn.cursor()\n await cur.execute(\"SELECT DISTINCT user FROM discordbot.messages_tea_party WHERE channel = '{c}';\".format(c=message.server.id))\n users = [str(i[0]) for i in await cur.fetchall()]\n\n if message.mentions:\n user = 
str(message.mentions[0].id)\n                elif 'all' in message.clean_content:\n                    user = 'everyone'\n                else:\n                    user = random.choice(users)\n\n                if user in users or user == 'everyone':\n                    if not user == 'everyone':\n                        await cur.execute(\"SELECT message FROM discordbot.messages_tea_party where user = '{u}' and channel = '{c}';\".format(\n                            u=str(user).replace(\"'\", r\"\\'\"), c=message.server.id))\n                    else:\n                        await cur.execute(\"SELECT message FROM discordbot.messages_tea_party WHERE channel = '{c}';\".format(c=message.server.id))\n                    messages = [str(i[0]) for i in await cur.fetchall()]\n                    markovBase = markovify.Text(get_periods(\n                        *messages))\n                    sentence = markovBase.make_sentence(tries=50)\n                    print(\"Generated markov: {}\".format(sentence))\n                    if sentence:\n                        await client.send_message(message.channel, \"\\n\\n\\n__**Message by {usr}**__\\n\\n{chain}\".format(usr=str(message.server.get_member((user))), chain=sentence))\n                        break\n                    await cur.close()\n            except:\n                print('Exception occurred in 1st markov forloop: {}'.format(\n                    sys.exc_info()[0]))\n                continue\n        else:\n            await client.send_message(message.channel, \"\\n\\n\\n__**Message by {usr}**__\\n\\nNot enough data to make a sentence of this user.\".format(usr=str(message.server.get_member((user)))))\n\nwhile not programFailed[\"state\"]:\n    try:\n        token = open(\"discord.token\", \"r\").readline()\n        print(token)\n        client.run(token)\n        client.close()\n    except:\n        programFailed[\"state\"] = True\n        programFailed[\"reason\"].append(\n            \"Please place your discord token in the \\'discord.token\\' file.\")\n        break\n\n\nfor i in programFailed[\"reason\"]:\n    print(\"Program failed with reason: {}\".format(i))\n\ninput(\"press any key to exit program!\")\nexit()\n","sub_path":"anime-irl.py","file_name":"anime-irl.py","file_ext":"py","file_size_in_byte":22443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"35591802","text":"import matplotlib.pyplot as plt\nimport sys\n\ncount_qmsgs = 0\n\ndef check_size(i,common):\n\tglobal count_qmsgs\n\tif common[i][1] == 'pub':\n\t\tcount_qmsgs+=1\n\telse:\n\t\tcount_qmsgs-=1\n\treturn count_qmsgs\n\ndef queue_size(sent, received):\n\tsent_list = [[sent[0][i], 'pub'] for i in range(0, len(sent[0]))]\n\treceived_list = [[received[0][i], 'sub'] for i in range(0, len(received[0]))]\n\tcommon = sorted(sent_list + received_list, key = lambda el: el[0])\n\tlist_ = [check_size(i, common) for i in range(0, len(common))]\n\tlist_counts = []\n\tfor i in range(0, len(list_)):\n\t\tif common[i][1] == 'pub':\n\t\t\tlist_counts.append(list_[i])\n\treturn list_counts\n\ndef main():\n\tsend_time = []\n\treceive_time = []\n\t\n\tsend = open(sys.argv[1], 'r')\n\tfor line in send:\n\t\tsend_time.append(list(map(int, line.split())))\n\tsend.close()\n\n\treceive = open(sys.argv[2], 'r')\n\tfor line in receive:\n\t\treceive_time.append(list(map(int, line.split())))\n\treceive.close()\n\n\tif len(send_time) != len(receive_time):\n\t\tprint(\"Number of lines doesn't match\")\n\t\texit()\n\tfor i in range(0, len(send_time)):\n\t\tif len(send_time[i]) != len(receive_time[i]):\n\t\t\tprint(len(send_time[i]), len(receive_time[i]))\n\t\t\tprint(\"Number of observations doesn't match\")\n\t\t\texit()\n\tlist_counts = queue_size(send_time,receive_time)\n\tdelay_time = [[receive_time[i][j] - send_time[i][j] for j in range(0, len(send_time[i]))] for i in range(0, len(send_time))]\n\tdelay = []\n\tfor i in range(0, 10):\n\t\tdelay.append(delay_time[0][0:(500*(i+1))])\n\tdelay_time = delay\n\tmeans = 
[sum(delay_time[i])/(1000000*len(delay_time[i])) for i in range(0, len(delay_time))]\n\twith open('delays.txt', 'w') as f:\n\t\tf.write(' '.join([str(i/1000000) for i in delay_time[-1]]))\n\tplt.boxplot(list(map(lambda x: [x[i]/1000000 for i in range(0, len(x))], delay_time)), positions=[len(d) for d in delay], widths=[200 for i in range(0, 10)], whis=[0,100])\n\tplt.ylabel('time, ms')\n\tplt.xlabel('count of messages')\n\tplt.show()\n\tplt.plot([i for i in range(1, len(delay_time[-1])+1)], list(map(lambda x: x/1000000, delay_time[-1])))\n\tplt.ylabel('time, ms')\n\tplt.xlabel('number of message')\n\tplt.show()\n\tplt.plot([i for i in range(1, len(list_counts)+1)], list_counts)\n\tplt.ylabel('messages in queue')\n\tplt.xlabel('number of sent messages')\n\tplt.show()\n\nif __name__ == '__main__':\n\tif len(sys.argv) < 3:\n\t\tprint('Missed send time file and/or receive time file')\n\t\texit()\n\tmain()\n","sub_path":"test_delays/boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"421121776","text":"from django.urls import path\nfrom user import views\n\napp_name = 'user'\n\nurlpatterns = [\n path('login/', views.user_login, name='login'),\n path('register/', views.user_register, name='register'),\n path('homepage/', views.homepage, name='homepage'),\n path('logout/', views.user_logout, name='logout'),\n path('/profile/', views.profile, name='profile'),\n]\n","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193632487","text":"\"\"\"\nHigh-level functions used across the CAP-Toolkit package.\n\n\"\"\"\nimport h5py\nimport pyproj\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom scipy import signal\n\n\n# --- Utilitiy functions --- #\n\n\ndef print_args(args):\n \"\"\"Print arguments passed to argparse.\"\"\"\n print(\"Input arguments:\")\n for arg in list(vars(args).items()):\n print(arg)\n\n\ndef read_h5(fname, vnames):\n \"\"\"Generic HDF5 reader.\n\n vnames : ['var1', 'var2', 'var3']\n \"\"\"\n with h5py.File(fname, \"r\") as f:\n variables = [f[v][()] for v in vnames]\n\n return variables if len(vnames) > 1 else variables[0]\n\n\ndef save_h5(fname, vardict, mode=\"a\"):\n \"\"\"Generic HDF5 writer.\n\n vardict : {'name1': var1, 'name2': va2, 'name3': var3}\n \"\"\"\n with h5py.File(fname, mode) as f:\n for k, v in list(vardict.items()):\n if k in f:\n f[k][:] = np.squeeze(v)\n else:\n f[k] = np.squeeze(v)\n\n\ndef is_empty(ifile):\n \"\"\"Test if file is corruted or empty\"\"\"\n try:\n with h5py.File(ifile, \"r\") as f:\n if bool(list(f.keys())):\n return False\n else:\n return True\n except IOError:\n return True\n\n\ndef find_nearest(arr, val):\n \"\"\"Find index of 'nearest' value(s).\n\n Args:\n arr (nd array) : The array to search in (nd). No need to be sorted.\n val (scalar or array) : Value(s) to find.\n\n Returns:\n out (tuple or scalar) : The index (or tuple if nd array) of nearest\n entry found. 
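A standalone sketch, separate from the records above: boxplot.py derives the queue depth by sorting the merged publish/receive timestamps and bumping a module-level counter per event. The same running count can be computed without the global using itertools.accumulate; the timestamps below are made up for illustration.

```python
from itertools import accumulate

sent = [1, 2, 3, 10]       # hypothetical publish timestamps
received = [4, 5, 6, 12]   # hypothetical receive timestamps

# +1 when a message is published (enters the queue), -1 when it is received
events = sorted([(t, 1) for t in sent] + [(t, -1) for t in received])
depth = list(accumulate(delta for _, delta in events))
print(depth)  # [1, 2, 3, 2, 1, 0, 1, 0] -- queue size after each event
```

Filtering `depth` down to the positions where the event is a publish reproduces the `list_counts` series the record plots.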
If `val` is a list of values then a tuple of ndarray\n with the indices of each value is return.\n\n See also:\n find_nearest2\n\n \"\"\"\n idx = []\n\n if np.ndim(val) == 0:\n val = np.array([val])\n\n for v in val:\n idx.append((np.abs(arr - v)).argmin())\n idx = np.unravel_index(idx, arr.shape)\n\n return idx if val.ndim > 1 else idx[0]\n\n\ndef mad_std(x, axis=None):\n \"\"\"Robust standard deviation (using MAD).\"\"\"\n\n return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)\n\n\ndef transform_coord(proj1, proj2, x, y):\n \"\"\"Transform coordinates from proj1 to proj2 (EPSG num).\n\n Examples EPSG proj:\n Geodetic (lon/lat): 4326\n Stereo AnIS (x/y): 3031\n Stereo GrIS (x/y): 3413\n \"\"\"\n # Set full EPSG projection strings\n proj1 = pyproj.Proj(\"+init=EPSG:\" + str(proj1))\n proj2 = pyproj.Proj(\"+init=EPSG:\" + str(proj2))\n # Convert coordinates\n\n return pyproj.transform(proj1, proj2, x, y)\n\n\n# --- Processing functions --- #\n\n\ndef sgolay1d(h, window=3, order=1, deriv=0, dt=1.0, mode=\"nearest\", time=None):\n \"\"\"Savitztky-Golay filter with support for NaNs.\n\n If time is given, interpolate NaNs otherwise pad w/zeros.\n If time is given, calculate dt as t[1]-t[0].\n\n Args:\n dt (int): spacing between samples (for correct units).\n\n Notes:\n Works with numpy, pandas and xarray objects.\n\n \"\"\"\n if isinstance(h, (pd.Series, xr.DataArray)):\n h = h.values\n if isinstance(time, (pd.Series, xr.DataArray)):\n time = time.values\n\n _h = h.copy()\n (i_nan,) = np.where(np.isnan(_h))\n (i_valid,) = np.where(np.isfinite(_h))\n\n if i_valid.size < 5:\n return _h\n elif time is not None:\n _h[i_nan] = np.interp(time[i_nan], time[i_valid], _h[i_valid])\n dt = np.abs(time[1] - time[0])\n else:\n _h[i_nan] = 0\n\n return signal.savgol_filter(_h, window, order, deriv, delta=dt, mode=mode)\n\n\n# TODO: Think if dx, dy should be applied here !!!\ndef sgolay2d(z, window_size, order, derivative=None):\n \"\"\"Two dimensional data smoothing and least-square gradient estimate.\n\n Code from:\n http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html\n\n Reference:\n A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n\n \"\"\"\n # number of terms in the polynomial expression\n # TODO: Double check this (changed for Py3)\n n_terms = (order + 1) * (order + 2) // 2\n\n if window_size % 2 == 0:\n raise ValueError(\"window_size must be odd\")\n\n if window_size ** 2 < n_terms:\n raise ValueError(\"order is too high for the window size\")\n\n half_size = window_size // 2\n\n # exponents of the polynomial.\n # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...\n # this line gives a list of two item tuple. Each tuple contains\n # the exponents of the k-th term. First element of tuple is for x\n # second element for y.\n # Ex. 
exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]\n exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)]\n\n # coordinates of points\n ind = np.arange(-half_size, half_size + 1, dtype=np.float64)\n dx = np.repeat(ind, window_size)\n dy = np.tile(ind, [window_size, 1]).reshape(window_size ** 2,)\n\n # build matrix of system of equation\n A = np.empty((window_size ** 2, len(exps)))\n\n for i, exp in enumerate(exps):\n A[:, i] = (dx ** exp[0]) * (dy ** exp[1])\n\n # pad input array with appropriate values at the four borders\n new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size\n Z = np.zeros((new_shape))\n # top band\n band = z[0, :]\n Z[:half_size, half_size:-half_size] = band - np.abs(\n np.flipud(z[1 : half_size + 1, :]) - band\n )\n # bottom band\n band = z[-1, :]\n Z[-half_size:, half_size:-half_size] = band + np.abs(\n np.flipud(z[-half_size - 1 : -1, :]) - band\n )\n # left band\n band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])\n Z[half_size:-half_size, :half_size] = band - np.abs(\n np.fliplr(z[:, 1 : half_size + 1]) - band\n )\n # right band\n band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])\n Z[half_size:-half_size, -half_size:] = band + np.abs(\n np.fliplr(z[:, -half_size - 1 : -1]) - band\n )\n # central band\n Z[half_size:-half_size, half_size:-half_size] = z\n\n # top left corner\n band = z[0, 0]\n Z[:half_size, :half_size] = band - np.abs(\n np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band\n )\n # bottom right corner\n band = z[-1, -1]\n Z[-half_size:, -half_size:] = band + np.abs(\n np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1]))\n - band\n )\n\n # top right corner\n band = Z[half_size, -half_size:]\n Z[:half_size, -half_size:] = band - np.abs(\n np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band\n )\n # bottom left corner\n band = Z[-half_size:, half_size].reshape(-1, 1)\n Z[-half_size:, :half_size] = band - np.abs(\n np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band\n )\n\n # solve system and convolve\n\n if derivative is None:\n m = np.linalg.pinv(A)[0].reshape((window_size, -1))\n\n return signal.fftconvolve(Z, m, mode=\"valid\")\n elif derivative == \"col\":\n c = np.linalg.pinv(A)[1].reshape((window_size, -1))\n\n return signal.fftconvolve(Z, -c, mode=\"valid\")\n elif derivative == \"row\":\n r = np.linalg.pinv(A)[2].reshape((window_size, -1))\n\n return signal.fftconvolve(Z, -r, mode=\"valid\")\n elif derivative == \"both\":\n c = np.linalg.pinv(A)[1].reshape((window_size, -1))\n r = np.linalg.pinv(A)[2].reshape((window_size, -1))\n\n return (\n signal.fftconvolve(Z, -r, mode=\"valid\"),\n signal.fftconvolve(Z, -c, mode=\"valid\"),\n )\n\n\ndef make_grid(xmin, xmax, ymin, ymax, dx, dy, return_2d=False):\n \"\"\"Construct output grid-coordinates.\"\"\"\n Nn = int((np.abs(ymax - ymin)) / dy) + 1\n Ne = int((np.abs(xmax - xmin)) / dx) + 1\n xi = np.linspace(xmin, xmax, num=Ne)\n yi = np.linspace(ymin, ymax, num=Nn)\n\n if return_2d:\n return np.meshgrid(xi, yi)\n else:\n return xi, yi\n\n# --- Test functions --- #\n\n\n# Some edge test cases (for the 3-km grid)\ntest_ij_3km = [\n (845, 365), # 0 PIG Floating 1\n (831, 364), # 1 PIG Floating 2\n (1022, 840), # 2 CS-2 only 1\n (970, 880), # 3 CS-2 only 2\n (100, 1170), # 4 fig1 large peaks at mission overlaps\n (100, 766), # 5 fig2 peak at mission overlap\n (7, 893), # 6 step change at beguining\n (8, 892), # 7 with hole\n (9, 889), # 8 with large hole\n (11, 893), # 9 step in 
divergence\n]\n","sub_path":"captoolkit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"69337168","text":"\"\"\"\nGiven a string containing only the characters '(', ')', '{', '}', '[' and ']', determine whether the string is valid.\n\nA valid string must satisfy:\n\nOpen brackets must be closed by the same type of bracket.\nOpen brackets must be closed in the correct order.\nNote that an empty string is considered valid.\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/valid-parentheses\nCopyright belongs to LeetCode. For commercial reprints, please contact them for official authorization; for non-commercial reprints, please cite the source.\n\"\"\"\n\n\nclass Solution:\n    def isValid(self, s: str) -> bool:\n        stack = []\n        for char in s:\n            if char == '(' or char == '{' or char == '[':\n                stack.append(char)\n            elif stack and (char == ')' and stack[-1] == '('\n                            or char == '}' and stack[-1] == '{'\n                            or char == ']' and stack[-1] == '['):\n                stack.pop()\n            else:\n                return False\n        return stack == []\n\n\nif __name__ == \"__main__\":\n    test_cases = [\n        \"()\",\n        \"()[]{}\",\n        \"(]\",\n        \"([)]\",\n        \"{[]}\"\n    ]\n\n    s = Solution()\n    for case in test_cases:\n        res = s.isValid(case)\n        print(f\"test case:{case}, result:{res}\")\n","sub_path":"stack/20_isValid.py","file_name":"20_isValid.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"640898787","text":"import folium\nimport json\nimport threading\n\nfrom pymongo import MongoClient\n\n\n# adds marker to corresponding map, executed as thread\ndef add_marker(_lat, _lng, _name, _time, _day):\n    maps[\"{}_{}\".format(_time[\"weekday_num\"], _day[\"time\"] % 24)] \\\n        .circle_marker(location=[_lat, _lng], radius=_day[\"popularity\"],\n                       popup=_name, line_color='#3186cc',\n                       fill_color='#3186cc')\n\n\nparams = json.loads(open(\"params.json\", \"r\").read())\n\nclient = MongoClient('localhost', params[\"dbPort\"])\nmongod = client[params[\"dbName\"]]\nplaces = mongod[params[\"collectionName\"]]\n\nmaps = {}\n\n# init with empty maps \"day_time\"\nfor x, y in [(day, currT) for day in range(7) for currT in range(24)]:\n    maps[\"{}_{}\".format(x, y)] = folium.Map(location=[52.509719, 13.393527],\n                                            tiles='Stamen Toner',\n                                            zoom_start=14)\n\ncurr_thread = None\n\n# iterate over mongodb data with populartimes\nfor i, location in enumerate(places.find({\"popular_times\": {\"$exists\": True}})):\n\n    print(\"--- place #{} ---\".format(i))\n\n    loc = location[\"location\"][\"location\"]\n    lat = loc[\"lat\"]\n    lng = loc[\"lng\"]\n    name = location[\"name\"]\n\n    for time in location[\"popular_times\"]:\n        for day in time[\"data\"]:\n            curr_thread = threading.Thread(target=add_marker, args=(lat, lng, name, time, day))\n            curr_thread.start()\n\ncurr_thread.join()\n\nfor map_key in maps.keys():\n    maps[map_key].create_map(path=\"maps/{}.html\".format(map_key))\n    print(\"--- created map {} ---\".format(map_key))\n","sub_path":"create_map.py","file_name":"create_map.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"253395886","text":"#!/usr/bin/env python\nfrom CSHLDAP import CSHLDAP\nfrom datetime import date, datetime\nfrom csh_webnews import Webnews\nimport argparse\n\ndef checkBirthday(ldap):\n    today = date.today()\n    for member in allMembersWithBirthdaysOnDate(ldap, today):\n        name = member[\"displayName\"]\n        if len(name) < 1:\n            continue\n        displayName = name[0]\n\ndef allMembersWithBirthdays(ldap):\n    \"\"\"\n    Finds all active members in LDAP and strips those without B-Days\n    Returns: The list of all active members with a birthday\n    \"\"\"\n    activeMembers = ldap.search(active=\"1\")\n    
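The create_map.py record above (as fixed) passes the callable and its arguments separately to threading.Thread; the original `target=add_marker(...)` form calls the function eagerly on the main thread and hands Thread its return value (None) as the target. A minimal sketch of the difference, with made-up marker data:

```python
import threading

def add_marker(lat, lng, name):
    print("marker", name, "at", lat, lng)

# Wrong: add_marker runs here, in the calling thread; Thread receives target=None.
t_wrong = threading.Thread(target=add_marker(52.5, 13.4, "demo"))

# Right: the call is deferred until the worker thread is started.
t_right = threading.Thread(target=add_marker, args=(52.5, 13.4, "demo"))
t_right.start()
t_right.join()
```

Note the record still only joins the last-created thread; joining a list of all started threads would be the more thorough pattern.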
members = []\n    for memberTuple in activeMembers:\n        if len(memberTuple) < 1:\n            continue\n        member = memberTuple[1]\n        birthday = birthdateFromMember(member)\n        if not birthday:\n            continue\n        members.append(member)\n    return members\n\ndef allMembersWithBirthdaysOnDate(ldap, day):\n    \"\"\"\n    Finds all members with a birthday on a specified date.\n    Returns: An array of members whose birthday falls on day\n    \"\"\"\n    allMembers = allMembersWithBirthdays(ldap)\n    birthdayMembers = []\n    for member in allMembers:\n        birthday = birthdateFromMember(member)\n        if day.month != birthday.month or day.day != birthday.day:\n            continue\n        birthdayMembers.append(member)\n    return birthdayMembers\n\ndef birthdateFromMember(member):\n    \"\"\"\n    Takes a member and returns their birthday in a date form.\n    Returns: A date object or a None if the member doesn't have a birthday\n    \"\"\"\n    if not \"birthday\" in member:\n        return None\n    birthday = member[\"birthday\"]\n    if len(birthday) < 1:\n        return None\n    birthdayString = birthday[0]\n    memberMonthDay = birthdayString[:8]\n    birthdate = datetime.strptime(memberMonthDay, \"%Y%m%d\")\n    return date(year=birthdate.year, month=birthdate.month, day=birthdate.day)\n\ndef message(ldap):\n    \"\"\"\n    Finds all active members whose birthday is today, parses a subject and body for WebNews\n    Returns: The subject line, The body\n    \"\"\"\n    day = date.today()\n    birthdays = allMembersWithBirthdaysOnDate(ldap, day)\n    numberOfBirthdays = len(birthdays)\n    if numberOfBirthdays == 0:\n        return None, None\n    plural = \"s\" if numberOfBirthdays > 1 else \"\"\n    name = \"Today\" if numberOfBirthdays > 1 else birthdays[0][\"cn\"][0]\n    subject = name + \"'s Birthday\" + plural\n    string = \"\"\n    for member in birthdays:\n        birthdate = birthdateFromMember(member)\n        age = date.today().year - birthdate.year\n        name = member[\"displayName\"]\n        commonName = member[\"cn\"]\n        if len(name) < 1:\n            continue\n        nameString = name[0]\n        memberString = nameString + \" is \" + str(age) + \" years old.\\n\"\n        string += memberString\n    string += \"\\nShower on sight!\\n\\n(This post was automatically generated by the WebNews Birthday Bot.)\"\n    return subject, string\n\ndef main(user=None, password=None, apiKey=None, test=False):\n    ldap = CSHLDAP(user, password)\n    subject, post = message(ldap)\n    if not post:\n        print(\"No birthdays today.\")\n        return\n    newsgroup = \"csh.test\" if test else \"csh.noise\"\n    webnews = Webnews(api_key=apiKey, api_agent=\"WebNews Birthday Bot\")\n    webnews.compose(newsgroup=newsgroup, subject=subject, body=post)\n    print(post)\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Find users with a birthday.')\n    parser.add_argument(\"user\", help=\"Specify a username.\")\n    parser.add_argument(\"password\", help=\"Specify the password for the user.\")\n    parser.add_argument(\"apikey\", help=\"API key for posting to WebNews\")\n    parser.add_argument(\"--test\", \"-t\",\n                        action=\"store_true\",\n                        help=\"Posts to csh.test instead of csh.noise\")\n    args = parser.parse_args()\n\n    if not args.apikey:\n        print(\"No API key provided.\")\n        exit()\n\n    main(user=args.user, password=args.password,\n         apiKey=args.apikey, test=args.test)\n","sub_path":"birthday.py","file_name":"birthday.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"159765413","text":"import os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'VERSION')) as 
version_file:\n version = version_file.read().strip()\n\nsetup(\n name='isdc-geopanel',\n version= version,\n description='panel module',\n long_description=open(os.path.join(here, 'README.md')).read(),\n license='iMMAP',\n author ='iMMAP',\n author_email = 'asdc@immap.org',\n packages=find_packages(),\n include_package_data = True,\n install_requires=[],\n zip_safe= False\n)\n","sub_path":"pypi_install_script/isdc-geopanel-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180333732","text":"\na = [5, 4, 3, 2, 1]\nb = []\nc = []\n\ndef hanoi(n, source, target, spare):\n if n > 0:\n hanoi(n-1, source, spare, target)\n target.append(source.pop())\n hanoi(n-1, spare, target, source)\n\nhanoi(len(a), a, c, b)\n\nprint(c)","sub_path":"hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"521195730","text":"# -*- coding: utf-8 -*- #\n# Copyright 2022 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Creates a new AlloyDB secondary instance.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.alloydb import api_util\nfrom googlecloudsdk.api_lib.alloydb import instance_operations\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.alloydb import flags\nfrom googlecloudsdk.command_lib.alloydb import instance_helper\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core import resources\n\n\n@base.ReleaseTracks(\n base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA, base.ReleaseTrack.GA\n)\nclass CreateSecondary(base.CreateCommand):\n \"\"\"Creates a new AlloyDB SECONDARY instance within a given cluster.\"\"\"\n\n detailed_help = {\n 'DESCRIPTION': '{description}',\n 'EXAMPLES': \"\"\"\\\n To create a new secondary instance, run:\n\n $ {command} my-instance --cluster=my-cluster --region=us-central1\n \"\"\",\n }\n\n @staticmethod\n def Args(parser):\n \"\"\"Specifies additional command flags.\n\n Args:\n parser: argparse.Parser: Parser object for command line inputs\n \"\"\"\n base.ASYNC_FLAG.AddToParser(parser)\n flags.AddCluster(parser, False)\n flags.AddAvailabilityType(parser)\n flags.AddInstance(parser)\n flags.AddRegion(parser)\n\n def Run(self, args):\n \"\"\"Constructs and sends request.\n\n Args:\n args: argparse.Namespace, An object that contains the values for the\n arguments specified in the .Args() method.\n\n Returns:\n ProcessHttpResponse of the request made.\n \"\"\"\n client = api_util.AlloyDBClient(self.ReleaseTrack())\n alloydb_client = client.alloydb_client\n alloydb_messages = client.alloydb_messages\n cluster_ref = client.resource_parser.Create(\n 'alloydb.projects.locations.clusters',\n 
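The hanoi.py record above moves n disks with the classic recursion; its move count satisfies T(n) = 2·T(n−1) + 1, which solves to T(n) = 2^n − 1. A self-contained check (the move log is instrumentation added here, not part of the record):

```python
def hanoi_count(n, source, target, spare, moves):
    # same recursion as the record, with a move counter added
    if n > 0:
        hanoi_count(n - 1, source, spare, target, moves)
        target.append(source.pop())
        moves.append(1)
        hanoi_count(n - 1, spare, target, source, moves)

moves = []
hanoi_count(5, [5, 4, 3, 2, 1], [], [], moves)
print(len(moves), 2 ** 5 - 1)  # prints: 31 31
```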
projectsId=properties.VALUES.core.project.GetOrFail,\n        locationsId=args.region,\n        clustersId=args.cluster,\n    )\n    instance_resource = alloydb_messages.Instance()\n    instance_ref = client.resource_parser.Create(\n        'alloydb.projects.locations.clusters.instances',\n        projectsId=properties.VALUES.core.project.GetOrFail,\n        locationsId=args.region,\n        clustersId=args.cluster,\n        instancesId=args.instance,\n    )\n    instance_resource.name = instance_ref.RelativeName()\n    instance_resource.instanceType = (\n        alloydb_messages.Instance.InstanceTypeValueValuesEnum.SECONDARY\n    )\n    instance_resource.availabilityType = instance_helper.ParseAvailabilityType(\n        alloydb_messages, args.availability_type)\n    req = alloydb_messages.AlloydbProjectsLocationsClustersInstancesCreatesecondaryRequest(\n        instance=instance_resource,\n        instanceId=args.instance,\n        parent=cluster_ref.RelativeName(),\n    )\n    op = alloydb_client.projects_locations_clusters_instances.Createsecondary(\n        req\n    )\n    op_ref = resources.REGISTRY.ParseRelativeName(\n        op.name, collection='alloydb.projects.locations.operations'\n    )\n    log.status.Print('Operation ID: {}'.format(op_ref.Name()))\n    if not args.async_:\n      instance_operations.Await(\n          op_ref, 'Creating secondary instance', self.ReleaseTrack()\n      )\n    return op\n","sub_path":"lib/surface/alloydb/instances/create_secondary.py","file_name":"create_secondary.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"583234266","text":"from Base.Encoder import *\nfrom Base.Decoder import *\nfrom Base.Utils import *\n\n\nclass RepeatNet(chainer.Chain):\n    def __init__(self, item_size, embed_size, hidden_size, joint_train=False):\n        # joint_train previously read an undefined name; it is now a parameter with a safe default\n        self.joint_train = joint_train\n        super(RepeatNet, self).__init__(\n            enc=NStepGRUEncoder(item_size, embed_size, hidden_size),\n            dec=AttReDecoder(item_size, hidden_size),\n        )\n\n    def predict(self, input_list):\n        x_enable = chainer.Variable(self.xp.array(mask(input_list)))\n        batch_last_h, batch_seq_h = self.enc(input_list, x_enable)\n        p_r, p_e, p = self.dec(batch_last_h, input_list, batch_seq_h, x_enable)\n\n        return p_r + p_e, p\n\n    def train(self, input_list, output_list):\n        predicts, p = self.predict(input_list)\n\n        slices = self.xp.zeros(predicts.shape, dtype=self.xp.int32) > 0\n        for i, v in enumerate(output_list):\n            slices[i, v] = True\n        loss = -F.sum(F.log(F.get_item(predicts, slices)))/len(input_list)\n        return loss\n","sub_path":"Model/RepeatNet.py","file_name":"RepeatNet.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"391791996","text":"from random import randint\nimport os\nfrom rider import Rider\nfrom defs import rows,fg,bg,reset_color\nimport defs,random\n\nrows = int(rows)\n\ncol_yf='\\x1B[38;2;255;255;0m'\ncol_of='\\x1b[38;2;255;165;0m'\ncol_ob='\\x1b[48;2;255;165;0m'\n\narc_v = [[col_yf, col_ob, '▐' , '', 1], [col_of, '', '▌' , '', -1]]\narc_h = [[col_of, '', '▂' , '', -1], [col_yf, col_ob, '▀' , '', 1]]\narc_r = [\n    [['','',' ','',-1],[col_yf, col_ob, '▗' , '', 1],[col_of,'','▖','',-1]],\n    [[col_of,'','▝','',-1],[col_yf, col_ob, '▚' , '', 1],[col_of,'','▖','',-1]],\n    [[col_of,'','▝','',-1],[col_yf, col_ob, '▘' , '', 1],['','',' ','',-1]]\n    ]\narc_l = [\n    [[col_of,'','▗','',-1],[col_yf, col_ob, '▖' , '', 1],['','',' ','',-1]],\n    [[col_of,'','▗','',-1],[col_yf, col_ob, '▞' , '', 1],[col_of,'','▘','',-1]],\n    [['','',' ','',-1],[col_yf, col_ob, '▝' , '', 1],[col_of,'','▘','',-1]]\n    ]\n\n\nclass Arcs:\n    def 
fill_in_art(self,board,val,freq,iter):\n if val==1:\n starty=randint(1,rows//3-1) #changing\n startx=randint(1,freq-1)+iter*freq #const\n for j in range(rows//3):\n i=0\n for idx in arc_v:\n if idx[4] == -1:\n val = [idx[0],board[starty+j][startx+i][1],idx[2],bg+fg]\n else:\n val = [idx[0],idx[1],idx[2],bg+fg]\n \n board[starty+j][startx+i]=val\n defs.board_check[starty+j][startx+i]=1\n i+=1\n elif val==2:\n starty=randint(1,rows-3) #const\n startx=randint(1,freq-1)+iter*freq #const\n for j in range(2*rows//3):\n i=0\n for idx in arc_h:\n if idx[4] == -1:\n val = [idx[0],board[starty+i][startx+j][1],idx[2],bg+fg]\n else:\n val = [idx[0],idx[1],idx[2],bg+fg]\n board[starty+i][startx+j]=val\n defs.board_check[starty+i][startx+j]=1\n i+=1\n elif val==3:\n starty=randint(1,rows//3-1) #changing\n startx=randint(1,freq-1)+iter*freq #const\n for j in range(rows//3):\n if j == 0: p = arc_r[0]\n elif j == rows//3-1: p = arc_r[2]\n else : p = arc_r[1]\n i=0\n for idx in p:\n if idx[4] == -1:\n val = [idx[0],board[starty+j][startx+i+j][1],idx[2],bg+fg]\n else:\n val = [idx[0],idx[1],idx[2],bg+fg]\n \n board[starty+j][startx+i+j]=val\n defs.board_check[starty+j][startx+i+j]=1\n i+=1\n else:\n starty=randint(1,rows//3-1) #changing\n startx=randint(1,freq-1)+iter*freq #const\n for j in range(rows//3-1,-1,-1):\n if j == 0: p = arc_l[0]\n elif j == rows//3-1: p = arc_l[2]\n else : p = arc_l[1]\n i=0\n for idx in p:\n if idx[4] == -1:\n val = [idx[0],board[starty+j][startx+i-j][1],idx[2],bg+fg]\n else:\n val = [idx[0],idx[1],idx[2],bg+fg]\n \n board[starty+j][startx+i-j]=val\n defs.board_check[starty+j][startx+i-j]=1\n i+=1\n\n def __init__(self,freq,board):\n for i in range(2,defs.board_len//freq):\n val=random.sample(range(1,5),2)\n try:\n self.fill_in_art(board,val[0],freq+1,i)\n except:pass\n try:\n self.fill_in_art(board,val[1],freq+1,i)\n except:pass","sub_path":"arcs.py","file_name":"arcs.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"599185116","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport logging\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\n\nimport config\nimport dataset\nimport model_build\nimport utils\n\nconf = tf.app.flags.FLAGS\ndebug = (conf.log_level.upper() == \"DEBUG\")\n\n# log\nlogging.basicConfig(filename=\"../log/development.log\",\n format=\"[%(asctime)s] %(message)s\", datefmt=\"%m-%d %H:%M:%S\")\nlogger = logging.getLogger()\nlogger.setLevel(conf.log_level)\n\n\nclass Controller(object):\n \"\"\"Control the main process of this model.\"\"\"\n\n def __init__(self, phase_train):\n # config session\n self.sess = tf.Session(\n config=tf.ConfigProto(log_device_placement=False))\n\n # set up logger\n self.saver = tf.train.Saver(tf.all_variables())\n self.summary_writer = tf.train.SummaryWriter(\n conf.log_dir, graph=self.sess.graph)\n\n # switch of train / val\n self.phase_train = phase_train\n # global step\n self.global_step = tf.Variable(0, False, name=\"global_step\")\n self.update_global_step_op = tf.assign_add(self.global_step, 1)\n tf.scalar_summary(\"global_step\", self.global_step, collections=[\"brief\"])\n\n def set_net(self, net):\n self.net = net\n\n def _train_op(self, loss, param_groups, scope=\"train\"):\n # scale the learning rate according to iter\n learning_rate_scale = tf.case([(self.global_step < conf.train_warm_up_steps, lambda: tf.constant(0.5, dtype=tf.float32)),\n (self.global_step < 2000, 
lambda: tf.constant(1.0, dtype=tf.float32)),\n                                       (self.global_step < 4000, lambda: tf.constant(0.1, dtype=tf.float32)),\n                                       (self.global_step >= 6000, lambda: tf.constant(0.01, dtype=tf.float32))],\n                                      lambda: tf.constant(1.0, dtype=tf.float32),  # default branch; also reached for 4000 <= step < 6000\n                                      exclusive=False)\n        curr_lr = conf.base_learning_rate * learning_rate_scale\n        tf.scalar_summary(\"learning_rate\", curr_lr, collections=[\"brief\"])\n\n        if conf.optimizer == \"AdadeltaOptimizer\":\n            # TODO(meijieru): Add support for adadelta\n            raise NotImplementedError(\"Adadelta is not supported yet\")\n        elif conf.optimizer == \"MomentumOptimizer\":\n            param_lists = []  # [[params_0], [params_1], ..., [params_n]]\n            opts = []  # optimizers for param_lists\n            for collection, params in param_groups.iteritems():\n                param_lists.extend(params)\n                # double the learning rate for bias\n                if collection == \"vgg\":\n                    assert len(params) == 2\n                    opts.append(tf.train.MomentumOptimizer(curr_lr, conf.momentum))\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 2, conf.momentum))\n                elif collection == \"b11\":\n                    assert len(params) == 2\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 10, conf.momentum))\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 20, conf.momentum))\n                elif collection == \"b12\":\n                    assert len(params) == 1\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 10, conf.momentum))\n                elif collection == \"b13_14\":\n                    assert len(params) == 2\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 10, conf.momentum))\n                    opts.append(tf.train.MomentumOptimizer(curr_lr * 20, conf.momentum))\n                else:\n                    raise ValueError(\"Unknown collection\")\n        else:\n            raise ValueError(\"Unknown optimizer\")\n\n        # update weights according to the gradients\n        apply_grad_ops = []\n        all_train_params = reduce(lambda a, b: a + b, param_lists, [])\n        grads = tf.gradients(loss, all_train_params, name=\"gradients\")\n        count = 0\n        for params, opt in zip(param_lists, opts):\n            apply_grad_ops.append(opt.apply_gradients(zip(grads[count:count + len(params)], params)))\n            count += len(params)\n        assert count == len(grads)\n\n        # summary the train_params\n        for param in all_train_params:\n            tf.histogram_summary(param.op.name, param, collections=[\"detailed\"])\n        # summary the gradients of the train_params\n        for grad, param in zip(grads, all_train_params):\n            if grad is not None:\n                tf.histogram_summary(param.op.name + \"/gradients\", grad, collections=[\"detailed\"])\n\n        with tf.control_dependencies(apply_grad_ops):\n            train_op = tf.no_op(\"train_op\")\n        return train_op\n\n    def _init_or_reload(self):\n        init_op = tf.initialize_all_variables()\n        logger.info(\"Initializing\")\n        self.sess.run(init_op, {self.phase_train.name: True})\n        if conf.load_dir:\n            checkpoint = tf.train.get_checkpoint_state(conf.load_dir)\n            model_checkpoint_path = checkpoint.model_checkpoint_path\n            if checkpoint and model_checkpoint_path:\n                self.saver.restore(self.sess, model_checkpoint_path)\n                logger.info(\"Model loaded from {}\".format(model_checkpoint_path))\n            else:\n                raise AttributeError(\"No valid checkpoint found\")\n\n    def val(self, logits, loss, labels, step=None, from_train=True):\n        logger.info(\"Start validation\")\n        if not from_train:\n            self._init_or_reload()\n            tf.train.start_queue_runners(sess=self.sess)\n\n        n_samples = conf.num_val\n        val_batch_size = conf.val_batch_size\n        n_val_batch = n_samples / val_batch_size\n\n        val_logits = np.zeros([n_samples, conf.img_h, conf.img_w, conf.num_classes], dtype=np.float32)\n        val_labels = np.zeros([n_samples, conf.img_h, conf.img_w], dtype=np.int64)\n        val_losses = []\n\n        for i in 
xrange(n_val_batch):\n fetches = [logits, labels, loss]\n sess_outputs = self.sess.run(\n fetches, {self.phase_train.name: False})\n val_logits[i * val_batch_size:(i + 1) * val_batch_size, :] = sess_outputs[0]\n val_labels[i * val_batch_size:(i + 1) * val_batch_size] = sess_outputs[1]\n val_losses.append(sess_outputs[2])\n pred_labels = np.argmax(val_logits, axis=3)\n\n # metrics calculating\n val_loss = float(np.mean(np.asarray(val_losses)))\n confusion = utils.ConfusionMatrix(pred_labels, val_labels)\n val_pixel_acc = confusion.acc()\n val_mean_acc = confusion.mean_acc(ignore_first=True)\n val_mean_iou = confusion.mean_iou(ignore_first=True)\n\n # summary confusion matrix\n normalize_img = confusion.plot(is_normalize=True)\n image_summary = utils.image_summary(normalize_img, self.sess, \"confusion_normalized_{}\".format(step))\n self.summary_writer.add_summary(image_summary)\n\n # summary the results\n logger.info(\"Validate pixel accuracy = {}, mean accuracy = {}, mean IoU = {}\".format(\n val_pixel_acc, val_mean_acc, val_mean_iou))\n val_summary = tf.Summary()\n val_summary.value.add(tag=\"val_pixel_acc\", simple_value=val_pixel_acc)\n val_summary.value.add(tag=\"val_mean_acc\", simple_value=val_mean_acc)\n val_summary.value.add(tag=\"val_mean_iou\", simple_value=val_mean_iou)\n val_summary.value.add(tag=\"val_loss\", simple_value=val_loss)\n for cl, value in zip(utils.classes_list(conf.dataset), confusion.cl_acc()):\n val_summary.value.add(tag=\"val_cl_acc_{}\".format(cl), simple_value=value)\n self.summary_writer.add_summary(val_summary, step)\n\n def profiling(self, logits, loss, param_groups, labels, scope=\"profiling\"):\n logger.info(\"Start profiling\")\n\n pixel_acc = utils.tf_pixel_accuracy(logits, labels)\n train_op = self._train_op(loss, param_groups, scope=scope)\n self._init_or_reload()\n tf.train.start_queue_runners(sess=self.sess)\n fetches = [train_op, loss, pixel_acc]\n\n # for profiling\n logger.info(\"Warming up {} iters\".format(conf.profiling_warmup_steps))\n for i in xrange(conf.profiling_warmup_steps):\n self.sess.run(fetches, {self.phase_train.name: True})\n\n logger.info(\"Profiling within {} iters\".format(conf.profiling_steps))\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n for i in xrange(conf.profiling_steps):\n self.sess.run(fetches, {self.phase_train.name: True},\n options=run_options, run_metadata=run_metadata)\n\n # Create the Timeline object, and write it to a json\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open(conf.profiling_result_path, \"w\") as f:\n f.write(ctf)\n logger.info(\"Profiling results written to {}\".format(conf.profiling_result_path))\n\n def main_train(self, logits, loss, param_groups, labels, scope=\"train\"):\n # training operation\n train_op = self._train_op(loss, param_groups, scope=scope)\n\n # summary\n pixel_acc = utils.tf_pixel_accuracy(logits, labels)\n model_build.summary_loss()\n tf.scalar_summary(\"train_loss\", loss, collections=[\"brief\"])\n tf.scalar_summary(\"train_pixel_accuracy\", pixel_acc, collections=[\"brief\"])\n brief_summary_op = tf.merge_all_summaries(key=\"brief\")\n detailed_summary_op = tf.merge_all_summaries(key=\"detailed\")\n\n # init paramters or load from checkpoint\n self._init_or_reload()\n\n # start data loading\n tf.train.start_queue_runners(sess=self.sess)\n\n # average losses of each iteration\n accumulated_loss = 0\n\n # train loop\n logger.info(\"Start training\")\n while True:\n # 
step = self.sess.run(self.global_step)\n            step = self.global_step.eval(session=self.sess)\n            if step >= conf.max_iter:\n                logger.info(\"Maximum iter reached\")\n                break\n\n            need_brief_summary = (step % conf.brief_summary_interval == 0)\n            need_detailed_summary = (step % conf.detailed_summary_interval == 0)\n            need_val = (step % conf.val_interval == 0) and (step > 0)\n            need_save = (step > 0) and (step % conf.save_interval == 0)\n            if conf.profiling_during_train:\n                need_profiling = (step > 0) and (step % conf.profiling_interval == 0)\n            else:\n                need_profiling = False\n\n            fetches = {}\n            fetches[\"train_op\"] = train_op\n            fetches[\"loss\"] = loss\n            if need_brief_summary:\n                fetches[\"brief_summary\"] = brief_summary_op\n                fetches[\"pixel_acc\"] = pixel_acc\n            if need_detailed_summary:\n                fetches[\"detailed_summary\"] = detailed_summary_op\n            if need_profiling:\n                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n                run_metadata = tf.RunMetadata()\n            else:\n                run_options = None\n                run_metadata = None\n\n            sess_outputs = self.sess.run(fetches, {self.phase_train.name: True},\n                                         options=run_options, run_metadata=run_metadata)\n            accumulated_loss += sess_outputs[\"loss\"]\n\n            if need_brief_summary:\n                logger.info(\"Iteration = {}, train loss = {}, pixel accuracy = {}\".format(\n                    step, accumulated_loss / min(step+1, conf.brief_summary_interval), sess_outputs[\"pixel_acc\"]))\n                accumulated_loss = 0\n                self.summary_writer.add_summary(sess_outputs[\"brief_summary\"], step)\n            if need_detailed_summary:\n                self.summary_writer.add_summary(sess_outputs[\"detailed_summary\"], step)\n                logger.info(\"Detailed summaries written\")\n            if need_profiling:\n                self.summary_writer.add_run_metadata(run_metadata, \"step_{}\".format(step))\n                logger.info(\"Profiling written\")\n\n            if need_save:\n                checkpoint_path = os.path.join(conf.log_dir, \"checkpoint\")\n                self.saver.save(self.sess, checkpoint_path, global_step=step)\n                logger.info(\"Checkpoint saved at {}\".format(checkpoint_path))\n\n            if need_val:\n                self.val(logits, loss, labels, step)\n\n            # update training status\n            self.update_global_step_op.eval(session=self.sess)\n\n\ndef logits_params(net, strategy):\n    \"\"\"Set up logits and parameters to be trained.\"\"\"\n    param_groups = {}\n    if strategy == \"b11\":\n        logits = net.b11\n        param_groups[\"vgg\"] = [tf.get_collection(\"vgg_weights\"), tf.get_collection(\"vgg_biases\")]\n        param_groups[\"b11\"] = [tf.get_collection(\"b11_weights\"), tf.get_collection(\"b11_biases\")]\n    elif strategy == \"b12\":\n        logits = net.b12\n        param_groups[\"b12\"] = [tf.get_collection(\"b12\")]\n    elif strategy == \"b13_14\":\n        logits = net.b14\n        param_groups[\"b13_14\"] = [tf.get_collection(\"b13_14_weights\"), tf.get_collection(\"b13_14_biases\")]\n    elif strategy == \"fine_tuned\":\n        logits = net.b15\n        param_groups[\"vgg\"] = [tf.get_collection(\"vgg_weights\"), tf.get_collection(\"vgg_biases\")]\n        param_groups[\"b11\"] = [tf.get_collection(\"b11_weights\"), tf.get_collection(\"b11_biases\")]\n        param_groups[\"b12\"] = [tf.get_collection(\"b12\")]\n        param_groups[\"b13_14\"] = [tf.get_collection(\"b13_14_weights\"), tf.get_collection(\"b13_14_biases\")]\n    else:\n        raise ValueError(\"Unknown training strategy '{}'\".format(strategy))\n    return logits, param_groups\n\n\ndef main():\n    # Data feeding\n    logger.info(\"Dataset loading\")\n    with tf.device(\"/cpu:0\"):\n        train_data = dataset.Dataset(conf.train_data_path,\n                                     conf.batch_size, type=dataset.TRAIN)\n        val_data = dataset.Dataset(conf.val_data_path,\n                                   conf.val_batch_size, type=dataset.VAL)\n    phase_train = 
tf.placeholder(dtype=tf.bool, name=\"phase_train\")\n images, labels = tf.cond(phase_train,\n lambda: (train_data.images, train_data.labels),\n lambda: (val_data.images, val_data.labels))\n\n # Network building\n net = model_build.DeepParseNet(conf.vgg_param_path)\n logger.info(\"Network building\")\n if conf.action == \"train\" or conf.action == \"profiling\":\n is_train = True\n else:\n is_train = False\n net.build(images, conf.m, conf.n, conf.k, conf.num_classes, is_train=is_train, debug=debug)\n if debug:\n net.check_param()\n utils.show_all_variable()\n\n # Training setting\n controller = Controller(phase_train)\n controller.set_net(net)\n logits, param_groups = logits_params(net, conf.strategy)\n loss = model_build.loss(logits, labels, conf.num_classes, scope=conf.strategy)\n\n if conf.action == \"train\":\n controller.main_train(logits, loss, param_groups, labels, scope=conf.strategy)\n elif conf.action == \"profiling\":\n controller.profiling(logits, loss, param_groups, labels)\n elif conf.action == \"val\":\n controller.val(logits, loss, labels, from_train=False)\n else: # TODO(meijieru): implement test\n raise ValueError(\"Unsupport action\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104146263","text":"A = 'A'\nB = 'B'\nC = 'C'\nD = 'D'\n\nEnvironment = {\n A: 'Dirty',\n B: 'Dirty',\n C: 'Dirty',\n D: 'Dirty',\n 'Current': A\n}\n\n\ndef REFLEX_VACUUM_AGENT(loc_st): # Determine action\n if loc_st[1] == 'Dirty':\n return 'Suck'\n if loc_st[0] == A:\n return 'Right'\n if loc_st[0] == B:\n return 'Down'\n if loc_st[0] == C:\n return 'Left'\n if loc_st[0] == D:\n return 'Up'\n\n\ndef Sensors(): # Sense Environment\n location = Environment['Current']\n return (location, Environment[location])\n\n\ndef Actuators(action): # Modify Environment\n location = Environment['Current']\n if action == 'Suck':\n Environment[location] = 'Clean'\n elif action == 'Right' and location == A:\n Environment['Current'] = B\n elif action == 'Down' and location == B:\n Environment['Current'] = C\n elif action == 'Left' and location == C:\n Environment['Current'] = D\n elif action == 'Up' and location == D:\n Environment['Current'] = A\n\n\ndef run(n): # run the agent through n steps\n for _ in range(1, n):\n (location, status) = Sensors() # Sense Environment before action\n print(location + \" \" + status)\n action = REFLEX_VACUUM_AGENT(Sensors())\n Actuators(action)\n (location, status) = Sensors() # Sense Environment after action\n print(\"Doing \" + action + \" at \" + location +\n \" - It's now \" + status + \"\\n\")\n\n\nif __name__ == '__main__':\n run(10)\n","sub_path":"1/justReflex.py","file_name":"justReflex.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"143927081","text":"import sys\nimport numpy as np\nimport pyproj\nimport geopy\nimport gdal\nimport osr\nfrom math import sqrt,atan,pi\nfrom PIL import Image\n\n\ndef read_img(img):\n\timg = Image.open(img)\n\treturn np.array(img)\n\ndef calc_bbox(center_lat,center_lon,width,height):\n\t#Get WKT Polygon\n\tgeod = pyproj.Geod(ellps='WGS84')\n\n\t# width = 15000. # m\n\t# height = 15000. 
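The tf.case learning-rate schedule in the main.py record above picks the first true predicate, so its breakpoints are easiest to audit rewritten as plain Python. Note that steps in [4000, 6000) match no predicate and fall through to the default scale of 1.0, which may be unintended; `warm_up` and `base_lr` below are placeholder values, not the real config.

```python
def lr_scale(step, warm_up=500):
    # mirrors tf.case(..., exclusive=False): first true predicate wins
    if step < warm_up:
        return 0.5
    if step < 2000:
        return 1.0
    if step < 4000:
        return 0.1
    if step >= 6000:
        return 0.01
    return 1.0  # default branch: hit for 4000 <= step < 6000

base_lr = 0.01  # placeholder
for step in (0, 1000, 3000, 5000, 7000):
    print(step, base_lr * lr_scale(step))
```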
# m\n\trect_diag = sqrt( width**2 + height**2 )\n\n\t# center_lon = -45.00655\n\t# center_lat = -22.68918\n\n\tazimuth1 = atan(width/height)\n\tazimuth2 = atan(-width/height)\n\tazimuth3 = atan(width/height)+pi # first point + 180 degrees\n\tazimuth4 = atan(-width/height)+pi # second point + 180 degrees\n\n\tpt1_lon, pt1_lat, _ = geod.fwd(center_lon, center_lat, azimuth1*180/pi, rect_diag)\n\tpt2_lon, pt2_lat, _ = geod.fwd(center_lon, center_lat, azimuth2*180/pi, rect_diag)\n\tpt3_lon, pt3_lat, _ = geod.fwd(center_lon, center_lat, azimuth3*180/pi, rect_diag)\n\tpt4_lon, pt4_lat, _ = geod.fwd(center_lon, center_lat, azimuth4*180/pi, rect_diag)\n\n\twkt_poly = [[pt1_lon, pt1_lat], [pt2_lon, pt2_lat], [pt3_lon, pt3_lat], [pt4_lon, pt4_lat]]\n\n\t## Coordinates\n\ttop_right = np.asarray(wkt_poly[0])\n\ttop_left = np.asarray(wkt_poly[1])\n\tbot_left = np.asarray(wkt_poly[2])\n\tbot_right = np.asarray(wkt_poly[3])\n\n\treturn [bot_left.min(),bot_left.max(),top_right.min(),top_right.max()]\n\ndef create_tiff(data,bbox):\n BBOX = bbox\n\n xres = abs(BBOX[0]-BBOX[2]) / data.shape[1]\n yres = abs(BBOX[1]-BBOX[3]) / data.shape[0]\n\n geotransform = (BBOX[0], xres, 0, BBOX[3], 0, -yres)\n \n # create the 3-band raster file\n dst_ds = gdal.GetDriverByName('GTiff').Create(img_dir[:-4]+'.tif', \n data.shape[1], data.shape[0], 1, gdal.GDT_Float32)\n\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference() # establish encoding\n srs.ImportFromEPSG(4326) # WGS84 lat/long\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(data) # write a-band to the raster\n dst_ds.FlushCache() # write to disk\n dst_ds = None\n\n print('Image Converted!!!')\n\n\nimg_dir = str(sys.argv[1])\nprint(img_dir[:-4])\nlat_ = float(sys.argv[2])\nlon_ = float(sys.argv[3])\nwidth_ = int(sys.argv[4])\nheight_ = int(sys.argv[5])\n\nrimg = read_img(img_dir)\nbbox_ = calc_bbox(lat_,lon_,width_,height_)\ncreate_tiff(rimg,bbox_)\n","sub_path":"src/img2geo.py","file_name":"img2geo.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"114344909","text":"from django.forms import ModelForm\nfrom django import forms\nfrom test_app.models import Users\nfrom django.core.exceptions import ValidationError\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\n\n\nclass AddUser(ModelForm):\n class Meta:\n model = Users\n fields = ['name', 'email', 'phone', 'mobile_phone', 'status']\n\n def __init__(self, *args, **kwargs):\n super(AddUser, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'add_user'\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.form_action = ''\n self.helper.add_input(Submit('submit', 'Create'))\n super(AddUser, self).__init__(*args, **kwargs)\n\n\nclass EditUser(ModelForm):\n class Meta:\n model = Users\n fields = ['name', 'email', 'phone', 'mobile_phone', 'status', 'user_courses']\n widgets = {\n 'user_courses': forms.SelectMultiple(),\n 'name': forms.TextInput(attrs={'readonly':'readonly'})\n }\n\n def __init__(self, *args, **kwargs):\n super(EditUser, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'edit_user'\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.form_action = ''\n self.helper.add_input(Submit('submit', 'Save'))\n\n super(EditUser, self).__init__(*args, 
**kwargs)\n\n    def clean(self):\n        usrs_crs = self.cleaned_data.get('user_courses')\n        if usrs_crs and usrs_crs.count() > 5:\n            raise ValidationError('Maximum 5 courses are allowed.')\n        return self.cleaned_data\n\n\n\n","sub_path":"test_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"428407648","text":"from tkinter import *\r\nfrom tkinter.ttk import Scale\r\nfrom tkinter import colorchooser\r\nfrom tkinter import filedialog, messagebox\r\nfrom PIL import ImageGrab  # from pillow package\r\n\r\n\r\nclass paint(object):\r\n    DEFAULT_PEN_SIZE = 10.0\r\n    \r\n    def __init__(self):\r\n        self.root= Tk()\r\n        self.root.title(\"My Paint\")\r\n        self.root.geometry(\"800x500\")\r\n        self.size=Canvas(self.root, width=800, height=500)\r\n        self.root.configure(bg=\"white\")\r\n\r\n        self.pen_color=\"black\"\r\n        self.eraser_color=\"white\"\r\n        \r\n        \r\n        self.color_frame=LabelFrame(self.root, text=\"Color\", font=('arial', 15, 'bold'), bd=5, relief=RIDGE, bg=\"#6FA5F5\")\r\n        self.color_frame.place(x=10, y=400, width=240, height=90)\r\n\r\n        colors=['white', 'black', 'brown','red', 'orange','yellow','green', 'blue', '#3E1C91', '#F20A85']\r\n        i=j=0\r\n        for color in colors:  #col is value of color\r\n            Button(self.color_frame, bg=color, bd=2, relief=RIDGE, width=5, command=lambda col= color: self.select_color(col)).grid(row=i, column=j)\r\n            j+=1\r\n            if j==5:\r\n                j=0\r\n                i=1\r\n\r\n        self.edit_color_button=Button(self.root, text=\"Edit Color\",font=('arial', 11, 'bold'), bd=3, bg=\"#6FA5F5\", command=self.edit_color, width=12,height=2, relief=RIDGE)\r\n        self.edit_color_button.place(x=255, y=400)\r\n\r\n        self.eraser_button=Button(self.root, text=\"Eraser\",font=('arial', 11, 'bold'), bd=3, bg=\"#6FA5F5\", command=self.eraser, width=12,height=2, relief=RIDGE)\r\n        self.eraser_button.place(x=255, y=445)\r\n\r\n        self.clear_button=Button(self.root, text=\"Clear\",font=('arial', 11, 'bold'), bd=3, bg=\"#6FA5F5\", command=lambda : self.canvas.delete(\"all\"), width=12,height=2, relief=RIDGE)\r\n        self.clear_button.place(x=355, y=400)\r\n\r\n        self.save_button=Button(self.root, text=\"Save\",font=('arial', 11, 'bold'), bd=3, bg=\"#6FA5F5\", command=self.save_paint, width=12,height=2, relief=RIDGE)\r\n        self.save_button.place(x=355, y=445)\r\n\r\n        \r\n\r\n        self.pen_size_scale_frame=LabelFrame(self.root, text=\"size\", bd=5, bg=\"#6FA5F5\", font=('areal',15,'bold'), relief=RIDGE)\r\n        self.pen_size_scale_frame.place(x=480, y=400, height=70, width=250)\r\n\r\n        self.pen_size=Scale(self.pen_size_scale_frame, orient=HORIZONTAL, from_=0, to=50, length=220)\r\n        self.pen_size.set(1)\r\n        self.pen_size.grid(row=0, column=1, padx=15)\r\n\r\n        self.canvas=Canvas(self.root, bg='white', bd=5, relief=GROOVE, height=380, width=787)\r\n        self.canvas.place(x=0, y=0)\r\n\r\n        #self.setup()\r\n        self.canvas.bind('<B1-Motion>', self.paint)\r\n        
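create_tiff() in the img2geo.py record above builds a GDAL geotransform of the shape (west, xres, 0, north, 0, -yres). The mapping from pixel indices to coordinates is plain affine arithmetic; a standalone sketch with illustrative numbers (not taken from a real image):

```python
west, north = 13.0, 52.6        # top-left corner (lon, lat), made up
xres, yres = 0.001, 0.0008      # degrees per pixel, made up
gt = (west, xres, 0, north, 0, -yres)   # same layout GDAL expects

def pixel_to_geo(gt, col, row):
    # GDAL convention: x = gt[0] + col*gt[1] + row*gt[2], similarly for y
    lon = gt[0] + col * gt[1] + row * gt[2]
    lat = gt[3] + col * gt[4] + row * gt[5]
    return lon, lat

print(pixel_to_geo(gt, 0, 0))     # (13.0, 52.6) -- the top-left corner
print(pixel_to_geo(gt, 100, 50))  # 100 px east and 50 px south of it
```

The negative y-resolution is what makes row indices increase southward while latitude decreases.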
\r\n\r\n        self.root.mainloop()\r\n\r\n    \r\n    # dragging of mouse\r\n    def paint(self, event):\r\n        x1, y1=(event.x-2), (event.y-2)\r\n        x2, y2=(event.x+2),(event.y+2)\r\n\r\n        self.canvas.create_oval(x1, y1, x2, y2, fill=self.pen_color, outline=self.pen_color, width=self.pen_size.get())\r\n        \r\n        \r\n    def select_color(self, col):\r\n        self.pen_color=col\r\n\r\n    def eraser(self):\r\n        self.pen_color=self.eraser_color\r\n\r\n    def edit_color(self):\r\n        color=colorchooser.askcolor()\r\n        self.pen_color=color[1]\r\n\r\n    def save_paint(self):\r\n        try:\r\n            #self.canvas.update()\r\n            filename = filedialog.asksaveasfilename(defaultextension='.jpg')\r\n            #print(filename)\r\n            x = self.root.winfo_rootx() + self.canvas.winfo_x()\r\n            #print(x, self.canvas.winfo_x())\r\n            y = self.root.winfo_rooty() + self.canvas.winfo_y()\r\n            #print(y)\r\n            x1 = x + self.canvas.winfo_width()\r\n            #print(x1)\r\n            y1 = y + self.canvas.winfo_height()\r\n            #print(y1)\r\n            ImageGrab.grab().crop((x, y, x1, y1)).save(filename)\r\n            messagebox.showinfo('paint says', 'image is saved as ' + str(filename))\r\n            \r\n        except:\r\n            messagebox.showerror(\"paint says\", \"unable to save image, \\n something went wrong\")\r\n\r\n\r\nif __name__=='__main__':\r\n    paint()\r\n    \r\n","sub_path":"py_assignments/Assignment_6(Paint).py","file_name":"Assignment_6(Paint).py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"364233046","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nimport base64\nimport tkinter\n\nfrom io import BytesIO\nfrom PIL import Image as PILImage\n\n## NO ADDITIONAL IMPORTS ALLOWED!\n\nclass Image:\n    def __init__(self, width, height, pixels):\n        self.width = width\n        self.height = height\n        self.pixels = pixels\n\n    def get_pixel(self, x, y):\n        return self.pixels[(y-1)*self.width + x-1] \n    \n    def set_pixel(self, x, y, c):\n        self.pixels[(y-1)*self.width + x-1] = c\n\n    def apply_per_pixel(self, func):\n        result = Image.new(self.width, self.height)\n        for x in range(1,result.width+1):\n            for y in range(1,result.height+1):\n                color = self.get_pixel(x, y)\n                newcolor = func(color)\n                result.set_pixel(x, y, newcolor)\n        return result\n\n    def inverted(self):\n        return self.apply_per_pixel(lambda c: 255-c)\n    \n    # Handle the edge effect.\n    def get_extended_pixel(self, m):\n        \"\"\"return (extended image)\"\"\"\n        # row extension\n        res = []\n        for j in range(self.height):\n            a = 0 + j*self.width\n            b = self.width + j*self.width\n            row = self.pixels[a:b] # extract each row\n            new_row = [row[0]]*m + row + [row[-1]]*m \n            res = res + new_row\n        # add upward and downward rows\n        first_row = res[:self.width+2*m]\n        last_row = res[-(self.width+2*m):]\n        \n        res = first_row*m + res + last_row*m\n        return Image(self.width+2*m, self.height+2*m, res)\n    \n    # Clip badly behaved pixel values.\n    def clip(self,p):\n        if p < 0:\n            return 0\n        if p > 255:\n            return 255\n        else:\n            return int(round(p))\n\n    # Apply 3*3 kernel.\n    def kernel_3(self, ker):\n        using_im = self.get_extended_pixel(1)\n        result = Image.new(self.width, self.height)\n        for x in range(1,result.width+1):\n            for y in range(1,result.height+1):\n                # create subimage\n                sub_im = []\n                for j in range(-1,2):\n                    for i in range(-1,2):\n                        sub_im.append(using_im.get_pixel(x+1+i, y+1+j))\n                newcolor = 0\n                for k in range(len(ker)):\n                    newcolor = newcolor + ker[k]*sub_im[k]\n                result.set_pixel(x, y, self.clip(newcolor))\n        return result\n    \n    # Apply arbitrary kernel.\n    def apply_kernel(self, ker, if_clip = True):\n        ker_size = math.sqrt(len(ker))\n        m = 
int((ker_size-1)/2) # extend number\n using_im = self.get_extended_pixel(m)\n result = Image.new(self.width, self.height)\n for x in range(1,result.width+1):\n for y in range(1,result.height+1):\n # create subimage\n sub_im = []\n for j in range(-m,m+1):\n for i in range(-m,m+1):\n sub_im.append(using_im.get_pixel(x+m+i, y+m+j))\n newcolor = 0\n for k in range(len(ker)):\n newcolor = newcolor + ker[k]*sub_im[k]\n if if_clip:\n result.set_pixel(x, y, self.clip(newcolor))\n else:\n result.set_pixel(x, y, newcolor)\n return result\n \n def blurred(self, n):\n ker = [1/(n*n)]*(n*n) # blur kernel\n return self.apply_kernel(ker)\n \n def sharpened(self, n):\n ker = [-1/(n*n)]*(n*n)\n center = int((n*n-1)/2)\n ker[center] = 2 - 1/(n*n)\n return self.apply_kernel(ker)\n \n def edges(self):\n K_x = [-1,0,1,\n -2,0,2,\n -1,0,1] \n O_x = self.apply_kernel(K_x, if_clip=False)\n K_y = [-1,-2,-1,\n 0,0,0,\n 1,2,1]\n O_y = self.apply_kernel(K_y, if_clip=False)\n \n result = Image.new(self.width, self.height)\n for x in range(1,self.width+1):\n for y in range(1,self.height+1):\n O_x_pixel = O_x.get_pixel(x, y)\n O_y_pixel = O_y.get_pixel(x, y)\n newcolor = math.sqrt(O_x_pixel**2 + O_y_pixel**2)\n result.set_pixel(x, y, self.clip(newcolor))\n return result\n \n # Helper functions\n def get_sum(self, L):\n ans = 0\n for i in L:\n ans = ans + i\n return ans\n \n def get_minIdx(self, L):\n min = L[0]\n min_idx = 0\n for i in range(len(L)):\n if L[i] < min:\n min = L[i]\n min_idx = i\n return min_idx\n \n def del_min(self, L, idx):\n L1 = L[:idx]\n L2 = L[idx+1:]\n return L1 + L2\n \n def get_row(self, j):\n return self.pixels[(j-1)*self.width:j*self.width]\n \n def get_col(self, i):\n ans = []\n for k in range(1, self.height+1):\n row = self.get_row(k)\n ans.append(row[i-1])\n return ans\n # End help functions\n \n def rescale(self):\n # computer energy\n energy_im = self.edges()\n energy = []\n for x in range(1, self.width+1):\n col = energy_im.get_col(x)\n energy.append(self.get_sum(col)) \n # get min index\n min_idx = self.get_minIdx(energy)\n # delete min column\n new_pixels = []\n for y in range(1, self.height+1):\n row = self.get_row(y)\n new_row = self.del_min(row, min_idx)\n new_pixels = new_pixels + new_row\n return Image(self.width-1, self.height, new_pixels)\n \n def rescale_pic(self, num):\n im = self.rescale()\n for i in range(num-1):\n im = im.rescale()\n return im \n \n \n # Below this point are utilities for loading, saving, and displaying\n # images, as well as for testing.\n\n def __eq__(self, other):\n return all(getattr(self, i) == getattr(other, i)\n for i in ('height', 'width', 'pixels'))\n\n @classmethod\n def load(cls, fname):\n \"\"\"\n Loads an image from the given file and returns an instance of this\n class representing that image. 
    # Below this point are utilities for loading, saving, and displaying\n    # images, as well as for testing.\n\n    def __eq__(self, other):\n        return all(getattr(self, i) == getattr(other, i)\n                   for i in ('height', 'width', 'pixels'))\n\n    @classmethod\n    def load(cls, fname):\n        \"\"\"\n        Loads an image from the given file and returns an instance of this\n        class representing that image. This also performs conversion to\n        grayscale.\n\n        Invoked as, for example:\n            i = Image.load('test_images/cat.png')\n        \"\"\"\n        with open(fname, 'rb') as img_handle:\n            img = PILImage.open(img_handle)\n            img_data = img.getdata()\n            if img.mode.startswith('RGB'):\n                pixels = [round(.299*p[0] + .587*p[1] + .114*p[2]) for p in img_data]\n            elif img.mode == 'LA':\n                pixels = [p[0] for p in img_data]\n            elif img.mode == 'L':\n                pixels = list(img_data)\n            else:\n                raise ValueError('Unsupported image mode: %r' % img.mode)\n            w, h = img.size\n            return cls(w, h, pixels)\n\n    @classmethod\n    def new(cls, width, height):\n        \"\"\"\n        Creates a new blank image (all 0's) of the given height and width.\n\n        Invoked as, for example:\n            i = Image.new(640, 480)\n        \"\"\"\n        return cls(width, height, [0 for i in range(width*height)])\n\n    def save(self, fname, mode='PNG'):\n        \"\"\"\n        Saves the given image to disk or to a file-like object.  If fname is\n        given as a string, the file type will be inferred from the given name.\n        If fname is given as a file-like object, the file type will be\n        determined by the 'mode' parameter.\n        \"\"\"\n        out = PILImage.new(mode='L', size=(self.width, self.height))\n        out.putdata(self.pixels)\n        if isinstance(fname, str):\n            out.save(fname)\n        else:\n            out.save(fname, mode)\n        out.close()\n\n    def gif_data(self):\n        \"\"\"\n        Returns a base 64 encoded string containing the given image as a GIF\n        image.\n\n        Utility function to make show_image a little cleaner.\n        \"\"\"\n        buff = BytesIO()\n        self.save(buff, mode='GIF')\n        return base64.b64encode(buff.getvalue())\n\n    def show(self):\n        \"\"\"\n        Shows the given image in a new Tk window.\n        \"\"\"\n        global WINDOWS_OPENED\n        if tk_root is None:\n            # if tk hasn't been properly initialized, don't try to do anything.\n            return\n        WINDOWS_OPENED = True\n        toplevel = tkinter.Toplevel()\n        # highlightthickness=0 is a hack to prevent the window's own resizing\n        # from triggering another resize event (infinite resize loop).  see\n        # https://stackoverflow.com/questions/22838255/tkinter-canvas-resizing-automatically\n        canvas = tkinter.Canvas(toplevel, height=self.height,\n                                width=self.width, highlightthickness=0)\n        canvas.pack()\n        canvas.img = tkinter.PhotoImage(data=self.gif_data())\n        canvas.create_image(0, 0, image=canvas.img, anchor=tkinter.NW)\n        def on_resize(event):\n            # handle resizing the image when the window is resized\n            # the procedure is:\n            # * convert to a PIL image\n            # * resize that image\n            # * grab the base64-encoded GIF data from the resized image\n            # * put that in a tkinter label\n            # * show that image on the canvas\n            new_img = PILImage.new(mode='L', size=(self.width, self.height))\n            new_img.putdata(self.pixels)\n            new_img = new_img.resize((event.width, event.height), PILImage.NEAREST)\n            buff = BytesIO()\n            new_img.save(buff, 'GIF')\n            canvas.img = tkinter.PhotoImage(data=base64.b64encode(buff.getvalue()))\n            canvas.configure(height=event.height, width=event.width)\n            canvas.create_image(0, 0, image=canvas.img, anchor=tkinter.NW)\n        # finally, bind that function so that it is called when the window is\n        # resized (the '<Configure>' event).\n        canvas.bind('<Configure>', on_resize)\n        toplevel.bind('<Configure>', lambda e: canvas.configure(height=e.height, width=e.width))\n\n\ntry:\n    tk_root = tkinter.Tk()\n    tk_root.withdraw()\n    tcl = tkinter.Tcl()\n    def reafter():\n        tcl.after(500,reafter)\n    tcl.after(500,reafter)\nexcept:\n    tk_root = None\nWINDOWS_OPENED = False\n\nif __name__ == '__main__':\n    # code in this block will only be run when you explicitly run your script,\n    # and not when the tests are being run.  this is a good place for\n    # generating images, etc.\n
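#    For example (assuming the bundled test image exists):\n#    im = Image.load('test_images/cat.png')\n#    im.inverted().show()\n#    im.rescale_pic(20).show()  # remove the 20 lowest-energy columns\n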
#    im = Image.load('test_images/twocats.png')\n#    \n#    K_x = [-1,0,1,\n#           -2,0,2,\n#           -1,0,1] \n#    O_x = im.apply_kernel(K_x)\n#    O_x.show()\n#    K_y = [-1,-2,-1,\n#           0,0,0,\n#           1,2,1]\n#    O_y = im.apply_kernel(K_y)\n#    O_y.show()\n    \n    \n    # the following code will cause windows from Image.show to be displayed\n    # properly, whether we're running interactively or not:\n    if WINDOWS_OPENED and not sys.flags.interactive:\n        tk_root.mainloop()\n","sub_path":"lab1/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"292891553","text":"# The Python interpreter runs top to bottom and evaluates right to left: the value on the right of the = is computed first, then assigned to the variable on the left\n# Identifiers -- the names you choose; anything you name yourself is an identifier\n# Rules for identifiers\n# Made up of digits, letters and underscores\n# Must not start with a digit\n# Letters and digits may be separated by underscores for readability\n# Choose names that explain themselves\n# Must not be a keyword\n# #['False', 'None', 'True', 'and', 'as', 'assert', 'async', 'await', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']\nimport keyword\nprint(keyword.kwlist)\n\n# class_1129_base\n\n# Comments: single-line, and multi-line comments with paired triple quotes ''' , \"\"\"\n# Indentation controls blocks\n# a=10\n# if a==1:\n#     print ('the value of a is 1')\n# else:\n#     print ('the value of a is not 1')\n# print ('hello word !') # function: writes output to the console\n# a=input('please enter a value') # function: reads data from the console\n# print(a)\n\n# File path: D:\\PyCharm Community Edition 2017.2.3\\python12\\class_190817\\class_1.py\n\n# Variable name: an identifier that names a stored piece of data\n# The first appearance of a variable is its definition; later appearances use that variable\n# Data of many types: numbers, strings, lists, tuples, dicts, sets, etc.\n# a=1\n# b=1\n# print(id(a))\n# print(id(b))\n\n# a=1\n# b=2\n# print(a)\n\n# Numbers: integers and floats\n# int: integer (signed)\na=10\na=100\n# type(data) is the function that reports a value's type\nprint(type(a))\n# float: a numeric literal with a decimal point and no quotes is a float\na=10.0\nprint(type(a))\n# Strings: str\n# Anything wrapped in paired single, double or triple quotes is a string\nc='10'\nd=\"hello yutq.test\"\ne=\"\"\"\"10.00000\"\"\"\nprint(type(e))\n## If the string itself must contain a single quote, use double quotes on the outside\n## If the string itself must contain a double quote, use single quotes on the outside\nx='\"hello test\"'\nprint(x)\na=1\na=4\nprint(a)# keeps the value of the last assignment\n\n\n# Summary of type():\n# 1. It shows the type of a variable or a piece of data\n# 2. Call it as type(variable) or type(data)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"class_190817/class_1.py","file_name":"class_1.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"523133359","text":"# ReadMeGenerator\n# Created by JKChang\n# 10/05/2017, 19:59\n# Description: add index of the directory\n\nimport os\n\n\ndef mappingFormat(filepath):\n    if filepath.endswith('.txt'):\n        return 'Text'\n    elif filepath.endswith('.py'):\n        return 'Python'\n    elif filepath.endswith('.java'):\n        return 'Java'\n    else:\n        pass\n\n\ndef wMarkovTable(number, name, link, language, tag):\n    return '|' + number + \\\n           '|[' + name + ']' + '(' + link + ')|' + \\\n           language + '|' + tag + '|'\n\n\ndef wMarkovTableHead(listofHead):\n    res = ['', '']\n    for x in listofHead:\n        if x != 'link':\n            res[0] += (x + '|')\n            res[1] += ('--' + '|')\n    return '|' + res[0] + '\\n' + '|' + res[1]\n
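\n\n# For reference, with made-up arguments,\n#   wMarkovTable('1', 'Foo', './Foo.py', 'Python', 'Demo')\n# returns the Markdown table row  |1|[Foo](./Foo.py)|Python|Demo|\n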
\n\n# folderPath = r'/Users/jkchang/Github/Testfolder/'\n# folderPath = '/Users/jkchang/Github/Python/Runood_100/'\nfolderPath = '/Users/jkchang/Github/Python/100 Python/'\nrmPath = folderPath + '/README.md'\ntitle = ['#', 'Title', 'Language', 'Description']\n\n# ----------- For each file----------------------\npathList = []\nbody = []\nnumber = 0\n\nfor path, subdirs, files in os.walk(folderPath):\n    for filename in files:\n        if filename.endswith(('.txt', '.py', '.java')):\n            number += 1\n            name = os.path.splitext(filename)[0]\n            filePath = os.path.join(path, filename)\n            link = '.' + filePath[len(folderPath) - 1:]\n            language = mappingFormat(filename)\n\n            # -- tag --\n            tag = ''\n            with open(filePath) as f:\n                line = f.readlines()\n                for content in line:\n                    if 'Tag' in content:\n                        tag = content[content.index(':') + 1:].strip().capitalize()\n\n            body.append(wMarkovTable(str(number), name, link, language, tag))\n\n# ----------- For each file----------------------\n\n\nheader = wMarkovTableHead(title)\nbody = '\\n'.join(body)\nprint(header + '\\n' + body)\n#\n# with io.open(rmPath, 'w', encoding=\"utf-8\") as f:\n#     f.write(unicode(header + '\\n' + body))\n","sub_path":"CJK/temp/ReadMeGenerator.py","file_name":"ReadMeGenerator.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"96896292","text":"from mopidy.core import Core\n\nimport pytest\n\nfrom tests import dummy_audio, dummy_backend, dummy_mixer\n\n\n@pytest.fixture\ndef config():\n    return {\n        'core': {\n            'max_tracklist_length': 10000,\n        },\n        'mpris': {\n            'desktop_file': '/tmp/mopidy.desktop',\n        },\n    }\n\n\n@pytest.fixture\ndef audio():\n    actor = dummy_audio.create_proxy()\n    yield actor\n    actor.stop()\n\n\n@pytest.fixture\ndef backend(audio):\n    actor = dummy_backend.create_proxy(audio=audio)\n    yield actor\n    actor.stop()\n\n\n@pytest.fixture\ndef mixer():\n    actor = dummy_mixer.create_proxy()\n    yield actor\n    actor.stop()\n\n\n@pytest.fixture\ndef core(config, backend, mixer):\n    actor = Core.start(config=config, backends=[backend], mixer=mixer).proxy()\n    yield actor\n    actor.stop()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"27043663","text":"from matplotlib.pylab import * \r\n \r\nx = linspace(-4, 4, 200) \r\nf1 = power(10, x) \r\nf2 = power(e, x) \r\nf3 = power(2, x) \r\n \r\nplot(x, f1, 'r', x, f2, 'b', x, f3, 'g', linewidth=2) \r\naxis([-4, 4, -0.5, 8])\r\ntext(1, 7.5, r'$10^x$', fontsize=16)\r\ntext(2.2, 7.5, r'$e^x$', fontsize=16)\r\ntext(3.2, 7.5, r'$2^x$', fontsize=16)\r\ntitle('A simple example', fontsize=16)\r\n \r\nsavefig('power.png', dpi=75)\r\nshow() \r\n ","sub_path":"2 第三方库学习/2 库函数/draw_picture.py","file_name":"draw_picture.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"513750145","text":"\r\nfrom six.moves import cPickle\r\nimport scipy.io\r\nimport os.path\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as mticks\r\nimport math\r\n\r\nimport create_random_results_baseline\r\nimport create_uncertainty_greedy_results\r\nimport create_uncertainty_DS_optimization_results\r\nimport create_OCS_results\r\n\r\nimport weights_random_results_baseline\r\nimport weights_uncertainty_greedy_results\r\nimport weights_uncertainty_DS_optimization_results\r\nimport weights_OCS_results\r\n\r\n### VARIABLES - ONLY FOR THE PURPOSE OF TESTING ###\r\nN_samples = 100\r\nfeature_based_func_type = 2\r\ngamma = 100\r\nnoise_level = 0.1\r\nthreshold_zero_flag = 1\r\nfeedback_model = 1\r\nverbose = 1\r\nN_bins = 5\r\n\r\n### RUN ###\r\n\r\n\r\n### ALSO RUN DISCRETIZED SCORES (make sure what exactly the user feedback is)\r\n\r\n\r\n### GLOBALIZE VARIABLES : file paths\r\n\r\n### LOAD ###\r\n\r\nproc_data = {} #empty dictionary\r\nscipy.io.loadmat('processed_data.mat', mdict=proc_data)\r\n 
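# Note: loadmat(..., mdict=proc_data) fills the existing dict in place, so the\r\n# .mat variables are available by name afterwards, e.g. the\r\n# 'V_rouge_optimized_summary_score' array read further below.\r\n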
\r\ndata_dir = 'data_folder/'\r\n\r\n#random_results_baseline file\r\nrandom_name = 'RandRes_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model)\r\nopfile = open(data_dir + random_name + '.save', 'rb')\r\nrand_result_runs = cPickle.load(opfile)\r\nrand_result_runs = np.array(list(rand_result_runs.values()))\r\nrand_result = np.mean(rand_result_runs, axis=1) #mean over 10 random runs\r\nrand_err_result = np.std(rand_result_runs, axis=1) #std dev over 10 random runs\r\nopfile.close()\r\n\r\n#uncertainty_greedy_result file\r\ngreedy_name = 'UnctyGrdy_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model)\r\nopfile = open(data_dir + greedy_name + '.save', 'rb')\r\ngreedy_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\ngreedy_result = np.array(list(greedy_result.values()))\r\nopfile.close()\r\n\r\n#uncertainty_DS_optimization file\r\nACS_name = 'UnctyRandGrdy_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model)\r\nopfile = open(data_dir + ACS_name + '.save', 'rb')\r\nACS_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\nACS_result = np.array(list(ACS_result.values()))\r\nopfile.close()\r\n\r\n#OCS files\r\nOCS_0_name = 'UnctyGrdyAlpha_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model) + '_alpha_' + str(0)\r\nopfile = open(data_dir + OCS_0_name + '.save', 'rb')\r\nOCS_0_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\nOCS_0_result = np.array(list(OCS_0_result.values()))\r\nopfile.close()\r\n\r\n#OCS files\r\nOCS_0_1_name = 'UnctyGrdyAlpha_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model) + '_alpha_' + str(0.1)\r\nopfile = open(data_dir + OCS_0_1_name + '.save', 'rb')\r\nOCS_0_1_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\nOCS_0_1_result = np.array(list(OCS_0_1_result.values()))\r\nopfile.close()\r\n\r\nOCS_1_name = 'UnctyGrdyAlpha_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model) + '_alpha_' + str(1)\r\nopfile = open(data_dir + OCS_1_name + '.save', 'rb')\r\nOCS_1_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\nOCS_1_result = np.array(list(OCS_1_result.values()))\r\nopfile.close()\r\n\r\nOCS_10_name = 'UnctyGrdyAlpha_Res_Nsmps_' + str(N_samples) + '_FeaFncTp_' + str(feature_based_func_type) + '_ga_' + str(gamma) + '_NsLv_' + str(noise_level) + '_Thr0Flg_' + str(threshold_zero_flag) + '_FdMdl_' + str(feedback_model) + '_alpha_' + str(10)\r\nopfile = open(data_dir + OCS_10_name + '.save', 'rb')\r\nOCS_10_result = cPickle.load(opfile)['Collected_Uncertainty_Result_Mat']\r\nOCS_10_result = np.array(list(OCS_10_result.values()))\r\nopfile.close()\r\n\r\n\r\n### TAKE MEAN OVER 14 idx AND NORMALIZE ###\r\nbest_result = 
proc_data['V_rouge_optimized_summary_score'][0]\r\n\r\nrand_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nrand_err_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\ngreedy_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nACS_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nOCS_0_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nOCS_0_1_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nOCS_1_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\nOCS_10_mean = np.array([0 for i in range(N_samples)], dtype=float)\r\n\r\nfor idx in range(14):\r\n    normfactor = best_result[idx][0][0]\r\n    rand_mean += rand_result[idx,:]/(14.0*normfactor)\r\n    rand_err_mean += rand_err_result[idx,:]/(14.0*normfactor)\r\n    greedy_mean += greedy_result[idx,:]/(14.0*normfactor)\r\n    ACS_mean += ACS_result[idx,:]/(14.0*normfactor)\r\n    OCS_0_mean += OCS_0_result[idx,:]/(14.0*normfactor)\r\n    OCS_0_1_mean += OCS_0_1_result[idx,:]/(14.0*normfactor)\r\n    OCS_1_mean += OCS_1_result[idx,:]/(14.0*normfactor)\r\n    OCS_10_mean += OCS_10_result[idx,:]/(14.0*normfactor)\r\n    \r\n\r\ns = range(100)\r\nplt.errorbar(s, rand_mean, yerr=rand_err_mean, color=\"pink\", fmt=\"o\", markersize = 3, markeredgecolor=\"pink\", markeredgewidth=0.7, linewidth = 0.7)\r\nbaseline, = plt.plot(rand_mean, color=\"pink\", linestyle=\"-\", linewidth = 0.7, label='Random Baseline')\r\ngreedy, = plt.plot(greedy_mean, color=\"red\", linestyle=\"-\", linewidth = 0.7, label='Greedy')\r\nACS, = plt.plot(ACS_mean, color=\"green\", linestyle=\"-\", linewidth = 0.7, label='ACS')\r\nOCS_0, = plt.plot(OCS_0_mean, color=\"orange\", linestyle=\"-\", linewidth = 0.7, label='OCS alpha=0')\r\n#OCS_0_1, = plt.plot(OCS_0_1_mean, color=\"purple\", linestyle=\"-\", linewidth = 0.7, label='OCS alpha=0.1')\r\nOCS_1, = plt.plot(OCS_1_mean, color=\"blue\", linestyle=\"-\", linewidth = 0.7, label='OCS alpha=1')\r\nOCS_10, = plt.plot(OCS_10_mean, color=\"purple\", linestyle=\"-\", linewidth = 0.7, label='OCS alpha=10')\r\n#ax.plot(np.mean(Collected_Random_Result_Mat[idx], axis=0) + np.std(Collected_Random_Result_Mat[idx], axis=0), color=\"blue\", linestyle=\"--\")\r\n\r\nplt.legend(handles=[baseline, greedy, ACS, OCS_0, OCS_1, OCS_10], loc=4)\r\n\r\naxes = plt.gca()\r\naxes.set_xlim([0,100])\r\naxes.set_ylim([0.50,1.00])\r\n\r\nplt.xlabel('Iterations')\r\nplt.ylabel('Performance')\r\nplt.title('Image Summarization with V-rouge Feedback')\r\nplt.savefig('VrougeFeedback' + '.eps', dpi=900)\r\n\r\n\r\n\r\n###### reference ######\r\n\r\n## python\r\n#opfile = open(data_dir + output_file_name + '.save', 'rb')\r\n#pycheck = cPickle.load(opfile)\r\n#opfile.close()\r\n#\r\n#### PLOTTING ###\r\n#x = np.arange(0.1, 1.5, 0.1)\r\n#t = [math.sin(x0) for x0 in x]\r\n#td = [2*t0 for t0 in t]\r\n#\r\n#plt.subplot(1,2,1)\r\n#plt.plot(t, linestyle=\"--\")\r\n#plt.plot(td, color=\"red\")\r\n#plt.subplot(1,2,2)\r\n#plt.plot(td, color=\"green\", linestyle=\"-\")\r\n#plt.tight_layout(pad=0.5)\r\n#plt.savefig('test.png', dpi=900)","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"644862743","text":"# Desafio 38 (Challenge 38)\n\nnro1 = int(input('number 1'))\nnro2 = int(input('number 2'))\n\nif nro2 > nro1:\n    print('the first number is less than the second')\nelif nro1 > nro2:\n    print('the first number is greater than the second')\nelse:\n    print('the numbers are equal')\n
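\n# Example: inputs 5 and 3 print 'the first number is greater than the second'.\n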
\n\n","sub_path":"CursoEmVideoExercicios/Desafio038.py","file_name":"Desafio038.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"212193809","text":"\r\nfrom flask import Flask, render_template, request\r\nfrom werkzeug.utils import secure_filename\r\napp = Flask(__name__)\r\nimport os\r\nimport pickle \r\n\r\n\r\n\r\n\r\n\r\n\r\n# Make the WSGI interface available at the top level so wfastcgi can get it.\r\nwsgi_app = app.wsgi_app\r\nclass Movie(object):\r\n\r\n\tdef __init__(self, filename):\r\n\t\tself.filename = filename\r\n\t\tself.review = \"\"\r\n\t\tself.rating = \"\"\r\n\r\n\t\r\n\tdef add_rating(self,input):\r\n\t\tself.rating = f\"{input} Stars\"\r\n\r\n\tdef add_summary(self,input):\r\n\t\tself.review = f\"{input}\"\r\n\r\ndef pickle_objects():\r\n\tmovies=[]\r\n\tpath = os.getcwd() +\"/static/images\"\r\n\tfor filename in os.listdir(path):\r\n\t\tmovies.append(Movie(filename))\r\n\treturn movies\r\n\r\n# build the movie list once at startup so the routes below can use it\r\nmovies = pickle_objects()\r\n# assumed upload location, matching the folder scanned in pickle_objects above\r\napp.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'static/images')\r\n\r\n@app.route('/')\r\ndef hello():\r\n\r\n\treturn render_template(\"movies.html\", movies = movies)\r\n\r\n@app.route('/rate', methods=[\"GET\",\"POST\"])\r\ndef rate():\r\n\r\n\treturn render_template('rate_movie.html')\r\n\r\n@app.route('/upload', methods=[\"POST\",\"GET\"])\r\ndef mainUploader():\r\n\tif request.method == \"POST\":\r\n\t\tfile = request.files[\"file\"]\r\n\t\tfilename = secure_filename(file.filename)\r\n\t\tfile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n\t\tmovies.append(Movie(filename))\r\n\treturn render_template(\"upload.html\", uploaded = movies)\r\n\r\nif __name__ == '__main__':\r\n    import os\r\n    HOST = os.environ.get('SERVER_HOST', 'localhost')\r\n    try:\r\n        PORT = int(os.environ.get('SERVER_PORT', '5555'))\r\n    except ValueError:\r\n        PORT = 5555\r\n    app.run(HOST, PORT)\r\n","sub_path":"movie/Movie_Rater/Movie_Rater/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"162197214","text":"\"\"\"\r\ndp[i] = min( dp[i - square_num_lessthan_i] )\r\nO(n^2) time\r\nIt's a correct solution, but I have no idea why it gets TLE in Python.\r\n\"\"\"\r\nimport math\r\n\r\nclass Solution(object):\r\n    def numSquares(self, n):\r\n        \"\"\"\r\n        :type n: int\r\n        :rtype: int\r\n        \"\"\"\r\n        if n == 0:\r\n            return 0\r\n        dp = [float(\"inf\")] * (n+1)\r\n        dp[0] = 0\r\n        dp[1] = 1\r\n        for i in range(2, n+1):\r\n            tmp = int(math.sqrt(i))\r\n            if i == tmp * tmp:\r\n                dp[i] = 1\r\n                continue\r\n            for j in range(1, tmp+1):\r\n                dp[i] = min( dp[i], dp[i-j*j] + 1 )\r\n        return dp[n]\r\n        \r\n###########################################################################\r\n\"\"\"\r\nBFS\r\nhttps://leetcode.com/discuss/62229/short-python-solution-using-bfs\r\n\"\"\"\r\nclass Solution(object):\r\n    def numSquares(self, n):\r\n        if n < 2:\r\n            return n\r\n        \r\n        squares = [ i * i for i in range(1, int(math.sqrt(n))+1) ]\r\n        queue = {n}\r\n        level = 0\r\n        while queue:\r\n            level += 1\r\n            tmp_q = set()\r\n            for i in queue:\r\n                for square in squares:\r\n                    if i == square:\r\n                        return level\r\n                    if i < square:\r\n                        break\r\n                    tmp_q.add(i - square)\r\n            queue = tmp_q  \r\n        \r\n###########################################################################\r\n\"\"\"\r\nMath: number theory\r\nhttps://leetcode.com/discuss/57066/4ms-c-code-solve-it-mathematically\r\nhttps://leetcode.com/discuss/58056/summary-of-different-solutions-bfs-static-and-mathematics\r\nI'm not expecting to understand....>~<\r\n\"\"\"\r\n
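# Worked example (n = 12): 12 is not a perfect square; stripping factors of 4\r\n# leaves 3, and 3 % 8 == 3 != 7, so the answer is not 4; 3 - 1 = 2 is not a\r\n# perfect square, so the answer is not 2; hence 3 (indeed 12 = 4 + 4 + 4).\r\n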
class Solution(object):\r\n    def numSquares(self, n):\r\n        # Based on Lagrange's Four Square theorem, there are only 4 possible results: 1, 2, 3, 4.\r\n        if self.is_square(n):\r\n            return 1\r\n        \r\n        # The result is 4 if and only if n can be written in the form of 4^k*(8*m + 7). Please refer to Legendre's three-square theorem.\r\n        while n & 3 == 0: # n % 4 == 0\r\n            n >>= 2\r\n        if n & 7 == 7: # n % 8 == 7\r\n            return 4\r\n        \r\n        # Check whether 2 is the result.\r\n        sqrt_n = int(math.sqrt(n))\r\n        for i in range(1, sqrt_n+1):\r\n            if self.is_square(n - i*i):\r\n                return 2\r\n        \r\n        return 3\r\n    \r\n    def is_square(self, n):\r\n        sqrt_n = int(math.sqrt(n))\r\n        return sqrt_n * sqrt_n == n","sub_path":"src/279_PerfectSquares.py","file_name":"279_PerfectSquares.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"223159284","text":"import copy\nimport random\nimport collections.abc\nimport numpy as np\nimport importlib.util\nfrom itertools import tee\n\n\ndef pairwise(iterable):\n    a, b = tee(iterable)\n    next(b, None)\n    return zip(a, b)\n\n\ndef merge_dicts(*dicts):\n    \"\"\"\n    Recursive dict merge.\n    Instead of updating only top-level keys,\n    dict_merge recurses down into dicts nested\n    to an arbitrary depth, updating keys.\n    \"\"\"\n    assert len(dicts) > 1\n\n    dict_ = copy.deepcopy(dicts[0])\n\n    for merge_dict in dicts[1:]:\n        for k, v in merge_dict.items():\n            if (\n                k in dict_ and isinstance(dict_[k], dict)\n                and isinstance(merge_dict[k], collections.abc.Mapping)\n            ):\n                dict_[k] = merge_dicts(dict_[k], merge_dict[k])\n            else:\n                dict_[k] = merge_dict[k]\n\n    return dict_\n\n\ndef set_global_seeds(i):\n    try:\n        import torch\n    except ImportError:\n        pass\n    else:\n        torch.manual_seed(i)\n        torch.cuda.manual_seed_all(i)\n    try:\n        import tensorflow as tf\n    except ImportError:\n        pass\n    else:\n        tf.set_random_seed(i)\n    random.seed(i)\n    np.random.seed(i)\n\n\ndef import_module(name, path):\n    spec = importlib.util.spec_from_file_location(name, path)\n    mod = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(mod)\n    return mod\n\n\ndef boolean_flag(parser, name, default=False, help=None):\n    \"\"\"\n    Add a boolean flag to argparse parser.\n\n    Parameters\n    ----------\n    parser: argparse.Parser\n        parser to add the flag to\n    name: str\n        --<name> will enable the flag, while --no-<name> will disable it\n    default: bool or None\n        default value of the flag\n    help: str\n        help string for the flag\n    \"\"\"\n    dest = name.replace(\"-\", \"_\")\n    parser.add_argument(\n        \"--\" + name,\n        action=\"store_true\",\n        default=default,\n        dest=dest,\n        help=help\n    )\n    parser.add_argument(\"--no-\" + name, action=\"store_false\", dest=dest)\n\n\nclass FrozenClass(object):\n    __isfrozen = False\n\n    def __setattr__(self, key, value):\n        if self.__isfrozen and not hasattr(self, key):\n            raise TypeError(\"%r is a frozen class\" % self)\n        object.__setattr__(self, key, value)\n\n    def _freeze(self):\n        self.__isfrozen = True\n","sub_path":"utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"394187930","text":"# -*- coding:utf-8 -*-\nimport socket\n\nclient = socket.socket()\n\nclient.connect(('localhost', 6969))  # connect to the server\n\nwhile True:\n    msg = input(\">>:\").strip()\n    if len(msg) == 0: continue\n    client.send(msg.encode())  # send the data\n\n    data = client.recv(1024)  # receive the reply\n\n    print(\"Server reply:\", 
data.decode())\n\nclient.close()\n","sub_path":"socketTestClient.py","file_name":"socketTestClient.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"156809709","text":"#!/usr/bin/env python3\n\nfrom sys import exit\nimport re\n\ndef main( ):\n    with open( \"test.txt\", 'r' ) as fin:\n        fileStr = ( fin.readlines( ) )\n\n    fileStr = ( str( fileStr ) )\n\n    # escape the dot so it only matches a literal '.' before the TLD\n    emailPattern = ( r\"([a-zA-Z_.]{1,25})@([a-zA-Z]{1,25})\\.([a-zA-Z]{1,5})\" )\n    result = ( re.search( emailPattern, fileStr ) )\n\n    print( \"match!\" ) if( result ) else print( \"no match!\" )\n\n    return( 0 )\n\nif( __name__ == ( \"__main__\" ) ):\n    exit( main( ) )\n","sub_path":"regex2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"46980207","text":"import wmi\r\ndef checkStatus():\r\n    #Initializing the wmi client\r\n    f = wmi.WMI()\r\n    running_Status = False\r\n    print(\"Let's start checking\")\r\n    #Loop through all processes\r\n    #We use the WMI.Win32_Process function to get the list of running processes on the system\r\n    for process in f.Win32_Process():\r\n        if \"JAVA.exe\" in process.Name:\r\n            running_Status = True\r\n    print(\"Current status:\", running_Status)","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"178226187","text":"from dal import autocomplete as ac\nfrom django import forms\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse_lazy\n\nfrom ghost_shopper.check.models import CheckPerformInvitation\nfrom ghost_shopper.user_profile.models import User\n\nfrom .enums import CheckStatusesEnum\nfrom .models import Check, CheckPerformRequest\n\n\nclass CheckForm(forms.ModelForm):\n\n    status = forms.ChoiceField(\n        choices=tuple(CheckStatusesEnum.values.items()),\n        label='Статус',\n        widget=forms.Select(attrs={'class': 'form-control'}))\n    curator = forms.ModelChoiceField(queryset=User.objects.filter(is_staff=True), widget=forms.Select(\n        attrs={'class': 'form-control form-control-uniform', 'placeholder': 'Куратор'}), label='Куратор')\n\n    class Meta:\n        model = Check\n        fields = (\n            'status', 'start_date', 'deadline', 'target', 'performer', 'curator', 'reward',\n            'conformation_period', 'comment', 'instruction', 'kind'\n        )\n        widgets = {\n            'start_date': forms.DateInput(attrs={'type': 'date', 'class': 'form-control pickadate'}),\n            'deadline': forms.DateInput(attrs={'type': 'date', 'class': 'form-control pickadate'}),\n            'target': ac.Select2(\n                url=reverse_lazy('organisation:autocomplete-node'),\n                attrs={'data-placeholder': 'Цель проверки'}\n            ),\n            'kind': forms.Select(attrs={'class': 'form-control form-control-uniform', 'placeholder': 'Тип'}),\n            'performer': ac.Select2(\n                url=reverse_lazy('profile:performer-autocomplete'),\n                attrs={'data-placeholder': 'Тайный покупатель', 'class': 'form-control'}\n            ),\n            'reward': forms.NumberInput(attrs={\n                'type': \"number\",\n                'id': \"qty_input\",\n                'class': \"form-control form-control-sm\",\n                'value': \"1\",\n                'min': \"1\",\n                'placeholder': 'Награда (руб)'\n            }),\n            'conformation_period': forms.NumberInput(attrs={'class': 'form-control'}),\n            'comment': forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'Комментарий', 'rows': 3}),\n            'instruction': ac.Select2(\n                
url=reverse_lazy('instruction:autocomplete'),\n attrs={'data-placeholder': 'Название инструкции'}\n )\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.initial.get('start_date', None):\n initial = self.initial['start_date']\n self.fields['start_date'].widget.attrs['data-value'] = initial.strftime('%Y-%m-%d')\n if self.initial.get('deadline', None):\n initial = self.initial['deadline']\n self.fields['deadline'].widget.attrs['data-value'] = initial.strftime('%Y-%m-%d')\n\n\nclass CheckPerformerAppointForm(forms.ModelForm):\n class Meta:\n model = Check\n fields = ('performer', )\n widgets = {\n 'performer': ac.Select2(\n url=reverse_lazy('profile:performer-autocomplete'),\n attrs={'data-placeholder': 'Тайный покупатель'}),\n }\n\n def save(self, *args, **kwargs):\n check = super().save(*args, **kwargs)\n check.make_processing()\n return check\n\n\nclass PerformRequestForm(forms.ModelForm):\n class Meta:\n model = CheckPerformRequest\n fields = ('check_obj', 'performer')\n widgets = {\n 'check_obj': forms.HiddenInput(),\n 'performer': forms.HiddenInput()\n }\n\n def __init__(self, *args, **kwargs):\n check = None\n if kwargs.get('check_id', None):\n check = get_object_or_404(Check.usual.all(), id=kwargs.pop('check_id'))\n performer = get_object_or_404(User.objects.filter(is_performer=True), id=kwargs.pop('performer_id'))\n\n super().__init__(*args, **kwargs)\n\n if check:\n self.fields['check_obj'].initial = check\n self.fields['performer'].initial = performer\n\n\nclass InvitePerformersForm(forms.Form):\n performers = forms.ModelMultipleChoiceField(queryset=None)\n check = forms.ModelChoiceField(\n queryset=Check.objects.filter(status=CheckStatusesEnum.AVAILABLE), widget=forms.HiddenInput)\n\n def __init__(self, *args, **kwargs):\n qs = kwargs.pop('performers_qs')\n check = kwargs.pop('check', None)\n super().__init__(*args, **kwargs)\n self.fields['performers'].queryset = qs\n\n if check is not None:\n self.fields['check'].initial = check\n\n def save(self, *args, **kwargs):\n \"\"\" Create performer invitation for each of performers in the form \"\"\"\n for performer in self.cleaned_data['performers']:\n CheckPerformInvitation.invite(performer=performer, check=self.cleaned_data['check'])\n","sub_path":"ghost_shopper/check/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348565401","text":"\n'''\nPurpose :[Market Risk feed files],[To model a Price Swap as an asian option in order to get the correct average value (float leg) and a swap fixed leg]\nDepartment and Desk :[IT],[MR]\nRequester: :Jacqueline Calitz\nDeveloper :Heinrich Cronje\nCR Number :XXXXXX\n'''\n\n\nimport ael, string, acm, PositionFile, MR_MainFunctions\nimport csv\nInsL = []\n\n\n# OPENFILE ##########################################################################################################\n \ndef OpenFile(temp,FileDir,Filename,PositionName,*rest):\n \n filename = FileDir + Filename\n \n outfile = open(filename, 'w')\n \n outfile.close()\n \n del InsL[:]\n InsL[:] = []\n \n return filename\n\n# OPENFILE ##########################################################################################################\n\ndef writeCSVRow(_headers, _dataSet, _csvPtr):\n _out = []\n\n # Populating the tuple for the output\n for _col in tuple(_headers.split()):\n _out.append(_dataSet[_col] if _col in _dataSet else '')\n\n _csvPtr.writerow(_out)\n\n# WRITE - FILE 
######################################################################################################\n\ndef Write(i,FileDir,Filename,PositionName,*rest):\n\n filename = FileDir + Filename\n insaddr = i.insaddr\n Instrument = acm.FInstrument[insaddr]\n \n # Defining the fields\n columnsHeaders='''BASFLAG HeaderName OBJECT TYPE NAME IDENTIFIER CurrencyCAL CurrencyDAYC CurrencyPERD CurrencyUNIT NotionlAtStartFLAG \n NotionalAtEndFLAG EffectiveDATE CouponRateCAL CouponRateDAYC CouponRatePERD CouponRateVAL StateProcFUNC TermNB TermUNIT \n TermCAL CouponGenENUM FixedCouponDateNB BusDayRuleRULE BusDayRuleBUSD BusDayRuleCONV BusDayRuleCAL InitialIndxLvlFUNC \n InitialIndxLvlUNIT InitialIndxLvlVAL InitialIndxLvlSTRG PaymntProcXREF DiscountCurveXREF CouponProratedFLAG TheoModelXREF \n MarketModelXREF FairValueModelXREF SettlementProcFUNC \n '''\n \n columnsHeadersCashflow='''BASFLAG HeaderName ATTRIBUTE OBJECT VariabNotionalDATE VariabNotionalENUM VariabNotionalCAL \n VariabNotionalDAYC VariabNotionalPERD VariabNotionalUNIT VariabNotionalVAL \n '''\n\n if (insaddr) not in InsL:\n InsL.append(insaddr)\n \n # Data initialisation\n data={}\n cData={} # Cashflow data\n \n #Base record\n data['BASFLAG'] = 'BAS'\n data['HeaderName'] = 'Swap Fixed Leg'\n data['OBJECT'] = 'Swap Fixed LegSPEC'\n data['TYPE'] = 'Swap Fixed Leg'\n data['NAME'] = MR_MainFunctions.NameFix(i.insid)+'_Fixed'\n data['IDENTIFIER'] = 'insaddr_'+ str(insaddr)+'_2'\n data['BusDayRuleCAL'] = 'Weekends'\n data['CouponGenENUM'] = 'Backward'\n data['StateProcFUNC'] = '@cash flow generator'\n data['TermUNIT'] = 'Maturity'\n data['InitialIndxLvlVAL'] = '0' \n data['TheoModelXREF'] = 'Swap Fixed Leg(Cashflows)'\n \n for l in i.legs():\n if l.type == 'Fixed':\n data['CouponRateDAYC'] = MR_MainFunctions.DayCountFix(l.daycount_method)\n data['CouponRateVAL'] = getattr(l, 'fixed_rate')\n data['CurrencyUNIT'] = l.curr.insid\n data['EffectiveDATE'] = MR_MainFunctions.Datefix(l.start_day)\n \n try:\n data['DiscountCurveXREF'] = Instrument.MappedDiscountLink().Value().Link().YieldCurveComponent().Curve().Name()\n except:\n data['DiscountCurveXREF'] = Instrument.MappedDiscountLink().Value().Link().YieldCurveComponent().Name()\n \n # Writing the CSV file\n with open (filename, 'ab') as csvfile:\n csvwriter = csv.writer(csvfile)\n writeCSVRow(columnsHeaders, data, csvwriter)\n \n #Rollover record \n cData['BASFLAG'] = 'rm_ro'\n cData['HeaderName'] = 'Swap Fixed Leg : Variable Notional'\n cData['ATTRIBUTE'] = 'Variable Notional'\n cData['OBJECT'] = 'Swap Fixed LegSPEC' \n \n if l.type == 'Fixed':\n cData['VariabNotionalUNIT'] = l.curr.insid\n \n \n for cf in l.cash_flows():\n if cf.pay_day > ael.date_today():\n cData['VariabNotionalDATE'] = MR_MainFunctions.Datefix(cf.pay_day) \n cData['VariabNotionalVAL'] = abs(cf.nominal_amount())*100 \n \n writeCSVRow(columnsHeadersCashflow, cData, csvwriter)\n \n csvfile.close()\n\n return i.insid\n\n# WRITE - FILE ######################################################################################################\n","sub_path":"Python modules/MR_PriceSwap_FixedLeg.py","file_name":"MR_PriceSwap_FixedLeg.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"82900604","text":"\n# coding: utf-8\n\n# # Explore DES Wave Buoy data\n\n# In[46]:\n\n\nimport requests\nimport json\nimport datetime\nimport numpy as np\nimport datetime\nimport pandas as pd\n\n\n# In[49]:\n\ndef get_waves(utctime= False):\n \"\"\"Retrieve wave data 
from the Open data portal\"\"\"\n    r = requests.get(\"https://data.qld.gov.au/api/action/datastore_search?resource_id=2bbef99e-9974-49b9-a316-57402b00609c&limit=20000\")\n    f = r.json() \n    \n    dataraw = f['result']['records']\n    data = []\n    for records in dataraw:\n        recs = []\n        for k in records:\n            recs.append(records.get(k))\n        data.append(recs)\n\n\n    datadf = pd.DataFrame.from_dict(data)\n    datadf.columns = list(dataraw[0].keys())\n    datadf.index = pd.to_datetime(datadf.DateTime)\n    \n    now = datetime.datetime.now()\n    fewago = now - datetime.timedelta(days=2.5)\n    \n    datadf = datadf.loc[fewago.strftime(\"%Y-%m-%d\"): now.strftime(\"%Y-%m-%d\")]\n    datadf = datadf[~(datadf[['Tp','Hsig','Tz']] < -1).any(axis=1)]\n\n    if utctime == True:\n        datadf.index = datadf.index - pd.Timedelta(hours=10)\n    \n    datadf = datadf.sort_index()\n    return datadf\n\n\n# In[35]:\n\n\ndef get_location():\n    datadf = get_waves()\n    sites = list(datadf.Site.unique())\n    lats = []\n    longs = []\n    for i in sites:\n        if str(datadf['Longitude'][datadf['Site'] == i][-1]) == '-99.9':\n            continue\n        if str(datadf['Latitude'][datadf['Site'] == i][-1]) == '-99.9':\n            continue\n        longs.append(str(datadf['Longitude'][datadf['Site'] == i][-1]))\n        lats.append(str(datadf['Latitude'][datadf['Site'] == i][-1]))\n    coords = list(zip(lats, longs))\n    return coords\n","sub_path":"python/fetch_online_des_waves.py","file_name":"fetch_online_des_waves.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"141090931","text":"import functools\nimport random\n\n\ndef parityGenerator(msg):\n    binary_li = list(map(int, list(msg)))\n    parity = functools.reduce(lambda a, b: a ^ b, binary_li)\n    return parity\n\n\ndef parityChecker(msg):\n    p = parityGenerator(msg)\n    if p == 0:\n        return True\n    else:\n        return False\n\n\ndef sender(msg_li):\n    fill = max([len(i) for i in msg_li])\n    msg_li = [list(i.zfill(fill)) for i in msg_li]\n\n    mat = []\n    for m in msg_li:\n        binary_li = list(map(int, list(m)))\n        binary_li.append(parityGenerator(binary_li))\n        mat.append(binary_li)\n\n    parity_line = []\n    for c in range(fill):\n        parity_line.append(parityGenerator([row[c] for row in mat]))\n\n    parity_line.append(parityGenerator(parity_line))\n    mat.append(parity_line)\n\n    send = []\n    for m in mat:\n        send.append(''.join(list(map(str, m))))\n\n    return send\n\n\ndef receiver(msg_li):\n    data = []\n    for m in msg_li[:-1]:\n        data.append(m[:-1])\n\n    mat = []\n    for m in msg_li:\n        binary_li = list(map(int, list(m)))\n        if parityChecker(binary_li) == False:\n            return False, data\n        mat.append(binary_li)\n\n    for c in range(max([len(i) for i in msg_li])):\n        if parityChecker([row[c] for row in mat]) == False:\n            return False, data\n\n    return True, data\n\n\ndata = list(map(str, input('Enter space separated data to send: ').split()))\nmsg_send = sender(data)\nprint(\"Data with parity:\", str(msg_send))\n\np, msg_received = receiver(msg_send)\n\nprint()\nif p:\n    print('The transmission was ERROR-FREE!\\nThe data received is: ' + str(msg_received))\nelse:\n    print('There was an ERROR in the data received!\\nThe data received is: ' + str(msg_received))\n","sub_path":"Semester/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"409562839","text":"from django.test import LiveServerTestCase\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom selenium.webdriver.firefox.options import 
Options\nfrom selenium import webdriver\n\n\nclass basicFunctionalTests(LiveServerTestCase):\n\n def setUp(self):\n options = Options()\n options.set_headless(headless=True)\n self.browser = webdriver.Firefox(\n firefox_options=options)\n self.browser.implicitly_wait(1)\n self.new_username = 'MrTest1'\n self.new_password = 'APAssword123@'\n\n def tearDown(self):\n self.browser.quit()\n\n def test_home_page_is_loaded(self):\n self.browser.get(self.live_server_url)\n self.assertIn('Giggel', self.browser.title)\n\n def test_login_button_present(self):\n self.browser.get(self.live_server_url)\n self.assertTrue(self.browser.find_element_by_name(\"login\"))\n","sub_path":"giggel/functional_tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46706165","text":"'''\nGraphing\n\nAuthor: Rylan Santinon\n'''\nimport pygal\nfrom csv_io import CsvIo\nfrom urlparse import urlparse\nimport os\n\nclass Graphing(object):\n '''Graphs and diagrams based on retrieved data'''\n def __init__(self, directory):\n self.directory = directory\n self.csvio = CsvIo()\n self.make_directory()\n\n def make_directory(self):\n '''Make the output directory if one does not exist'''\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n\n def output_png(self, chart, filename):\n '''Output the chart to a png file at directory/filename'''\n chart.render_to_png(os.path.join(self.directory, filename))\n\n def karma_by_created(self, outpng):\n FACTOR = 1.0/1000000000\n users = self.csvio.get_all_users_full()\n user_list = []\n for k in users.keys():\n user_list.append(users[k])\n\n karmas = []\n createds = []\n c = 0\n for u in user_list:\n c = c + 1\n if int(u[1]) > 250 and int(u[1]) < 110000:\n if c % 15 != 0:\n continue\n karmas.append(int(u[1]))\n createds.append(int(u[2])/FACTOR)\n\n xychart = pygal.XY(stroke=False, x_title='Created time (seconds past epoch) x 10^-9')\n xychart.title = 'Karma vs Created time'\n xychart.add('Karma', zip(createds, karmas))\n\n self.output_png(xychart, outpng)\n\n def domain_frequency(self, topn, outpng):\n '''Make a png frequency graph for top-n domains'''\n stories = self.csvio.get_all_stories()\n count_map = {}\n for k in stories.keys():\n count_map_key = self.canonical(stories[k][-1])\n count = count_map.get(count_map_key, 0)\n count_map[count_map_key] = count + 1\n count_list = []\n\n for k in count_map.keys():\n if k == '':\n continue\n count_list.append([count_map[k], k])\n sorted_list = sorted(count_list)\n\n top = sorted_list[-topn:]\n top.reverse()\n\n count_axis = [l[0] for l in top]\n name_axis = [l[1] for l in top]\n\n bar_chart = pygal.Bar()\n bar_chart.x_labels = name_axis\n bar_chart.title = \"Frequency of top \" + str(topn) + \" domains\"\n bar_chart.add('Domains', count_axis)\n self.output_png(bar_chart, outpng)\n\n def canonical(self, url):\n '''Canonical representation of url's domain'''\n loc = urlparse(url).netloc\n if 'www.' 
in loc:\n            return loc.split('.')[1]\n        else:\n            return loc\n\nif __name__ == '__main__':\n    G = Graphing('diagrams')\n    G.domain_frequency(10, 'frequency_bar.png')\n    G.karma_by_created('karma_created.png')\n","sub_path":"graphing.py","file_name":"graphing.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"270282584","text":"import os.path\n\nfrom commonroad.common.solution import Solution, CommonRoadSolutionWriter\n\n# Load planner module\nfrom iterator import scenario_iterator_interactive, scenario_iterator_non_interactive, _search_interactive_scenarios\nfrom main_interactive_CRplanner import motion_planner_interactive\n\n\ndef save_solution(solution: Solution, path: str) -> None:\n    \"\"\"\n    Save the given solution to the given path.\n    \"\"\"\n    return CommonRoadSolutionWriter(solution).write_to_file(\n        output_path=path,\n        overwrite=True,\n        pretty=True\n    )\n\n\n# Run Main Process\nif __name__ == \"__main__\":\n    scenario_dir = \"/commonroad/scenarios\"\n    solution_dir = \"/commonroad/solutions\"\n\n    # solve all non-interactive scenarios\n    # for scenario, planning_problem_set in scenario_iterator_non_interactive(scenario_dir):\n    #     print(f\"Processing scenario {str(scenario.scenario_id)} ...\")\n    #     solution = motion_planner(scenario)\n    #     save_solution(solution, solution_dir)\n\n    # solve the second half of interactive scenarios\n    interactive_paths = _search_interactive_scenarios(scenario_dir)\n    n_interactive_scenarios = len(interactive_paths)\n    last_half_scenario_path = interactive_paths[int(n_interactive_scenarios/2):]\n\n    for scenario_path in last_half_scenario_path:\n        print(f\"Processing scenario {os.path.basename(scenario_path)} ...\")\n        try:\n            solution = motion_planner_interactive(scenario_path)\n            save_solution(solution, solution_dir)\n        except Exception:\n            print('-'*20, 'cannot solve this scenario', scenario_path, '-'*20)\n        else:\n            print('-'*20, scenario_path, 'solved successfully', '-'*20)\n","sub_path":"commonroad-docker-submission/planner/main_core2.py","file_name":"main_core2.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"255507563","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport numpy as np\nimport liwc\nfrom collections import Counter\nimport textstat\nimport string\nfrom nltk.corpus import stopwords\nfrom settings.scraper import TARGET_LANGS\nimport stanza\n\nnlp = stanza.Pipeline('en', use_gpu=False)  # stanza.download('en')\nparse, category_names = liwc.load_token_parser('../data/tools/LIWC2015_English.dic')\n\n\ndef append_features(X1, X2):\n    if len(X1.index) == 0:\n        return X2\n    return pd.concat([X1, X2], axis=1)\n\n\ndef tfidf_extractor(dataset):\n    tfidfconverter = TfidfVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))\n    X_tfidf = tfidfconverter.fit_transform(dataset['text'].values).toarray()\n    features = pd.DataFrame(X_tfidf, columns = tfidfconverter.get_feature_names())\n    \n    return features\n\n\ndef punct_extractor(dataset):\n    column_names = ['punct_'+str(index) for index, punct in enumerate(list(string.punctuation))]\n\n    features = pd.DataFrame(0, index=np.arange(dataset.shape[0]), columns = column_names)\n    \n    # row_index identifies the sample; punct_index identifies the punctuation column\n    for row_index, row in dataset.iterrows():\n        for punct_index, punct in enumerate(string.punctuation):\n            if punct in row['content']:\n                features.at[row_index, 'punct_'+str(punct_index)] += 1\n    \n    return features\n\n\ndef 
liwc_extractor(dataset):\n features = pd.DataFrame(0, index=np.arange(dataset.shape[0]), columns = category_names)\n \n for index, row in dataset.iterrows():\n tokens = row['content'].split(' ')\n category_counts = Counter(category for token in tokens for category in parse(token))\n for category, value in category_counts.items():\n features.at[index, category] = value\n \n return features\n\n\ndef readibility_extractor(dataset):\n features = pd.DataFrame(0., index=np.arange(dataset.shape[0]), columns = [\n 'flesch_kincaid_grade',\n 'flesch_reading_ease',\n 'gunning_fog',\n 'automated_readability_index',\n 'num_char',\n 'num_paragraph',\n 'max_word_len',\n ])\n \n for index, row in dataset.iterrows():\n features.at[index, 'flesch_kincaid_grade'] = textstat.flesch_kincaid_grade(row['content'])\n features.at[index, 'flesch_reading_ease'] = textstat.flesch_reading_ease(row['content'])\n features.at[index, 'gunning_fog'] = textstat.gunning_fog(row['content'])\n features.at[index, 'automated_readability_index'] = textstat.automated_readability_index(row['content'])\n features.at[index, 'num_char'] = len(row['content'])\n features.at[index, 'num_paragraph'] = len(row['content'].split('\\n'))\n features.at[index, 'max_word_len'] = max([len(token) for token in row['content'].split(' ')])\n \n return features\n\n\ndef syntax_preextractor(dataset):\n result = dataset.copy()\n \n result['syntax'] = ''\n\n for index, row in result.iterrows():\n doc = nlp(row['headline'] + '. ' + row['content'])\n\n row_feature = ''\n\n for sentence in doc.sentences:\n for word in sentence.words:\n parent = word.head\n if parent == 0:\n continue\n parent_pos = sentence.words[parent-1].xpos\n grandparent = sentence.words[parent-1].head\n if grandparent == 0:\n continue\n grandparent_pos = sentence.words[grandparent-1].xpos\n feature = grandparent_pos + parent_pos + word.lemma\n row_feature += feature + ' '\n\n result.at[index, 'syntax'] = row_feature\n \n return result\n\n\ndef syntax_extractor(dataset):\n dataset_syntax = syntax_preextractor(dataset)\n \n tfidfconverter = TfidfVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))\n X_tfidf = tfidfconverter.fit_transform(dataset_syntax['syntax'].values).toarray()\n features = pd.DataFrame(X_tfidf, columns = tfidfconverter.get_feature_names())\n \n return features\n\n\ndef mult_evidence_similarity_extractor(dataset, mult_evidence, n_articles=10):\n column_names = []\n for lang in TARGET_LANGS:\n for i in range(n_articles):\n column_names.append(lang + '_' + str(i) + '_sim')\n\n features = pd.DataFrame(0., index=np.arange(dataset.shape[0]), columns = column_names)\n features['file'] = dataset['file'].copy()\n\n for file, evidence in mult_evidence.items():\n for lang, res_articles in evidence.items():\n for i, article in enumerate(res_articles[:n_articles]):\n features.at[dataset.file==file, lang + '_' + str(i) + '_sim'] = article['similarity']\n\n return features.drop('file', axis=1)\n\n\ndef mult_evidence_rank_extractor(dataset, mult_evidence, n_articles=10):\n column_names = []\n for lang in TARGET_LANGS:\n for i in range(n_articles):\n column_names.append(lang + '_' + str(i) + '_rank')\n\n features = pd.DataFrame(0., index=np.arange(dataset.shape[0]), columns = column_names)\n features['file'] = dataset['file'].copy()\n\n for file, evidence in mult_evidence.items():\n for lang, res_articles in evidence.items():\n for i, article in enumerate(res_articles[:n_articles]):\n features.at[dataset.file==file, lang + '_' + str(i) + '_rank'] = 
article['alexa_rank']\r\n\r\n    return features.drop('file', axis=1)","sub_path":"tools/features_extraction.py","file_name":"features_extraction.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"591336869","text":"from selenium import webdriver\r\nimport json\r\nimport re\r\nimport bs4\r\n\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\nPROP_INDEX = 'index'\r\nPROP_CHAINS = 'chains'\r\nPROP_CHAINS_Actions = 'actions'\r\nPROP_CHAIN_ACTION_FRAME = 'frame'\r\nPROP_CHAIN_ACTION_SELECTOR = 'selector'\r\nPROP_CHAIN_ACTION_NAME = 'name'\r\nPROP_CHAIN_ACTION_VALUE = 'value'\r\nPROP_CHAIN_VARS = 'vars'\r\nPROP_CHAIN_VAR_NAME = 'name'\r\nPROP_CHAIN_VAR_SCOPE = 'scope'\r\nPROP_CHAIN_VAR_PROCESSORS = 'processors'\r\nPROP_CHAIN_VAR_PROCESSOR_NAME = 'name'\r\nPROP_CHAIN_VAR_PROCESSOR_PATTERN = 'pattern'\r\nPROP_CHAIN_ASSERT = 'assert'\r\nPROP_CHAIN_ASSERT_ASSERTED = 'asserted'\r\nPROP_CHAIN_ASSERT_EXCEPTED = 'excepted'\r\n\r\n\r\nclass SeleniumTester:\r\n\r\n    def get_action(self, action):\r\n        return {'click': ClickActionExecutor(action), 'input': InputActionExecutor(action),\r\n                }.get(action[PROP_CHAIN_ACTION_NAME], 'error')\r\n\r\n    def get_processor(self, processor):\r\n        return {'regex': RegexProcessor(processor), 'selector': SelectorProcess(processor),\r\n                'constant': ConstantProcessor(processor)}.get(processor[PROP_CHAIN_VAR_PROCESSOR_NAME], 'error')\r\n\r\n    def __init__(self) -> None:\r\n        with open('pro.json') as file:\r\n            content = file.read()\r\n        self.prop = json.loads(content)\r\n        self.driver = webdriver.Chrome('../bin/chromedriver')\r\n        global wait\r\n        wait = WebDriverWait(self.driver, timeout=1)\r\n        global global_vars\r\n        global_vars = {}\r\n\r\n    def run(self):\r\n        # initialise: open the start page\r\n        self.driver.get(self.prop[PROP_INDEX])\r\n\r\n        for chain in self.prop[PROP_CHAINS]:\r\n\r\n            actions = chain['actions']\r\n            for action in actions:\r\n\r\n                # switch into the target frame\r\n                if PROP_CHAIN_ACTION_FRAME in action:\r\n                    frame = action[PROP_CHAIN_ACTION_FRAME]\r\n                    self.driver.switch_to.frame(frame)\r\n\r\n                # resolve the CSS selector\r\n                selector = action[PROP_CHAIN_ACTION_SELECTOR]\r\n                try:\r\n                    ele = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, selector)))\r\n                except TimeoutException:\r\n                    print('selector [%s] did not match any element' % (selector))\r\n                    self.driver.close()\r\n                    exit(1)\r\n                # else:\r\n                #     print('unknown exception')\r\n                #     self.driver.close()\r\n                #     exit(10)\r\n\r\n                # execute the action\r\n                action_executor = self.get_action(action)\r\n                action_executor.do_action(ele)\r\n\r\n                # switch back to the parent frame\r\n                if PROP_CHAIN_ACTION_FRAME in action:\r\n                    self.driver.switch_to.parent_frame()\r\n\r\n            # define variables\r\n            local_vars = {}\r\n            if PROP_CHAIN_VARS in chain:\r\n                for var in chain[PROP_CHAIN_VARS]:\r\n                    if 'frame' in var:\r\n                        frame = var['frame']\r\n                        self.driver.switch_to.frame(frame)\r\n                    content = self.driver.page_source\r\n                    for each_processor in var[PROP_CHAIN_VAR_PROCESSORS]:\r\n                        content = self.driver.page_source\r\n                        processor = self.get_processor(each_processor)\r\n                        content = processor.do_process(content)\r\n\r\n                    scope = var[PROP_CHAIN_VAR_SCOPE]\r\n                    var_name = var[PROP_CHAIN_VAR_NAME]\r\n                    if scope == 'local':\r\n                        local_vars[var_name] = content\r\n                    else:\r\n                        global_vars[var_name] = content\r\n\r\n                    # switch back to the parent frame\r\n                    if 'frame' in var:\r\n                        self.driver.switch_to.parent_frame()\r\n\r\n            # run the assertion\r\n            if PROP_CHAIN_ASSERT in chain:\r\n\r\n                the_assert = chain[PROP_CHAIN_ASSERT]\r\n\r\n                asserted_var_name = the_assert[PROP_CHAIN_ASSERT_ASSERTED]\r\n                if asserted_var_name in global_vars:\r\n                    asserted = 
global_vars[asserted_var_name]\n else:\n asserted = local_vars[asserted_var_name]\n\n excepted_var_name = the_assert[PROP_CHAIN_ASSERT_EXCEPTED]\n if excepted_var_name in global_vars:\n excepted = global_vars[excepted_var_name]\n else:\n excepted = local_vars[excepted_var_name]\n\n print('assert:asserted[%s],excepted[%s]' % (asserted, excepted))\n\n\nclass ActionExecutor:\n def __init__(self, action) -> None:\n self.action = action\n\n\nclass InputActionExecutor(ActionExecutor):\n def do_action(self, ele):\n ele.send_keys(self.action[PROP_CHAIN_ACTION_VALUE] + '\\n')\n\n\nclass ClickActionExecutor(ActionExecutor):\n def do_action(self, ele):\n ele.click()\n\n\nclass Processor:\n def __init__(self, processor) -> None:\n self.process = processor\n\n\nclass RegexProcessor(Processor):\n def do_process(self, content):\n pattern = self.process[PROP_CHAIN_VAR_PROCESSOR_PATTERN]\n result = re.search(pattern, content)\n if result:\n return result.group()\n\n\nclass SelectorProcess(Processor):\n def do_process(self, content):\n pattern = self.process[PROP_CHAIN_VAR_PROCESSOR_PATTERN]\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, pattern)))\n doc = bs4.BeautifulSoup(content, features=\"html.parser\")\n result = doc.select(pattern)\n if result.__len__() == 0:\n return ''\n return result.pop(0).string\n\n\nclass ConstantProcessor(Processor):\n def do_process(self, content):\n return self.process[PROP_CHAIN_VAR_PROCESSOR_PATTERN]\n\n\ntester = SeleniumTester()\ntester.run()\n","sub_path":"src/SeleniumTester.py","file_name":"SeleniumTester.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"188355041","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nimport torch\n\nfrom . import base_sde\nfrom . import methods\nfrom . import misc\nfrom .._brownian import BaseBrownian, BrownianInterval\nfrom ..settings import LEVY_AREA_APPROXIMATIONS, METHODS, NOISE_TYPES, SDE_TYPES\nfrom ..types import Any, Dict, Optional, Scalar, Tensor, TensorOrTensors, Vector\n\n\ndef sdeint(sde: base_sde.BaseSDE,\n y0: Tensor,\n ts: Vector,\n bm: Optional[BaseBrownian] = None,\n method: Optional[str] = None,\n dt: Optional[Scalar] = 1e-3,\n adaptive: Optional[bool] = False,\n rtol: Optional[Scalar] = 1e-5,\n atol: Optional[Scalar] = 1e-4,\n dt_min: Optional[Scalar] = 1e-5,\n options: Optional[Dict[str, Any]] = None,\n names: Optional[Dict[str, str]] = None,\n logqp: Optional[bool] = False,\n **unused_kwargs) -> TensorOrTensors:\n \"\"\"Numerically integrate an SDE.\n\n Args:\n sde: Object with methods `f` and `g` representing the\n drift and diffusion. 
The output of `g` should be a single tensor of\n size (batch_size, d) for diagonal noise SDEs or (batch_size, d, m)\n for SDEs of other noise types; d is the dimensionality of state and\n m is the dimensionality of Brownian motion.\n y0 (Tensor): A tensor for the initial state.\n ts (Tensor or sequence of float): Query times in non-descending order.\n The state at the first time of `ts` should be `y0`.\n bm (Brownian, optional): A 'BrownianInterval', `BrownianPath` or\n `BrownianTree` object. Should return tensors of size (batch_size, m)\n for `__call__`. Defaults to `BrownianInterval`.\n method (str, optional): Numerical integration method to use. Must be\n compatible with the SDE type (Ito/Stratonovich) and the noise type\n (scalar/additive/diagonal/general). Defaults to a sensible choice\n depending on the SDE type and noise type of the supplied SDE.\n dt (float, optional): The constant step size or initial step size for\n adaptive time-stepping.\n adaptive (bool, optional): If `True`, use adaptive time-stepping.\n rtol (float, optional): Relative tolerance.\n atol (float, optional): Absolute tolerance.\n dt_min (float, optional): Minimum step size during integration.\n options (dict, optional): Dict of options for the integration method.\n names (dict, optional): Dict of method names for drift and diffusion.\n Expected keys are \"drift\" and \"diffusion\". Serves so that users can\n use methods with names not in `(\"f\", \"g\")`, e.g. to use the\n method \"foo\" for the drift, we supply `names={\"drift\": \"foo\"}`.\n logqp (bool, optional): If `True`, also return the log-ratio penalty.\n This argument will be deprecated in the future and is only included\n to support backward compatibility.\n\n Returns:\n A single state tensor of size (T, batch_size, d).\n\n Raises:\n ValueError: An error occurred due to unrecognized noise type/method,\n or if `sde` is missing required methods.\n \"\"\"\n misc.handle_unused_kwargs(unused_kwargs, msg=\"`sdeint`\")\n del unused_kwargs\n\n sde, y0, ts, bm, method = check_contract(sde, y0, ts, bm, method, names, logqp)\n misc.assert_no_grad(['ts', 'dt', 'rtol', 'atol', 'dt_min'],\n [ts, dt, rtol, atol, dt_min])\n return integrate(\n sde=sde,\n y0=y0,\n ts=ts,\n bm=bm,\n method=method,\n dt=dt,\n adaptive=adaptive,\n rtol=rtol,\n atol=atol,\n dt_min=dt_min,\n options=options,\n logqp=logqp\n )\n\n\ndef check_contract(sde, y0, ts, bm, method, names, logqp):\n if names is None:\n names_to_change = {}\n else:\n names_to_change = {key: names[key] for key in (\"drift\", \"diffusion\", \"prior_drift\") if key in names}\n if len(names_to_change) > 0:\n sde = base_sde.RenameMethodsSDE(sde, **names_to_change)\n\n required_funcs = (\"f\", \"g\", \"h\") if logqp else (\"f\", \"g\")\n missing_funcs = [func for func in required_funcs if not hasattr(sde, func)]\n if len(missing_funcs) > 0:\n raise ValueError(f\"sde is required to have the methods {required_funcs}. Missing functions: {missing_funcs}\")\n\n if not hasattr(sde, \"noise_type\"):\n raise ValueError(f\"sde does not have the attribute noise_type.\")\n\n if sde.noise_type not in NOISE_TYPES:\n raise ValueError(f\"Expected noise type in {NOISE_TYPES}, but found {sde.noise_type}.\")\n\n if not hasattr(sde, \"sde_type\"):\n raise ValueError(f\"sde does not have the attribute sde_type.\")\n\n if sde.sde_type not in SDE_TYPES:\n raise ValueError(f\"Expected sde type in {SDE_TYPES}, but found {sde.sde_type}.\")\n\n # --- Backwards compatibility: v0.1.1. 
---\n if logqp:\n sde = base_sde.SDELogqp(sde)\n y0 = torch.cat((y0, y0.new_zeros(size=(y0.size(0), 1))), dim=1)\n # ----------------------------------------\n\n if method is None:\n method = {\n SDE_TYPES.ito: {\n NOISE_TYPES.diagonal: METHODS.srk,\n NOISE_TYPES.additive: METHODS.srk,\n NOISE_TYPES.scalar: METHODS.srk,\n NOISE_TYPES.general: METHODS.euler\n }[sde.noise_type],\n SDE_TYPES.stratonovich: METHODS.midpoint,\n }[sde.sde_type]\n\n if method not in METHODS:\n raise ValueError(f\"Expected method in {METHODS}, but found {method}.\")\n\n if not torch.is_tensor(y0):\n raise ValueError(f\"`y0` must be a torch.Tensor.\")\n\n if not torch.is_tensor(ts):\n if not isinstance(ts, (tuple, list)) or not all(isinstance(t, (float, int)) for t in ts):\n raise ValueError(f\"Evaluation times `ts` must be a 1-D Tensor or list/tuple of floats.\")\n ts = torch.tensor(ts, dtype=y0.dtype, device=y0.device)\n\n drift_shape = sde.f(ts[0], y0).size()\n if drift_shape != y0.size():\n raise ValueError(f\"Drift must return a Tensor of the same shape as `y0`. \"\n f\"Got drift shape {drift_shape}, but y0 shape {y0.size()}.\")\n\n diffusion_shape = sde.g(ts[0], y0).size()\n noise_channels = diffusion_shape[-1]\n if sde.noise_type in (NOISE_TYPES.additive, NOISE_TYPES.general, NOISE_TYPES.scalar):\n batch_dimensions = diffusion_shape[:-2]\n drift_shape, diffusion_shape = tuple(drift_shape), tuple(diffusion_shape)\n if len(drift_shape) == 0:\n raise ValueError(\"Drift must be of shape (..., state_channels), but got shape ().\")\n if len(diffusion_shape) < 2:\n raise ValueError(f\"Diffusion must have shape (..., state_channels, noise_channels), \"\n f\"but got shape {diffusion_shape}.\")\n if drift_shape != diffusion_shape[:-1]:\n raise ValueError(f\"Drift and diffusion shapes do not match. Got drift shape {drift_shape}, \"\n f\"meaning {drift_shape[:-1]} batch dimensions and {drift_shape[-1]} channel \"\n f\"dimensions, but diffusion shape {diffusion_shape}, meaning \"\n f\"{diffusion_shape[:-2]} batch dimensions, {diffusion_shape[-2]} channel \"\n f\"dimensions and {diffusion_shape[-1]} noise dimension.\")\n if diffusion_shape[:-2] != batch_dimensions:\n raise ValueError(\"Every Tensor returned by the diffusion must have the same number and size of batch \"\n \"dimensions.\")\n if diffusion_shape[-1] != noise_channels:\n raise ValueError(\"Every Tensor returned by the diffusion must have the same number of noise channels.\")\n if sde.noise_type == NOISE_TYPES.scalar:\n if noise_channels != 1:\n raise ValueError(f\"Scalar noise must have only one channel; \"\n f\"the diffusion has {noise_channels} noise channels.\")\n else: # sde.noise_type == NOISE_TYPES.diagonal\n batch_dimensions = diffusion_shape[:-1]\n drift_shape, diffusion_shape = tuple(drift_shape), tuple(diffusion_shape)\n if len(drift_shape) == 0:\n raise ValueError(\"Drift must be of shape (..., state_channels), but got shape ().\")\n if len(diffusion_shape) == 0:\n raise ValueError(f\"Diffusion must have shape (..., state_channels), but got shape ().\")\n if drift_shape != diffusion_shape:\n raise ValueError(f\"Drift and diffusion shapes do not match. 
Got drift shape {drift_shape}, \"\n                         f\"meaning {drift_shape[:-1]} batch dimensions and {drift_shape[-1]} channel \"\n                         f\"dimensions, but diffusion shape {diffusion_shape}, meaning \"\n                         f\"{diffusion_shape[:-1]} batch dimensions, {diffusion_shape[-1]} channel \"\n                         f\"dimensions and {diffusion_shape[-1]} noise dimension.\")\n        if diffusion_shape[:-1] != batch_dimensions:\n            raise ValueError(\"Every Tensor returned by the diffusion must have the same number and size of batch \"\n                             \"dimensions.\")\n        if diffusion_shape[-1] != noise_channels:\n            raise ValueError(\"Every Tensor returned by the diffusion must have the same number of noise \"\n                             \"channels.\")\n    sde = base_sde.ForwardSDE(sde)\n\n    if bm is None:\n        if method == METHODS.srk:\n            levy_area_approximation = LEVY_AREA_APPROXIMATIONS.space_time\n        elif method == METHODS.log_ode_midpoint:\n            levy_area_approximation = LEVY_AREA_APPROXIMATIONS.foster\n        else:\n            levy_area_approximation = LEVY_AREA_APPROXIMATIONS.none\n        bm = BrownianInterval(t0=ts[0], t1=ts[-1], size=(*batch_dimensions, noise_channels), dtype=y0.dtype,\n                              device=y0.device, levy_area_approximation=levy_area_approximation)\n\n    return sde, y0, ts, bm, method\n\n\ndef integrate(sde, y0, ts, bm, method, dt, adaptive, rtol, atol, dt_min, options, logqp=False):\n    if options is None:\n        options = {}\n\n    solver_fn = methods.select(method=method, sde_type=sde.sde_type)\n    solver = solver_fn(\n        sde=sde,\n        bm=bm,\n        y0=y0,\n        dt=dt,\n        adaptive=adaptive,\n        rtol=rtol,\n        atol=atol,\n        dt_min=dt_min,\n        options=options\n    )\n    if adaptive and method == METHODS.euler and sde.noise_type != NOISE_TYPES.additive:\n        warnings.warn(f\"Numerical solution is not guaranteed to converge to the correct solution when using adaptive \"\n                      f\"time-stepping with the Euler--Maruyama method with non-additive noise.\")\n\n    ys = solver.integrate(ts)\n\n    # --- Backwards compatibility: v0.1.1. 
---\n    if logqp:\n        ys, log_ratio = ys.split(split_size=(y0.size(1) - 1, 1), dim=2)\n        log_ratio_increments = torch.stack(\n            [log_ratio_t_plus_1 - log_ratio_t\n             for log_ratio_t_plus_1, log_ratio_t in zip(log_ratio[1:], log_ratio[:-1])], dim=0\n        ).squeeze(dim=2)\n        return ys, log_ratio_increments\n    # ----------------------------------------\n\n    # reuse the trajectory already computed above rather than integrating a second time\n    return ys\n
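\n# --- Added illustrative sketch (not part of the original module): minimal sdeint usage. ---\n# Assumes a hypothetical diagonal-noise SDE; the class name and coefficients below are made up.\n#\n#     class GBM(torch.nn.Module):\n#         noise_type = 'diagonal'\n#         sde_type = 'ito'\n#         def f(self, t, y): return 0.1 * y  # drift, shape (batch_size, d)\n#         def g(self, t, y): return 0.2 * y  # diffusion, shape (batch_size, d) for diagonal noise\n#\n#     y0 = torch.full((8, 3), 0.5)\n#     ts = torch.linspace(0., 1., 20)\n#     ys = sdeint(GBM(), y0, ts, dt=1e-2)    # tensor of shape (len(ts), 8, 3)\n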
","sub_path":"torchsde/_core/sdeint.py","file_name":"sdeint.py","file_ext":"py","file_size_in_byte":11788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"443892037","text":"import json, helpers\n\nimport messages as msg\n\nfrom time import gmtime, strftime\n\n\ndef execute():\n\trecord = helpers.load_record()\n\t\n\tcurrent = record['current']\n\n\tif current == \"\":\n\t\tmsg.no_current_project()\n\telse:\n\t\tobj = {}\n\t\tspentList = []\n\t\tfor item in record['projects'][current]['time']:\n\t\t\tspentList.append(item['spent'])\n\t\t\tif item['end'] == '':\n\t\t\t\tobj = item\n\n\t\tif len(obj) > 0:\n\t\t\trecordedTime = helpers.time_stamp()\n\t\t\trecordedDate = helpers.date_stamp()\n\t\t\tobj['end'] = recordedTime\n\t\t\tobj['spent'] = helpers.time_spent(obj['start'], obj['end'])\n\t\t\tobj['spent_date'] = recordedDate\n\n\t\t\tcontent = helpers.glue_updated_record(record)\n\t\t\thelpers.write_file(helpers.recordPath, content)\n\n\t\t\tmsg.untracking_message(obj)\n\t\telse:\n\t\t\tmsg.nothing_being_tracked()","sub_path":"untrack.py","file_name":"untrack.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"562090656","text":"\nimport pandas as pd\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt # Library used to render the charts - https://matplotlib.org/\nimport math\n\ndataFrame = pd.read_csv('tracker/data.csv') # Read the file and create the pandas object holding the CSV\n\n\n\ncol_length = dataFrame.shape[1] # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.shape.html\n\n# Declare variables\nconsolidado = [] # Holds the overall consolidated results\nresult = [] # Holds the consolidated results per column\ndays = [] # Holds the daily data\n\n\n\n# Build the DataFrame\nfor index in range(0, col_length-1): # iterate over the CSV columns\n\n    df = dataFrame.iloc[:, index+1] # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iloc.html\n    last_daily_var = 100 # number of days used to build the average\n\n\n    for row_index, row in df.iteritems():\n        # Only run the calculations after the 100th row\n        if row_index >= 100: # skip the first 100 rows\n\n            last_val = dataFrame.iloc[row_index - 1, index + 1] # fetch the previous record\n            rol_10 = dataFrame.iloc[row_index-10:row_index, index+1] # fetch the 10 records preceding this row\n            rol_100 = dataFrame.iloc[row_index-100:row_index, index+1] # fetch the 100 records preceding this row\n            daily_var = ((row/last_val) - 1)*100 # daily variation\n\n            # Long or short\n            if rol_10.mean() < rol_100.mean(): # 10-period mean below the 100-period mean: sell, otherwise buy\n                # downtrend\n                daily_var = (last_val/row) - 1\n\n            last_daily_var = (last_daily_var*(1+daily_var))\n            if math.isnan(daily_var): # check whether the row is blank\n                result.append(0)\n            else:\n                result.append(daily_var) # store the value in a new array\n        else:\n            result.append(0)\n\n    consolidado.append(result)\n    result = []\n\nfinal = DataFrame(consolidado)\n\n# Compute the mean of each row generated in consolidado\nfinal.loc['mean'] = final.mean(axis=0, numeric_only=True)\nlast_val = 100\ngraph_data = []\nfor row_index, row in final.T.iloc[:, -1].iteritems():\n    # print(row, last_val)\n    if isinstance(row, str):\n        graph_data.append(0)\n    else:\n        value = (1+row)*100\n        graph_data.append(value)\n        last_val = value\n\ngraph = DataFrame(graph_data)\nfinal.loc['graph'] = graph_data\nfinal.loc['date'] = dataFrame.iloc[:, 0]\n\nplt.close('all')\nprint(final.loc[['graph']])\n\n# Render the chart image\npd.DataFrame(final.T, columns=['graph', 'date']).plot()\nplt.savefig('grafico.png', bbox_inches='tight')\n","sub_path":"trend_following.py","file_name":"trend_following.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"425518140","text":"'''\n\nAim: recursively enumerate the contiguous substrings (sequential subsets) of a string and record\neach palindromic one as a '|'-delimited split of the original string.\n'''\n\n\ndef isPalindrome(s,startindex, endindex):\n\n    if endindex < startindex:\n        return True\n    elif s[startindex] == s[endindex]:\n        return isPalindrome(s,startindex+1, endindex -1)\n    else:\n        return False\n\n\ndef createseqsubset(s):\n    sol = []\n    def helper(sidx, eidx):\n        if eidx - sidx>1:\n            helper(sidx,eidx-1)\n        if isPalindrome(s[sidx:eidx],0, eidx-sidx-1) :\n            print (s[sidx:eidx], sidx, eidx)\n            if sidx == 0 and eidx ==len(s) :\n                sol.append(s[sidx:eidx])\n            elif sidx==0:\n\n                sol.append(''.join((''.join(s[0:sidx]), s[sidx:eidx],'|' ,'|'.join(s[eidx:]))))\n            elif eidx == len(s):\n                print('came here')\n                sol.append(''.join(('|'.join(s[0:sidx]), '|', s[sidx:eidx], ''.join(s[eidx:]))))\n            else:\n                sol.append(''.join(('|'.join(s[0:sidx]), '|', s[sidx:eidx], '|', '|'.join(s[eidx:]))))\n    for i in range(len(s)):\n        helper(i,len(s))\n\n    if len(s) > 0:\n        sol.append('|'.join(s))\n    print(sol)\ncreateseqsubset('abracadabra')\ncreateseqsubset('bxxy')\ncreateseqsubset('aa')\ncreateseqsubset('xyy')","sub_path":"Python/Practice/Recursion/createseq_subset.py","file_name":"createseq_subset.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"206725642","text":"from django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom mixpanel import Mixpanel, MixpanelException\n\nfrom apps.customers.models import Customer\nfrom apps.swipedeals.models import Orders, Customers, Coupons\n\n\nclass Command(BaseCommand):\n\n    def get_customer(self, customer_id):\n        try:\n            sdcustomer = Customers.objects.get(id=customer_id)\n        except Customers.DoesNotExist:\n            raise\n        try:\n            customer = Customer.objects.get(user_profile__email=sdcustomer.email)\n            return customer\n        except Customer.DoesNotExist:\n            return sdcustomer\n\n    def get_skus(self, order_id):\n        coupons = Coupons.objects.filter(order_id=order_id)\n        skus = []\n        for coupon in coupons:\n            _sku = coupon.sku\n            if _sku not in (None, ''):\n                skus.extend([s.strip() for s in _sku.split(',')])\n        return skus\n\n    def handle(self, *args, **options):\n\n        # create mixpanel client\n        client = Mixpanel(settings.MIXPANEL_TOKEN)\n\n        # get cart purchases (Q objects must be combined with &, not the `and` keyword)\n        carts = Orders.objects.filter(Q(state='cart purchased') & ~Q(customer_id=None))[:1000]\n        for cart in carts:\n            # get customer\n            try:\n                customer = self.get_customer(cart.customer_id)\n            except Exception as e:\n                self.stderr.write(str(e))\n                continue\n\n            # get skus\n            skus = self.get_skus(cart.id)\n            skus = ', '.join(skus)\n\n            # get spend\n            spent = cart.total_price\n\n            # get date\n            timestamp = cart.created_at\n            timestamp = 
timestamp.strftime('%Y-%m-%d')\n\n            # add to mixpanel\n            try:\n                self.stdout.write('%s spent %s on %s on %s' % (str(customer), spent, skus, timestamp))\n                client.people_track_charge(customer.id, spent, {'SKUs': skus, 'Date': timestamp})\n                # client.track(customer.id, 'Sales Order', {'Date': timestamp, 'Spent': spent})\n            except MixpanelException as e:\n                self.stderr.write(str(e))\n                continue\n            except AttributeError:\n                continue\n","sub_path":"apps/swipedeals/management/commands/mixpanel-consume-historical-sales.py","file_name":"mixpanel-consume-historical-sales.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"551439792","text":"from typing import List\n\n\nclass Solution:\n    def largestDivisibleSubset(self, nums: List[int]) -> List[int]:\n        nums.sort()\n        dp = [1] * len(nums)\n        prev = [-1] * len(nums)\n        \n        for i in range(len(dp)):\n            for j in range(i):\n                if nums[i] % nums[j] == 0:\n                    if dp[j] + 1 > dp[i]:\n                        dp[i] = dp[j] + 1\n                        prev[i] = j\n        \n        largestSubsetPos = 0\n        largestSubsetLen = 0\n        for i in range(len(dp)):\n            if dp[i] > largestSubsetLen:\n                largestSubsetLen = dp[i]\n                largestSubsetPos = i\n        \n        ret = []\n        for i in range(largestSubsetLen):\n            ret.append(nums[largestSubsetPos])\n            largestSubsetPos = prev[largestSubsetPos]\n        \n        return ret","sub_path":"368. Largest Divisible Subset/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"40483011","text":"import csv\nimport datetime\nimport os\nimport locale\nlocale.setlocale(locale.LC_ALL,'en_US.UTF-8')\n\ncsv.register_dialect(\n    'mydialect',\n    delimiter = ',',\n    quotechar = '\"',\n    doublequote = True,\n    skipinitialspace = True,\n    lineterminator = '\\r\\n',\n    quoting = csv.QUOTE_MINIMAL)\n\ndef get_idx(rows, date_str):\n    mylen = len(list(row for row in rows if row['Date'] == date_str))\n    #print(\"mylen=\"+str(mylen))\n    if mylen <=0:#NOT EXIST\n        #print(\"j=\"+str('-1'))\n        return -1\n    elif mylen == 1:\n        j = next(j for j, row in enumerate(rows) if row['Date'] == date_str)\n        #print(\"j=\"+str(j))\n        return j\n    else:\n        raise ValueError(\"get_idx(): mylen > 1\")\n\npath = \"./history\" \nfiles= os.listdir(path) \n#print(files)\n\nf = open(path+\"/\"+'bitcoin.csv','r')\nreader = csv.DictReader(f, dialect='mydialect')\n\n#INITIATE ROWS_WR\nrows_wr = list() #list of dict\nfor row in reader:\n    vol = locale.atof(row['Volume'] ) if row['Volume'] != '-' else 0\n    mkc = locale.atof(row['MarketCap']) if row['MarketCap'] != '-' else 0 \n    rows_wr.append({'Date':row['Date'], \n        'Volume':0, 'MarketCap':0, \n        'VolumeBTC':vol,'MarketCapBTC':mkc,\n        'VolumeUSDT':0, 'MarketCapUSDT':0,\n        'VolumeETH':0, 'MarketCapETH':0,\n        'VolumeOTS':0, 'MarketCapOTS':0})\n\nf = open(path+\"/\"+'tether.csv','r')\nreader = csv.DictReader(f, dialect='mydialect')\nfor row in reader:\n    vol = locale.atof(row['Volume'] ) if row['Volume'] != '-' else 0\n    mkc = locale.atof(row['MarketCap']) if row['MarketCap'] != '-' else 0 \n    idx_wr = get_idx(rows_wr, row['Date'])\n    if idx_wr < 0: raise ValueError('idx_wr<0')\n    rows_wr[idx_wr]['VolumeUSDT'] = vol\n    rows_wr[idx_wr]['MarketCapUSDT'] = mkc \n\nf = open(path+\"/\"+'ethereum.csv','r')\nreader = csv.DictReader(f, dialect='mydialect')\nfor row in reader:\n    vol = locale.atof(row['Volume'] ) if row['Volume'] != '-' else 0\n    mkc = locale.atof(row['MarketCap']) if row['MarketCap'] != '-' else 0 \n    idx_wr = get_idx(rows_wr, row['Date'])\n    if idx_wr < 0: raise ValueError('idx_wr<0')\n    
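# (added descriptive comment) fill in this date's ETH totals; BTC and USDT were populated the same way above,\n    # and the loop further below accumulates the all-coin 'Volume'/'MarketCap' totals used for the OTS residual\n    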
rows_wr[idx_wr]['VolumeETH'] = vol\n    rows_wr[idx_wr]['MarketCapETH'] = mkc \n\npath = \"./history\" \nfiles= os.listdir(path) \nfor file in files: \n    #if file == 'bitcoin.csv': continue\n    #if file == 'paxex.csv': break\n    print(file)\n    cname = file.rstrip('csv').rstrip('\\.') \n\n    f = open(path+\"/\"+file,'r')\n    reader = csv.DictReader(f, dialect='mydialect')\n    \n    #load coin xxx history data to rows(list of dict)\n    #'High','Volume','MarketCap','HighInBTC']\n    for row in reader:\n        vol = locale.atof(row['Volume'] ) if row['Volume'] != '-' else 0\n        mkc = locale.atof(row['MarketCap']) if row['MarketCap'] != '-' else 0 \n        idx_wr = get_idx(rows_wr, row['Date'])\n        if idx_wr < 0: raise ValueError('idx_wr<0')\n        vol_prev = rows_wr[idx_wr]['Volume'] \n        rows_wr[idx_wr]['Volume'] += vol \n        rows_wr[idx_wr]['MarketCap'] += mkc\n        #if row['Date'] == 'Jan 01, 2018': \n        #    fname = './debug/fluidity_total_1.csv'\n        #    with open(fname,'a') as f:\n        #        writer = csv.writer(f)\n        #        writer.writerow((cname, vol_prev, vol, rows_wr[idx_wr]['Volume']))\n\n        #if row['Date'] == 'Jan 02, 2018': \n        #    fname = './debug/fluidity_total_2.csv'\n        #    with open(fname,'a') as f:\n        #        writer = csv.writer(f)\n        #        writer.writerow((cname, vol_prev, vol, rows_wr[idx_wr]['Volume']))\n\nfor row in rows_wr:\n    vol = row['Volume'] \n    mkc = row['MarketCap']\n    volbtc = row['VolumeBTC'] \n    mkcbtc = row['MarketCapBTC']\n    volusdt= row['VolumeUSDT'] \n    mkcusdt= row['MarketCapUSDT']\n    voleth = row['VolumeETH'] \n    mkceth = row['MarketCapETH']\n\n    row['VolumeOTS'] = vol-volbtc-volusdt-voleth \n    row['MarketCapOTS'] = mkc-mkcbtc-mkcusdt-mkceth\n    volots = row['VolumeOTS']\n    mkcots = row['MarketCapOTS']\n\n    row['VolumeBTCPct'] = volbtc / vol if vol != 0 else 0\n    row['MarketCapBTCPct'] = mkcbtc / mkc if mkc != 0 else 0\n    row['VolumeUSDTPct'] = volusdt/ vol if vol != 0 else 0\n    row['MarketCapUSDTPct']= mkcusdt/ mkc if mkc != 0 else 0\n    row['VolumeETHPct'] = voleth / vol if vol != 0 else 0\n    row['MarketCapETHPct'] = mkceth / mkc if mkc != 0 else 0\n    row['VolumeOTSPct'] = volots / vol if vol != 0 else 0\n    row['MarketCapOTSPct'] = mkcots / mkc if mkc != 0 else 0\n\nfname = './work/fluidity_total.csv'\nwith open(fname,'w') as f:\n    headers = ['Date',\n        'Volume','MarketCap',\n        'VolumeBTC','MarketCapBTC',\n        'VolumeBTCPct','MarketCapBTCPct',\n        'VolumeUSDT','MarketCapUSDT',\n        'VolumeUSDTPct','MarketCapUSDTPct',\n        'VolumeETH','MarketCapETH',\n        'VolumeETHPct','MarketCapETHPct',\n        'VolumeOTS','MarketCapOTS',\n        'VolumeOTSPct','MarketCapOTSPct'] \n    writer = csv.DictWriter(f, dialect='mydialect',fieldnames=headers)\n    writer.writeheader()\n    writer.writerows(rows_wr)\n","sub_path":"python/cmc/cmc_parse_fluidity_total.py","file_name":"cmc_parse_fluidity_total.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"143716110","text":"import os\nimport logging\nimport argparse\nimport textwrap\nimport datetime\n\ndef parse_args():\n    \"\"\"\n    Parse command line arguments specifying the data to move to folders.\n\n    Returns\n    --------\n    args: argparse.Namespace\n        parsed command line arguments\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Find collocations between an RO and MW satellite', formatter_class=argparse.RawDescriptionHelpFormatter,\n            epilog=textwrap.dedent('''\\\n            Example usage:\n\n                python3 reallocate_mw_data.py --data_dir /Users/alexmeredith/ro-nadir-collocation/SNPP-Data --start_day 21001 --end_day 21031 --time_tol 600 --sat_type jpss \n\n            The example above creates folders called 
`21001`...`21031` and allocates SNPP data for each day into the appropriate folder (+ data from 600 seconds before and after the day in question).\n '''))\n parser.add_argument('--data_dir', help='Path to data to move', required=True)\n parser.add_argument('--start_day', help='Day in TLE format (ex. 21001)', required=True)\n parser.add_argument('--end_day', help='Day in TLE format (ex. 21031)', required=True)\n parser.add_argument('--time_tol', type=int, help='Time tolerance', required=True)\n parser.add_argument('--sat_type', help='Satellite type. Supported types are (`jpss` and `metop`)', required=True)\n return parser.parse_args()\n\ndef yyddd_to_datetime(yy_jd):\n \"\"\"\n Convert day in format {yy}{ddd} (e.g. 21001 = January 1, 2021)\n to Python datetime object.\n Arguments \n -----------\n yy_jd: int or str \n julian date with 2-digit year\n Returns \n --------\n dt: datetime\n datetime.datetime corresponding to 0:00:00 on the represented date\n \"\"\"\n year,jd = int(str(yy_jd)[0:2]), int(str(yy_jd)[2:])\n dt = datetime.datetime(year+2000, 1, 1)+datetime.timedelta(days=jd-1)\n return dt\n\ndef year_and_day_of_year_to_jd(year, day_of_year):\n \"\"\"\n Converts a year and day of year to a yyddd formatted Julian date.\n Example usage:\n year = 2021 and day_of_year = 1 -> 21001\n year = 1999 and day_of_year = 2 -> year out of range error\n year = 2000 and day_of_year = 366 -> 00366\n Arguments \n ----------\n year: int\n year (will throw error if not between 2000 and 2099)\n day_of_year: int\n day of year\n \"\"\"\n if year < 2000 or year > 2099:\n raise ValueError('Year must be between 2000 and 2099 (inclusive).')\n year_str = str(year)[2:4]\n day_of_year_str = str(day_of_year)\n while len(day_of_year_str) < 3:\n day_of_year_str = '0' + day_of_year_str\n return year_str + day_of_year_str\n\ndef datetime_to_yyddd(dt, buffer):\n \"\"\"\n Given a datetime.datetime object, return all Julian dates within buffer \n seconds of the datetime in yyddd format.\n Example usage:\n datetime.datetime(2021, 1, 1, 23, 50) with a buffer of 1200 seconds (20 min)\n matches Jan 1 2021 and Jan 2 2021, and would return ['21001', '21002']\n Arguments \n ----------\n dt: datetime.datetime\n time object to classify\n buffer: int or float\n buffer in seconds\n Returns \n -------\n jd_list: list\n list of strings representing matching Julian dates\n \"\"\"\n jd_set = set() \n\n #Ensure that we sample at least one datetime per day between -buffer/86400 to buffer/86400\n buffer_days = int(buffer/86400)\n dt_list = [dt - datetime.timedelta(days=x) for x in range(-buffer_days, buffer_days)]\n\n #Get endpoints of time window from dt-buffer to dt + buffer\n dt_list.append(dt + datetime.timedelta(seconds=buffer))\n dt_list.append(dt - datetime.timedelta(seconds=buffer))\n\n for dt_sample in dt_list:\n day_of_year = dt_sample.timetuple().tm_yday\n year = dt_sample.year\n\n jd = year_and_day_of_year_to_jd(year, day_of_year)\n jd_set.add(jd)\n\n return sorted(list(jd_set))\n\ndef get_end_of_prev_data(root_path, day, prev_day, prev_date, extra_min, verbose=False):\n \"\"\"\n Get all files that contain data within extra_min of the\n start of the current day.\n Arguments\n ----------\n root_path: str\n root directory\n day: str\n current day\n prev_day: str\n previous day\n extra_min: int\n extra minutes to add to the start of the current day\n Returns\n -------\n list of files that contain data within extra_min of the\n start of the current day\n \"\"\"\n files = []\n for file in os.listdir(os.path.join(root_path, 
prev_day)):\n file_tokens = file.split('_')\n date = file_tokens[2]\n hh = file_tokens[4][1:3]\n mm = file_tokens[4][3:5]\n if date == prev_date and 60 - int(mm) + 60 * (23-int(hh)) < extra_min:\n files.append(os.path.join(root_path, prev_day, file))\n if verbose:\n for file in files:\n logging.info(f\"Moving {file} to {day}\")\n return files\n\ndef get_start_of_next_data(root_path, day, next_day, next_date, extra_min, verbose=False):\n \"\"\"\n Get all files that contain data within extra_min of the\n end of the current day.\n Arguments\n ----------\n root_path: str\n root directory\n day: str\n current day\n next_day: str\n previous day\n extra_min: int\n extra minutes to add to the start of the current day\n Returns\n -------\n list of files that contain data within extra_min of the\n start of the current day\n \"\"\"\n files = []\n for file in os.listdir(os.path.join(root_path, next_day)):\n file_tokens = file.split('_')\n hh = file_tokens[3][1:3]\n mm = file_tokens[3][3:5]\n date = file_tokens[2]\n if date == next_date and int(mm) + 60 * (int(hh)) < extra_min:\n files.append(os.path.join(root_path, next_day, file))\n if verbose:\n for file in files:\n logging.info(f\"Moving {file} to {day}\")\n return files\n\ndef metop_fname_to_start_time(fname):\n \"\"\"\n Transforms a filename for a MetOp sounding file into a datetime \n representing the time of the first sounding in the file.\n MetOp sounding files have the following format:\n `W_XX-EUMETSAT-Darmstadt, SOUNDING+SATELLITE, METOP{A, B, C}+\n AMSUA_C_EUMP_{yyyy}{mm}{dd}{hh}{mm}{ss}_{ffff}_eps_o_l1.nc`\n where fffff is the five-digit file number.\n Example usage:\n `W_XX-EUMETSAT-Darmstadt, SOUNDING+SATELLITE, METOPC+\n AMSUA_C_EUMP_20210119223423_43278_eps_o_l1.nc` ->\n datetime.datetime(2021, 1, 19, 22, 34, 23)\n Arguments \n ----------\n fname: str\n name of a MetOp sounding file in the specified format\n Returns \n --------\n dt: datetime.datetime\n datetime object corresponding to the first sounding time \n in the file\n \"\"\"\n try:\n fname_tokens = fname.split('_')\n date_info = fname_tokens[4]\n year = int(date_info[0:4])\n month = int(date_info[4:6])\n day = int(date_info[6:8])\n hour = int(date_info[8:10])\n minute = int(date_info[10:12])\n second = int(date_info[12:14])\n except:\n raise ValueError(f'File {fname} was not in expected MetOp data format and could not be parsed.')\n\n return datetime.datetime(year, month, day, hour, minute, second)\n\ndef jpss_fname_to_start_time(fname):\n \"\"\"\n Transforms a filename for a GATMO file containing soundings for\n a satellite in the JPSS program into a datetime representing the\n time of the first sounding in the file.\n GATMO files have the following format:\n `GATMO_{nnn}_d{yyyy}{mm}{dd}_t{hh}{mm}{sss}_e{hh}{mm}{sss}\n _b{ffff}_c{gggggggggggggggggggg}_nobc_ops.h5`\n where ffff and ggg.. are irrelevant numbers for our purposes. nnn \n is a code identifying the satellite and is j01 for NOAA-20 and npp\n for Suomi-NPP. 
The second hh-mm-ss time corresponds to the last \n sounding time and isn't used in this function.\n Example usage:\n `GATMO_npp_d20210101_t2356346_e0004343_b47579\n _c20210102040435288988_nobc_ops.h5` ->\n datetime.datetime(2021, 1, 1, 23, 56, 35)\n Arguments \n ----------\n fname: str\n name of a GATMO file in the specified format\n Returns \n ---------\n dt: datetime.datetime\n datetime object corresponding to the first sounding time \n in the file\n \"\"\"\n try:\n fname_tokens = fname.split('_')\n date_info = fname_tokens[2]\n year = int(date_info[1:5])\n month = int(date_info[5:7])\n day = int(date_info[7:9])\n time_info = fname_tokens[3]\n hour = int(time_info[1:3])\n minute = int(time_info[3:5])\n second = int(round(int(time_info[5:7]) + int(time_info[7])/10))\n if second > 59:\n second = 59\n except:\n raise ValueError(f'File {fname} was not in expected JPSS data format and could not be parsed.')\n\n return datetime.datetime(year, month, day, hour, minute, second)\n\ndef group_sounding_files_by_jd(sounding_files, min_jd, max_jd, buffer, fname_to_time_func):\n \"\"\"\n Given a list of sounding files, a min and max Julian date, and a \n time buffer in seconds, find a file-to-Julian-date matchup such that \n each day from min_jd to max_jd (inclusive) corresponds to the minimal\n list of sounding files fully covering the time window (day start - buffer \n sec, day end + buffer sec).\n It is assumed that there are no gaps between files, so the end time of a file \n is assumed to be the same as the start time of the next file.\n Example:\n ['W_XX-..._20210101235000', 'W_XX-...20210102010000'], 21001, 21002, 600 \n yields '21001' matched to the first file and '21002' matched to both files\n Arguments \n ----------\n sounding_files: list of str\n list of sounding files to consider\n min_jd: str \n minimum Julian date to match files to (files before this date won't be \n allocated)\n max_jd: str\n maximum Julian date to match files to (files after this date won't be\n allocated)\n buffer: int\n time buffer in seconds\n fname_to_time_func: function\n function mapping str -> datetime which converts the start time of a file \n to a datetime given the file name\n Returns \n --------\n jd_to_files: dict\n dict with str reps of Julian dates as keys and lists of file names as values\n \"\"\"\n jds_to_files = {}\n min_jd_int = int(min_jd)\n max_jd_int = int(max_jd)\n sounding_files = sorted(sounding_files)\n for file in sounding_files:\n jds = datetime_to_yyddd(fname_to_time_func(file), buffer)\n for jd in jds:\n if int(jd) >= min_jd_int and int(jd) <= max_jd_int:\n if jd in jds_to_files:\n jds_to_files[jd].append(file)\n else:\n jds_to_files[jd] = [file]\n\n #Need to check if file before first file in day ends after (day-buffer)\n #(same as first file starts after day-buffer)\n for jd in jds_to_files:\n file_list = sorted(jds_to_files[jd])\n first_file_ind = sounding_files.index(file_list[0])\n if first_file_ind > 0:\n first_file_time = fname_to_time_func(sounding_files[first_file_ind])\n if (yyddd_to_datetime(jd) - first_file_time).seconds <= buffer:\n file_list.append(sounding_files[first_file_ind-1])\n jds_to_files[jd] = sorted(file_list)\n return jds_to_files\n\ndef allocate_files(root_path, min_jd, max_jd, buffer, fname_to_time_func):\n \"\"\"\n Wrapper around group_sounding_files_by_jd. 
Given a folder containing \n sounding files, find a jd -> file mapping using group_sounding_files_by_jd,\n and for each jd create a folder and copy all sounding files corresponding \n to that jd into the jd folder.\n Example usage:\n allocate_files('/Users/alexmeredith/gpsro-data/NOAA20-Data/raw', '21001', \n '21031', 10800, jpss_fname_to_start_time)\n This will create folders 21001...21031 in `raw` and fill them appropriately.\n Arguments\n ----------\n root_path: str\n path to folder containing sounding files\n min_jd: str\n minimum Julian date to match files to\n max_jd: str\n maximum Julian date to match files to\n buffer: int\n time buffer in seconds\n fname_to_time_func: function\n function mapping str -> datetime which converts the start time of a file\n to a datetime given the file name\n \"\"\"\n sounding_files = os.listdir(root_path)\n jds_to_files = group_sounding_files_by_jd(sounding_files, min_jd, max_jd, buffer, fname_to_time_func)\n for jd in jds_to_files:\n os.makedirs(f'{root_path}/{jd}', exist_ok=True)\n for file in jds_to_files[jd]:\n os.system(f'cp {root_path}/{file} {root_path}/{jd}/{file}')\n\ndef main():\n args = parse_args()\n name_to_time = jpss_fname_to_start_time if args.sat_type == 'jpss' else metop_fname_to_start_time if args.sat_type == 'metop' else None\n if name_to_time == None:\n raise ValueError(f'Satellite type {args.sat_type} not supported. Only `jpss` and `metop` are supported.')\n allocate_files(args.data_dir, args.start_day, args.end_day, args.time_tol, name_to_time)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"data/reallocate_mw_data.py","file_name":"reallocate_mw_data.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"8808404","text":"from os import listdir\nfrom os.path import isfile, join\npositiveFiles = ['positiveReviews/' + f for f in listdir('positiveReviews/') if isfile(join('positiveReviews/', f))]\nnegativeFiles = ['negativeReviews/' + f for f in listdir('negativeReviews/') if isfile(join('negativeReviews/', f))]\nnumWords = []\nfor pf in positiveFiles:\n with open(pf, \"r\", encoding='utf-8') as f:\n line=f.readline()\n counter = len(line.split())\n numWords.append(counter)\nprint('Positive files finished')\n\nfor nf in negativeFiles:\n with open(nf, \"r\", encoding='utf-8') as f:\n line=f.readline()\n counter = len(line.split())\n numWords.append(counter)\nprint('Negative files finished')\n\nnumFiles = len(numWords)\nprint('The total number of files is', numFiles)\nprint('The total number of words in the files is', sum(numWords))\nprint('The average number of words in the files is', sum(numWords)/len(numWords))\n","sub_path":"Code/contract_size.py","file_name":"contract_size.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"61132184","text":"#**************************#\n#* Trevor Bright * #\n#* CECS 590-01 * #\n#* Assignment 1 * #\n#************************* #\nimport numpy as np\nimport pandas as ps\nimport math\nimport random\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\nseed 
= 7\nnp.random.seed(seed)\n\ndef checkPoint(x, y): \n    # squared distance from the origin; a point lies inside the unit circle when this is <= 1\n    p = math.pow((x), 2) + math.pow((y), 2)\n    return p \n\ndef generateDataset():\n    inCords = []\n    outCords = []\n    while len(inCords) < 100 or len(outCords) < 100:\n        tempX = np.random.normal(.5, math.sqrt(.08))\n        tempY = np.random.normal(.5, math.sqrt(.08))\n        if (tempX >= 0 and tempY >= 0 and tempX < 1 and tempY < 1):\n            if(checkPoint(tempX,tempY) <= 1):\n                inCords.append([tempX,tempY])\n            else:\n                outCords.append([tempX,tempY])\n    joined = inCords[:100]+outCords[:100]\n    random.shuffle(joined)\n    return joined\n\n\ndef rankDataset(dataset):\n    labels = []\n    for cords in dataset:\n        if(checkPoint(cords[0],cords[1]) <= 1):\n            labels.append(1)\n        else:\n            labels.append(0)\n    return labels\n\ndef create_baseline():\n    # create model\n    model = Sequential()\n    model.add(Dense(60, input_dim=2, kernel_initializer='normal', activation='relu'))\n    model.add(Dense(30, kernel_initializer='normal', activation='relu'))\n    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n    # Compile model\n    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model \n\ndataset = generateDataset()\nlabels = rankDataset(dataset)\n\nnp.random.seed(seed)\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, dataset, labels, cv=kfold)\n\nprint(\"Standardized: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))","sub_path":"Bright_Trevor_01/Trevor_Bright_problem_4b.py","file_name":"Trevor_Bright_problem_4b.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"414398826","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#########################################################\n# Author: Nilo Ney Coutinho\n# Book: Introdução à Programação com Python\n#########################################################\n# File: listagem6-21.py\n# Description: Simulates a bank teller queue.\n\nultimo = 10\nfila = list(range(1, ultimo+1))\nwhile True: \n\tprint(\"\\nThere are {0} customers in the queue\".format(len(fila)))\n\tprint(\"Current queue: \",fila)\n\tprint(\"Type F to add a customer to the end of the queue, \")\n\tprint(\"or A to serve the next customer, S to quit...\")\n\toperacao = input(\"Operation (F, A or S):\").upper()\n\tif operacao == 'A':\n\t\tif(len(fila) > 0):\n\t\t\tatendido = fila.pop(0)\n\t\t\tprint(\"Customer {0} served\".format(atendido))\n\t\telse:\n\t\t\tprint(\"Empty queue! 
No one to serve...\")\n\telif operacao == 'F':\n\t\tultimo+=1\n\t\tfila.append(ultimo)\n\telif operacao == 'S':\n\t\tbreak\n\telse:\n\t\tprint(\"Invalid operation, please type only F, A or S!\")","sub_path":"cap06/listagem6-21.py","file_name":"listagem6-21.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"192034676","text":"#Written by Colman Koivisto and Kristopher Quaife-Glasier Section EB2\nimport sys\nimport random\nimport time\nfrom tkinter import *\nfrom Modules import *\nfrom Modules.SerialComm import *\n\n\"\"\"\nProtocol:\n    Server actions:\n        SF - Fold\n        SB - Bet\n        SH - Check\n        SC - Call\n        SR - Raise\n    Client actions:\n        CF - Fold\n        CB - Bet\n        CH - Check\n        CC - Call\n        CR - Raise\n    Others:\n        P - Player cards\n        O - Community cards\n        BB - Big Blind\n        BS - Small Blind\n        L - Login\n        DS - Server is dealer\n        DC - Client is dealer\n        CS - Client Stack\n        SS - Server Stack\n\"\"\"\n
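\n# Added illustrative note (not part of the original file): the login handshake in initGame() below\n# exchanges these messages in order (usernames and stacks are made-up example values):\n#     server -> client: \"L Alice\"   (server login)\n#     client -> server: \"L Bob\"     (client login)\n#     server -> client: \"SS 500\"    (server stack)\n#     client -> server: \"CS 500\"    (client stack)\n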
\n\ndef loginLoop(serial_in,serial_out):\n    l = Tk()\n    user = login(l).user()\n    return user\n\n\ndef initGame(serial_in, serial_out, userC):\n    cliname=userC.name()\n    clistack=userC.get_balance()\n    srvname=\"\"\n    srvstack=0\n\n    while True:#Waits until the server has sent its login\n        srvname=receive_msg_from_server(serial_in)\n        if srvname[0]==\"L\":\n            break\n\n    srvname=srvname[2:]\n    send_msg_to_server(serial_out,\"L \"+cliname)\n\n    while True:#Waits until the server has sent its stack\n        srvstack=receive_msg_from_server(serial_in)\n        print(srvstack)\n        if srvstack[0:2]==\"SS\":\n            break\n    srvstack=int(\"\".join(srvstack[3:]))\n    send_msg_to_server(serial_out,\"CS \"+str(clistack))\n\n    print()\n    print(\"Server:\")\n    print(\"\\tUsername:\\t\\t\", srvname)\n    print(\"\\tCurrent balance:\\t\", srvstack)\n    print(\"Client:\")\n    print(\"\\tUsername:\\t\\t\", cliname)\n    print(\"\\tCurrent balance:\\t\", clistack)\n    print()\n\n    app = Game((srvname,srvstack), userC, serial_in, serial_out)\n\n\n\n\ndef mainLoop(serial_in,serial_out):\n    while True:\n        user=loginLoop(serial_in,serial_out)\n        initGame(serial_in,serial_out,user)\n\n\n\nif __name__==\"__main__\":\n\n    import argparse\n    parser = argparse.ArgumentParser(\n        description='client-server message test.',\n        formatter_class=argparse.RawTextHelpFormatter,\n    )\n\n    parser.add_argument(\"-s\",\n        help=\"Set serial port for protocol\",\n        nargs=\"?\",\n        type=str,\n        dest=\"serial_port_name\",\n        default=\"/dev/ttyACM0\")\n\n    args = parser.parse_args()\n\n    # this imports serial, and provides a useful wrapper around it\n    from Modules import textserial\n\n    serial_port_name = args.serial_port_name\n\n    # Open up the connection\n    baudrate = 9600 # [bit/seconds] 115200 also works\n\n    # Run the client protocol forever\n\n    # The with statement ensures that if things go bad, then ser\n    # will still be closed properly.\n\n    with textserial.TextSerial(\n            serial_port_name, baudrate, newline=None) as ser:\n        mainLoop(ser, ser)\n","sub_path":"5_Texas_Hold'em/Client/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"398506954","text":"'''\nCreated on 03-Nov-2017\n\n@author: karthikeyan\n'''\nimport unittest\nfrom flask import Flask\nfrom mock import patch\nfrom com.mycompany.controller import reports_controller\nfrom com.mycompany.service.report_service import ReportService\n\napp = reports_controller.app \n\nclass ControllerTest(unittest.TestCase):\n\n    app = app.test_client()\n    app.testing = True\n    controller = reports_controller\n    \n    @patch.object(ReportService, \"get_all_cpu_report\")\n    def test_get_cpu_report(self, mock_cpu_report):\n        mock_cpu_report.return_value = {\"component\" : \"CPU\", \"utilization\" : 5.0, \"time\" : \"2017-11-13 09:52:13.371590\"}\n        result = self.app.get('/report/cpu')\n        self.assertEqual(result.status_code, 200)\n    \n    @patch.object(ReportService, \"get_all_ram_report\")\n    def test_get_virtual_memory_report(self, mock_ram_report):\n        mock_ram_report.return_value = {\"component\" : \"VirtualMemory\", \"utilization\" : 5.0, \"time\" : \"2017-11-13 09:52:13.371590\"}\n        result = self.app.get('/report/virtualmemory')\n        self.assertEqual(result.status_code, 200)\n    \n    @patch.object(Flask, \"run\")\n    def test_start_flask(self, mock_run):\n        self.controller.start_flask()\n        self.assertTrue(mock_run.called)\n    \n    ","sub_path":"src/unittest/python/reports_controller_tests.py","file_name":"reports_controller_tests.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"335439566","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.home, name='home'),\n    path('gallery/', include('gallery.urls')),\n    path('register/', include('registration.urls')),\n    path('database/', include('database.urls')),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('blog/', include('blog.urls')),\n    path('sendEmail/', include('sendEmail.urls')),\n    path('profile/', include('user.urls')),\n\n\n]\n\nurlpatterns += staticfiles_urlpatterns()\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"Anabia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"483671383","text":"import tweepy\nfrom tweepy.streaming import StreamListener\nfrom credentials import *\n\n# Access and authorize our Twitter credentials from credentials.py\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\nB=\"@kanyewest\"\nnew_tweets = api.user_timeline(screen_name=B,count=8000)\nb=\" \"\nfor s in new_tweets:\n    b = b + \". \" + s.text\n    print(b)\n\n# write the accumulated text once, instead of truncating and rewriting the file on every iteration\nwith open(\"data/kanye/input.txt\", \"w\") as writer:\n    writer.write(b)\n
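\n# Added note (assumption flagged: based on the public Twitter API limits, not on this repository):\n# user_timeline returns at most roughly 200 statuses per call, so count=8000 is silently capped;\n# paginating with tweepy.Cursor(api.user_timeline, screen_name=B).items(8000) would be needed for more.\n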
","sub_path":"harvest.py","file_name":"harvest.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"73197505","text":"from . import news_blu\nfrom flask import render_template, g, request, jsonify\nfrom info.utils.common import user_login_data\nfrom info.utils.response_code import RET\nfrom info.models import *\n\n\n@news_blu.route('/<int:news_id>')\n@user_login_data\ndef news_detail(news_id):\n    news_ls = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)\n    click_news_list = [news.to_basic_dict() for news in news_ls]\n    news = News.query.get(news_id)\n    news.clicks += 1\n\n    # Check whether the user has collected (bookmarked) this news item\n    is_collected = True if g.user and news in g.user.collection_news else False\n\n    # Check whether the logged-in user follows the author of this news item\n    is_followed = True if g.user and news.user and news.user.followers.filter(User.id == g.user.id).count() > 0 else False\n\n    comments = Comment.query.filter(Comment.news_id == news_id).order_by(Comment.create_time.desc()).all()\n\n    comment_like_ids = []\n\n    comment_ids = [comment.id for comment in comments] if g.user else []\n    if len(comment_ids) > 0:\n        # Fetch the like records left by the current user on this news item's comments\n        comment_likes = CommentLike.query.filter(CommentLike.comment_id.in_(comment_ids),\n                                                 CommentLike.user_id == g.user.id).all()\n        # Extract all comment ids from those records\n        comment_like_ids = [comment_like.comment_id for comment_like in comment_likes]\n\n    comment_list = []\n    for item in comments if comments else []:\n        comment_dict = item.to_dict()\n        comment_dict[\"is_like\"] = True if g.user and item.id in comment_like_ids else False\n        comment_list.append(comment_dict)\n\n    data = {\n        \"news\": news.to_dict(),\n        \"user_info\": g.user.to_dict() if g.user else None,\n        \"is_collected\": is_collected,\n        \"is_followed\": is_followed,\n        \"comments\": comment_list,\n        \"click_news_list\": click_news_list\n    }\n    return render_template('news/detail.html', data=data)\n\n\n@news_blu.route(\"/news_collect\", methods=['POST'])\n@user_login_data\ndef news_collect():\n    user = g.user\n    if not user:\n        return jsonify(errno=RET.SESSIONERR, errmsg=\"User not logged in\")\n    json_data = request.json\n    news_id = json_data.get(\"news_id\")\n    action = json_data.get(\"action\")\n    if not news_id:\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n    if action not in (\"collect\", \"cancel_collect\"):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameters\")\n    news = News.query.get(news_id)\n    user.collection_news.append(news) if action == \"collect\" else user.collection_news.remove(news)\n    db.session.commit()\n    return jsonify(errno=RET.OK, errmsg=\"Operation successful\")\n
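\n# Added illustrative note (not part of the original file): example payload for /news_collect under\n# the JSON contract above:\n#     request:  {\"news_id\": 1, \"action\": \"collect\"}\n#     response: {\"errno\": RET.OK, \"errmsg\": \"Operation successful\"}\n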
\n\n@news_blu.route('/news_comment', methods=[\"POST\"])\n@user_login_data\ndef add_news_comment():\n    user = g.user\n    if not user:\n        return jsonify(errno=RET.SESSIONERR, errmsg=\"User not logged in\")\n    data_dict = request.json\n    news_id = data_dict.get(\"news_id\")\n    comment_str = data_dict.get(\"comment\")\n    parent_id = data_dict.get(\"parent_id\")\n    comment = Comment()\n    comment.user_id = user.id\n    comment.news_id = news_id\n    comment.content = comment_str\n    if parent_id:\n        comment.parent_id = parent_id\n    db.session.add(comment)\n    db.session.commit()\n    return jsonify(errno=RET.OK, errmsg=\"Comment posted\", data=comment.to_dict())\n\n\n@news_blu.route('/comment_like', methods=[\"POST\"])\n@user_login_data\ndef set_comment_like():\n    if not g.user:\n        return jsonify(errno=RET.SESSIONERR, errmsg=\"User not logged in\")\n    data_dict = request.json\n    comment_id = data_dict.get(\"comment_id\")\n    action = data_dict.get(\"action\")\n    comment = Comment.query.get(comment_id)\n    if action == \"add\":\n        comment_like = CommentLike.query.filter_by(comment_id=comment_id, user_id=g.user.id).first()\n        if not comment_like:\n            comment_like = CommentLike()\n            comment_like.comment_id = comment_id\n            comment_like.user_id = g.user.id\n            comment.like_count += 1\n            db.session.add(comment_like)\n    else:\n        comment_like = CommentLike.query.filter_by(comment_id=comment_id, user_id=g.user.id).first()\n        if comment_like:\n            comment.like_count -= 1\n            db.session.delete(comment_like)\n    db.session.commit()\n    return jsonify(errno=RET.OK, errmsg=\"Operation successful\")\n\n\n@news_blu.route('/followed_user', methods=[\"POST\"])\n@user_login_data\ndef followed_user():\n    if not g.user:\n        return jsonify(errno=RET.SESSIONERR, errmsg=\"User not logged in\")\n\n    user_id = request.json.get(\"user_id\")\n    action = request.json.get(\"action\")\n    target_user = User.query.get(user_id)\n    # Apply different logic depending on the requested action\n    if action == \"follow\":\n        if target_user.followers.filter(User.id == g.user.id).count() > 0:\n            return jsonify(errno=RET.DATAEXIST, errmsg=\"Already following\")\n        target_user.followers.append(g.user)\n    else:\n        if target_user.followers.filter(User.id == g.user.id).count() > 0:\n            target_user.followers.remove(g.user)\n\n    db.session.commit()\n    return jsonify(errno=RET.OK, errmsg=\"Operation successful\")\n","sub_path":"Information/info/module/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"481594892","text":"#!/usr/bin/env python3\n\nimport socket as s\nimport time as t\nimport datetime as dt\n\n# set variables \ni = 1\nwait = 2 # check interval in seconds\nsrv = {'drive.google.com':'0.0.0.0', 'mail.google.com':'0.0.0.0', 'google.com':'0.0.0.0'}\ninit=0\n\nprint('*** start script ***')\nprint(srv)\nprint('********************')\n\nwhile True: # debug-limited number of checks\n    for host in srv:\n        ip = s.gethostbyname(host)\n        if ip != srv[host]:\n            if i==1 and init !=1:\n                print(str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")) +' [ERROR] ' + str(host) +' IP mismatch: '+srv[host]+' '+ip)\n            srv[host]=ip\n    #print(i)\n    i+=1 # iteration counter for debugging; comment out for an endless loop\n    if i >=50 :\n        break\n    t.sleep(wait)
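\n# Added illustrative note (not part of the original script): with the print format above, a detected\n# change for google.com would log a line like (timestamp and IP are made-up values):\n#     2021-01-01 12:00:00 [ERROR] google.com IP mismatch: 0.0.0.0 142.250.64.78\n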
Try again.\")\r\n continue\r\n\r\nif(num_Controls == 0 and num_Patients == 0):\r\n exit(1)\r\n \r\n# class that allows iteration over all objects of a class\r\nclass IterSubject(type):\r\n def __iter__(cls):\r\n return iter(cls._allSubjects)\r\n\r\n\r\n# class that defines a generic Subject\r\nclass Subject(metaclass=IterSubject):\r\n _allSubjects = []\r\n\r\n def __init__(self, subjectID): # initializer (requires Subject ID as an argument when creating an object)\r\n self._allSubjects.append(self)\r\n #self.data = np.zeros((8, 8)) # 2D array of fixed size\r\n self.subjectID = subjectID\r\n\r\n def setData(self, file): # function to pass in name of text file and load data into array\r\n with open(file) as f:\r\n for i,l in enumerate(f):\r\n pass\r\n r = i+1\r\n c = f.readline()\r\n cs = [int(n) for n in c.split()]\r\n\r\n #print(r, c)\r\n self.data = np.zeros(r,cs)\r\n\r\n self.data = np.loadtxt(file)\r\n backwards = str(file)[::-1]\r\n backwards = backwards.split('\\\\')\r\n forwards = backwards[0][::-1]\r\n forwards = forwards.split('.')\r\n self.name = forwards[0]\r\n\r\n def getData(self): # function that return array\r\n return self.data\r\n\r\n def printData(self): # function to print array\r\n print(self.data)\r\n\r\n def printID(self): # function print Subject ID\r\n print(self.subjectID)\r\n\r\n def getName(self):\r\n return(self.name)\r\n\r\n def trimData(self, new_size_x, new_size_y):\r\n temp = np.copy(self.data)\r\n self.data = temp[:new_size_x,:new_size_y]\r\n\r\ndata_folder = Path(r\"//utdfs01/UTD/Dept/BBSResearch/LabCLINT/SandBox/Python toolbox project\")\r\n\r\nControls = [] # list to store a specific group of instances of class Subject\r\nprint(\"\\nData entry for Controls:\")\r\nfor x in range(\r\n num_Controls): # loop through number of Controls that user inputted and get data files for each Control subject\r\n file_Location = input(\r\n \"\\nPlease enter the path of the data file for Control %s in the folder 'Python toolbox project': \\n\" %\r\n (str)(x + 1)) # request user to input path to data file; example can be found hard-coded below\r\n fileName = data_folder / file_Location # compile full path of data file\r\n Controls.append(\r\n Subject(\"Control %s\" % (str)(x + 1))) # Create a new instance of class Subject and add it to Controls list\r\n while True:\r\n try:\r\n Controls[x].setData(fileName) # read data from corresponding data file and store in array for that Control\r\n break\r\n except:\r\n print(\"\\nFile not found in the specified path: %s\" % fileName)\r\n file_Location = input(\r\n \"\\nPlease try again. 
\\n\") # request user to input path to data file; example can be found hard-coded below\r\n fileName = data_folder / file_Location # compile full path of data file\r\n\r\nPatients = [] # list to store a specific group of instances of class Subject\r\nprint(\"\\nData entry for Patients:\")\r\nfor p in range(\r\n num_Patients): # loop through number of Controls that user inputted and get data files for each Control subject\r\n file_Location = input(\r\n \"\\nPlease enter the path of the data file for Patient %s in the folder 'Python toolbox project': \\n\" %\r\n (str)(p + 1)) # request user to input path to data file; example can be found hard-coded below\r\n fileName = data_folder / file_Location # compile full path of data file\r\n Patients.append(\r\n Subject(\"Patient %s\" % (str)(p + 1))) # Create a new instance of class Subject and add it to Controls list\r\n while True:\r\n try:\r\n Patients[p].setData(fileName) # read data from corresponding data file and store in array for that Control\r\n break\r\n except:\r\n print(\"\\nFile not found in the specified path: %s\" % fileName)\r\n file_Location = input(\r\n \"\\nPlease try again. \\n\") # request user to input path to data file; example can be found hard-coded below\r\n fileName = data_folder / file_Location # compile full path of data file\r\n\r\n# Control1 = Subject(\"Control 1\")\r\n# fileName = data_folder / r\"Sample data/Controls/avv_045_restZT_data-sLorRoiLog.txt\" #specify file path here\r\n# Control1.setData(fileName)\r\n\r\n# Control1 \"Sample data/Controls/avv_045_restZT_data-sLorRoiLog.txt\"\r\n# Control2 \"Sample data/Controls/cak_011_restZT_data-sLorRoiLog.txt\"\r\n# Control3 \"Sample Data/Controls/nck_020_restZT_data-sLorRoiLog.txt\"\r\n# Control4 \"Sample Data/Controls/oxs_036_restZT_data-sLorRoiLog.txt\"\r\n# Control5 \"Sample Data/Controls/sew_023_restZT_data-sLorRoiLog.txt\"\r\n# Control6 \"Sample Data/Controls/uxk_001_restZT_data-sLorRoiLog.txt\"\r\n\r\n# Patient1 \"Sample data/Patients/ark_016_restZT_data-sLorRoiLog.txt\"\r\n# Patient2 \"Sample data/Patients/ctc_007_restZT_data-sLorRoiLog.txt\"\r\n# Patient3 \"Sample data/Patients/cxt_019_restZT_data-sLorRoiLog.txt\"\r\n# Patient4 \"Sample data/Patients/kjs_037_restZT_data-sLorRoiLog.txt\"\r\n# Patient5 \"Sample data/Patients/kxm_047_restZT_data-sLorRoiLog.txt\"\r\n# Patient6 \"Sample data/Patients/mxn_002_restZT_data-sLorRoiLog.txt\"\r\n# Patient7 \"Sample data/Patients/scj_009_restZT_data-sLorRoiLog.txt\"\r\n\r\n\r\n# list to hold the user's desired frequencies, in the order entered\r\nchosenFrequencies = []\r\n\r\n\r\n# function to take user input for frequency bands used in their experiment\r\ndef ask_for_frequencies():\r\n print(\r\n \"\\nWhat frequency bands are you evaluating in your data? Please enter them in the order in which they are present in the input files. Enter all frequency bands in one line (e.g. 
ABCGK).\")\r\n\r\n # function to return chosen frequency band, which can then be added to the chosenFrequencies list\r\n def frequencyChoice(c):\r\n switcher = {\r\n \"A\": 'Delta',\r\n \"a\": 'Delta',\r\n \"B\": 'Theta',\r\n \"b\": 'Theta',\r\n \"C\": 'Alpha',\r\n \"c\": 'Alpha',\r\n \"D\": 'Alpha1',\r\n \"d\": 'Alpha1',\r\n \"E\": 'Alpha2',\r\n \"e\": 'Alpha2',\r\n \"F\": 'Alpha3',\r\n \"f\": 'Alpha3',\r\n \"G\": 'Beta',\r\n \"g\": 'Beta',\r\n \"H\": 'Beta1',\r\n \"h\": 'Beta1',\r\n \"I\": 'Beta2',\r\n \"i\": 'Beta2',\r\n \"J\": 'Beta3',\r\n \"j\": 'Beta3',\r\n \"K\": 'Gamma',\r\n \"k\": 'Gamma',\r\n \"L\": 'Low Gamma',\r\n \"l\": 'Low Gamma',\r\n \"M\": 'High Gamma',\r\n \"m\": 'High Gamma',\r\n \"N\": 'Mu',\r\n \"n\": 'Mu',\r\n }\r\n return switcher.get(c, 0)\r\n\r\n # variable to process input validation\r\n valid = True\r\n\r\n # function to display all frequency bands to user and request selection of desired bands\r\n def inputFreq():\r\n choice = input(\"\"\"\r\n A: Delta\r\n B: Theta\r\n C: Alpha\r\n D: Alpha1\r\n E: Alpha2\r\n F: Alpha3\r\n G: Beta\r\n H: Beta1\r\n I: Beta2\r\n J: Beta3\r\n K: Gamma\r\n L: Low Gamma\r\n M: High Gamma\r\n N: Mu\"\"\"\r\n\r\n \"\\n\\n\\nPlease enter your desired bands: \\n\")\r\n return choice\r\n\r\n # list of all valid entries to query\r\n frequencyList = ['A', 'a', 'B', 'b', 'C', 'c', 'D', 'd', 'E', 'e', 'F', 'f', 'G', 'g', 'H', 'h', 'I', 'i', 'J', 'j',\r\n 'K', 'k', 'L', 'l', 'M', 'm', 'N', 'n']\r\n\r\n # parse inputted frequencies into an itemized list and add each frequency band to chosenFrequencies list\r\n c = list(inputFreq())\r\n print(c)\r\n for cv in range(len(c)):\r\n if c[cv] in frequencyList:\r\n valid = True\r\n else:\r\n valid = False\r\n\r\n # if input is invalid, continue requesting user to input valid frequencies until they do so\r\n while (not valid):\r\n print(\"\\nYou have entered invalid frequencies. 
Please try again.\\n\")\r\n c = list(inputFreq())\r\n for cv in range(len(c)):\r\n if c[cv] in frequencyList:\r\n valid = True\r\n else:\r\n valid = False\r\n\r\n # add selected frequency band names into chosenFrequencies list\r\n for bands in range(len(c)):\r\n chosenFrequencies.append(frequencyChoice(c[bands]))\r\n\r\n # while t != \"P\" and t != \"p\":\r\n # choice = input(\"\"\"\r\n # A: Delta\r\n # B: Theta\r\n # C: Alpha\r\n # D: Alpha1\r\n # E: Alpha2\r\n # F: Alpha3\r\n # H: Beta\r\n # I: Beta1\r\n # J: Beta2\r\n # K: Beta3\r\n # L: Gamma\r\n # M: Low Gamma\r\n # N: High Gamma\r\n # O: Mu\r\n # P: Stop\r\n\r\n # Please enter your choice: \\n\"\"\")\r\n # if choice == \"A\" or choice == \"a\":\r\n # frequency.append(1)\r\n # if choice == \"B\" or choice == \"b\":\r\n # frequency.append(2)\r\n # if choice == \"C\" or choice == \"c\":\r\n # frequency.append(3)\r\n # if choice == \"D\" or choice == \"d\":\r\n # frequency.append(4)\r\n # if choice == \"E\" or choice == \"e\":\r\n # frequency.append(5)\r\n # if choice == \"F\" or choice == \"f\":\r\n # frequency.append(6)\r\n # if choice == \"G\" or choice == \"g\":\r\n # frequency.append(7)\r\n # if choice == \"H\" or choice == \"h\":\r\n # frequency.append(8)\r\n # if choice == \"X\" or choice == \"x\":\r\n # t = \"X\" #Stop asking what frequency they're using.\r\n # print(frequency)\r\n # print(frequency[0])\r\n\r\n\r\n# run the function to get frequencies from user\r\nask_for_frequencies()\r\nprint(chosenFrequencies)\r\n\r\n#Making an excel file and sorting data\r\ndef export_to_excel():\r\n controls_organized = xlsxwriter.Workbook(\"Controls_Organized.xlsx\")\r\n patients_organized = xlsxwriter.Workbook(\"Patients_Organized.xlsx\")\r\n sheets_controls = []\r\n sheets_patients = []\r\n for number_of_freq in range(len(chosenFrequencies)):\r\n sheets_controls.append(controls_organized.add_worksheet(str(chosenFrequencies[number_of_freq])))\r\n sheets_patients.append(patients_organized.add_worksheet(str(chosenFrequencies[number_of_freq])))\r\n for row_controls in range(num_Controls):\r\n sheets_controls[number_of_freq].write(row_controls+1, 0, Controls[row_controls].getName())\r\n for column_controls in range(np.size(Controls[row_controls].getData(), 1)):\r\n if row_controls == 0:\r\n sheets_controls[number_of_freq].write(row_controls, column_controls+1, 'ROI %s' % str(column_controls+1))\r\n sheets_controls[number_of_freq].write(row_controls+1, column_controls+1, Controls[row_controls].getData()[number_of_freq, column_controls])\r\n for row_patients in range(num_Patients):\r\n for column_patients in range(np.size(Patients[row_patients].getData(), 1)):\r\n sheets_patients[number_of_freq].write(row_patients, column_patients, Patients[row_patients].getData()[number_of_freq, column_patients])\r\n controls_organized.close()\r\n patients_organized.close()\r\n\r\nwhile True:\r\n try:\r\n export_to_excel()\r\n break\r\n except IndexError:\r\n chosenFrequencies = []\r\n print(\"\\nYour data doesn't have this many frequencies. Please try again.\")\r\n ask_for_frequencies()\r\n print(chosenFrequencies)\r\n except PermissionError:\r\n print(\"\\nThe file you're attempting to create already exists and is currently in use. 
Please close it and try again.\")\r\n        exit(1)\r\n\r\n#Making the graph\r\nfor trim_c in range(\r\n        num_Controls):\r\n    Controls[trim_c].trimData(len(chosenFrequencies), np.size(Controls[0].getData(),1))\r\n\r\nfor trim_p in range(\r\n        num_Patients):\r\n    Patients[trim_p].trimData(len(chosenFrequencies), np.size(Patients[0].getData(),1))\r\n\r\nsumMatrixControls = np.zeros((len(chosenFrequencies), np.size(Controls[0].getData(),1)))\r\nsumMatrixPatients = np.zeros((len(chosenFrequencies), np.size(Patients[0].getData(),1)))\r\nfor x in Controls:\r\n    sumMatrixControls += np.array(x.getData())\r\nfor y in Patients:\r\n    sumMatrixPatients += np.array(y.getData())\r\n\r\n# averageMatrix = ((np.array(Control1.data)) + (np.array(Control2.data)) + (np.array(Control3.data)) + (np.array(Control4.data)) + (np.array(Control5.data)) + (np.array(Control6.data)))/num_Controls\r\naverageMatrixControls = sumMatrixControls / num_Controls # create an average of all the matrices read from text files\r\naverageMatrixPatients = sumMatrixPatients / num_Patients # create an average of all the matrices read from text files\r\n\r\nprint(\"\\nAverage Matrix Controls: \\n\", averageMatrixControls)\r\n\r\nprint(\"\\nAverage Matrix Patients: \\n\", averageMatrixPatients)\r\n\r\ngap = 0.4\r\n\r\nprint_graphs = input(\"\\nDo you want any of the data graphed? Enter Y for yes and N for no.\\n\")\r\nvalid_graph_inputs = {'Y', 'y', 'N', 'n'}\r\nwhile print_graphs not in valid_graph_inputs:\r\n    print_graphs = input(\"That is not a valid response. Please try again. Enter Y for yes and N for no.\")\r\n\r\nif print_graphs == 'Y' or print_graphs == 'y':\r\n    ROIs = input(\"\\nPlease enter the numbers of the ROIs that you wanted graphed, in order, separated by commas.\\n\")\r\n    ROIs_split = ROIs.split(\",\")\r\n    num_of_graphs = list(map(int, ROIs_split))\r\n\r\n    def makeGraphs():\r\n        if(len(num_of_graphs) <= np.size(Controls[0].getData(),1)):\r\n            for graphs in range(len(num_of_graphs)):\r\n                plotMatrixControls = [0] * len(chosenFrequencies)\r\n                plotMatrixPatients = [0] * len(chosenFrequencies)\r\n                for x in range(len(chosenFrequencies)):\r\n                    plotMatrixControls[x] = averageMatrixControls[x, num_of_graphs[graphs]-1]\r\n                    plotMatrixPatients[x] = averageMatrixPatients[x, num_of_graphs[graphs]-1]\r\n                y_pos = np.arange(len(chosenFrequencies))\r\n                plt.figure(graphs + 1)\r\n                plt.bar(y_pos, plotMatrixControls, align='center', alpha=0.5, width=0.4)\r\n                plt.bar(y_pos + gap, plotMatrixPatients, align='center', alpha=0.5, width=0.4)\r\n                plt.xticks(y_pos + (gap / 2), chosenFrequencies)\r\n                plt.ylabel('Average')\r\n                plt.title('ROI %d' % num_of_graphs[graphs])\r\n                plt.savefig('ROI %d.png' % num_of_graphs[graphs])\r\n            print(\"\\nYour graphs have been created and saved successfully.\")\r\n        else:\r\n            # raise IndexError so the retry loop below catches an over-large ROI request\r\n            raise IndexError\r\n\r\n    while True:\r\n        try:\r\n            makeGraphs()\r\n            break\r\n        except IndexError:\r\n            num_of_graphs = []\r\n            print(\"You don't have this many ROIs in your data.\")\r\n            ROIs = input(\"\\nPlease enter the numbers of the ROIs that you wanted graphed, in order, separated by commas.\")\r\n            ROIs_split = ROIs.split(\",\")\r\n            num_of_graphs = list(map(int, ROIs_split))\r\n","sub_path":"Old Versions/Average.py","file_name":"Average.py","file_ext":"py","file_size_in_byte":15630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"70255341","text":"from django.shortcuts import 
render\nfrom django.shortcuts import render, HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login\nfrom .form import LoginForm\nfrom django.http import JsonResponse\nfrom django.urls import reverse\nimport json\n\n\n# Create your views here.\ndef auth_login(request):\n    if request.method == \"POST\":\n        ret = {\"status\": None, \"url\": None}\n        login_form = LoginForm(request.POST)\n        if login_form.is_valid():\n            cd = login_form.cleaned_data\n            user = authenticate(username=cd['username'], password=cd['password'])\n            if user:\n                login(request, user)\n                ret[\"status\"] = \"success\"\n                ret[\"url\"] = reverse('assets:dashboard')\n                return JsonResponse(ret)\n            else:\n                ret[\"status\"] = \"failed\"\n                return JsonResponse(ret)\n        else:\n            ret[\"status\"] = \"invalid\"\n            ret[\"message\"] = \"Invalid information, please check the information you entered\"\n            return JsonResponse(ret)\n\n    if request.method == 'GET':\n        login_form = LoginForm()\n        return render(request, 'registration/login.html', {\"form\": login_form})\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"177150947","text":"import unittest\nfrom BSTestRunner import BSTestRunner\nimport time\n\n# Paths of the test cases and the test report\ntest_dir = './test_case'\nreport_dir = './reports'\n\n# Load the test cases\ndiscover = unittest.defaultTestLoader.discover(test_dir,pattern='test_weather.py')\n\n\nnow = time.strftime('%Y-%m-%d %H_%M_%S')\nreport_name = report_dir+'/'+now+'test_report.html'\n\nwith open(report_name,'wb') as f:\n    runner = BSTestRunner(stream=f,title='Weather API Test Report')\n    runner.run(discover)","sub_path":"requests_test/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"619024406","text":"# this is for reference only. 
you don't need to use this file.\nimport os\n\n\ndef feed_infer(output_file, infer_func):\n \"\"\"\n output_file (str): file path to write output (Be sure to write in this location.)\n infer_func (function): user's infer function bound to 'nsml.bind()'\n \"\"\"\n try: \n import nsml\n root = os.path.join(nsml.DATASET_PATH)\n except:\n root = '../nipa_video/'\n\n predicted_labels = infer_func(root, phase='test') # [1, 2, 3, 4]\n predicted_labels = ' '.join([str(label) for label in predicted_labels]) # '1 2 3 4'\n \n with open(output_file, 'w') as f:\n f.write(predicted_labels)\n\n if os.stat(output_file).st_size == 0:\n raise AssertionError('output result of inference is nothing')\n","sub_path":"6_vcls_age/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180507382","text":"#coding=utf-8\n\nimport test_strage\nimport json\n\nclass Coin58():\n def __init__(self, fileNames=[\"\"], times = 60):\n self.FileNames = fileNames\n self.DataPool = test_strage.getData(self.FileNames)\n\n self.Seconds = times\n\n self.OpenOrders = {}\n self.OpenOrders[\"bids\"] = []\n self.OpenOrders[\"asks\"] = []\n\n self.FinishOrders = {} \n self.FinishOrders[\"bids\"] = []\n self.FinishOrders[\"asks\"] = []\n def Next(self):\n self.DataPool.next()\n\n def getOrderBook(self):\n try:\n line = self.DataPool.next()\n ##print(\"line:\", line, \" type:\", type(line))\n lineJson = json.loads(line)\n return lineJson[\"asks\"], lineJson[\"bids\"], lineJson[\"curTime\"]\n except Exception as e:\n print(\"Exception:\", str(e))\n return [],[],0\n def PlaceSellOrder(self, price, amount, curTime):\n self.OpenOrders[\"asks\"].append([price, amount, curTime])\n\n def PlaceBuyOrder(self, price, amount, curTime):\n self.OpenOrders[\"bids\"].append([price, amount, curTime])\n\n def printOrder(self, d=\"\"):\n print(\"self.OpenOrders:\",self.OpenOrders, \"self.FinishOrders:\", self.FinishOrders, \" d:\", d)\n\n def checkBuyOrder(self, askPrice, curTime):\n size = len(self.OpenOrders[\"bids\"]) \n i = 0\n while i < size:\n price, amount, preTime = self.OpenOrders[\"bids\"].pop(0)\n interSec = curTime - preTime\n if (price >= askPrice) and (interSec > 4) :\n self.FinishOrders[\"bids\"].append([price, amount, preTime])\n else :\n if interSec <= self.Seconds:\n self.OpenOrders[\"bids\"].append([price, amount, preTime])\n else :\n print(\"checkBuyOrder cancelOrder:\", price, amount, \" preTime:\", preTime, \" interSec:\", interSec, \" curTime:\", curTime, \" self.Seconds:\", self.Seconds)\n i += 1 \n def checkSellOrder(self, bidPrice, curTime):\n size = len(self.OpenOrders[\"asks\"]) \n i = 0\n while i < size:\n price, amount, preTime = self.OpenOrders[\"asks\"].pop(0)\n interSec= curTime - preTime\n if (price <= bidPrice) and (interSec > 4):\n self.FinishOrders[\"asks\"].append([price, amount, preTime])\n else :\n if interSec <= self.Seconds:\n self.OpenOrders[\"asks\"].append([price, amount, preTime])\n else :\n print(\"checkSellOrder cancelOrder:\", price, amount, preTime, \" interSec:\", interSec, \" curTime:\", curTime, \" self.Seconds:\", self.Seconds)\n i += 1 \n\n def Check(self, askPrice, bidPrice, curTime):\n #self.printOrder()\n self.checkBuyOrder(askPrice, curTime) \n self.checkSellOrder(bidPrice, curTime)\n\n ##self.printOrder()\n while 1:\n if len(self.FinishOrders[\"asks\"])>0 and len(self.FinishOrders[\"bids\"])>0:\n sellPrice, sellAmout, sellSecs = self.FinishOrders[\"asks\"].pop(0)\n 
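# pair the oldest completed sell with the oldest completed buy to book the spread\n                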
buyPrice, buyAmout, buySecs = self.FinishOrders[\"bids\"].pop(0)\n                money = sellPrice-buyPrice\n\n                smallTime = sellSecs\n                if buySecs < smallTime :\n                    smallTime = buySecs\n                print(\"gain money:\", money, \" sellSecs:\",sellSecs, \" buySecs:\", buySecs, \" cost time:\", (curTime-smallTime), \" curTime:\", curTime)\n                return money\n            else :\n                break\n        return 0\n\nif __name__ == \"__main__\":\n    fileNames = [\"./1.txt\"]\n    fileNames = [\"./data/2021-01-05\"]\n    coin58 = Coin58(fileNames)\n    i = 0\n    while i < 50:\n        asks, bids, curTime = coin58.getOrderBook()\n        print(\"i:\", i, \" asks:\", asks, \" bids:\", bids, \" curTime:\", curTime)\n        if (not asks) and (not bids) and (curTime==0):\n            break\n        i += 1\n","sub_path":"test_coin_58.py","file_name":"test_coin_58.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"25024549","text":"#************************************************************************ \n    # letter_freq -- program to calculate and print letter frequencies. \n    # \n    # Author: Saad Qazi \n    # \n    # Purpose: Calculate letter frequencies of words. \n    # \n    # Usage: \n    # Runs the program and the output is printed in string format. \n#************************************************************************\n\n\n\ndef count_letters(word):\n\tif not word:\n\t\treturn ''\n\tword = word.replace(' ', '').lower()\n\tfreq_dict = {}\n\tfor letter in word:\n\t\tfreq_dict[letter] = freq_dict.get(letter,0) + 1\n\tret = ''\n\tfor letter in word:\n\t\tif letter in freq_dict:\n\t\t\tret += letter + str(freq_dict[letter]) + ' '\n\t\t\tdel freq_dict[letter]\n\tprint(ret[:-1])\n\n\n\nalphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\ndef count_of_letter(alpha,word):\n    count = 0\n    for i in range(len(word)):\n        if (word[i]) == (alpha):\n            count = count + 1\n    return count\n\n\ndef letter_freq_v1(word):\n    for i in range(len(alphabets)):\n        count = count_of_letter(alphabets[i],word)\n\n        if count > 0:\n            print (alphabets[i]+str(count)+ ' ',end='')\n\n\n","sub_path":"Frequency.py","file_name":"Frequency.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"166272302","text":"from collections import defaultdict\nimport unittest\n\n\nclass DAG:\n    \"\"\"Representing Directed Acyclic Graph(DAG)\n    Attributes:\n        graph(dict):Key of the dict represents node of graph and value of dict is\n        list of directed edges from node to other nodes\"\"\"\n\n    def __init__(self, graph=None):\n        # avoid a shared mutable default argument; each DAG gets its own dict\n        self.graph = graph if graph is not None else {}\n\n    def addnode(self, node):\n        \"\"\"Add node to graph\n        :arg node to add\n        :returns None\"\"\"\n        self.graph[node] = list()\n\n    def removenode(self, node):\n        \"\"\"Remove node from graph and all its edges\n        :arg node to remove\n        :return None\"\"\"\n        self.graph.pop(node, None)\n        for othernode in self.nodes():\n            if node in self.adjacentnodes(othernode):\n                self.removeedge(othernode, node)\n\n    def nodes(self):\n        \"\"\"Nodes in graph\n        :returns List of nodes\"\"\"\n        return list(self.graph.keys())\n\n    def adjacentnodes(self, node):\n        \"\"\"Returns the Adjacent nodes from given node\n        :arg node\n        :return list of adjacent nodes\n        :returns None if node is not in graph\"\"\"\n        return self.graph[node] if node in self.nodes() else None\n\n    def addedge(self, origin, dst):\n        \"\"\"Add edge in 
graph\n :arg origin origin of edge\n :arg dst destination of edge\n :returns None\"\"\"\n if origin in self.nodes() and dst in self.nodes():\n self.graph[origin].append(dst)\n\n def removeedge(self, origin, dst):\n \"\"\"Remove edge in graph\n :arg origin origin of edge\n :arg dst destination of edge\n :returns None\"\"\"\n if origin in self.nodes() and dst in self.adjacentnodes(origin):\n self.graph[origin].remove(dst)\n\n def edges(self):\n \"\"\"Edges in graph\n :returns List of tuples as edges\"\"\"\n return [(node, end) for node in self.nodes() for end in self.adjacentnodes(node)]\n\n def alltraversalpaths(self):\n \"\"\"All traversal paths in graph\n :returns List of all traversal paths\"\"\"\n visited = {}\n indegrees = defaultdict(int)\n result = []\n for node, edges in self.graph.items():\n visited[node] = False\n for end in self.adjacentnodes(node):\n indegrees[end] += 1\n self.__all_traversal_paths([], visited, indegrees, result)\n return result\n\n def __all_traversal_paths(self, path, visited, indegrees, result):\n \"\"\"Helper function for finding all traversal paths\n :arg path list of current path\n :arg visited path dict for nodes with boolean value for visited or not\n :arg indegrees dict of indegrees of all nodes \n :arg result list of all traversal paths\"\"\"\n for node in self.nodes():\n # If node is not visited and No edge is pointed to node\n if not visited[node] and indegrees[node] == 0:\n # Make node as visited\n visited[node] = True\n # Add current node to path\n path.append(node)\n # Decrease inDegrees of every edge from node\n for end in self.adjacentnodes(node):\n indegrees[end] -= 1\n # Recursively find all traversal paths from this node\n self.__all_traversal_paths(path, visited, indegrees, result)\n # Make node unvisited to find other traversal paths\n visited[node] = False\n # Remove this node from path\n path.pop()\n # Increase inDegrees of every edge from node\n for end in self.adjacentnodes(node):\n indegrees[end] += 1\n\n # Add path to result if all nodes are visited\n if all(visited.values()):\n result.append(tuple(path.copy()))\n\n\nclass TestDAG(unittest.TestCase):\n\n def testOne(self):\n graph = {'1': ['3'], '2': ['3', '5'], '3': ['4'], '4': [], '5': ['4']}\n traversal_paths = {('1', '2', '3', '5', '4')\n , ('1', '2', '5', '3', '4')\n , ('2', '1', '3', '5', '4')\n , ('2', '1', '5', '3', '4')\n , ('2', '5', '1', '3', '4')}\n dag = DAG(graph)\n self.assertEqual(traversal_paths, set(dag.alltraversalpaths()))\n dag.removenode('4')\n traversal_paths = {('1', '2', '3', '5')\n , ('1', '2', '5', '3')\n , ('2', '1', '3', '5')\n , ('2', '1', '5', '3')\n , ('2', '5', '1', '3')}\n self.assertEqual(traversal_paths, set(dag.alltraversalpaths()))\n\n def testTwo(self):\n graph = {'1': ['3', '4'], '2': ['3'], '3': [], '4': []}\n traversal_paths = {('1', '2', '3', '4')\n , ('1', '4', '2', '3')\n , ('1', '2', '4', '3')\n , ('2', '1', '3', '4')\n , ('2', '1', '4', '3')}\n dag = DAG(graph)\n self.assertEqual(traversal_paths, set(dag.alltraversalpaths()))\n dag.removeedge('2', '3')\n edges = {('1', '3'), ('1', '4')}\n self.assertEqual(edges, set(dag.edges()))\n\n def testThree(self):\n graph = {'1': ['2', '3'], '2': [], '3': []}\n traversal_paths = {('1', '2', '3'), ('1', '3', '2')}\n dag = DAG(graph)\n self.assertEqual(traversal_paths, set(dag.alltraversalpaths()))\n dag.addnode('4')\n self.assertEqual(set(dag.nodes()), {'1', '2', '3', '4'})\n dag.addedge('2', '4')\n dag.addedge('3', '4')\n traversal_paths = {('1', '2', '3', '4'), ('1', '3', '2', '4')}\n 
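# node 4 now depends on both 2 and 3, so it must appear last in every valid ordering\n        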
self.assertEqual(traversal_paths, set(dag.alltraversalpaths()))\n\n def testEmpty(self):\n traversal_paths = {()}\n dag_empty = DAG()\n self.assertEqual(traversal_paths, set(dag_empty.alltraversalpaths()))\n\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/DAG.py","file_name":"DAG.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"52396467","text":"__author__ = 'solpie'\nfrom view import events\nfrom deco.qtBase import DecoView\nfrom model.smModel import SmModel\n\n\nclass MainWinView(DecoView):\n def __init__(self, main_win=None):\n super(MainWinView, self).__init__(main_win)\n\n def init(self):\n main_win = self.get_ui()\n self.connect(main_win.lyricEditor, events.TEXT_CHANGED, self.on_lyric_changed)\n\n def on_lyric_changed(self, e=None):\n sm_model = SmModel()\n main_win = self.get_ui()\n lyric = str(main_win.lyricEditor.toPlainText().toUtf8())\n sm_model.set_lyric(lyric)\n\n","sub_path":"src/view/mainWinView.py","file_name":"mainWinView.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"496937390","text":"###########################################\n####### Dungeon Generator -- main.py ######\n###########################################\n####### Breiny Games (c) 2011 #############\n###########################################\n## This file uses functions from the ##\n## 'map_generator.py' module to generate ##\n## the layout of a random dungeon, which ##\n## is then read in by the 'map_loader.py'##\n## module and displayed visually in a ##\n## pygame window. Documentation is in ##\n## 'map_generator.py' module. ##\n###########################################\n###########################################\n## Created by Christopher Breinholt ##\n## Breiny Games (c) 2011 ##\n## http://breinygames.blotspot.com/ ##\n## ##\n## To Use: Run the main.py file. ##\n## Press spacebar to generate a new ##\n## dungeon. Press return to print the ##\n## stats to the shell. Press S to save ##\n## the dungeon. Press L to load. ##\n## ##\n## You can also run the text_only.py ##\n## file to just print generated dungeons ##\n## the shell instead of rendered on the ##\n## screen in a pygame window. This kind ##\n## of gives you a better idea of how ##\n## the generator actually works and what ##\n## it does. 
##\n###########################################\n\n\n\n\n\nimport pygame\nimport pickle\nimport time\nfrom map_generator import *\nfrom map_loader import *\n\n\n\n\n\ndef main():\n\n\n pygame.init()\n\n resolution = (800, 640)\n screen = pygame.display.set_mode(resolution)\n pygame.display.set_caption(\"Random Dungeon Generator\")\n clock = pygame.time.Clock()\n\n map = Map()\n \n dungeon = Dungeon((100, 80), \"Neverland\", 50, (4, 4), (12, 12), (8, 8))\n dungeon.generate_dungeon()\n map.load_dungeon(dungeon)\n\n running = True\n while running:\n\n clock.tick(10)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n elif event.key == pygame.K_SPACE:\n dungeon.generate_dungeon()\n map.load_dungeon(dungeon)\n elif event.key == pygame.K_RETURN:\n dungeon.print_info(True)\n elif event.key == pygame.K_s:\n save = open(\"saved_dungeon.txt\", \"w\")\n pickle.dump(dungeon, save)\n save.close()\n elif event.key == pygame.K_l:\n save = open(\"saved_dungeon.txt\", \"r\")\n dungeon = pickle.load(save)\n save.close()\n map.load_dungeon(dungeon)\n\n map.draw(screen)\n pygame.display.flip()\n\n\n pygame.quit()\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/2013.11/random-dungeon-generator-v-1-6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"118824168","text":"from Bio import SeqIO\nfrom Bio.SeqIO import AbiIO\nimport click\n\n\n@click.command(short_help='Convert Sanger sequencing format (ABI) to FASTQ')\n@click.argument('filename')\n@click.option('--verbose', is_flag=True, help=\"Show progress messages\")\ndef abi_to_fastq(filename, verbose=True):\n # Open the Sanger sequencing trace\n if verbose:\n click.echo(f'Reading \"{filename}\" ...')\n\n record = SeqIO.read(filename, 'abi')\n\n # Remove bases with probability score less than 0.05\n if verbose:\n click.echo(f'\\tTrimming bases with probability score < 0.05 ...')\n trimmed = AbiIO._abi_trim(record)\n\n if verbose:\n click.echo('\\tBefore trimming: ' + str(len(record)) +\n '\\tAfter trimming: ' + str(len(trimmed)))\n\n # Write the trimmed file to fastq\n fastq = filename.replace('.ab1', '.fastq')\n SeqIO.write(trimmed, fastq, 'fastq')\n\n # Write a message to the user (myself) so they know what happened\n click.echo(f'\\tDone. 
Wrote to \"{fastq}\"')\n\n","sub_path":"winky/abi_to_fastq.py","file_name":"abi_to_fastq.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"459524930","text":"import requests\r\nimport json\r\nimport base64\r\nimport os\r\nimport time\r\nfrom pprint import pprint\r\n\r\n# path = r'resumes\\Other\\non_indian_cvs\\EY_Kitman Tsang_Cosec Mgr.docx'\r\n# path = r'resumes\\best\\Arindam_Presales.docx'\r\npath = r\"resumes\\sample_CVs\\Resume_1.docx\"\r\n# path = r\"resumes\\Other\\non_indian_cvs\\DwightIT-QA-Analyst_layout.pdf\"\r\n# path = r'resumes\\sample_CVs\\Resume_2.pdf'\r\n# path = r'resumes\\sample_CVs\\Resume_2.docx'\r\n# path = r\"resumes\\sample_CVs\\my_resume.pdf\"\r\n# path = r'resumes\\Resumes_latest\\2MichaelFarros.doc'\r\n# path = r'resumes\\Resumes_latest\\Lawrence Acosta.docx'\r\n# path = r\"resumes\\Resumes_latest\\Kevin_Resumev2.docx\"\r\n# path = r'resumes\\Resumes_latest\\Derrick-Joyner (1).pdf'\r\n# path = r'resumes\\Resumes_latest\\Garstang-Resume-LinuxAdmin.pdf' # Wrong name because space between name chars\r\n# path = r\"resumes\\Resumes_latest\\Friedlander_Resume.pdf\"\r\n# path = r\"resumes\\Resumes_latest\\Eric_Kao_Resume.pdf\"\r\n# path = r'resumes\\Resumes_latest\\EllenJacobs.pdf'\r\n# path = r'resumes\\Resumes_latest\\'\r\n# Mult mobile nums - Wrong Name identification\r\n# path = r\"resumes\\Resumes_latest\\Gary_Greenberg_resume_09_10.pdf\"\r\n# path = r'uploaded_files\\zipped_resume.zip'\r\npath = r\"resumes\\sample_CVs\\my_resume.pdf\"\r\npath = r\"C:\\Users\\Mohit Khanwale\\Downloads\\Eric_Sundby_Resume_eSolytics_Alterxy_Oracle_ETL_Developer.docx\"\r\n# path = r\"uploaded_files\\BenDean.pdf\"\r\n\r\n# file_name, file_extension = os.path.splitext(path)\r\nfile_name, file_extension = os.path.basename(path).split(\".\")\r\n\r\nprint(\"file_name-\", file_name)\r\nprint(\"file_extension-\", file_extension)\r\ntry:\r\n    with open(path, \"rb\") as f:\r\n        base64str = base64.b64encode(f.read()).decode(\"UTF-8\")\r\nexcept UnicodeDecodeError:\r\n    print(\r\n        \"critical\",\r\n        \"Unicode Decode error while converting file to Base64 string-\",\r\n        __name__,\r\n        1,\r\n    )\r\nexcept Exception:\r\n    print(\r\n        \"critical\",\r\n        \"Some other error occurred while converting file to Base64 string\",\r\n        __name__,\r\n        1,\r\n    )\r\n\r\n\r\n# payload = {\r\n#     \"ResumeAsBase64String\": base64str,\r\n#     \"file_name\": file_name,\r\n#     \"file_extension\": file_extension\r\n# }\r\n\r\npayload = {\r\n    'ResumeAsBase64String': base64str,\r\n    'file_name': file_name,\r\n    'file_extension': file_extension\r\n}\r\n\r\nheaders = {\"username\": \"markabbot\", \"api-token\": \"ab8a7ff7-6659-4a44-b7d9-064612d825fa\"}\r\n\r\n# print(file_extension, file_name)\r\n# print(base64str)\r\n\r\n# http://149.28.197.77/api/v1/cvparser/single\r\nres = requests.post(\r\n    \"http://149.28.197.77/api/v1/cvparser/single\",\r\n    json=payload,\r\n    headers=headers,\r\n)\r\nprint(\"response-\", res)\r\n# print(res.text)\r\n# print(res.json())\r\nprint(time.perf_counter())\r\npprint(json.loads(res.text))\r\n","sub_path":"test_scripts/single_api_Call.py","file_name":"single_api_Call.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"86312061","text":"import re\r\n\r\n# Pattern used to validate the expected ID number format\r\npatron = re.compile(\"^\\\\d{8}-\\\\d$\")\r\n\r\n\r\ndef validate_id_number(dui):\r\n    # Global variables used below\r\n    
global residuoVerificador, residuo\r\n    # Check that the string matches the pattern before continuing with the validation\r\n    if patron.match(dui):\r\n        # Get the check digit from its position in the string\r\n        digitoVerificador = int(dui[9])\r\n\r\n        # Initialize the position weight used while walking the string\r\n        numero = 10\r\n        # Take the digit block without the hyphen\r\n        cadenaDui = dui[0:8]\r\n\r\n        # accumulator\r\n        sum = 0\r\n        # Iterate over the string, weighting each digit by its position\r\n        for indice in range(len(cadenaDui)):\r\n            caracter = cadenaDui[indice]\r\n            numero = numero - 1\r\n            # add the product of the position weight and each digit of the DUI\r\n            sum = sum + (numero * int(caracter))\r\n        residuo = sum % 10\r\n\r\n        # Take the remainder and evaluate it\r\n        if residuo > 0:\r\n            residuo = residuo - 10\r\n\r\n        # If the remainder was greater than 0, 10 has been subtracted from it;\r\n        # the document is then validated\r\n        if abs(residuo) == digitoVerificador or residuo == 0:\r\n            # If the remainder equals the check digit or equals zero,\r\n            # the document is considered valid; otherwise it is invalid\r\n\r\n            return True\r\n        else:\r\n\r\n            return False\r\n    else:\r\n        print(\"Unrecognized pattern\")\r\n\r\n# validate_id_number('00322123-1')\r\n","sub_path":"ocr_procces_lib/validate_document.py","file_name":"validate_document.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"388037442","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis spider is a Boonrawd spider created on top of the ATSSpider\nscrapy crawl boonrawd -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://recruit.boonrawd.co.th/job_list.php\"\n\nSample Url:\nhttps://recruit.boonrawd.co.th/job_list.php\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass Boonrawd(ATSSpider):\n\n    name = \"boonrawd\"\n    ref_re = compile(r\"\\?idjob=(\\d+)\")\n\n    def parse(self, response):\n        sel = Selector(response)\n        jobs = sel.xpath(\n            '//table//tr/td/a[contains(@href, \"idjob\")]/@href').extract()\n        for job_url in jobs:\n            yield Request(\n                callback=self.parse_job_callback(),\n                url=urljoin(response.url, job_url)\n            )\n\n    def parse_job(self, response):\n        loader = BrightcorpItemLoader(response=response)\n\n        loader.add_xpath(\n            'company',\n            '//span[text()=\"%s\"]/following-sibling::text()[1]' % unicode('สถานที่ปฏิบัติงาน :', 'utf-8')\n        )\n        loader.add_xpath(\n            'description',\n            '//span[contains(text(), \"%s\")]/following-sibling::node()' % unicode('ข้อมูลตำแหน่งงาน :', 'utf-8')\n        )\n        loader.add_xpath('title', '//td[@class=\"topic_white\"]/text()')\n\n        loader.add_value(\n            'referencenumber', response.url,\n            Prefix('%s-' % self.name), re=self.ref_re\n        )\n        loader.add_value('url', response.url)\n\n        yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/boonrawd.py","file_name":"boonrawd.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"606267437","text":"# -*- coding: UTF-8 -*-\n#\n# rtfetch -- Plugin for http://pravtor.ru (based on rutracker.py)\n# Copyright (C) 2012 Devaev Maxim \n# Copyright (C) 2013 Vitaly Lipatov \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU 
General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#####\n\n\nfrom rtlib import const\nfrom rtlib import fetcherlib\n\nimport urllib\nimport urllib2\nimport cookielib\nimport re\n\n\n##### Public constants #####\nFETCHER_NAME = \"pravtor\"\nFETCHER_VERSION = 0\n\nPRAVTOR_DOMAIN = \"pravtor.ru\"\nPRAVTOR_LOGIN_URL = \"http://%s/login.php\" % (PRAVTOR_DOMAIN)\nPRAVTOR_VIEWTOPIC_URL = \"http://%s/viewtopic.php\" % (PRAVTOR_DOMAIN)\nPRAVTOR_DL_URL = \"http://%s/download.php\" % (PRAVTOR_DOMAIN)\n\n\n##### Public classes #####\nclass Fetcher(fetcherlib.AbstractFetcher) :\n\tdef __init__(self, user_name, passwd, url_retries, url_sleep_time, proxy_url, interactive_flag) :\n\t\tfetcherlib.AbstractFetcher.__init__(self, user_name, passwd, url_retries, url_sleep_time, proxy_url, interactive_flag)\n\n\t\tself.__user_name = user_name\n\t\tself.__passwd = passwd\n\t\tself.__url_retries = url_retries\n\t\tself.__url_sleep_time = url_sleep_time\n\t\tself.__proxy_url = proxy_url\n\t\tself.__interactive_flag = interactive_flag\n\n\t\tself.__comment_regexp = re.compile(r\"http://pravtor\\.(ru|spb\\.ru)/viewtopic\\.php\\?p=(\\d+)\")\n\n\t\tself.__hash_regexp = re.compile(r\"([a-fA-F0-9]+)\")\n\t\tself.__loginform_regexp = re.compile(r\"\")\n\t\tself.__torrent_id_regexp = re.compile(r\"\")\n\n\t\tself.__cookie_jar = None\n\t\tself.__opener = None\n\t\tself.__torrent_id = None\n\n\n\t### Public ###\n\n\t@classmethod\n\tdef plugin(cls) :\n\t\treturn FETCHER_NAME\n\n\t@classmethod\n\tdef version(cls) :\n\t\treturn FETCHER_VERSION\n\n\t###\n\n\tdef match(self, torrent) :\n\t\treturn ( not self.__comment_regexp.match(torrent.comment() or \"\") is None )\n\n\tdef login(self) :\n\t\tself.assertNonAnonymous(self.__user_name)\n\t\tself.__cookie_jar = cookielib.CookieJar()\n\t\tself.__opener = fetcherlib.buildTypicalOpener(self.__cookie_jar, self.__proxy_url)\n\t\ttry :\n\t\t\tself.__tryLogin()\n\t\texcept :\n\t\t\tself.__cookie_jar = None\n\t\t\tself.__opener = None\n\t\t\traise\n\n\tdef loggedIn(self) :\n\t\treturn ( not self.__opener is None )\n\n\tdef torrentChanged(self, torrent) :\n\t\tself.assertMatch(torrent)\n\t\tself.__torrent_id = None\n\t\treturn ( torrent.hash() != self.__fetchHash(torrent) )\n\n\tdef fetchTorrent(self, torrent) :\n\t\tcomment_match = self.__comment_regexp.match(torrent.comment() or \"\")\n\t\tself.assertFetcher(not comment_match is None, \"No comment match\")\n\t\ttopic_id = comment_match.group(1)\n\n\t\tassert not self.__torrent_id is None, \"Programming error, torrent_id == None\"\n\n\t\tcookie = cookielib.Cookie(\n\t\t\tversion=0,\n\t\t\tname=\"bb_dl\",\n\t\t\tvalue=topic_id,\n\t\t\tport=None,\n\t\t\tport_specified=False,\n\t\t\tdomain=\"\",\n\t\t\tdomain_specified=False,\n\t\t\tdomain_initial_dot=False,\n\t\t\tpath=\"/\",\n\t\t\tpath_specified=True,\n\t\t\tsecure=False,\n\t\t\texpires=None,\n\t\t\tdiscard=True,\n\t\t\tcomment=None,\n\t\t\tcomment_url=None,\n\t\t\trest={ \"HttpOnly\" : None },\n\t\t\trfc2109=False,\n\t\t)\n\t\tself.__cookie_jar.set_cookie(cookie)\n\t\trequest = urllib2.Request(PRAVTOR_DL_URL+(\"?id=%d\" % 
(self.__torrent_id)), \"\", headers={\n\t\t\t\t\"Referer\" : PRAVTOR_VIEWTOPIC_URL+(\"?t=%s\" % (topic_id)),\n\t\t\t\t\"Origin\" : \"http://%s\" % (PRAVTOR_DOMAIN),\n\t\t\t\t\"User-Agent\" : const.BROWSER_USER_AGENT,\n\t\t\t})\n\n\t\tdata = self.__readUrlRetry(request)\n\t\tself.assertValidTorrentData(data)\n\t\treturn data\n\n\n\t### Private ###\n\n\tdef __tryLogin(self) :\n\t\tpost_dict = {\n\t\t\t\"login_username\" : self.__user_name.decode(\"utf-8\").encode(\"cp1251\"),\n\t\t\t\"login_password\" : self.__passwd.decode(\"utf-8\").encode(\"cp1251\"),\n\t\t\t\"login\" : \"\\xc2\\xf5\\xee\\xe4\",\n\t\t}\n\t\tdata = self.__readUrlRetry(PRAVTOR_LOGIN_URL, urllib.urlencode(post_dict))\n\t\tself.assertLogin(self.__loginform_regexp.search(data) is None, \"Invalid login or password\")\n\n\tdef __fetchHash(self, torrent) :\n\t\tdata = self.__readUrlRetry(torrent.comment() or \"\")\n\n\t\thash_match = self.__hash_regexp.search(data)\n\t\tself.assertFetcher(not hash_match is None, \"Hash is not found\")\n\n\t\ttorrent_id = self.__torrent_id_regexp.search(data)\n\t\tself.assertFetcher(not torrent_id is None, \"Torrent ID is not found\")\n\t\tself.__torrent_id = int(torrent_id.group(1))\n\n\t\treturn hash_match.group(1).lower()\n\n\tdef __readUrlRetry(self, *args_list, **kwargs_dict) :\n\t\tkwargs_dict.setdefault(\"opener\", self.__opener)\n\t\tkwargs_dict.setdefault(\"retry_codes_list\", (503, 404))\n\t\tkwargs_dict[\"retries\"] = self.__url_retries\n\t\tkwargs_dict[\"sleep_time\"] = self.__url_sleep_time\n\t\treturn fetcherlib.readUrlRetry(*args_list, **kwargs_dict)\n\n","sub_path":"rtlib/fetchers/pravtor.py","file_name":"pravtor.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"654036878","text":"#coding:utf-8\r\n\r\nclass Node(object):\r\n def __init__(self, value, parent = None):\r\n self.value = value\r\n self.left = None\r\n self.right = None\r\n self.height = 1\r\n self.setParent(parent)\r\n\r\n @staticmethod\r\n def GetHeight(node):\r\n return 0 if node is None else node.height\r\n\r\n def updateHeight(self):\r\n self.height = 1 + max(Node.GetHeight(self.left), Node.GetHeight(self.right))\r\n\r\n def getBalance(self):\r\n return Node.GetHeight(self.left) - Node.GetHeight(self.right)\r\n\r\n def setLeft(self, left):\r\n self.left = left\r\n if left:\r\n left.parent = self\r\n self.updateHeight()\r\n\r\n def setRight(self, right):\r\n self.right = right\r\n if right:\r\n right.parent = self\r\n self.updateHeight()\r\n\r\n def setParent(self, parent):\r\n self.parent = parent\r\n if parent is None:\r\n return\r\n \r\n if self.value < parent.value:\r\n parent.left = self\r\n else:\r\n parent.right = self\r\n parent.updateHeight()\r\n\r\n def dump(self, level=0, prefix=''):\r\n print(\"%s%s %s => %s, h=%d\" % (' ' * level, prefix, self.value, '' if self.parent is None else self.parent.value, self.height))\r\n if self.left:\r\n self.left.dump(level + 1, 'L')\r\n if self.right:\r\n self.right.dump(level + 1, 'R')\r\n\r\n\r\nclass AVL(object):\r\n def __init__(self):\r\n self.root = None\r\n\r\n def dump(self):\r\n if self.root is None:\r\n print('')\r\n else:\r\n self.root.dump()\r\n\r\n def find(self, value):\r\n node = self.root\r\n while node:\r\n if value == node.value:\r\n return node\r\n elif value < node.value:\r\n node = node.left\r\n else:\r\n node = node.right\r\n return None\r\n\r\n def add(self, value):\r\n if self.root is None:\r\n self.root = Node(value)\r\n return self.root\r\n\r\n 
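# walk down from the root to find the parent the new value should attach under\r\n        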
node = self.root\r\n while node is not None:\r\n parent = node\r\n if value == node.value:\r\n return node\r\n if value < node.value:\r\n node = node.left\r\n else:\r\n node = node.right\r\n\r\n new = Node(value, parent)\r\n self.rebalance(parent)\r\n return new\r\n\r\n def rotateLeft(self, node):\r\n parent = node.parent\r\n x = node.right\r\n node.setRight(x.left)\r\n x.setLeft(node)\r\n x.setParent(parent)\r\n return x\r\n\r\n def rotateRight(self, node):\r\n parent = node.parent\r\n x = node.left\r\n node.setLeft(x.right)\r\n x.setRight(node)\r\n x.setParent(parent)\r\n return x\r\n\r\n def rotateLeftThenRight(self, node):\r\n node.left = self.rotateLeft(node.left)\r\n return self.rotateRight(node)\r\n\r\n def rotateRightThenLeft(self, node):\r\n node.right = self.rotateRight(node.right)\r\n return self.rotateLeft(node)\r\n\r\n def rebalance(self, node):\r\n if node.getBalance() > 1: #left\r\n if node.left.getBalance() >= 0: #left\r\n node = self.rotateRight(node)\r\n else:\r\n node = self.rotateLeftThenRight(node)\r\n elif node.getBalance() < -1: #right\r\n if node.right.getBalance() < 0: #right\r\n node = self.rotateLeft(node)\r\n else:\r\n node = self.rotateRightThenLeft(node)\r\n\r\n if node.parent is None:\r\n self.root = node\r\n else:\r\n self.rebalance(node.parent)\r\n\r\n def delete(self, value):\r\n node = self.find(value)\r\n if node is None:\r\n return None\r\n\r\n while node.left or node.right:\r\n if node.getBalance() > 0:\r\n self.rotateRight(node)\r\n else:\r\n self.rotateLeft(node)\r\n\r\n parent = node.parent\r\n if parent:\r\n if parent.left == node:\r\n parent.left = None\r\n else:\r\n parent.right = None\r\n self.rebalance(parent)\r\n else:\r\n self.root = None\r\n\r\n return node\r\n\r\n\r\ntree = AVL()\r\nfor i in [40, 50, 60, 70, 65, 80]:\r\n tree.add(i)\r\n tree.dump()\r\n print()\r\n \r\nprint(tree.find(60))\r\n\r\nfor i in [40, 50, 60, 70, 65, 80]:\r\n node = tree.delete(i)\r\n print('Delete', i)\r\n tree.dump()\r\n if node: node.dump(prefix='D')\r\n print()\r\n \r\n\r\n","sub_path":"test/avl.py","file_name":"avl.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510139168","text":"import cv2\nimport numpy as np\n\n\nSTEP = 10\nSIZE_STEP = 20\nMIN_FRAME_SIZE = 20\nTARGET_SIZE = (100, 100)\n\n\ndef image_iterate(image, model, cnn_model):\n \"\"\" Function which iterate whole input image with frames - from min. 
size of frame to maximum size\n\n :param image: array of image as nums (height, width, 3)\n :return: list of 4 elements tuples with (x, y, w, h) of frames where are butts\n \"\"\"\n # image = cv2.resize(image, (600, 600))\n height, width = image.shape[0:2]\n frame_size = MIN_FRAME_SIZE\n\n if height > width:\n max_size = height\n else:\n max_size = width\n step = STEP\n all_frames = []\n boxes = []\n for s in range(int((max_size - MIN_FRAME_SIZE) / SIZE_STEP + 1)):\n act_y = 0\n for i in range(int(height / (frame_size - step))):\n act_x = 0\n for j in range(int(width / (frame_size - step))):\n frame = image[act_y: (act_y + frame_size), act_x: (act_x + frame_size), ]\n frame = cv2.resize(frame, TARGET_SIZE)\n boxes.append(tuple((act_x, act_y, frame_size, frame_size)))\n all_frames.append(frame)\n act_x += step\n act_y += step\n frame_size += SIZE_STEP\n step = int(frame_size / 2)\n\n all_frames = np.asarray(all_frames)\n cnn_prediction = cnn_model.predict(all_frames)\n clf_pred = np.round(model.predict(cnn_prediction))\n butt_index = np.where(clf_pred == 1.0)[0]\n end_frames = []\n for butt in butt_index:\n end_frames.append(boxes[butt])\n\n return end_frames\n\n\n","sub_path":"python/image_preproc.py","file_name":"image_preproc.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"182243462","text":"import uuid\nfrom threading import Lock\nimport opentracing\n\nfrom opentracing.scope_managers import ThreadLocalScopeManager\nfrom thundra.opentracing.recorder import ThundraRecorder, RecordEvents\nfrom thundra.opentracing.span import ThundraSpan\nfrom thundra.opentracing.span_context import ThundraSpanContext\n\n\nclass ThundraTracer(opentracing.Tracer):\n\n __instance = None\n\n @staticmethod\n def get_instance():\n return ThundraTracer() if ThundraTracer.__instance is None else ThundraTracer.__instance\n\n def __init__(self, recorder=None, scope_manager=None):\n scope_manager = ThreadLocalScopeManager() if scope_manager is None else scope_manager\n super(ThundraTracer, self).__init__(scope_manager)\n self.recorder = recorder or ThundraRecorder()\n self.lock = Lock()\n self.global_span_order = 0\n self.test_xray_traces = []\n ThundraTracer.__instance = self\n\n def start_active_span(self,\n operation_name,\n child_of=None,\n references=None,\n trace_id=None,\n transaction_id=None,\n span_id=None,\n parent_span_id=None,\n tags=None,\n start_time=None,\n span_order=-1,\n ignore_active_span=False,\n finish_on_close=True):\n span_id = span_id or str(uuid.uuid4())\n _span = self.start_span(operation_name=operation_name,\n child_of=child_of,\n references=references,\n trace_id=trace_id,\n transaction_id=transaction_id,\n span_id=span_id,\n parent_span_id=parent_span_id,\n tags=tags,\n start_time=start_time,\n span_order=span_order,\n ignore_active_span=ignore_active_span)\n return self.scope_manager.activate(_span, finish_on_close)\n\n def start_span(self,\n operation_name=None,\n class_name=None,\n domain_name=None,\n child_of=None,\n references=None,\n trace_id=None,\n transaction_id=None,\n span_id=None,\n parent_span_id=None,\n tags=None,\n start_time=None,\n span_order=-1,\n ignore_active_span=False):\n\n with self.lock:\n self.global_span_order += 1\n\n _span_order = span_order\n if _span_order == -1:\n _span_order = self.global_span_order\n\n _parent_context = None\n if child_of is not None:\n _parent_context = child_of if isinstance(child_of, opentracing.SpanContext) else child_of.context\n elif 
references is not None and len(references) > 0:\n            _parent_context = references[0].referenced_context\n\n        if not ignore_active_span and _parent_context is None:\n            _scope = self.scope_manager.active\n            if _scope is not None and _scope.span is not None:\n                _parent_context = _scope.span.context\n\n        _trace_id = trace_id\n        _transaction_id = transaction_id\n        _span_id = span_id\n        _parent_span_id = parent_span_id\n\n        if _parent_context is not None:\n            _trace_id = _trace_id or _parent_context.trace_id\n            _transaction_id = _transaction_id or _parent_context.transaction_id\n            _parent_span_id = _parent_span_id or _parent_context.span_id\n\n        _context = ThundraSpanContext(trace_id=_trace_id,\n                                      transaction_id=_transaction_id,\n                                      span_id=_span_id,\n                                      parent_span_id=_parent_span_id)\n        _span = ThundraSpan(self,\n                            operation_name=operation_name,\n                            class_name=class_name,\n                            domain_name=domain_name,\n                            context=_context,\n                            tags=tags,\n                            start_time=start_time,\n                            span_order=_span_order)\n\n        self.recorder.record(RecordEvents.START_SPAN, _span)\n        return _span\n\n    def get_active_span(self):\n        return self.recorder.get_active_span()\n\n    def get_finished_stack(self):\n        return self.recorder.finished_span_stack\n\n    def record(self, event, span):\n        self.recorder.record(event, span)\n\n    def inject(self, span_context, format, carrier):\n        raise NotImplementedError('inject method not implemented yet')\n\n    def extract(self, format, carrier):\n        raise NotImplementedError('extract method not implemented yet')\n\n    def clear(self):\n        self.recorder.clear()\n\n    def add_span_listener(self, listener):\n        self.recorder.add_listener(listener)","sub_path":"thundra/opentracing/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"637502657","text":"import os, sys, subprocess\nimport time\n\ndef tempera():\n    # Path of the 1-Wire sensor's data I/O file\n    dir = '/sys/bus/w1/devices/w1_bus_master1/28-3c01d075bc1a/driver/28-3c01d075bc1a/temperature'\n    # Use the subprocess module with capture_output and text enabled\n    temptr = subprocess.run(['cat', dir], capture_output=True, text=True)\n    # Cast to int and divide by 1000 to get the temperature in degrees Celsius\n    temptr = int(temptr.stdout)/1000.0\n\n    t = time.localtime() # create a time struct using the time library\n    tiempo = time.strftime(\"%Y%m%d%H%M%S\", t) # get year, month, day, hour, minute and second\n\n    nombre_archivo = str(temptr) + \".csv\" # initial name of the new file\n    archivo = open(nombre_archivo, \"a\") # open the new file for appending\n\n    try:\n        while (True): # run until Ctrl+C is pressed, which jumps to the except block\n            # on every cycle, read the sensor data from the w1_bus_master1 directory\n            temptr = subprocess.run(['cat', dir], capture_output=True, text=True)\n            temptr = int(temptr.stdout)/1000.0\n            # append the temperature readings coming from the sensor\n            archivo.write(str(temptr)+\"°C, \"+ time.strftime(\"%H:%M:%S\", time.localtime())+\"\\n\")\n            print(temptr)\n            time.sleep(10) # a reading is logged every 10 seconds\n    except:\n        archivo.close() \n        nuevo_name = tiempo + \"_\" + str(temptr) + \".csv\"\n        # rename the file to the form:\n        # YearMonthDayHourMinuteSecond_lastTemperature\n        os.rename(nombre_archivo, nuevo_name) \n    \n    
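# final console message once the log file has been renamed\n    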
print(\"\\nSalió\")\n","sub_path":"1-wire.py","file_name":"1-wire.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"418135183","text":"import numpy\r\nimport os\r\nimport re\r\nfrom src.PathControlPoint import PathControlPoint\r\nfrom src.SliderPath import SliderPath\r\nfrom sympy.functions.elementary.integers import ceiling\r\n\r\ndef main():\r\n \r\n for file in os.listdir(\".\"):\r\n if file.endswith(\".osu\"):\r\n FD = open(file, 'r', encoding=\"utf8\")\r\n FDW = open(file[:-5]+\"-INVIS].osu\", 'w', encoding=\"utf8\")\r\n lines = FD.readlines()\r\n sliders = []\r\n \r\n # Finding global sv multiplier\r\n gsv = -1\r\n for line in lines:\r\n match = re.search(r\"SliderMultiplier:(\\d+(\\.\\d+)?)\", line)\r\n if match:\r\n gsv = float(match.group(1))\r\n if (gsv < 0.4):\r\n gsv = 0.4\r\n if (gsv > 3.6):\r\n gsv = 3.6\r\n if (gsv == -1):\r\n FDW.write(\"SliderMultiplier is NaN or not found in %s\" % (file))\r\n exit()\r\n \r\n # Making a list of bpm*sv points\r\n insideTPts = False\r\n bpmpts = []\r\n curbpm = 0\r\n for line in lines:\r\n if line == \"[TimingPoints]\\n\":\r\n insideTPts = True\r\n if insideTPts and line == \"\\n\":\r\n insideTPts = False\r\n \r\n match = re.search(r\"^(\\d+),(\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),1,\", line)\r\n if match and insideTPts:\r\n curbpm = float(60000/float(match.group(2)))\r\n bpmpts.append((int(match.group(1)), curbpm))\r\n \r\n match = re.search(r\"^(\\d+),(-?\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),\\d+,\\d+,\\d+,\\d+,0\", line)\r\n if match and insideTPts:\r\n bpmpts.append((int(match.group(1)), float(-100*curbpm/float(match.group(2)))))\r\n \r\n # Searching for sliders with no reverses (slides=1)\r\n for line in lines:\r\n match = re.search(r\"^(-?\\d+),(-?\\d+),(\\d+),(\\d+),(\\d+),(B|P|L)(\\|(-?\\d+:-?\\d+))*,1,(\\d+(\\.\\d+)?)\", line)\r\n if match:\r\n xpos = int(match.group(1))\r\n ypos = int(match.group(2))\r\n time = int(match.group(3))\r\n objtype = int(match.group(4))\r\n hitSound = int(match.group(5))\r\n sliderType = match.group(6)\r\n rest = re.sub(r\"^(-?\\d+),(-?\\d+),(\\d+),(\\d+),(\\d+),(B|P|L)\", \"\", line)\r\n positionsstring = rest[0:rest.find(\",\")]\r\n positions = re.findall(r\"\\|(-?\\d+:-?\\d+)\", positionsstring) \r\n rest = re.sub(r\"^(\\|-?\\d+:-?\\d+)*,\", \"\", rest)\r\n match = re.search(r\"^(\\d+),(\\d+(\\.\\d+)?)\", rest)\r\n slides = int(match.group(1))\r\n length = float(match.group(2))\r\n rest = re.sub(r\"^(\\d+),(\\d+(\\.\\d+)?)\", \"\", rest)\r\n \r\n poslist = []\r\n for entry in positions:\r\n match2 = re.search(r\"(-?\\d+):(-?\\d+)\", entry)\r\n pos = (int(match2.group(1)), int(match2.group(2)))\r\n poslist.append(pos)\r\n \r\n # If a slider is already distorted somehow, we should not modify it.\r\n if not distorted(poslist):\r\n # The value passed to bpm isn't actually just the bpm - it's the bpm times the current sv multiplier, or what the bpm would have to be if the sv multiplier were 1 at that point.\r\n # The way I'm getting this is very ugly but it does work\r\n sliders.append(processSlider(bpmpts[numpy.where(numpy.array(bpmpts)[:,0] <= time)[0][-1]][1], gsv, xpos, ypos, time, objtype, hitSound, sliderType, poslist, slides, length, rest))\r\n \r\n \r\n \r\n # Making new .osu file\r\n insideTPts = False\r\n unchangedline = True\r\n prevtimingpoint = (-1, -1, -1, -1, -1, -1, -1)\r\n for line in lines:\r\n unchangedline = True\r\n if line == \"[TimingPoints]\\n\":\r\n insideTPts = 
True\r\n if insideTPts and line == \"\\n\":\r\n insideTPts = False\r\n \r\n unchangedline = False\r\n # Collect all sliders that occur after the last processed timing point, and make their timing points\r\n matchingsliders = [s for s in sliders if s[9] != 0 and s[2] > prevtimingpoint[0]]\r\n rest = re.sub(r\"^(\\d+),(\\d+(\\.\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),1,\", \"\", line)\r\n if matchingsliders:\r\n for s in matchingsliders:\r\n # We move the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2]-1, s[9], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,NaN,%s,%s,%s,%s,0,%s\" % (s[2]-1, prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2], 60000/(bpmpts[numpy.where(numpy.array(bpmpts)[:,0] == prevtimingpoint[0])[0][-1]][1]*prevtimingpoint[1]/-100), prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,0,%s\" % (s[2], prevtimingpoint[1], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"\\n\")\r\n \r\n # Uninherited timing point matching\r\n match = re.search(r\"^(\\d+),(\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),1,\", line)\r\n if match and insideTPts:\r\n unchangedline = False\r\n # Collect all sliders that occur before the currently processing timing point and after the previously processed timing point, and make their timing points\r\n matchingsliders = [s for s in sliders if (s[9] != 0 and (s[2] > prevtimingpoint[0] and s[2] < int(match.group(1))))]\r\n rest = re.sub(r\"^(\\d+),(\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),1,\", \"\", line)\r\n if matchingsliders:\r\n for s in matchingsliders:\r\n # We move the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2]-1, s[9], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,NaN,%s,%s,%s,%s,0,%s\" % (s[2]-1, prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2], 60000/(bpmpts[numpy.where(numpy.array(bpmpts)[:,0] == prevtimingpoint[0])[0][-1]][1]*prevtimingpoint[1]/-100), prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,0,%s\" % (s[2], prevtimingpoint[1], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n \r\n # Override uninherited timing point if it occurs at the same time as the sliders' timing points\r\n matchingsliders = [s for s in sliders if (s[9] != 0 and s[2] == int(match.group(1)))]\r\n if matchingsliders:\r\n s = matchingsliders[0]\r\n # We move the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2]-1, s[9], match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n FDW.write(\"%d,NaN,%s,%s,%s,%s,0,%s\" % (s[2]-1, match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n FDW.write(\"%d,%s,%s,%s,%s,%s,1,%s\" % (s[2], match.group(2), match.group(6), match.group(7), match.group(8), 
match.group(9), rest))\r\n else:\r\n FDW.write(line)\r\n \r\n # prevtimingpoint = (time, inherited timing point beatLength, meter, sampleSet, sampleIndex, volume, effects)\r\n # Inherited timing point beatLength is -100 because it is treated as the default (which is -100) until an inherited timing point sets it.\r\n prevtimingpoint = (int(match.group(1)), -100, match.group(6), match.group(7), match.group(8), match.group(9), rest)\r\n \r\n # Inherited timing point matching\r\n match = re.search(r\"^(\\d+),(-?\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),0,\", line)\r\n if match and insideTPts:\r\n unchangedline = False\r\n # Collect all sliders that occur before the currently processing timing point and after the previously processed timing point, and make their timing points\r\n matchingsliders = [s for s in sliders if (s[9] != 0 and (s[2] > prevtimingpoint[0] and s[2] < int(match.group(1))))]\r\n rest = re.sub(r\"^(\\d+),(-?\\d+(\\.\\d+)?(E(\\+|-)\\d+)?),(\\d+),(\\d+),(\\d+),(\\d+),0,\", \"\", line)\r\n if matchingsliders:\r\n for s in matchingsliders:\r\n # We move the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2]-1, s[9], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,NaN,%s,%s,%s,%s,0,%s\" % (s[2]-1, prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2], 60000/(bpmpts[numpy.where(numpy.array(bpmpts)[:,0] == prevtimingpoint[0])[0][-1]][1]*prevtimingpoint[1]/-100), prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,0,%s\" % (s[2], prevtimingpoint[1], prevtimingpoint[2], prevtimingpoint[3], prevtimingpoint[4], prevtimingpoint[5], prevtimingpoint[6]))\r\n \r\n # Override inherited timing point if it occurs at the same time as the sliders' timing points\r\n matchingsliders = [x for x in sliders if (x[9] != 0 and x[2] == int(match.group(1)))]\r\n if matchingsliders:\r\n s = matchingsliders[0]\r\n # We move the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2]-1, s[9], match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n FDW.write(\"%d,NaN,%s,%s,%s,%s,0,%s\" % (s[2]-1, match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n FDW.write(\"%d,%.15E,%s,%s,%s,%s,1,%s\" % (s[2], 60000/(bpmpts[numpy.where(numpy.array(bpmpts)[:,0] == s[2])[0][-1]][1]*float(match.group(2))/-100), match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n FDW.write(\"%d,%s,%s,%s,%s,%s,0,%s\" % (s[2], match.group(2), match.group(6), match.group(7), match.group(8), match.group(9), rest))\r\n else:\r\n FDW.write(line)\r\n \r\n # prevtimingpoint = (time, inherited timing point beatLength, meter, sampleSet, sampleIndex, volume, effects)\r\n prevtimingpoint = (int(match.group(1)), float(match.group(2)), match.group(6), match.group(7), match.group(8), match.group(9), rest)\r\n \r\n # Slider HitObject matching\r\n match = re.search(r\"^(-?\\d+),(-?\\d+),(\\d+),(\\d+),(\\d+),(B|P|L)(\\|(-?\\d+:-?\\d+))*,(\\d+),(\\d+(\\.\\d+)?)\", line)\r\n if match:\r\n unchangedline = False\r\n matchingsliders = [x for x in sliders if (x[9] != 0 and x[2] == int(match.group(3)))]\r\n if matchingsliders:\r\n s = matchingsliders[0]\r\n # We move 
the slider back one ms so that timing for the rest of the song doesn't get offset by +1ms\r\n FDW.write(\"%d,%d,%d,%d,%d,L|%s,%d,%f%s\" % (s[0], s[1], s[2]-1, s[3], s[4], \"|\".join(\":\".join(str(y) for y in x) for x in s[5]), s[6], s[7], s[8]))\r\n else:\r\n FDW.write(line)\r\n \r\n match = re.search(r\"^Version:(.*)$\", line)\r\n if match:\r\n unchangedline = False\r\n FDW.write(\"Version:%s-INVIS\\n\" % match.group(1))\r\n \r\n if unchangedline:\r\n FDW.write(line)\r\n \r\n FDW.close()\r\n FD.close()\r\n \r\ndef processSlider(bpm, gsv, xpos, ypos, time, objtype, hitSound, sliderType, poslist, slides, length, rest):\r\n if sliderType == \"L\":\r\n pathtype = PathControlPoint.LINEAR\r\n elif sliderType == \"P\":\r\n pathtype = PathControlPoint.PERFECT\r\n else:\r\n pathtype = PathControlPoint.BEZIER\r\n \r\n # Convert poslist to list of PathControlPoints\r\n ControlPoints = [PathControlPoint(numpy.array((xpos,ypos), dtype='int64'), pathtype)]\r\n for i in range(0,len(poslist)):\r\n if i0 and poslist[i-1] == poslist[i]:\r\n # We have a new segment starting here\r\n ControlPoints.append(PathControlPoint(numpy.array(poslist[i], dtype='int64'), pathtype))\r\n else:\r\n ControlPoints.append(PathControlPoint(numpy.array(poslist[i], dtype='int64'), None))\r\n \r\n sliderpath = SliderPath(ControlPoints, length)\r\n \r\n xpoints = []\r\n ypoints = []\r\n tlen = round(1000*length/(5/3*bpm*gsv))\r\n \r\n # sliderpath.PositionAt returns the loaction of the sliderball pre-snap. We want post-snap so this requires some additional structure.\r\n # First we calculate how many ms each linear segment is used for\r\n mspersegment = [0]*len(sliderpath.cumulativeLength)\r\n for i in range(0,tlen+1):\r\n idx = sliderpath.indexOfDistance(sliderpath.progressToDistance(i/tlen))\r\n if idx < 0:\r\n idx = 0\r\n if idx > len(mspersegment):\r\n idx = len(mspersegment)-1\r\n mspersegment[idx] += 1\r\n # Edge case handled separately; if the index of the progress to distance is 0 then we consider it to simply be the first point.\r\n for j in range(0,mspersegment[0]):\r\n pt = sliderpath.calculatedPath[0]\r\n xpoints.append(round(pt[0]))\r\n ypoints.append(round(pt[1]))\r\n for i in range(1,len(mspersegment)):\r\n p0 = sliderpath.calculatedPath[i-1]\r\n p1 = sliderpath.calculatedPath[i]\r\n for j in range(1,mspersegment[i]+1):\r\n pt = p0+(p1-p0)*j/mspersegment[i]\r\n xpoints.append(round(pt[0]))\r\n ypoints.append(round(pt[1]))\r\n \r\n # Define newposlist\r\n newposlist = []\r\n framedist = 2*67141632+2*33587200+xpos+ypos-xpoints[0]-ypoints[0]\r\n snaptol = 50000;\r\n \r\n newposlist.append((4196352+xpos, ypos))\r\n newposlist.append((4196352+xpos, 2099200+ypos))\r\n newposlist.append((8392704+xpos, 2099200+ypos))\r\n newposlist.append((8392704+xpos, 4198400+ypos))\r\n newposlist.append((16785408+xpos, 4198400+ypos))\r\n newposlist.append((16785408+xpos, 8396800+ypos))\r\n newposlist.append((33570816+xpos, 8396800+ypos))\r\n newposlist.append((33570816+xpos, 16793600+ypos))\r\n newposlist.append((67141632+xpos, 16793600+ypos))\r\n newposlist.append((67141632+xpos, 33587200+ypos+snaptol))\r\n newposlist.append((67141632+xpos, ypoints[0]))\r\n newposlist.append((xpoints[0], ypoints[0]))\r\n curlen = framedist+2*snaptol;\r\n for t in range(1,tlen):\r\n newposlist.append((67141632+xpos, ypoints[t-1]))\r\n newposlist.append((67141632+xpos, round(33587200+0.5*(ypos-xpos+xpoints[t-1]+xpoints[t]+ypoints[t-1]+ypoints[t]-xpoints[0]-ypoints[0]))))\r\n if 
((ypos-xpos+xpoints[t-1]+xpoints[t]+ypoints[t-1]+ypoints[t]-xpoints[0]-ypoints[0]) % 2 == 1):\r\n curlen = curlen+1\r\n \r\n # This adds and subtracts a bunch of things to cancel everything\r\n # out (sometimes the rounding will add an extra pixel) and make\r\n # sure the length the slider travels to get to each pixel we want\r\n # the sliderball to appear on stays the same.\r\n \r\n newposlist.append((67141632+xpos, ypoints[t]))\r\n newposlist.append((xpoints[t], ypoints[t]))\r\n curlen = curlen + framedist\r\n \r\n # Fixes some rendering issues by making the last segment of length 0\r\n newposlist.append(newposlist[-1])\r\n newposlist.append(newposlist[-1])\r\n \r\n return (xpos, ypos, time, objtype, hitSound, newposlist, slides, curlen, rest, 5/3*gsv*60/framedist)\r\n\r\ndef distorted(poslist):\r\n minx = min(pos[0] for pos in poslist)\r\n maxx = max(pos[0] for pos in poslist)\r\n miny = min(pos[1] for pos in poslist)\r\n maxy = max(pos[1] for pos in poslist)\r\n # 2^14 is an experimentally found constant. Distortion is undefined behavior in OpenGL\r\n return maxx-minx > 2**14 or maxy-miny > 2**14\r\n \r\n\r\nif __name__==\"__main__\": main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"213959774","text":"\"\"\"\nIt allows us to return elements from an iterable one at the time.\n\"\"\"\n\n\ndef generator_function(num):\n for i in range(num):\n yield i\n\n\ndef performance(func):\n from time import time\n\n def wrapper(*args, **kwargs):\n t1 = time()\n result = func(*args, **kwargs)\n t2 = time()\n print(f\"took {t2 - t1} s\")\n return result\n\n return wrapper\n\n\n@performance\ndef long_time():\n print(\"long time: \", end=\"\")\n for i in range(10000000): # range is a generator\n i * 5\n\n\n@performance\ndef long_time2():\n print(\"long time 2: \", end=\"\")\n for i in list(range(10000000)):\n i * 5\n\n\ndef fib(number):\n a, b = 0, 1\n for i in range(number):\n yield a\n a, b = b, a + b\n\n\ndef main():\n for item in generator_function(1000):\n # print(item) # We get one by one\n pass\n\n g = generator_function(100)\n print(next(g))\n print(next(g))\n print(next(g))\n\n long_time()\n long_time2()\n\n for n in fib(9):\n print(n)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"05_Generators/Generators.py","file_name":"Generators.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"451467818","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\n\r\nfrom ysg_treelstm import Tree\r\n\r\n\r\n# module for childsumtreelstm\r\nclass ChildSumTreeLSTM(nn.Module):\r\n def __init__(self, voca_size, in_dim, mem_dim, device):\r\n super(ChildSumTreeLSTM, self).__init__()\r\n self.device = device\r\n self.in_dim = in_dim\r\n self.mem_dim = mem_dim\r\n self.emb = nn.Embedding(voca_size, self.in_dim).to(self.device)\r\n self.emb.weight.requires_grad = True\r\n self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim).to(self.device)\r\n self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim).to(self.device)\r\n self.fx = nn.Linear(self.in_dim, self.mem_dim).to(self.device)\r\n self.fh = nn.Linear(self.mem_dim, self.mem_dim).to(self.device)\r\n\r\n def node_forward(self, inputs, child_c, child_h):\r\n child_h_sum = torch.sum(child_h, dim=0, keepdim=True)\r\n # child_h_sum = child_h_sum.to(self.device)\r\n iou = self.ioux(inputs) 
+ self.iouh(child_h_sum) # TODO: no bias term here?\r\n i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)\r\n i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)\r\n f = torch.sigmoid(\r\n self.fh(child_h) +\r\n self.fx(inputs).repeat(len(child_h), 1)\r\n )\r\n fc = torch.mul(f, child_c)\r\n\r\n c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)\r\n h = torch.mul(o, torch.tanh(c))\r\n return c, h\r\n\r\n def forward(self, tree):\r\n for idx in range(tree.num_children):\r\n self.forward(tree.children[idx])\r\n\r\n inputs = torch.LongTensor([tree.op]).to(self.device)\r\n inputs = self.emb(inputs)\r\n # TODO: add a fully connected layer here\r\n if tree.num_children == 0:\r\n # child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\r\n # child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\r\n\r\n #child_c = torch.Tensor(1, self.mem_dim).zero_().requires_grad_().to(self.device)\r\n child_c = torch.Tensor(1, self.mem_dim).new_ones(1,self.mem_dim).requires_grad_().to(self.device)\r\n #child_h = torch.Tensor(1, self.mem_dim).zero_().requires_grad_().to(self.device)\r\n child_h = torch.Tensor(1, self.mem_dim).new_ones(1, self.mem_dim).requires_grad_().to(self.device)\r\n else:\r\n child_c, child_h = zip(* map(lambda x: x.state, tree.children))\r\n child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)\r\n\r\n tree.state = self.node_forward(inputs, child_c, child_h)\r\n del inputs, child_c, child_h\r\n return tree.state\r\n\r\n\r\n# module for distance-angle similarity\r\nclass Similarity(nn.Module):\r\n def __init__(self, mem_dim, hidden_dim, num_classes, device):\r\n super(Similarity, self).__init__()\r\n self.device = device\r\n self.mem_dim = mem_dim\r\n self.hidden_dim = hidden_dim\r\n self.num_classes = num_classes\r\n self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim).to(self.device)\r\n self.wp = nn.Linear(self.hidden_dim, self.num_classes).to(self.device)\r\n self._out = nn.Linear(self.mem_dim, self.hidden_dim).to(self.device)\r\n # self._outt = nn.Linear(self.hidden_dim, 64).cuda()\r\n\r\n def forward(self, lvec, rvec): # binary classification\r\n\r\n mult_dist = torch.mul(lvec, rvec)\r\n abs_dist = torch.abs(torch.add(lvec, -rvec))\r\n vec_dist = torch.cat((mult_dist, abs_dist), 1)\r\n\r\n out = torch.sigmoid(self.wh(vec_dist))\r\n out = torch.softmax(self.wp(out), dim=1)\r\n return out\r\n '''\r\n lout, rout = self._out(lvec), self._out(rvec)\r\n #lout, rout = self._outt(lout), self._outt(rout)\r\n MAB = torch.mul(torch.norm(lout).cuda(), torch.norm(rout).cuda()).cuda()\r\n AB = torch.dot(lout.squeeze(), rout.squeeze()).cuda()\r\n cos_similarity = torch.div(torch.add(1, torch.div(AB, MAB).cuda()), 2)\r\n return torch.cat((torch.sub(1, cos_similarity).unsqueeze(0), cos_similarity.unsqueeze(0)))\r\n '''\r\n\r\n# putting the whole model together\r\nclass SimilarityTreeLSTM(nn.Module):\r\n def __init__(self, vocab_size, in_dim, mem_dim, hidden_dim, num_classes, device):\r\n super(SimilarityTreeLSTM, self).__init__()\r\n # self.emb = nn.Embedding(vocab_size, in_dim, padding_idx=0, sparse=sparsity)\r\n # if freeze:\r\n # self.emb.weight.requires_grad = False\r\n self.embmodel = ChildSumTreeLSTM(vocab_size, in_dim, mem_dim, device)\r\n self.similarity = Similarity(mem_dim, hidden_dim, num_classes, device)\r\n\r\n def forward(self, ltree, rtree):\r\n # linputs = self.emb(linputs)\r\n # rinputs = self.emb(rinputs)\r\n lstate, lhidden = self.embmodel(ltree)\r\n rstate, rhidden = self.embmodel(rtree)\r\n output = self.similarity(lstate, rstate)\r\n return 
output\r\n\r\n\r\ndef test_ChildSumTreeLSTM():\r\n from ysg_treelstm import Tree\r\n trees =[]\r\n for i in range(10,15):\r\n t = Tree()\r\n t.op = i\r\n trees.append(t)\r\n trees[0].add_child(trees[1])\r\n trees[0].add_child(trees[2])\r\n trees[2].add_child(trees[3])\r\n trees[2].add_child(trees[4])\r\n st = SimilarityTreeLSTM(80, 10, 16, 10, 2, torch.device(\"cpu\"))\r\n root = trees[0]\r\n output = st(root, root)\r\n print(output)\r\n\r\n\r\n\r\ndef test_detach():\r\n input = torch.Tensor([1])\r\n x = input.detach().new(1, 100).fill_(0.)\r\n y = x.requires_grad_()\r\n print(y)\r\nif __name__ == '__main__':\r\n test_ChildSumTreeLSTM()\r\n # test_detach()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"246844027","text":"import os\nfrom random import randint\n\ndef get(l, r):\n\treturn randint(1, 10**10) % (r - l) + l\n\nn = 0\na = []\nb = 0\n\ndef gen():\n\tglobal a, b, n\n\tn = 1\n\twhile n % 2 == 1:\n\t\tn = get(1, 5) * 2\n\t\ta = []\n\t\tfor i in range(n):\n\t\t\ta.append(get(0, 10))\n\t\ta.sort()\n\t\tfans = []\n\t\tfor i in a:\n\t\t\tif len(fans) == 0 or i != fans[-1]:\n\t\t\t\tfans.append(i)\n\t\t\telif len(fans) > 0:\n\t\t\t\tfans.remove(i)\n\t\ta = fans\n\t\tn = len(a)\n\t\tb = get(1, 10)\n\ndef check(x):\n\tglobal a, b\n\tans = 0\n\tfor i in a:\n\t\tans = ans ^ (i + x)\n\treturn ans == b\n\ndef solve():\n\tglobal b\n\tans = []\n\tfor x in range(0, 1000):\n\t\tif (check(x)):\n\t\t\tans.append(x)\n\treturn ans\n\ndef main():\n\tglobal a, b, n\n\tfor te in range(100):\n\t\tgen()\n\t\tans = solve()\n\t\tif len(ans) == 0:\n\t\t\tprint(\"n =\", n, \"a =\", a, \"b =\", b, \"ans =\", ans)\n\n\nmain()","sub_path":"2020/Заочка/G/stress_idea.py","file_name":"stress_idea.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"350298819","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom distutils.core import Extension\n\n\nLONG_DESCRIPTION = \"\"\"\n\tscikit-rf is an open source approach to RF/Microwave engineering implemented in the Python programming language.\n\"\"\"\nsetup(name='scikit-rf',\n\tversion='1.0.0',\n\tlicense='new BSD',\n\tdescription='Object Oriented Microwave Engineering',\n\tlong_description=LONG_DESCRIPTION,\n\tauthor='Alex Arsenovic',\n\tauthor_email='alexanderarsenovic@gmail.com',\n\turl='http://www.scikit-rf.org',\n\tpackages=find_packages(),\n\tinstall_requires = ['numpy', 'scipy'],\n\textras_require={},\n\t#ext_modules=[Extension('skrf.src.connect', ['skrf/src/connect.c', ], export_symbols=['innerconnect_s','connect_s'])],\n\tpackage_dir={'skrf':'skrf'},\n\tinclude_package_data = True\n\t)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"126658943","text":"#Given an m x n matrix of non-negative integers representing the height of each unit cell in a continent, the \"Pacific ocean\" touches the left and top edges of the matrix and the \"Atlantic ocean\" touches the right and bottom edges.\n\n#Water can only flow in four directions (up, down, left, or right) from a cell to another one with height equal or lower.\n\n#Find the list of grid coordinates where water can flow to both the Pacific and Atlantic ocean.\n\n#Note:\n\n#The order of returned grid coordinates 
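does not matter.\n#Both m and n are less than 150.\n

Since the expected output in this problem statement doubles as a test oracle, a tiny driver makes the example below executable. A minimal sketch (it assumes the `Solution` class defined underneath; the matrix and the coordinate set are taken verbatim from the docstring):

```python
matrix = [
    [1, 2, 2, 3, 5],
    [3, 2, 3, 4, 4],
    [2, 4, 5, 3, 1],
    [6, 7, 1, 4, 5],
    [5, 1, 1, 2, 4],
]
expected = {(0, 4), (1, 3), (1, 4), (2, 2), (3, 0), (3, 1), (4, 0)}
# The order of the returned coordinates is unspecified, so compare as sets.
result = {tuple(p) for p in Solution().pacificAtlantic(matrix)}
assert result == expected
```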
\n\n#Example:\n\n#Given the following 5x5 matrix:\n\n# Pacific ~ ~ ~ ~ ~\n# ~ 1 2 2 3 (5) *\n# ~ 3 2 3 (4) (4) *\n# ~ 2 4 (5) 3 1 *\n# ~ (6) (7) 1 4 5 *\n# ~ (5) 1 1 2 4 *\n# * * * * * Atlantic\n\n#Return:\n\n#[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with parentheses in above matrix).\n\nfrom typing import List\nclass Solution:\n def pacificAtlantic(self, matrix: List[List[int]]) -> List[List[int]]:\n if not matrix or not matrix[0]:\n return []\n\n m, n = len(matrix), len(matrix[0])\n\n p_visited = set()\n a_visited = set()\n\n directions = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n\n def dfs(visited, x, y):\n visited.add((x, y))\n for dx, dy in directions:\n nx, ny = x+dx, y+dy\n if 0 <= nx < m and 0 <= ny < n and (nx, ny) not in visited and matrix[nx][ny] >= matrix[x][y]:\n dfs(visited, nx, ny)\n\n for i in range(m):\n dfs(p_visited, i, 0)\n dfs(a_visited, i, n-1)\n\n for j in range(n):\n dfs(p_visited, 0, j)\n dfs(a_visited, m-1, j)\n\n return list(p_visited.intersection(a_visited))","sub_path":"python_code/417_Pacific_Atlantic_Water_Flow.py","file_name":"417_Pacific_Atlantic_Water_Flow.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"393914","text":"import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch\nimport torch.nn.functional as F\n\nfrom ops.TCP.TCP_module import TCP\n\nfrom torch.nn.init import orthogonal_\n\n__all__ = ['Res2Net', 'res2net50']\n\n\nclass MEModule(nn.Module):\n \"\"\" Motion excitation module\n \n :param reduction=16\n :param n_segment=8/16\n \"\"\"\n def __init__(self, channel, reduction=16, n_segment=8):\n super(MEModule, self).__init__()\n self.channel = channel\n self.reduction = reduction\n self.n_segment = n_segment\n self.conv1 = nn.Conv2d(\n in_channels=self.channel,\n out_channels=self.channel//self.reduction,\n kernel_size=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)\n\n self.conv2 = nn.Conv2d(\n in_channels=self.channel//self.reduction,\n out_channels=self.channel//self.reduction,\n kernel_size=3,\n padding=1,\n groups=channel//self.reduction,\n bias=False)\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.sigmoid = nn.Sigmoid()\n\n self.pad = (0, 0, 0, 0, 0, 0, 0, 1)\n\n self.conv3 = nn.Conv2d(\n in_channels=self.channel//self.reduction,\n out_channels=self.channel,\n kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(num_features=self.channel)\n\n self.identity = nn.Identity()\n\n def forward(self, x):\n nt, c, h, w = x.size()\n bottleneck = self.conv1(x) # nt, c//r, h, w\n bottleneck = self.bn1(bottleneck) # nt, c//r, h, w\n\n # t feature\n reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, 
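The temporal padding trick used later in this forward pass deserves a standalone illustration. A sketch of how `F.pad` with an eight-long pad tuple touches only the time dimension of an `(n, t, c, h, w)` tensor (the shapes here are illustrative, not taken from the model):

```python
import torch
import torch.nn.functional as F

# F.pad consumes the pad tuple from the last dimension backwards, so for a
# 5-D (n, t, c, h, w) tensor the tuple (0,0, 0,0, 0,0, 0,1) pads only dim 1
# (time) with one trailing zero frame -- exactly what MEModule needs to
# align the shifted t+1 features with the t features.
x = torch.ones(2, 7, 4, 3, 3)            # n, t-1, c, h, w
y = F.pad(x, (0, 0, 0, 0, 0, 0, 0, 1))   # -> n, t, c, h, w
assert y.shape == (2, 8, 4, 3, 3)
assert y[:, -1].abs().sum().item() == 0  # the padded frame is all zeros
```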
c//r, h, w\n diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) #nt, c//r, h, w\n y = self.avg_pool(diff_fea_pluszero) # nt, c//r, 1, 1\n y = self.conv3(y) # nt, c, 1, 1\n y = self.bn3(y) # nt, c, 1, 1\n y = self.sigmoid(y) # nt, c, 1, 1\n y = y - 0.5\n output = x + x * y.expand_as(x)\n return output\n\nclass ShiftModule(nn.Module):\n \"\"\"1D Temporal convolutions, the convs are initialized to act as the \"Part shift\" layer\n \"\"\"\n\n def __init__(self, input_channels, n_segment=8, n_div=8, mode='shift'):\n super(ShiftModule, self).__init__()\n self.input_channels = input_channels\n self.n_segment = n_segment\n self.fold_div = n_div\n self.fold = self.input_channels // self.fold_div\n self.conv = nn.Conv1d(\n 2*self.fold, 2*self.fold,\n kernel_size=3, padding=1, groups=2*self.fold,\n bias=False)\n # weight_size: (2*self.fold, 1, 3)\n if mode == 'shift':\n # import pdb; pdb.set_trace()\n self.conv.weight.requires_grad = True\n self.conv.weight.data.zero_()\n self.conv.weight.data[:self.fold, 0, 2] = 1 # shift left\n self.conv.weight.data[self.fold: 2 * self.fold, 0, 0] = 1 # shift right\n if 2*self.fold < self.input_channels:\n self.conv.weight.data[2 * self.fold:, 0, 1] = 1 # fixed\n elif mode == 'fixed':\n self.conv.weight.requires_grad = True\n self.conv.weight.data.zero_()\n self.conv.weight.data[:, 0, 1] = 1 # fixed\n elif mode == 'norm':\n self.conv.weight.requires_grad = True\n\n def forward(self, x):\n # shift by conv\n # import pdb; pdb.set_trace()\n nt, c, h, w = x.size()\n n_batch = nt // self.n_segment\n x = x.view(n_batch, self.n_segment, c, h, w)\n x = x.permute([0, 3, 4, 2, 1]) # (n_batch, h, w, c, n_segment)\n x = x.contiguous().view(n_batch*h*w, c, self.n_segment)\n x = self.conv(x) # (n_batch*h*w, c, n_segment)\n x = x.view(n_batch, h, w, c, self.n_segment)\n x = x.permute([0, 4, 3, 1, 2]) # (n_batch, n_segment, c, h, w)\n x = x.contiguous().view(nt, c, h, w)\n return x\n\nclass Bottle2neckShift(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'):\n \"\"\" Constructor\n Args:\n inplanes: input channel dimensionality\n planes: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n downsample: None when stride = 1\n baseWidth: basic width of conv3x3\n scale: number of scale.\n type: 'normal': normal set. 
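To make the "part shift" initialization above concrete, here is a tiny standalone check that a depthwise `Conv1d` with kernels `[0,0,1]` and `[1,0,0]` really shifts a sequence left and right; the two-channel setup is illustrative, not the module's actual fold sizes:

```python
import torch
import torch.nn as nn

conv = nn.Conv1d(2, 2, kernel_size=3, padding=1, groups=2, bias=False)
with torch.no_grad():
    conv.weight.zero_()
    conv.weight[0, 0, 2] = 1  # kernel [0,0,1]: out[t] = x[t+1] (shift left)
    conv.weight[1, 0, 0] = 1  # kernel [1,0,0]: out[t] = x[t-1] (shift right)

seq = torch.tensor([[[1., 2., 3., 4.],
                     [1., 2., 3., 4.]]])  # (batch, channels, time)
out = conv(seq)
assert out[0, 0].tolist() == [2.0, 3.0, 4.0, 0.0]
assert out[0, 1].tolist() == [0.0, 1.0, 2.0, 3.0]
```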
'stage': first block of a new stage.\n \"\"\"\n super(Bottle2neckShift, self).__init__()\n\n width = int(math.floor(planes * (baseWidth/64.0)))\n\n self.me = MEModule(width*scale, reduction=16, n_segment=8)\n\n self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width*scale)\n\n if scale == 1:\n self.nums = 1\n else:\n self.nums = scale - 1\n if stype == 'stage':\n self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)\n convs = []\n bns = []\n shifts = []\n for i in range(self.nums):\n convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride,\n padding=1, bias=False))\n bns.append(nn.BatchNorm2d(width))\n shifts.append(ShiftModule(width, n_segment=8, n_div=2, mode='fixed'))\n shifts.append(ShiftModule(width, n_segment=8, n_div=2, mode='shift'))\n\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n self.shifts = nn.ModuleList(shifts)\n\n self.conv3 = nn.Conv2d(width*scale, planes * self.expansion,\n kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stype = stype\n self.scale = scale\n self.width = width\n\n def forward(self, x):\n # import pdb; pdb.set_trace()\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.me(out)\n\n spx = torch.split(out, self.width, 1) # 4*(nt, c/4, h, w)\n for i in range(self.nums):\n if i == 0 or self.stype == 'stage':\n sp = spx[i]\n else:\n sp = sp + spx[i]\n sp = self.shifts[i](sp)\n sp = self.convs[i](sp)\n sp = self.relu(self.bns[i](sp))\n if i == 0:\n out = sp\n else:\n out = torch.cat((out, sp), 1)\n last_sp = spx[self.nums]\n last_sp = self.shifts[self.nums](last_sp)\n if self.scale != 1 and self.stype == 'normal':\n out = torch.cat((out, last_sp), 1)\n elif self.scale != 1 and self.stype == 'stage':\n if self.stype =='stage' and spx[-1].shape[1] == 208:\n out = torch.cat((out, last_sp), 1)\n # print(out.shape)\n else:\n out = torch.cat((out, self.pool(last_sp)), 1)\n\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass Bottle2neck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale = 4, stype='normal'):\n \"\"\" Constructor\n Args:\n inplanes: input channel dimensionality\n planes: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n downsample: None when stride = 1\n baseWidth: basic width of conv3x3\n scale: number of scale.\n type: 'normal': normal set. 
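Both bottleneck variants compute their per-branch width the same way; a small sketch of that arithmetic, with values following the 26w_4s configuration used by the constructors further down (the helper name is mine, not the module's):

```python
import math

def branch_width(planes: int, base_width: int = 26) -> int:
    # Each of the `scale` branches gets floor(planes * baseWidth / 64)
    # channels, so conv1 emits width * scale channels in total.
    return int(math.floor(planes * (base_width / 64.0)))

assert branch_width(64) == 26    # stage 1: conv1 -> 26 * 4 = 104 channels
assert branch_width(512) == 208  # matches the spx[-1].shape[1] == 208 check above
```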
'stage': first block of a new stage.\n \"\"\"\n super(Bottle2neck, self).__init__()\n\n width = int(math.floor(planes * (baseWidth/64.0)))\n self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width*scale)\n \n if scale == 1:\n self.nums = 1\n else:\n self.nums = scale - 1\n if stype == 'stage':\n self.pool = nn.AvgPool2d(kernel_size=3, stride = stride, padding=1)\n convs = []\n bns = []\n for i in range(self.nums):\n convs.append(nn.Conv2d(width, width, kernel_size=3, stride = stride, padding=1, bias=False))\n bns.append(nn.BatchNorm2d(width))\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n\n self.conv3 = nn.Conv2d(width*scale, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stype = stype\n self.scale = scale\n self.width = width\n\n def forward(self, x):\n # import pdb; pdb.set_trace()  # debug breakpoint disabled; it would halt every forward pass\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n spx = torch.split(out, self.width, 1)\n for i in range(self.nums):\n if i==0 or self.stype=='stage':\n sp = spx[i]\n else:\n sp = sp + spx[i]\n sp = self.convs[i](sp)\n sp = self.relu(self.bns[i](sp))\n if i==0:\n out = sp\n else:\n out = torch.cat((out, sp), 1)\n if self.scale != 1 and self.stype=='normal':\n out = torch.cat((out, spx[self.nums]),1)\n elif self.scale != 1 and self.stype=='stage':\n out = torch.cat((out, self.pool(spx[self.nums])),1)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass Res2Net(nn.Module):\n\n def __init__(self, block, layers, baseWidth = 26, scale = 4, num_classes=1000,\n TCP_module=None, segment=None,\n ):\n self.inplanes = 64\n super(Res2Net, self).__init__()\n self.baseWidth = baseWidth\n self.scale = scale\n self.num_segments = segment\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1)\n\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n if TCP_module is not None:\n print('Adding TCP module...')\n self.TCP = TCP_module\n else:\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.TCP = None\n\n\n for m in self.modules():\n if m == self.TCP:  # don't reverse the initialization done inside TCP\n break\n elif isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample=downsample,\n stype='stage', baseWidth = self.baseWidth, scale=self.scale))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n 
layers.append(block(self.inplanes, planes, baseWidth = self.baseWidth, scale=self.scale))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n if self.TCP is not None:\n x = self.TCP(x)\n else:\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef res2net50(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50 model.\n Res2Net-50 refers to the Res2Net-50_26w_4s.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']))\n return model\n\ndef tea50_8f(TCP_module=None, **kwargs):\n \"\"\"Constructs a TEA model.\n Part of the TEA model refers to the Res2Net-50_26w_4s.\n Args:\n TCP_module: if not None, a TCP module is attached to the network.\n \"\"\"\n\n model = Res2Net(Bottle2neckShift, [3, 4, 6, 3], baseWidth = 26, scale = 4,\n TCP_module=TCP_module, **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']),\n # strict=False)\n return model\n\ndef res2net50_26w_4s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_26w_4s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_4s']))\n return model\n\ndef res2net101_26w_4s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-101_26w_4s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth = 26, scale = 4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net101_26w_4s']))\n return model\n\ndef res2net50_26w_6s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_26w_6s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 6, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_6s']))\n return model\n\ndef res2net50_26w_8s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_26w_8s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 26, scale = 8, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_26w_8s']))\n return model\n\ndef res2net50_48w_2s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_48w_2s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 48, scale = 2, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_48w_2s']))\n return model\n\ndef res2net50_14w_8s(pretrained=False, **kwargs):\n \"\"\"Constructs a Res2Net-50_14w_8s model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 14, scale = 8, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['res2net50_14w_8s']))\n 
return model\n\n\n\nif __name__ == '__main__':\n images = torch.rand(8, 3, 224, 224)\n model = tea50_8f()  # res2net50shift is not defined in this file; tea50_8f builds the shift-enabled model\n output = model(images)\n print(output.size())\n","sub_path":"ops/torchvision/tea50_8f.py","file_name":"tea50_8f.py","file_ext":"py","file_size_in_byte":16933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"561584432","text":"import os\nimport tempfile\nimport datetime\nimport dask.dataframe as dd\nfrom fsspec.implementations.local import LocalFileSystem\nimport numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nimport pytest\n\nfrom adlfs import AzureBlobFileSystem, AzureBlobFile\n\n\nURL = \"http://127.0.0.1:10000\"\nACCOUNT_NAME = \"devstoreaccount1\"\nKEY = \"Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==\"  # NOQA\nCONN_STR = f\"DefaultEndpointsProtocol=http;AccountName={ACCOUNT_NAME};AccountKey={KEY};BlobEndpoint={URL}/{ACCOUNT_NAME};\"  # NOQA\n\n\ndef assert_almost_equal(x, y, threshold, prop_name=None):\n if x is None and y is None:\n return\n assert abs(x - y) <= threshold\n\n\ndef test_connect(storage):\n AzureBlobFileSystem(account_name=storage.account_name, connection_string=CONN_STR)\n\n\ndef assert_blob_equals(blob, expected_blob):\n irregular_props = [\n \"etag\",\n ]\n\n time_based_props = [\n \"last_modified\",\n \"creation_time\",\n \"deleted_time\",\n \"last_accessed_on\",\n ]\n # creating a shallow copy since we are going to pop properties\n shallow_copy = {**blob}\n for time_based_prop in time_based_props:\n time_value = shallow_copy.pop(time_based_prop, None)\n expected_time_value = expected_blob.pop(time_based_prop, None)\n assert_almost_equal(\n time_value,\n expected_time_value,\n datetime.timedelta(minutes=1),\n prop_name=time_based_prop,\n )\n\n for irregular_prop in irregular_props:\n shallow_copy.pop(irregular_prop, None)\n expected_blob.pop(irregular_prop, None)\n\n content_settings = dict(sorted(shallow_copy.pop(\"content_settings\", {}).items()))\n expected_content_settings = dict(\n sorted(expected_blob.pop(\"content_settings\", {}).items())\n )\n assert content_settings == expected_content_settings\n assert shallow_copy == expected_blob\n\n\ndef assert_blobs_equals(blobs, expected_blobs):\n assert len(blobs) == len(expected_blobs)\n for blob, expected_blob in zip(blobs, expected_blobs):\n assert_blob_equals(blob, expected_blob)\n\n\ndef test_ls(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR,\n )\n\n ## these are containers\n assert fs.ls(\"\") == [\"data\"]\n assert fs.ls(\"/\") == [\"data\"]\n assert fs.ls(\".\") == [\"data\"]\n assert fs.ls(\"*\") == [\"data\"]\n\n ## these are top-level directories and files\n assert fs.ls(\"data\") == [\"data/root\", \"data/top_file.txt\"]\n assert fs.ls(\"/data\") == [\"data/root\", \"data/top_file.txt\"]\n\n # root contains files and directories\n assert fs.ls(\"data/root\") == [\n \"data/root/a\",\n \"data/root/a1\",\n \"data/root/b\",\n \"data/root/c\",\n \"data/root/d\",\n \"data/root/e+f\",\n \"data/root/rfile.txt\",\n ]\n assert fs.ls(\"data/root/\") == [\n \"data/root/a\",\n \"data/root/a1\",\n \"data/root/b\",\n \"data/root/c\",\n \"data/root/d\",\n \"data/root/e+f\",\n \"data/root/rfile.txt\",\n ]\n\n ## slashes are not needed, but accepted\n assert fs.ls(\"data/root/a\") == [\"data/root/a/file.txt\"]\n assert fs.ls(\"data/root/a/\") == [\"data/root/a/file.txt\"]\n assert fs.ls(\"/data/root/a\") == 
[\"data/root/a/file.txt\"]\n assert fs.ls(\"/data/root/a/\") == [\"data/root/a/file.txt\"]\n assert fs.ls(\"data/root/b\") == [\"data/root/b/file.txt\"]\n assert fs.ls(\"data/root/b/\") == [\"data/root/b/file.txt\"]\n assert fs.ls(\"data/root/a1\") == [\"data/root/a1/file1.txt\"]\n assert fs.ls(\"data/root/a1/\") == [\"data/root/a1/file1.txt\"]\n assert fs.ls(\"data/root/e+f\") == [\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n ]\n assert fs.ls(\"data/root/e+f/\") == [\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n ]\n\n ## file details\n files = fs.ls(\"data/root/a/file.txt\", detail=True)\n assert_blobs_equals(\n files,\n [\n {\n \"name\": \"data/root/a/file.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n }\n ],\n )\n\n # c has two files\n assert_blobs_equals(\n fs.ls(\"data/root/c\", detail=True),\n [\n {\n \"name\": \"data/root/c/file1.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n },\n {\n \"name\": \"data/root/c/file2.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n },\n ],\n )\n\n # with metadata\n assert_blobs_equals(\n fs.ls(\"data/root/d\", detail=True),\n [\n {\n \"name\": \"data/root/d/file_with_metadata.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {\"meta\": \"data\"},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": 
None,\n },\n }\n ],\n )\n\n ## if not direct match is found throws error\n with pytest.raises(FileNotFoundError):\n fs.ls(\"not-a-container\")\n\n with pytest.raises(FileNotFoundError):\n fs.ls(\"data/not-a-directory/\")\n\n with pytest.raises(FileNotFoundError):\n fs.ls(\"data/root/not-a-file.txt\")\n\n\ndef test_ls_no_listings_cache(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name,\n connection_string=CONN_STR,\n use_listings_cache=False,\n )\n result = fs.ls(\"data/root\")\n assert len(result) > 0 # some state leaking between tests\n\n\ndef test_info(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n container_info = fs.info(\"data\")\n assert_blob_equals(\n container_info,\n {\n \"name\": \"data\",\n \"type\": \"directory\",\n \"size\": None,\n \"deleted\": None,\n \"last_modified\": storage.insert_time,\n \"metadata\": None,\n },\n )\n\n container2_info = fs.info(\"data/root\")\n assert_blob_equals(\n container2_info, {\"name\": \"data/root\", \"type\": \"directory\", \"size\": None}\n )\n\n dir_info = fs.info(\"data/root/c\")\n assert_blob_equals(\n dir_info, {\"name\": \"data/root/c\", \"type\": \"directory\", \"size\": None}\n )\n\n file_info = fs.info(\"data/root/a/file.txt\")\n assert_blob_equals(\n file_info,\n {\n \"name\": \"data/root/a/file.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n },\n )\n file_with_meta_info = fs.info(\"data/root/d/file_with_metadata.txt\")\n assert_blob_equals(\n file_with_meta_info,\n {\n \"name\": \"data/root/d/file_with_metadata.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {\"meta\": \"data\"},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n },\n )\n\n\ndef test_find(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n ## just the directory name\n assert fs.find(\"data/root/a\") == [\"data/root/a/file.txt\"] # NOQA\n assert fs.find(\"data/root/a/\") == [\"data/root/a/file.txt\"] # NOQA\n\n assert fs.find(\"data/root/c\") == [\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n ]\n assert fs.find(\"data/root/c/\") == [\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n ]\n\n ## all files\n assert fs.find(\"data/root\") == [\n \"data/root/a/file.txt\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/file.txt\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n 
\"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n assert fs.find(\"data/root\", withdirs=False) == [\n \"data/root/a/file.txt\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/file.txt\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n\n # all files and directories\n assert fs.find(\"data/root\", withdirs=True) == [\n \"data/root/a/\",\n \"data/root/a/file.txt\",\n \"data/root/a1/\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/\",\n \"data/root/b/file.txt\",\n \"data/root/c/\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d/\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n assert fs.find(\"data/root/\", withdirs=True) == [\n \"data/root/a/\",\n \"data/root/a/file.txt\",\n \"data/root/a1/\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/\",\n \"data/root/b/file.txt\",\n \"data/root/c/\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d/\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n\n ## missing\n assert fs.find(\"data/missing\") == []\n\n ## prefix search\n assert fs.find(\"data/root\", prefix=\"a\") == [\n \"data/root/a/file.txt\",\n \"data/root/a1/file1.txt\",\n ]\n\n assert fs.find(\"data/root\", prefix=\"a\", withdirs=True) == [\n \"data/root/a/\",\n \"data/root/a/file.txt\",\n \"data/root/a1/\",\n \"data/root/a1/file1.txt\",\n ]\n\n find_results = fs.find(\"data/root\", prefix=\"a1\", withdirs=True, detail=True)\n assert_blobs_equals(\n list(find_results.values()),\n [\n {\"name\": \"data/root/a1/\", \"size\": 0, \"type\": \"directory\"},\n {\n \"name\": \"data/root/a1/file1.txt\",\n \"size\": 10,\n \"type\": \"file\",\n \"archive_status\": None,\n \"deleted\": None,\n \"creation_time\": storage.insert_time,\n \"last_modified\": storage.insert_time,\n \"deleted_time\": None,\n \"last_accessed_on\": None,\n \"remaining_retention_days\": None,\n \"tag_count\": None,\n \"tags\": None,\n \"metadata\": {},\n \"content_settings\": {\n \"content_type\": \"application/octet-stream\",\n \"content_encoding\": None,\n \"content_language\": None,\n \"content_md5\": bytearray(\n b\"x\\x1e^$]i\\xb5f\\x97\\x9b\\x86\\xe2\\x8d#\\xf2\\xc7\"\n ),\n \"content_disposition\": None,\n \"cache_control\": None,\n },\n },\n ],\n )\n\n\ndef test_find_missing(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n assert fs.find(\"data/roo\") == []\n\n\ndef test_glob(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n ## just the directory name\n assert fs.glob(\"data/root\") == [\"data/root\"]\n\n # top-level contents of a directory\n assert fs.glob(\"data/root/\") == [\n \"data/root/a\",\n \"data/root/a1\",\n \"data/root/b\",\n \"data/root/c\",\n \"data/root/d\",\n \"data/root/e+f\",\n \"data/root/rfile.txt\",\n ]\n assert fs.glob(\"data/root/*\") == [\n \"data/root/a\",\n \"data/root/a1\",\n \"data/root/b\",\n \"data/root/c\",\n \"data/root/d\",\n \"data/root/e+f\",\n \"data/root/rfile.txt\",\n ]\n\n assert fs.glob(\"data/root/b/*\") == [\"data/root/b/file.txt\"] # NOQA\n assert 
fs.glob(\"data/root/b/**\") == [\"data/root/b/file.txt\"] # NOQA\n\n ## across directories\n assert fs.glob(\"data/root/*/file.txt\") == [\n \"data/root/a/file.txt\",\n \"data/root/b/file.txt\",\n ]\n\n ## regex match\n assert fs.glob(\"data/root/*/file[0-9].txt\") == [\n \"data/root/a1/file1.txt\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n ]\n\n ## text files\n assert fs.glob(\"data/root/*/file*.txt\") == [\n \"data/root/a/file.txt\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/file.txt\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n ]\n\n ## all text files\n assert fs.glob(\"data/**/*.txt\") == [\n \"data/root/a/file.txt\",\n \"data/root/a1/file1.txt\",\n \"data/root/b/file.txt\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n\n ## all files\n assert fs.glob(\"data/root/**\") == [\n \"data/root/a\",\n \"data/root/a/file.txt\",\n \"data/root/a1\",\n \"data/root/a1/file1.txt\",\n \"data/root/b\",\n \"data/root/b/file.txt\",\n \"data/root/c\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n assert fs.glob(\"data/roo**\") == [\n \"data/root\",\n \"data/root/a\",\n \"data/root/a/file.txt\",\n \"data/root/a1\",\n \"data/root/a1/file1.txt\",\n \"data/root/b\",\n \"data/root/b/file.txt\",\n \"data/root/c\",\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n \"data/root/d\",\n \"data/root/d/file_with_metadata.txt\",\n \"data/root/e+f\",\n \"data/root/e+f/file1.txt\",\n \"data/root/e+f/file2.txt\",\n \"data/root/rfile.txt\",\n ]\n\n ## missing\n assert fs.glob(\"data/missing/*\") == []\n\n\ndef test_open_file(storage, mocker):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n f = fs.open(\"/data/root/a/file.txt\")\n\n result = f.read()\n assert result == b\"0123456789\"\n\n close = mocker.patch.object(f.container_client, \"close\")\n f.close()\n print(fs.ls(\"/data/root/a\"))\n\n close.assert_called_once()\n\n\n# def test_open_context_manager(storage, mocker):\n# \"\"\"\n# Memory profiling shows this is working, but its failing the test\n# Due to the behavior of the MagicMock. 
Needs to be fixed\n# \"\"\"\n# # \"test closing azure client with context manager\"\n# fs = AzureBlobFileSystem(\n# account_name=storage.account_name, connection_string=CONN_STR\n# )\n\n# with fs.open(\"/data/root/a/file.txt\") as f:\n# close = mocker.patch.object(f.container_client, \"close\")\n# result = f.read()\n# assert result == b\"0123456789\"\n# close.assert_called_once()\n\n\ndef test_rm(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.rm(\"/data/root/a/file.txt\")\n\n with pytest.raises(FileNotFoundError):\n fs.ls(\"/data/root/a/file.txt\", refresh=True)\n\n\ndef test_rm_recursive(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n assert \"data/root/c\" in fs.ls(\"/data/root\")\n\n assert fs.ls(\"data/root/c\") == [\n \"data/root/c/file1.txt\",\n \"data/root/c/file2.txt\",\n ]\n fs.rm(\"data/root/c\", recursive=True)\n assert \"data/root/c\" not in fs.ls(\"/data/root\")\n\n with pytest.raises(FileNotFoundError):\n fs.ls(\"data/root/c\")\n\n\ndef test_mkdir(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR,\n )\n\n # Verify mkdir will create a new container when create_parents is True\n fs.mkdir(\"new-container\", create_parents=True)\n assert \"new-container\" in fs.ls(\".\")\n fs.rm(\"new-container\")\n\n # Verify a new container will not be created when create_parents\n # is False\n with pytest.raises(PermissionError):\n fs.mkdir(\"new-container\", create_parents=False)\n\n with pytest.raises(ValueError):\n fs.mkdir(\"bad_container_name\")\n\n # Test creating subdirectory when container does not exist\n # Since mkdir is a no-op, if create_parents=True, it will create\n # the top level container, but will NOT create nested directories\n fs.mkdir(\"new-container/dir\", create_parents=True)\n assert \"new-container/dir\" not in fs.ls(\"new-container\")\n assert \"new-container\" in fs.ls(\".\")\n fs.rm(\"new-container\", recursive=True)\n\n # Test that creating a directory when already exists passes\n fs.mkdir(\"data\")\n assert \"data\" in fs.ls(\".\")\n\n # Test raising error when container does not exist\n with pytest.raises(PermissionError):\n fs.mkdir(\"new-container/dir\", create_parents=False)\n\n\ndef test_makedir(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR,\n )\n\n # Verify makedir will create a new container when create_parents is True\n with pytest.raises(FileExistsError):\n fs.makedir(\"data\", exist_ok=False)\n\n # The container and directory already exist. Should pass\n fs.makedir(\"data\", exist_ok=True)\n assert \"data\" in fs.ls(\".\")\n\n # Test creating subdirectory when container does not exist. 
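The mkdir/makedir no-op behavior these tests probe comes from blob storage having no first-class directories: they are inferred from key prefixes. A toy illustration of that inference (pure Python, no Azure involved):

```python
# Blob stores only persist keys; "directories" are inferred from prefixes.
keys = ["data/root/a/file.txt", "data/root/c/file1.txt", "data/top_file.txt"]

def implied_dirs(keys):
    dirs = set()
    for key in keys:
        parts = key.split("/")[:-1]
        for i in range(1, len(parts) + 1):
            dirs.add("/".join(parts[:i]))
    return dirs

assert implied_dirs(keys) == {"data", "data/root", "data/root/a", "data/root/c"}
```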
Again\n # Since makedir is a no-op, this can create the container, but not write nested directories\n fs.makedir(\"new-container/dir\")\n assert \"new-container/dir\" not in fs.ls(\"new-container\")\n assert \"new-container\" in fs.ls(\".\")\n fs.rm(\"new-container\", recursive=True)\n\n\ndef test_makedir_rmdir(storage, caplog):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR,\n )\n\n fs.makedir(\"new-container\")\n assert \"new-container\" in fs.ls(\"\")\n assert fs.ls(\"new-container\") == []\n\n with fs.open(path=\"new-container/file.txt\", mode=\"wb\") as f:\n f.write(b\"0123456789\")\n\n with fs.open(\"new-container/dir/file.txt\", \"wb\") as f:\n f.write(b\"0123456789\")\n\n with fs.open(\"new-container/dir/file2.txt\", \"wb\") as f:\n f.write(b\"0123456789\")\n\n # Verify that mkdir will raise an exception if the directory exists\n # and exist_ok is False\n with pytest.raises(FileExistsError):\n fs.makedir(\"new-container/dir/file.txt\", exist_ok=False)\n\n # mkdir should raise an error if the container exists and\n # we try to create a nested directory, with exist_ok=False\n with pytest.raises(FileExistsError):\n fs.makedir(\"new-container/dir2\", exist_ok=False)\n\n # Check that calling makedir on an existing nested file passes\n # if exist_ok is True\n fs.makedir(\"new-container/dir/file2.txt\", exist_ok=True)\n assert \"new-container/dir/file2.txt\" in fs.ls(\"new-container/dir\")\n\n # Also verify you can make a nested directory structure\n with fs.open(\"new-container/dir2/file.txt\", \"wb\") as f:\n f.write(b\"0123456789\")\n assert \"new-container/dir2/file.txt\" in fs.ls(\"new-container/dir2\")\n fs.rm(\"new-container/dir2\", recursive=True)\n\n fs.rm(\"new-container/dir\", recursive=True)\n fs.touch(\"new-container/file2.txt\")\n assert fs.ls(\"new-container\") == [\n \"new-container/file.txt\",\n \"new-container/file2.txt\",\n ]\n\n fs.rm(\"new-container/file.txt\")\n fs.rm(\"new-container/file2.txt\")\n fs.rmdir(\"new-container\")\n\n assert \"new-container\" not in fs.ls(\"\")\n\n\n@pytest.mark.skip\ndef test_append_operation(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"append-container\")\n\n # Check that appending to an existing file works as expected\n with fs.open(\"append-container/append_file.txt\", \"ab\") as f:\n f.write(b\"0123456789\")\n with fs.open(\"append-container/append_file.txt\", \"ab\") as f:\n f.write(b\"0123456789\")\n with fs.open(\"append-container/append_file.txt\", \"rb\") as f:\n outfile = f.read()\n assert outfile == b\"01234567890123456789\"\n\n fs.rm(\"append-container\", recursive=True)\n\n\ndef test_mkdir_rm_recursive(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n fs.mkdir(\"test-mkdir-rm-recursive\")\n assert \"test-mkdir-rm-recursive\" in fs.ls(\"\")\n\n with fs.open(\"test-mkdir-rm-recursive/file.txt\", \"wb\") as f:\n f.write(b\"0123456789\")\n\n with fs.open(\"test-mkdir-rm-recursive/dir/file.txt\", \"wb\") as f:\n f.write(b\"ABCD\")\n\n with fs.open(\"test-mkdir-rm-recursive/dir/file2.txt\", \"wb\") as f:\n f.write(b\"abcdef\")\n\n assert fs.find(\"test-mkdir-rm-recursive\") == [\n \"test-mkdir-rm-recursive/dir/file.txt\",\n \"test-mkdir-rm-recursive/dir/file2.txt\",\n \"test-mkdir-rm-recursive/file.txt\",\n ]\n\n fs.rm(\"test-mkdir-rm-recursive\", recursive=True)\n\n assert \"test-mkdir-rm-recursive\" not in fs.ls(\"\")\n assert 
fs.find(\"test-mkdir-rm-recursive\") == []\n\n\ndef test_deep_paths(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n fs.mkdir(\"test-deep\")\n assert \"test-deep\" in fs.ls(\"\")\n\n with fs.open(\"test-deep/a/b/c/file.txt\", \"wb\") as f:\n f.write(b\"0123456789\")\n\n assert fs.ls(\"test-deep\") == [\"test-deep/a\"]\n assert fs.ls(\"test-deep/\") == [\"test-deep/a\"]\n assert fs.ls(\"test-deep/a\") == [\"test-deep/a/b\"]\n assert fs.ls(\"test-deep/a/\") == [\"test-deep/a/b\"]\n assert fs.find(\"test-deep\") == [\"test-deep/a/b/c/file.txt\"]\n assert fs.find(\"test-deep/\") == [\"test-deep/a/b/c/file.txt\"]\n assert fs.find(\"test-deep/a\") == [\"test-deep/a/b/c/file.txt\"]\n assert fs.find(\"test-deep/a/\") == [\"test-deep/a/b/c/file.txt\"]\n\n fs.rm(\"test-deep\", recursive=True)\n\n assert \"test-deep\" not in fs.ls(\"\")\n assert fs.find(\"test-deep\") == []\n\n\ndef test_large_blob(storage):\n import hashlib\n import io\n import shutil\n from pathlib import Path\n\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n # create a 20MB byte array, ensure it's larger than blocksizes to force a\n # chuncked upload\n blob_size = 120_000_000\n # blob_size = 2_684_354_560\n assert blob_size > fs.blocksize\n assert blob_size > AzureBlobFile.DEFAULT_BLOCK_SIZE\n\n data = b\"1\" * blob_size\n _hash = hashlib.md5(data)\n expected = _hash.hexdigest()\n\n # create container\n fs.mkdir(\"chunk-container\")\n\n # upload the data using fs.open\n path = \"chunk-container/large-blob.bin\"\n with fs.open(path, \"ab\") as dst:\n dst.write(data)\n\n assert fs.exists(path)\n assert fs.size(path) == blob_size\n\n del data\n\n # download with fs.open\n bio = io.BytesIO()\n with fs.open(path, \"rb\") as src:\n shutil.copyfileobj(src, bio)\n\n # read back the data and calculate md5\n bio.seek(0)\n data = bio.read()\n _hash = hashlib.md5(data)\n result = _hash.hexdigest()\n\n assert expected == result\n\n # do the same but using upload/download and a tempdir\n path = path = \"chunk-container/large_blob2.bin\"\n with tempfile.TemporaryDirectory() as td:\n local_blob: Path = Path(td) / \"large_blob2.bin\"\n with local_blob.open(\"wb\") as fo:\n fo.write(data)\n assert local_blob.exists()\n assert local_blob.stat().st_size == blob_size\n\n fs.upload(str(local_blob), path)\n assert fs.exists(path)\n assert fs.size(path) == blob_size\n\n # download now\n local_blob.unlink()\n fs.download(path, str(local_blob))\n assert local_blob.exists()\n assert local_blob.stat().st_size == blob_size\n\n\ndef test_dask_parquet(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"test\")\n STORAGE_OPTIONS = {\n \"account_name\": \"devstoreaccount1\",\n \"connection_string\": CONN_STR,\n }\n df = pd.DataFrame(\n {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [2, 4, 6, 8],\n \"index_key\": [1, 1, 2, 2],\n \"partition_key\": [1, 1, 2, 2],\n }\n )\n\n dask_dataframe = dd.from_pandas(df, npartitions=1)\n for protocol in [\"abfs\", \"az\"]:\n dask_dataframe.to_parquet(\n \"{}://test/test_group.parquet\".format(protocol),\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n )\n\n fs = AzureBlobFileSystem(**STORAGE_OPTIONS)\n assert fs.ls(\"test/test_group.parquet\") == [\n \"test/test_group.parquet/_common_metadata\",\n \"test/test_group.parquet/_metadata\",\n \"test/test_group.parquet/part.0.parquet\",\n ]\n fs.rm(\"test/test_group.parquet\")\n\n df_test = 
dd.read_parquet(\n \"abfs://test/test_group.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n ).compute()\n assert_frame_equal(df, df_test)\n\n A = np.random.randint(0, 100, size=(10000, 4))\n df2 = pd.DataFrame(data=A, columns=list(\"ABCD\"))\n ddf2 = dd.from_pandas(df2, npartitions=4)\n dd.to_parquet(\n ddf2,\n \"abfs://test/test_group2.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n )\n assert fs.ls(\"test/test_group2.parquet\") == [\n \"test/test_group2.parquet/_common_metadata\",\n \"test/test_group2.parquet/_metadata\",\n \"test/test_group2.parquet/part.0.parquet\",\n \"test/test_group2.parquet/part.1.parquet\",\n \"test/test_group2.parquet/part.2.parquet\",\n \"test/test_group2.parquet/part.3.parquet\",\n ]\n df2_test = dd.read_parquet(\n \"abfs://test/test_group2.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n ).compute()\n assert_frame_equal(df2, df2_test)\n\n a = np.full(shape=(10000, 1), fill_value=1)\n b = np.full(shape=(10000, 1), fill_value=2)\n c = np.full(shape=(10000, 1), fill_value=3)\n d = np.full(shape=(10000, 1), fill_value=4)\n B = np.concatenate((a, b, c, d), axis=1)\n df3 = pd.DataFrame(data=B, columns=list(\"ABCD\"))\n ddf3 = dd.from_pandas(df3, npartitions=4)\n dd.to_parquet(\n ddf3,\n \"abfs://test/test_group3.parquet\",\n partition_on=[\"A\", \"B\"],\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n )\n assert fs.glob(\"test/test_group3.parquet/*\") == [\n \"test/test_group3.parquet/A=1\",\n \"test/test_group3.parquet/_common_metadata\",\n \"test/test_group3.parquet/_metadata\",\n ]\n df3_test = dd.read_parquet(\n \"abfs://test/test_group3.parquet\",\n filters=[(\"A\", \"=\", 1)],\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n ).compute()\n df3_test = df3_test[[\"A\", \"B\", \"C\", \"D\"]]\n df3_test = df3_test[[\"A\", \"B\", \"C\", \"D\"]].astype(int)\n assert_frame_equal(df3, df3_test)\n\n A = np.random.randint(0, 100, size=(10000, 4))\n df4 = pd.DataFrame(data=A, columns=list(\"ABCD\"))\n ddf4 = dd.from_pandas(df4, npartitions=4)\n dd.to_parquet(\n ddf4,\n \"abfs://test/test_group4.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n flavor=\"spark\",\n write_statistics=False,\n )\n fs.rmdir(\"test/test_group4.parquet/_common_metadata\", recursive=True)\n fs.rmdir(\"test/test_group4.parquet/_metadata\", recursive=True)\n fs.rm(\"test/test_group4.parquet/_common_metadata\")\n fs.rm(\"test/test_group4.parquet/_metadata\")\n assert fs.ls(\"test/test_group4.parquet\") == [\n \"test/test_group4.parquet/part.0.parquet\",\n \"test/test_group4.parquet/part.1.parquet\",\n \"test/test_group4.parquet/part.2.parquet\",\n \"test/test_group4.parquet/part.3.parquet\",\n ]\n df4_test = dd.read_parquet(\n \"abfs://test/test_group4.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n ).compute()\n assert_frame_equal(df4, df4_test)\n\n A = np.random.randint(0, 100, size=(10000, 4))\n df5 = pd.DataFrame(data=A, columns=list(\"ABCD\"))\n ddf5 = dd.from_pandas(df5, npartitions=4)\n dd.to_parquet(\n ddf5,\n \"abfs://test/test group5.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n )\n assert fs.ls(\"test/test group5.parquet\") == [\n \"test/test group5.parquet/_common_metadata\",\n \"test/test group5.parquet/_metadata\",\n \"test/test group5.parquet/part.0.parquet\",\n \"test/test group5.parquet/part.1.parquet\",\n \"test/test group5.parquet/part.2.parquet\",\n \"test/test group5.parquet/part.3.parquet\",\n ]\n df5_test = dd.read_parquet(\n 
\"abfs://test/test group5.parquet\",\n storage_options=STORAGE_OPTIONS,\n engine=\"pyarrow\",\n ).compute()\n assert_frame_equal(df5, df5_test)\n\n\ndef test_metadata_write(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"test-metadata-write\")\n data = b\"0123456789\"\n metadata = {\"meta\": \"data\"}\n\n # standard blob type\n with fs.open(\"test-metadata-write/file.txt\", \"wb\", metadata=metadata) as f:\n f.write(data)\n info = fs.info(\"test-metadata-write/file.txt\")\n assert info[\"metadata\"] == metadata\n metadata_changed_on_write = {\"meta\": \"datum\"}\n with fs.open(\n \"test-metadata-write/file.txt\", \"wb\", metadata=metadata_changed_on_write\n ) as f:\n f.write(data)\n info = fs.info(\"test-metadata-write/file.txt\")\n assert info[\"metadata\"] == metadata_changed_on_write\n\n # append blob type\n new_metadata = {\"data\": \"meta\"}\n with fs.open(\"test-metadata-write/append-file.txt\", \"ab\", metadata=metadata) as f:\n f.write(data)\n\n # try change metadata on block appending\n with fs.open(\n \"test-metadata-write/append-file.txt\", \"ab\", metadata=new_metadata\n ) as f:\n f.write(data)\n info = fs.info(\"test-metadata-write/append-file.txt\")\n\n # azure blob client doesn't seem to support metadata mutation when appending blocks\n # lets be sure this behavior doesn't change as this would imply\n # a potential breaking change\n assert info[\"metadata\"] == metadata\n\n # getxattr / setxattr\n assert fs.getxattr(\"test-metadata-write/file.txt\", \"meta\") == \"datum\"\n fs.setxattrs(\"test-metadata-write/file.txt\", metadata=\"data2\")\n assert fs.getxattr(\"test-metadata-write/file.txt\", \"metadata\") == \"data2\"\n assert fs.info(\"test-metadata-write/file.txt\")[\"metadata\"] == {\"metadata\": \"data2\"}\n\n # empty file and nested directory\n with fs.open(\n \"test-metadata-write/a/b/c/nested-file.txt\", \"wb\", metadata=metadata\n ) as f:\n f.write(b\"\")\n assert fs.getxattr(\"test-metadata-write/a/b/c/nested-file.txt\", \"meta\") == \"data\"\n fs.setxattrs(\"test-metadata-write/a/b/c/nested-file.txt\", metadata=\"data2\")\n assert fs.info(\"test-metadata-write/a/b/c/nested-file.txt\")[\"metadata\"] == {\n \"metadata\": \"data2\"\n }\n fs.rmdir(\"test-metadata-write\")\n\n\ndef test_put_file(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n lfs = LocalFileSystem()\n\n fs.mkdir(\"putdir\")\n\n # Check that put on an empty file works\n with open(\"sample.txt\", \"wb\") as f:\n f.write(b\"\")\n fs.put(\"sample.txt\", \"putdir/sample.txt\")\n fs.get(\"putdir/sample.txt\", \"sample2.txt\")\n\n with open(\"sample.txt\", \"rb\") as f:\n f1 = f.read()\n with open(\"sample2.txt\", \"rb\") as f:\n f2 = f.read()\n assert f1 == f2\n\n lfs.rm(\"sample.txt\")\n lfs.rm(\"sample2.txt\")\n\n # Check that put on a file with data works\n with tempfile.NamedTemporaryFile(\"wb\") as f:\n f.write(b\"01234567890\")\n\n fs.put(f.name, \"putdir/sample3.txt\")\n with open(f.name, \"rb\") as g:\n f3 = g.read()\n\n with tempfile.TemporaryDirectory() as td:\n dst = os.path.join(td, \"sample4.txt\")\n fs.get(\"putdir/sample3.txt\", dst)\n\n with open(dst, \"rb\") as f:\n f4 = f.read()\n\n assert f3 == f4\n fs.rm(\"putdir\", recursive=True)\n\n\ndef test_isdir(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.touch(\"data/root/a/file.txt\")\n assert fs.isdir(\"data\") is True\n assert 
fs.isdir(\"data/top_file.txt\") is False\n assert fs.isdir(\"data/root\") is True\n assert fs.isdir(\"data/root/\") is True\n assert fs.isdir(\"data/root/rfile.txt\") is False\n assert fs.isdir(\"data/root/a\") is True\n assert fs.isdir(\"data/root/a/\") is True\n\n\ndef test_isfile(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n assert fs.isfile(\"data\") is False\n assert fs.isfile(\"data/top_file.txt\") is True\n assert fs.isfile(\"data/root\") is False\n assert fs.isfile(\"data/root/\") is False\n assert fs.isfile(\"data/root/rfile.txt\") is True\n fs.touch(\"data/root/null_file.txt\")\n assert fs.isfile(\"data/root/null_file.txt\") is True\n\n\ndef test_isdir(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.touch(\"data/root/a/file.txt\")\n assert fs.isdir(\"data\") is True\n assert fs.isdir(\"data/top_file.txt\") is False\n assert fs.isdir(\"data/root\") is True\n assert fs.isdir(\"data/root/\") is True\n assert fs.isdir(\"data/root/rfile.txt\") is False\n assert fs.isdir(\"data/root/a\") is True\n assert fs.isdir(\"data/root/a/\") is True\n\n\ndef test_isdir_cache(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n print(\"checking isdir cache\")\n files = fs.ls(\"data/root\") # noqa: F841\n assert fs.isdir(\"data/root/a\") is True\n assert fs.isdir(\"data/root/a/\") is True\n assert fs.isdir(\"data/root/rfile.txt\") is False\n\n\ndef test_cat(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"catdir\")\n data = b\"0123456789\"\n with fs.open(\"catdir/catfile.txt\", \"wb\") as f:\n f.write(data)\n result = fs.cat(\"catdir/catfile.txt\")\n assert result == data\n fs.rm(\"catdir/catfile.txt\")\n\n\ndef test_cat_file(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"catdir\")\n data = b\"0123456789\"\n with fs.open(\"catdir/catfile.txt\", \"wb\") as f:\n f.write(data)\n\n result = fs.cat_file(\"catdir/catfile.txt\", start=1, end=2)\n assert result == b\"1\"\n\n result = fs.cat_file(\"catdir/catfile.txt\", start=8)\n assert result == b\"89\"\n\n result = fs.cat_file(\"catdir/catfile.txt\", end=2)\n assert result == b\"01\"\n\n result = fs.cat_file(\"abfs://catdir/catfile.txt\")\n assert result == data\n fs.rm(\"catdir/catfile.txt\")\n\n\ndef test_cat_file_missing(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"catdir\")\n with pytest.raises(FileNotFoundError):\n fs.cat_file(\"catdir/not/exist\")\n\n with pytest.raises(FileNotFoundError):\n fs.cat_file(\"does/not/exist\")\n\n\ndef test_url(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR, account_key=KEY\n )\n fs.mkdir(\"catdir\")\n data = b\"0123456789\"\n with fs.open(\"catdir/catfile.txt\", \"wb\") as f:\n f.write(data)\n\n import requests\n\n r = requests.get(fs.url(\"catdir/catfile.txt\"))\n assert r.status_code == 200\n assert r.content == data\n\n fs.rm(\"catdir/catfile.txt\")\n\n\ndef test_cp_file(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n fs.mkdir(\"homedir\")\n fs.mkdir(\"homedir/enddir\")\n fs.touch(\"homedir/startdir/test_file.txt\")\n fs.cp_file(\"homedir/startdir/test_file.txt\", \"homedir/enddir/test_file.txt\")\n files = 
fs.ls(\"homedir/enddir\")\n assert \"homedir/enddir/test_file.txt\" in files\n\n fs.rm(\"homedir\", recursive=True)\n\n\ndef test_exists(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n assert fs.exists(\"data/top_file.txt\")\n assert fs.exists(\"data\")\n assert fs.exists(\"data/\")\n assert not fs.exists(\"non-existent-container\")\n assert not fs.exists(\"non-existent-container/\")\n assert fs.exists(\"\")\n assert not fs.exists(\"data/not-a-key\")\n\n\ndef test_exists_directory(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n\n fs.mkdir(\"temp-exists\")\n fs.touch(\"temp-exists/data/data.txt\")\n fs.touch(\"temp-exists/data/something/data.txt\")\n fs.invalidate_cache()\n\n assert fs.exists(\"temp-exists/data/something/\")\n assert fs.exists(\"temp-exists/data/something\")\n assert fs.exists(\"temp-exists/data/\")\n assert fs.exists(\"temp-exists/data\")\n assert fs.exists(\"temp-exists/\")\n assert fs.exists(\"temp-exists\")\n\n\ndef test_find_with_prefix(storage):\n fs = AzureBlobFileSystem(\n account_name=storage.account_name, connection_string=CONN_STR\n )\n test_bucket_name = \"data\"\n\n for cursor in range(25):\n fs.touch(test_bucket_name + f\"/prefixes/test_{cursor}\")\n\n fs.touch(test_bucket_name + \"/prefixes2\")\n assert len(fs.find(test_bucket_name + \"/prefixes\")) == 25\n assert len(fs.find(test_bucket_name, prefix=\"prefixes\")) == 26\n\n assert len(fs.find(test_bucket_name + \"/prefixes/test_\")) == 0\n assert len(fs.find(test_bucket_name + \"/prefixes\", prefix=\"test_\")) == 25\n assert len(fs.find(test_bucket_name + \"/prefixes/\", prefix=\"test_\")) == 25\n\n test_1s = fs.find(test_bucket_name + \"/prefixes/test_1\")\n assert len(test_1s) == 1\n assert test_1s[0] == test_bucket_name + \"/prefixes/test_1\"\n\n test_1s = fs.find(test_bucket_name + \"/prefixes/\", prefix=\"test_1\")\n assert len(test_1s) == 11\n assert test_1s == [test_bucket_name + \"/prefixes/test_1\"] + [\n test_bucket_name + f\"/prefixes/test_{cursor}\" for cursor in range(10, 20)\n ]\n","sub_path":"adlfs/tests/test_spec.py","file_name":"test_spec.py","file_ext":"py","file_size_in_byte":42939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"180798760","text":"# 파라메트릭 서치\ndef remove_rock(rocks, mid, n):\n prev = 0\n remove = 0\n\n for r in rocks:\n if r - prev > mid:\n prev = r\n else: # mid 보다 작은 값이 있다면 제거해야 할 돌 갯수 증가\n remove += 1\n if remove > n: # 정해진 돌보다 많이 제거해야 한다면\n return True\n else:\n return False\n\n\ndef solution(distance, rocks, n):\n low = 0\n high = distance\n rocks.sort()\n while low <= high:\n mid = (low + high) >> 1 # mid => 정답값 예측\n if remove_rock(rocks, mid, n):\n high = mid - 1 # mid 값이 높은 것\n else:\n low = mid + 1 # mid 값이 낮은 것\n return low\n\nprint(solution(25, [2, 14, 11, 21, 17], 2))","sub_path":"Programmers/Lv4/Lv4_징검다리.py","file_name":"Lv4_징검다리.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"436635003","text":"from __future__ import with_statement\nimport time\nimport tempfile\n\nimport mock\n\nfrom twisted.trial import unittest\n\nimport buildbotcustom.misc\nfrom buildbotcustom.misc import _nextSlowIdleSlave, _nextL10nSlave,\\\n _nextFastSlave, _nextFastReservedSlave, _nextSlowSlave,\\\n setReservedFileName\n\nclass TestNextSlaveFuncs(unittest.TestCase):\n def setUp(self):\n # Reset these each time\n 
buildbotcustom.misc.fastRegexes = ['fast']\n buildbotcustom.misc.nReservedFastSlaves = 0\n buildbotcustom.misc.nReservedSlowSlaves = 0\n\n # Prevent looking for reserved slaves file\n buildbotcustom.misc._checkedReservedSlaveFile = time.time()\n\n self.slaves = slaves = []\n for name in ('fast1', 'fast2', 'fast3', 'slow1', 'slow2', 'slow3'):\n slave = mock.Mock()\n slave.slave.slavename = name\n slaves.append(slave)\n self.slow_slaves = [s for s in self.slaves if \"slow\" in s.slave.slavename]\n self.fast_slaves = [s for s in self.slaves if \"fast\" in s.slave.slavename]\n\n self.builder = builder = mock.Mock()\n builder.builder_status.buildCache.keys.return_value = []\n builder.slaves = self.slaves\n\n def test_nextFastSlave_AllAvail(self):\n \"\"\"Test that _nextFastSlave and _nextFastReservedSlave return a fast\n slave when all slaves are available.\"\"\"\n for func in _nextFastReservedSlave, _nextFastSlave:\n slave = func(self.builder, self.slaves, only_fast=True)\n self.assert_(slave.slave.slavename.startswith(\"fast\"))\n\n def test_nextFastSlave_OnlySlowAvail(self):\n \"\"\"Test that _nextFastSlave and _nextFastReservedSlave return None\n slave when only slow slaves are available, and only_fast is True.\"\"\"\n for func in _nextFastReservedSlave, _nextFastSlave:\n slave = func(self.builder, self.slow_slaves, only_fast=True)\n self.assert_(slave is None)\n\n def test_nextFastSlave_OnlySlowAvail_notOnlyFast(self):\n \"\"\"Test that _nextFastSlave and _nextFastReservedSlave return a slow\n slave when only slow slaves are available and only_fast is False.\"\"\"\n for func in _nextFastReservedSlave, _nextFastSlave:\n slave = func(self.builder, self.slow_slaves, only_fast=False)\n self.assert_(slave.slave.slavename.startswith(\"slow\"))\n\n def test_nextFastReservedSlave_reserved(self):\n \"\"\"Test that _nextFastReservedSlave returns a fast slave if there's one\n reserved.\"\"\"\n buildbotcustom.misc.nReservedFastSlaves = 1\n\n # Only one fast slave available\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n slave = _nextFastReservedSlave(self.builder, available_slaves)\n self.assert_(slave.slave.slavename == \"fast2\")\n\n def test_nextFastSlave_reserved(self):\n \"\"\"Test that _nextFastSlave returns None if there's one slave\n reserved.\"\"\"\n buildbotcustom.misc.nReservedFastSlaves = 1\n\n # Only one fast slave available\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n slave = _nextFastSlave(self.builder, available_slaves)\n self.assert_(slave is None)\n\n def test_nextFastSlave_allslow(self):\n \"\"\"Test that _nextFastSlave works if the builder is configured with\n just slow slaves. 
This handles the case for platforms that don't have a\n fast/slow distinction.\"\"\"\n buildbotcustom.misc.nReservedFastSlaves = 1\n self.builder.slaves = self.slow_slaves\n\n slave = _nextFastSlave(self.builder, self.slow_slaves, only_fast=True)\n self.assert_(slave.slavename.startswith('slow'))\n\n def test_nextSlowSlave(self):\n \"\"\"Test that _nextSlowSlave returns a slow slave if one is available.\"\"\"\n slave = _nextSlowSlave(self.builder, self.slaves)\n self.assert_(slave.slave.slavename.startswith(\"slow\"))\n\n def test_nextSlowSlave_OnlyFastAvail(self):\n \"\"\"Test that _nextSlowSlave returns a fast slave if no slow slaves are\n available.\"\"\"\n slave = _nextSlowSlave(self.builder, self.fast_slaves)\n self.assert_(slave.slave.slavename.startswith(\"fast\"))\n\n def test_nextSlowIdleSlave_avail(self):\n \"\"\"Test that _nextSlowIdleSlave returns a slow slave if enough slow\n slaves are available.\"\"\"\n func = _nextSlowIdleSlave(1)\n slave = func(self.builder, self.slaves)\n self.assert_(slave.slave.slavename.startswith(\"slow\"))\n\n def test_nextSlowIdleSlave_unavail(self):\n \"\"\"Test that _nextSlowIdleSlave returns None if not enough slow\n slaves are available.\"\"\"\n func = _nextSlowIdleSlave(5)\n slave = func(self.builder, self.slaves)\n self.assert_(slave is None)\n\n def test_nextL10nSlave_avail(self):\n \"\"\"Test that _nextL10nSlave returns a slow slave if the first slow\n slave is available.\"\"\"\n func = _nextL10nSlave(1)\n slave = func(self.builder, self.slaves)\n self.assert_(slave.slave.slavename == 'slow1')\n\n def test_nextL10nSlave_unavail(self):\n \"\"\"Test that _nextL10nSlave returns None if the first slow slave is not\n available.\"\"\"\n func = _nextL10nSlave(1)\n available_slaves = [s for s in self.slaves if s.slave.slavename != 'slow1']\n slave = func(self.builder, available_slaves)\n self.assert_(slave is None)\n\n def test_update_reserved(self):\n \"\"\"Test that updates to the reserved file are obeyed, and that calls to\n the _nextFast functions pick it up.\"\"\"\n reservedFile = tempfile.NamedTemporaryFile()\n buildbotcustom.misc._checkedReservedSlaveFile = 0\n # Need to fake out time.time\n with mock.patch.object(time, 'time') as time_method:\n setReservedFileName(reservedFile.name)\n time_method.return_value = 0\n self.assertEquals(buildbotcustom.misc.nReservedFastSlaves, 0)\n\n # Only one fast slave available, but none are reserved yet\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n slave = _nextFastSlave(self.builder, available_slaves)\n self.assert_(slave.slave.slavename == 'fast2')\n\n # Reserve 1 slave\n reservedFile.write('1')\n reservedFile.flush()\n time_method.return_value = 61\n\n # Only one fast slave available, but 1 is reserved\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n\n # Check that the regular function doesn't get it\n slave = _nextFastSlave(self.builder, available_slaves, only_fast=True)\n self.assertEquals(buildbotcustom.misc.nReservedFastSlaves, 1)\n self.assert_(slave is None)\n\n # But our reserved function now does\n slave = _nextFastReservedSlave(self.builder, available_slaves, only_fast=True)\n self.assert_(slave.slave.slavename == 'fast2')\n\n def test_update_reserved_blank(self):\n \"\"\"Test that updates to the reserved file are obeyed, and that calls to\n the _nextFast functions pick it up.\"\"\"\n reservedFile = tempfile.NamedTemporaryFile()\n reservedFile.write('5')\n reservedFile.flush()\n buildbotcustom.misc._checkedReservedSlaveFile = 0\n # 
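The reserved-slave tests freeze the clock with mock.patch.object so the file-check interval can be crossed deterministically. The same pattern in miniature; the tests above import the standalone mock package, while on Python 3 the equivalent lives at unittest.mock (is_stale is an illustrative stand-in for the cache-expiry check):

import time
from unittest import mock

def is_stale(last_checked, ttl=60):
    return time.time() - last_checked > ttl

with mock.patch.object(time, "time") as fake_time:
    fake_time.return_value = 0
    assert not is_stale(0)      # "now" is 0, nothing has expired
    fake_time.return_value = 61
    assert is_stale(0)          # one tick past the 60s window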
Need to fake out time.time\n with mock.patch.object(time, 'time') as time_method:\n setReservedFileName(reservedFile.name)\n time_method.return_value = 61\n self.assertEquals(buildbotcustom.misc.nReservedFastSlaves, 0)\n\n # Only one fast slave available, but all are reserved yet\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n slave = _nextFastSlave(self.builder, available_slaves)\n self.assert_(slave is None)\n self.assertEquals(buildbotcustom.misc.nReservedFastSlaves, 5)\n\n # Empty out reserved slaves file\n reservedFile.seek(0)\n reservedFile.write('')\n reservedFile.truncate()\n reservedFile.flush()\n time_method.return_value = buildbotcustom.misc._checkedReservedSlaveFile + 61\n\n # Only one fast slave available, but none are reserved\n available_slaves = [s for s in self.slaves if s.slave.slavename == 'fast2']\n\n # Check that the regular function gets it\n slave = _nextFastSlave(self.builder, available_slaves, only_fast=True)\n self.assertEquals(buildbotcustom.misc.nReservedFastSlaves, 0)\n self.assert_(slave.slave.slavename == 'fast2')\n","sub_path":"test/test_misc_nextslaves.py","file_name":"test_misc_nextslaves.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"418595571","text":"'''\n\nThis is on the latest version of camb (1.0.1)\n\n-- several features from the old one aren't working: check https://camb.readthedocs.io/en/latest/CAMBdemo.html\n\n\n\nhttps://www.codecogs.com/latex/eqneditor.php\n\n\\Omega_m h^2 : [ 0.12, 0.155] \\\\\n\\Omega_b h^2 : [ 0.0215, 0.0235] \\\\\n\\sigma_8 : [0.7, 0.9] \\\\\nn_s : [0.85, 1.05] \\\\\nh : [0.55, 0.85] \\\\\n\\tau : [0.01, 0.8] \\\\\n\nN_{eff} : [1, 5] \\\\\n\\sum m_\\nu : [0, 3] \\\\\nr_{0.05}: [0, 2] \\\\\n\n\\omega_0 : [-1.3, -0.7] \\\\\n\\omega_a : [-1.73. 
1.28] \\\\\n\\omega_\\nu : [0.0, 0.01]\n\n\n\n'''\n\n#################### LHC ################\n\n\n''' Latin hypercube design\nhttps://pythonhosted.org/pyDOE/randomized.html\n\n# /home/nes/MEGA/Google_drive/KU courses/Spring2017/SAMSI_May/AllV/latin.py\n\n'''\n\nimport numpy as np\nimport pyDOE as pyDOE\n\nfrom scipy.stats.distributions import norm\n\n\ndef rescale01(xmin, xmax, f):\n return (f - xmin) / (xmax - xmin)\n\n\n# import SetPub\n# SetPub.set_pub()\n\ntotalFiles = 2 #1024\nnum_para = 10\n\nnp.random.seed(17)\n\nPlotAll = False\nSaveCls = False\n###### NEED TO RECHECK THESE VALUES OMEGAM ~ 0.112\n\nOmegaM = np.linspace(0.10, 0.140, totalFiles)\nOmegab = np.linspace(0.0205, 0.0235, totalFiles)\nsigma8 = np.linspace(0.7, 0.9, totalFiles)\nh = np.linspace(0.55, 0.85, totalFiles)\nns = np.linspace(0.85, 1.05, totalFiles)\nOmega0 = np.linspace(-1.3, -0.7, totalFiles)\nOmegaA = np.linspace(-1.5, 1.0, totalFiles)\ntau = np.linspace(0.01, 0.6, totalFiles)\nmnu = np.linspace(0, 3, totalFiles)\nneff = np.linspace(1.5, 3.5, totalFiles) \n\n# OmegaA = np.linspace(-1.73, 1.28, totalFiles)\n# tau = np.linspace(0.01, 0.8, totalFiles)\n\n#################################################\n#################################################\n\nAllLabels = [r'$\\tilde{\\Omega}_m$', r'$\\tilde{\\Omega}_b$', r'$\\tilde{\\sigma}_8$', r'$\\tilde{h}$',\n r'$\\tilde{n}_s$', r'$\\tilde{\\Omega}_0$', r'$\\tilde{\\Omega}_a$', r'$\\tilde{\\tau}$',\n r'$\\sum m_\\nu$', r'$N_{eff}$']\n\nAllPara = np.vstack([OmegaM, Omegab, sigma8, h, ns, Omega0, OmegaA, tau, mnu, neff])\nprint(AllPara)\n\nlhd = pyDOE.lhs(num_para, samples=totalFiles, criterion=None) # c cm corr m\nprint(lhd)\n\n##\nif PlotAll:\n\timport matplotlib.pylab as plt\n\tf, a = plt.subplots(num_para, num_para, sharex=True, sharey=True)\n\tplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\n\tplt.rcParams.update({'font.size': 8})\n\n\tfor i in range(num_para):\n\t\tfor j in range(i+1):\n\t\t\tprint(i,j)\n\t\t\tif(i!=j):\n\t\t \t\ta[i, j].scatter(lhd[:, i], lhd[:, j], s=5)\n\t\t \t\ta[i, j].grid(True)\n\t\t\telse:\n\t\t \t\ta[i, i].text(0.4, 0.4, AllLabels[i], size = 'xx-large')\n\t\t \t\thist, bin_edges = np.histogram(lhd[:,i], density=True, bins=64)\n\t\t \t\ta[i,i].bar(bin_edges[:-1], hist/hist.max(), width=0.2)\n\t\t \t\tplt.xlim(0,1)\n\t\t \t\tplt.ylim(0,1)\n\n\n\tplt.savefig('../Cl_data/Plots/ExtendedPlots/ExtendedLatinSq.png', figsize=(10, 10))\n\tplt.show()\n\n\nidx = (lhd * totalFiles).astype(int)\n\nAllCombinations = np.zeros((totalFiles, num_para))\nfor i in range(num_para):\n AllCombinations[:, i] = AllPara[i][idx[:, i]]\n\nnp.savetxt('../Cl_data/Data/ExtendedLatinCosmoP5'+str(totalFiles)+'.txt', AllCombinations) #### no\n# saving files because the its random everytime\n\n\nAllCombinations = AllPara.T\nnp.savetxt('../Cl_data/Data/GridCosmoP5'+str(totalFiles)+'.txt', AllCombinations)\n\nprint(AllCombinations)\n############################## CAMB ###############################\n\nimport numpy as np\nimport camb\nimport itertools\nfrom camb import model, initialpower\n\nimport time\ntime0 = time.time()\n\n\"\"\"\nfirst 2 outputs from CAMB - totCL and unlensed CL both are 0's. 
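pyDOE.lhs returns samples in the unit hypercube; the script above maps them onto the physical ranges by indexing pre-gridded arrays with (lhd * totalFiles).astype(int). The continuous alternative is a direct linear rescale (the inverse of the rescale01 helper), sketched here with two of the ranges from the grid above:

import numpy as np
import pyDOE

bounds = np.array([[0.10, 0.140],      # Omega_m h^2
                   [0.0205, 0.0235]])  # Omega_b h^2
unit = pyDOE.lhs(len(bounds), samples=8)              # (8, 2) points in [0, 1]^2
samples = bounds[:, 0] + unit * (bounds[:, 1] - bounds[:, 0])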
\nCAMBFast maybe better?\nCosmoMC works well with CAMB\nhttp://camb.readthedocs.io/en/latest/CAMBdemo.html\nhttps://wiki.cosmos.esa.int/planckpla2015/index.php/CMB_spectrum_%26_Likelihood_Code\n\"\"\"\n\nlmax0 = 12000 ## something off above 8250\nell_max = 10000\nmax_k = 20000 # 2xell_max is good\n#lmax0 = 3000 ## something off above 8250 -- sorted now\n# model.lmax_lensed.value = 8250 by default\n#ell_max = 2500\n\n\n\npara5 = np.loadtxt('../Cl_data/Data/ExtendedLatinCosmoP5'+str(totalFiles)+'.txt')\n\nif PlotAll:\n\n\tf, a = plt.subplots(num_para, num_para, sharex=True, sharey=True)\n\tplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\n\tplt.rcParams.update({'font.size': 8})\n\n\n\tfor i in range(num_para):\n\t\tfor j in range(i+1):\n\t\t\t#print(i,j)\n\t\t\tif(i!=j):\n\t\t\t\ta[i, j].scatter(para5[:, i], para5[:, j], s=10)\n\t\t\t\ta[i, j].grid(True)\n\t\t\telse:\n\t\t\t\ta[i, i].text(0.4, 0.4, AllLabels[i], size = 'xx-large')\n\t\t\t\thist, bin_edges = np.histogram(para5[:,i], density=True, bins=64)\n\t\t\t\ta[i,i].bar(bin_edges[:-1], hist/hist.max(), width=0.2)\n\tplt.show()\n\n#\n#Set up a new set of parameters for CAMB\n#\n# Get CMB power spectra, as requested by the spectra argument. All power spectra are l(l+1)C_l/2pi\n# self owned numpy arrays (0..lmax, 0..3), where 0..3 index are TT, EE, BB TT,\n# unless raw_cl is True in which case return just C_l.\n# For the lens_potential the power spectrum returned is that of the deflection.\n\n#----------- for sigma_8---------------\n\n#Now get matter power spectra and sigma8 at redshift 0 and 0.8\n# pars = camb.CAMBparams()\n# pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122)\n# pars.set_dark_energy() #re-set defaults\n# pars.InitPower.set_params(ns=0.965)\n#Not non-linear corrections couples to smaller scales than you want\n# pars.set_matter_power(redshifts=[0.], kmax=2.0)\n#\n#Linear spectra\n# pars.NonLinear = model.NonLinear_none\n# results = camb.get_results(pars)\n# kh, z, pk = results.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints = 200)\n# s8 = np.array(results.get_sigma8())\n#\n# #Non-Linear spectra (Halofit)\n# pars.NonLinear = model.NonLinear_both\n# results.calc_power_spectra(pars)\n# kh_nonlin, z_nonlin, pk_nonlin = results.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints = 200)\n#\n# print(results.get_sigma8())\n\n\n# AllLabels = [r'$\\tilde{\\Omega}_m$', r'$\\tilde{\\Omega}_b$', r'$\\tilde{\\sigma}_8$', r'$\\tilde{\n# h}$', r'$\\tilde{n}_s$']\n\n\n#---------------------------------------\nAllTT = np.zeros(shape=(totalFiles, num_para + ell_max + 1) ) # TT\nAllEE = np.zeros(shape=(totalFiles, num_para + ell_max + 1) ) #\nAllBB = np.zeros(shape=(totalFiles, num_para + ell_max + 1) )\nAllTE = np.zeros(shape=(totalFiles, num_para + ell_max + 1) ) # Check if this is actually TE --\n# negative\n# values and CAMB documentation incorrect.\n\n\nnewErr = 'STOP SIGINT1: Integration timed out'\n\nfor i in range(totalFiles):\n\tprint(i, para5[i])\n\n\t# Set up a new set of parameters for CAMB\n\t# pars = camb.CAMBparams()\n\t# camb.set_halofit_version('takahashi') ########## 1.0.1 ISSUE\n\t# This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency\n\t# pars.set_cosmology(H0=67.05, ombh2=0.02225, omch2=0.1198,\n\t# tau=0.079, num_massive_neutrinos=0, mnu=0.0,\n\t# standard_neutrino_neff=3.046)\n\t# pars.InitPower.set_params(As=2.2065e-9, ns=0.9645)\n\t# pars.set_for_lmax(ell_max, max_eta_k=12000, lens_potential_accuracy=4);\n\t# 
pars.set_accuracy(AccuracyBoost=3, lAccuracyBoost=3, lSampleBoost=3, DoLateRadTruncation=False)\n\t# pars.AccuratePolarization = True\n\t# pars.AccurateReionization = True\n\t# pars.YHe = 0.24\n\t# # pars.omegan = 0.0006445\n\t# pars.omegak = 0.\n\t# pars.set_nonlinear_lensing(True)\n\n\n\t####################################################################################################\n\t####################################################################################################\n\n\ttry: \n\n\t\tprint(20*'=+')\n\t\tpars = camb.CAMBparams()\n\n\t\t####### Adding neutrinos, DE etc #########\n\t\tpars.set_cosmology(H0=100*para5[i, 3], ombh2=para5[i, 1], omch2=para5[i, 0], mnu=para5[i, 8],\n\t\t\t omk=0, tau=para5[i, 7], standard_neutrino_neff=para5[i, 9])\n\n\t\t## https://camb.readthedocs.io/en/latest/model.html#camb.model.CAMBparams.set_cosmology\n\n\t\t##### \"mnu --sum of neutrino masses (in eV, Omega_nu is calculated approximately from this\n\t\t### assuming neutrinos non-relativistic today). Set the field values directly if you need\n\t\t### finer control or more complex models.\" ######\n\n\t\tpars.InitPower.set_params(ns=para5[i, 4], r=0)\n\n\t\t######### DARK ENERGY #############\n\n\t\t# The dark energy model can be changed as in the previous example, or by assigning to pars.DarkEnergy.\n\t\t# e.g. use the PPF model\n\t\tfrom camb.dark_energy import DarkEnergyPPF, DarkEnergyFluid\n\n\t\t# pars.DarkEnergy = DarkEnergyPPF(w=-1.2, wa=0.2)\n\t\tpars.DarkEnergy = DarkEnergyPPF(w=para5[i, 5], wa=para5[i, 6])\n\t\tprint('w, wa model parameters:\\n\\n', pars.DarkEnergy)\n\t\t# results = camb.get_background(pars)\n\n\t\t# or can also use a w(a) numerical function\n\t\t# (note this will slow things down; make your own dark energy class in fortran for best performance)\n\t\t# a = np.logspace(-5, 0, 1000)\n\t\t# w = -1.2 + 0.2 * (1 - a)\n\t\t# pars.DarkEnergy = DarkEnergyPPF()\n\t\t# pars.DarkEnergy.set_w_a_table(a, w)\n\t\t# print('Table-interpolated parameters (w and wa are set to estimated values at 0):\\n\\n'\n\t\t# , pars.DarkEnergy)\n\t\t# results2 = camb.get_background(pars)\n\t\t#\n\t\t# rho, _ = results.get_dark_energy_rho_w(a)\n\t\t# rho2, _ = results2.get_dark_energy_rho_w(a)\n\t\t# plt.plot(a, rho, color='k')\n\t\t# plt.plot(a, rho2, color='r', ls='--')\n\t\t# plt.ylabel(r'$\\rho/\\rho_0$')\n\t\t# plt.xlabel('$a$')\n\t\t# plt.xlim(0, 1)\n\t\t# plt.title('Dark enery density');\n\n\t\t###################################\n\n\n\t\t# pars.set_for_lmax(lmax= lmax0, max_eta_k=None, k_eta_fac=12.5 , lens_potential_accuracy=1,\n\t\t# lens_k_eta_reference = 20000)\n\n\t\tpars.set_for_lmax(lmax = lmax0, max_eta_k=max_k, lens_potential_accuracy=4);\n\n\n\t\t## THIS IS ONLY FOR ACCURACY,\n\t\t## actual lmax is set in results.get_cmb_power_spectra\n\n\t\t# model.lmax_lensed = 10000 ## doesn't work\n\n\n\t\tpars.set_accuracy(AccuracyBoost=3, lAccuracyBoost=3, lSampleBoost=3, DoLateRadTruncation=False)\n\n\t\tpars.AccuratePolarization = True\n\t\tpars.AccurateReionization = True\n\t\tpars.YHe = 0.24 ##helium_fraction\n\t\t# pars.omegan = 0.0006445\n\t\tpars.omegak = 0.\n\t\tpars.set_nonlinear_lensing(True)\n\n\n\n\t\t# print model.lmax_lensed ########## 1.0.1 ISSUE\n\n\n\t\t#-------- sigma_8 --------------------------\n\t\tpars.set_matter_power(redshifts=[0.], kmax=2.0)\n\t\t#pars.set_matter_power(redshifts=[0.], kmax=max_k)\n\t\t# Linear spectra\n\t\t# pars.NonLinear = model.NonLinear_none\n\n\t\tresults = camb.get_results(pars)\n\t\tkh, z, pk = 
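Stripped of the accuracy boosts, dark-energy settings and sigma8 bookkeeping, the core CAMB call in the loop above reduces to the standard CAMBdemo pattern; cosmology values here are illustrative, not drawn from the para5 grid:

import camb

pars = camb.CAMBparams()
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
pars.set_for_lmax(2500, lens_potential_accuracy=1)
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(pars, CMB_unit="muK")
totCL = powers["total"]   # columns: TT, EE, BB, TE, as l(l+1)C_l/2pi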
results.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints=200)\n\t\t#kh, z, pk = results.get_matter_power_spectrum(minkh=1e-4, maxkh=max_k, npoints=200)\n\t\ts8 = np.array(results.get_sigma8())\n\n\t\t# Non-Linear spectra (Halofit)\n\t\tpars.NonLinear = model.NonLinear_both\n\n\t\tresults.calc_power_spectra(pars)\n\t\tkh_nonlin, z_nonlin, pk_nonlin = results.get_matter_power_spectrum(minkh=1e-4, maxkh=1, npoints=200)\n\t\t#kh_nonlin, z_nonlin, pk_nonlin = results.get_matter_power_spectrum(minkh=1e-4, maxkh=max_k, npoints=200)\n\t\tsigma8_camb = results.get_sigma8() # present value of sigma_8 --- check kman, mikh etc\n\t\t#---------------------------------------------------\n\n\t\tsigma8_input = para5[i, 2]\n\n\t\tsigma_ratio = (sigma8_input ** 2) / (sigma8_camb ** 2) # rescale factor\n\t\t# sigma_ratio = 1\n\t\t# #---------------------------------------------------\n\t\t# pars.set_for_lmax(lmax= lmax0, k_eta_fac=2.5 , lens_potential_accuracy=0) ## THIS IS ONLY\n\n\n\t\t#calculate results for these parameters\n\t\t# results0 = camb.get_results(pars) ### Why this again??????????\n\n\n\t\t#get dictionary of CAMB power spectra\n\t\tpowers =results.get_cmb_power_spectra(pars, CMB_unit='muK', lmax=ell_max)\n\t\t# powers =results.get_cmb_power_spectra(pars, CMB_unit='muK')\n\t\t# powers0 =results0.get_cmb_power_spectra(pars, CMB_unit='muK')\n\n\n\t\ttotCL = powers['total']*sigma_ratio\n\t\tunlensedCL = powers['unlensed_scalar']*sigma_ratio\n\n\t\tAllTT[i] = np.hstack([para5[i], totCL[:,0] ])\n\t\tAllEE[i] = np.hstack([para5[i], totCL[:,1] ])\n\t\tAllBB[i] = np.hstack([para5[i], totCL[:,2] ])\n\t\tAllTE[i] = np.hstack([para5[i], totCL[:,3] ])\n\n\n\t\t# np.save('../Cl_data/Data/LatintotCLP4'+str(totalFiles)+'_'+str(i) +'.npy', totCL)\n\t\t# np.save('../Cl_data/Data/LatinunlensedCLP4'+str(totalFiles)+'_'+str(i)+'.npy', unlensedCL)\n\t\tprint(totCL)\n\t\tprint(40*'-+')\n\t#except newErr as error:\n\t#\tprint(error)\t\n\texcept:\n\t\tprint('Error, moving on')\n\n\n#####################################################################\n#####################################################################\n\nif SaveCls:\n\tls = np.arange(totCL.shape[0])\n\n\t# np.save('../Cl_data/Data/LatinPara5P4_'+str(totalFiles)+'.npy', para5)\n\tnp.savetxt('../Cl_data/Data/Extended_ls_'+str(totalFiles)+'.txt', ls)\n\n\tnp.savetxt('../Cl_data/Data/ExtendedTTCl_'+str(totalFiles)+'.txt', AllTT)\n\tnp.savetxt('../Cl_data/Data/ExtendedEECl_'+str(totalFiles)+'.txt', AllEE)\n\tnp.savetxt('../Cl_data/Data/ExtendedBBCl_'+str(totalFiles)+'.txt', AllBB)\n\tnp.savetxt('../Cl_data/Data/ExtendedTECl_'+str(totalFiles)+'.txt', AllTE)\n\ntime1 = time.time()\nprint('camb time:', time1 - time0)\n\nif PlotAll:\n\tMainDir = '../Cl_data/'\n\tPlotsDir = MainDir+'Plots/'+'ExtendedPlots/'\n\tparamNo = 9\n\tsortedArg = np.argsort(para5[:, paramNo])\n\t\n\tplt.figure(32)\n\n\tfig, ax = plt.subplots(2,2, figsize = (12,8))\n\n\tlineObj = ax[0,0].plot(AllTT[:, num_para + 1:].T[:, sortedArg])\n\tax[0,0].set_yscale('log')\n\tax[0,0].set_xscale('log')\n\tax[0,0].set_ylabel(r'$C^{TT}_l$')\n\tax[0,0].set_xlabel('$l$')\n\n\tax[0,0].legend(iter(lineObj), para5[:, paramNo][sortedArg].round(decimals=2),\n\t\t title = AllLabels[paramNo])\n\n\t# ax[0,0].legend(iter(lineObj), tau.round(decimals=2), title = r'\\tau')\n\t# ax[0,0].legend(iter(lineObj), OmegaA.round(decimals=2), title = r'\\omega_a')\n\t# ax[0,0].legend(iter(lineObj), mnu.round(decimals=2), title = r'$\\sum m_\\nu$')\n\t# ax[0,0].legend(iter(lineObj), para5[:, 
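The rescale applied to the spectra above rests on C_l scaling approximately with the primordial amplitude, i.e. with sigma8 squared, so spectra computed at one sigma8 can be mapped to a target value without rerunning CAMB:

# C_l(target sigma8) ~= C_l(computed) * (sigma8_target / sigma8_camb)**2
sigma8_target, sigma8_camb = 0.80, 0.82   # illustrative values
sigma_ratio = (sigma8_target ** 2) / (sigma8_camb ** 2)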
2][sortedArg].round(decimals=4), title = r'$\\sigma_8$')\n\n\tax[1,0].plot(AllTE[:, num_para + 1:].T[:, sortedArg])\n\tax[1,0].set_xscale('log')\n\tax[1,0].set_ylabel(r'$C^{TE}_l$')\n\tax[1,0].set_xlabel('$l$')\n\n\tax[1,1].plot(AllEE[:, num_para + 1:].T[:, sortedArg])\n\tax[1,1].set_yscale('log')\n\tax[1,1].set_xscale('log')\n\tax[1,1].set_ylabel(r'$C_l^{EE}$')\n\tax[1,1].set_xlabel('$l$')\n\n\n\tax[0,1].plot(AllBB[:, num_para + 1:].T[:, sortedArg])\n\tax[0,1].set_ylabel(r'$C_l^{BB}$')\n\t# ax[0,1].set_yscale('log')\n\tax[0,1].set_xscale('log')\n\tax[0,1].set_xlabel('$l$')\n\tplt.savefig(PlotsDir + 'Param' + str(paramNo) + '_ExtendedClAll_'+str(totalFiles)+'.png')\n\n\tplt.show()\n","sub_path":"New_CAMBGen2.py","file_name":"New_CAMBGen2.py","file_ext":"py","file_size_in_byte":14332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"553672654","text":"#https://www.youtube.com/watch?v=8bNIkfRJZpo&t=10s\n#following this series UP TO back propagation\n\nimport neuralnetwork as nn\nimport numpy as np\n\nwith np.load(\"mnist.npz\") as data:\n\ttraining_images = data['training_images']\n\ttraining_labels = data['training_labels']\n\nlayer_sizes = (784,5,10)\n\nnet = nn.NeuralNetwork(layer_sizes)\n\n# prediction = net.predict(training_images)\n# print(prediction[0] , \"\\n\")\n# print(\"Prediction: \" , np.argmax(prediction[0]) , \"\\n\")\n# print(\"Actual: \" , np.argmax(training_labels[0]))\n# print(\"\\n\")\n\nnet.print_accuracy(training_images, training_labels)","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"449735144","text":"# Copyright 2022 Huawei Technologies Co., Ltd.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n_base_ = [\n '../_base_/models/retinanet_r50_fpn.py',\n '../_base_/datasets/openimages_detection.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\nmodel = dict(bbox_head=dict(num_classes=601))\n\noptimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(\n _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=26000,\n warmup_ratio=1.0 / 64,\n step=[8, 11])\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (32 GPUs) x (2 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n","sub_path":"PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py","file_name":"retinanet_r50_fpn_32x2_1x_openimages.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"4596967","text":"import psycopg2\nimport sqlite3\nimport pandas as pd\n\n# Create empty sqlite3 database (Could be input later?)\nsl_conn = sqlite3.connect('titanic.sqlite3')\n\n# 
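The MNIST loader in program.py above reads arrays out of a NumPy .npz archive; for reference, such an archive is produced and read back with np.savez (file name and shapes here are illustrative, the array keys match the record):

import numpy as np

np.savez("mnist_small.npz",
         training_images=np.zeros((10, 784)),
         training_labels=np.zeros((10, 10)))
with np.load("mnist_small.npz") as data:   # context manager closes the file handle
    images = data["training_images"]
assert images.shape == (10, 784)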
load in csv to pandas\ndf = pd.read_csv('titanic.csv')\nprint('pandas table:')\n# print(df.head())\nprint(df.shape)\n# print(df.columns)\ndf['Name'] = df['Name'].str.replace(r\"[\\\"\\',]\", '')\n\n# Populate database\ndf.to_sql('titanic', sl_conn, if_exists='replace')\n\n# Create a cursor\nsl_curs = sl_conn.cursor()\n\n\n# Step 1: Extract\n# Grab values from table\nget_values = 'SELECT * FROM titanic'\ntitanic_values = sl_curs.execute(get_values).fetchall()\n\n# Inspect the SQLite schema:\nprint('PRAGMA (SQLite?) table:')\nprint(sl_curs.execute('PRAGMA table_info(titanic);').fetchall())\n\n\n# Step 2: Transform\n\ncreate_table_statement = '''\nCREATE TABLE titanic(\n    index SERIAL PRIMARY KEY,\n    Survived INT NOT NULL,\n    Pclass INT,\n    Name VARCHAR(85),\n    Sex TEXT,\n    Age REAL,\n    Siblings_Spouse INT,\n    Parents_Children INT,\n    Fare REAL\n);\n'''\n\n# Access ElephantSQL database\ndbname = 'jzdtgbzn'\nuser = 'jzdtgbzn'\npassword = 'cw63FpnYdYoEO4Ih1WNiDLdCPXadK0RP'\nhost = 'balarama.db.elephantsql.com'\n\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Open postgres cursor\npg_curs = pg_conn.cursor()\n\n# Create our table in pg\npg_curs.execute(\"DROP TABLE IF EXISTS titanic\")\npg_curs.execute(create_table_statement)\npg_conn.commit()\n\n# List user tables (everything outside the system schemas) to confirm creation\nshow_tables = '''\nSELECT\n    *\nFROM\n    pg_catalog.pg_tables\nWHERE\n    schemaname != 'pg_catalog'\nAND schemaname != 'information_schema';\n'''\npg_curs.execute(show_tables)\nprint('postgreSQL table:')\nprint(pg_curs.fetchall())\n\n\n# Step 3: Load (populate the postgres table)\nfor person in titanic_values:\n    insert_statement = '''\n    INSERT INTO titanic (Survived, Pclass, Name, Sex, Age, \n    Siblings_Spouse, Parents_Children, Fare) \n    VALUES ''' + str(person[1:]) + ';'\n    pg_curs.execute(insert_statement)\npg_conn.commit()\n\n\n# Test\npg_curs.execute('SELECT * FROM titanic')\npg_titanic = pg_curs.fetchall()\n\n# for person, pg_person in zip(titanic_values, pg_titanic):\n#     assert person == pg_person\n# print(type(pg_titanic))\n# print(pg_titanic[:5])\n\n# Survived:\n'''\nSELECT count(Survived)\nFROM titanic\nWHERE Survived=1;\n'''\n\n# Died:\n'''\nSELECT count(Survived)\nFROM titanic\nWHERE Survived=0;\n'''\n\n# How many passengers in each class?\n'''\nSELECT Pclass, count(Name)\nfrom titanic\nGROUP BY Pclass;\n'''\n\n# How many survived / died within each class?\n'''\nSELECT Pclass, Survived, count(*)\nfrom titanic\nGROUP BY Pclass, Survived\nORDER BY Pclass, Survived;\n'''\n# What was the average age of survivors vs nonsurvivors?\n'''\nSELECT avg(Age), Survived\nfrom titanic\nGROUP BY Survived\n'''\n# ALSO CASE:\n'''\nSELECT avg(Age) || (CASE WHEN Survived = 1 THEN\n' avg age of survivors' ELSE ' avg age of nonsurvivors'\nEND)\nfrom titanic\nGROUP BY Survived\n'''\n\n# What was the average age of each passenger class?\n'''\nSELECT Pclass, avg(Age)\nFROM titanic\nGROUP BY Pclass\n'''\n\n# What was the average fare by passenger class? By survival?\n'''\nSELECT Pclass, Survived, avg(Fare)\nfrom titanic\ngroup by Pclass, Survived;\n'''\n\n# How many siblings/spouses aboard on average, by passenger class? 
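A runnable version of the first few commented queries above, run against the SQLite copy of the table; this assumes titanic.sqlite3 exists from the earlier to_sql step:

import sqlite3

conn = sqlite3.connect("titanic.sqlite3")
cur = conn.cursor()
survived = cur.execute("SELECT count(*) FROM titanic WHERE Survived=1").fetchone()[0]
died = cur.execute("SELECT count(*) FROM titanic WHERE Survived=0").fetchone()[0]
per_class = cur.execute(
    "SELECT Pclass, Survived, count(*) FROM titanic "
    "GROUP BY Pclass, Survived ORDER BY Pclass, Survived"
).fetchall()
print(survived, died, per_class)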
By survival?\n'''\n\n'''","sub_path":"module2-sql-for-analysis/insert_titanic.py","file_name":"insert_titanic.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"554732003","text":"import os\n\nfrom Config import ConfigWriter\nfrom ControlSocket import ControlSocket\nfrom define import *\n\nDEBUG = False\n\nclass Click(object):\n '''A click object, including all the information\n about a remote click'''\n main_click = False\n online = False\n IPaddr = ''\n ddosservice = ['NAT','UDP','syn flood']\n config = 'default config'\n configPath = './config/'\n\n def __init__(self,name,con_ipaddr,listen_ipaddr,\\\n app_server_ip,listen_broadcast,listen_device,\\\n listen_ether,net_controlPort,net_dataPort):\n if name == 'main_click':\n self.main_click = True\n if DEBUG:\n ipaddr = '192.168.3.136'\n self.name = name\n self.IPaddr = con_ipaddr\n self.controlPort = net_controlPort\n self.dataPort = net_dataPort\n self.using_port = 0\n self.controller = ControlSocket(con_ipaddr,net_controlPort[self.using_port])\n self.writer_controlPort = (8081,8082)\n self.writer_dataport = 8083\n self.writer = ConfigWriter(self.writer_controlPort,listen_ipaddr,app_server_ip,listen_broadcast,listen_device,listen_ether)\n self.datapipe = None\n self.online = True\n\n def ChangeConfig(self):\n f = open('./newconfig/'+self.name+'.click','r')\n config = f.read(-1)\n f.close()\n if self.using_port == 0:\n controlPort = self.controlPort[1]\n else:\n controlPort = self.controlPort[0]\n if self.controller.HotConfig(config,controlPort):\n self.controlPort = controlPort\n f = open('./config/'+name+'.click','w+')\n f.write(config)\n f.close()\n return True\n # self.controlPort = self.newControlPort\n # return True\n else:\n return '更改配置失败'\n\n def CloseClick(self):\n self.controller.Close()\n os.remove('')\n\n def CreateConfig(self, strategy, balackList, whiteList):\n if self.using_port == 0:\n controlPort = self.controlPort[1]\n else:\n controlPort = self.controlPort[0]\n newconfig = self.writer.NewConfig(controlPort,strategy,balackList,whiteList,self.name)\n self.newconfig = newconfig\n file = open('./newconfig/'+self.name+'.click','w+')\n file.write(newconfig)\n file.close()\n return newconfig\n\n# def main():\n# click = Click('test','192.168.4.130')\n# click.ChangeConfig()\n \n# if __name__ == '__main__':\n# main()\n","sub_path":"click-server/Click.py","file_name":"Click.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"273846067","text":"\"\"\"\n9.8. Iterators\n\"\"\"\n\n\n# By now you have probably noticed that most container objects can be looped over using a for statement:\nfor element in [1, 2, 3]:\n print(element)\nfor element in (1, 2, 3):\n print(element)\nfor key in {'one': 1, 'two': 2}:\n print(key)\nfor char in \"123\":\n print(char)\nfor line in open(\"myfile.txt\"):\n print(line, end='\\n')\n\n# Behind the scenes, the for statement calls iter() on the container object. The function returns an iterator object\n# that defines the method __next__() which accesses elements in the container one at a time\n\ns = 'abc'\nit = iter(s)\nprint(next(it))\nprint(next(it))\nprint(next(it))\n\n\n# Having seen the mechanics behind the iterator protocol, it is easy to add iterator behavior to your classes.\n# Define an __iter__() method which returns an object with a __next__() method. 
If the class defines __next__(),\n# then __iter__() can just return self:\n\n\nclass Reverse:\n    \"\"\"Iterator for looping over a sequence backwards.\"\"\"\n    def __init__(self, data):\n        self.data = data\n        self.index = len(data)\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.index == 0:\n            raise StopIteration\n        self.index = self.index - 1\n        return self.data[self.index]\n\n\nrev = Reverse('spam')\nfor char in rev:\n    print(char)\n","sub_path":"classes/iterators/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"7572184","text":"#!/usr/bin/env python\n\nimport configparser\nfrom test_ini_vars import cookiecutter\nfrom test_ini_vars import django\nfrom test_ini_vars import oeuvre\nfrom test_ini_vars import pyramid\n\n\nclass ToxIniParser:\n    def __init__(self, ini_file):\n        \"\"\"Use configparser to load ini_file into self.config\"\"\"\n        self.__config = configparser.ConfigParser()\n        self.__config.read(ini_file)\n        self.__number_of_sections = len(self.__config.sections())\n\n    @property\n    def number_of_sections(self):\n        \"\"\"Return the number of sections in the ini file.\n        New to properties? -> https://pybit.es/property-decorator.html\n        \"\"\"\n        return self.__number_of_sections\n\n    @number_of_sections.setter\n    def number_of_sections(self, value):\n        self.__number_of_sections = value\n\n    @property\n    def environments(self):\n        \"\"\"Return a list of environments\n        (= \"envlist\" attribute of [tox] section)\"\"\"\n        envs = self.__config[\"tox\"][\"envlist\"].split()\n        envs = [s.strip(\",\") for s in envs]\n        envs = [s.split(\",\") for s in envs]\n\n        flat_list = []\n        for sublist in envs:\n            for item in sublist:\n                flat_list.append(item)\n\n        return flat_list\n\n    @property\n    def base_python_versions(self):\n        \"\"\"Return a list of all basepython across the ini file\"\"\"\n        py_versions = []\n        for section in self.__config.sections():\n            try:\n                if self.__config[section][\"basepython\"] not in py_versions:\n                    py_versions.append(self.__config[section][\"basepython\"])\n\n            except KeyError:\n                continue\n\n        return py_versions\n\n\nini_files = (cookiecutter, django, oeuvre, pyramid)\nf = \"some_file.txt\"\n\nfor ini_file in ini_files:\n    with open(f, \"w\") as tmpfile:\n        tmpfile.write(ini_file)\n\n    tip = ToxIniParser(f)\n\n    print(tip.number_of_sections)\n    print(tip.environments)\n    print(sorted(tip.base_python_versions))\n\n","sub_path":"oop/ini.py","file_name":"ini.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"646369672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 31 13:03:11 2020\n\n@author: user\n\"\"\"\n\n\nfrom binarytree import build \nfrom binarytree import tree, bst, heap\nfrom binarytree import Node\n'''\nGiven a binary tree, return the sum of values of its deepest leaves.\n \n\nExample 1:\n\n\n\nInput: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]\nOutput: 15\n \n\nConstraints:\n\nThe number of nodes in the tree is between 1 and 10^4.\nThe value of nodes is between 1 and 100.\n'''\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution(object):\n    def deepestLeavesSum(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        deepestSum = 0\n        deepestDepth 
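The ToxIniParser above writes each ini string to a temp file before parsing; configparser can also consume a string directly via read_string, which removes that round trip (section content here is illustrative):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[tox]\nenvlist = py39, py310\n")
envs = [e.strip() for e in cfg["tox"]["envlist"].split(",")]
assert envs == ["py39", "py310"]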
= 0\n        q = [(root, deepestDepth)]  # List[(TreeNode node, int depth)]\n        while q:\n            node, depth = q.pop(0)\n            if depth > deepestDepth:  # new depth attained, reset sum\n                deepestDepth = depth\n                deepestSum = 0\n            deepestSum += node.val  # add to sum of nodes at current depth\n\n            # add node's children to queue\n            if node.left:\n                q.append((node.left, depth+1))\n            if node.right:\n                q.append((node.right, depth+1))\n        return deepestSum\n\n# level-order input [1,2,3,4,5,null,6,7,null,null,null,null,8] built as a TreeNode tree:\nroot = TreeNode(1,\n                TreeNode(2, TreeNode(4, TreeNode(7)), TreeNode(5)),\n                TreeNode(3, None, TreeNode(6, None, TreeNode(8))))\ny = Solution()\nprint(y.deepestLeavesSum(root))  # deepest leaves 7 and 8 -> 15","sub_path":"binaryTreeSum.py","file_name":"binaryTreeSum.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"309424259","text":"# Given an integer n, check if it's possible to increase n's roundness by swapping some pair of its digits.\n\ndef increaseNumberRoundness(n):\n    zeros = str(n).count('0')\n    count = 0\n    for i in range(len(str(n))-1,0,-1):\n        if str(n)[i] == \"0\":\n            count += 1\n        else:\n            break\n    return zeros > count","sub_path":"Arcade/The Core/increaseNumberRoundness.py","file_name":"increaseNumberRoundness.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"526661848","text":"import logging\nimport xml.etree.ElementTree as etree\n\nfrom chisualizer.util import Rectangle\n\n# a registry of all visualization descriptors which can be instantiated,\n# indexed by the name under which each can be instantiated\nxml_registry = {}\ndef xml_register(name=None):\n    def wrap(cls):\n        local_name = name\n        if local_name == None:\n            local_name = cls.__name__\n        if local_name in xml_registry:\n            raise NameError(\"Attempting to re-register a XML descriptor '%s'\" %\n                            local_name)\n        xml_registry[local_name] = cls\n        logging.debug(\"Registered XML descriptor class '%s'\" % local_name)\n        return cls\n    return wrap\n\nclass VisualizerDescriptor(object):\n    \"\"\"A visualizer descriptor file.\"\"\"\n    def __init__(self, filename, api):\n        \"\"\"Initialize this descriptor from a file and given a ChiselApi object.\"\"\"\n        self.api = api\n        self.parse_from_xml(filename)\n\n    def parse_from_xml(self, filename):\n        \"\"\"Parse this descriptor from an XML file.\"\"\"\n        from chisualizer.visualizers.VisualizerRoot import VisualizerRoot\n        xml_root = etree.parse(filename).getroot()\n        vis_root = VisualizerRoot(self.api)\n        vis_root.parse_children(xml_root)\n        vis_root.instantiate_visualizer()\n        self.vis_root = vis_root\n        self.visualizer = vis_root.visualizer\n\n    def layout_cairo(self, cr):\n        size_x, size_y = self.visualizer.layout_cairo(cr)\n        return Rectangle((0, 0), (size_x, size_y))\n\n    def draw_cairo(self, cr, rect):\n        return self.visualizer.draw_cairo(cr, rect, 0)\n\n    def get_theme(self):\n        # TODO refactor this, probably makes more sense to set themes here\n        return self.vis_root.get_theme()\n\nclass VisualizerParseError(BaseException):\n    pass\n\nclass Base(object):\n    \"\"\"Abstract base class for visualizer descriptor objects.\"\"\"\n    def parse_warning(self, msg):\n        \"\"\"Emits a warning message for XML parsing, automatically prepending\n        the class name and reference.\"\"\"\n        logging.warning(\"Parsing warning for %s: '%s': %s\" % \n                        (self.__class__.__name__, self.ref, msg))\n    def parse_error(self, msg):\n        \"\"\"Emits an error message for XML parsing, automatically prepending\n        the class name and reference and throwing an exception\"\"\"\n        logging.warning(\"Parsing ERROR for %s: '%s': %s\" % \n                        
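The level-sum BFS above dequeues with list.pop(0), which is O(n) per pop; collections.deque keeps the identical traversal O(1) per operation. A sketch reusing the TreeNode class from the record:

from collections import deque

def deepest_leaves_sum(root):
    total, deepest = 0, 0
    q = deque([(root, 0)])
    while q:
        node, depth = q.popleft()
        if depth > deepest:        # reached a new level: reset the running sum
            deepest, total = depth, 0
        total += node.val
        if node.left:
            q.append((node.left, depth + 1))
        if node.right:
            q.append((node.right, depth + 1))
    return total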
(self.__class__.__name__, self.ref, msg))\n raise VisualizerParseError(msg) \n \n def parse_element_int(self, element, param, default):\n got = element.get(param, None)\n if got is None:\n return default\n try:\n return int(got, 0)\n except ValueError:\n self.parse_warning(\"unable to convert %s='%s' to int, default to %s\" %\n (param, got, default))\n return default\n \n def get_chisel_api(self):\n \"\"\"Returns the ChiselApi object used to access node values.\n Returns None if not available or if this visualizer wasn't properly\n instantiated.\"\"\"\n return self.root.get_chisel_api()\n \n def get_theme(self):\n \"\"\"Returns a Theme object, mapping descriptions to numerical colors.\"\"\"\n return self.root.get_theme()\n \n def get_ref(self, ref):\n \"\"\"Returns the container VisualizerDescriptor object.\"\"\"\n return self.root.get_ref(ref)\n \n @staticmethod\n def from_xml(element, parent):\n #assert isinstance(element, etree.Element)\n if element.tag in xml_registry:\n rtn = xml_registry[element.tag].from_xml_cls(element, parent)\n logging.debug(\"Loaded %s: '%s'\", rtn.__class__.__name__, rtn.ref)\n return rtn\n else:\n raise NameError(\"Unknown class '%s'\" % element.tag)\n \n @classmethod\n def from_xml_cls(cls, element, parent):\n \"\"\"Initializes this descriptor from a XML etree Element.\"\"\"\n #assert isinstance(element, etree.Element)\n new = cls()\n new.parent = parent\n new.root = parent.root\n new.ref = element.get('ref', '(anon)')\n return new\n","sub_path":"src/chisualizer/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"483258745","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom bson import ObjectId\nfrom tornado import gen\n\nfrom base_app.classes.debug import Debug\nfrom base_app.models.mongodb.user.general_info.general_info import UserModel\nfrom user_app.handlers.base import BaseHandler, authentication\n\n__author__ = 'Morteza'\n\n\nclass ContentAnalysisHandler(BaseHandler):\n @gen.coroutine\n @authentication()\n def post(self, *args):\n try:\n action = self.get_argument('action', '')\n __name = \"\"\n if action == \"main_source_news\":\n __name = self.get_argument('source', '')\n elif action == \"news_group\":\n __name = self.get_argument('news_group', '')\n elif action == \"news_maker\":\n __name = self.get_argument('news_maker', '')\n if __name != '':\n r = UserModel(_id=self.current_user, content=__name).add_content(action)['value']\n r['_id'] = str(r['_id'])\n r['action'] = action\n self.update_full_current_user()\n self.status = True\n self.value = r\n else:\n self.messages.append('همه موارد را وارد کنید')\n\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='search_news_sidebar')\n self.write(self.result)\n\n @gen.coroutine\n @authentication()\n def put(self, *args):\n try:\n content = self.get_argument('content_id', '')\n action = self.get_argument('action', '')\n name = self.get_argument('name', '')\n method = self.get_argument('method', '')\n if method == \"update\":\n if content != '' and action != '' and name != '':\n r = UserModel(_id=self.current_user, content=ObjectId(content)).update_content(action, name)['value']\n self.update_full_current_user()\n self.status = True\n self.value = r\n elif method == \"sort\":\n try:\n contents = self.request.arguments['content']\n except:\n contents = []\n for i in range(len(contents)):\n UserModel(_id=self.current_user, 
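xml_register above is the standard class-registry decorator; reduced to its core (names here are illustrative, not from the record):

registry = {}

def register(name=None):
    def wrap(cls):
        key = name or cls.__name__
        if key in registry:
            raise NameError("duplicate registration: %s" % key)
        registry[key] = cls
        return cls
    return wrap

@register()
class Box:          # registered under its class name, as in the record
    pass

assert registry["Box"] is Box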
content=ObjectId(contents[i])).update_content_sort(action, i + 1)\n self.update_full_current_user()\n self.status = True\n\n else:\n self.messages.append('همه موارد را وارد کنید')\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='search_news_sidebar')\n self.write(self.result)\n\n @gen.coroutine\n @authentication()\n def delete(self, *args):\n try:\n content = self.get_argument('content_id', '')\n action = self.get_argument('action', '')\n if content != '':\n r = UserModel(_id=self.current_user, content=ObjectId(content)).delete_content(action)['value']\n self.update_full_current_user()\n self.status = True\n self.value = str(r)\n else:\n self.messages.append('همه موارد را وارد کنید')\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='search_news_sidebar')\n self.write(self.result)\n","sub_path":"user_app/handlers/content_analysis.py","file_name":"content_analysis.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576724979","text":"import requests\n\nurl = \"https://api.extremecloudiq.com/pcg/key-based/ports/***\"\n\npayload=\"{\\\"pcg_port_assignment\\\":[{\\\"device_id\\\":***,\\\"eth1_user_id\\\":***,\\\"eth2_user_id\\\":***,\\\"eth3_user_id\\\":***}]}\"\nheaders = {\n 'accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': '***'\n}\n\nresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\nprint(response.text)\n","sub_path":"XIQ API Python collection/Configuration-PCG(key-based)/Assign ports for AP.py","file_name":"Assign ports for AP.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"67249625","text":"from shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\nfrom shapely.geometry import LinearRing\nimport numpy as np\nfrom .geometry_object_superclass import _3dGeometrySuperclass\nfrom klampt.model import trajectory\nfrom klampt import vis\nimport random\n\nclass LegRange(_3dGeometrySuperclass):\n\n '''\n This class stores a shapely circle and is used to represent the range of end effectors. It only supports 2d circles\n which is not precise but is sufficient for current purposes.\n '''\n\n def __init__(self, P, r, height_map, name=None):\n\n self.name = name\n self.x = P[0]\n self.y = P[1]\n self.z = P[2]\n self.center = Point(self.make_2d(P))\n self.R = r\n self.shapely_poly = self.center.buffer(self.R)\n _3dGeometrySuperclass.__init__(self, height_map, name, self.shapely_poly)\n\n def visualize(self, arc_step=15):\n milestones = []\n for i in range(0, 361, arc_step):\n x = self.x + self.R*np.cos(np.deg2rad(i))\n y = self.y + self.R*np.sin(np.deg2rad(i))\n try:\n z = self.height_map.height_at_xy(x,y)\n except ValueError:\n z = 0\n milestones.append([x, y, z+.01])\n circle = trajectory.Trajectory(milestones=milestones)\n if not self.name:\n self.name = \"circle \"+str(random.randint(1,1000))\n vis.add(self.name, circle)\n","sub_path":"src/utils/geometry_objects/_2d_circle_geometry.py","file_name":"_2d_circle_geometry.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407005240","text":"from pyramid.httpexceptions import HTTPFound\n\nUSER_WELCOME = \"Welcome! The Alchemist Vault is a repository of key startup resources. 
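The XIQ snippet above hand-builds the JSON payload as a string; passing a dict through requests' json= parameter serializes it and sets the Content-Type header automatically. URL path, IDs and the token are placeholders, as they are (***) in the record:

import requests

payload = {"pcg_port_assignment": [{"device_id": 123, "eth1_user_id": 456}]}
response = requests.post(
    "https://api.extremecloudiq.com/pcg/key-based/ports/123",   # placeholder id
    headers={"accept": "application/json", "Authorization": "Bearer <token>"},
    json=payload,
)
print(response.status_code, response.text)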
\" \\\n \"To get started, go to My Profile under the top right drop-down menu and \" \\\n \"enter all profile details for you and your company. Be sure to enter general \" \\\n \"and specific tags for your Areas of Expertise and your company's Industry Keywords. \" \\\n \"Also upload your profile picture and your company's logo.\"\n\nNO_USER_MESSAGE = \"No user exists in our system with this LinkedIn email address (%(email)s). \" \\\n \"Please try again with another email address for LinkedIn Authorization \" \\\n \"here or contact \" \\\n \"%(admin)s\" \\\n \" if you need help logging in. \" \\\n \"LinkedIn provides us with your primary email address only, \" \\\n \"please make sure you have sent the primary email (%(email)s) to Alchemist Accelerator staff. \" \\\n \"Check your LinkedIn email settings here\"\n\nUSER_INACTIVE_MESSAGE = \"Your user is inactive, if you just registered please \" \\\n \"wait until your user gets activated by the Alchemist Accelerator staff. \" \\\n \"Otherwise please contact \" \\\n \"%(admin)s \" \\\n \"to request re-activation of your account.\"\n\n\ndef redirect_check_first_login(default_url, request, user=None):\n if not user:\n user = request.user\n default_url = request.session.get('login_url', default_url)\n if user.has_logged_in:\n return HTTPFound('%s?just_logged_in=1' % default_url)\n request.session.flash(USER_WELCOME)\n profile_url = '/%s/%s' % (user.primary_type, user.id)\n user.has_logged_in = True\n return HTTPFound('%s?just_logged_in=1' % profile_url)\n\n\nimport login\nimport confirm_acc\n","sub_path":"alchemist/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177275433","text":"from django.conf import settings;\nfrom urllib import urlencode, quote\nimport unicodedata\n\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\nclass ClientRouterHelperClass(object):\n _instance = None\n\n url_schema = 'https://' if settings.HTTPS_SUPPORT else 'http://'\n base_url = url_schema + settings.CLIENT_BASE_URL\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(ClientRouterHelperClass, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def act_index_url(self):\n return u\"{}/explore/vagas\".format(self.base_url)\n def new_act_url(self):\n return u\"{}/cadastro/vaga/\".format(self.base_url)\n def view_act_url(self, act_slug):\n return u\"{}/vaga/{}\".format(self.base_url, act_slug)\n\n def view_volunteer_url(self, volunteer_slug):\n return u\"{}/voluntario/{}\".format(self.base_url, volunteer_slug)\n\n def view_nonprofit_url(self, nonprofit_slug):\n return u\"{}/ong/{}\".format(self.base_url, nonprofit_slug)\n\n def edit_nonprofit_url(self, nonprofit_slug):\n return u\"{}/editar/ong/{}\".format(self.base_url, nonprofit_slug)\n\n def edit_project_url(self, project_slug):\n return u\"{}/editar/vaga/{}\".format(self.base_url, project_slug)\n\n MAIL_ROUTINE_URL = {\n 'ask_project_exp': {\n 'nonprofit': 'https://docs.google.com/forms/d/1cYbjzxC-ETSSVdz76I62t0oToRs7OXZCGEpm7taryY8/viewform',\n 'volunteer': 'https://docs.google.com/forms/d/1MdBYnmnH_EAiku1m0yeEPg7jTLyW7uzCJ5GcYXiMQkc/viewform',\n }\n }\n MAIL_ROUTINE_MONITORING_BASE_CONFIRM_URL = \"https://docs.google.com/forms/d/1zelGspQUTntp8hUSJXzLTPQXt0G3COPQ3ZYHR5blOj8/viewform\"\n MAIL_ROUTINE_MONITORING_BASE_REFUTE_URL = 
\"https://docs.google.com/forms/d/1ZXcVgWENGWfCDnDDJ-JcFUFrHOcgRvWlK5Y1VZdniUY/viewform\"\n\n def mail_routine_monitoring_build_form_url(self, confirmation=True, volunteer_email=None, nonprofit_name=None, act_state=None):\n query_string = urlencode({ # Fields on the form\n \"entry.1864240677\": volunteer_email.encode('utf8'),\n \"entry.701739852\": nonprofit_name.encode('utf8'),\n \"entry.478646075\": act_state.encode('utf8')\n })\n\n if confirmation:\n base_url = self.MAIL_ROUTINE_MONITORING_BASE_CONFIRM_URL\n else:\n base_url = self.MAIL_ROUTINE_MONITORING_BASE_REFUTE_URL\n return u\"{}?edit_requested=true&{}\".format(base_url, query_string)\n\n def mail_ask_about_project_experience_url(self, to, obj):\n base_url = self.MAIL_ROUTINE_URL['ask_project_exp'][to]\n if base_url:\n act_state = \"\" # todo : placeholder\n if to == 'volunteer':\n project = obj.project\n query_string = urlencode({ # Fields on the form\n 'entry.221477937': obj.volunteer.user.email,\n 'entry.1166737892': project.nonprofit.name,\n 'entry.1735336797': project.name,\n 'entry.765610574': act_state,\n })\n else:\n project = obj\n query_string = urlencode({ # Fields on the form\n 'entry.1023722142': project.email,\n 'entry.1963768658': project.nonprofit.name,\n 'entry.321022436': project.name,\n 'entry.742260943': project.applied_count,\n 'entry.262236178': act_state,\n })\n return u\"{}?edit_requested=true&{}\".format(base_url, query_string)\n else:\n pass\n\nclass MailAssetsHelperClass(object):\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(MailAssetsHelperClass, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def __init__(self):\n self.check = \"https://s3.amazonaws.com/atados-us/images/check.png\"\n self.iconFacebook = \"https://s3.amazonaws.com/atados-us/images/icon-fb.png\"\n self.iconInstagram = \"https://s3.amazonaws.com/atados-us/images/icon-insta.png\"\n self.logoAtadosSmall = \"https://s3.amazonaws.com/atados-us/images/logo.small.png\"\n self.logoAtadosSmall2 = \"https://s3.amazonaws.com/atados-us/images/mandala.png\"\n\nClientRouter = ClientRouterHelperClass()\nMailAssetsHelper = MailAssetsHelperClass()\n\n\n","sub_path":"atados_core/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"98490971","text":"\"\"\"\nPython implementation of Simple Polygon,\n\"\"\"\n\nimport operator\nfrom random import shuffle\n\nfrom ComputationalGeometry.modules import utils\n\n\ndef simple_polygon(P):\n final_pts = []\n\n max_x = max([item[0] for item in P])\n min_y = min([item[1] for item in P if item[0] == max_x])\n extreme_pt = [max_x, min_y]\n print(\"Extreme point:\", extreme_pt)\n\n final_pts.append(extreme_pt)\n P.remove(extreme_pt)\n\n dict = {}\n dict_dist = {}\n for i in range(0, len(P)):\n if extreme_pt[0] == P[i][0]:\n final_pts.append(P[i])\n else:\n theta = (extreme_pt[1] - P[i][1]) / (extreme_pt[0] - P[i][0])\n dist = pow(extreme_pt[0] - P[i][0], 2) + pow(extreme_pt[1] - P[i][1], 2)\n\n dict_dist[tuple(P[i])] = dist\n\n if theta not in dict:\n dict[theta] = []\n\n if len(dict[theta]) > 0:\n lst = dict[theta]\n lst.append(P[i])\n for idx in range(0, len(dict[theta]) - 1):\n if dist > dict_dist[tuple(dict[theta][idx])]:\n lst.insert(idx, P[i])\n lst.pop()\n dict[theta] = lst\n else:\n dict[theta].append(P[i])\n\n sorted_lst = sorted(dict.items(), key=operator.itemgetter(0))\n for element in sorted_lst:\n for point 
in element[1]:\n final_pts.append(point)\n\n return final_pts\n\n\nP = utils.dummy_simple_polygon()\nshuffle(P)\npts = simple_polygon(P)\nprint(pts)\n\nimport matplotlib.pyplot as plt\n\ncoord = pts\ncoord.append(coord[0])\n\nxs, ys = zip(*coord)\n\nplt.figure()\nplt.plot(xs, ys)\nplt.plot([pts[0][0]], [pts[0][1]], marker='o', markersize=4, color=\"red\")\nplt.show()\n","sub_path":"ComputationalGeometry/simplePolygon.py","file_name":"simplePolygon.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"237237933","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom colorama import Fore\nfrom spotify_ripper.utils import *\nimport os\nimport time\nimport spotify\nimport requests\nimport spotipy\nimport spotipy.client\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\nclient_credentials_sp = None\n\ndef init_client_credentials_sp():\n\n global client_credentials_sp\n if client_credentials_sp is None:\n client_credentials_manager = SpotifyClientCredentials()\n client_credentials_sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n client_credentials_sp.trace = False\n\n return client_credentials_sp\n\nclass WebAPI(object):\n\n def __init__(self, args):\n self.args = args\n self.cache = {\n \"artist_albums\": {},\n \"artists_on_album\": {},\n \"genres\": {},\n \"charts\": {},\n \"large_coverart\": {}\n }\n\n def cache_result(self, name, uri, result):\n self.cache[name][uri] = result\n\n def get_cached_result(self, name, uri):\n return self.cache[name].get(uri)\n\n def request_json(self, url, msg):\n print(Fore.GREEN + \"Attempting to retrieve \" + msg +\n \" from Spotify's Web API\" + Fore.RESET)\n print(Fore.CYAN + url + Fore.RESET)\n sp = init_client_credentials_sp()\n try:\n res = sp._get(url)\n except spotify.SpotifyException as e:\n print(Fore.YELLOW + \"URL returned non-200 HTTP code: \" +\n str(e.http_status) + \" message: \" + e.msg + Fore.RESET)\n return None\n return res\n\n def request_url(self, url, msg):\n print(Fore.GREEN + \"Attempting to retrieve \" + msg +\n \" from Spotify's Web API\" + Fore.RESET)\n print(Fore.CYAN + url + Fore.RESET)\n res = requests.get(url)\n if res.status_code == 200:\n return res\n else:\n print(Fore.YELLOW + \"URL returned non-200 HTTP code: \" +\n str(res.status_code) + Fore.RESET)\n return None\n\n def api_url(self, url_path):\n return 'https://api.spotify.com/v1/' + url_path\n\n def charts_url(self, url_path):\n return 'https://spotifycharts.com/' + url_path\n\n def get_artist_albums(self, artist_id):\n args = self.args\n\n sp = init_client_credentials_sp()\n\n albums = []\n album_uris = []\n results = sp.artist_albums(artist_id, args.artist_album_type, args.artist_album_market)\n albums.extend(results['items'])\n while results['next']:\n results = sp.next(results)\n albums.extend(results['items'])\n\n # check for cached result\n cached_result = self.get_cached_result(\"artist_albums\", artist_id)\n if cached_result is not None:\n return cached_result\n\n album_uris += [album['uri'] for album in albums]\n\n print(str(len(album_uris)) + \" albums found\")\n self.cache_result(\"artist_albums\", artist_id, album_uris)\n return album_uris\n\n def get_artists_on_album(self, uri):\n\n # check for cached result\n cached_result = self.get_cached_result(\"artists_on_album\", uri)\n if cached_result is not None:\n return cached_result\n\n # extract album id from uri\n uri_tokens = uri.split(':')\n if 
len(uri_tokens) != 3:\n return None\n sp = init_client_credentials_sp()\n album = sp.album(uri_tokens[2])\n if album is None:\n return None\n\n result = [artist['name'] for artist in album['artists']]\n self.cache_result(\"artists_on_album\", uri, result)\n return result\n\n # genre_type can be \"artist\" or \"album\"\n def get_genres(self, genre_type, track):\n def get_genre_json(spotify_id):\n url = self.api_url(genre_type + 's/' + spotify_id)\n return self.request_json(url, \"genres\")\n\n # extract album id from uri\n item = track.artists[0] if genre_type == \"artist\" else track.album\n uri = item.link.uri\n\n # check for cached result\n cached_result = self.get_cached_result(\"genres\", uri)\n if cached_result is not None:\n return cached_result\n\n uri_tokens = uri.split(':')\n if len(uri_tokens) != 3:\n return None\n\n json_obj = get_genre_json(uri_tokens[2])\n if json_obj is None:\n return None\n\n result = json_obj[\"genres\"]\n self.cache_result(\"genres\", uri, result)\n return result\n\n def get_large_coverart(self, uri):\n def get_track_json(track_id):\n url = self.api_url('tracks/' + track_id)\n return self.request_json(url, \"track\")\n\n def get_image_data(url):\n response = self.request_url(url, \"cover art\")\n return response.content\n\n # check for cached result\n cached_result = self.get_cached_result(\"large_coverart\", uri)\n if cached_result is not None:\n return get_image_data(cached_result)\n\n # extract album id from uri\n uri_tokens = uri.split(':')\n if len(uri_tokens) != 3:\n return None\n\n track = get_track_json(uri_tokens[2])\n if track is None:\n return None\n\n try:\n images = track['album']['images']\n except KeyError:\n return None\n\n for image in images:\n if image[\"width\"] == 640:\n self.cache_result(\"large_coverart\", uri, image[\"url\"])\n return get_image_data(image[\"url\"])\n\n return None\n\n\n","sub_path":"spotify_ripper/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"16037554","text":"import numpy as np\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder\nimport bace.classifiers.neural.neural_constants as neural_constants\nimport bace.classifiers.neural.data_slicer as data_slicer\n\nfrom keras.models import Sequential\nfrom keras.utils import np_utils\nfrom keras import layers\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom bace.classifiers.neural.glove import load_glove\n\nfrom typing import Dict\n\nimport pickle\n\nclass NeuralClassifier:\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes a neural classifier's attributes\n\n \"\"\"\n # a list of tuples of (type, data_clean, true_label)\n self.labelled_data = []\n self.labelled_validation_data = []\n self.model = None\n self.tokenizer = None\n self.labels = []\n self.label_encoder = None\n #force\n\n def pickle(self, fname, keep_data=False):\n \"\"\"Pickles this classifier\n\n Parameters\n ----------\n fname : a file name\n keep_data : if test/validation data should be kept (will increase size of file)\n\n\n \"\"\"\n # pickle requires a binary-mode file handle\n with open(fname, 'wb') as f:\n if keep_data:\n pickle.dump(self, f)\n else:\n temp_l_data = self.labelled_data\n temp_v_data = self.labelled_validation_data\n self.labelled_data = []\n self.labelled_validation_data = []\n pickle.dump(self, f)\n self.labelled_data = temp_l_data\n self.labelled_validation_data = temp_v_data\n \n def to_pred(self, pred):\n \"\"\"\n\n Parameters\n ----------\n pred : array_like\n A real vector s.t. len(pred) == len(self.labels)\n\n Returns\n -------\n str\n The label string at the index of the first maximal value of pred\n\n \"\"\"\n maxi = 0\n for i in range(1, len(pred)):\n # compare against the current best probability, not the index\n if pred[i] > pred[maxi]:\n maxi = i\n return self.labels[maxi]\n \n def to_pred_comparison(self, pred):\n \"\"\"\n\n Parameters\n ----------\n pred : array_like\n A real vector s.t. len(pred) == len(self.labels)\n\n Returns\n -------\n array_like\n An array of tuples of (labels, prediction_prob) for each value in pred,\n in descending order by probability\n\n \"\"\"\n probs = [(self.labels[i], pred[i]) for i in range(len(pred))]\n probs.sort(key=lambda x: x[1], reverse=True)\n return probs\n\n def add_data(self, file_id : str, data : str, true_label):\n \"\"\"Adds the given data point to this model's data\n\n Parameters\n ----------\n file_id : str\n an id for the file this data point is drawn from\n data : str\n true_label\n The true label for this data point\n\n \"\"\"\n\n # CURRENTLY NOT TAKING IN PRE-TOKENIZED FILE, DISCUSS WITH TEAM ABOUT ALTERING CLASSIFIER INTERFACE\n if true_label not in self.labels:\n self.labels.append(true_label)\n self.labelled_data.append((file_id, data, true_label))\n\n def add_validation_data(self, file_id : str, data : str, true_label : int):\n \"\"\"Adds the given data point to this model's validation data\n\n Parameters\n ----------\n file_id : str\n an id for the file this data point is drawn from\n data : str\n true_label\n The true label for this data point\n \"\"\"\n if true_label not in self.labels:\n self.labels.append(true_label)\n self.labelled_validation_data.append((file_id, data, true_label))\n\n def train(self,\n max_number_tokens=neural_constants.MAX_NUMBER_TOKENS,\n slice_length=neural_constants.SLICE_LENGTH,\n slice_overlap=neural_constants.SLICE_OVERLAP,\n glove_file=neural_constants.GLOVE_FILE,\n glove_dimensions=neural_constants.GLOVE_DIMENSIONS,\n diagnostic_printing=False,\n num_epochs=10,\n batch_size=5):\n \"\"\"\n\n Parameters\n ----------\n max_number_tokens : int, optional\n The maximum number of distinct tokens allowed by the tokenizer.\n With more data, this value should increase\n slice_length : int, optional\n The length of the subslices that are sent through the model.\n With more data, this value should increase\n This value should probably not be greater than half the length of a typical document\n slice_overlap : float, optional\n The percent of each slice that is overlapped with its neighbors\n This value should be in the range [0,1), but probably not above .2\n glove_file : str, optional\n The .txt file containing the glove embeddings to use for this classifier\n glove_dimensions : str, optional\n The number of dimensions of the given glove_file\n diagnostic_printing : bool, optional\n True to output some statistics on all validation data\n num_epochs : int, optional\n The number of epochs to train the model for.\n Determined experimentally\n batch_size : int, optional\n The batch size to use when training the model\n Determined experimentally\n\n \"\"\"\n\n has_validation = len(self.labelled_validation_data) > 0\n # create the tokenizer\n self.tokenizer = Tokenizer(num_words=max_number_tokens)\n training_data = [text for _, text, _ in self.labelled_data]\n self.tokenizer.fit_on_texts(training_data)\n self.label_encoder = LabelEncoder()\n self.label_encoder.fit(self.labels)\n\n self.label_encoder = LabelEncoder()\n 
self.label_encoder.fit(self.labels)\n # now build our training data_clean\n X_train = self.tokenizer.texts_to_sequences(training_data)\n\n if has_validation:\n X_validation = self.tokenizer.texts_to_sequences([text for _, text, _ in self.labelled_validation_data])\n\n X_train, y_train_labels = data_slicer.slice_data(X_train,\n [y for _, _, y in self.labelled_data],\n slice_length=slice_length,\n overlap_percent=slice_overlap)\n if has_validation:\n X_validation, y_validation_labels = data_slicer.slice_data(X_validation,\n [y for _, _, y in self.labelled_validation_data],\n slice_length=slice_length,\n overlap_percent=slice_overlap)\n # convert labels to 1-hots\n self.label_encoder = LabelEncoder()\n self.label_encoder.fit(self.labels)\n\n y_train = np_utils.to_categorical(self.label_encoder.transform(y_train_labels))\n if has_validation:\n y_validation = np_utils.to_categorical(self.label_encoder.transform(y_validation_labels))\n\n\n # pad them as necessary\n if has_validation:\n X_validation = np.array(pad_sequences(X_validation, padding=\"post\", maxlen=slice_length))\n X_train = pad_sequences(X_train, padding=\"post\", maxlen=slice_length)\n\n # force change\n\n # get our glove embeddings\n glove = load_glove(glove_file, self.tokenizer.word_index, glove_dimensions)\n\n # compute some neural_constants\n vocab_size = len(self.tokenizer.word_index) + 1\n\n # set model parameters\n self.model = Sequential()\n\n model_layers = [\n # must have these two layers firsts\n layers.Embedding(vocab_size,\n glove_dimensions,\n weights=[glove],\n input_length=slice_length,\n trainable=False),\n # now we have some options\n\n # as more data becomes available, a more optimal sequence of inner layers\n # may be discoverable\n layers.GlobalMaxPool1D(),\n layers.Dense(45, activation=\"relu\"),\n layers.Dense(20, activation=\"sigmoid\"),\n\n # final layer for the output probability distribution\n layers.Dense(len(self.labels), activation=\"softmax\")\n ]\n # add them in\n for layer in model_layers:\n self.model.add(layer)\n self.model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n \"\"\"\n print(np.shape(X_train))\n print(np.shape(y_train))\n print(np.shape(X_validation))\n print(np.shape(y_validation))\n \"\"\"\n\n #X_train, y_train = shuffle_parallel_arrays(X_train, y_train)\n\n # now we fit (can take a while)\n if has_validation:\n self.model.fit(X_train, y_train,\n epochs=num_epochs,\n verbose=False,\n shuffle=True,\n validation_data=(X_validation, y_validation),\n batch_size=batch_size)\n else:\n self.model.fit(X_train, y_train,\n epochs=num_epochs,\n verbose=False,\n shuffle=True,\n batch_size=batch_size)\n if diagnostic_printing and has_validation:\n def cm(true, pred):\n m = confusion_matrix(true, pred)\n print(\"Confusion matrix\")\n print(\" {0:3s} {1:3s}\".format(\"P+\", \"P-\"))\n print(\"T+ {0:<3d} {1:<3d}\".format(m[1][1], m[0][1]))\n print(\"T- {0:<3d} {1:<3d}\".format(m[1][0], m[0][0]))\n\n\n y_train_pred = [x for x in list(self.model.predict(X_train, verbose=False))]\n y_validation_pred = [x for x in list(self.model.predict(X_validation, verbose=False))]\n\n loss, acc = self.model.evaluate(X_train, y_train, verbose=False)\n print(\"Train L/A asd: {0:.4f} {1:.4f}\".format(loss, acc))\n # cm(y_train, y_train_pred)\n loss, acc = self.model.evaluate(X_validation, y_validation, verbose=False)\n print(\"Validation L/A: {0:.4f} {1:.4f}\".format(loss, acc))\n #cm(y_validation, y_validation_pred)\n\n nc = 0\n for i in range(len(X_validation)):\n 
print(y_validation_labels[i],self.to_pred(y_validation_pred[i]), y_validation_pred[i])\n if y_validation_labels[i] == self.to_pred(y_validation_pred[i]):\n nc += 1\n print(\"acc:\", nc/len(y_validation_labels))\n\n def predict(self, str,\n slice_length=neural_constants.SLICE_LENGTH,\n slice_overlap=neural_constants.SLICE_OVERLAP):\n \"\"\"\n\n Parameters\n ----------\n str : str\n a string of text to predict\n slice_length : int, optional\n the slice length to use. Should match the model's slice length\n slice_overlap : float, optional\n The percent of each slice that is overlapped with its neigbors\n This value should be in the range [0,1), but probably not above .2\n\n Returns\n -------\n distribution: array_like\n The probability distribution s.t. distribution[i] == P(label of str == self.labels[i])\n Where len(distribution) == len(self.labels)\n And sum(distribution) == 1\n And for all i distribution[i] >= 0\n \"\"\"\n tokenized = self.tokenizer.texts_to_sequences([str])\n slices, _ = data_slicer.slice_data(tokenized,\n None,\n slice_length=slice_length,\n overlap_percent=slice_overlap)\n #print(slices)\n X = np.array(pad_sequences(slices, padding=\"post\", maxlen=slice_length))\n #print(X)\n predictions = [x for x in list(self.model.predict(X, verbose=False))]\n\n s = predictions[0]\n for p in predictions[1:]:\n for i in range(len(s)):\n s[i] += p[i]\n return self.to_pred_comparison([x / sum(s) for x in s])\n\n def slice_and_predict(self, str,\n slice_length=neural_constants.SLICE_LENGTH,\n slice_overlap=neural_constants.SLICE_OVERLAP):\n \"\"\"Slices and predicts the input string for each slice\n\n Parameters\n ----------\n str : str\n a string of text to predict\n slice_length : int, optional\n the slice length to use. Should match the model's slice length\n slice_overlap : float, optional\n The percent of each slice that is overlapped with its neigbors\n This value should be in the range [0,1), but probably not above .2\n\n Returns\n -------\n distribution: array_like\n The probability distribution s.t. 
distribution[i] == P(label of str == self.labels[i])\n Where len(distribution) == len(self.labels)\n And sum(distribution) == 1\n And for all i distribution[i] >= 0\n \"\"\"\n tokenized = self.tokenizer.texts_to_sequences([str])\n slices, _ = data_slicer.slice_data(tokenized,\n None,\n slice_length=slice_length,\n overlap_percent=slice_overlap)\n restored = self.tokenizer.sequences_to_texts(slices)\n #print(slices)\n X = np.array(pad_sequences(slices, padding=\"post\", maxlen=slice_length))\n #print(X)\n predictions = [x for x in list(self.model.predict(X, verbose=False))]\n return [\n (self.to_pred(predictions[i]),\n restored[i])\n for i in range(len(slices))]","sub_path":"bace/classifiers/neural/neural_classifier.py","file_name":"neural_classifier.py","file_ext":"py","file_size_in_byte":14116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"519450988","text":"# Copyright (c) 2021, NVIDIA CORPORATION.\n\"\"\"Base class for Frame types that only have a single column.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any, Dict, MutableMapping, Optional, Tuple, TypeVar, Union\n\nimport cupy\nimport numpy as np\nimport pandas as pd\n\nimport cudf\nfrom cudf._typing import Dtype\nfrom cudf.api.types import _is_scalar_or_zero_d_array\nfrom cudf.core.column import ColumnBase, as_column\nfrom cudf.core.frame import Frame\n\nT = TypeVar(\"T\", bound=\"Frame\")\n\n\nclass SingleColumnFrame(Frame):\n \"\"\"A one-dimensional frame.\n\n Frames with only a single column share certain logic that is encoded in\n this class.\n \"\"\"\n\n _SUPPORT_AXIS_LOOKUP = {\n 0: 0,\n None: 0,\n \"index\": 0,\n }\n\n def _reduce(\n self, op, axis=None, level=None, numeric_only=None, **kwargs,\n ):\n if axis not in (None, 0):\n raise NotImplementedError(\"axis parameter is not implemented yet\")\n\n if level is not None:\n raise NotImplementedError(\"level parameter is not implemented yet\")\n\n if numeric_only not in (None, True):\n raise NotImplementedError(\n \"numeric_only parameter is not implemented yet\"\n )\n return getattr(self._column, op)(**kwargs)\n\n def _scan(self, op, axis=None, *args, **kwargs):\n if axis not in (None, 0):\n raise NotImplementedError(\"axis parameter is not implemented yet\")\n\n return super()._scan(op, axis=axis, *args, **kwargs)\n\n @classmethod\n def _from_data(\n cls,\n data: MutableMapping,\n index: Optional[cudf.core.index.BaseIndex] = None,\n name: Any = None,\n ):\n\n out = super()._from_data(data, index)\n if name is not None:\n out.name = name\n return out\n\n @property\n def name(self):\n \"\"\"Get the name of this object.\"\"\"\n return next(iter(self._data.names))\n\n @name.setter\n def name(self, value):\n self._data[value] = self._data.pop(self.name)\n\n @property\n def ndim(self):\n \"\"\"Get the dimensionality (always 1 for single-columned frames).\"\"\"\n return 1\n\n @property\n def shape(self):\n \"\"\"Get a tuple representing the dimensionality of the Index.\"\"\"\n return (len(self),)\n\n def __iter__(self):\n # Iterating over a GPU object is not efficient and hence not supported.\n # Consider using ``.to_arrow()``, ``.to_pandas()`` or ``.values_host``\n # if you wish to iterate over the values.\n cudf.utils.utils.raise_iteration_error(obj=self)\n\n def __bool__(self):\n raise TypeError(\n f\"The truth value of a {type(self)} is ambiguous. 
Use \"\n \"a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n @property\n def _num_columns(self):\n return 1\n\n @property\n def _column(self):\n return self._data[self.name]\n\n @_column.setter\n def _column(self, value):\n self._data[self.name] = value\n\n @property\n def values(self): # noqa: D102\n return self._column.values\n\n @property\n def values_host(self): # noqa: D102\n return self._column.values_host\n\n def to_cupy(\n self,\n dtype: Union[Dtype, None] = None,\n copy: bool = True,\n na_value=None,\n ) -> cupy.ndarray: # noqa: D102\n return super().to_cupy(dtype, copy, na_value).flatten()\n\n def to_numpy(\n self,\n dtype: Union[Dtype, None] = None,\n copy: bool = True,\n na_value=None,\n ) -> np.ndarray: # noqa: D102\n return super().to_numpy(dtype, copy, na_value).flatten()\n\n def tolist(self): # noqa: D102\n\n raise TypeError(\n \"cuDF does not support conversion to host memory \"\n \"via the `tolist()` method. Consider using \"\n \"`.to_arrow().to_pylist()` to construct a Python list.\"\n )\n\n to_list = tolist\n\n @classmethod\n def from_arrow(cls, array):\n \"\"\"Create from PyArrow Array/ChunkedArray.\n\n Parameters\n ----------\n array : PyArrow Array/ChunkedArray\n PyArrow Object which has to be converted.\n\n Raises\n ------\n TypeError for invalid input type.\n\n Returns\n -------\n SingleColumnFrame\n\n Examples\n --------\n >>> import cudf\n >>> import pyarrow as pa\n >>> cudf.Index.from_arrow(pa.array([\"a\", \"b\", None]))\n StringIndex(['a' 'b' None], dtype='object')\n >>> cudf.Series.from_arrow(pa.array([\"a\", \"b\", None]))\n 0 a\n 1 b\n 2 <NA>\n dtype: object\n \"\"\"\n return cls(ColumnBase.from_arrow(array))\n\n def to_arrow(self):\n \"\"\"\n Convert to a PyArrow Array.\n\n Returns\n -------\n PyArrow Array\n\n Examples\n --------\n >>> import cudf\n >>> sr = cudf.Series([\"a\", \"b\", None])\n >>> sr.to_arrow()\n <pyarrow.lib.StringArray object at 0x...>\n [\n \"a\",\n \"b\",\n null\n ]\n >>> ind = cudf.Index([\"a\", \"b\", None])\n >>> ind.to_arrow()\n <pyarrow.lib.StringArray object at 0x...>\n [\n \"a\",\n \"b\",\n null\n ]\n \"\"\"\n return self._column.to_arrow()\n\n @property\n def is_unique(self):\n \"\"\"Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n \"\"\"\n return self._column.is_unique\n\n @property\n def is_monotonic(self):\n \"\"\"Return boolean if values in the object are monotonically increasing.\n\n This property is an alias for :attr:`is_monotonic_increasing`.\n\n Returns\n -------\n bool\n \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self):\n \"\"\"Return boolean if values in the object are monotonically increasing.\n\n Returns\n -------\n bool\n \"\"\"\n return self._column.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"Return boolean if values in the object are monotonically decreasing.\n\n Returns\n -------\n bool\n \"\"\"\n return self._column.is_monotonic_decreasing\n\n @property\n def __cuda_array_interface__(self):\n return self._column.__cuda_array_interface__\n\n def factorize(self, na_sentinel=-1):\n \"\"\"Encode the input values as integer labels.\n\n Parameters\n ----------\n na_sentinel : number\n Value to indicate missing category.\n\n Returns\n --------\n (labels, cats) : (cupy.ndarray, cupy.ndarray or Index)\n - *labels* contains the encoded values\n - *cats* contains the categories in order that the N-th\n item corresponds to the (N-1) code.\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series(['a', 'a', 'c'])\n >>> codes, uniques = s.factorize()\n >>> codes\n array([0, 0, 
1], dtype=int8)\n >>> uniques\n StringIndex(['a' 'c'], dtype='object')\n \"\"\"\n return cudf.core.algorithms.factorize(self, na_sentinel=na_sentinel)\n\n def _make_operands_for_binop(\n self,\n other: T,\n fill_value: Any = None,\n reflect: bool = False,\n *args,\n **kwargs,\n ) -> Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]]:\n \"\"\"Generate the dictionary of operands used for a binary operation.\n\n Parameters\n ----------\n other : SingleColumnFrame\n The second operand.\n fill_value : Any, default None\n The value to replace null values with. If ``None``, nulls are not\n filled before the operation.\n reflect : bool, default False\n If ``True``, swap the order of the operands. See\n https://docs.python.org/3/reference/datamodel.html#object.__ror__\n for more information on when this is necessary.\n\n Returns\n -------\n Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]]\n The operands to be passed to _colwise_binop.\n \"\"\"\n # Get the appropriate name for output operations involving two objects\n # that are Series-like objects. The output shares the lhs's name unless\n # the rhs is a _differently_ named Series-like object.\n if (\n isinstance(other, (SingleColumnFrame, pd.Series, pd.Index))\n and self.name != other.name\n ):\n result_name = None\n else:\n result_name = self.name\n\n # This needs to be tested correctly\n if isinstance(other, SingleColumnFrame):\n other = other._column\n elif not _is_scalar_or_zero_d_array(other):\n # Non-scalar right operands are valid iff they convert to columns.\n try:\n other = as_column(other)\n except Exception:\n return NotImplemented\n\n return {result_name: (self._column, other, reflect, fill_value)}\n","sub_path":"python/cudf/cudf/core/single_column_frame.py","file_name":"single_column_frame.py","file_ext":"py","file_size_in_byte":9110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"204506531","text":"#!/usr/bin/env python3\nimport shutil\nimport os\nimport sys\nimport pytest\nimport pyopentxs\nfrom pyopentxs import notary\nimport subprocess\n\n\ndef create_fresh_ot_config():\n # this creates fresh data in ../ot-clean-data/.ot\n if os.path.exists(pyopentxs.config_dir):\n shutil.rmtree(pyopentxs.config_dir)\n\n # create a client wallet just for making the server contract\n os.system(\"opentxs --dummy-passphrase changepw\")\n\n # create server contract and empty the client side data\n setup_data = notary.setup(open('../test-data/sample-contracts/localhost.xml'), total_servers=2)\n p = subprocess.Popen([\"opentxs-notary\", \"--only-init\"], stdin=subprocess.PIPE)\n outs, errs = p.communicate(input=setup_data.getvalue(), timeout=20)\n\n # set cron interval to shorter than default\n config_data = notary.config.read()\n config_data['cron']['ms_between_cron_beats'] = '2500' # in milliseconds\n notary.config.write()\n\n\ndef fresh_setup():\n '''opentxs-notary must be on the PATH'''\n\n create_fresh_ot_config()\n print(\"created fresh config, restarting...\")\n notary.restart()\n print(\"restarted.\")\n # wait for ready\n # doesn't seem to be necessary\n # time.sleep(2)\n\n\nif __name__ == \"__main__\":\n fresh_setup()\n pytest.main(sys.argv[1:])\n pyopentxs.cleanup()\n","sub_path":"python3/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"537267720","text":"import os\nimport json\nimport gzip\nimport numpy as np\nfrom monty.serialization import 
loadfn, dumpfn\nfrom monty.shutil import gzip_dir, compress_file, decompress_file\nfrom monty.io import zopen\n\nfrom fireworks import FireTaskBase, Firework, FWAction, explicit_serialize\n\nfrom pymatgen.io.vasp.outputs import Vasprun, Xdatcar\nfrom pymatgen.analysis.structure_analyzer import VoronoiAnalyzer\n\nfrom atomate.utils.utils import get_logger, env_chk, load_class\nfrom atomate.vasp.firetasks.glue_tasks import CopyVaspOutputs, get_calc_loc\nfrom atomate.vasp.database import VaspCalcDb\n\nfrom mpmorph.analysis.md_data import MD_Data\nfrom mpmorph.analysis.structural_analysis import RadialDistributionFunction, \\\n BondAngleDistribution, CageCorrelationFunction, VoronoiAnalysis\nfrom mpmorph.analysis.transport import VDOS, Viscosity, Diffusion\nfrom mpmorph.runners.rescale_volume import RescaleVolume\n\n__authors__ = 'Nicholas Winner, Muratahan Aykol'\n\nlogger = get_logger(__name__)\n\n\n@explicit_serialize\nclass MDAnalysisTask(FireTaskBase):\n required_params = []\n optional_params = ['time_step', 'get_rdf', 'get_diffusion', 'get_viscosity',\n 'get_vdos', 'get_run_data', 'checkpoint_dirs', 'analysis_spec',\n 'ionic_step_skip', 'ionic_step_offset']\n\n def run_task(self, fw_spec):\n\n # use real defaults; `self.get(key) or True` could never evaluate to False\n get_rdf = self.get('get_rdf', True)\n get_diffusion = self.get('get_diffusion', True)\n get_viscosity = self.get('get_viscosity', True)\n get_vdos = self.get('get_vdos', True)\n get_run_data = self.get('get_run_data', True)\n time_step = self.get('time_step', 2)\n checkpoint_dirs = fw_spec.get('checkpoint_dirs', False)\n\n calc_dir = get_calc_loc(True, fw_spec[\"calc_locs\"])[\"path\"]\n calc_loc = os.path.join(calc_dir, 'XDATCAR.gz')\n\n ionic_step_skip = self.get('ionic_step_skip', 1)\n ionic_step_offset = self.get('ionic_step_offset', 0)\n\n analysis_spec = self.get('analysis_spec', {})\n\n logger.info(\"Reading in ionic_steps...\")\n\n decompress_file(\"ionic_steps.json.gz\")\n ionic_steps = loadfn(\"ionic_steps.json\")\n structures = [s.structure for s in ionic_steps]\n compress_file(\"ionic_steps.json\")\n\n db_dict = {}\n db_dict.update({'density': float(structures[0].density)})\n db_dict.update(structures[0].composition.to_data_dict)\n\n if get_rdf:\n logger.info(\"LOGGER: Calculating radial distribution functions...\")\n rdf = RadialDistributionFunction(structures=structures)\n rdf_dat = rdf.get_radial_distribution_functions(nproc=4)\n db_dict.update({'rdf': rdf.get_rdf_db_dict()})\n del rdf\n del rdf_dat\n\n if get_vdos:\n logger.info(\"LOGGER: Calculating vibrational density of states...\")\n vdos = VDOS(structures)\n vdos_dat = vdos.calc_vdos_spectrum(time_step=time_step*ionic_step_skip)\n vdos_diff = vdos.calc_diffusion_coefficient(time_step=time_step*ionic_step_skip)\n db_dict.update({'vdos': vdos_dat})\n del vdos\n del vdos_dat\n\n if get_diffusion:\n logger.info(\"LOGGER: Calculating the diffusion coefficients...\")\n diffusion = Diffusion(structures, t_step=time_step, l_lim=50, skip_first=250, block_l=1000, ci=0.95)\n D = {'msd':{}, 'vdos':{}}\n for s in structures[0].types_of_specie:\n D['msd'][s.symbol] = diffusion.getD(s.symbol)\n # vdos_diff only exists when the VDOS step ran\n if get_vdos and vdos_diff:\n D['vdos'] = vdos_diff\n db_dict.update({'diffusion': D})\n del D\n\n if get_viscosity:\n logger.info(\"LOGGER: Calculating the viscosity...\")\n viscosities = []\n if checkpoint_dirs:\n for dir in checkpoint_dirs:\n visc = Viscosity(dir).calc_viscosity()\n viscosities.append(visc['viscosity'])\n viscosity_dat = {'viscosity': np.mean(viscosities), 'StdDev': np.std(viscosities)}\n db_dict.update({'viscosity': viscosity_dat})\n del viscosity_dat\n\n if get_run_data:\n if checkpoint_dirs:\n logger.info(\"LOGGER: Assimilating run stats...\")\n data = MD_Data()\n for directory in checkpoint_dirs:\n data.parse_md_data(directory)\n md_stats = data.get_md_stats()\n else:\n logger.info(\"LOGGER: Getting run stats...\")\n data = MD_Data()\n data.parse_md_data(calc_dir)\n md_stats = data.get_md_stats()\n db_dict.update({'md_data': md_stats})\n\n if analysis_spec:\n logger.info(\"LOGGER: Adding user-specified data...\")\n db_dict.update(analysis_spec)\n\n logger.info(\"LOGGER: Pushing data to database collection...\")\n db_file = env_chk(\">>db_file<<\", fw_spec)\n db = VaspCalcDb.from_db_file(db_file, admin=True)\n db.collection = db.db[\"md_data\"]\n db.collection.insert_one(db_dict)\n\n return FWAction()\n\n\n@explicit_serialize\nclass MDStats(FireTaskBase):\n\n required_params = []\n optional_params = ['checkpoint_dirs']\n\n def run_task(self, fw_spec):\n\n checkpoint_dirs = self.get(\"checkpoint_dirs\", False)\n calc_dir = get_calc_loc(True, fw_spec[\"calc_locs\"])[\"path\"]\n\n if checkpoint_dirs:\n logger.info(\"LOGGER: Assimilating run stats...\")\n data = MD_Data()\n for directory in checkpoint_dirs:\n data.parse_md_data(directory)\n md_stats = data.get_md_stats()\n else:\n logger.info(\"LOGGER: Getting run stats...\")\n data = MD_Data()\n data.parse_md_data(calc_dir)\n md_stats = data.get_md_stats()\n\n os.mkdir(os.path.join(calc_dir, 'md_stats'))\n dumpfn(md_stats, os.path.join(calc_dir, 'md_stats', 'md_stats.json'))\n\n\n@explicit_serialize\nclass VaspMdToDbTask(FireTaskBase):\n pass\n\n\n@explicit_serialize\nclass VaspMdToDiffusion(FireTaskBase):\n\n required_params = ['time_step']\n optional_params = ['checkpoint_dirs', 'output_file']\n\n def run_task(self, fw_spec):\n\n time_step = fw_spec.get('time_step', 2)\n checkpoint_dirs = fw_spec.get('checkpoint_dirs', False)\n output_file = fw_spec.get('output_file', 'diffusion.json')\n\n calc_dir = get_calc_loc(True, fw_spec[\"calc_locs\"])[\"path\"]\n calc_loc = os.path.join(calc_dir, 'XDATCAR.gz')\n\n if checkpoint_dirs:\n logger.info(\"LOGGER: Assimilating checkpoint structures\")\n structures = []\n for d in checkpoint_dirs:\n structures.extend(Vasprun(os.path.join(d, 'vasprun.xml.gz')).structures)\n else:\n structures = Xdatcar(calc_loc).structures\n\n db_dict = {}\n db_dict.update({'density': float(structures[0].density)})\n db_dict.update(structures[0].composition.to_data_dict)\n\n logger.info(\"LOGGER: Calculating the diffusion coefficients...\")\n diffusion = Diffusion(structures, t_step=time_step, l_lim=50, skip_first=250, block_l=1000, ci=0.95)\n\n vdos = VDOS(structures)\n vdos_dat = vdos.calc_vdos_spectrum(time_step=time_step)\n vdos_diff = vdos.calc_diffusion_coefficient(time_step=time_step)\n\n D = {'msd': {}, 'vdos': {}}\n for s in structures[0].types_of_specie:\n D['msd'][s.symbol] = diffusion.getD(s.symbol)\n D['vdos'] = vdos_diff\n\n db_dict.update({'diffusion': D})\n\n # open for writing; the file may not exist yet\n with open(output_file, 'w') as f:\n jsn = json.dumps(db_dict)\n f.write(jsn)\n\n return FWAction()\n\n\n@explicit_serialize\nclass StructuralAnalysis(FireTaskBase):\n \"\"\"\n This task uses lists of ionic_steps to perform several\n common structural analysis schemes and write the results.\n\n Analysis currently supported:\n (1) Radial distribution functions\n -including first shell coordination numbers\n (2) Cage correlation functions\n -requires RDFs be calculated\n (3) Potential of Mean Force\n -not structural, but obtained from g(r)\n -requires RDFs be calculated\n (4) Bond Angle distribution 
functions\n (5) Voronoi Analysis\n -be careful, voronoi analysis can take a long time and\n be memory intensive for large trajectories\n (6) Polyhedra connectivity distribution functions\n -connection motifs of the voronoi tetrahedra (second-nn correlations)\n\n required_params:\n data_dir: (str) path to the directory containing the ionic_steps file(s). If a single MD\n run was performed, this is likely {$CURRENT_DIR}/analysis. If checkpointing\n or multiple runs were performed, then it is likely the current directory, which\n is the directory of the analysis firework\n\n optional_params (all default to True):\n calc_rdf: (bool) calculate the radial distribution functions.\n calc_bad: (bool) calculate the bond angle distribution functions.\n calc_voronoi: (bool) calculate the voronoi polyhedra distribution.\n calc_cage: (bool) calculate the cage correlation functions.\n calc_pmf: (bool) calculate the potential of mean force from g(r).\n calc_connectivity: (bool) calculate g(r) decomposed by the second NN connectivity motifs.\n\n \"\"\"\n required_params = ['data_dir']\n optional_params = ['calc_rdf', 'calc_bad', 'calc_voronoi', 'calc_cage', 'calc_pmf', 'calc_connectivity']\n\n def run_task(self, fw_spec):\n\n # Get\n data_dir = self.get('data_dir')\n calc_rdf = self.get('calc_rdf', True)\n calc_bad = self.get('calc_bad', True)\n calc_voronoi = self.get('calc_voronoi', False)\n calc_cage = self.get('calc_cage', True)\n calc_pmf = self.get('calc_pmf', False)\n calc_connectivity = self.get('calc_connectivity', False)\n\n ionic_steps = []\n for root, dirs, files in os.walk(data_dir):\n for f in files:\n if 'ionic_steps' in f:\n name, ext = os.path.splitext(f)\n if ext in ('.gz', '.GZ', '.Z'):\n with gzip.open(f, \"rb\") as gzipped:\n d = json.loads(gzipped.read().decode(\"ascii\"))\n else:\n d = loadfn(f)\n ionic_steps.extend(d)\n\n structures = [step['structure'] for step in ionic_steps]\n\n data_dict = {}\n\n if calc_rdf:\n logger.info(\"LOGGER: Calculating radial distribution functions...\")\n rdf = RadialDistributionFunction(structures=structures)\n rdf.get_radial_distribution_functions(nproc=4)\n cns = rdf.get_coordination_numbers()\n fs = rdf.first_coordination_shell_radius\n data_dict.update({'radial_distribution_functions': rdf.as_dict()})\n data_dict.update({'coordination_numbers': cns})\n\n if calc_cage:\n logger.info(\"LOGGER: Calculating cage correlation function...\")\n ccf = CageCorrelationFunction(structures, fs)\n ccf.get_cage_correlation_function()\n # TODO: Make sure the CCFs work\n\n if calc_pmf:\n logger.info(\"LOGGER: Calculating the potential of mean force...\")\n # TODO: Need to include the implementation of PMF here\n\n if calc_bad:\n logger.info(\"LOGGER: Calculating bond angle distribution functions...\")\n bad = BondAngleDistribution(structures=structures)\n bad.get_bond_angle_distribution(nproc=4)\n data_dict.update({'bond_angle_distribution_functions': bad.as_dict()})\n\n if calc_voronoi:\n logger.info(\"LOGGER: Performing voronoi analysis...\")\n va = VoronoiAnalyzer(structures)\n try:\n poly = va.analyze_structures()\n data_dict.update({'voronoi_polyhedra': poly})\n except MemoryError:\n logger.info(\"ERROR: Voronoi analysis failed due to insufficient memory...\")\n\n if calc_connectivity:\n logger.info(\"LOGGER: Getting the connectivity motif distribution functions...\")\n # TODO: Implement after writing connectivity function\n\n # write structural analysis results to json file and then zip it\n write_dir = os.path.join(os.getcwd(), 'structural_analysis')\n 
os.mkdir(write_dir)\n for k, v in data_dict.items():\n dumpfn(v, os.path.join(write_dir, '{}.json').format(k))\n gzip_dir(write_dir)\n\n return FWAction()\n\n\n@explicit_serialize\nclass ParseSingleTask(FireTaskBase):\n \"\"\"\n In contrast to ParseCheckpointsTask, this task is run at the end of a single MD simulation\n with no checkpointing. It writes the ionic_steps.json file to a folder called \"analysis\"\n in order to keep the functionality of the other analysis tasks more seamless.\n\n required_params:\n (none)\n\n optional_params:\n vasprun: (str) The path of the vasprun file to parse\n Default: {$CURRENT_DIRECTORY}/vasprun.xml.gz\n write_dir: (str) The path of the directory in which to write the ionic_steps.json file\n Default: {$CURRENT_DIRECTORY}/analysis\n filename: (str) The name of the file to which to write the ionic_steps data\n Default: ionic_steps.json (recommended to keep this)\n \"\"\"\n\n required_params = []\n optional_params = ['vasprun', 'write_dir', 'filename']\n\n def run_task(self, fw_spec):\n filepath = self.get('vasprun', os.path.join(os.getcwd(), 'vasprun.xml.gz'))\n write_dir = self.get('write_dir', os.path.join(os.getcwd(), 'analysis'))\n filename = self.get('filename', 'ionic_steps.json')\n\n os.mkdir(write_dir)\n\n ionic_steps = Vasprun(filepath).ionic_steps\n dumpfn(ionic_steps, os.path.join(write_dir, filename))\n compress_file(os.path.join(write_dir, filename))\n\n s = ionic_steps[0]['structure']\n # a plain dict, not a set literal wrapping a dict\n composition = {'composition': s.composition.to_data_dict}\n composition.update({'density': float(s.density)})\n\n dumpfn(composition, os.path.join(write_dir, 'composition.json'))\n compress_file(os.path.join(write_dir, 'composition.json'))\n\n\n@explicit_serialize\nclass ParseCheckpointsTask(FireTaskBase):\n \"\"\"\n This function is used to assimilate an MD workflow into the current directory. For example,\n if you ran 3 separate 9 ps simulations, and those simulations were each split into 9 1-ps\n job executions, then this will assimilate the results into a single location.\n\n required_params:\n checkpoint_dirs: (dict) collection of directories that you are assimilating. Dictionary\n format is {'Simulation_1': [array of checkpoint_dirs],\n 'Simulation_2': [...], ...}\n\n optional_params:\n write_dir: (str) Name of the directory to be created in which to dump the trajectory files.\n Default: current working directory\n\n \"\"\"\n required_params = []\n optional_params = ['write_dir']\n\n def run_task(self, fw_spec):\n checkpoint_dirs = fw_spec.get('checkpoint_dirs')\n write_dir = self.get('write_dir', False)\n\n if write_dir:\n if not os.path.isdir(write_dir):\n os.mkdir(write_dir)\n else:\n write_dir = os.getcwd()\n\n # write each md run (comprised of n checkpoints) to a json file and zip it\n logger.info(\"LOGGER: Assimilating checkpoint data...\")\n ionic_steps = []\n for directory in checkpoint_dirs:\n ionic_steps.extend(Vasprun(os.path.join(directory, \"vasprun.xml.gz\")).ionic_steps)\n dumpfn(ionic_steps, os.path.join(write_dir, 'ionic_steps.json'))\n compress_file(os.path.join(write_dir, 'ionic_steps.json'))\n\n # get composition info\n s = ionic_steps[0]['structure']\n composition = {'composition': s.composition.to_data_dict}\n composition.update({'density': float(s.density)})\n\n # write composition info to json and zip\n dumpfn(composition, os.path.join(write_dir, 'composition.json'))\n compress_file(os.path.join(write_dir, 'composition.json'))\n","sub_path":"mpmorph/workflow/firetasks/parse_outputs.py","file_name":"parse_outputs.py","file_ext":"py","file_size_in_byte":16364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"514903398","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\n\nfrom fuel_plugin_builder import errors\nfrom fuel_plugin_builder.tests.base import BaseTestCase\nfrom fuel_plugin_builder.validators import ValidatorV1\nfrom fuel_plugin_builder.validators import ValidatorV2\n\nfrom fuel_plugin_builder.validators.schemas.v1 import SchemaV1\nfrom fuel_plugin_builder.validators.schemas.v2 import SchemaV2\n\n\n@mock.patch('fuel_plugin_builder.validators.base.utils')\nclass BaseValidator(BaseTestCase):\n\n __test__ = False\n validator_class = None\n schema_class = None\n\n def setUp(self):\n self.plugin_path = '/tmp/plugin_path'\n self.validator = self.validator_class(self.plugin_path)\n\n def test_validate(self, _):\n mocked_methods = [\n 'check_schemas',\n 'check_tasks',\n 'check_releases_paths',\n 'check_compatibility',\n ]\n self.mock_methods(self.validator, mocked_methods)\n self.validator.validate()\n\n self.validator.check_tasks.assert_called_once_with()\n self.validator.check_schemas.assert_called_once_with()\n self.validator.check_releases_paths.assert_called_once_with()\n self.validator.check_compatibility.assert_called_once_with()\n\n def test_check_schemas(self, _):\n mocked_methods = [\n 'check_env_config_attrs',\n 'validate_file_by_schema'\n ]\n self.mock_methods(self.validator, mocked_methods)\n self.validator.check_schemas()\n\n self.assertEqual(\n [mock.call(\n self.schema_class().metadata_schema,\n self.validator.meta_path),\n mock.call(\n self.schema_class().tasks_schema,\n self.validator.tasks_path)],\n self.validator.validate_file_by_schema.call_args_list)\n self.validator.check_env_config_attrs.assert_called_once_with()\n\n def test_check_releases_paths(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'releases': [{\n 'deployment_scripts_path': '/tmp/deployment_scripts_path',\n 'repository_path': '/tmp/repository_path'}]}\n\n utils_mock.exists.return_value = True\n self.validator.check_releases_paths()\n self.assertEqual(\n utils_mock.exists.call_args_list,\n [mock.call('/tmp/deployment_scripts_path'),\n mock.call('/tmp/repository_path')])\n\n def test_check_releases_paths_error(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'releases': [{\n 'deployment_scripts_path': '/tmp/deployment_scripts_path',\n 'repository_path': '/tmp/repository_path'}]}\n\n utils_mock.exists.return_value = False\n with self.assertRaisesRegexp(\n errors.ReleasesDirectoriesError,\n 'Cannot find directories /tmp/deployment_scripts_path'\n ', /tmp/repository_path for release '):\n self.validator.check_releases_paths()\n\n def test_check_env_config_attrs_do_not_fail_if_empty(self, utils_mock):\n utils_mock.parse_yaml.return_value = None\n self.validator.check_env_config_attrs()\n\n def test_check_env_config_attrs_fail_if_none(self, utils_mock):\n utils_mock.parse_yaml.return_value = {'attributes': None}\n with self.assertRaisesRegexp(\n errors.ValidationError,\n \"File '/tmp/plugin_path/environment_config.yaml', None \"\n \"is not of type 'object', value path 'attributes'\"):\n self.validator.check_env_config_attrs()\n\n def test_check_env_config_attrs_checks_metadata(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'attributes': {'metadata': []}}\n\n with self.assertRaisesRegexp(\n errors.ValidationError,\n \"File '/tmp/plugin_path/environment_config.yaml', \\[\\] is \"\n \"not of type 'object', value path 'attributes -> metadata'\"):\n self.validator.check_env_config_attrs()\n\n def 
test_check_env_config_attrs_checks_attrs(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'attributes': {\n 'key1': {\n 'type': True,\n 'label': 'text',\n 'value': 'text',\n 'weight': 1}}}\n\n with self.assertRaisesRegexp(\n errors.ValidationError,\n \"File '/tmp/plugin_path/environment_config.yaml', True is not \"\n \"of type 'string', value path 'attributes -> key1 -> type'\"):\n self.validator.check_env_config_attrs()\n\n def test_check_env_config_attrs_restriction_fails(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'attributes': {\n 'key1': {\n 'type': 'text',\n 'label': 'test',\n 'value': 'test',\n 'weight': 1,\n 'restrictions': [\n {\n 'condition': 'false',\n 'action': 'disable'\n },\n {\n 'condition': True,\n 'action': 'hide'\n }\n ]\n }\n }\n }\n\n with self.assertRaisesRegexp(\n errors.ValidationError,\n \"File '/tmp/plugin_path/environment_config.yaml', True is not \"\n \"of type 'string', value path \"\n \"'attributes -> key1 -> restrictions -> 1 -> condition\"):\n self.validator.check_env_config_attrs()\n\n\nclass TestValidatorV1(BaseValidator):\n\n __test__ = True\n validator_class = ValidatorV1\n schema_class = SchemaV1\n\n @mock.patch('fuel_plugin_builder.validators.validator_v1.utils')\n def test_check_tasks(self, utils_mock):\n mocked_methods = [\n 'validate_schema'\n ]\n self.mock_methods(self.validator, mocked_methods)\n utils_mock.parse_yaml.return_value = [\n {'type': 'puppet', 'parameters': 'param1'},\n {'type': 'shell', 'parameters': 'param2'}]\n\n self.validator.check_tasks()\n\n self.assertEqual(\n [mock.call('param1', self.schema_class().puppet_parameters,\n self.validator.tasks_path,\n value_path=[0, 'parameters']),\n mock.call('param2', self.schema_class().shell_parameters,\n self.validator.tasks_path,\n value_path=[1, 'parameters'])],\n self.validator.validate_schema.call_args_list)\n\n @mock.patch('fuel_plugin_builder.validators.base.utils')\n def test_check_compatibility(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'fuel_version': ['5.1', '6.0', '6.1'],\n 'package_version': '1.0.0'}\n\n with self.assertRaisesRegexp(\n errors.ValidationError,\n 'Current plugin format 1.0.0 is not compatible with 5.1 Fuel'\n ' release. 
Fuel version must be 6.0 or higher.'\n ' Please remove 5.1 version from metadata.yaml file or'\n ' downgrade package_version.'):\n self.validator.check_compatibility()\n\n\nclass TestValidatorV2(BaseValidator):\n\n __test__ = True\n validator_class = ValidatorV2\n schema_class = SchemaV2\n\n @mock.patch('fuel_plugin_builder.validators.validator_v2.utils')\n def test_check_tasks(self, utils_mock):\n mocked_methods = [\n 'validate_schema'\n ]\n self.mock_methods(self.validator, mocked_methods)\n utils_mock.parse_yaml.return_value = [\n {'type': 'puppet', 'parameters': 'param1'},\n {'type': 'shell', 'parameters': 'param2'},\n {'type': 'reboot', 'parameters': 'param3'}]\n\n self.validator.check_tasks()\n\n self.assertEqual(\n [mock.call('param1', self.schema_class().puppet_parameters,\n self.validator.tasks_path,\n value_path=[0, 'parameters']),\n mock.call('param2', self.schema_class().shell_parameters,\n self.validator.tasks_path,\n value_path=[1, 'parameters']),\n mock.call('param3', self.schema_class().reboot_parameters,\n self.validator.tasks_path,\n value_path=[2, 'parameters'])],\n self.validator.validate_schema.call_args_list)\n\n @mock.patch('fuel_plugin_builder.validators.base.utils')\n def test_check_compatibility(self, utils_mock):\n utils_mock.parse_yaml.return_value = {\n 'fuel_version': ['6.0', '6.1'],\n 'package_version': '2.0.0'}\n\n with self.assertRaisesRegexp(\n errors.ValidationError,\n 'Current plugin format 2.0.0 is not compatible with 6.0 Fuel'\n ' release. Fuel version must be 6.1 or higher.'\n ' Please remove 6.0 version from metadata.yaml file or'\n ' downgrade package_version.'):\n self.validator.check_compatibility()\n","sub_path":"fuel_plugin_builder/fuel_plugin_builder/tests/test_validator.py","file_name":"test_validator.py","file_ext":"py","file_size_in_byte":9784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"90297890","text":"\nimport dash\nimport dash_daq as daq\nimport plotly.express as px\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nfrom django_plotly_dash import DjangoDash\nimport pandas as pd\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = DjangoDash('SimpleExample', external_stylesheets=external_stylesheets)\n\n\napp.layout = html.Div(children=[\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},\n {'x': [1, 2, 3], 'y': [2, 4, 5],\n 'type': 'bar', 'name': u'Montréal'},\n ],\n }\n )\n])\n\n\napp = DjangoDash('Pizza', external_stylesheets=external_stylesheets)\n\napp.layout = html.Div([\n daq.Thermometer(\n value=5,\n scale={'start': 2, 'interval': 3,\n 'labelInterval': 2, 'custom': {\n '2': 'ideal temperature',\n '5': 'projected temperature'\n }}\n )\n])\n","sub_path":"dashboard/grafs.py","file_name":"grafs.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"382459334","text":"# Piglatin translator\n# translates english to Piglatin\n\nprint('''\n Welcome to the Piglatin game!\n ''')\n\nprint('''\n Rules:\n\n Please enter the phrase you would like to translate, without punctuation\n\n For words that begin with consonant sounds, all letters before\n the initial vowel are placed at the end of the word sequence.\n ''')\n\nprint('''\n For words that begin with consonant clusters (multiple consonants 
that\n form one sound), the whole sound is added to the end.\n ''')\n\nprint('''\n For words that begin with vowel sounds, just add \"ay\" to the end.\n ''')\n\ndef translator(sentence):\n '''when passed a string, translates string to piglatin'''\n # Starts by splitting the string into a list of strings\n # Checks that each string is long enough to translate. If not, adds the un-\n # translated string to the translated_phrases string.\n # If the word is three characters or longer, translator determines\n # whether the word starts with a consonant, consonant cluster or vowel.\n # If the word starts with a vowel, call vowel_translate.\n # If the word starts with a consonant, call consonant_translate\n # If the word starts with a consonant cluster, call cluster_translate.\n\n word_list = sentence.split(' ')\n translated_phrase = ''\n\n for word in word_list:\n\n if len(word) < 3:\n translated_phrase = translated_phrase + word + ' '\n\n else:\n word = word.lower()\n vowels = ['a', 'e', 'i', 'o', 'u', 'y']\n\n if word[0] in vowels:\n piglatin_translation = vowel_translate(word)\n translated_phrase = translated_phrase + piglatin_translation\n\n else:\n if word[1] in vowels:\n piglatin_translation = consonant_translate(word)\n translated_phrase = translated_phrase + piglatin_translation\n\n else:\n piglatin_translation = cluster_translate(word)\n translated_phrase = translated_phrase + piglatin_translation\n\n return translated_phrase\n\n\ndef vowel_translate(word):\n return word + 'ay '\n\n\ndef consonant_translate(word):\n first_letter = word[0]\n new_string = word[1:]\n return new_string + first_letter + 'ay '\n\n\ndef cluster_translate(word):\n cluster = word[:2]\n new_string = word[2:]\n return new_string + cluster + 'ay '\n\n\nif __name__ == '__main__':\n print(translator(input('Please enter the phrase you would like to translate: ')))\n","sub_path":"piglatin.py","file_name":"piglatin.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"445869456","text":"from django.core.management.base import BaseCommand, CommandError\nfrom mturk_scraper.models import RecipeURL\n\n\nclass Command(BaseCommand):\n help = 'Import recipe urls from text file'\n\n def add_arguments(self, parser):\n parser.add_argument('--path', help='Folder where json files are')\n parser.add_argument('--name', help=\"Which food are these urls for\")\n\n def handle(self, *args, **options):\n\n text_path = options['path']\n recipe_name = options['name']\n with open(text_path, 'r') as f:\n urls = f.readlines()\n RecipeURL.objects.bulk_create([\n RecipeURL(url=url.strip(), group_name=recipe_name)\n for url in urls])\n","sub_path":"recipescape_api/mturk_scraper/management/commands/import_urls.py","file_name":"import_urls.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"247156404","text":"# %load q02_data_cleaning_all_2/build.py\n# Default Imports\nimport pandas as pd\nimport numpy as np\nfrom greyatomlib.logistic_regression_project.q02_data_cleaning_all.build import data_cleaning\nfrom greyatomlib.logistic_regression_project.q01_outlier_removal.build import outlier_removal\nfrom sklearn.preprocessing import LabelEncoder\nimport warnings\n\nloan_data = pd.read_csv('data/loan_prediction_uncleaned.csv')\nloan_data = loan_data.drop('Loan_ID', 1)\nloan_data = outlier_removal(loan_data)\nX, y, X_train, X_test, y_train, y_test = 
data_cleaning(loan_data)\n\n\n# Write your solution here :\ndef data_cleaning_2(X_train, X_test, y_train, y_test):\n num_cols = X_train._get_numeric_data().columns\n for col in num_cols:\n X_train[col] = np.sqrt(X_train[col])\n X_test[col] = np.sqrt(X_test[col])\n cat_data = X_train[['Self_Employed', 'Married', 'Dependents', 'Gender', 'Property_Area', 'Education']]\n enc_res = pd.get_dummies(cat_data,drop_first=True)\n X_train = X_train.join(enc_res)\n for col in ['Self_Employed', 'Married', 'Dependents', 'Gender', 'Property_Area', 'Education']:\n X_train.drop(col, axis=1, inplace=True)\n cat_data = X_test[['Self_Employed', 'Married', 'Dependents', 'Gender', 'Property_Area', 'Education']]\n enc_res = pd.get_dummies(cat_data,drop_first=True)\n X_test = X_test.join(enc_res)\n for col in ['Self_Employed', 'Married', 'Dependents', 'Gender', 'Property_Area', 'Education']:\n X_test.drop(col, axis=1, inplace=True)\n return X_train, X_test, y_train, y_test\n\n\n","sub_path":"q02_data_cleaning_all_2/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"105866896","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\ntrain_data = pd.read_csv(\"data/public_listings.csv\")\n\n# Columns with mixed type\ntrain_data.columns[[15, 24, 33, 38, 42, 43, 51]]\n\ntrain_data[train_data.columns[[15, 24, 33, 38, 42, 43, 51]]]\n\n# Row 17613 causes a problem - Read csv file again\ntrain_data = pd.read_csv(\"data/public_listings.csv\", skiprows=[17614])\n\n# Check the first 3 rows\ntrain_data.head(3)\n\n# Shape\ntrain_data.shape\n\n# Basic info\ntrain_data.info()\n\n# Missing values\ntotal = train_data.isnull().sum().sort_values(ascending=False)\npct = round((train_data.isnull().sum() / len(train_data) * 100), 2).sort_values(ascending=False)\n\nmissing_data = pd.concat([total, pct], axis=1, keys=[\"Total\", \"Percent\"])\nmissing_data.head(10)\n\nmissing_data.tail(3)\n","sub_path":"challenge_4/data_exploration.py","file_name":"data_exploration.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"139201013","text":"import csv\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\nimport string\nimport unicodedata\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom math import sqrt, ceil\n\nnp.random.seed(0) \n\nall_letters = string.ascii_letters + \" .,;'-\"\nn_letters = len(all_letters)\n\n\n# taken from \"Classifying Names with a Character-Level RNN\" pytorch tutorial\n# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427\ndef unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n and c in all_letters\n )\n\n\n# taken from \"Classifying Names with a Character-Level RNN\" pytorch tutorial\n# Find letter index from all_letters, e.g. 
\"a\" = 0\ndef letter_to_index(letter):\n    return all_letters.find(letter)\n\n# taken from \"Classifying Names with a Character-Level RNN\" pytorch tutorial\n# Turn a line into a <line_length x 1 x n_letters>,\n# or an array of one-hot letter vectors\ndef line_to_tensor(line):\n    tensor = torch.zeros(len(line), 1, n_letters)\n    for li, letter in enumerate(line):\n        tensor[li][0][letter_to_index(letter)] = 1\n    return tensor\n\n\ndef display_distribution(all_categories, country_dict):\n    # display histogram of distribution of country data points\n    num = [] # counts number of cities for each country\n    index=0\n    for idx, country in enumerate(all_categories):\n        for city in country_dict[country]:\n            num.append(idx)\n\n    n, bins, patches = plt.hist(num, 244, facecolor='green', alpha=0.75)\n\n    plt.xlabel('country nr')\n    plt.ylabel('number of cities')\n    plt.title('world-cities distribution')\n\n    plt.grid(True)\n    plt.show()\n\ndef build_dict_world_cities():\n\n    # Build dictionary with city/country pairs - world-cities data set\n    country_dict = {}\n    all_categories = []\n    old_country = \"\"\n    temp_list = []\n    with open('../data/world-cities_csv.csv', 'r', encoding=\"utf8\") as f:\n        reader = csv.reader(f)\n        for row in reader:\n            city=row[0]\n            country=row[1]\n\n            if country == \"country\":\n                continue\n\n            ascii_name = unicode_to_ascii(city)\n\n            if country not in all_categories:\n                if len(old_country) > 0:\n                    country_dict[old_country]=temp_list\n                old_country=country\n\n                all_categories.append(country)\n                temp_list=[]\n\n                if(len(ascii_name) > 0):\n                    temp_list.append(ascii_name)\n            else:\n                if(len(ascii_name) > 0):\n                    temp_list.append(ascii_name)\n    \n    # last country\n    country_dict[old_country] = temp_list\n\n    n_categories = len(all_categories)\n\n    return country_dict, all_categories, n_categories\n\n\ndef build_dict_geonames():\n    # Build dictionary with city/country pairs - geonames data set\n    country_dict = {}\n    all_categories = []\n    old_country = \"\"\n    temp_list = []\n    with open('cities1000.txt', encoding=\"utf8\") as f:\n        for line in f:\n            a=line.split('\\t')\n            country=a[8]\n            city=a[2]\n            if country not in all_categories:\n                if len(old_country) > 0:\n                    country_dict[old_country]=temp_list\n                old_country=country\n                all_categories.append(country)\n                temp_list=[]\n\n            if(len(city) > 0):\n                # X.append(city)\n                # y.append(country)\n                temp_list.append(city)\n    # last country\n    country_dict[old_country] = temp_list\n    n_categories = len(all_categories)\n\n    # Build dictionary with geonames country code/country name pairs\n    code_dict = {}\n    with open('countryInfo.txt', encoding=\"utf8\") as f:\n        for line in f:\n            a=line.split('\\t')\n            code=a[0]\n            countryname=a[4]\n            code_dict[code]=countryname\n\n    return country_dict, all_categories, n_categories, code_dict #, X, y\n\n\ndef partition_data():\n\n    train_set = {}\n    val_set = {}\n    test_set = {}\n\n    tot_test=0\n    tot_val=0\n    tot_train=0\n    for category in all_categories:\n        cities=country_dict[category]\n        np.random.shuffle(cities)\n        temptot=len(cities)\n        endtrain=ceil(0.7 * temptot)\n        endval=ceil(0.7 * temptot) + ceil(0.2 * temptot)\n        endtest=temptot\n\n        print(category, endtrain, (endval-endtrain), (endtest-endval), \\\n            endtrain + (endval-endtrain) + (endtest-endval), temptot)\n\n        tot_test += (endtest-endval)\n        tot_val += (endval - endtrain)\n        tot_train += endtrain\n\n        if endtrain > 0:\n            train_set[category] = cities[0:endtrain]\n        if endval > 0:\n            val_set[category] = cities[endtrain:endval]\n        if endval < endtest:\n            test_set[category] = cities[endval:endtest]\n\n\n    # print how many samples there are in the training, validation and 
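# A small sanity check of the one-hot encoding above (illustrative; assumes
# line_to_tensor, letter_to_index and n_letters from this module):
t = line_to_tensor('ab')
assert t.shape == (2, 1, n_letters)        # one time step per character
assert t[0][0][letter_to_index('a')] == 1  # exactly one hot entry per step
assert int(t.sum()) == 2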
test\n # set respectively\n print('tot train', tot_train, 'tot val', tot_val, 'tot test', tot_test)\n\n return train_set, val_set, test_set, tot_train, tot_val, tot_test\n\ndef partition_x_y():\n\n X_train, y_train, X_val, y_val, X_test, y_test = [], [], [], [], [], []\n for country in train_set:\n cities=train_set[country]\n for city in cities:\n X_train.append(city)\n y_train.append(country)\n\n for country in val_set:\n cities=val_set[country]\n for city in cities:\n X_val.append(city)\n y_val.append(country)\n\n for country in test_set:\n cities=test_set[country]\n for city in cities:\n X_test.append(city)\n y_test.append(country) \n\n tot_tr=np.concatenate((np.expand_dims(X_train, axis=1), np.expand_dims(y_train, axis=1)), axis=1)\n np.random.shuffle(tot_tr)\n X_train=tot_tr[:, 0]\n y_train=tot_tr[:, 1]\n\n tot_tr=np.concatenate((np.expand_dims(X_test, axis=1), np.expand_dims(y_test, axis=1)), axis=1)\n np.random.shuffle(tot_tr)\n X_test=tot_tr[:, 0]\n y_test=tot_tr[:, 1]\n\n tot_tr=np.concatenate((np.expand_dims(X_val, axis=1), np.expand_dims(y_val, axis=1)), axis=1)\n np.random.shuffle(tot_tr)\n X_val=tot_tr[:, 0]\n y_val=tot_tr[:, 1]\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\ncountry_dict, all_categories, n_categories, code_dict = build_dict_geonames()\n\n# only keeping countries with at least 300 cities -> this will amount to 55 categories (for geonames data set)\nbig_country_dict={}\nbig_all_cats = []\ntot=0\ntotfiltered=0\n\nfor country in all_categories:\n tot += len(country_dict[country])\n if(len(country_dict[country]) > 300):\n big_all_cats.append(country)\n big_country_dict[country] = country_dict[country]\n totfiltered+=len(big_country_dict[country])\n\n\nprint('remaining categories after filtering')\nfor idx, country in enumerate(big_all_cats):\n print('(' + country + ',' + code_dict[country] + ')') if(idx==len(big_all_cats)-1) else print('(' + country + ',' + code_dict[country] + ')', end=', ')\n\n\nn_categories = len(big_all_cats)\n\n\n# # creating weight vector to handle unbalanced training set\nnum = [] # counts number of cities for each country\nindex=0\nfor country in big_all_cats:\n num.append(len(big_country_dict[country]))\n\nmx = np.max(num)\nclass_weights = []\nfor n in num:\n class_weights.append(int(mx/n))\nclass_weights=torch.FloatTensor(class_weights)\n\n\n# comment this (and comment awaycreation of class_weights vector abovee) if you want to run on ~all~ the data\nall_categories=big_all_cats\ncountry_dict=big_country_dict\n\ntrain_set, val_set, test_set, tot_train, tot_val, tot_test = partition_data()\nX_train, y_train, X_val, y_val, X_test, y_test = partition_x_y() # for \"train_model_deterministic\"\n\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"301199047","text":"from __future__ import division, print_function\n# coding=utf-8\nimport sys\nimport os\nimport glob\nimport re\nimport numpy as np\n\n# Keras\nfrom keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\n# Define a flask app\napp = Flask(__name__)\n\n# Model saved with Keras model.save()\nMODEL_PATH = 'CNN_MODEL.h5'\n\n# Load your trained model\nmodel = 
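# The class_weights built above use int(mx/n), which truncates: ratios such as
# 1.0-1.99 all collapse to weight 1. A hedged float alternative (the counts
# here are illustrative), usable directly by torch's loss functions:
import torch
num = [5000, 1200, 310]  # e.g. cities per country
weights = torch.tensor([max(num) / n for n in num], dtype=torch.float)
loss_fn = torch.nn.CrossEntropyLoss(weight=weights)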
load_model(MODEL_PATH)\n# model._make_predict_function() # Necessary\n# print('Model loaded. Start serving...')\n\n# You can also use pretrained model from Keras\n# Check https://keras.io/applications/\n#from keras.applications.resnet50 import ResNet50\n#model = ResNet50(weights='imagenet')\n# model.save('')\nprint('Model loaded. Check http://127.0.0.1:5000/')\n\n\ndef model_predict(img_path, model):\n test_image = image.load_img(img_path, target_size=(120, 160, 3))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis=0)\n test_image__mean = np.mean(test_image)\n test_image_std = np.std(test_image)\n test_image = (test_image - test_image__mean)/test_image_std\n preds = model.predict(test_image)\n pred_class = model.predict_classes(test_image)\n return preds, pred_class\n\n\n@app.route('/', methods=['GET'])\ndef index():\n # Main page\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['file']\n\n # Save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'uploads', secure_filename(f.filename))\n f.save(file_path)\n\n # Make prediction\n preds, pred_class = model_predict(file_path, model)\n label_list = ['akiec', 'bcc', 'bkl', 'df', 'nv', 'mel', 'vasc']\n l = ['Actinic keratoses', 'Basal cell carcinoma', 'Benign keratosis-like lesions',\n 'Dermatofibroma', 'Melanocytic nevi', 'Melanoma', 'Vascular lesions']\n\n result = l[preds.argmax()]\n # Process your result for human\n # pred_class = preds.argmax(axis=-1) # Simple argmax\n # Convert to string\n return result\n return \"hello\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"35293001","text":"import os\nfrom functions import *\n\t\ndef get_args():\n\n\tparser = ArgumentParser()\n\t\n\tparser.add_argument(\"k\", help=\"display a square of a given number\",\n\t\t\t\t\t\ttype=int)\n\n\tparser.add_argument(\"time\", help=\"time length of detections\",\n\t\t\t\t\t\ttype=int)\n\n\tparser.add_argument(\"num_dets\", help=\"number of detections per frame\",\n\t\t\t\t\t\ttype=int)\n\n\targs = parser.parse_args()\n\treturn args.k, args.time, args.num_dets\n\ndef cleanup():\n\tfiles = [\n\t\t'data/c.txt',\n\t\t'data/c2.txt',\n\t\t'data/detections.txt',\n\t\t'data/edges.txt',\n\t\t'data/indices.json',\n\t\t'data/rangelist.json',\n\t\t'solutions/exp.txt',\n\t\t'solutions/exp2.txt',\n\t\t'mats/A.mat',\n\t\t'mats/G.mat',\n\t\t'mats/b.mat',\n\t\t'mats/h.mat',\n\t\t'mats/c.mat',\n\t\t'mats/c2.mat'\n\t]\n\n\tfor fl in files:\n\t\ttry:\n\t\t\tos.remove(fl)\n\t\texcept: \t\n\t\t\tpass\n\ndef get_kruser():\n\tpass\n\t\ndef main():\n\tcleanup()\n\n\tk, time, num_dets = get_args()\n\t#d = get_random_data()\n\tprint('getting detections')\n\t#d = get_grid()\n\t#d = get_stochastic_data(time, num_dets)\n\t#d = get_whirligig_detections()\n\td = generate_toy_data()\n\t#d_inds = vert_indices(d)\n\n\t# Get different types of edges\n\tprint('getting data structures')\n\te_source, e_sink, e_temp, e_loop, e_rep, e_all, inflows, outflows = get_data_structures(d)\n\tindices = get_indices(d, e_all, e_source, e_sink, e_temp, e_loop, e_rep)\n\t#print(inflows.keys(), outflows.keys())\n\t\n\t# let's get flow computation to work correctly\n\t#inflows, outflows = 
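# A minimal client sketch for the /predict route above (the route, the POST
# method and the 'file' field come from the handler; the URL, filename and the
# use of the requests library are illustrative):
import requests

with open('lesion.jpg', 'rb') as f:  # any local test image
    resp = requests.post('http://127.0.0.1:5000/predict', files={'file': f})
print(resp.text)  # one of the seven lesion class names returned by upload()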
generate_flows(e_all, e_source, e_sink)\n\tprint('getting objective')\n\tc = get_objective_one(d, e_all, e_source, e_sink, e_temp, e_loop, e_rep)\n\n\t#print('c_max', c.max())\n\n\t#plt.figure()\n\t#plt.hist(c)\n\t#plt.show()\n\t\n\t#c = generate_cost(e_source, e_cross, e_sink, d)\n\tprint('getting constraints')\n\tA, b, G, h = generate_constraints(d, e_all, e_source, e_sink, e_temp, e_loop, e_rep, indices, inflows, outflows, k)\n\t#print('ab', A, b) \n\t#print('gh', G, h)\n\t#print('c', c)\n\t#print(c)\n\n\t#print(e_all)\n\n\trangelist = get_rangelist(e_all, e_source, e_sink, e_temp, e_loop, e_rep)\n\n\tsave_vars(c, e_all, A, b, G, h, d, rangelist, indices)#, G, h) # save all of these variables to file\n\n\t#print()\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"repulsor.py","file_name":"repulsor.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"375029263","text":"def solution(A):\n    list_of_unique_elems = list(set(A))\n    num_without_pair = None\n    for elem in list_of_unique_elems:\n        condition = [1 for x in A if x == elem]\n        accum = sum(condition)\n        if accum % 2 == 1:\n            num_without_pair = elem\n    return num_without_pair\n\n\ndef solution2(A):\n    unpaired = [A[0]]\n\n    for each_elem in A[1:]:\n        matched = False\n        num_unpaired = len(unpaired)\n        i = 0\n        while i < num_unpaired and not matched:\n            if each_elem == unpaired[i]:\n                unpaired.pop(i)\n                matched = True\n            else:\n                i = i + 1\n        if not matched:\n            unpaired.append(each_elem)\n\n    return unpaired[0]\n\n\ndef solution3(A):\n    A.sort()\n    A = A + [A[-1]]\n    for x in range(0, len(A), 2):\n        a = A[x]\n        b = A[x + 1]\n        if a != b:\n            return a\n    return A[-1]\n","sub_path":"code/lesson_02.py","file_name":"lesson_02.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"653216352","text":"#!/usr/bin/env python\n\nimport rospy\nimport math\n\nfrom datetime import datetime  # needed by the key-release timeout bookkeeping below\nfrom robosub2019.msg import Key\nfrom robosub2019.msg import MotorCommands\n\n\n'''\nMotorCommands:\n    0: Port Forward\n    1: Starboard Forward\n    2: Fore Strafe\n    3: Aft Strafe\n    4: Port Fore Depth\n    5: Starboard Fore Depth\n    6: Port Aft Depth\n    7: Starboard Aft Depth\n'''\n\n\n## Start Variables\ncom_pub = None\n\ncom_msg = None\n\ncommand_timeout = dict()\ntimeout_delay = 200\n\npower = 1\n\nkey_mappings = {119: {0: 1, 1: 1}, # w - forward\n                97: {2: -1, 3: -1}, # a - strafe left\n                115: {0: -1, 1: -1}, # s - reverse\n                100: {2: 1, 3: 1}, # d - strafe right\n                113: {2: -1, 3: 1}, # q - rotate left\n                101: {2: 1, 3: -1}, # e - rotate right\n                99: {4: -1, 5: -1, 6: -1, 7: -1}, # c - descend\n                32: {4: 1, 5: 1, 6: 1, 7: 1}, # space - ascend\n                120: {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0}, # x - stop all\n                116: {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1}, # t - start all\n                49: {0: 1}, # 1 - port forward\n                50: {1: 1}, # 2 - starboard forward\n                51: {2: 1}, # 3 - fore strafe\n                52: {3: 1}, # 4 - aft strafe\n                53: {4: 1}, # 5 - port fore depth\n                54: {5: 1}, # 6 - starboard fore depth\n                55: {6: 1}, # 7 - port aft depth\n                56: {7: 1}} # 8 - starboard aft depth\n\n## End Variables\n\n\ndef user_input(key, down):\n    global command_timeout\n    \n    key = int(key.code)\n\n    if not down:\n        command_timeout[key] = datetime.now().microsecond + timeout_delay\n        return\n\n    set_commands(key)\n\n\ndef set_commands(key, power = 1):\n    global com_msg\n\n    if key in key_mappings:\n        for key, value in key_mappings[key].items():\n            com_msg.commands[key] = value * 
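# For the odd-one-out problem that lesson_02.py above solves three ways
# (roughly O(n^2), O(n^2) and O(n log n)), the classic O(n) answer is a
# running XOR -- a sketch, not part of the original file:
from functools import reduce
from operator import xor

def solution_xor(A):
    # paired values cancel bitwise, leaving only the unpaired element
    return reduce(xor, A)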
power\n\n\ndef key_down(key):\n user_input(key, True)\n\n\ndef key_up(key):\n user_input(key, False)\n\n\ndef send_commands():\n global com_msg, com_pub\n\n com_msg.header.seq += 1\n com_msg.header.stamp = rospy.get_rostime()\n \n com_pub.publish(com_msg)\n\n\n\ndef main():\n global com_msg, com_pub, command_timeout\n\n rospy.init_node('UserInput')\n rospy.Subscriber(\"keyboard/keydown\", Key, key_down)\n rospy.Subscriber(\"keyboard/keyup\", Key, key_up)\n\n com_pub = rospy.Publisher(\"command/motor\", MotorCommands, queue_size=32)\n\n com_msg = MotorCommands()\n com_msg.header.seq = 0\n com_msg.header.stamp = rospy.get_rostime()\n com_msg.header.frame_id = \"0\"\n\n rate = rospy.Rate(8)\n while not rospy.is_shutdown():\n # Need timeout because of remote desktop latency\n for key, value in command_timeout.items():\n if datetime.now().microsecond > value:\n set_commands(key, 0)\n command_timeout.pop(key, None)\n\n send_commands()\n rate.sleep()\n \n rospy.spin()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"src/user_input.py","file_name":"user_input.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"530530336","text":"\"\"\"\nDP implementation\n\n'.' matches any single character\n'*' matches zero or more of the preceding element\n\"\"\"\n\ndef is_match(input, pattern):\n if len(pattern)==0:\n return len(input)==0\n\n if len(pattern)>1 and pattern[1] == \"*\":\n return is_match(input, pattern[2:]) or (len(input)!=0 and (pattern[0]==\".\" or pattern[0]==input[0]) and is_match(input[1:], pattern))\n else:\n return len(input)!=0 and (pattern[0]==\".\" or pattern[0]==input[0]) and is_match(input[1:], pattern[1:])\n\ndef test_match(input, pattern, expected):\n result = is_match(input, pattern)\n\n if result != expected:\n print(\"input={} pattern={} match={} expected={}\".format(input, pattern, result, expected))\n\n return result\n\nif __name__ == \"__main__\":\n test_match(\"aa\", \"a*\",True)\n test_match(\"aa\", \"a\", False)\n test_match(\"aab\", \"c*a*b\", True)\n test_match(\"aab\", \"c*x*b*a\", False)\n test_match(\"ab\", \".*\", True)\n test_match(\"aab\", \"c*x*a*b\", True)\n test_match(\"mississippi\", \"mis*is*p*.\", False)\n test_match(\"ab\", \".*c\", False)\n test_match(\"aaa\", \"a*a\", True)\n test_match(\"mississippi\", \"mis*is*ip*.\", True)\n test_match(\"a\", \"ab*.\", False)\n test_match(\"aj\", \"ab*.\", True)\n test_match(\"ab\", \".*..\", True)\n test_match(\"a\", \"ab*c*\", True)\n","sub_path":"leetcode/regular_expression_10/regex_dp.py","file_name":"regex_dp.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314505319","text":"para_dict = {\n 'random_tap_cnt': 0, # 随机点击事件次数\n 'bank_trading_cnt': 300, # 消费循环次数\n 'nonsecret_trading_cnt': 300, # 免密循环次数\n}\n\ndesired_caps = {\n 'platformName': 'Android',\n 'platformVersion': '5.1.1',\n 'deviceName': '0123456789ABCDEF',\n 'app': '',\n 'appPackage': 'com.centerm.cpay.payment',\n 'appActivity': '.activity.WelcomeActivity',\n 'appWaitActivity': '.activity.MainActivity',\n 'newCommandTimeout': '120',\n 'noReset': True,\n}\n\nActivityCaps = {\n 'Welcome': '.activity.WelcomeActivity',\n 'Trading': '.activity.TradingActivity',\n 'Main': '.activity.MainActivity',\n 'InputMoney': '.activity.InputMoneyActivity',\n 'CheckCard': '.activity.CheckCardActivity',\n 'InputPasswd': '.activity.InputPasswdActivity',\n 'Esignatrue': 
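# A runtime hazard in user_input.py's main loop above: it pops entries from
# command_timeout while iterating it, which raises RuntimeError ("dictionary
# changed size during iteration") on Python 3. A hedged rewrite of that sweep,
# keeping the file's names (note .microsecond also wraps every second, so a
# monotonic clock would be sturdier still):
for key, value in list(command_timeout.items()):  # snapshot before mutating
    if datetime.now().microsecond > value:
        set_commands(key, 0)
        command_timeout.pop(key, None)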
'.activity.EsignatrueActivity',\n 'Result': '.activity.ResultActivity',\n}\n\npinPad = {\n '0': 'com.centerm.dev.pinpad:id/btn_key0',\n '1': 'com.centerm.dev.pinpad:id/btn_key1',\n '2': 'com.centerm.dev.pinpad:id/btn_key2',\n '3': 'com.centerm.dev.pinpad:id/btn_key3',\n '4': 'com.centerm.dev.pinpad:id/btn_key4',\n '5': 'com.centerm.dev.pinpad:id/btn_key5',\n '6': 'com.centerm.dev.pinpad:id/btn_key6',\n '7': 'com.centerm.dev.pinpad:id/btn_key7',\n '8': 'com.centerm.dev.pinpad:id/btn_key8',\n '9': 'com.centerm.dev.pinpad:id/btn_key9',\n 'cancle': 'com.centerm.dev.pinpad:id/btn_key_cancel',\n 'clear': 'com.centerm.dev.pinpad:id/btn_key_clear',\n 'confirm': 'com.centerm.dev.pinpad:id/btn_key_confirm'\n}\n\nEntryItem = {\n 'ic': 'bank_card_entry',\n 'wx': 'wx_entry',\n 'ali': 'ali_entry',\n 'sign': [(90, 430)],\n 'non-secret': [(270, 430)],\n # 'non-secret': [(100, 100)],\n 'citizen': [(450, 430)],\n 'point': [(90, 600)],\n 'point-query': [(270, 600)],\n 'fshl': [(450, 600)],\n 'balance': [(90, 780)],\n 'transfer': [(270, 780)],\n 'bank-revoke': [(450, 780)],\n 'scan-revoke': [(90, 430)],\n 'pre-authorization': [(270, 430)],\n 'trading-query': [(450, 430)],\n 'sales-return': [(90, 600)],\n 'pjs': [(270, 600)],\n 'setting': [(450, 600)],\n}\n\nTradingType = {\n 'scan': 'go_scan_pay',\n 'code': 'go_code_pay',\n}\n\nElementID = {\n 'dialog_msg': 'dialog_message',\n 'positive_btn': 'positive_btn',\n 'negative_btn': 'negative_btn',\n 'result_back_btn': 'result_back_btn',\n 'title_show': 'title_show',\n 'loading_tip_show': 'loading_tip_show',\n 'number_pad_show3': 'number_pad_show3',\n 'amt_confirm_btn': 'amt_confirm_btn',\n 'confirm_btn': 'confirm_btn',\n 'amount_show': 'amount_show',\n 'entry_name_show': 'entry_name_show',\n 'entry_top_name_show': 'entry_top_name_show',\n 'back_btn': 'back_btn',\n 'result_text': 'result_text',\n 'card': 'bank_card_entry',\n 'wx': 'wx_entry',\n 'ali': 'ali_entry',\n 'scan': 'go_scan_pay',\n 'code': 'go_code_pay',\n 'values': 'com.centerm.cpay.payment:id/value'\n}\n","sub_path":"config/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347509462","text":"import random\r\n\r\nWIDTH = 8 \r\nHEIGHT = 8\r\n\r\n# Getting new board input\r\ndef game_board():\r\n board = []\r\n for column in range(HEIGHT):\r\n board.append(input())\r\n return board\r\n\r\n# Selecting the player tile\r\ndef enter_player_tile():\r\n tile = ''\r\n while not (tile == 'W' or tile == 'B'):\r\n print('Do you want to be W or B ?')\r\n tile = input().upper()\r\n if tile == 'W':\r\n return['W','B','player1']\r\n else:\r\n return['B','W','player1']\r\n \r\n# who gets first move\r\ndef who_goes_first():\r\n #random choose the player who goes first \r\n if random.randint(0, 1) == 0:\r\n return 'player1'\r\n else:\r\n return 'player2'\r\n\r\n# Printing the existing board\r\ndef print_board(board):\r\n for column in range(HEIGHT):\r\n print(''.join(map(str, board[column])))\r\n return board\r\n\r\n#score board\r\ndef Score_Board(board):\r\n # Determine the score by counting the tiles. 
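# A hedged sketch of how the pinPad resource-id map above might be driven in a
# test (assumes a Selenium/Appium-style driver exposing find_element; the
# helper itself is illustrative, not part of conf.py):
def enter_pin(driver, pin):
    # tap each digit's on-screen key by resource id, then confirm
    for digit in pin:
        driver.find_element('id', pinPad[digit]).click()
    driver.find_element('id', pinPad['confirm']).click()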
Returns a dictionary with keys 'W' and 'B'.\r\n W_score = 0\r\n B_score = 0\r\n for x in range(8):\r\n for y in range(8):\r\n if board[x][y] == 'W':\r\n W_score += 1\r\n if board[x][y] == 'B':\r\n B_score += 1\r\n return {'W':W_score, 'B':B_score}\r\n\r\n#getting the players move\r\ndef get_player_move(board, player_move):\r\n keys = '1 2 3 4 5 6 7 8'.split()\r\n\r\n while True:\r\n print('Enter your Move, \"Q\" to end the game, or \"L\" to available moves.')\r\n move = input()\r\n if move == 'Q':\r\n return 'quit'\r\n if move == 'L':\r\n return 'list'\r\n if move[0] == 'M' and len(move) == 3 and move[1] in keys and move[2] in keys:\r\n x = int(move[1])\r\n y = int(move[2])\r\n if is_valid_move(board, player_move, x-1, y-1) == False:\r\n print('invalid move')\r\n continue\r\n else:\r\n break\r\n break\r\n else:\r\n print('That is not a valid move. Enter the column (1-8) and then the row (1-8).')\r\n print('For example, 81 will move on the top-right corner.')\r\n return [x, y]\r\n\r\ndef get_valid_moves(board, tile):\r\n # Returns a list of [x,y] lists of valid moves for the given player on the given board.\r\n valid_moves = []\r\n \r\n for x in range(8):\r\n for y in range(8):\r\n if is_valid_move(board, tile, x, y) != False:\r\n valid_moves.append([x+1, y+1])\r\n return valid_moves\r\ndef is_on_board(x, y):\r\n return x >= 0 and x <= WIDTH - 1 and y >= 0 and y <= HEIGHT - 1\r\n\r\ndef is_valid_move(board, tile, xstart, ystart):\r\n dup_board = [[1 for x in range(8)] for y in range(8)]\r\n if board[xstart][ystart] != '-':\r\n return False \r\n \r\n for i in range(WIDTH):\r\n for j in range(HEIGHT):\r\n if i == xstart:\r\n if j == ystart:\r\n dup_board[i][j] = tile\r\n else:\r\n dup_board[i][j] = board[i][j]\r\n else:\r\n dup_board[i][j] = board[i][j]\r\n \r\n if tile == 'W':\r\n other_tile ='B'\r\n else :\r\n other_tile = 'W'\r\n tiles_to_flip =[]\r\n for xdirection, ydirection in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\r\n x, y = xstart, ystart\r\n x += xdirection\r\n y += ydirection\r\n if is_on_board(x,y) and board[x][y] == other_tile:\r\n x += xdirection\r\n y += ydirection\r\n if not is_on_board(x,y):\r\n continue\r\n while board[x][y] == other_tile:\r\n x += xdirection\r\n y += ydirection\r\n if not is_on_board(x,y):\r\n break\r\n if not is_on_board(x,y):\r\n continue\r\n if board[x][y] == tile:\r\n while True:\r\n x -= xdirection\r\n y -= ydirection\r\n if x == xstart and y == ystart:\r\n break\r\n tiles_to_flip.append([x,y])\r\n \r\n if len(tiles_to_flip) == 0:\r\n return False\r\n return [tiles_to_flip, dup_board]\r\n\r\ndef make_move(board, tile, xstart, ystart):\r\n tiles_to_flip, sample = is_valid_move(board, tile, xstart, ystart)\r\n \r\n if tiles_to_flip == False:\r\n return False\r\n sample[xstart][ystart] = tile\r\n for x , y in tiles_to_flip:\r\n sample[x][y] = tile\r\n return sample\r\n\r\nprint('welcome to Goro Hasegawa creation')\r\nprint('Enter number of games to be played ?')\r\nvalue = int(input())\r\nfor game in range(value):\r\n new_game_board = game_board()\r\n player1_move, player2_move, turn = enter_player_tile()\r\n #turn = who_goes_first()\r\n print('The ' + turn + ' will start the game')\r\n while True:\r\n if turn == 'player1':\r\n move = get_player_move(new_game_board, player1_move)\r\n if move == 'quit':\r\n print_board(new_game_board)\r\n break\r\n elif move == 'list':\r\n list = get_valid_moves(new_game_board,player1_move)\r\n print('%s' % list)\r\n continue\r\n else:\r\n new_game_board = make_move(new_game_board, player1_move, 
move[0]-1, move[1]-1)\r\n scores = Score_Board(new_game_board)\r\n print('White - %s. Black = %s.' % (scores['W'], scores['B']))\r\n turn = 'player2'\r\n else:\r\n move = get_player_move(new_game_board, player2_move)\r\n if move == 'quit':\r\n print_board(new_game_board)\r\n break\r\n elif move == 'list':\r\n list = get_valid_moves(new_game_board, player2_move)\r\n print('%s' % list)\r\n continue\r\n else:\r\n new_game_board = make_move(new_game_board, player2_move, move[0]-1, move[1]-1)\r\n scores = Score_Board(new_game_board)\r\n print('White - %s. Black = %s.' % (scores['W'], scores['B']))\r\n turn = 'player1'","sub_path":"othello.py","file_name":"othello.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"243521501","text":"from django.db import models, transaction\nfrom datetime import datetime\nfrom dateutil.tz import tzutc\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\nfrom guardian.shortcuts import get_perms\nimport math\nfrom catamidb.models import Image, Deployment\nfrom random import sample\nfrom django.db.utils import IntegrityError\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nimport numpy as np\n\nimport random\nimport logging\n\n\nclass AnnotationCodes(models.Model):\n \"\"\"The base annotation (CAAB) structure.\n\n This stores all the levels of the classifaction tree\n with parent filled in as appropriate.\n \"\"\"\n caab_code = models.CharField(max_length=8, unique=True) # 8 numbers\n cpc_code = models.CharField(max_length=5, unique=True) # CPC Code file code\n point_colour = models.CharField(max_length=6) # hex RGB colour\n code_name = models.CharField(max_length=100)\n description = models.CharField(max_length=200)\n parent = models.ForeignKey(\n 'projects.AnnotationCodes',\n blank=True,\n null=True\n )\n\n def __unicode__(self):\n return \"{0} - ({1})\".format(self.code_name, self.caab_code)\n\n\nclass QualifierCodes(models.Model):\n \"\"\"Qualifiers to annotations.\n\n Examples include anthropogenic labels, or natural labels\n that include bleaching, dead etc.\n \"\"\"\n parent = models.ForeignKey('self', blank=True, null=True, related_name=\"children\")\n short_name = models.CharField(max_length=200)\n description = models.CharField(max_length=200)\n\n #active - should this be displayed to the users\n #in case you want to keep the label for historical purposes,\n # but not display it to the users\n active = models.BooleanField()\n\n\nclass Project(models.Model):\n \"\"\"\n Projects contain a set of images that a user works with. 
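# In othello.py above, make_move unpacks is_valid_move's return value
# directly; when the move is invalid that value is the bare boolean False, so
# the unpacking raises TypeError before the `if tiles_to_flip == False` guard
# can run. It works today only because callers pre-validate. A defensive
# variant (a sketch reusing the file's helpers):
def make_move_safe(board, tile, xstart, ystart):
    result = is_valid_move(board, tile, xstart, ystart)
    if result is False:  # guard *before* unpacking
        return False
    tiles_to_flip, new_board = result
    new_board[xstart][ystart] = tile
    for x, y in tiles_to_flip:
        new_board[x][y] = tile
    return new_board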
They also have\n associated worksets which are image sets to annotate.\n \"\"\"\n\n name = models.CharField(max_length=100)\n description = models.TextField(blank=True, null=True)\n owner = models.ForeignKey(User, null=True)\n creation_date = models.DateTimeField()\n modified_date = models.DateTimeField()\n images = models.ManyToManyField(Image, null=True)\n\n class Meta:\n unique_together = (('owner', 'name', 'creation_date'), )\n permissions = (\n ('view_project', 'View the project.'),\n )\n\n\nclass AnnotationSet(models.Model):\n \"\"\"\n An annotated set is used to contain a set of images to be annotated.\n \"\"\"\n\n IMAGE_SAMPLING_METHODOLOGY_CHOICES = (\n (0, 'Random'),\n (1, 'Stratified'),\n (2, 'Spatial'),\n (3, 'All'),\n )\n\n POINT_SAMPLING_METHODOLOGY_CHOICES = (\n (-1, 'Not Applicable'),\n (0, 'Random Point'),\n (1, 'Stratified Point'),\n (2, 'Fixed 5 Point'),\n (3, 'Uniform Grid'),\n )\n\n ANNOTATATION_SET_TYPE_CHOICES = (\n (0, 'Point'),\n (1, 'Whole Image'),\n )\n\n project = models.ForeignKey('projects.Project')\n owner = models.ForeignKey(User, null=True)\n name = models.CharField(max_length=100)\n description = models.TextField(blank=True, null=True)\n creation_date = models.DateTimeField()\n modified_date = models.DateTimeField()\n images = models.ManyToManyField(Image, related_name='projects')\n image_sampling_methodology = models.IntegerField(choices=IMAGE_SAMPLING_METHODOLOGY_CHOICES)\n point_sampling_methodology = models.IntegerField(choices=POINT_SAMPLING_METHODOLOGY_CHOICES)\n annotation_set_type = models.IntegerField(choices=ANNOTATATION_SET_TYPE_CHOICES)\n\n class Meta:\n unique_together = (('owner', 'name', 'creation_date'), )\n permissions = (\n ('view_annotationset', 'View the annotation set.'),\n )\n\n\nclass Annotation(models.Model):\n \"\"\"The common base for Point and Whole image annotations.\n \"\"\"\n\n image = models.ForeignKey('catamidb.Image')\n owner = models.ForeignKey(User, null=True)\n\n #loose reference to AnnotationCode table\n annotation_caab_code = models.CharField(max_length=200)\n\n #loose reference to qualifier table\n qualifier_short_name = models.CharField(max_length=200)\n\n #secondary annotation code and qualifier \n annotation_caab_code_secondary = models.CharField(max_length=200, blank=True)\n qualifier_short_name_secondary = models.CharField(max_length=200, blank=True)\n\n class Meta:\n \"\"\"Defines Metaparameters of the model.\"\"\"\n abstract = True\n\n\nclass PointAnnotationManager(models.Manager):\n \"\"\" Handles logic functions related to points annotations \"\"\"\n\n def apply_random_sampled_points(self, annotation_set, sample_size):\n \"\"\" Randomly apply points to the images attached to this annotation\n set \"\"\"\n\n images = annotation_set.images.all()\n points_to_bulk_save = []\n\n # iterate through the images and create points\n for image in images:\n for i in range(int(sample_size)):\n\n point_annotation = PointAnnotation()\n\n point_annotation.annotation_set = annotation_set\n point_annotation.image = image\n point_annotation.owner = annotation_set.owner\n point_annotation.x = random.uniform(0.008, 0.992) #random.random()\n point_annotation.y = random.uniform(0.008, 0.992)\n\n point_annotation.annotation_caab_code = \"\"\n point_annotation.qualifier_short_name = \"\"\n\n point_annotation.annotation_caab_code_secondary = \"\"\n point_annotation.qualifier_short_name_secondary = \"\"\n\n #point_annotation.save()\n points_to_bulk_save.append(point_annotation)\n\n # do the bulk save - for performance\n 
PointAnnotation.objects.bulk_create(points_to_bulk_save)\n\n def import_sampled_points(self, annotation_set, import_data):\n \"\"\" create annotation points with information from uploaded CSV and add to this annotation\n set \"\"\"\n\n images = annotation_set.images.all()\n points_to_bulk_save = []\n\n # iterate through the images and create points\n for image in images:\n for annotation in import_data[str(image.deployment.id)][image.image_name]:\n\n point_annotation = PointAnnotation()\n point_annotation.annotation_set = annotation_set\n point_annotation.image = image\n point_annotation.owner = annotation_set.owner\n point_annotation.x = annotation['Point in Image'].split(',')[0]\n point_annotation.y = annotation['Point in Image'].split(',')[1]\n\n point_annotation.annotation_caab_code = annotation['Annotation Code']\n point_annotation.qualifier_short_name = annotation['Qualifier Name']\n\n point_annotation.annotation_caab_code_secondary = annotation['Annotation Code 2']\n point_annotation.qualifier_short_name_secondary = annotation['Qualifier Name 2']\n\n #point_annotation.save()\n points_to_bulk_save.append(point_annotation)\n\n # do the bulk save - for performance\n PointAnnotation.objects.bulk_create(points_to_bulk_save)\n\n def apply_stratified_sampled_points(self, annotation_set, sample_size):\n \"\"\" Apply points to the images attached to this annotation set using\n stratified sampling \"\"\"\n\n #TODO: implement\n return None\n\n def apply_uniform_grid_points(self, annotation_set, sample_size):\n \"\"\" Apply a uniform grid of points to an image. \"\"\"\n\n images = annotation_set.images.all()\n points_to_bulk_save = []\n\n # take the square root of the sample size and round\n square = math.sqrt(int(sample_size))\n rows = columns = round(square)\n\n # +1 to the rows and cols\n rows += 1\n columns += 1\n\n # create the grid\n row_points = np.linspace(0.008, 0.992, num=rows, endpoint=False)\n column_points = np.linspace(0.008, 0.992, num=columns, endpoint=False)\n\n # pop the first item from the arrays - we do this so we get an even spacing excluding edges\n row_points = np.delete(row_points, 0)\n column_points = np.delete(column_points, 0)\n\n # apply the points to the images\n for image in images:\n for row in row_points:\n for column in column_points:\n\n point_annotation = PointAnnotation()\n\n point_annotation.annotation_set = annotation_set\n point_annotation.image = image\n point_annotation.owner = annotation_set.owner\n point_annotation.x = row\n point_annotation.y = column\n\n point_annotation.annotation_caab_code = \"\"\n point_annotation.qualifier_short_name = \"\"\n\n point_annotation.annotation_caab_code_secondary = \"\"\n point_annotation.qualifier_short_name_secondary = \"\"\n\n #point_annotation.save()\n points_to_bulk_save.append(point_annotation)\n\n # do the bulk save - for performance\n PointAnnotation.objects.bulk_create(points_to_bulk_save)\n\n def apply_fixed_five_points(self, annotation_set):\n \"\"\" 5 points based on AIMS standard \"\"\"\n\n images = annotation_set.images.all()\n points_to_bulk_save = []\n\n # create the grid\n row_points = [0.25, 0.25, 0.5, 0.75, 0.75]\n column_points = [0.25, 0.75, 0.5, 0.75, 0.25]\n\n # apply the points to the images\n for image in images:\n for i in range(int(5)):\n point_annotation = PointAnnotation()\n\n point_annotation.annotation_set = annotation_set\n point_annotation.image = image\n point_annotation.owner = annotation_set.owner\n point_annotation.x = row_points[i]\n point_annotation.y = column_points[i]\n\n 
point_annotation.annotation_caab_code = \"\"\n                point_annotation.qualifier_short_name = \"\"\n\n                point_annotation.annotation_caab_code_secondary = \"\"\n                point_annotation.qualifier_short_name_secondary = \"\"\n\n                #point_annotation.save()\n                points_to_bulk_save.append(point_annotation)\n\n        # do the bulk save - for performance\n        PointAnnotation.objects.bulk_create(points_to_bulk_save)\n\nclass WholeImageAnnotationManager(models.Manager):\n    \"\"\" Handles logic functions related to whole image annotations \"\"\"\n\n    def apply_whole_image_points(self, annotation_set):\n        \"\"\" Randomly apply points to the images attached to this annotation\n        set \"\"\"\n        \n        whole_image_annotation_count = 4\n        images = annotation_set.images.all()\n        points_to_bulk_save = []\n\n        # iterate through the images and create points\n        for image in images:\n            for i in range(whole_image_annotation_count):\n                whole_image_annotation = WholeImageAnnotation()\n\n                whole_image_annotation.annotation_set = annotation_set\n                whole_image_annotation.image = image\n                whole_image_annotation.owner = annotation_set.owner\n\n                whole_image_annotation.annotation_caab_code = \"\"\n                whole_image_annotation.qualifier_short_name = \"\"\n\n                whole_image_annotation.annotation_caab_code_secondary = \"\"\n                whole_image_annotation.qualifier_short_name_secondary = \"\"\n\n                points_to_bulk_save.append(whole_image_annotation)\n\n        # do the bulk save - for performance\n        WholeImageAnnotation.objects.bulk_create(points_to_bulk_save)\n\n    def import_whole_image_points(self, annotation_set, import_data):\n        \"\"\" create annotation points with information from uploaded CSV and add to this annotation\n        set \"\"\"\n\n        images = annotation_set.images.all()\n        points_to_bulk_save = []\n\n        # iterate through the images and create points\n        for image in images:\n            for annotation in import_data[str(image.deployment.id)][image.image_name]:\n                whole_image_annotation = WholeImageAnnotation()\n\n                whole_image_annotation.annotation_set = annotation_set\n                whole_image_annotation.image = image\n                whole_image_annotation.owner = annotation_set.owner\n\n                whole_image_annotation.annotation_caab_code = annotation['Annotation Code']\n                whole_image_annotation.qualifier_short_name = annotation['Qualifier Name']\n\n                whole_image_annotation.annotation_caab_code_secondary = annotation['Annotation Code 2']\n                whole_image_annotation.qualifier_short_name_secondary = annotation['Qualifier Name 2']\n\n                points_to_bulk_save.append(whole_image_annotation)\n\n        # do the bulk save - for performance\n        WholeImageAnnotation.objects.bulk_create(points_to_bulk_save)\n\n    @transaction.commit_on_success\n    def copy_annotations_to_image(self, annotation_set_id, source_image_id, destination_image_id):\n        \"\"\"\n        Copies whole image annotations from one image to another\n        \"\"\"\n\n        # get whole image annotations for the source image\n        source_image_annotations = WholeImageAnnotation.objects.filter(annotation_set=annotation_set_id,\n                                                                       image=source_image_id)\n\n        # get whole image annotations for the destination image\n        destination_image_annotations = WholeImageAnnotation.objects.filter(annotation_set=annotation_set_id,\n                                                                            image=destination_image_id)\n\n        # delete the annotations from destination\n        for annotation in destination_image_annotations:\n            annotation.delete()\n\n        # copy annotations from source\n        for annotation in source_image_annotations:\n            WholeImageAnnotation(annotation_set_id=annotation_set_id,\n                                 image_id=destination_image_id,\n                                 annotation_caab_code=annotation.annotation_caab_code,\n                                 qualifier_short_name=annotation.qualifier_short_name,\n                                 
coverage_percentage=annotation.coverage_percentage).save()\n\n def check_if_images_have_same_annotations(self, annotation_set_id, image_one, image_two):\n\n # get whole image annotations for the source image\n image_one_annotations = WholeImageAnnotation.objects.filter(annotation_set=annotation_set_id,\n image=image_one)\n\n # get whole image annotations for the destination image\n image_two_annotations = WholeImageAnnotation.objects.filter(annotation_set=annotation_set_id,\n image=image_two)\n\n results_one = image_one_annotations.filter(annotation_caab_code=\"\")\n results_two = image_two_annotations.filter(annotation_caab_code=\"\")\n\n # if there are no annoatations on either, then not the same\n if (image_one_annotations.count() or image_two_annotations.count()) == 0:\n return \"false\"\n\n if results_one.count() == image_one_annotations.count():\n return \"false\"\n\n if results_two.count() == image_two_annotations.count():\n return \"false\"\n\n #if sizes are different, then they are not the same\n if image_one_annotations.count() != image_two_annotations.count():\n return \"false\"\n\n # loop through and check if A and B have the same contents\n for annotation in image_one_annotations:\n results = image_two_annotations.filter(annotation_caab_code=annotation.annotation_caab_code,\n coverage_percentage=annotation.coverage_percentage)\n\n # no ? then these lists are not the same\n if results.count() == 0:\n return \"false\"\n\n return \"true\"\n\n\nclass PointAnnotation(Annotation):\n \"\"\"\n A Point annotation.\n\n Contains position within the image (as a percent from top left) and\n the set to which it belongs.\n \"\"\"\n\n annotation_set = models.ForeignKey('projects.AnnotationSet')\n\n x = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(100.0)])\n y = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(100.0)])\n\n\nclass WholeImageAnnotation(Annotation):\n \"\"\"\n A Whole Image annotation.\n\n Needed to distinguish the difference between point and whole image\n annotation.\n \"\"\"\n\n annotation_set = models.ForeignKey('projects.AnnotationSet')\n\n # -1 signifies that no percentage cover has been given\n coverage_percentage = models.IntegerField(validators = [MinValueValidator(-1), MaxValueValidator(100)], default = -1)\n","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"148639195","text":"''' handles all of the activity coming in to the server '''\nimport json\nfrom base64 import b64decode\nfrom Crypto.Hash import SHA256\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import pkcs1_15\nimport django.db.utils\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound\nfrom django.views.decorators.csrf import csrf_exempt\nimport requests\n\nfrom fedireads import models, outgoing\nfrom fedireads import status as status_builder\nfrom fedireads.remote_user import get_or_create_remote_user\nfrom fedireads.tasks import app\n\n\n@csrf_exempt\ndef inbox(request, username):\n ''' incoming activitypub events '''\n # TODO: should do some kind of checking if the user accepts\n # this action from the sender probably? 
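# Worth noting for the managers above: Django's bulk_create skips Model.save()
# and its signals, and field validators (MinValueValidator etc.) only ever run
# via full_clean(), so the bounds on x, y and coverage_percentage are not
# enforced on these batched rows. If that matters, one hedged pattern:
validated = []
for p in points_to_bulk_save:  # unsaved PointAnnotation instances
    p.full_clean()             # runs field validators; raises ValidationError
    validated.append(p)
PointAnnotation.objects.bulk_create(validated)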
idk\n # but this will just throw a 404 if the user doesn't exist\n try:\n models.User.objects.get(localname=username)\n except models.User.DoesNotExist:\n return HttpResponseNotFound()\n\n return shared_inbox(request)\n\n\n@csrf_exempt\ndef shared_inbox(request):\n ''' incoming activitypub events '''\n # TODO: should this be functionally different from the non-shared inbox??\n if request.method == 'GET':\n return HttpResponseNotFound()\n\n try:\n activity = json.loads(request.body)\n except json.decoder.JSONDecodeError:\n return HttpResponseBadRequest()\n\n try:\n verify_signature(request)\n except ValueError:\n return HttpResponse(status=401)\n\n handlers = {\n 'Follow': handle_follow,\n 'Accept': handle_follow_accept,\n 'Reject': handle_follow_reject,\n 'Create': handle_create,\n 'Like': handle_favorite,\n 'Announce': handle_boost,\n 'Add': {\n 'Tag': handle_tag,\n },\n 'Undo': {\n 'Follow': handle_unfollow,\n 'Like': handle_unfavorite,\n },\n 'Update': {\n 'Person': None,# TODO: handle_update_user\n 'Document': None# TODO: handle_update_book\n },\n }\n activity_type = activity['type']\n\n handler = handlers.get(activity_type, None)\n if isinstance(handler, dict):\n handler = handler.get(activity['object']['type'], None)\n\n if not handler:\n return HttpResponseNotFound()\n\n handler.delay(activity)\n return HttpResponse()\n\n\ndef verify_signature(request):\n ''' verify rsa signature '''\n signature_dict = {}\n for pair in request.headers['Signature'].split(','):\n k, v = pair.split('=', 1)\n v = v.replace('\"', '')\n signature_dict[k] = v\n\n try:\n key_id = signature_dict['keyId']\n headers = signature_dict['headers']\n signature = b64decode(signature_dict['signature'])\n except KeyError:\n raise ValueError('Invalid auth header')\n\n response = requests.get(\n key_id,\n headers={'Accept': 'application/activity+json'}\n )\n if not response.ok:\n raise ValueError('Could not load public key')\n\n actor = response.json()\n key = RSA.import_key(actor['publicKey']['publicKeyPem'])\n\n comparison_string = []\n for signed_header_name in headers.split(' '):\n if signed_header_name == '(request-target)':\n comparison_string.append('(request-target): post %s' % request.path)\n else:\n comparison_string.append('%s: %s' % (\n signed_header_name,\n request.headers[signed_header_name]\n ))\n comparison_string = '\\n'.join(comparison_string)\n\n signer = pkcs1_15.new(key)\n digest = SHA256.new()\n digest.update(comparison_string.encode())\n\n # raises a ValueError if it fails\n signer.verify(digest, signature)\n\n return True\n\n\n@app.task\ndef handle_follow(activity):\n ''' someone wants to follow a local user '''\n # figure out who they want to follow\n to_follow = models.User.objects.get(actor=activity['object'])\n # figure out who they are\n user = get_or_create_remote_user(activity['actor'])\n try:\n request = models.UserFollowRequest.objects.create(\n user_subject=user,\n user_object=to_follow,\n relationship_id=activity['id']\n )\n except django.db.utils.IntegrityError as err:\n if err.__cause__.diag.constraint_name != 'userfollowrequest_unique':\n raise\n # Duplicate follow request. Not sure what the correct behaviour is, but\n # just dropping it works for now. 
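# For reference, the Signature header shape that verify_signature above
# expects; the value here is representative, not taken from a real request:
sig = ('keyId="https://example.com/u/alice#main-key",'
       'headers="(request-target) host date",'
       'signature="Zm9vYmFy"')
parsed = {}
for pair in sig.split(','):
    k, v = pair.split('=', 1)  # split once so '=' padding in base64 survives
    parsed[k] = v.replace('"', '')
assert parsed['headers'] == '(request-target) host date'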
We should perhaps generate the\n # Accept, but then do we need to match the activity id?\n return\n\n if not to_follow.manually_approves_followers:\n status_builder.create_notification(\n to_follow,\n 'FOLLOW',\n related_user=user\n )\n outgoing.handle_accept(user, to_follow, request)\n else:\n status_builder.create_notification(\n to_follow,\n 'FOLLOW_REQUEST',\n related_user=user\n )\n\n\n@app.task\ndef handle_unfollow(activity):\n ''' unfollow a local user '''\n obj = activity['object']\n if not obj['type'] == 'Follow':\n #idk how to undo other things\n return HttpResponseNotFound()\n try:\n requester = get_or_create_remote_user(obj['actor'])\n to_unfollow = models.User.objects.get(actor=obj['object'])\n except models.User.DoesNotExist:\n return HttpResponseNotFound()\n\n to_unfollow.followers.remove(requester)\n\n\n@app.task\ndef handle_follow_accept(activity):\n ''' hurray, someone remote accepted a follow request '''\n # figure out who they want to follow\n requester = models.User.objects.get(actor=activity['object']['actor'])\n # figure out who they are\n accepter = get_or_create_remote_user(activity['actor'])\n\n try:\n request = models.UserFollowRequest.objects.get(\n user_subject=requester,\n user_object=accepter\n )\n request.delete()\n except models.UserFollowRequest.DoesNotExist:\n pass\n accepter.followers.add(requester)\n\n\n@app.task\ndef handle_follow_reject(activity):\n ''' someone is rejecting a follow request '''\n requester = models.User.objects.get(actor=activity['object']['actor'])\n rejecter = get_or_create_remote_user(activity['actor'])\n\n try:\n request = models.UserFollowRequest.objects.get(\n user_subject=requester,\n user_object=rejecter\n )\n request.delete()\n except models.UserFollowRequest.DoesNotExist:\n pass\n\n\n@app.task\ndef handle_create(activity):\n ''' someone did something, good on them '''\n user = get_or_create_remote_user(activity['actor'])\n\n if not 'object' in activity:\n return HttpResponseBadRequest()\n\n if user.local:\n # we really oughtn't even be sending in this case\n return\n\n if activity['object'].get('fedireadsType') and \\\n 'inReplyToBook' in activity['object']:\n try:\n if activity['object']['fedireadsType'] == 'Review':\n builder = status_builder.create_review_from_activity\n elif activity['object']['fedireadsType'] == 'Quotation':\n builder = status_builder.create_quotation_from_activity\n else:\n builder = status_builder.create_comment_from_activity\n\n # create the status, it'll throw a valueerror if anything is missing\n builder(user, activity['object'])\n except ValueError:\n return HttpResponseBadRequest()\n else:\n # TODO: should only create notes if they are relevent to a book,\n # so, not every single thing someone posts on mastodon\n try:\n status = status_builder.create_status_from_activity(\n user,\n activity['object']\n )\n if status and status.reply_parent:\n status_builder.create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=status.user,\n related_status=status,\n )\n except ValueError:\n return HttpResponseBadRequest()\n\n\n\n@app.task\ndef handle_favorite(activity):\n ''' approval of your good good post '''\n try:\n status_id = activity['object'].split('/')[-1]\n status = models.Status.objects.get(id=status_id)\n liker = get_or_create_remote_user(activity['actor'])\n except (models.Status.DoesNotExist, models.User.DoesNotExist):\n return\n\n if not liker.local:\n status_builder.create_favorite_from_activity(liker, activity)\n\n status_builder.create_notification(\n status.user,\n 
'FAVORITE',\n related_user=liker,\n related_status=status,\n )\n\n\n@app.task\ndef handle_unfavorite(activity):\n ''' approval of your good good post '''\n favorite_id = activity['object']['id']\n fav = status_builder.get_favorite(favorite_id)\n if not fav:\n return HttpResponseNotFound()\n\n fav.delete()\n\n\n@app.task\ndef handle_boost(activity):\n ''' someone gave us a boost! '''\n try:\n status_id = activity['object'].split('/')[-1]\n status = models.Status.objects.get(id=status_id)\n booster = get_or_create_remote_user(activity['actor'])\n except (models.Status.DoesNotExist, models.User.DoesNotExist):\n return HttpResponseNotFound()\n\n if not booster.local:\n status_builder.create_boost_from_activity(booster, activity)\n\n status_builder.create_notification(\n status.user,\n 'BOOST',\n related_user=booster,\n related_status=status,\n )\n\n\n@app.task\ndef handle_tag(activity):\n ''' someone is tagging or shelving a book '''\n user = get_or_create_remote_user(activity['actor'])\n if not user.local:\n book = activity['target']['id'].split('/')[-1]\n status_builder.create_tag(user, book, activity['object']['name'])\n","sub_path":"fedireads/incoming.py","file_name":"incoming.py","file_ext":"py","file_size_in_byte":9839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"430736329","text":"'''Grab Impact load calculator - Alan Turner\nDetermines maximum applied force whilst absorbing impact energy and resultant\ndeflection of structural system'''\n\nimport math\n\nProject= 'Stevens Standard Hopper = 1.4t/m^3'\nJN=16062\nprint ('Job Number: ',JN)\nprint ('Project: ',Project)\n\n#----------------------Input Bulk Material Details--------------------#\nDens=1400 #Density of bulk material (kg/m^3)\n\n#----------------------Input Grab Details And Calculate Energy------------#\nGrab_Type='ORTS GMBH 12m^3 VSFC-RC'\nG_grab_dry=7300 #(kg) ORTS\nVol_grab=12 #Refer to grab volume/density table (m^3)\nG_grab_material= Vol_grab*Dens# (kg)\nG_grab_wet=G_grab_dry+G_grab_material\nprint('The total dry mass of the grab is {0:.2f}kg, and the wet mass of the grab is {1:.2f}kg \\n'.format(G_grab_dry, G_grab_wet))\n\nV_grab_vert= 0.25# velocity of grab for vertical impact case m/s\nU_total_vert= 0.5*G_grab_wet*(V_grab_vert**2) # Total energy to be absorbed by structural system (J)\nprint('The Total Grab Energy to be Absorbed ={0:.2f} (J) for a grab '\n 'impact @ {1:.2f} m/s vertically into grizzly'.format(U_total_vert, V_grab_vert))\n\nV_grab_side= 0.42 # velocity of grab for side impact case m/s\nU_total_side= 0.5*G_grab_wet*(V_grab_side**2) # Total energy to be absorbed by structural system (J)\nprint('The Total Grab Energy to be Absorbed ={0:.2f} (J) for a grab '\n 'impact @ {1:.2f} m/s into the side of the hopper'.format(U_total_side, V_grab_side))\n\nV_grab_end = 0.83 # velocity of grab for end impact case m/s\nU_total= 0.5*G_grab_wet*(V_grab_end**2) # Total energy to be absorbed by structural system (J)\nprint('The Total Grab Energy to be Absorbed ={0:.2f}(j) for a grab moving\\n'\n ' into the hopper end, or vertically into the grizzly @ {1:.2f} m/s'.format(U_total, V_grab_end))\n\n#----------------------Input Grab Details And Calculate Energy------------#\n#Grab_Type='Verstegen 12m^3'\n#G_grab_dry=9000 #(kg) ORTS\n#Vol_grab=12 #Refer to grab volume/density table (m^3)\n#G_grab_material= Vol_grab*Dens# (kg)\n#G_grab_wet=G_grab_dry+G_grab_material\n#V_grab=1.388 # m/s grab velocity\n#U_total= 0.5*G_grab_wet*(V_grab**2) # Total energy to be absorbed by 
structural system (J)\n#print('The Total Grab Energy to be Absorbed (J)={0:0.2f}'.format(U_total))\n\n#-------------------------------------------------------------------#\nLC ='\\nLOAD CASE 5 GRIZZLY Simply Supported Beam System (vertical deflection)'\nL_b=4 # Beam length (m)\nN_b=4 # Number of beams loaded in beam system\nE=200000e6 # Modulus of elasticity (Pa)\nIx_b=26.04*1e-6 # Second moment of area of beam m^4\n#Calculate Force and Deflection\nG_grab_sw=((G_grab_wet*9.81)/1000)/N_b #Load from self weight of grab\nU_b=U_total_vert/N_b\nP_b=((math.sqrt(((48*E*Ix_b*U_b))/(0.5*math.pow(L_b,3))))/1000) + G_grab_sw\nDef_b=((P_b*math.pow(L_b,3))/(48*E*Ix_b))*1e6\nprint (LC)\nprint('Resultant centrally applied point load per beam (kN) = {0:.2f}'.format(P_b))\nprint ('Calculated deflection (mm) = {0:.2f}'.format(Def_b))\n\n#--------------------------------------------------------------------#\nLC = '\\nLOAD CASE 6 - GRAB SIDE IMPACT (Y-DIR) - Portal frame (lateral deflection, pin base (one col per portal only)'\nN_c=4 # Number of columns doing work\nU_c=U_total_side/N_c # Energy absorbed by each portal\nL_c=5.7 # Column length (m)\nIc=416e-6 # Average Column Moment of Inertia (m^4)\nE=200000e6 # modulus of elasticity (Pa)\n\n#Calculate Force and Deflection\nP_c=math.sqrt((24*E*Ic*U_c)/(math.pow(L_c,3)))\nDefl_col=((P_c*math.pow(L_c,3))/(24*E*Ic))*1e3\n#Report Results\nprint (LC)\nprint('the Y-Dir point load applied to each column-beam connection (kN)= {0:.2f}'.format(P_c/1000))\nprint('the resultant deflection at the column-beam connection (mm) = {0:.2f}'.format(Defl_col))\n\n#--------------------------------------------------------------------#\nLC = '\\nLOAD CASE 7 - GRAB END IMPACT (Y-DIR) - Portal frame (lateral deflection, pin base)'\nN_p=2 # Number of portal frames in system\nU_p=U_total/N_p # Energy absorbed by each portal\nL_c=4.5 # Column length (m)\nLb=7 # Beam Length (m)\nk=Lb/L_c # Ratio of beam to column length\nIc=330e-6 # Column Moment of Inertia (m^4)\nIb=4071e-6 # Beam Moment of Inertia (m^4)\nalpha=Ib/Ic # Ratio of moment of inertia of beams to columns\nE=200000e6 # modulus of elasticity (Pa)\nx=4+((2*k)/alpha)\n#Calculate Force and Deflection\nP_c=(math.sqrt((48*E*Ic*U_p)/(math.pow(L_c,3)*x)))\nDef_portal=x*((P_c*math.pow(Lb,3))/(24*E*Ic))*1e3\n#Report Results\nprint (LC)\nprint('the Y-Dir point load applied to each column-beam connection (kN)= {0:.2f}'.format(P_c/1000))\nprint('the resultant deflection at the column-beam connection (mm) = {0:.2f}'.format(Def_portal))\n#--------------------------------------------------------------------#\n\n'''\nJob Number: 16062\nProject: Stevens Standard Hopper = 1.4t/m^3\nThe total dry mass of the grab is 7300.00kg, and the wet mass of the grab is 24100.00kg\n\nThe Total Grab Energy to be Absorbed =753.12 (J) for a grab impact @ 0.25 m/s vertically into grizzly (0.9km/hr)\nThe Total Grab Energy to be Absorbed =2125.62 (J) for a grab impact @ 0.42 m/s into the side of the hopper (1.5km/hr)\nThe Total Grab Energy to be Absorbed =8301.24(j) for a grab moving\n into the hopper end, or vertically into the grizzly @ 0.83 m/s (3km/hr)\n\nLOAD CASE 5 GRIZZLY Simply Supported Beam System (vertical deflection)\nResultant centrally applied point load per beam (kN) = 97.46\nCalculated deflection (mm) = 24.95\n\nLOAD CASE 6 - GRAB SIDE IMPACT (Y-DIR) - Portal frame (lateral deflection, pin base (one col per portal only)\nthe Y-Dir point load applied to each column-beam connection (kN)= 75.70\nthe resultant deflection at the column-beam connection 
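# The load-case formulas above equate the grab's kinetic energy with beam
# strain energy: for a central point load P on a simply supported span,
# deflection = P*L^3/(48*E*I) and stored energy = P^2*L^3/(96*E*I), hence
# P = sqrt(96*E*I*U/L^3) -- identical to the script's sqrt(48*E*I*U/(0.5*L^3)).
# A numeric cross-check against LOAD CASE 5 (self-weight term omitted):
import math
E, I, L = 200000e6, 26.04e-6, 4.0      # Pa, m^4, m -- LOAD CASE 5 inputs
U = (0.5 * 24100 * 0.25 ** 2) / 4      # impact energy per beam, J
P = math.sqrt(96 * E * I * U / L ** 3)
print(P / 1000)                        # ~38.35 kN; + 59.11 kN self-weight = 97.46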
(mm) = 7.02\n\nLOAD CASE 7 - GRAB END IMPACT (Y-DIR) - Portal frame (lateral deflection, pin base)\nthe Y-Dir point load applied to each column-beam connection (kN)= 184.21\nthe resultant deflection at the column-beam connection (mm) = 169.62\n\n\n'''","sub_path":"Hopper/Impact Loads/16062_Grab Impact Loads_R1.pyw","file_name":"16062_Grab Impact Loads_R1.pyw","file_ext":"pyw","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"426096246","text":"import os\n# import json\nimport alfred\n# from subprocess import call\n\n\nconfig_filename = 'config.json'\n\n\nclass AlfredScriptWorkflow(object):\n\n max_results = 9\n\n def __init__(self):\n \"\"\" Setup \"\"\"\n\n # Read bundle info and config path\n self.placeholder = ''\n for x in alfred.preferences['objects']:\n if x['type'] == 'alfred.workflow.input.scriptfilter':\n self.placeholder = x['config']['title']\n self.config_path = os.path.join(alfred.work(False), config_filename)\n\n def read_config(self, data):\n \"\"\" Read config data and parse into `config` \"\"\"\n raise NotImplementedError()\n\n def process(self, query_str):\n \"\"\" Entry point \"\"\"\n results = self.get_items_for_query(query_str)\n if results:\n xml = alfred.xml(results,\n self.max_results) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n\n def get_items_for_query(self, query_str):\n \"\"\" Return items for the query string \"\"\"\n raise NotImplementedError()\n\n def display_message(self, message, subtitle=None, arg=None,\n icon='icon.png'):\n \"\"\" Inform them that something's wrong \"\"\"\n if message is None:\n # Display same message as the placeholder\n message = self.placeholder\n xml = alfred.xml([\n alfred.Item(\n title=message,\n subtitle=subtitle,\n attributes={\n 'uid': alfred.uid(0),\n 'arg': arg\n },\n icon=icon\n )\n ]) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n","sub_path":"workflow/alfred_script_workflow.py","file_name":"alfred_script_workflow.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"299377801","text":"import numpy as np\nfrom .field import scalar_field, vector_field\nfrom fidimag.common.cuboid_mesh import CuboidMesh\n\n\ndef test_initialise_scalar():\n mesh = CuboidMesh(1, 1, 1, 1, 1, 1)\n f = scalar_field(mesh, lambda r: r[0] + r[1] + r[2])\n assert np.allclose(f, np.array((1.5)))\n\n\ndef test_initialise_vector():\n mesh = CuboidMesh(1, 1, 1, 1, 1, 1)\n v = vector_field(mesh, lambda r: 2 * r)\n assert np.allclose(v, np.array((1, 1, 1)))\n\n\n","sub_path":"fidimag/atomistic/field_test.py","file_name":"field_test.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"176561602","text":"from unittest import TestCase, main\nfrom subscription import Subscriptions\n\n\nclass TestSubscription(TestCase):\n def test_should_can_create_and_get_a_subscription(self):\n Subscriptions().create('sub')\n\n assert Subscriptions().get('sub').name == 'sub', \\\n 'Should can create and get a subscription'\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"backend-solution/tests/test_subscription.py","file_name":"test_subscription.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"605890670","text":"import pytest\nfrom 
spacy.tokens.doc import Underscore\n\nimport spacy\nfrom spacy.lang.en import English\nfrom spacy.tokens import Doc, DocBin\n\nfrom ..util import make_tempdir\n\n\ndef test_serialize_empty_doc(en_vocab):\n doc = Doc(en_vocab)\n data = doc.to_bytes()\n doc2 = Doc(en_vocab)\n doc2.from_bytes(data)\n assert len(doc) == len(doc2)\n for token1, token2 in zip(doc, doc2):\n assert token1.text == token2.text\n\n\ndef test_serialize_doc_roundtrip_bytes(en_vocab):\n doc = Doc(en_vocab, words=[\"hello\", \"world\"])\n doc.cats = {\"A\": 0.5}\n doc_b = doc.to_bytes()\n new_doc = Doc(en_vocab).from_bytes(doc_b)\n assert new_doc.to_bytes() == doc_b\n\n\ndef test_serialize_doc_roundtrip_disk(en_vocab):\n doc = Doc(en_vocab, words=[\"hello\", \"world\"])\n with make_tempdir() as d:\n file_path = d / \"doc\"\n doc.to_disk(file_path)\n doc_d = Doc(en_vocab).from_disk(file_path)\n assert doc.to_bytes() == doc_d.to_bytes()\n\n\ndef test_serialize_doc_roundtrip_disk_str_path(en_vocab):\n doc = Doc(en_vocab, words=[\"hello\", \"world\"])\n with make_tempdir() as d:\n file_path = d / \"doc\"\n file_path = str(file_path)\n doc.to_disk(file_path)\n doc_d = Doc(en_vocab).from_disk(file_path)\n assert doc.to_bytes() == doc_d.to_bytes()\n\n\ndef test_serialize_doc_exclude(en_vocab):\n doc = Doc(en_vocab, words=[\"hello\", \"world\"])\n doc.user_data[\"foo\"] = \"bar\"\n new_doc = Doc(en_vocab).from_bytes(doc.to_bytes())\n assert new_doc.user_data[\"foo\"] == \"bar\"\n new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(), exclude=[\"user_data\"])\n assert not new_doc.user_data\n new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(exclude=[\"user_data\"]))\n assert not new_doc.user_data\n\n\ndef test_serialize_doc_span_groups(en_vocab):\n doc = Doc(en_vocab, words=[\"hello\", \"world\", \"!\"])\n doc.spans[\"content\"] = [doc[0:2]]\n new_doc = Doc(en_vocab).from_bytes(doc.to_bytes())\n assert len(new_doc.spans[\"content\"]) == 1\n\n\ndef test_serialize_doc_bin():\n doc_bin = DocBin(attrs=[\"LEMMA\", \"ENT_IOB\", \"ENT_TYPE\", \"NORM\", \"ENT_ID\"], store_user_data=True)\n texts = [\"Some text\", \"Lots of texts...\", \"...\"]\n cats = {\"A\": 0.5}\n nlp = English()\n for doc in nlp.pipe(texts):\n doc.cats = cats\n doc.spans[\"start\"] = [doc[0:2]]\n doc[0].norm_ = \"UNUSUAL_TOKEN_NORM\"\n doc[0].ent_id_ = \"UNUSUAL_TOKEN_ENT_ID\"\n doc_bin.add(doc)\n bytes_data = doc_bin.to_bytes()\n\n # Deserialize later, e.g. 
in a new process\n    nlp = spacy.blank(\"en\")\n    doc_bin = DocBin().from_bytes(bytes_data)\n    reloaded_docs = list(doc_bin.get_docs(nlp.vocab))\n    for i, doc in enumerate(reloaded_docs):\n        assert doc.text == texts[i]\n        assert doc.cats == cats\n        assert len(doc.spans) == 1\n        assert doc[0].norm_ == \"UNUSUAL_TOKEN_NORM\"\n        assert doc[0].ent_id_ == \"UNUSUAL_TOKEN_ENT_ID\"\n\n\ndef test_serialize_doc_bin_unknown_spaces(en_vocab):\n    doc1 = Doc(en_vocab, words=[\"that\", \"'s\"])\n    assert doc1.has_unknown_spaces\n    assert doc1.text == \"that 's \"\n    doc2 = Doc(en_vocab, words=[\"that\", \"'s\"], spaces=[False, False])\n    assert not doc2.has_unknown_spaces\n    assert doc2.text == \"that's\"\n\n    doc_bin = DocBin().from_bytes(DocBin(docs=[doc1, doc2]).to_bytes())\n    re_doc1, re_doc2 = doc_bin.get_docs(en_vocab)\n    assert re_doc1.has_unknown_spaces\n    assert re_doc1.text == \"that 's \"\n    assert not re_doc2.has_unknown_spaces\n    assert re_doc2.text == \"that's\"\n\n\n@pytest.mark.parametrize(\n    \"writer_flag,reader_flag,reader_value\",\n    [\n        (True, True, \"bar\"),\n        (True, False, \"bar\"),\n        (False, True, \"nothing\"),\n        (False, False, \"nothing\"),\n    ],\n)\ndef test_serialize_custom_extension(en_vocab, writer_flag, reader_flag, reader_value):\n    \"\"\"Test that custom extensions are correctly serialized in DocBin.\"\"\"\n    Doc.set_extension(\"foo\", default=\"nothing\")\n    doc = Doc(en_vocab, words=[\"hello\", \"world\"])\n    doc._.foo = \"bar\"\n    doc_bin_1 = DocBin(store_user_data=writer_flag)\n    doc_bin_1.add(doc)\n    doc_bin_bytes = doc_bin_1.to_bytes()\n    doc_bin_2 = DocBin(store_user_data=reader_flag).from_bytes(doc_bin_bytes)\n    doc_2 = list(doc_bin_2.get_docs(en_vocab))[0]\n    assert doc_2._.foo == reader_value\n    Underscore.doc_extensions = {}\n","sub_path":"spacy/tests/serialize/test_serialize_doc.py","file_name":"test_serialize_doc.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335560589","text":"api_key = \"AIzaSyBeYT5lJU1-VrMMOxRFGc-W7IyuKPLpEqk\"\n\nfrom googleapiclient.discovery import build\n\n\nclass Video:\n    def __init__(self, title, link):\n        self.title = title\n        self.link = link\n    \nyoutube = build('youtube', 'v3', developerKey=api_key)\n\nprint(youtube)\n\nreq = youtube.search().list(q='interview', part='snippet', type='video', maxResults = 1)\n\nres = req.execute()\n\nvideo_list = []\n\n#youtube_json = res.json()\n\n#print(res)\n\n#print(len(res['items']))\n\n#print(res['items'][0]) # view all info of the first video returned\n\nfor item in res['items']: # print all the titles(minor change to this in order to print the URL's of the videos in the web page)\n    video_title = item['snippet']['title']\n    video_url = \"https://www.youtube.com/watch?v=\" + item['id']['videoId']\n    video_list.append(Video(video_title, video_url))\n    \n    #print(item['snippet']['title'])\n    #print(\"https://www.youtube.com/watch?v=\" + item['id']['videoId'])\n    #print(item['snippet']['description'])\n\n    ","sub_path":"JobHub/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"60837008","text":"\"\"\"Example file for signals.\"\"\"\n\nfrom pyaib.plugins import plugin_class\nfrom pyaib.components import observe, keyword, awaits_signal\nfrom pyaib.signals import emit_signal, await_signal\nimport re\n\n@plugin_class('names')\nclass Names:\n    \"\"\"This plugin provides a command ('names') that outputs a list of all\n    nicks 
currently in the channel.\"\"\"\n def __init__(self, irc_c, config):\n print(\"Names plugin loaded\")\n\n @keyword('names')\n def get_list_of_names(self, irc_c, message, trigger, args, kwargs):\n # Sends a NAMES request to the server, to get a list of nicks for the\n # current channel.\n # Issue the NAMES request:\n irc_c.RAW(\"NAMES %s\" % message.channel)\n # The request has been sent.\n # pyaib is asynchronous, so another function will recieve the response\n # from this request.\n # That function must send the data here via a signal.\n try:\n # Wait for the signal (up to 10 seconds).\n response = await_signal(irc_c, 'NAMES_RESPONSE', timeout=10.0)\n # await_signal returns whatever data we choose to send, or True.\n except TimeoutError:\n message.reply(\"The request timed out.\")\n return\n # The NAMES response is now saved.\n channel = response[0]\n names = response[1]\n assert channel == message.channel\n message.reply(\"List of channel members: %s\" % \", \".join(names))\n # Warning, this will annoy everyone in the channel.\n\n @observe('IRC_MSG_353') # 353 indicates a NAMES response.\n def recieve_names(self, irc_c, message):\n # The response is in message.args as a single string.\n # \"MYNICK = #channel :nick1 nick2 nick3\"\n # Split that up into individual names:\n response = re.split(r\"\\s:?\", message.args.strip())[2:]\n channel = response[0]\n names = response[1:]\n # Great, we've caught the NAMES response.\n # Now send it back to the function that wanted it.\n emit_signal(irc_c, 'NAMES_RESPONSE', data=(channel, names))\n # The signal name can be anything, so long as emit_signal and\n # await_signal use the same one.\n","sub_path":"example/plugins/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565248114","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom genotypes import STEPS\nfrom utils import mask2d\nfrom utils import LockedDropout\nfrom utils import embedded_dropout, argmax, prepare_sequence, log_sum_exp\n\nimport torch.autograd as autograd\nimport torch.optim as optim\n\ntorch.manual_seed(1)\n\nINITRANGE = 0.04\n\n\nclass DARTSCell(nn.Module):\n def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):\n super(DARTSCell, self).__init__()\n self.nhid = nhid\n self.dropouth = dropouth\n self.dropoutx = dropoutx\n self.genotype = genotype\n\n # genotype is None when doing arch search\n steps = len(\n self.genotype.recurrent) if self.genotype is not None else STEPS\n self._W0 = nn.Parameter(\n torch.Tensor(ninp + nhid,\n 2 * nhid).uniform_(-INITRANGE, INITRANGE))\n self._Ws = nn.ParameterList([\n nn.Parameter(\n torch.Tensor(nhid, 2 * nhid).uniform_(-INITRANGE, INITRANGE))\n for i in range(steps)\n ])\n\n def forward(self, inputs, hidden):\n T, B = inputs.size(0), inputs.size(1)\n\n if self.training:\n x_mask = mask2d(B, inputs.size(2), keep_prob=1. - self.dropoutx)\n h_mask = mask2d(B, hidden.size(2), keep_prob=1. 
- self.dropouth)\n else:\n x_mask = h_mask = None\n\n hidden = hidden[0]\n hiddens = []\n for t in range(T):\n hidden = self.cell(inputs[t], hidden, x_mask, h_mask)\n hiddens.append(hidden)\n hiddens = torch.stack(hiddens)\n return hiddens, hiddens[-1].unsqueeze(0)\n\n def _compute_init_state(self, x, h_prev, x_mask, h_mask):\n if self.training:\n xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)\n else:\n xh_prev = torch.cat([x, h_prev], dim=-1)\n c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)\n c0 = c0.sigmoid()\n h0 = h0.tanh()\n s0 = h_prev + c0 * (h0 - h_prev)\n return s0\n\n def _get_activation(self, name):\n if name == 'tanh':\n f = F.tanh\n elif name == 'relu':\n f = F.relu\n elif name == 'sigmoid':\n f = F.sigmoid\n elif name == 'identity':\n f = lambda x: x\n else:\n raise NotImplementedError\n return f\n\n def cell(self, x, h_prev, x_mask, h_mask):\n s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)\n\n states = [s0]\n for i, (name, pred) in enumerate(self.genotype.recurrent):\n s_prev = states[pred]\n if self.training:\n ch = (s_prev * h_mask).mm(self._Ws[i])\n else:\n ch = s_prev.mm(self._Ws[i])\n c, h = torch.split(ch, self.nhid, dim=-1)\n c = c.sigmoid()\n fn = self._get_activation(name)\n h = fn(h)\n s = s_prev + c * (h - s_prev)\n states += [s]\n output = torch.mean(\n torch.stack([states[i] for i in self.genotype.concat], -1), -1)\n return output\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n def __init__(self,\n ntoken,\n ntag,\n ninp,\n nhid,\n nhidlast,\n dropout=0.5,\n dropouth=0.5,\n dropoutx=0.5,\n dropouti=0.5,\n dropoute=0.1,\n cell_cls=DARTSCell,\n genotype=None):\n super(RNNModel, self).__init__()\n self.lockdrop = LockedDropout()\n self.encoder = nn.Embedding(ntoken, ninp)\n\n assert ninp == nhid == nhidlast\n if cell_cls == DARTSCell:\n assert genotype is not None\n self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]\n else:\n assert genotype is None\n self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]\n\n self.rnns = torch.nn.ModuleList(self.rnns)\n self.decoder = nn.Linear(ninp, ntoken)\n self.decoder.weight = self.encoder.weight # weight tying\n self.init_weights()\n\n self.ninp = ninp\n self.nhid = nhid\n self.nhidlast = nhidlast\n self.dropout = dropout\n self.dropouti = dropouti\n self.dropoute = dropoute\n self.ntoken = ntoken\n self.ntag = ntag\n self.cell_cls = cell_cls\n\n # # Maps the output of the RNN into tag space.\n # self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # # Matrix of transition parameters. 
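# mask2d is imported from utils but not shown here. Because forward() samples
# x_mask and h_mask once and then reuses them at every timestep, the cell is
# doing variational (locked) dropout. A minimal stand-in with the semantics the
# cell appears to assume -- this is an assumption, not the original helper:
import torch

def mask2d(B, D, keep_prob):
    # one Bernoulli(keep_prob) mask per sequence, scaled by 1/keep_prob so the
    # expected activation is unchanged; reused across all timesteps of a batch
    return torch.bernoulli(torch.full((B, D), keep_prob)) / keep_prob

# Note also that the cell update s = s_prev + c * (h - s_prev) is a
# highway-style gate: algebraically identical to (1 - c) * s_prev + c * h.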
Entry i,j is the score of\n # # transitioning *to* i *from* j.\n # self.transitions = nn.Parameter(\n # torch.randn(self.tagset_size, self.tagset_size))\n\n # # These two statements enforce the constraint that we never transfer\n # # to the start tag and we never transfer from the stop tag\n # self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n # self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n #to do tagging\n self.ibo_classifier = nn.Linear(nhid, ntag)\n\n def init_weights(self):\n self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)\n\n def forward(self, input, hidden, return_h=False):\n batch_size = input.size(1)\n\n emb = embedded_dropout(self.encoder,\n input,\n dropout=self.dropoute if self.training else 0)\n emb = self.lockdrop(emb, self.dropouti)\n\n raw_output = emb\n new_hidden = []\n raw_outputs = []\n outputs = []\n print(\"len rnns:\", len(self.rnns))\n for l, rnn in enumerate(self.rnns):\n current_input = raw_output\n raw_output, new_h = rnn(raw_output, hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n hidden = new_hidden\n\n output = self.lockdrop(raw_output, self.dropout)\n outputs.append(output)\n\n logit = self.decoder(output.view(-1, self.ninp))\n log_prob = nn.functional.log_softmax(logit, dim=-1)\n model_output = log_prob\n model_output = model_output.view(-1, batch_size, self.ntoken)\n\n print(len(hidden), hidden[0].shape)\n # ibo_logits = None\n ibo_logits = self.ibo_classifier(output.view(-1, self.ninp))\n ibo_log_prob = nn.functional.log_softmax(ibo_logits, dim=-1)\n ibo_log_prob = ibo_log_prob.view(-1, batch_size, self.ntag)\n\n\n if return_h:\n return model_output, ibo_log_prob, hidden, raw_outputs, outputs\n return model_output, ibo_log_prob, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return [weight.new(1, bsz, self.nhid).zero_()]\n\nclass BiRNN_CRF(nn.Module):\n\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiRNN_CRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n self.rnn = nn.RNN(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n\n # Maps the output of the RNN into tag space.\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # Matrix of transition parameters. 
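# argmax, log_sum_exp and prepare_sequence are imported from utils but never
# defined in this file. The class closely mirrors the PyTorch BiLSTM-CRF
# tutorial, so -- assuming the tutorial's definitions -- they would be:
import torch

def argmax(vec):
    # index of the best score in a 1 x N row vector, as a Python int
    _, idx = torch.max(vec, 1)
    return idx.item()

def log_sum_exp(vec):
    # numerically stable log(sum(exp(vec))): shift by the max so exp() cannot overflow
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))

def prepare_sequence(seq, to_ix):
    # map a list of tokens to a LongTensor of vocabulary indices
    return torch.tensor([to_ix[w] for w in seq], dtype=torch.long)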
Entry i,j is the score of\n        # transitioning *to* i *from* j.\n        self.transitions = nn.Parameter(\n            torch.randn(self.tagset_size, self.tagset_size))\n\n        # These two statements enforce the constraint that we never transfer\n        # to the start tag and we never transfer from the stop tag\n        self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n        self.hidden = self.init_hidden()\n\n    def init_hidden(self):\n        # nn.RNN (unlike nn.LSTM) expects a single hidden-state tensor of shape\n        # (num_layers * num_directions, batch, hidden_size), not an (h, c) tuple\n        return torch.randn(2, 1, self.hidden_dim // 2)\n\n    def _forward_alg(self, feats):\n        # Do the forward algorithm to compute the partition function\n        init_alphas = torch.full((1, self.tagset_size), -10000.)\n        # START_TAG has all of the score.\n        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n        # Wrap in a variable so that we will get automatic backprop\n        forward_var = init_alphas\n\n        # Iterate through the sentence\n        for feat in feats:\n            alphas_t = []  # The forward tensors at this timestep\n            for next_tag in range(self.tagset_size):\n                # broadcast the emission score: it is the same regardless of\n                # the previous tag\n                emit_score = feat[next_tag].view(\n                    1, -1).expand(1, self.tagset_size)\n                # the ith entry of trans_score is the score of transitioning to\n                # next_tag from i\n                trans_score = self.transitions[next_tag].view(1, -1)\n                # The ith entry of next_tag_var is the value for the\n                # edge (i -> next_tag) before we do log-sum-exp\n                next_tag_var = forward_var + trans_score + emit_score\n                # The forward variable for this tag is log-sum-exp of all the\n                # scores.\n                alphas_t.append(log_sum_exp(next_tag_var).view(1))\n            forward_var = torch.cat(alphas_t).view(1, -1)\n        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n        alpha = log_sum_exp(terminal_var)\n        return alpha\n\n    def _get_rnn_features(self, sentence):\n        self.hidden = self.init_hidden()\n        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n        rnn_out, self.hidden = self.rnn(embeds, self.hidden)\n        rnn_out = rnn_out.view(len(sentence), self.hidden_dim)\n        rnn_feats = self.hidden2tag(rnn_out)\n        return rnn_feats\n\n    def _score_sentence(self, feats, tags):\n        # Gives the score of a provided tag sequence\n        score = torch.zeros(1)\n        tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])\n        for i, feat in enumerate(feats):\n            score = score + \\\n                self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n        return score\n\n    def _viterbi_decode(self, feats):\n        backpointers = []\n\n        # Initialize the viterbi variables in log space\n        init_vvars = torch.full((1, self.tagset_size), -10000.)\n        init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n\n        # forward_var at step i holds the viterbi variables for step i-1\n        forward_var = init_vvars\n        for feat in feats:\n            bptrs_t = []  # holds the backpointers for this step\n            viterbivars_t = []  # holds the viterbi variables for this step\n\n            for next_tag in range(self.tagset_size):\n                # next_tag_var[i] holds the viterbi variable for tag i at the\n                # previous step, plus the score of transitioning\n                # from tag i to next_tag.\n                # We don't include the emission scores here because the max\n                # does not depend on them (we add them in below)\n                next_tag_var = forward_var + self.transitions[next_tag]\n                best_tag_id = argmax(next_tag_var)\n                bptrs_t.append(best_tag_id)\n                viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\n            # Now add in the emission scores, and assign forward_var to the set\n            # of viterbi 
variables we just computed\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n backpointers.append(bptrs_t)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.tag_to_ix[START_TAG] # Sanity check\n best_path.reverse()\n return path_score, best_path\n\n def neg_log_likelihood(self, sentence, tags):\n feats = self._get_rnn_features(sentence)\n forward_score = self._forward_alg(feats)\n gold_score = self._score_sentence(feats, tags)\n return forward_score - gold_score\n\n def forward(self, sentence): # dont confuse this with _forward_alg above.\n # Get the emission scores from the BiRNN\n rnn_feats = self._get_rnn_features(sentence)\n\n # Find the best path, given the features.\n score, tag_seq = self._viterbi_decode(rnn_feats)\n return score, tag_seq\n\nif __name__==\"__main__\":\n START_TAG = \"\"\n STOP_TAG = \"\"\n EMBEDDING_DIM = 5\n HIDDEN_DIM = 4\n\n # Make up some training data\n training_data = [(\n \"the wall street journal reported today that apple corporation made money\".split(),\n \"B I I I O O O B I O O\".split()\n ), (\n \"georgia tech is a university in georgia\".split(),\n \"B I O O O O B\".split()\n )]\n\n word_to_ix = {}\n for sentence, tags in training_data:\n for word in sentence:\n if word not in word_to_ix:\n word_to_ix[word] = len(word_to_ix)\n\n tag_to_ix = {\"B\": 0, \"I\": 1, \"O\": 2, START_TAG: 3, STOP_TAG: 4}\n\n model = BiRNN_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)\n optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)\n\n # Check predictions before training\n with torch.no_grad():\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)\n print(model(precheck_sent))\n\n # Make sure prepare_sequence from earlier in the RNN section is loaded\n for epoch in range(\n 300): # again, normally you would NOT do 300 epochs, it is toy data\n for sentence, tags in training_data:\n # Step 1. Remember that Pytorch accumulates gradients.\n # We need to clear them out before each instance\n model.zero_grad()\n\n # Step 2. Get our inputs ready for the network, that is,\n # turn them into Tensors of word indices.\n sentence_in = prepare_sequence(sentence, word_to_ix)\n targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)\n\n # Step 3. Run our forward pass.\n loss = model.neg_log_likelihood(sentence_in, targets)\n\n # Step 4. 
Compute the loss, gradients, and update the parameters by\n # calling optimizer.step()\n loss.backward()\n optimizer.step()\n\n # Check predictions after training\n with torch.no_grad():\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n print(model(precheck_sent))\n # We got it!\n","sub_path":"rnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"618091728","text":"from torch.utils.data import Dataset, DataLoader\nfrom config import config\nimport torch\n\n\nclass BERTdataset(Dataset):\n def __init__(self, data, label, hard_neg):\n super(BERTdataset, self).__init__()\n self.data = data\n self.label = label\n self.hard_neg = hard_neg\n self.max_len = config.MAX_LEN\n self.tokenizer = config.TOKENIZER\n\n def __len__(self):\n return len(self.data)\n\n # def __getitem__(self, idx):\n # data = self.data[idx]\n # data = \" \".join(data.split())\n # label = self.label[idx]\n # label = \" \".join(label.split())\n # hard_neg = self.hard_neg[idx]\n # hard_neg = \" \".join(hard_neg.split())\n #\n # inputs1 = self.tokenizer.encode_plus(\n # data,\n # None,\n # truncation=True,\n # add_special_tokens=True,\n # max_length=self.max_len,\n # padding='max_length'\n # )\n #\n # ids1 = torch.tensor(inputs1['input_ids'], dtype=torch.long).view(1,-1)\n # mask1 = torch.tensor(inputs1['attention_mask'], dtype=torch.long).view(1,-1)\n # token_type_ids1 = torch.tensor(inputs1['token_type_ids'], dtype=torch.long).view(1,-1)\n #\n # inputs2 = self.tokenizer.encode_plus(\n # label,\n # None,\n # truncation=True,\n # add_special_tokens=True,\n # max_length=self.max_len,\n # padding='max_length'\n # )\n #\n # ids2 = torch.tensor(inputs2['input_ids'], dtype=torch.long).view(1,-1)\n # mask2 = torch.tensor(inputs2['attention_mask'], dtype=torch.long).view(1,-1)\n # token_type_ids2 = torch.tensor(inputs2['token_type_ids'], dtype=torch.long).view(1,-1)\n #\n # inputs3 = self.tokenizer.encode_plus(\n # hard_neg,\n # None,\n # truncation=True,\n # add_special_tokens=True,\n # max_length=self.max_len,\n # padding='max_length'\n # )\n #\n # ids3 = torch.tensor(inputs3['input_ids'], dtype=torch.long).view(1,-1)\n # mask3 = torch.tensor(inputs3['attention_mask'], dtype=torch.long).view(1,-1)\n # token_type_ids3 = torch.tensor(inputs3['token_type_ids'], dtype=torch.long).view(1,-1)\n #\n #\n # ids = torch.cat((ids1,ids2,ids3),dim=0)\n # mask = torch.cat((mask1,mask2,mask3),dim=0)\n # token_type_ids = torch.cat((token_type_ids1,token_type_ids2,token_type_ids3), dim=0)\n # return {\n # 'ids': ids,\n # 'mask': mask,\n # \"token_type_ids\": token_type_ids\n # }\n\n def __getitem__(self, idx):\n data = self.data[idx]\n data = \" \".join(data.split())\n label = self.label[idx]\n label = \" \".join(label.split())\n hard_neg = self.hard_neg[idx]\n hard_neg = \" \".join(hard_neg.split())\n text = []\n text.append(data)\n text.append(label)\n text.append(hard_neg)\n\n inputs = self.tokenizer(\n text,\n None,\n truncation=True,\n add_special_tokens=True,\n max_length=self.max_len,\n padding='max_length'\n )\n\n ids = torch.tensor(inputs['input_ids'], dtype=torch.long)\n mask = torch.tensor(inputs['attention_mask'], dtype=torch.long)\n token_type_ids = torch.tensor(inputs['token_type_ids'], dtype=torch.long)\n\n\n return {\n 'ids': ids,\n 'mask': mask,\n \"token_type_ids\": token_type_ids\n 
}","sub_path":"BERTdataset.py","file_name":"BERTdataset.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"609043360","text":"import numpy as np\nimport cv2\nimport math\nfrom matplotlib import pyplot as plt\n\ndef hough_circle(img, threshold=100):\n # Initialize the accumulator\n row, col = img.shape\n thetas = np.deg2rad(np.arange(360))\n width = min(row, col)\n rad = np.arange(5, 6)\n acc = np.zeros((row, col, len(rad)), dtype=int)\n # Cache reusable data\n cos_t = np.cos(thetas)\n sin_t = np.sin(thetas)\n # Extract edge points\n row_idx, col_idx = np.nonzero(img)\n for r in range(len(rad)):\n print('Detecting circles of radius {}'.format(rad[r]))\n for i in range(len(row_idx)):\n x = col_idx[i]\n y = row_idx[i]\n for t in range(len(thetas)):\n a = int(y - rad[r] * sin_t[t])\n b = int(x - rad[r] * cos_t[t])\n if a >= 0 and a < row and b >= 0 and b < col:\n acc[a, b, r] += 1\n indices = np.argwhere(acc > threshold)\n print(indices.shape)\n print(indices)\n return acc, indices, rad\n\ndef draw(img, acc, indices, rad, threshold=200):\n # Sort the accumulator by its indices\n idx = np.argsort(-acc, axis=None)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n row = img.shape[0]\n col = img.shape[1]\n rds = len(rad)\n # The grid size of the 3d array in order to find index\n grid = rds * col\n circles = list()\n # Iterate through all elements\n for i in idx:\n # Find the original indices\n cur = np.zeros(3, dtype=int)\n cur[0] = int(i // grid)\n cur[1] = int((i % grid) // rds)\n cur[2] = int((i % grid) % rds)\n # Compared with the threshold, if the accumulator is\n # less than the threshold, then stop because the array is sorted\n # all the elements after are less than threshold\n if acc[cur[0], cur[1], cur[2]] < threshold:\n break\n if len(circles) == 0:\n circles.append(cur)\n else:\n # Iterate through the results, if there is one result that are close\n # to an existing result, ignore it. 
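# The flat-index arithmetic in draw() above (grid = rds * col, then // and %)
# manually inverts NumPy's C-order flattening of the (row, col, radius)
# accumulator; np.unravel_index performs the identical conversion in one call:
import numpy as np

acc = np.zeros((4, 5, 3), dtype=int)        # toy (row, col, radius) accumulator
acc[2, 3, 1] = 7
flat = int(np.argsort(-acc, axis=None)[0])  # flat index of the largest vote

assert np.unravel_index(flat, acc.shape) == (2, 3, 1)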
If there isn't one, append to results.\n for c, cidx in enumerate(circles):\n if np.linalg.norm(cidx - cur) < 20:\n break\n if c == len(circles) - 1:\n circles.append(cur)\n # Draw all the results in the image\n for ro, co, rd in circles:\n print(acc[ro, co, rd])\n radius = int(rad[rd])\n cv2.circle(img, (co, ro), radius, (0, 0, 255), 2)\n\n\n\n\n\nimg = cv2.imread('f4.jpg', 0)\nline_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\ngauss = cv2.GaussianBlur(img, (5, 5), 1)\nedge_img = cv2.Canny(gauss, 50, 150, None, 3)\nacc, idx, rad = hough_circle(edge_img)\ndraw(line_img, acc, idx, rad)\ncv2.imwrite('circle-detected-face6.jpg', line_img)\nplt.imshow(line_img)\nplt.show()","sub_path":"Computer Vision/Assignment-1/Task-2/hough_circle.py","file_name":"hough_circle.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123245300","text":"\n\n#calss header\nclass _TOPKNOT():\n\tdef __init__(self,): \n\t\tself.name = \"TOPKNOT\"\n\t\tself.definitions = [u'long hair tied up onto the top part of the back of the head']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_topknot.py","file_name":"_topknot.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"436263714","text":"import numpy as np\nimport os\nimport glob\nfrom lib import utils\nimport librosa\nimport time\nfrom lib import config\nimport math\nfrom multiprocessing import Process, Queue\nimport scipy.io, scipy.io.wavfile, scipy.signal\nfrom sklearn.feature_extraction import image\n\n\nclass DataReader(object):\n\n def __init__(self, speech, output_dir, norm_dir, dist_num, mode='mfsc', is_training=True):\n\n self._is_training = is_training\n\n self._win_len = config.win_len\n self._nperseg = config.nperseg\n self._time_width = config.time_width\n self._dist_num = dist_num\n self._inpur_dir = 0\n self._output_dir = output_dir\n self._norm_dir = norm_dir\n self._mode = mode\n self._batch_size = 0\n self._input_file_list = [1]\n\n self._output_file_list = sorted(glob.glob(output_dir + '/*.mat'))\n\n self._file_len = len(self._input_file_list)\n\n self._num_file = 0\n self._start_idx = 0\n\n self.eof = False\n self.file_change = False\n self.num_samples = 0\n self._speech = speech\n self._inputs = 0\n\n if self._is_training:\n self._outputs = 0\n assert self._file_len == len(self._output_file_list)\n\n self._train_mean, self._train_std = self.norm_process(norm_dir + '/norm.mat')\n\n def norm_process(self, norm_dir):\n\n norm_param = scipy.io.loadmat(norm_dir)\n\n if self._mode is 'mfsc':\n mag_mean = norm_param[\"mag_mean\"]\n mag_std = norm_param[\"mag_std\"]\n\n train_mean = np.transpose(mag_mean, (1, 0))\n train_std = np.transpose(mag_std, (1, 0))\n\n # train_mean = np.transpose(np.concatenate((mag_mean, phase_mean), axis=0), (1, 0))\n # train_std = np.transpose(np.concatenate((mag_std, phase_std), axis=0), (1, 0))\n elif self._mode is 'stft':\n mag_mean = norm_param[\"mag_mean\"]\n mag_std = norm_param[\"mag_std\"]\n phase_mean = norm_param[\"phase_mean\"]\n phase_std = norm_param[\"phase_std\"]\n\n train_mean = np.transpose(np.concatenate((mag_mean, phase_mean), axis=0), (1, 0))\n train_std = np.transpose(np.concatenate((mag_std, phase_std), axis=0), (1, 0))\n return 
train_mean, train_std\n\n def next_batch(self, batch_size):\n\n self._batch_size = batch_size\n\n if self._start_idx == 0:\n\n self._inputs = self._read_input(self._input_file_list[self._num_file])\n\n # self._inputs = self._padding(self._read_input(self._input_file_list[self._num_file]),\n # batch_size, self._time_width)\n # self._inputs = self._normalize(self._inputs)\n # self._inputs = np.reshape(self._inputs, (-1, self._time_width,) + self._inputs.shape[1:])\n\n if self._is_training:\n self._outputs = self._read_output(self._output_file_list[self._num_file])\n\n # self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]),\n # batch_size, self._time_width)\n # self._outputs = np.reshape(self._outputs, (-1, self._time_width))\n\n assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \\\n (\"# samples is not matched between input: %d and output: %d files\"\n % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))\n\n # self.num_samples = np.shape(self._outputs)[0]\n self.num_samples = np.shape(self._inputs)[0]\n\n if self._start_idx + batch_size > self.num_samples:\n\n self._start_idx = 0\n self.file_change = True\n self._num_file += 1\n\n if self._num_file > self._file_len - 1:\n self.eof = True\n self._num_file = 0\n\n self._inputs = self._read_input(self._input_file_list[self._num_file])\n\n # self._inputs = self._padding(self._read_input(self._input_file_list[self._num_file]),\n # batch_size, self._time_width)\n # self._inputs = self._normalize(self._inputs)\n # self._inputs = np.reshape(self._inputs, (-1, self._time_width,) + self._inputs.shape[1:])\n\n if self._is_training:\n self._outputs = self._read_output(self._output_file_list[self._num_file])\n\n # self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]),\n # batch_size, self._time_width)\n # self._outputs = np.reshape(self._outputs, (-1, self._time_width))\n\n assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \\\n (\"# samples is not matched between input: %d and output: %d files\"\n % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))\n\n self.num_samples = np.shape(self._inputs)[0]\n\n else:\n self.file_change = False\n self.eof = False\n\n inputs = self._inputs[self._start_idx:self._start_idx + self._batch_size, :]\n\n if self._is_training:\n\n outputs = self._outputs[self._start_idx:self._start_idx + batch_size]\n else:\n outputs = np.zeros((inputs.shape[0]))\n\n self._start_idx += self._batch_size\n print(inputs.shape[0])\n return inputs, outputs\n\n def _normalize(self, x):\n x = (x - self._train_mean) / self._train_std\n return x\n\n def _read_input(self, input_file_dir):\n\n dataname = ''\n\n if self._is_training:\n if os.path.exists(dataname):\n feat = np.load(dataname)\n else:\n data, _ = librosa.load(input_file_dir, config.fs)\n # data = self._power_normalize(data)\n # _, data = scipy.io.wavfile.read(input_file_dir)\n # self._nperseg = rate*self._win_len\n\n feat = self.stft_dist(data, self._dist_num)\n\n np.save(dataname, feat)\n else:\n data = self._speech\n # data = data/np.max(np.abs(data))\n\n # data = self._power_normalize(data)\n # _, data = scipy.io.wavfile.read(input_file_dir)\n # self._nperseg = rate*self._win_len\n\n feat = self.stft_dist(data)\n self._batch_size = feat.shape[0]\n return feat\n\n def _read_output(self, output_file_dir):\n label = np.squeeze(scipy.io.loadmat(output_file_dir)[\"label\"])\n label = np.mean(librosa.util.frame(label, frame_length=int(self._nperseg), hop_length=int(self._nperseg * 
0.5)),\n                        axis=0)\n        label = (label >= 0.5).choose(label, 1)\n        label = (label < 0.5).choose(label, 0).astype(np.int32)\n        print(label.shape)\n        label = self._padding2(label, self._batch_size)\n        return label\n\n    @staticmethod\n    def _power_normalize(sig):\n        beta = 1000 / (math.sqrt(np.sum(sig ** 2)) / (sig.shape[0]))\n        sig = sig * beta\n        return sig\n\n    @staticmethod\n    def _padding(inputs, batch_size, width):\n        pad_size = batch_size * width - inputs.shape[0] % (batch_size * width)\n        pad_shape = (pad_size,) + inputs.shape[1:]\n        inputs = np.concatenate((inputs, np.zeros(pad_shape, dtype=np.float32)))\n\n        # window_pad = np.zeros((w_val, inputs.shape[1]))\n        # inputs = np.concatenate((window_pad, inputs, window_pad), axis=0)\n        return inputs\n\n    @staticmethod\n    def _padding2(inputs, batch_size):\n        pad_size = batch_size - inputs.shape[0] % (batch_size)\n        pad_shape = (pad_size,) + inputs.shape[1:]\n        inputs = np.concatenate((inputs, np.zeros(pad_shape, dtype=np.float32)))\n\n        # window_pad = np.zeros((w_val, inputs.shape[1]))\n        # inputs = np.concatenate((window_pad, inputs, window_pad), axis=0)\n        return inputs\n\n    def stft_dist(self, data):\n\n        result = self.get_stft(data)\n\n        # result = np.reshape(result, (-1, result.shape[2], result.shape[3]))\n\n        pad = np.expand_dims(np.zeros((int(config.time_width / 2), result.shape[1])), axis=2)\n\n        result = self._normalize(result)\n        result = np.squeeze(np.concatenate((pad, result, pad), axis=0))\n        result = image.extract_patches_2d(result, (config.time_width, config.n_mels))\n        result = np.expand_dims(self._padding2(result, self._batch_size), axis=3)\n        # print(result.shape)\n        return result\n\n    def get_stft(self, data):\n\n        data = np.asarray(data).astype(dtype=np.float32)\n        # nfft = np.int(2**(np.floor(np.log2(self._nperseg)+1)))\n\n        # _, _, Zxx = scipy.signal.stft(data, fs=fs, nperseg=self._nperseg, nfft=int(nfft))\n        if self._mode == 'mfsc':\n            Zxx = librosa.feature.melspectrogram(data, sr=config.fs, n_fft=int(config.nfft),\n                                                 hop_length=int(self._nperseg * 0.5),\n                                                 n_mels=config.n_mels, fmin=300, fmax=8000)\n\n            mfsc = np.transpose(np.expand_dims(Zxx, axis=2), (1, 0, 2))[:-1, :]\n\n            # label = np.mean(\n            #     librosa.util.frame(data, frame_length=int(self._nperseg), hop_length=int(self._nperseg * 0.5)), axis=0)\n            return mfsc\n\n\n\n    def reader_initialize(self):\n        self._num_file = 0\n        self._start_idx = 0\n        self.eof = False\n\n    def eof_checker(self):\n        return self.eof\n\n    def file_change_checker(self):\n        return self.file_change\n\n    def file_change_initialize(self):\n        self.file_change = False\n\n\nif __name__ == '__main__':\n    dist_num = 4\n    mode = 'mfsc'\n    train_input_path = os.path.abspath('../data/train/wav')\n    train_output_path = os.path.abspath('../data/train/lab')\n    norm_path = os.path.abspath('../data/train/norm')\n\n    train_dr = DataReader(train_input_path, train_output_path, norm_path, dist_num, mode, is_training=True)\n    while True:\n        sample = train_dr.next_batch(512)\n        print(train_dr._num_file)\n\n        a = 1","sub_path":"lib/datareader_test.py","file_name":"datareader_test.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"151151181","text":"# abs(1) # absolute value\n# all('a') # false if any element of the sequence is falsy, like a chained 'and'\n# any('a') # true if any element of the sequence is truthy, like a chained 'or'\n# ascii('a') # rarely needed\n# bin() # binary\n# bool() # truth value of an object\n# bytes() # immutable\n# bytearray() # mutable bytes\n# callable('a') # whether an object can be called\n# chr(97) # character for an ASCII code\n# ord('b') # ASCII code for a character\n# classmethod() # used in object-oriented code\n# compile() #\n# complex() # complex number\n# eval() # evaluates a simple expression; cannot perform assignment\n# exec() #\n# dict()\n# dir() # returns an object's attribute names\n# divmod(10, 2) # returns (quotient, remainder)\n# filter(lambda x: x>5,range(10)) # returns the values that satisfy the condition\n# frozenset({1, 2, 3, 3, 5}) # makes a set read-only\n# globals() # every name the program has created, as a dict; global variables only\n# locals() # local variables only\n# hash() # whether an object is hashable\n# help() # the help manual\n# hex() # decimal to hexadecimal\n# max(3, 6) # returns the maximum\n# min(3, 6) # returns the minimum\n# next() #\n# object()\n# oct() #\n# pow(4, 9) # 4 to the power of 9\n# oct(8) # octal\n# open() # opens a file\n# print() # print can write to a file\n# # #### writing print output to a file\nmsg = \"ancd\"\nf = open(\"tofile\", 'w', encoding='utf-8')\nprint(msg, sep='|', end='', file=f)\nf.close()\n#################################\n# range() # returns a sequence\n# repr() # returns a string representation\n# reversed()\ndata = list(range(10))\ndata = list(reversed(data))\nprint(data)\nprint(round(2.6)) # banker's rounding: halves round to the nearest even integer\n\n\n# list to set\ndata = [2, 3, 3, 5]\na = set(data)\nprint(a)\n# #######\na = 'abc'\nprint(sorted(a))\n# zip() #\na = [1, 3, 5, 7, 9]\nb = [2, 4, 6, 8]\nfor i in zip(a, b):\n    print(i)\n\n\n\n\n","sub_path":"student_day3/内置函数.py","file_name":"内置函数.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"137478470","text":"#Sequential 2D matrix multiplication \n\nimport numpy as np\nimport sys\nimport time\n\n#2D matrix multiplication. C = A*B\ndef rank2Tensor(A, B, C): \n    if len(A[0]) != len(B):\n        sys.exit('The number of columns in Matrix A is not equal to the number of rows in Matrix B')\n\n    for i in range(0, len(A)):\n        for j in range(0, len(B[0])):\n            for k in range(len(B)):\n                C[i][j] += A[i][k] * B[k][j]\n    return C\n\n#Array bounds\nN = [10, 20, 30]\n#set seed to ensure that new random numbers are always generated\nnp.random.seed()\nnoThreads = 0\nfor n in N:\n    #generate A and B matrices with random numbers between 0 and n. Dimension is n*n. 
Generate an empty C matrix of dimention n*n\n print(\"N is \",n)\n A = np.random.randint(0,n, size=(n, n))\n B = np.random.randint(0, n, size=(n, n))\n C = np.zeros((n, n), dtype=int)\n\n #start timer for timing matrix multiplication\n t1 = time.perf_counter()\n rank2Tensor(A,B,C)\n #end timer for timing matrix multiplication\n t2 = time.perf_counter()\n #calculate time of program execution\n print(f\"Time taken for a %s x %s with Python MP was {t2 - t1:0.4f} sec for %s threads\"%(n,n,noThreads))","sub_path":"Matrix/2DMatrixOri.py","file_name":"2DMatrixOri.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"271348926","text":"import os\nimport random\nfrom pathlib import Path\nfrom os import walk\nfrom moviepy.editor import *\nfrom nltk import tokenize\n\n\ndef concat_vid(text, keywords, save_dir):\n fnames = next(walk(save_dir), (None, None, []))[2]\n mov_paths = [os.path.join(save_dir, x) for x in fnames if '.DS_Store' not in x]\n dur_per_clip = 7\n height = 540\n\n def clip_vid(path):\n return VideoFileClip(path).subclip(0, dur_per_clip).resize(height=height)\n\n def clip_full(full, len_clips):\n full.set_duration(full.duration)\n\n def clip_into_clips(clip, start, end):\n return clip.subclip(start, end)\n\n times = [x for x in range(0, dur_per_clip * len_clips + dur_per_clip, dur_per_clip)]\n ret_clips = []\n for idx, start in enumerate(times[:-1]):\n full = full.set_duration(full.duration)\n new_clip = clip_into_clips(full, start, times[idx + 1])\n ret_clips.append(new_clip)\n\n return ret_clips\n\n def add_text(clip, text):\n\n def chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n text_split = text.split(' ')\n if len(text_split) > 5:\n text_len = len(text.split(' '))\n\n split_1 = ' '.join(text_split[:text_len // 2])\n split_2 = ' '.join(text_split[text_len // 2:])\n split_3 = None\n\n if len(text_split[:text_len // 2]) > 15:\n chunks = list(chunks(text_split, text_len // 3))\n split_1 = ' '.join(chunks[0])\n split_2 = ' '.join(chunks[1])\n split_3 = ' '.join(chunks[2] + chunks[3])\n\n fontsize = 18\n opacity = 0.9\n w, h = clip.size\n txt_clip = TextClip(split_1, fontsize=fontsize, font=\"Helvetica-Bold\", color='white')\n txt_col = txt_clip.on_color(size=(clip.w + txt_clip.w, txt_clip.h + 10), color=(0, 102, 255),\n pos=(6, 'center'), col_opacity=opacity)\n txt_clip = txt_col.set_pos([30, h - 90]).set_duration(dur_per_clip)\n\n txt_clip2 = TextClip(split_2, fontsize=fontsize, font=\"Helvetica-Bold\", color='white')\n txt_col2 = txt_clip2.on_color(size=(clip.w + txt_clip2.w, txt_clip2.h + 10), color=(204, 0, 255),\n pos=(6, 'center'), col_opacity=opacity)\n txt_clip2 = txt_col2.set_pos([50, h - 60]).set_duration(dur_per_clip)\n\n if split_3 is not None:\n txt_clip3 = TextClip(split_3, fontsize=fontsize, font=\"Helvetica-Bold\", color='white')\n txt_col3 = txt_clip3.on_color(size=(clip.w + txt_clip3.w, txt_clip3.h + 10), color=(255, 255, 102),\n pos=(6, 'center'), col_opacity=opacity)\n txt_clip3 = txt_col3.set_pos([70, h - 30]).set_duration(dur_per_clip)\n\n return CompositeVideoClip([clip, txt_clip, txt_clip2, txt_clip3])\n\n return CompositeVideoClip([clip, txt_clip, txt_clip2])\n\n\n\n else:\n txt_clip = TextClip(text, size=([1700, 0]), font=\"Helvetica-Bold\", color='white')\n txt_clip = txt_clip.set_pos([]).set_duration(dur_per_clip)\n im_width, im_height = txt_clip.size\n\n color_clip = ColorClip(size=(int(im_width * 1.1), int(im_height * 1.4)),\n 
color=(0, 255, 255))\n color_clip = color_clip.set_opacity(.6).set_duration(dur_per_clip).set_pos('center')\n\n return CompositeVideoClip([clip, color_clip, txt_clip])\n\n def get_vids_per_sentence(text, keywords):\n sentences = tokenize.sent_tokenize(text)\n m_paths = []\n for sent in sentences:\n if len(sent) > 1:\n def words_in_string(word_list, a_string):\n return list(set(word_list).intersection(a_string.split()))\n\n vid_key = words_in_string(keywords, sent)\n\n m_keys = []\n for m in mov_paths:\n if any(word in m for word in vid_key):\n m_keys.append(m)\n\n try:\n m_paths.append(random.sample(m_keys, 1))\n except ValueError:\n pass\n else:\n pass\n\n return m_paths, sentences\n\n mov_paths_on_key, texts = get_vids_per_sentence(text, keywords)\n mov_paths_on_key = [item for sublist in mov_paths_on_key for item in sublist]\n\n clips = [clip_vid(path) for path in mov_paths_on_key]\n len_clips = len(clips)\n sub_final = concatenate_videoclips(clips, method='compose')\n\n clips_reso = clip_full(sub_final, len_clips)\n clips_add_text = [add_text(clip, texts[idx]) for idx, clip in enumerate(clips_reso)]\n final = concatenate_videoclips(clips_add_text)\n\n final.write_videofile(os.path.join(save_dir, 'gen_file', 'output.mp4'))\n","sub_path":"src/vid_editor.py","file_name":"vid_editor.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"431341042","text":"import pandas as pd\r\nfrom shapely.geometry import mapping\r\nfrom shapely.wkt import loads\r\nimport argparse\r\nimport geojson\r\nimport geojsonio\r\nimport os\r\nimport psycopg2\r\nimport pyautogui\r\nimport time\r\nimport logging\r\n\r\nWORK_PATH = os.path.dirname(__file__) # script location\r\n\r\ng_logger = logging.getLogger(\"record_log\")\r\ng_logger.setLevel(logging.INFO)\r\n\r\ng_fh = logging.FileHandler(filename=\"{0}\".format(os.path.join(WORK_PATH, \"logger.log\")), mode=\"w\")\r\ng_fh.setLevel(logging.INFO)\r\n\r\ng_console = logging.StreamHandler()\r\ng_console.setLevel(logging.INFO)\r\n\r\ng_formatter = logging.Formatter(fmt='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%a, %d %b %Y %H:%M:%S')\r\ng_fh.setFormatter(g_formatter)\r\ng_console.setFormatter(g_formatter)\r\n\r\ng_logger.addHandler(g_fh)\r\ng_logger.addHandler(g_console)\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='Create Test Cases.')\r\n parser.add_argument('--dbHost', required=True, help='dbHost')\r\n parser.add_argument('--newdbName', required=True, help='newyear,like 19Q1,18Q4')\r\n parser.add_argument('--olddbName', required=True, help='oldyear,like 19Q1,18Q4')\r\n parser.add_argument('--region',required=True, help='the region you want to run')\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef execute_sql(sql_command, connection):\r\n cursor = connection.cursor()\r\n command = sql_command\r\n cursor.execute(command)\r\n rows = cursor.fetchall()\r\n return rows[0][0]\r\n\r\n\r\ndef get_diff(result_new_year,result_old_year):\r\n if result_old_year is None:\r\n diff = result_new_year\r\n elif result_new_year is None:\r\n diff = result_old_year\r\n else:\r\n result_old_year = loads(result_old_year)\r\n result_new_year = loads(result_new_year)\r\n diff = result_old_year.difference(result_new_year) #diff=old_year-new_year\r\n return diff\r\n\r\ndef convert_to_string(text, file_name):\r\n with open(file_name + 'txt', 'a',encoding='utf-8') as text_file:\r\n text_file.write('%s\\n' % text)\r\n 
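# get_diff() above returns old_year minus new_year, so geometry that was
# *added* in the new vintage never appears in the report. If changes in both
# directions are wanted, shapely's symmetric_difference captures additions as
# well as removals. A sketch under that assumption (the function name is
# illustrative; both arguments are assumed to be valid WKT strings):
from shapely.wkt import loads

def get_diff_both_ways(wkt_new, wkt_old):
    # geometry present in exactly one of the two vintages
    return loads(wkt_old).symmetric_difference(loads(wkt_new))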
with open(file_name + 'txt', 'r', encoding='utf-8') as text_file:\r\n content = text_file.readlines()\r\n content = ''.join(content)\r\n with open(file_name + '.html', 'w', encoding='utf-8') as fobj:\r\n fobj.writelines('''\r\n \r\n ''')\r\n fobj.write('%s\\n' % text)\r\n fobj.writelines('''\r\n \r\n ''')\r\n\r\n return content\r\n\r\ndef generate_wkt_string(point1,point2,display_class,dbhost,dbname,sql_condition,comment):\r\n if display_class != 'none':\r\n display_class_condition = \"AND c.tags->'display_class'='{display_class}'\".format(display_class=display_class)\r\n else:\r\n display_class_condition = ''\r\n\r\n g_logger.info(\"the display_class_condition is %s\"%(display_class_condition))\r\n sql_command=generate_sql_command(point1, point2, display_class_condition, sql_condition, comment)\r\n connection = get_db_connection(dbhost, dbname, 'postgres', 'postgres', 5432)\r\n result=execute_sql(sql_command, connection)\r\n print(sql_command)\r\n return result\r\n\r\n\r\ndef get_db_connection(dbHost, dbName, dbUser, dbPass, dbPort):\r\n connection = psycopg2.connect(database=dbName, user=dbUser, password=dbPass, host=dbHost, port=dbPort)\r\n return connection\r\n\r\ndef generate_sql_command(point1,point2,display_class_condition,sql_condition,comment):\r\n sql_command = '''SELECT ST_AsText(ST_Union(tmp.geo)) \r\nfrom \r\n (SELECT \r\n CASE WHEN \r\n member_type = 'N' THEN \r\n (SELECT geom\r\n FROM \r\n nodes n \r\n WHERE \r\n n.id = member_id\r\n ) WHEN member_type = 'W' THEN (\r\n SELECT \r\n (linestring) \r\n FROM \r\n ways w \r\n WHERE \r\n w.id = member_id\r\n ) WHEN member_type = 'R' THEN (\r\n SELECT \r\n (\r\n ST_Union(t.geo)\r\n ) \r\n FROM \r\n (\r\n (\r\n SELECT \r\n c.linestring as geo \r\n FROM \r\n ways c \r\n WHERE \r\n c.id in (\r\n select \r\n rm2.member_id \r\n from \r\n relation_members rm2 \r\n where \r\n rm2.relation_id = rm.member_id\r\n )\r\n ) \r\n UNION \r\n (\r\n SELECT \r\n c.geom as geo \r\n FROM \r\n nodes c \r\n WHERE \r\n c.id in (\r\n select \r\n rm2.member_id \r\n from \r\n relation_members rm2 \r\n where \r\n rm2.relation_id = rm.member_id\r\n )\r\n )\r\n ) t\r\n ) ELSE null END AS geo \r\n FROM \r\n relation_members rm, \r\n relations c, \r\n ways b \r\n WHERE \r\n {sql_condition} and {sql_condition_case_type}\r\n {display_class_condition}\r\n and rm.relation_id = c.id \r\n and rm.member_id = b.id \r\n and ST_Intersects(\r\n linestring, \r\n ST_SetSRID(\r\n ST_MakeBox2D(\r\n ST_Point({point1}), \r\n ST_Point({point2})\r\n ), \r\n 4326\r\n )\r\n )\r\n ) tmp'''\r\n sql_command=sql_command.format(point1=point1,point2=point2,display_class_condition=display_class_condition,\r\n sql_condition=sql_condition,sql_condition_case_type=comment)\r\n return sql_command\r\n\r\n\r\n\r\ndef change_color(geojson_string):\r\n head='''{\r\n \"type\": \"FeatureCollection\",\r\n \"features\": [\r\n {\r\n \"type\": \"Feature\",\r\n \"properties\": {\r\n \"stroke\": \"#fd0902\",\r\n \"stroke-width\": 2,\r\n \"stroke-opacity\": 1\r\n },\r\n \"geometry\":'''\r\n tail=''' }\r\n ]\r\n}'''\r\n geojson_string=head+geojson_string+tail\r\n return(geojson_string)\r\n\r\ndef compare_geometry_between_two_year(boundingBoxOne, boundingBoxTwo, displayClass, dbHost, newdbName,olddbName, sqlCondition, comment):\r\n result_new_year=generate_wkt_string(boundingBoxOne, boundingBoxTwo, displayClass, dbHost, newdbName, sqlCondition, comment)\r\n result_old_year = generate_wkt_string(boundingBoxOne, boundingBoxTwo, displayClass, dbHost, olddbName, sqlCondition, comment)\r\n wkt_string = 
get_diff(result_new_year, result_old_year)\r\n content = convert_to_string(wkt_string, caseName)\r\n return content\r\n\r\nargs = parse_args()\r\ndbHost = args.dbHost\r\nnewdbName = args.newdbName\r\nolddbName = args.olddbName\r\nregion = args.region\r\n\r\ndf = pd.read_csv(os.path.split(os.path.realpath(__file__))[0] + os.sep+'cencor_case_'+region + '.csv')\r\npass_number = 0\r\ndf['result'] ='not test'\r\nfor index,row in df.iterrows():\r\n caseName = getattr(row, 'caseName')\r\n item = getattr(row, 'item')\r\n boundingBoxOne = getattr(row, 'boundingBoxOne')\r\n boundingBoxTwo = getattr(row, 'boundingBoxTwo')\r\n if getattr(row, 'displayClass') != 'none':\r\n displayClass = int(getattr(row,'displayClass'))\r\n else:\r\n displayClass = getattr(row,'displayClass')\r\n sqlCondition = getattr(row, 'sqlCondition')\r\n comment = getattr(row, 'comment')\r\n content = compare_geometry_between_two_year(boundingBoxOne, boundingBoxTwo, displayClass, dbHost, newdbName, olddbName, sqlCondition, comment)\r\n g_logger.info(\"Now runing the case:{casename},the sqlCondition {sqlCondition},the comment is {comment},the displayclass \"\r\n \"is:{displayclass} the content is {content}\".format(casename=caseName,sqlCondition=sqlCondition,comment=comment,displayclass=displayClass,content=content))\r\n if 'None' in content:\r\n df['result'][index] = 'Nothing'\r\n print('Both two version has nothing in this displayclass')\r\n elif 'EMPTY' in content:\r\n pass_number += 1\r\n df['result'][index] = 'Pass'\r\n print('No difference between these two versions in this displayclass')\r\n else:\r\n df['result'][index] = 'Fail'\r\n geojson_string = geojson.dumps(mapping(loads(content)))\r\n geojson_string=change_color(geojson_string)\r\n geojsonio.display(geojson_string)\r\n time.sleep(5)\r\n pyautogui.screenshot(os.path.split(os.path.realpath(__file__))[0] +os.sep +caseName + \".png\")\r\n\r\ndf.to_csv(os.path.split(os.path.realpath(__file__))[0] + os.sep+'result.csv')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"trunk/Cencorship_test/PBF_layer_test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"647822458","text":"import regex\nfrom doc_curation.md.library.metadata_helper import title_from_text\nfrom indic_transliteration import sanscript\n\n\ndef remove_non_content_text(content, remove_parenthized_text=True):\n # For correct regex matching.\n content = \"\\n%s\\n\\n\" % content\n from doc_curation.utils import patterns\n # remove summary tags\n content = regex.sub(patterns.SUMMARY, \"\", content)\n # Remove remaining tags\n content = regex.sub(\"<[^>\\n]+?>\", \"\", content)\n # remove footnote definitions\n content = regex.sub(patterns.FOOTNOTE_DEFINITION, \"\", content)\n # Remove footnote markers\n content = regex.sub(r\"\\[\\^.+?\\]\", \"\", content)\n # Remove section titles\n content = regex.sub(r\"\\n#.+?\\n\", \"\\n\", content)\n # Remove quote markers\n content = regex.sub(r\"\\n> +\", \"\\n\", content)\n # Remove js comments\n content = regex.sub(patterns.JS_COMMENTS, \"\", content)\n if remove_parenthized_text:\n # Remove paranthesized text\n content = regex.sub(r\"\\(.+?\\)\", \"\", content)\n # Remove final digits\n content = regex.sub(r\"[\\d०-९]+\\s*$\", \"\", content)\n\n # Undo initial additions\n content = regex.sub(r\"^\\n\", \"\", content)\n content = regex.sub(r\"\\n\\n$\", \"\", content)\n return content\n\n\ndef get_comparison_text(text):\n text = 
sanscript.SCHEMES[sanscript.DEVANAGARI].remove_numerals(in_string=text)\n text = title_from_text(text=text, num_words=40, target_title_length=1000, title_id=None)\n return text.strip()\n","sub_path":"doc_curation/md/content_processor/stripper.py","file_name":"stripper.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347505967","text":"import base64\n\n'''\n Criação de chave no Elastic\n POST /_security/api_key\n {\n \"name\": \"my-api-key\"\n }\n\n key = {\n \"id\": \"XXXXXXXXXXXXXXXXXXXX\",\n \"name\": \"my-api-key\",\n \"api_key\": \"YYYYYYYYYYYYYYYYY\"\n }\n'''\n\ndef setHeaders(contextType):\n\n headers = {\n \"Content-type\": contextType,\n \"Accept\": \"application/json\"\n }\n\n return headers","sub_path":"utils/httputils.py","file_name":"httputils.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88541294","text":"#author = Riley Doyle\n#date = 12/4/20\n#file = CO2Sys_Program_pHTA\n#status = working\n#Taken From Pierrot, D. E. Lewis,and D. W. R. Wallace. 2006. MS Excel Program Developed for \n#CO2 System Calculations. ORNL/CDIAC-105a. Carbon Dioxide Information Analysis Center, \n#Oak Ridge National Laboratory, U.S. Department of Energy, Oak Ridge, Tennessee. \n#doi: 10.3334/CDIAC/otg.CO2SYS_XLS_CDIAC105a\n\nimport numpy as np\nfrom CO2Sys_functions import *\nfrom constants import *\ndef CO2Sys_Program_pHTA(T, S, P, TP, TSi, TA, Tout, Pout, pH):\n ## Inputs\n RGasConstant = 83.1451 \n sqrSal = np.sqrt(S)\n Tk = T + 273.15 #K\n Pbar = P/10 #bars\n RT = RGasConstant*Tk\n \n ## Constants at Input Conditions\n [K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, KSi, TS, KS, TF, KF, VPFac, FugFac, fH, pHfactor, FREEtoTOT, SWStoTOT] = constants(Tk, T, S, sqrSal, P, Pbar, RT)\n \n ## In\n TCin = CalculateTCfromTApH(TA, pH, K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, TP, TSi, KSi, TS, KS, TF, KF)\n TC = TCin\n fCO2 = CalculatefCO2fromTCpH(TC, pH, K1, K2, K0)\n pCO2 = fCO2/FugFac\n [HCO3in, CO3in, BAlkin, OHin, PAlkin, SiAlkin, Hfreein, HSO4in, HFin] = CalculateAlkParts(pH, TC, K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, TP, TSi, KSi, TS, KS, TF, KF)\n CO2in = TCin - CO3in - HCO3in\n Revellein = RevelleFactor(TA, TC, K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, TP, TSi, KSi, TS, KS, TF, KF)\n [OmegaCain, OmegaArin] = CaSolubility(S, Tk, RT, TC, pH, sqrSal, T, Pbar, K0, K1, K2)\n xCO2dryin = pCO2/VPFac \n TCin = TC\n \n #Output \n T = Tout\n Tk = T + 273.15 #K\n P = Pout #dbars\n Pbar = P/10 #bars\n RT = RGasConstant*Tk\n ## Constants at Ouput Conditions\n [K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, KSi, TS, KS, TF, KF, VPFac, FugFac, fH, pHfactor, FREEtoTOT, SWStoTOT] = constants(Tk, T, S, sqrSal, P, Pbar, RT)\n \n ## Out\n TCout = TC\n fCO2out = CalculatefCO2fromTCpH(TC, pH, K1, K2, K0)\n pCO2out = fCO2out/FugFac\n [HCO3out, CO3out, BAlkout, OHout, PAlkout, SiAlkout, Hfreeout, HSO4out, HFout] = CalculateAlkParts(pH, TC, K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, TP, TSi, KSi, TS, KS, TF, KF)\n CO2out = TCout - CO3out - HCO3out\n Revelleout = RevelleFactor(TA, TC, K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, TP, TSi, KSi, TS, KS, TF, KF)\n [OmegaCaout, OmegaArout] = CaSolubility(S, Tk, RT, TC, pH, sqrSal, T, Pbar, K0, K1, K2)\n xCO2dryout = pCO2out/VPFac \n return [K1, K0, K2, TB, KB, KW, KP1, KP2, KP3, KSi, TS, KS, TF, KF, VPFac, FugFac, fH, pHfactor, FREEtoTOT, SWStoTOT, HCO3in, CO3in, BAlkin, OHin, PAlkin, SiAlkin, Hfreein, HSO4in, 
HFin, HCO3out, CO3out, BAlkout, OHout, PAlkout, SiAlkout, Hfreeout, HSO4out, HFout, TA, TC, Revellein, OmegaCain, OmegaArin,xCO2dryin, Revelleout, OmegaCaout, OmegaArout,xCO2dryout, CO2in, CO2out, pH, pCO2out]","sub_path":"Python files/CO2Sys_Program_pHTA.py","file_name":"CO2Sys_Program_pHTA.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539829372","text":"from twitter import *\nimport json\nimport time\n\n_consumer_key = 'xxx'\n_consumer_secret = 'xxx'\n_access_token_key = 'xxx'\n_access_token_secret = 'xxx'\n\nt = Twitter(auth = OAuth(_access_token_key, _access_token_secret,\n\t_consumer_key, _consumer_secret))\n\ndef followback (api):\n\tfollowers = api.followers.list()[\"users\"]\n\tfor f in followers:\n\t\tif not f[\"following\"]:\n\t\t\tapi.friendships.create(user_id=f[\"id\"])\n\nwhile 1:\n\tfollowback(t)\t\t\t\n\ttime.sleep(600)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"59550992","text":"def sortbyfrq(a):\n frequency = {}\n for ele in a:\n if ele in frequency:\n frequency[ele]['count'] += 1\n else:\n frequency[ele] = {'count':1}\n l=list(frequency.keys())\n for i in range(len(l)):\n for j in range(i + 1, len(l)):\n num1=l[i] \n num2=l[j] \n if frequency[num1]['count'] > frequency[num2]['count']:\n l[i], l[j] = l[j], l[i]\n p=[]\n for ele in l:\n count = frequency[ele]['count'] \n for x in range(count):\n p.append(ele)\n return p \n \nprint(sortbyfrq([5,5,5,1,1,1,3,3,2,7,7,7,7])) ","sub_path":"sort_element_by_frequency.py","file_name":"sort_element_by_frequency.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"371156369","text":"\n\nfreq = 0\nold = [freq]\nfound = False\n\nwhile not found: \n with open('freq.txt', 'r') as f:\n for i in f:\n freq += int(i)\n if freq in old:\n print(freq)\n found = True\n break\n old.append(freq)\n","sub_path":"2018/day1-part2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"331439112","text":"# regions is list of tuples of the form (TAG1, TAG2)\n# each tag may only appear at most once among all regions\n\nfrom .util import make_documentclass, render_to_png, tag_to_str, check_tag_occurs \nfrom . 
import DEFAULT_HIGHLIGHT_COLOR\n\ndef apply_code_highlighting(code, all_tags, regions_to_highlight, highlighted_wrapin):\n\n    for tag in all_tags:\n        check_tag_occurs(code, tag)\n\n    for (from_tag, to_tag) in regions_to_highlight:\n        for tag in (from_tag, to_tag):\n            if not tag in all_tags:\n                raise ValueError(\"tag %s was not registered\" % (tag,))\n        code = code.replace(tag_to_str(from_tag), highlighted_wrapin[0])\n        code = code.replace(tag_to_str(to_tag), highlighted_wrapin[1])\n\n    for tag in all_tags:\n        code = code.replace(tag_to_str(tag), \"\")\n\n    return code\n\n\ndef make_code_prelude(color, user_prelude):\n    return \"\\n\".join([\n        r\"\"\"\\usepackage{listings}\n    \\usepackage{xcolor}\n    \\usepackage{highlighter}\"\"\",\n        r\"\"\"\\definecolor{highlightcolor}{HTML}{\"\"\" + str(color) + r\"}\",\n        r\"\"\"\\tikzset{highlighter/.style = {highlightcolor, line width = \\baselineskip}}\n    \"\"\",\n        user_prelude])\n\ndef render_code(\n        code, all_tags, regions_to_highlight, png,\n        listings_settings,\n        varwidth_frac=None, color=DEFAULT_HIGHLIGHT_COLOR,\n        user_prelude=\"\"):\n\n    (lstset, (esc_start, esc_end)) = listings_settings\n    code = apply_code_highlighting(code, all_tags, regions_to_highlight,\n        (esc_start + r\"\\HighlightFrom\" + esc_end, esc_start + r\"\\HighlightTo{}\" + esc_end))\n    documentclass = make_documentclass(varwidth_frac)\n    prelude = make_code_prelude(color, user_prelude)\n    begindocument = r\"\\begin{document}\"\n    beginlisting = r\"\\begin{lstlisting}\"\n    endlisting = r\"\\end{lstlisting}\"\n    enddocument = r\"\\end{document}\"\n\n    # combine\n    latex_source = \"\\n\".join([\n        documentclass,\n        prelude,\n        lstset,\n        begindocument,\n        beginlisting,\n        code,\n        endlisting,\n        enddocument])\n\n    render_to_png(latex_source, png)\n","sub_path":"src/tutorial_highlighter/code_highlight.py","file_name":"code_highlight.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"315331396","text":"from utils.listnode import ListNode\n\n\nclass Solution:\n    def deleteDuplicates(self, head: ListNode) -> ListNode:\n        # sanity-check the input\n        if not head or not head.next:\n            return head\n\n        # the head itself may be removed, so anchor on a dummy node\n        dummy = ListNode(-1)\n        dummy.next = head\n\n        cur = dummy\n        while cur and cur.next and cur.next.next:\n            if cur.next.val == cur.next.next.val:\n                end = cur.next.next\n                while end and end.val == cur.next.val:\n                    end = end.next\n                cur.next = end\n            else:\n                cur = cur.next\n\n        return dummy.next\n","sub_path":"week4/remove_duplicates_from_sorted_list_ii.py","file_name":"remove_duplicates_from_sorted_list_ii.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"59899432","text":"#!/usr/bin/python3\n\nimport os\nimport time\n\ndebug = 0\nif debug == 0:\n    f = os.system\nelse:\n    f = print\n\n\nbpaccList = []\n\nfor line in open('bp_keys'):\n    bpaccName = line[:line.find(',')]\n    bpaccList.append(bpaccName)\n\n\n#transfer EOT to bp accounts from AACio\n\namount = \"20000.0000 AAC\"\nfor bpaccount in bpaccList:\n    f('claac transfer aacio '+ bpaccount +''' \"'''+ amount+'''\"''')\n    time.sleep(0.01)\n\n\n\nvoaccList = []\n\nfor line1 in open('vo_keys'):\n    voaccName = line1[:line1.find(',')]\n    voaccList.append(voaccName)\n\n\n\n# transfer EOT to voters account from AACio\n\namount = \"10000000.0000 AAC\"\nfor voaccount in voaccList:\n    f('claac transfer aacio '+ voaccount +''' \"'''+ amount+'''\"''')\n    
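    # brief pause between consecutive transfers (presumably to avoid flooding the local claac endpoint)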
time.sleep(0.01)\n","sub_path":"tests/ck_test/0-vote-test-scripts/7-issue_aac.py","file_name":"7-issue_aac.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2273047","text":"from random import randint\nimport timeit\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n \ndef geraLista(tam):\n lista = []\n for i in range(tam):\n n = randint(1,1*tam)\n if n not in lista: lista.append(n)\n return lista\n \ndef desenhaGrafico(x,y,yl,name,xl = \"Tamanho da Lista de Números\"):\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(x,y, label = \"Bubble Sort\")\n ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)\n plt.ylabel(yl)\n plt.xlabel(xl)\n fig.savefig(name)\n\nnsw = 0\n\n#Bubble Sort\ndef bubbleSort(vetor):\n global nsw\n nsw = 0\n tamanho_l = len(vetor)\n for i in range(tamanho_l):\n troca = False\n for j in range(1, tamanho_l - i):\n if vetor[j] < vetor[j - 1]:\n vetor[j], vetor[j - 1] = vetor[j - 1], vetor[j]\n nsw=nsw+1\n troca = True\n if not troca:\n break\n\ntam_vetor = [10000,20000,50000,100000]\nvetores = []\n\nfor i in range(len(tam_vetor)):\n vetores.append(geraLista(tam_vetor[i]))\n\nswaps = []\ntempos = []\n\nfor i in range(len(tam_vetor)):\n tempos.append(timeit.timeit(\"bubbleSort({})\".format(vetores[i]), setup=\"from __main__ import bubbleSort\",number=1))\n swaps.append(nsw)\n\ndesenhaGrafico(tam_vetor, swaps, \"Quantidade de operações (SWAP)\", \"Swaps.png\")\ndesenhaGrafico(tam_vetor, tempos, \"Tempo para ordenar pelo método\", \"Tempos.png\")\n","sub_path":"CT0101.py","file_name":"CT0101.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"196398004","text":"import numpy as np\nimport xarray as xr\n\nimport pandas as pd\nPoint = None\ngpd = None\n\n\ndef _make_dataset(size, variable_name='VHI', lonmin=-180.0, lonmax=180.0,\n latmin=-55.152, latmax=75.024, add_times=True, const=False,\n start_date='1999-01-01', end_date='2001-12-31'):\n\n lat_len, lon_len = size\n # create the vector\n longitudes = np.linspace(lonmin, lonmax, lon_len)\n latitudes = np.linspace(latmin, latmax, lat_len)\n\n dims = ['lat', 'lon']\n coords = {'lat': latitudes,\n 'lon': longitudes}\n\n if add_times:\n times = pd.date_range(start_date, end_date, name='time', freq='M')\n size = (len(times), size[0], size[1])\n dims.insert(0, 'time')\n coords['time'] = times\n var = np.random.randint(100, size=size)\n if const:\n var *= 0\n var += 1\n\n ds = xr.Dataset({variable_name: (dims, var)}, coords=coords)\n\n return ds, (lonmin, lonmax), (latmin, latmax)\n\n\ndef _create_dummy_precip_data(tmp_path,\n start_date='1999-01-01',\n end_date='2001-12-31'):\n data_dir = tmp_path / 'data' / 'interim' / 'chirps_preprocessed'\n if not data_dir.exists():\n data_dir.mkdir(parents=True, exist_ok=True)\n\n precip, _, _ = _make_dataset(\n (30, 30), variable_name='precip',\n start_date=start_date, end_date=end_date\n )\n precip.to_netcdf(data_dir / 'chirps_kenya.nc')\n\n return data_dir\n\n\nclass CreateSHPFile:\n def __init__(self):\n # import Point and Geopandas\n global Point\n if Point is None:\n from shapely.geometry import Point\n\n global gpd\n if gpd is None:\n import geopandas as gpd\n\n @staticmethod\n def create_demo_shapefile(filepath):\n df = pd.DataFrame({\n 'PROVID': [10, 20],\n 'PROVINCE': ['NAIROBI', 'KIAMBU'],\n })\n\n p1 = Point((34.27795473150634, 
0.3094489371060183))\n        p2 = Point((35.45785473150634, 0.0118489371060182))\n\n        gdf = gpd.GeoDataFrame(df, geometry=[p1, p2])\n        gdf['geometry'] = gdf.buffer(0.2)\n        gdf.to_file(driver='ESRI Shapefile', filename=filepath)\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"646515502","text":"import sys\nimport socket\nimport json\nimport logging\nimport xmltodict\nimport ssl\nimport os\n\nserver_address = ('172.16.16.104', 16000)\n\ndef make_socket(destination_address='localhost',port=12000):\n    try:\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        server_address = (destination_address, port)\n        logging.warning(f\"connecting to {server_address}\")\n        sock.connect(server_address)\n        return sock\n    except Exception as ee:\n        logging.warning(f\"error {str(ee)}\")\n\ndef make_secure_socket(destination_address='localhost',port=10000):\n    try:\n        #get it from https://curl.se/docs/caextract.html\n\n        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n        context.verify_mode=ssl.CERT_OPTIONAL\n        context.load_verify_locations(os.getcwd() + '/domain.crt')\n\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        server_address = (destination_address, port)\n        logging.warning(f\"connecting to {server_address}\")\n        sock.connect(server_address)\n        secure_socket = context.wrap_socket(sock,server_hostname=destination_address)\n        logging.warning(secure_socket.getpeercert())\n        return secure_socket\n    except Exception as ee:\n        logging.warning(f\"error {str(ee)}\")\n\ndef deserialisasi(s):\n    logging.warning(f\"deserialisasi {s.strip()}\")\n    return json.loads(s)\n    \n\ndef send_command(command_str,is_secure=False):\n    alamat_server = server_address[0]\n    port_server = server_address[1]\n#    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#    use the make_socket / make_secure_socket helpers above\n    if is_secure == True:\n        sock = make_secure_socket(alamat_server,port_server)\n    else:\n        sock = make_socket(alamat_server,port_server)\n\n    logging.warning(f\"connecting to {server_address}\")\n    try:\n        logging.warning(f\"sending message \")\n        sock.sendall(command_str.encode())\n        # Look for the response, waiting until socket is done (no more data)\n        data_received=\"\" #empty string\n        while True:\n            #socket does not receive all data at once, data comes in part, need to be concatenated at the end of process\n            data = sock.recv(16)\n            if data:\n                #data is not empty, concat with previous content\n                data_received += data.decode()\n                if \"\\r\\n\\r\\n\" in data_received:\n                    break\n            else:\n                # no more data, stop the process by break\n                break\n        # at this point, data_received (string) will contain all data coming from the socket\n        # to be able to use the data_received as a dict, need to load it using json.loads()\n        hasil = deserialisasi(data_received)\n        logging.warning(\"data received from server:\")\n        return hasil\n    except Exception as ee:\n        logging.warning(f\"error during data receiving {str(ee)}\")\n        return False\n\n\n\ndef getdatapemain(nomor=0,is_secure=False):\n    cmd=f\"getdatapemain {nomor}\\r\\n\\r\\n\"\n    hasil = send_command(cmd,is_secure=is_secure)\n    return hasil\n\ndef lihatversi(is_secure=False):\n    cmd=f\"versi \\r\\n\\r\\n\"\n    hasil = send_command(cmd,is_secure=is_secure)\n    return hasil\n    \n\n\nif __name__=='__main__':\n    h = lihatversi(is_secure=True)\n    if (h):\n        print(h)\n\n    h = getdatapemain(1,is_secure=True)\n    if (h):\n        print(h['nama'],h['nomor'])\n    else:\n        print(\"kegagalan pada data 
transfer\")\n\n h = getdatapemain(2,is_secure=True)\n if (h):\n print(h['nama'],h['nomor'])\n else:\n print(\"kegagalan pada data transfer\")\n\n h = getdatapemain(3,is_secure=True)\n if (h):\n print(h['nama'],h['nomor'])\n else:\n print(\"kegagalan pada data transfer\")\n\n h = getdatapemain(4,is_secure=True)\n if (h):\n print(h['nama'],h['nomor'])\n else:\n print(\"kegagalan pada data transfer\")\n","sub_path":"progjar1a/client_side/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"631443279","text":"from keras.models import load_model\nimport pickle\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.backend import clear_session\nfrom gensim.utils import simple_preprocess\nimport nltk\nimport os\nfrom nltk.corpus import wordnet\nimport pandas as pd\nfrom collections import Counter\nfrom googletrans import Translator\n#nltk.download('stopwords')\nstop_words = set(nltk.corpus.stopwords.words('english'))\nstop_words_spanish = set(nltk.corpus.stopwords.words('spanish'))\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nfrom sklearn.metrics import confusion_matrix\n\n\n\n\nallowed_tags = ['JJ', 'JJR', 'JJS', # adjectives\n 'NN', 'NNS', # nouns\n 'RB', 'RBR', 'RBS', # adverbs\n 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ' # verbs\n ]\n\ndef clean_reviews_english(review):\n clean_tokens = [word for word in simple_preprocess(review) if word not in stop_words]\n clean_tokens = [token for token, pos in nltk.pos_tag(clean_tokens) if pos in allowed_tags]\n return ' '.join(clean_tokens)\n\ndef clean_reviews_spanish(review):\n clean_tokens = [word for word in simple_preprocess(review) if word not in stop_words_spanish]\n clean_tokens = [token for token, pos in nltk.pos_tag(clean_tokens) if pos in allowed_tags]\n sentence = ' '.join(clean_tokens)\n translator = Translator()\n sentence = translator.translate(sentence).text\n return sentence\n\n# Model Used\n # Sequential()\n # Embedding()\n # Bidirectional(LSTM(100))\n # Dropout(0.5)\n # Dense(50)\n # Dopout(0.2)\n # Dense(1)\n\ndef load_model_required():\n with open(os.path.join(os.path.dirname(__file__), 'tokenizer.pickle'), 'rb') as handle:\n tokenizer = pickle.load(handle)\n model = load_model(os.path.join(os.path.dirname(__file__), 'model_bilstm.h5'))\n\n return model, tokenizer\n\ndef extract_info(review, product_name, language):\n allowed_tags = ['NN', 'NNS'] #nouns\n product_name_tokens = [element.lower() for element in product_name.split()]\n product_name_tokens.append('stars')\n\n for word in product_name_tokens:\n for syn in wordnet.synsets(word):\n stop_words.add(syn)\n for lm in syn.lemmas():\n stop_words.add(lm.name())\n stop_words.add(word)\n words_to_remove=['love', 'life', 'non', 'nothing', 'reason',\n 'day', 'days', 'months', 'connects', 'people', 'person',\n 'customer', 'amazon', 'center', 'clothes', 'point', 'hope',\n 'minutes', 'service']\n for i in words_to_remove:\n stop_words.add(i)\n clean_tokens = [word for word in simple_preprocess(review) if word not in stop_words]\n clean_tokens =list(set(clean_tokens))\n sentence = ' '.join([token for token, pos in nltk.pos_tag(clean_tokens) if pos in allowed_tags])\n if language is 'es':\n translator = Translator()\n sentence = translator.translate(sentence, src='en', dest='es').text\n return sentence\n\ndef predict_star(value):\n if value < 0.2: star = 1\n else:\n if value >= 0.2 and value < 0.4: star = 2\n 
else:\n            if value >= 0.4 and value < 0.6: star = 3\n            else:\n                if value >= 0.6 and value < 0.8: star = 4\n                else: star = 5\n    return star\n\ndef sa_amazon(df, product_name, language):\n    model, tokenizer = load_model_required()\n    print(\"Model Loaded\")\n    if language == 'es':\n        df['Clean'] = df['Review'].apply(clean_reviews_spanish)\n    else:\n        df['Clean'] = df['Review'].apply(clean_reviews_english)\n    X_test = pad_sequences(tokenizer.texts_to_sequences(df['Clean']), maxlen=30, padding='post')\n    df['Prediction'] = model.predict(X_test)\n    df['Prediction'] = df['Prediction'].apply(lambda x: round(x, 3))\n    df['Predicted Star'] = df['Prediction'].apply(predict_star)\n    df['Keywords'] = df['Clean'].apply(extract_info, product_name=product_name, language=language)\n    clear_session()\n    return df\n\ndef pros_cons(df):\n    pros_cons = {}\n    num_reviews = len(df)\n    for keywords, prediction in zip(df['Keywords'], df['Prediction']):\n        for word in keywords.split():\n            if word in pros_cons:\n                if float(prediction) < 0.5: pros_cons[word] = pros_cons[word] -1\n                else: pros_cons[word] = pros_cons[word] + 1\n            else:\n                if float(prediction) < 0.5: pros_cons[word] = -1\n                else: pros_cons[word] = + 1\n    pros_cons = sorted((value, key) for (key,value) in pros_cons.items())\n    cons = []\n    cons_value = []\n    pros = []\n    pros_value = []\n    top = 5\n    threshold = 0.05\n    for element in range(0,top-1):\n        value = pros_cons[element][0]/num_reviews\n        if value < threshold:\n            cons.append(pros_cons[element][1])\n            cons_value.append(value)\n    for element in range(1,top):\n        value = pros_cons[-1 * element][0]/num_reviews\n        if value > threshold:\n            pros.append(pros_cons[-1*element][1])\n            pros_value.append(value)\n\n    return pd.DataFrame({'Pros': pros,\n                         'Pros_Value': pros_value}),\\\n           pd.DataFrame({'Cons': cons,\n                         'Cons_Value': cons_value})\n\n\n\n\ndef polar_count(df):\n    num_reviews = len(df)\n    stars, predicted = [],[]\n    my_dict = Counter(df['Stars'])\n\n    counter = 1\n    for key in sorted(my_dict):\n\n        while counter != key:\n            stars.append(0)\n            counter += 1\n\n        stars.append(my_dict[key] / num_reviews)\n        counter += 1\n\n    my_dict = Counter(df['Predicted Star'])\n\n    counter = 1\n    for key in sorted(my_dict):\n        while counter != key:\n            predicted.append(0)\n            counter += 1\n        predicted.append(my_dict[key] / num_reviews)\n        counter += 1\n\n    return stars, predicted\n\ndef get_confusion_matrix(df):\n\n    conf = confusion_matrix(df['Stars'], df['Predicted Star'])\n    conf = conf.tolist()\n    conf.insert(0, ['1 Star', '2 Stars', '3 Stars', '4 Stars', '5 Stars'])\n\n    return conf\n\ndef convert_to_bin(num):\n\n    if num > 3: return 1\n    if num < 3: return -1\n    else: return 0\n\ndef get_confusion_matrix_bin(df):\n\n    aux = df[['Stars', 'Predicted Star']]\n    aux['Stars'] = aux['Stars'].apply(convert_to_bin)\n    aux['Predicted Star'] = aux['Predicted Star'].apply(convert_to_bin)\n    conf = confusion_matrix(aux['Stars'], aux['Predicted Star'])\n    conf = conf.tolist()\n    conf.insert(0, ['Negative', 'Neutral', 'Positive'])\n\n    return conf\n\n","sub_path":"web_app/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"232507415","text":"from argparse import ArgumentParser\nimport tensorflow as tf\nfrom archs import P_Net, losses, O_Net, R_Net\nimport time\n# from math import pow\nimport os\nfrom random import shuffle\nimport numpy as np\nfrom PIL import Image\nimport math\nimport matplotlib.pyplot as plt\nfrom generator import 
generator_img_region\n\n\ndef initialize_uninitialized(sess):\n    global_vars = tf.global_variables()\n    is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n    not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n    if len(not_initialized_vars):\n        sess.run(tf.variables_initializer(not_initialized_vars))\ndef get_by_indx(imgs, labs, com_labs, ori_imgs, ori_labels, crops, loss_sorted):\n    n_imgs = []\n    n_labs = []\n    n_com_labs = []\n    n_ori_imgs = []\n    n_ori_labels = []\n    n_crops = []\n    for u in loss_sorted:\n        n_imgs.append(imgs[u])\n        n_labs.append(labs[u])\n        n_com_labs.append(com_labs[u])\n        n_ori_imgs.append(ori_imgs[u])\n        n_ori_labels.append(ori_labels[u])\n        n_crops.append(crops[u])\n    return n_imgs, \\\n           n_labs, \\\n           n_com_labs, \\\n           n_ori_imgs, \\\n           n_ori_labels, \\\n           n_crops\n\n\nbatch = 30\nepoch = 1000000\nlear = 1e-4\nlss = []\n\nparser = ArgumentParser()\nparser.add_argument(\"-S\", \"--save-log\", help=\"save to train_log\", dest=\"save_log\", default=\"3\")\nparser.add_argument(\"-G\", \"--gpu-memory\", help=\"gpu memory used\", type=float, dest=\"gpu_memory\", default=\"0.4\")\nparser.add_argument(\"-P\", \"--part-train\", help=\"which part to train \", type=str, dest=\"part_train\", default=\"O\")\n\nargs = parser.parse_args()\n\n\nsave_log = os.path.join('train_log',\n                        args.save_log)\n\nif not os.path.isdir(save_log):\n    os.mkdir(save_log)\n\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\noutput_num = 10\n\nwith tf.Session(config=config) as sess:\n    if args.part_train == 'P':\n        net = P_Net.P_Net()\n        loss_rate = [1, 0.5, 0.5]\n        image_size = 12\n        dy = [5, 13, 20]\n\n    elif args.part_train == 'R':\n        net = R_Net.R_Net()\n        loss_rate = [1, 0.5, 0.5]\n        image_size = 24\n        dy = [5, 13, 20]\n\n    elif args.part_train == 'O':\n        net = O_Net.O_Net()\n        loss_rate = [1, 0.5, 1]\n        image_size = 48\n        dy = [10, 22, 40]\n\n    ge = generator_img_region.generator(json_path='/data/train_label.json',\n                                        image_path='/data/img_pyramids/',\n                                        image_size=image_size,\n                                        batch=int(batch / 3),\n                                        image_from_each_face=3,\n                                        dy=dy)\n\n    img = tf.placeholder(tf.float32,\n                         [None, None, None, 3])\n    lab = tf.placeholder(tf.float32,\n                         [None, output_num])\n    com_lab = tf.placeholder(tf.float32,\n                             [None, output_num])\n    lr = tf.placeholder(tf.float32,\n                        [None])\n\n    n_output = net.pr(img)\n    output = tf.reshape(n_output, [-1, 10])\n    cls = output[:, :2]\n    bbox = output[:, 2:6]\n    eye_reg = output[:, 6:]\n\n    lab_cls = lab[:, :2]\n    lab_bbox = lab[:, 2:6]\n    lab_eye_reg = lab[:, 6:]\n\n    com_lab_cls = com_lab[:, :2]\n    com_lab_bbox = com_lab[:, 2:6]\n    com_lab_eye_reg = com_lab[:, 6:]\n\n    cls_loss = losses.cross_entropy_loss(cls, lab_cls)\n    bbox_loss = losses.Euclidean_loss(bbox, lab_bbox)*com_lab_bbox\n    bbox_loss = tf.squeeze(bbox_loss)\n    eye_reg_loss = losses.Euclidean_loss(eye_reg, lab_eye_reg)*com_lab_eye_reg\n    eye_reg_loss = tf.squeeze(eye_reg_loss)\n\n    loss = loss_rate[0] * cls_loss + \\\n           loss_rate[1] * tf.reduce_sum(bbox_loss, 1) + \\\n           loss_rate[2] * tf.reduce_sum(eye_reg_loss, 1)\n    t_loss = loss\n    # loss = 0.5 * tf.reduce_sum(bbox_loss, 3) + 0.5 * tf.reduce_sum(eye_reg_loss, 3)\n\n    train_step = tf.train.MomentumOptimizer(lr[0],\n                                            0.9). 
\\\n minimize(t_loss)\n\n model_af = tf.train.Saver()\n initialize_uninitialized(sess)\n\n print('begin: ')\n for seq in range(epoch):\n\n if (seq + 1) % 2000 == 0:\n lear *= 0.5\n print(lear)\n\n begin_time = time.time()\n\n imgs, \\\n labs, \\\n com_labs, \\\n ori_imgs, \\\n ori_labels, \\\n crops \\\n = ge.__next__(image_size)\n\n total_loss = sess.run(loss,\n feed_dict={img: imgs,\n lab: labs,\n com_lab: com_labs,\n lr: [lear]})\n\n loss_sorted = sorted(range(len(total_loss.tolist())), key=lambda k: total_loss[k])\n loss_sorted = loss_sorted[:int(len(total_loss)*0.7)]\n\n imgs, \\\n labs, \\\n com_labs, \\\n ori_imgs, \\\n ori_labels, \\\n crops = \\\n get_by_indx(imgs,\n labs,\n com_labs,\n ori_imgs,\n ori_labels,\n crops,\n loss_sorted)\n\n sess.run(train_step,\n feed_dict={img: imgs,\n lab: labs,\n com_lab: com_labs,\n lr: [lear]})\n\n if seq % 5 == 0:\n\n print('\\nSequence:', str(seq))\n\n [ls_t,\n out] = sess.run([t_loss,\n output],\n feed_dict={img: imgs,\n lab: labs,\n com_lab: com_labs,\n lr: [lear]})\n ls_t = np.sum(ls_t)\n lss.append(ls_t)\n if len(lss) > 1e4:\n lss.remove(lss[0])\n\n plt.plot(range(len(lss)), lss)\n feed_back_folder = os.path.join(save_log, 'feed_back')\n\n if not os.path.isdir(feed_back_folder):\n os.mkdir(feed_back_folder)\n plt.savefig(os.path.join(feed_back_folder,\n 'l' + str(int(seq / 5e4)) + '.png'))\n #plt.show()\n plt.clf()\n\n avg_loss = sum([0.9 * \\\n math.pow(0.1,\n len(lss) - 1 - lsins) \\\n * ls \\\n for lsins, ls in enumerate(lss)])\n\n print('spand time: {0:.3f}, loss: {1:.3f}'. \\\n format(time.time() - begin_time\n , avg_loss\n ))\n\n if (seq + 1) % 1000 == 0:\n save_folder = os.path.join(save_log, 'models')\n\n if not os.path.isdir(save_folder):\n os.mkdir(save_folder)\n\n model_af.save(sess,\n os.path.join(save_folder,\n str(seq + 1) + 'save_net.ckpt'))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"403653814","text":"# coding: utf-8\nimport argparse\nimport json\nimport math\nfrom os import path\n\nimport torch\nfrom logzero import logger\nfrom tqdm import tqdm\n\nfrom batchiterator import NtcBucketIterator\nfrom evaluation import evaluate_joint_softmax_multiclass_without_none\nfrom models import JointSoftmaxE2EStackedBiRNN\nfrom utils import load_dataset, pretrained_word_vecs\n\nBERT_DIM = 768\n\n\ndef create_arg_parser():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--test_data', type=path.abspath, default=None)\n parser.add_argument('--test_bert_embed_file', type=path.abspath, default=None, help=\"hdf5 file\")\n parser.add_argument('--model_file', type=str)\n parser.add_argument('--tag', type=str, default=\"dev\",\n help='type of evaluation data split')\n parser.add_argument('--thres', action='store_true')\n parser.add_argument('--scores', action='store_true')\n\n return parser\n\n\ndef decode(out_dir, data, tag, model, model_id, thres):\n print('# Decode')\n file = open(out_dir + \"/predict-\" + tag + '-' + model_id + \".txt\", \"w\")\n data.create_batches()\n model.eval()\n for xss, yss in tqdm(data, mininterval=5):\n out_each_word, out_all_words = model(xss)\n\n # for pred_no in range(yss.size()[0]):\n for pred_no in range(len(yss)):\n predict = out_each_word[pred_no].cpu()\n predict = torch.pow(torch.zeros(predict.size()) + math.e, predict.data)\n\n # add\n p_id, sent_id, doc_name = yss[pred_no]\n out_dict = {\"pred\": p_id, \"sent\": 
sent_id, \"file\": doc_name}\n for label_idx, label in enumerate([\"ga\", \"o\", \"ni\"]):\n max_idx = torch.argmax(predict[:, label_idx])\n max_score = predict[max_idx][label_idx] - thres[label_idx]\n if max_score >= 0:\n out_dict[label] = int(max_idx)\n out_dict = json.dumps(out_dict)\n print(out_dict, file=file)\n\n\ndef decode_scores(out_dir, data, tag, model, model_id):\n logger.info('# Decode Scores')\n file = open(out_dir + \"/score-\" + tag + '-' + model_id + \".txt\", \"w\")\n data.create_batches()\n model.eval()\n for xss, yss in tqdm(data, mininterval=5):\n out_each_word, out_all_words = model(xss)\n\n for pred_no in range(len(yss)):\n predict = out_each_word[pred_no].cpu()\n predict = torch.pow(torch.zeros(predict.size()) + math.e, predict.data)\n p_id, sent_id, doc_name = yss[pred_no]\n out_dict = {\"pred\": p_id, \"sent\": sent_id, \"file\": doc_name, \"scores\": predict.tolist()}\n out_dict = json.dumps(out_dict)\n print(out_dict, file=file)\n\n\ndef calculate_train_threshold(train_args, model):\n logger.info(\"Train args:\")\n for k, v in train_args.items():\n logger.info(\"\\t{}: {}\".format(k, v))\n\n # Load Dataset\n data_train = load_dataset(train_args[\"train\"], train_args[\"data_size\"])\n if train_args[\"train_method\"] == \"concat\":\n data_pseudo = load_dataset(train_args[\"pseudo\"], train_args[\"data_size\"]) if train_args[\"pseudo\"] else []\n data_train += data_pseudo\n\n data_train = NtcBucketIterator(data_train, train_args[\"batch_size\"],\n multi_predicate=train_args[\"multi_predicate\"],\n bert=train_args[\"bert\"],\n load_cpu=train_args.get(\"load_cpu\"),\n bert_embed_file=train_args.get(\"train_bert_embed_file\"),\n joint_softmax=True)\n\n thres_set_ga = list(map(lambda n: n / 100.0, list(range(10, 71, 1))))\n thres_set_wo = list(map(lambda n: n / 100.0, list(range(20, 86, 1))))\n thres_set_ni = list(map(lambda n: n / 100.0, list(range(0, 61, 1))))\n thres_lists = [thres_set_ga, thres_set_wo, thres_set_ni]\n labels = [\"ga\", \"wo\", \"ni\", \"all\"]\n\n data_train.create_batches()\n model.eval()\n thres, *_ = evaluate_joint_softmax_multiclass_without_none(model, data_train, len(data_train),\n labels, thres_lists, logger)\n logger.info(\"Best threshold: {}\".format(thres))\n\n return thres\n\n\ndef main():\n parser = create_arg_parser()\n args = parser.parse_args()\n\n out_dir = path.dirname(args.model_file)\n model_id = path.basename(out_dir)\n with open(path.join(out_dir, \"args.json\")) as fi:\n train_args = json.load(fi)\n\n word_embedding_matrix = pretrained_word_vecs(train_args[\"wiki_embed_dir\"], \"/wordIndex.txt\") if train_args[\n \"wiki\"] else None\n\n model = JointSoftmaxE2EStackedBiRNN(hidden_dim=train_args[\"hidden_dim\"],\n n_layers=train_args[\"n_layers\"],\n out_dim=4,\n embedding_matrix=word_embedding_matrix,\n fixed_word_vec=train_args[\"fixed_word_vec\"],\n multi_predicate=train_args[\"multi_predicate\"],\n use_wiki_vec=train_args[\"wiki\"],\n use_bert_vec=train_args[\"bert\"],\n bert_dim=BERT_DIM,\n dev_bert_embed_file=args.test_bert_embed_file,\n load_cpu=train_args[\"load_cpu\"],\n dropout=train_args[\"dropout\"])\n\n model.load_state_dict(torch.load(args.model_file))\n\n if torch.cuda.is_available():\n model = model.cuda()\n\n data = load_dataset(args.test_data, 100)\n data = NtcBucketIterator(data, train_args[\"batch_size\"], multi_predicate=train_args[\"multi_predicate\"],\n bert=train_args[\"bert\"], decode=True,\n load_cpu=train_args[\"load_cpu\"], bert_embed_file=args.test_bert_embed_file)\n\n if args.scores:\n 
decode_scores(out_dir, data, args.tag, model, model_id)\n else:\n if args.thres:\n fn = path.join(path.dirname(args.model_file), \"best.thresh\")\n with open(fn) as fi:\n threshold = json.load(fi)\n logger.info(\"Loaded Threshold: {}\".format(threshold))\n else:\n threshold = calculate_train_threshold(train_args, model)\n\n with open(path.join(out_dir, \"test.thresh\"), \"w\") as fo:\n json.dump(threshold, fo)\n\n new_model_id = model_id + \"-\" + \"-\".join(str(i) for i in threshold)\n print(new_model_id)\n\n decode(out_dir, data, args.tag, model, new_model_id, threshold)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/jp_pas/decode_joint_softmax.py","file_name":"decode_joint_softmax.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"21850892","text":"# Copyright (c) 2015 by Farsight Security, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom cStringIO import StringIO\nimport email.utils\nimport httplib\nimport logging\nimport mimetypes\nimport os\nimport pipes\nimport subprocess\nimport tempfile\nimport urllib\nimport urllib2\n\nlogger = logging.getLogger(__name__)\n\nclass RsyncHandler(urllib2.BaseHandler):\n def __init__(self, rsync_path='rsync', rsync_rsh=None, tmpdir=None):\n self.rsync_path = rsync_path\n self.rsync_rsh = rsync_rsh\n self.tmpdir = tmpdir\n\n def rsync_rsh_open(self, req):\n logger.debug('Opening rsync+rsh')\n host = req.get_host()\n if not host:\n raise urllib2.URLError('rsync+ssh error: not host given')\n if ':' in host:\n raise urllib2.URLError('rsync+ssh error: \\':\\' character not supported in host')\n\n path, attrs = urllib.splitattr(req.get_selector())\n if not path:\n raise urllib2.URLError('rsync+ssh error: no path given')\n\n source = '{}:{}'.format(host, path)\n return self.do_rsync(source, attrs=attrs)\n\n def rsync_open(self, req):\n logger.debug('Opening rsync')\n source, attrs = urllib.splitattr(req.get_full_url())\n return self.do_rsync(source, attrs=attrs)\n\n def do_rsync(self, source, attrs=[]):\n options = dict()\n for attr in attrs:\n k,_,v = attr.partition('=')\n options[k] = v\n options.setdefault('rsync_path', self.rsync_path)\n options.setdefault('rsync_rsh', self.rsync_rsh)\n\n cmd_args = [options['rsync_path'], '-t', '--whole-file']\n\n if options['rsync_rsh']:\n cmd_args.extend(('-e', options['rsync_rsh']))\n\n fn = source.rpartition('/')[2]\n tf = tempfile.mktemp(prefix='rsync--{}.'.format(fn), dir=self.tmpdir)\n\n cmd_args.extend((source, tf))\n logger.debug('Callling {}'.format(' '.join(map(pipes.quote, cmd_args))))\n\n stderr = tempfile.TemporaryFile(dir=self.tmpdir)\n try:\n subprocess.check_call(cmd_args, stderr=stderr)\n\n tf_stat = os.stat(tf)\n fp = open(tf)\n except subprocess.CalledProcessError:\n stderr.seek(0)\n raise urllib2.URLError('rsync error: {}'.format(stderr.read()))\n finally:\n try:\n os.unlink(tf)\n except OSError as e:\n logger.error('Error unlinking {}: 
{}'.format(tf, e))\n\n        headers = StringIO()\n        mtype = mimetypes.guess_type(source)[0]\n        if mtype:\n            print ('Content-type: {}'.format(mtype), file=headers)\n            logger.debug('Content-type: {}'.format(mtype))\n\n        print ('Content-length: {:0d}'.format(tf_stat.st_size), file=headers)\n        logger.debug('Content-length: {:0d}'.format(tf_stat.st_size))\n\n        print ('Last-modified: {}'.format(email.utils.formatdate(tf_stat.st_mtime, usegmt=True)), file=headers)\n        logger.debug('Last-modified: {}'.format(email.utils.formatdate(tf_stat.st_mtime, usegmt=True)))\n\n        headers.seek(0)\n        msg = httplib.HTTPMessage(fp=headers, seekable=True)\n\n        return urllib.addinfourl(fp, msg, source)\n\n    handler_order = urllib2.UnknownHandler.handler_order - 1\nsetattr(RsyncHandler, 'rsync+rsh_open', RsyncHandler.rsync_rsh_open)\n\ndef install(*args, **kwargs):\n    logger.debug('Installing RsyncHandler')\n    opener = urllib2.build_opener(RsyncHandler(*args, **kwargs))\n    urllib2.install_opener(opener)\n\ninstall()\n","sub_path":"dnstable_manager/rsync.py","file_name":"rsync.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"567056691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 26 08:22:43 2020\n\n@author: 33664\n\"\"\"\nfrom tkinter import *\nimport numpy as np\nimport pickle\n\nclass Vect4D:\n    def __init__(self,x,y,z,t):\n        self.x = x\n        self.y = y\n        self.z = z\n        self.t = t\n    def __setitem__(self,i,valeur):\n        assert i in range(4)\n        if(i == 0):\n            self.x = valeur\n        elif(i == 1):\n            self.y = valeur\n        elif(i == 2):\n            self.z = valeur\n        else:\n            self.t = valeur\n    def __getitem__(self,i):\n        assert i in range(4)\n        if(i == 0):\n            return self.x\n        elif(i == 1):\n            return self.y\n        elif(i == 2):\n            return self.z\n        else:\n            return self.t\n    def __str__(self):\n        return '('+str(self.x)+', '+str(self.y)+', '+str(self.z)+', '+str(self.t)+')'\n    def module(self):\n        return 'module: '+ str(( (self.x)**2 + (self.y)**2 + (self.z)**2 + (self.t)**2 )**0.5)\n    def __add__(self,v):\n        return Vect4D(self.x + v.x, self.y + v.y, self.z + v.z, self.t + v.t)\n    def __sub__(self,v):\n        return Vect4D(self.x - v.x, self.y - v.y, self.z - v.z, self.t - v.t) \n    def __eq__(self,v):\n        if not isinstance(v, Vect4D):\n            return NotImplemented\n        res = False\n        if(self.x == v.x and self.y == v.y and self.z == v.z and self.t == v.t):\n            res = True\n        return res\n    def __mul__(self, v):\n        if(type(v) == float or type(v) == int):\n            return Vect4D(self.x * v, self.y * v,self.z * v, self.t * v)\n        elif(type(v) == Vect4D):\n            return self.x * v.x +self.y * v.y + self.z * v.z + self.t * v.t\n        else:\n            return 'erreur'\n    \n\n\nclass Mat4D:\n    def __init__(self,v1 = None,v2 = None, v3 = None, v4 = None):\n        if(v1 == None and v2 == None and v3 == None and v4 == None):\n            input(\"création matrice: \\n\")\n            v1_0 = int(input(\"v1[0]: \"))\n            v1_1 = int(input(\"v1[1]: \"))\n            v1_2 = int(input(\"v1[2]: \"))\n            v1_3 = int(input(\"v1[3]: \"))\n            v1 = Vect4D(v1_0,v1_1,v1_2,v1_3)\n            v2_0 = int(input(\"v2[0]: \"))\n            v2_1 = int(input(\"v2[1]: \"))\n            v2_2 = int(input(\"v2[2]: \"))\n            v2_3 = int(input(\"v2[3]: \"))\n            v2 = Vect4D(v2_0,v2_1,v2_2,v2_3)\n            v3_0 = int(input(\"v3[0]: \"))\n            v3_1 = int(input(\"v3[1]: \"))\n            v3_2 = int(input(\"v3[2]: \"))\n            v3_3 = int(input(\"v3[3]: \"))\n            v3 = Vect4D(v3_0,v3_1,v3_2,v3_3)\n            v4_0 = int(input(\"v4[0]: \"))\n            v4_1 = int(input(\"v4[1]: \"))\n            v4_2 = int(input(\"v4[2]: \"))\n            v4_3 = int(input(\"v4[3]: \"))\n            v4 = Vect4D(v4_0,v4_1,v4_2,v4_3)\n        self.v1 = v1\n        self.v2 = v2\n        self.v3 = v3\n        self.v4 = v4\n        \n    def __str__(self):\n        mat = \"\"\n        mat 
= mat + \"(\" + str(self.v1.x) + \" \" + str(self.v2.x) + \" \" + str(self.v3.x) + \" \" + str(self.v4.x) + \")\" + \"\\n\"\n mat = mat + \"(\" + str(self.v1.y)+ \" \" + str(self.v2.y) + \" \" + str(self.v3.y) + \" \" + str(self.v4.y) + \")\" + \"\\n\"\n mat = mat + \"(\" + str(self.v1.z) + \" \" + str(self.v2.z) + \" \" + str(self.v3.z) + \" \" + str(self.v4.z) + \")\" + \"\\n\"\n mat = mat + \"(\" + str(self.v1.t) + \" \" + str(self.v2.t) + \" \" + str(self.v3.t) + \" \" + str(self.v4.t) + \")\" + \"\\n\"\n return mat\n def __add__(self,m):\n return Mat4D(self.v1 + m.v1 , self.v2 + m.v2, self.v3 + m.v3 , self.v4 + m.v4)\n def __sub__(self,m):\n assert type(m) == Mat4D\n return Mat4D(self.v1 - m.v1, self.v2 - m.v2, self.v3 - m.v3, self.v4 - m.v4) \n def __bool__(self,m):\n assert type(m) == Mat4D\n res = False\n if(self.v1 == m.v1 and self.v2 == m.v2 and self.v3 == m.v3 and self.v4 == m.v4):\n res = True\n return res\n def __mul__(self, m):\n if(type(m) == float or type(m) == int):\n return Mat4D(self.v1 * m, self.v2 * m, self.v3 * m, self.v4 * m)\n elif(type(m) == Vect4D):\n v_1 = Vect4D(self.v1.x, self.v2.x, self.v3.x, self.v4.x) #on crée des vecteurs lignes pour faciliter la multiplication car les vecteurs de Vect4D des vecteurs colonnes\n v_2 = Vect4D(self.v1.y, self.v2.y, self.v3.y, self.v4.y)\n v_3 = Vect4D(self.v1.z, self.v2.z, self.v3.z, self.v4.z)\n v_4 = Vect4D(self.v1.t, self.v2.t, self.v3.t, self.v4.t)\n return Vect4D(m * v_1, m * v_2, m * v_3, m * v_4)\n elif(type(m) == Mat4D):\n l1c1 = self.v1.x * m.v1.x + self.v2.x * m.v1.y + self.v3.x * m.v1.z + self.v4.x * m.v1.t\n l1c2 = self.v1.x * m.v2.x + self.v2.x * m.v2.y + self.v3.x * m.v2.z + self.v4.x * m.v2.t\n l1c3 = self.v1.x * m.v3.x + self.v2.x * m.v3.y + self.v3.x * m.v3.z + self.v4.x * m.v3.t\n l1c4 = self.v1.x * m.v4.x + self.v2.x * m.v4.y + self.v3.x * m.v4.z + self.v4.x * m.v4.t\n l2c1 = self.v1.y * m.v1.x + self.v2.y * m.v1.y + self.v3.y * m.v1.z + self.v4.y * m.v1.t\n l2c2 = self.v1.y * m.v2.x + self.v2.y * m.v2.y + self.v3.y * m.v2.z + self.v4.y * m.v2.t\n l2c3 = self.v1.y * m.v3.x + self.v2.y * m.v3.y + self.v3.y * m.v3.z + self.v4.y * m.v3.t\n l2c4 = self.v1.y * m.v4.x + self.v2.y * m.v4.y + self.v3.y * m.v4.z + self.v4.y * m.v4.t\n l3c1 = self.v1.z * m.v1.x + self.v2.z * m.v1.y + self.v3.z * m.v1.z + self.v4.z * m.v1.t\n l3c2 = self.v1.z * m.v2.x + self.v2.z * m.v2.y + self.v3.z * m.v2.z + self.v4.z * m.v2.t\n l3c3 = self.v1.z * m.v3.x + self.v2.z * m.v3.y + self.v3.z * m.v3.z + self.v4.z * m.v3.t\n l3c4 = self.v1.z * m.v4.x + self.v2.z * m.v4.y + self.v3.z * m.v4.z + self.v4.z * m.v4.t\n l4c1 = self.v1.t * m.v1.x + self.v2.t * m.v1.y + self.v3.t * m.v1.z + self.v4.t * m.v1.t\n l4c2 = self.v1.t * m.v2.x + self.v2.t * m.v2.y + self.v3.t * m.v2.z + self.v4.t * m.v2.t\n l4c3 = self.v1.t * m.v3.x + self.v2.t * m.v3.y + self.v3.t * m.v3.z + self.v4.t * m.v3.t\n l4c4 = self.v1.t * m.v4.x + self.v2.t * m.v4.y + self.v3.t * m.v4.z + self.v4.t * m.v4.t\n v1 = Vect4D(l1c1, l2c1, l3c1, l4c1) #Vecteur colonne 1\n v2 = Vect4D(l1c2, l2c2, l3c2, l4c2) #Vecteur colonne 2\n v3 = Vect4D(l1c3, l2c3, l3c3, l4c3) #Vecteur colonne 3\n v4 = Vect4D(l1c4, l2c4, l3c4, l4c4) #Vecteur colonne 4 \n return Mat4D(v1,v2,v3,v4)# \n else:\n return 'erreur'\n def __setitem__(self,i,j,valeur):\n assert i in range(4)\n assert j in range(4)\n if(i == 0):\n self.v1[j] = valeur\n elif(i == 1):\n self.v2[j] = valeur\n elif(i == 2):\n self.v3[i] = valeur\n elif(i == 3):\n self.v3[j] = valeur\n def __getitem__(self,i,j):\n assert i in range(4)\n assert j 
in range(4)\n        if(i == 0):\n            return self.v1[j]\n        elif(i == 1):\n            return self.v2[j]\n        elif(i == 2):\n            return self.v3[j]\n        else:\n            return self.v4[j]\n    \n    \ndef Id4():\n    return Mat4D( Vect4D(1,0,0,0) , Vect4D(0,1,0,0) , Vect4D(0,0,1,0) , Vect4D(0,0,0,1) )\n#symmetry\ndef SymX():\n    return Mat4D(Vect4D(-1, 0, 0, 0), Vect4D(0, 1, 0, 0), Vect4D(0, 0, 1, 0), Vect4D(0, 0, 0, 1))\ndef SymY():\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, -1, 0, 0), Vect4D(0, 0, 1, 0), Vect4D(0, 0, 0, 1))\ndef SymZ():\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, 1, 0, 0), Vect4D(0, 0, -1, 0), Vect4D(0, 0, 0, 1))\n#translation\ndef TransX(a):\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, 1, 0, 0), Vect4D(0, 0, 1, 0), Vect4D(a, 0, 0, 1))\ndef TransY(a):\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, 1, 0, 0), Vect4D(0, 0, 1, 0), Vect4D(0, a, 0, 1))\ndef TransZ(a):\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, 1, 0, 0), Vect4D(0, 0, 1, 0), Vect4D(0, 0, a, 1))\n#rotation\ndef RotX(theta):\n    return Mat4D(Vect4D(1, 0, 0, 0), Vect4D(0, np.cos(theta), -np.sin(theta), 0), Vect4D(0, np.sin(theta), np.cos(theta), 0), Vect4D(0, 0, 0, 1))\ndef RotY(theta):\n    return Mat4D(Vect4D(np.cos(theta), 0, -np.sin(theta), 0), Vect4D(0, 1, 0, 0), Vect4D(np.sin(theta), 0, np.cos(theta), 0), Vect4D(0, 0, 0, 1))\ndef RotZ(theta):\n    return Mat4D(Vect4D(np.cos(theta), -np.sin(theta), 0, 0), Vect4D(np.sin(theta), np.cos(theta), 0, 0), Vect4D(0, 0, 1, 0), Vect4D(0, 0, 0, 1))\n    \n\n    \n\n\n#if __name__ == __main__:\n#    Id4()\nwindow = Tk()\nwindow.title(\"Transformation d'une matrice\") \nwindow.geometry(\"1080x720\")\nwindow.config(background = 'beige')\n\n\n\n## FRAME CONTAINING THE TITLE\nframe_text = Frame(window,bg='beige',bd=1,relief=SUNKEN)\nframe_text.pack(padx=40,fill=X)\n\n\n# window title\nlabel_title = Label(frame_text,text=\"Transformation d'une matrice\", font=('Arial',35),bg='beige',fg='black')\nlabel_title.pack(pady=5)\n\n\n\nlabel_coord = Label(window,text=\"Saisissez les coordonnées:\", font=('Arial',20),bg='beige',fg='black')\nlabel_coord.pack()\n\n## FRAME CONTAINING THE BUTTONS\nframe_mat = Frame(window,bg='beige',bd=1,relief=SUNKEN)\nframe_mat.pack(padx=352.5,pady=15,fill=X)\n\nframe_calcul = Frame(window,bg='beige',bd=1,relief=SUNKEN)\nframe_calcul.pack(padx=352.5,pady=40)\n\n\n# the user enters the coordinates to transform\n\nx = Label(frame_mat,text=\"X:\", font=('Arial',20),bg='beige',fg='black')\nx.grid(row=0,column=0)\ny = Label(frame_mat,text=\"Y:\", font=('Arial',20),bg='beige',fg='black')\ny.grid(row=0,column=1)\nz = Label(frame_mat,text=\"Z:\", font=('Arial',20),bg='beige',fg='black')\nz.grid(row=0,column=2)\n\n\nx_entry = Entry(frame_mat,bg='beige',fg='black')\nx_entry.grid(row=1,column=0,sticky='ew',ipady=10)\ny_entry = Entry(frame_mat,bg='beige',fg='black')\ny_entry.grid(row=1,column=1,sticky='ew',ipady=10)\nz_entry = Entry(frame_mat,bg='beige',fg='black')\nz_entry.grid(row=1,column=2,sticky='ew',ipady=10)\n\n\n\n\n# the user enters the transformation variables\n\nlabel_title2 = Label(window,text=\"Saisissez les variables:\", font=('Arial',20),bg='beige',fg='black')\nlabel_title2.place(x=400,y=210)\ntheta1 = Label(frame_calcul,text=\"Theta1: \\n(rotation autour de l'axe X)\", font=('Arial',15),bg='beige',fg='black')\ntheta1.grid(row=1,column=0)\ntheta2 = Label(frame_calcul,text=\"Theta2: \\n(rotation autour de l'axe Y)\", font=('Arial',15),bg='beige',fg='black')\ntheta2.grid(row=2,column=0)\ntheta3 = Label(frame_calcul,text=\"Theta3: \\n(rotation autour de l'axe X)\", 
font=('Arial',15),bg='beige',fg='black')\ntheta3.grid(row=3,column=0)\ntheta4 = Label(frame_calcul,text=\"Theta4: \\n(rotation autour de l'axe Z)\", font=('Arial',15),bg='beige',fg='black')\ntheta4.grid(row=4,column=0)\nl = Label(frame_calcul,text=\"L: \\n(translation le long de l'axe X)\", font=('Arial',15),bg='beige',fg='black')\nl.grid(row=5,column=0)\ntheta1_e = Entry(frame_calcul,bg='beige',fg='black')\ntheta1_e.grid(row=1,column=1,sticky='nesw')\ntheta2_e = Entry(frame_calcul,bg='beige',fg='black')\ntheta2_e.grid(row=2,column=1,sticky='nesw')\ntheta3_e = Entry(frame_calcul,bg='beige',fg='black')\ntheta3_e.grid(row=3,column=1,sticky='nesw')\ntheta4_e = Entry(frame_calcul,bg='beige',fg='black')\ntheta4_e.grid(row=4,column=1,sticky='nesw')\nL_e = Entry(frame_calcul,bg='beige',fg='black')\nL_e.grid(row=5,column=1,sticky='nesw')\n\ndef valider():\n M1 = RotX(float(theta1_e.get())/180 * np.pi) #en rad\n M2 = RotY(float(theta2_e.get())/180 * np.pi)\n M3 = RotX(float(theta3_e.get())/180 * np.pi)\n M4 = RotZ(float(theta4_e.get())/180 * np.pi)\n L = TransX(float(L_e.get()))\n M = M1 * M2 * M3 * M4 * L\n matricetxt.set(str(M) + \"\\n\\n\")\n V = Vect4D(float(x_entry.get()), float(y_entry.get()), float(z_entry.get()), 1)\n vecteurtxt.set(\"(\" + str(abs((M*V).x)) + \", \" + str(abs((M*V).y)) + \", \" + str(abs((M*V).z)) + \", \" + str(abs((M*V).t)) + \")\\n\\n\\n\\n\")\n\nmatricetxt = StringVar()\nmatricetxt.set(\"(0.0, 0.0, 0.0, 0.0)\\n(0.0, 0.0, 0.0, 0.0)\\n(0.0, 0.0, 0.0, 0.0)\\n(0.0, 0.0, 0.0, 0.0)\\n\\n\")\nvecteurtxt = StringVar()\nvecteurtxt.set(\"(0, 0, 0, 0)\\n\\n\\n\\n\")\n\nmat = Label(window, textvariable = matricetxt,font=('Arial',15),bg='beige',fg='black')\nmat.pack(expand=YES,side=RIGHT)\nmat_text = Label(window, text = 'Matrice',font=('Arial',15),bg='beige',fg='black')\nmat_text.place(x=760,y=670)\n\nvect = Label(window, textvariable = vecteurtxt,font=('Arial',15),bg='beige',fg='black')\nvect.pack(expand=YES,side=LEFT)\nvect_text = Label(window, text = 'Vecteur en coordonnées absolues',font=('Arial',15),bg='beige',fg='black')\nvect_text.place(x=100,y=620)\n\n\nvalid = Button(window,text='Valider',font=('Arial',15),bg='black',fg='beige',command=valider)\nvalid.place(height=50,x=500,y=600)\n\n\n\nwindow.mainloop()\n\n\n\n","sub_path":"BOULEKBACHE_BENJIRA_TD6/BOULEKBACHE_BENJIRA_TD6.py","file_name":"BOULEKBACHE_BENJIRA_TD6.py","file_ext":"py","file_size_in_byte":12557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"370221875","text":"from collections import OrderedDict\n\nclass Menu:\n def __init__(self):\n self.options = None\n\n def get_selection(self):\n while True:\n choice = input('Please choose an option: ')\n if choice not in self.options:\n print('Invalid Entry')\n else:\n return choice\n\n def show(self):\n print()\n\n for key, value in self.options.items():\n print(\"{} - {}\".format(key, value))\n\n print()\n\nclass Main(Menu):\n def __init__(self):\n super().__init__()\n self.options = OrderedDict([\n ('A', 'Add a task to the database'),\n ('S', 'Search for a task in the database'),\n ('Q', 'Quit the program'),\n ])\n\nclass Search(Menu):\n def __init__(self):\n super().__init__()\n self.options = OrderedDict([\n ('E', 'Search by employee'),\n ('T', 'Search by task name'),\n ('M', 'Search by mins spent on task'),\n ('D', 'Search by date'),\n ('R', 'Search by date range'),\n ('X', 'Search by text in notes'),\n ('Q', 'Return to the main menu')\n ])\n\nclass Operators(Menu):\n def __init__(self):\n 
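        # comparison operators offered when a search filters by an exact numeric value (e.g. minutes spent)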
super().__init__()\n self.options = OrderedDict(\n [('==', 'Equal to that value'),\n ('>', 'Greater than that value'),\n ('<', 'Less than that value'),\n ('>=', 'Greater than or equal to that value'),\n ('<=', 'Less than or equal to that value'),\n ('!=', 'Not equal to that value')\n ])\n\nclass Edit(Menu):\n def __init__(self):\n super().__init__()\n self.options = OrderedDict(\n [\n (\"E\", \"Edit the employees name\"),\n (\"D\", \"Edit the task date\"),\n (\"N\", \"Edit the task name\"),\n (\"T\", \"Edit the task time\"),\n (\"X\", \"Edit the task notes\"),\n ]\n )\n","sub_path":"menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"435111355","text":"from mflog import get_logger\n\nlogger = get_logger(\"myapp\")\n\n\ndef application(environ, start_response):\n status = '200 OK'\n output = b'Hello World!'\n logger.info(\"this is a test message\")\n response_headers = [('Content-Type', 'text/plain'),\n ('Content-Length', str(len(output)))]\n start_response(status, response_headers)\n return [output]\n","sub_path":"adm/templates/plugins/python3_raw_wsgi/{{cookiecutter.name}}/main/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135329882","text":"import enum\nimport asyncio\nimport base64\nimport typing\nfrom dataclasses import dataclass, field\n\nfrom mangum.types import ASGIScope, ASGIMessage, ASGIApp, AWSMessage\n\n\nclass ASGICycleState(enum.Enum):\n REQUEST = enum.auto()\n RESPONSE = enum.auto()\n\n\n@dataclass\nclass ASGICycle:\n\n scope: ASGIScope\n state: ASGICycleState = ASGICycleState.REQUEST\n binary: bool = False\n response: AWSMessage = field(default_factory=dict)\n\n def __post_init__(self) -> None:\n self.loop = asyncio.get_event_loop()\n self.app_queue: asyncio.Queue = asyncio.Queue(loop=self.loop)\n\n def __call__(self, app: ASGIApp) -> AWSMessage:\n asgi_instance = app(self.scope, self.asgi_receive, self.asgi_send)\n asgi_task = self.loop.create_task(asgi_instance)\n self.loop.run_until_complete(asgi_task)\n return self.response\n\n async def asgi_receive(self) -> ASGIMessage:\n message = await self.app_queue.get()\n return message\n\n async def asgi_send(self, message: ASGIMessage) -> None: # pragma: no cover\n raise NotImplementedError\n\n def put_message(self, message: ASGIMessage) -> None:\n self.app_queue.put_nowait(message)\n","sub_path":"mangum/protocols/asgi.py","file_name":"asgi.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29742969","text":"#!/usr/bin/env python3\n\nimport os\nimport stat\nimport subprocess\nimport sys\n\nIGNORED_DIRECTORIES = set([\"doc\", \"source\"])\nIGNORED_FILES = set(\n [\n \"README\",\n \"context-version.pdf\",\n \"context-version.png\",\n \"context.rme\",\n \"ls-R\",\n \"times.htf\",\n ]\n)\n\n\ndef collect_files(path):\n for entry in os.listdir(path):\n full_path = os.path.join(path, entry)\n sb = os.lstat(full_path)\n if stat.S_ISDIR(sb.st_mode):\n if entry not in IGNORED_DIRECTORIES:\n for subentry in collect_files(full_path):\n yield os.path.join(entry, subentry)\n elif stat.S_ISREG(sb.st_mode):\n if entry not in IGNORED_FILES:\n yield entry\n elif not stat.S_ISLNK(sb.st_mode):\n raise Exception(\"Found non-directory/file/symlink: \" + full_path)\n\n\ndef create_tarballs(path):\n # 
Obtain files and directories at this point.\n directories = []\n files = []\n for entry in os.listdir(path):\n full_path = os.path.join(path, entry)\n sb = os.lstat(full_path)\n if stat.S_ISDIR(sb.st_mode):\n if entry not in IGNORED_DIRECTORIES:\n directories.append(entry)\n elif stat.S_ISREG(sb.st_mode):\n if entry not in IGNORED_FILES:\n files.append(entry)\n elif not stat.S_ISLNK(sb.st_mode):\n raise Exception(\"Found non-directory/file/symlink: \" + full_path)\n\n if files:\n for directory in directories:\n for entry in collect_files(os.path.join(path, directory)):\n files.append(os.path.join(directory, entry))\n if len(files) > (3500 if directories else 5000):\n raise Exception(\n \"Package %s is too large. It has %d files.\" % (path, len(files))\n )\n\n subprocess.check_call(\n [\n \"tar\",\n \"-C\",\n path,\n \"--mtime=0\",\n \"--owner=0\",\n \"--group=0\",\n \"--numeric-owner\",\n \"-cJf\",\n os.path.join(\"output\", \"--\".join(path.split(\"/\")) + \".tar.xz\"),\n ]\n + sorted(files)\n )\n else:\n for entry in directories:\n create_tarballs(os.path.join(path, entry))\n\n\ncreate_tarballs(sys.argv[1])\n","sub_path":"create_tarballs.py","file_name":"create_tarballs.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"164441163","text":"\nfrom typing import List \nfrom utils import *\n \nclass Solution_023_MergekSortedLists_3:\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n amount = len(lists)\n interval = 1\n while interval < amount:\n for i in range(0, amount - interval, interval * 2):\n lists[i] = self.merge2Lists(lists[i], lists[i + interval])\n interval *= 2\n return lists[0] if amount > 0 else None\n\n def merge2Lists(self, l1, l2):\n head = point = ListNode(0)\n while l1 and l2:\n if l1.val <= l2.val:\n point.next = l1\n l1 = l1.next\n else:\n point.next = l2\n l2 = l1\n l1 = point.next.next\n point = point.next\n if not l1:\n point.next=l2\n else:\n point.next=l1\n return head.next\n\nif __name__ == \"__main__\":\n nums = [2, 7, 11, 15]\n target = 26\n s = \"aa\"\n arrays = [[1, 2, 3], [4, 5, 6]]\n print(arrays)\n ","sub_path":"solution/python/023_MergekSortedLists_3.py","file_name":"023_MergekSortedLists_3.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"462124685","text":"from utils import slow_print\n\nclass Player:\n def __init__(self, name, current_room):\n self.name = name\n self.current_room = current_room\n self.inventory = []\n \n def perform_action(self, *args):\n if len(args) > 1:\n if args[0] == \"grab\":\n self.grab_item(args[1])\n if args[0] == \"drop\":\n self.drop_item(args[1])\n elif args[0] == \"i\":\n self.show_inventory()\n else:\n self.move(args[0])\n\n def move(self, direction):\n err_msg = \"\\nYou can't move that direction! 
Choose a different direction.\n\"\n        try:\n            next_room = getattr(self.current_room, f'{direction}_to')\n            self.current_room = next_room\n        except AttributeError:\n            slow_print(err_msg)\n    \n    def grab_item(self, item_name):\n        try:\n\n            item = next(x for x in self.current_room.items if x.name == item_name)\n            item.on_take()\n            self.inventory.append(item)\n            self.current_room.remove_item(item)\n        except StopIteration:\n            slow_print(f'Current room does not have {item_name}')\n    \n    def drop_item(self, item_name):\n        try:\n            item = next(x for x in self.inventory if x.name == item_name)\n            self.inventory = [x for x in self.inventory if x.name != item_name]\n            self.current_room.add_items(item)\n        except StopIteration:\n            slow_print(f'{item_name} is not in your inventory.')\n    \n    def show_inventory(self):\n        slow_print('Inventory: ') \n        for i, x in enumerate(self.inventory):\n            if i == len(self.inventory) - 1:\n                print(x.name)\n            else:\n                print(f'{x.name},', end=\" \")\n\n\n\n\n    \n\n\n\n    \n\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} {"seq_id":"369420491","text":"import pandas\r\nimport math\r\nimport copy\r\n\r\ndef cross_validation(dataset, k):\r\n    partitions = []\r\n    kFolds = []\r\n    training_dict = []\r\n    test_dict = []\r\n    fold_dict = []\r\n\r\n    data = dataset['data']\r\n\r\n    len_data = len(data)\r\n    #Set the number of instances in each fold\r\n    instancesPerFold = len_data/k\r\n    \r\n    if instancesPerFold < 2:\r\n        raise Exception(\"Quantidade de folds muito grande para o conjunto de dados\")\r\n    \r\n    print('Generating kFolds...')\r\n    \r\n    instancesPerFold = round(instancesPerFold)\r\n    \r\n    #Sort the dataset - columns is the class and count is the frequency of each instance\r\n    columns = dataset['target']\r\n    count = data[columns].value_counts()\r\n    data = data.sort_values(by=columns)\r\n    count = count.sort_index()\r\n    count = count.tolist()\r\n    \r\n    start = 0\r\n    end = len_data\r\n    aux = []\r\n    #Split the dataset by class, where each class n keeps n/k instances \r\n    for j in range(len(count)):\r\n        instances = int(round(count[j]/k))\r\n        aux = [data[i:i+instances] for i in range(start, end, instances)]\r\n        start = start + count[j]\r\n        partitions.append(aux)\r\n    \r\n    \r\n    trainingSet = []\r\n    fold = []\r\n    #Walk each partition to take the first slice of each class and form a fold\r\n    for i in range(k):\r\n        testSet = []\r\n\r\n        #Insert the first slice of each partition to make the cross-validation stratified \r\n        for j in range(len(partitions)):\r\n            testSet.append(partitions[j][i])\r\n\r\n        #Copy the dataset into trainingSet \r\n        trainingSet = copy.deepcopy(partitions[0])\r\n        #Concatenate to normalize \r\n        testSet = pandas.concat(testSet) \r\n        trainingSet = pandas.concat(trainingSet)\r\n        #Drop the data already used by testSet \r\n        trainingSet = trainingSet[~trainingSet.apply(tuple,1).isin(testSet.apply(tuple,1))] \r\n\r\n        trainingSet = trainingSet.reset_index(drop=True)\r\n        testSet = testSet.reset_index(drop=True)\r\n\r\n        training_dict = {\r\n            'data': trainingSet,\r\n            'attributes': dataset['attributes'],\r\n            'target': dataset['target']\r\n        }\r\n        \r\n        test_dict = {\r\n            'data': testSet,\r\n            'attributes': dataset['attributes'],\r\n            'target': dataset['target']\r\n        }\r\n        \r\n        fold_dict = {\r\n            'trainingSet': training_dict,\r\n            'testSet': test_dict\r\n        }\r\n\r\n        #print(\"testSet:\\n\",testSet)\r\n        #print(\"trainingSet:\\n\", trainingSet)\r\n        \r\n        kFolds.insert(k, 
fold_dict) \r\n \r\n \r\n\r\n #print(kFolds)\r\n print('kFolds successfully generated.')\r\n return kFolds","sub_path":"cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510572879","text":"#!/usr/bin/python3\n\"\"\"\nFabric script that distributes an archive to your web servers\nusing the function do_deploy\n\"\"\"\nfrom fabric.api import env, run, put, hosts\n# from datetime import datetime\nimport os\n\nenv.hosts = ['34.74.23.57', '35.196.161.89']\n\n\ndef do_deploy(archive_path):\n \"\"\"\n upload the archive to the /tmp/ directory of the web server\n uncompress the archive\n delete the archive from the web server\n handle symbolic links\n return True is all operations went well, False otherwise\n \"\"\"\n if not os.path.exists(archive_path):\n return False\n try:\n put(archive_path, '/tmp/')\n file_name = archive_path.split('/')[-1]\n file_name_noext = file_name.split('.')[0]\n new_folder = '/data/web_static/releases/' + file_name_noext + '/'\n run('sudo mkdir -p {}'.format(new_folder))\n run('sudo tar -xzf /tmp/{} -C {}'.format(file_name, new_folder))\n run('sudo rm /tmp/{}'.format(file_name))\n run('sudo mv {}web_static/* {}'.format(new_folder, new_folder))\n run('sudo rm -rf {}web_static'.format(new_folder))\n run('sudo rm -rf /data/web_static/current')\n run('sudo ln -s {} /data/web_static/current'.format(new_folder))\n print(\"New version deployed!\")\n return True\n except:\n return False\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104929793","text":"# This file is part of Superdesk.\n#\n# Copyright 2013, 2014, 2015, 2016, 2017 Sourcefabric z.u. 
and contributors.\n#\n# For the full copyright and license information, please see the\n# AUTHORS and LICENSE files distributed with this source code, or\n# at https://www.sourcefabric.org/superdesk/license\nfrom copy import deepcopy\nfrom superdesk import Resource, Service, get_resource_service\nfrom superdesk.errors import SuperdeskApiError\nfrom superdesk.metadata.item import ITEM_STATE, CONTENT_STATE, PUBLISH_STATES\nfrom eve.utils import config\nfrom planning.common import ASSIGNMENT_WORKFLOW_STATE\nfrom apps.archive.common import get_user, is_assigned_to_a_desk\nfrom apps.content import push_content_notification\nfrom superdesk.notification import push_notification\n\n\nclass AssignmentsLinkService(Service):\n def on_create(self, docs):\n for doc in docs:\n self._validate(doc)\n\n def create(self, docs):\n ids = []\n production = get_resource_service('archive')\n assignments_service = get_resource_service('assignments')\n assignments_complete = get_resource_service('assignments_complete')\n items = []\n\n for doc in docs:\n assignment = assignments_service.find_one(req=None, _id=doc.pop('assignment_id'))\n assignments_service.validate_assignment_action(assignment)\n item = production.find_one(req=None, _id=doc.pop('item_id'))\n reassign = doc.pop('reassign')\n\n # set the state to in progress if item in published state\n updates = {'assigned_to': deepcopy(assignment.get('assigned_to'))}\n updates['assigned_to']['state'] = ASSIGNMENT_WORKFLOW_STATE.COMPLETED if \\\n item.get(ITEM_STATE) in [CONTENT_STATE.PUBLISHED, CONTENT_STATE.CORRECTED] else \\\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS\n\n # on fulfiling the assignment the user is assigned the assignment, for add to planning it is not\n if reassign:\n user = get_user()\n if user and str(user.get(config.ID_FIELD)) != (assignment.get('assigned_to') or {}).get('user'):\n updates['assigned_to']['user'] = str(user.get(config.ID_FIELD))\n\n # reference the item to the assignment\n production.system_update(\n item[config.ID_FIELD],\n {'assignment_id': assignment[config.ID_FIELD]},\n item\n )\n\n get_resource_service('delivery').post([{\n 'item_id': item[config.ID_FIELD],\n 'assignment_id': assignment[config.ID_FIELD],\n 'planning_id': assignment['planning_item'],\n 'coverage_id': assignment['coverage_item']\n }])\n\n if item.get(ITEM_STATE) in [CONTENT_STATE.PUBLISHED, CONTENT_STATE.CORRECTED]:\n assignments_complete.update(assignment[config.ID_FIELD], updates, assignment)\n else:\n assignments_service.patch(assignment[config.ID_FIELD], updates)\n\n # if the item is publish then update those items as well\n if item.get(ITEM_STATE) in PUBLISH_STATES:\n get_resource_service('published').update_published_items(\n item[config.ID_FIELD],\n 'assignment_id', assignment[config.ID_FIELD])\n\n item['assignment_id'] = assignment[config.ID_FIELD]\n\n # Save assignment history\n assignment_history_service = get_resource_service('assignments_history')\n assignment_history_service.on_item_content_link(updates, assignment)\n\n doc.update(item)\n ids.append(doc[config.ID_FIELD])\n items.append(item)\n\n if item.get(ITEM_STATE) not in [CONTENT_STATE.PUBLISHED, CONTENT_STATE.CORRECTED]:\n # publishing planning item\n assignments_service.publish_planning(assignment['planning_item'])\n\n push_content_notification(items)\n push_notification(\n 'content:link',\n item=str(item[config.ID_FIELD]),\n assignment=str(assignment[config.ID_FIELD])\n )\n return ids\n\n def _validate(self, doc):\n assignment = get_resource_service('assignments').find_one(\n req=None,\n 
_id=doc.get('assignment_id')\n )\n\n if not assignment:\n raise SuperdeskApiError.badRequestError('Assignment not found.')\n\n item = get_resource_service('archive').find_one(\n req=None,\n _id=doc.get('item_id')\n )\n\n if not item:\n raise SuperdeskApiError.badRequestError('Content item not found.')\n\n if item.get('assignment_id'):\n raise SuperdeskApiError.badRequestError(\n 'Content is already linked to an assignment. Cannot link assignment and content.'\n )\n\n if not is_assigned_to_a_desk(item):\n raise SuperdeskApiError.badRequestError(\n 'Content not in workflow. Cannot link assignment and content.'\n )\n\n delivery = get_resource_service('delivery').find_one(\n req=None,\n assignment_id=doc.get('assignment_id')\n )\n\n if delivery:\n raise SuperdeskApiError.badRequestError(\n 'Content already exists for the assignment. Cannot link assignment and content.'\n )\n\n\nclass AssignmentsLinkResource(Resource):\n endpoint_name = resource_title = 'assignments_link'\n url = 'assignments/link'\n schema = {\n 'assignment_id': {\n 'type': 'string',\n 'required': True\n },\n 'item_id': {\n 'type': 'string',\n 'required': True\n },\n 'reassign': {\n 'type': 'boolean',\n 'required': True\n }\n }\n\n resource_methods = ['POST']\n item_methods = []\n\n privileges = {'POST': 'archive'}\n","sub_path":"server/planning/assignments/assignments_link.py","file_name":"assignments_link.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"227944641","text":"# Copyright 2019 Apex.AI, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport socket\n\n# To coordinate ROS_DOMAIN_IDs between multiple test process instances, we\n# open a high numbered port \"PORT_BASE + ROS_DOMAIN_ID.\"\n# If we manage to open the port, then we can use that ROS_DOMAIN_ID for the duration of the\n# test run\n_PORT_BASE = 22119 # I picked this randomly as a high port that probably won't be in use\n\n\nclass _sockwrapper():\n \"\"\"Wraps sockets to keep them open, but appear like a number from 1 to 100.\"\"\"\n\n def __init__(self, socket):\n self.__socket = socket\n\n def __str__(self):\n return str(self.__socket.getsockname()[1] - _PORT_BASE)\n\n\nclass _default_selector:\n\n def __init__(self):\n # When we need to coordinate 10 or 20 domains, it's about 10x faster\n # to start with a random seed value here.\n # It's also the difference between 1ms and 100us so it's totally insignificant.\n # Leaving this here in case it's ever useful in the future to speed up domain selection:\n # self._value = random.randint(1, 100)\n\n # Slower, but deterministic:\n # Always start at '1' so if there's weirdness where domains are colliding when\n # they shouldn't, it's easier to debug.\n self._value = 1\n\n def __call__(self):\n retval = ((self._value - 1) % 100) + 1\n self._value += 1\n return retval\n\n\ndef get_coordinated_domain_id(*, selector=None):\n \"\"\"\n Get a ROS_DOMAIN_ID from 1 to 100 that will not conflict with other 
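# The _validate method above is a guard chain: look each dependency up, then fail
# fast with a specific message at the first violated precondition. The same shape
# in a standalone sketch; LinkError and the plain dicts are illustrative stand-ins,
# not Superdesk APIs.
class LinkError(Exception):
    pass

def validate_link(assignment, item, delivery):
    if not assignment:
        raise LinkError('Assignment not found.')
    if not item:
        raise LinkError('Content item not found.')
    if item.get('assignment_id'):
        raise LinkError('Content is already linked to an assignment.')
    if not item.get('task'):  # stand-in for the is_assigned_to_a_desk() check
        raise LinkError('Content not in workflow.')
    if delivery:
        raise LinkError('Content already exists for the assignment.')

validate_link({'_id': 1}, {'_id': 2, 'task': {'desk': 'sports'}}, None)  # all guards pass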
ROS_DOMAIN_IDs.\n\n    Processes can use get_coordinated_domain_id to generate ROS_DOMAIN_IDs that allow them to\n    use ROS2 without unexpected cross-talk between processes.\n    This is similar to the ROS1 rostest behavior of putting the ROS master on a unique port.\n\n    Users of get_coordinated_domain_id must keep the returned object alive. If the returned\n    object is garbage collected, the ROS_DOMAIN_ID it represents is returned to the pool\n    of available values.\n    \"\"\"\n    if selector is None:\n        selector = _default_selector()\n\n    # Try 100 times to get a unique ROS domain ID. The default number of parallel colcon\n    # test runners is 12, so it's extremely unlikely that more than 12 ROS_DOMAIN_IDs need\n    # to be coordinated at once.\n    for attempt in range(100):\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            s.bind(('', _PORT_BASE + selector()))\n        except OSError:\n            continue\n        else:\n            return _sockwrapper(s)\n    else:\n        raise Exception('Failed to get a unique domain ID')\n","sub_path":"domain_coordinator/domain_coordinator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"331551555","text":"from time import time\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal as normal\nfrom experiments.lnpdfs.create_target_lnpfs import build_GPR_iono_lnpdf\nfrom sampler.elliptical_slice.bovy_mcmc.elliptical_slice import elliptical_slice as ess_update\n\nnum_dimensions = 34\nprior_chol = np.eye(num_dimensions)\nprior = normal(np.zeros(34), np.eye(num_dimensions))\n\ntarget_lnpdf = build_GPR_iono_lnpdf()\n\ndef sample(n_samps, path=None):\n    samples = np.empty((n_samps, num_dimensions))\n    likelihoods = np.empty((n_samps))\n    iters = []\n    nfevals = []\n    target_lnpdf.counter = 0\n    timestamps = [time()]\n    cur_theta = prior.rvs(1)\n    cur_lnpdf = target_lnpdf(cur_theta, without_prior=True)\n    for i in range(0, n_samps):\n        if i % 10000 == 0:\n            print('iter' + str(i))\n            iters.append(i)\n            nfevals.append(target_lnpdf.counter)\n            timestamps.append(time())\n        [cur_theta, cur_lnpdf] = ess_update(cur_theta, prior_chol, target_lnpdf, pdf_params=(True,),\n                                                cur_lnpdf=cur_lnpdf)\n        samples[i] = cur_theta\n        likelihoods[i] = cur_lnpdf # without prior!\n\n    iters.append(i)\n    nfevals.append(target_lnpdf.counter)\n    timestamps.append(time())\n    if path is not None:\n        np.savez(path+\"processed_data\", iter=iters, samples=samples, fevals=np.array(nfevals), timestamps=np.array(timestamps))\n    print(\"done\")\n\n\nif __name__ == '__main__':\n    sample(1000)\n\n","sub_path":"python/experiments/ESS/iono.py","file_name":"iono.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"532365586","text":"# costs for each action:\n# change 5\n# delete 20\n# insert 20\nfrom pythonds.basic.stack import Stack\n\nclass Table:\n    def __init__(self, m, n, defaultValue = 0):\n        self.data = []\n        for _ in range(n):\n            self.data.append([defaultValue] * m)\n\n    def __getitem__(self, idx):\n        return self.data[idx]\n\n    def __str__(self):\n        return str(self.data)\n\ndef edit_distance(str1, str2, actions):\n    table = Table(len(str1) + 1, len(str2) + 1, 0)\n\n    # init table\n    for m in range(len(str1) + 1):\n        table[0][m] = m * 20\n\n    for n in range(len(str2) + 1):\n        table[n][0] = n * 20\n\n    for i in range(1, len(str2) + 1):\n        for j in range(1, len(str1) + 1):\n            minCost = float('inf')\n            minCostAction = None\n            temp = None\n\n            
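# Standalone sketch of the coordination trick domain_coordinator uses above: claim
# an integer ID by binding TCP port _PORT_BASE + id and holding the socket open; a
# second process attempting the same ID gets OSError and tries the next one. The
# claim_id() name is illustrative; 22119 is the module's own base.
import socket

def claim_id(port_base=22119, max_id=100):
    for candidate in range(1, max_id + 1):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(('', port_base + candidate))
        except OSError:
            s.close()  # port taken, so another process already owns this ID
            continue
        return candidate, s  # caller must keep s alive for as long as the ID is used
    raise RuntimeError('no free ID between 1 and {0}'.format(max_id))

domain_id, holder = claim_id()
print('claimed ID', domain_id)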
temp = table[i - 1][j] + 20\n            if temp < minCost:\n                minCost = temp\n                minCostAction = (i - 1, j, 'insert', str2[i - 1])\n\n            temp = table[i][j - 1] + 20\n            if temp < minCost:\n                minCost = temp\n                minCostAction = (i, j - 1, 'delete', str1[j - 1])\n\n            temp = table[i - 1][j - 1] + (0 if str2[i - 1] == str1[j - 1] else 5)\n            if temp < minCost:\n                minCost = temp\n                minCostAction = (i - 1, j - 1, 'noaction') if str2[i - 1] == str1[j - 1] else (i - 1, j - 1, 'change', str1[j - 1], 'to', str2[i - 1])\n\n            table[i][j] = minCost\n            actions[i][j] = minCostAction\n\n    return table[len(str2)][len(str1)]\n\ndef printAction(actions, j, i):\n    stack = Stack()\n    while i != 0 or j != 0:\n        action = actions[i][j]\n        stack.push(action)\n        i = action[0]\n        j = action[1]\n\n    while not stack.isEmpty():\n        item = stack.pop()\n        if item[2] != 'noaction':\n            print(*item[2:], 'on', item[0])\n\ndef main():\n    str1 = 'sunday'\n    str2 = 'saturday'\n    actions = Table(len(str1) + 1, len(str2) + 1, None)\n    distance = edit_distance(str1, str2, actions)\n    print(str1, 'to', str2)\n    print('cost', distance)\n    printAction(actions, len(str1), len(str2))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"base/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"569267495","text":"import win32con\r\nimport win32service\r\n\r\n\r\ndef ListServices():\r\n\r\n    accessSCM = win32con.GENERIC_READ\r\n    accessSrv = win32service.SC_MANAGER_ALL_ACCESS\r\n\r\n    # Open Service Control Manager\r\n    hscm = win32service.OpenSCManager(None, None, accessSCM)\r\n\r\n    # Enumerate Service Control Manager DB\r\n    typeFilter = win32service.SERVICE_WIN32_OWN_PROCESS # The service type of our fake service\r\n    stateFilter = win32service.SERVICE_STATE_ALL\r\n\r\n    statuses = win32service.EnumServicesStatus(hscm, typeFilter, stateFilter)\r\n\r\n    for (short_name, desc, status) in statuses:\r\n        if status[1]==4: # if running \r\n            print(short_name, status)\r\n\r\n\r\nListServices()\r\n","sub_path":"RunServ.py","file_name":"RunServ.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"570280715","text":"import logging\nimport subprocess\nimport threading\n\n# Visuals\nfrom threading import Thread\n\nfrom matplotlib import style\n#import matplotlib.pyplot as plt\n\n## Live Visuals\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport time\nfrom matplotlib import style\n\n\n# File management\nfrom shutil import move\nfrom os import remove\nimport matplotlib.animation as animation\n\n# Read ping.sh and replace locations where appropriate\n# Get local from traceroute.txt (first hop)\n\n# Track number of tests\ntests: int = 0\ntestsPerIteration: int = 20\n\n# Initialize holders for local/target ping latency data\n# Stores all ping test results\nlocalPings = {}\ntargetPings = {}\n\n# Stores last ping test results\nlastLocal = {}\nlastTarget = {}\n\n# Store test stats\nmaxLocal = 0.0\nminLocal = 0.0\navgLocal = 0.0\n\nmaxTarget = 0.0\nminTarget = 0.0\navgTarget = 0.0\n\n# Global Vars\nglobal STOP_THREADS\n\n# set up visualization\nstyle.use('fivethirtyeight')\n\nplt.ion()\nfigure = plt.figure()\nlocalGraph = figure.add_subplot(211)\ntargetGraph = figure.add_subplot(212, 
sharex=localGraph)\n\nplt.gcf().subplots_adjust(bottom=0.15)\nplt.gcf().subplots_adjust(left=0.15)\nplt.gcf().subplots_adjust(right=0.9)\n\nlocalGraph.title.set_text('Local Latency')\ntargetGraph.title.set_text('Target Latency')\n\n\n\n\n# Update the statistics for the ping sets\ndef UpdateStats() -> None:\n global maxLocal, minLocal, avgLocal, maxTarget, minTarget, avgTarget\n print(\"Updating Stats...\")\n # Update Local\n maxLocal = max(localPings.values())\n minLocal = min(localPings.values())\n avgLocal = sum(localPings.values()) / len(localPings)\n # Update Target\n maxTarget = max(targetPings.values())\n minTarget = min(targetPings.values())\n avgTarget = sum(targetPings.values()) / len(targetPings)\n # Print stats\n PrintStats()\n\n\ndef PrintStats() -> None:\n global maxLocal, minLocal, avgLocal, maxTarget, minTarget, avgTarget, tests\n print(\"TEST STATISTICS:\")\n print(\"LOCAL\")\n print(\"MIN: \" + str(minLocal))\n print(\"MAX: \" + str(maxLocal))\n print(\"AVG: \" + str(avgLocal))\n print(\"PACKETS: \" + str(len(localPings)))\n print(\"PACKET LOSS \" + str((tests-len(localPings))/tests))\n\n print(\"TARGET\")\n print(\"MIN: \" + str(minTarget))\n print(\"MAX: \" + str(maxTarget))\n print(\"AVG: \" + str(avgTarget))\n print(\"PACKETS: \" + str(len(targetPings)))\n print(\"PACKET LOSS: \" + str((tests-len(targetPings))/tests))\n\n\ndef InitScripts() -> None:\n \"\"\" Create scripts with a default format so the variables can be found easily\n \"\"\"\n f = open(\"ping.sh\", \"w+\")\n f.write(\"#!/bin/bash\\n cowsay 'Pinging Website and Recording data... this may take a while'\\n\")\n f.write(\"ping -i .200 -c 20 LOCAL > localping.txt\\n ping -i .200 -c 20 TARGET > targetping.txt\")\n f.close()\n\n f = open(\"traceroute.sh\", \"w+\")\n f.write(\"#!/bin/bash\\n rm traceroute.txt\\n sudo -S traceroute -I -n TARGET > traceroute.txt\")\n f.close()\n\nsubprocess.run(\"./perms.sh\", shell=True, check=True)\n\n\n\ndef GetTarget() -> None:\n \"\"\" None -> None\n\n Get target location from user and update scripts\n \"\"\"\n # Credit for input: https://www.w3schools.com/python/ref_func_input.asp\n print(\"Please enter a location to test: \")\n target = input()\n GetLocal(target) # Get local address from traceroute.sh and replace it in the ping script\n\n\n# Get local address\ndef GetLocal(target) -> None:\n \"\"\" Str -> None\n\n Get local address from traceroute.sh and replace it in the ping script\n \"\"\"\n print(\"Getting address!\")\n SubstutituteTrace(target)\n subprocess.run(\"./traceroute.sh\", shell=True, check=True)\n # Get local\n f = open(\"traceroute.txt\", \"r\")\n line = f.readlines()[2]\n local = line.split(\" \")[3]\n print(\"LOCAL: \" + local + \" TARGET: \" + target)\n Substitute(local, target)\n\n\ndef SubstutituteTrace(target) -> None:\n \"\"\" Str -> None\n\n Replace the TARGET locaiton in the traceroute script\n \"\"\"\n f = open(\"traceroute.sh\", \"r\")\n newF = open(\"traceroute2.sh\", \"w+\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"TARGET\", target)\n newF.write(line)\n\n f.close()\n newF.close()\n\n # Remove original file\n remove(\"./traceroute.sh\")\n # Move new file\n move(\"./traceroute2.sh\", \"./traceroute.sh\")\n subprocess.run(\"./perms.sh\", shell=True, check=True)\n\n\ndef Substitute(local, target) -> None:\n \"\"\" Str, Str -> None\n\n Substitute the local and target variables in the bash scripts\n \"\"\"\n print(\"Substituting locations\")\n # Credit for file handling: 
https://www.guru99.com/reading-and-writing-files-in-python.html\n # Read ping.sh and replace the local and target\n f = open(\"ping.sh\", \"r\")\n newF = open(\"ping2.sh\", \"w+\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"TARGET\", target)\n line = line.replace(\"LOCAL\", local)\n newF.write(line)\n\n f.close()\n newF.close()\n\n # Credit for remove and move method:\n # https://stackoverflow.com/questions/39086/search-and-replace-a-line-in-a-file-in-python\n # Remove original file\n remove(\"./ping.sh\")\n # Move new file\n move(\"./ping2.sh\", \"./ping.sh\")\n subprocess.run(\"./perms.sh\", shell=True, check=True)\n\n\ndef LiveVis(infoType: str, presType: str):\n #aniLocal = animation.FuncAnimation(figLocal, animate(\"LOCAL_PING\"), interval=1000)\n #aniTarget = animation.FuncAnimation(figTarget, animate(\"TARGET_PING\"), interval=1000)\n\n\n if infoType == \"LOCAL_PING\":\n\n # Format file object as list of lines\n localPingFileData = open('localping.txt', 'r').read()\n localPingFormattedFile = open('localping_formatted.txt', 'w+')\n localPingLines: list = localPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for localLine in localPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(localLine).__contains__(\"PING\") or not str(localLine).__contains__(\"icmp_seq=\"):\n continue\n\n else:\n # Split valid lines\n localLineSplit = localLine.split(\" \")\n # print(localLineSplit) #DEBUG\n\n # Retrieve package sequence (Ie: number of package being sent)\n localIndex: float = float(localLineSplit[4].strip(\"icmp_seq=\")) + (tests - testsPerIteration)\n\n # Retrieve latency of package (in milliseconds)\n localPingLatency: float = float(localLineSplit[6].strip(\"time=\"))\n localPings[len(localPings) + 1]: float = localPingLatency\n lastLocal[localIndex]: float = float(localLineSplit[6].strip(\"time=\"))\n\n # Store localPings data in localping_formatted.txt\n ## Also account for\n localPingFormattedFile.write(\"{0},{1}\\n\".format(localIndex, localPingLatency))\n\n # Store localPings info for easier use\n localPingsX: list = [*lastLocal.keys()] # List of Package sequence numbers\n localPingsY: list = [*lastLocal.values()] # List of Latency of each package corresponding to sequence\n\n # plot and Set axis dimensions\n localGraph.plot(localPingsX, localPingsY)\n\n if(presType == \"VERBOSE\"):\n localGraph.axis([1, max(localPingsX), 1, max(localPingsY)])\n elif(presType == \"SHORT\"):\n localGraph.axis([localPingsX[tests - testsPerIteration], max(localPingsX), 1, max(localPingsY)])\n\n localGraph.title.set_text('Local Latency')\n\n elif infoType == \"TARGET_PING\":\n\n # Format file object as list of lines\n targetPingFileData = open('targetping.txt', 'r').read()\n targetPingLines: list = targetPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for targetLine in targetPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(targetLine).__contains__(\"PING\") or not str(targetLine).__contains__(\"icmp_seq=\"):\n continue\n else:\n # Split valid lines\n targetLineSplit = targetLine.split(\" \")\n # print(targetLineSplit) #DEBUG\n # Retrieve package sequence (Ie: number of package being sent)\n try:\n targetIndex = float(targetLineSplit[4].strip(\"icmp_seq=\")) + tests - testsPerIteration\n except ValueError:\n targetIndex = float(targetLineSplit[5].strip(\"icmp_seq=\")) + tests - 
testsPerIteration\n\n # Retrieve latency of package (in milliseconds)\n try:\n targetPingLatency: float = float(targetLineSplit[6].strip(\"time=\"))\n targetPings[len(localPings) + 1]: float = targetPingLatency\n lastTarget[targetIndex]: float = float(targetLineSplit[6].strip(\"time=\"))\n except ValueError:\n targetPingLatency: float = float(targetLineSplit[7].strip(\"time=\"))\n targetPings[len(localPings) + 1]: float = targetPingLatency\n lastTarget[targetIndex]: float = float(targetLineSplit[7].strip(\"time=\"))\n\n\n\n # Store localPings info for easier use\n targetPingsX: list = [*lastTarget.keys()] # List of Package sequence numbers\n targetPingsY: list = [*lastTarget.values()] # List of Latency of each package corresponding to sequence\n\n # plot and Set axis dimensions\n targetGraph.plot(targetPingsX, targetPingsY)\n\n\n if(presType == \"VERBOSE\"):\n targetGraph.axis([1, max(targetPingsX), 1, max(targetPingsY)])\n elif(presType == \"SHORT\"):\n targetGraph.axis([targetPingsX[tests - testsPerIteration], max(targetPingsX), 1, max(targetPingsY)])\n\n targetGraph.title.set_text('Target Latency')\n\n'''\n\n # Format data from local/targetping.txt\n formatData(infoType)\n\n if (infoType == \"LOCAL_PING\"):\n localPullData = open('localping_formatted.txt', 'r').read()\n\n localDataArray = localPullData.split('\\n')\n localXArr = []\n localYArr = []\n\n for localDataLine in localDataArray:\n if len(localDataLine) > 1:\n localX, localY = localDataLine.split(',')\n localXArr.append(float(localX))\n localYArr.append(float(localY))\n\n localGraph.clf()\n localGraph.plot(localXArr, localYArr)\n\n elif (infoType == \"TARGET_PING\"):\n targetPullData = open('targetping_formatted.txt', 'r').read()\n\n targetDataArray = targetPullData.split('\\n')\n targetXArr = []\n targetYArr = []\n\n for targetDataLine in targetDataArray:\n if len(targetDataLine) > 1:\n targetX, targetY = targetDataLine.split(',')\n targetXArr.append(float(targetX))\n targetYArr.append(float(targetY))\n\n targetGraph.clf()\n targetGraph.plot(targetXArr, targetYArr)\n\n'''\n'''\n # Show whichever plot was constructed by function call\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n plt.gcf().subplots_adjust(right=0.9)\n # plt.show()\n'''\n'''\n \"\"\" (str) -> None\n\n Takes as an input the name of the info desired to be visualized, then plots a\n graph corresponding to said info and prints out related data. 
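# LiveVis above extracts icmp_seq and time from ping output by list position, with
# try/except to absorb lines that split differently. A regex keyed on the field
# names makes the parse position-independent; the sample line mimics typical Linux
# ping output and is only illustrative.
import re

PING_REPLY = re.compile(r'icmp_seq=(\d+).*?time=([\d.]+)')

def parse_ping(output):
    # {sequence number: latency in ms} for every reply line in the ping output
    return {int(m.group(1)): float(m.group(2)) for m in PING_REPLY.finditer(output)}

sample = '64 bytes from 8.8.8.8: icmp_seq=3 ttl=118 time=14.9 ms'
print(parse_ping(sample))  # {3: 14.9}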
*called by DataCollect*\n\n Currently only supports following infoTypes:\n - \"LOCAL_PING\" -> Statically Visualizes localping.txt latency\n - \"TARGET_PING\" -> Statically Visualizes targetping.txt latency\n \"\"\"\n\n fig = plt.figure()\n\n\n if infoType == \"LOCAL_PING\":\n\n # Format file object as list of lines\n localPingFileData = open('localping.txt', 'r').read()\n localPingLines: list = localPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for localLine in localPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(localLine).__contains__(\"PING\") or not str(localLine).__contains__(\"icmp_seq=\"):\n continue\n\n else:\n # Split valid lines\n localLineSplit = localLine.split(\" \")\n # print(localLineSplit) #DEBUG\n\n # Retrieve package sequence (Ie: number of package being sent)\n localIndex: float = float(localLineSplit[4].strip(\"icmp_seq=\"))\n\n # Retrieve latency of package (in milliseconds)\n localPings[len(localPings) + 1]: float = float(localLineSplit[6].strip(\"time=\"))\n lastLocal[localIndex]: float = float(localLineSplit[6].strip(\"time=\"))\n\n # print(localPings)\n # Store localPings info for easier use\n localPingsX: list = [*lastLocal.keys()] # List of Package sequence numbers\n localPingsY: list = [*lastLocal.values()] # List of Latency of each package corresponding to sequence\n\n # Title plot and label axis\n plt.title(\"Local Ping Latency\")\n plt.xlabel(\"Number of Packet sent\")\n plt.ylabel(\"Latency (in ms)\")\n\n\n\n # plot and Set axis dimensions\n ax1 = fig.add_subplot(max(localPingsX), max(localPingsY) + 5, 1)\n ax1.clear()\n ax1.plot(localPingsX, localPingsY)\n\n elif infoType == \"TARGET_PING\":\n\n # Format file object as list of lines\n targetPingFileData = open('targetping.txt', 'r').read()\n targetPingLines: list = targetPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for targetLine in targetPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(targetLine).__contains__(\"PING\") or not str(targetLine).__contains__(\"icmp_seq=\"):\n continue\n else:\n # Split valid lines\n targetLineSplit = targetLine.split(\" \")\n # print(targetLineSplit) #DEBUG\n # Retrieve package sequence (Ie: number of package being sent)\n try:\n targetIndex = float(targetLineSplit[4].strip(\"icmp_seq=\"))\n except ValueError:\n targetIndex = float(targetLineSplit[5].strip(\"icmp_seq=\"))\n\n # Retrieve latency of package (in milliseconds)\n try:\n targetPings[len(targetPings) + 1]: float = float(targetLineSplit[6].strip(\"time=\"))\n lastTarget[targetIndex]: float = float(targetLineSplit[6].strip(\"time=\"))\n except ValueError:\n targetPings[len(targetPings) + 1]: float = float(targetLineSplit[7].strip(\"time=\"))\n lastTarget[targetIndex]: float = float(targetLineSplit[7].strip(\"time=\"))\n\n # print(targetPings)\n # Store targetPings info for easier use\n targetPingsX = [*lastTarget.keys()]\n targetPingsY = [*lastTarget.values()]\n\n # Title Plot and label axis\n plt.title(\"Target Ping Latency\")\n plt.xlabel(\"Number of Packet sent\")\n plt.ylabel(\"Latency (in ms)\")\n\n # Plot and set axis dimensions\n #plt.plot(targetPingsX, targetPingsY)\n plt.axis([1, max(targetPingsX), 1, max(targetPingsY) + 5])\n\n # Show whichever plot was constructed by function call\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n 
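# The commented-out code above reaches for animation.FuncAnimation several times.
# A minimal working pattern, independent of packetman's own figures: FuncAnimation
# calls update() every `interval` milliseconds and the callback redraws the line.
# random.random() stands in for a freshly parsed latency value.
import random
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
xs, ys = [], []

def update(frame):
    xs.append(frame)
    ys.append(random.random())
    ax.clear()
    ax.plot(xs, ys)

ani = animation.FuncAnimation(fig, update, interval=1000)
plt.show()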
plt.gcf().subplots_adjust(right=0.9)\n ani = animation.FuncAnimation(fig, a, interval=1000)\n plt.show()\n\n ani = animation.FuncAnimation(fig, animate, interval=1000)\n plt.show()\n'''\n\n\n\ndef StaticVis(infoType: str) -> None:\n \"\"\" (str) -> None\n Takes as an input the name of the info desired to be visualized, then plots a\n graph corresponding to said info and prints out related data. *called by DataCollect*\n Currently only supports following infoTypes:\n - \"LOCAL_PING\" -> Statically Visualizes localping.txt latency\n - \"TARGET_PING\" -> Statically Visualizes targetping.txt latency\n \"\"\"\n\n if infoType == \"LOCAL_PING\":\n\n # Format file object as list of lines\n localPingFileData = open('localping.txt', 'r').read()\n localPingLines: list = localPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for localLine in localPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(localLine).__contains__(\"PING\") or not str(localLine).__contains__(\"icmp_seq=\"):\n continue\n\n else:\n # Split valid lines\n localLineSplit = localLine.split(\" \")\n # print(localLineSplit) #DEBUG\n\n # Retrieve package sequence (Ie: number of package being sent)\n localIndex: float = float(localLineSplit[4].strip(\"icmp_seq=\"))\n\n # Retrieve latency of package (in milliseconds)\n localPings[len(localPings)+1]: float = float(localLineSplit[6].strip(\"time=\"))\n lastLocal[localIndex]: float = float(localLineSplit[6].strip(\"time=\"))\n\n #print(localPings)\n # Store localPings info for easier use\n localPingsX: list = [*lastLocal.keys()] # List of Package sequence numbers\n localPingsY: list = [*lastLocal.values()] # List of Latency of each package corresponding to sequence\n\n # Title plot and label axis\n plt.title(\"Local Ping Latency\")\n plt.xlabel(\"Number of Packet sent\")\n plt.ylabel(\"Latency (in ms)\")\n\n # plot and Set axis dimensions\n plt.plot(localPingsX, localPingsY)\n plt.axis([1, max(localPingsX), 1, max(localPingsY) + 5])\n\n elif infoType == \"TARGET_PING\":\n\n # Format file object as list of lines\n targetPingFileData = open('targetping.txt', 'r').read()\n targetPingLines: list = targetPingFileData.split(\"\\n\")\n\n # Iterate through each line in total set of lines\n for targetLine in targetPingLines:\n\n # Skip first line of file as well as every other line which doesn't list immediate ping info\n if str(targetLine).__contains__(\"PING\") or not str(targetLine).__contains__(\"icmp_seq=\"):\n continue\n else:\n # Split valid lines\n targetLineSplit = targetLine.split(\" \")\n # print(targetLineSplit) #DEBUG\n # Retrieve package sequence (Ie: number of package being sent)\n try:\n targetIndex = float(targetLineSplit[4].strip(\"icmp_seq=\"))\n except ValueError:\n targetIndex = float(targetLineSplit[5].strip(\"icmp_seq=\"))\n\n # Retrieve latency of package (in milliseconds)\n try:\n targetPings[len(targetPings)+1]: float = float(targetLineSplit[6].strip(\"time=\"))\n lastTarget[targetIndex]: float = float(targetLineSplit[6].strip(\"time=\"))\n except ValueError:\n targetPings[len(targetPings) + 1]: float = float(targetLineSplit[7].strip(\"time=\"))\n lastTarget[targetIndex]: float = float(targetLineSplit[7].strip(\"time=\"))\n\n #print(targetPings)\n # Store targetPings info for easier use\n targetPingsX = [*lastTarget.keys()]\n targetPingsY = [*lastTarget.values()]\n\n # Title Plot and label axis\n plt.title(\"Target Ping Latency\")\n plt.xlabel(\"Number of Packet sent\")\n 
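# The Substitute/SubstutituteTrace helpers earlier in this file rewrite ping.sh and
# traceroute.sh by replacing the literal TARGET/LOCAL markers, then shuffle files
# with remove() and move(). string.Template does the rewrite in one pass; the
# $LOCAL/$TARGET placeholders below are an assumption, not what the generated
# scripts currently contain.
from string import Template

PING_TEMPLATE = Template(
    '#!/bin/bash\n'
    'ping -i .200 -c 20 $LOCAL > localping.txt\n'
    'ping -i .200 -c 20 $TARGET > targetping.txt\n'
)

def render_ping_script(local, target, path='ping.sh'):
    with open(path, 'w') as f:
        f.write(PING_TEMPLATE.substitute(LOCAL=local, TARGET=target))

render_ping_script('192.168.0.1', 'example.com')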
plt.ylabel(\"Latency (in ms)\")\n\n # Plot and set axis dimensions\n plt.plot(targetPingsX, targetPingsY)\n plt.axis([1, max(targetPingsX), 1, max(targetPingsY) + 5])\n\n # Show whichever plot was constructed by function call\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n plt.gcf().subplots_adjust(right=0.9)\n plt.show()\n\n\n\n# Run data collection processes\ndef DataCollect() -> None:\n # Call ping script\n subprocess.run(\"./ping.sh\", shell=True, check=True)\n\n\ndef Visualize(visInfoTypes: list) -> None:\n print(\"Preparing visualization...\")\n # calls visualize for each ping type\n '''\n localGraph.clear()\n LiveVis(visInfoTypes[0], \"VERBOSE\")\n targetGraph.clear()\n LiveVis(visInfoTypes[1], \"VERBOSE\")\n '''\n\n\n localGraph.clear()\n LiveVis(visInfoTypes[0], \"SHORT\")\n targetGraph.clear()\n LiveVis(visInfoTypes[1], \"SHORT\")\n\n plt.xlabel(\"Number of Packets sent\")\n plt.ylabel(\"Latency (in ms)\")\n\n figure.canvas.draw()\n\n # Show whichever plot was constructed by function call\n\n #plt.show()\n\n\n\n\n\n\ndef Clean() -> None:\n print(\"Cleaning...\")\n subprocess.run(\"./rm.sh\", shell=True, check=True)\n\n\n\n#Main\nif __name__ == \"__main__\":\n STOP_THREADS = False\n # Have user input target\n\n # User traceroute to get local\n InitScripts()\n targetThread = threading.Thread(target=GetTarget())\n targetThread.start()\n targetThread.join()\n\n while True:\n try:\n # Data collection\n collectionThread: Thread = threading.Thread(target=DataCollect())\n collectionThread.start()\n collectionThread.join()\n\n tests += testsPerIteration\n\n # Visualization\n visThread = threading.Thread(target=Visualize([\"LOCAL_PING\", \"TARGET_PING\"]))\n visThread.start()\n visThread.join()\n\n # Print Stats\n statThread = threading.Thread(target=UpdateStats())\n statThread.start()\n statThread.join()\n\n # Cleanup\n cleanupThread = threading.Thread(target=Clean)\n cleanupThread.start()\n cleanupThread.join()\n\n except KeyboardInterrupt:\n break\n","sub_path":"packetman.py","file_name":"packetman.py","file_ext":"py","file_size_in_byte":22253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484056589","text":"from django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^dmail/$', CreateDMail.as_view(), name=\"create\"),\n url(r'^dmail/(?P[0-9]+)/$',\n DetailsDMail.as_view(), name=\"details\"),\n\n url(r'^divergence/$', CreateDivergence.as_view(), name=\"create\"),\n url(r'^divergence/(?P[0-9]+)/$',\n DetailsDivergence.as_view(), name=\"details\"),\n url(r'^divergence/current', CurrentDivergence.as_view(), name=\"current\")\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"dmail/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"298199974","text":"from model.group import Group\nfrom random import randrange\n\ndef test_modify_some_group(app):\n if app.group.count() == 0:\n app.group.create(Group(name = \"Group for modification\"))\n index = randrange(app.group.count())\n old_groups = app.group.get_group_list()\n group = Group(name =\"Modified name\", header=\"Modified header\", footer=\"Modifiesd footer\")\n group.id = old_groups[index].id\n app.group.modify_group_by_index(group, index)\n new_groups = app.group.get_group_list()\n old_groups[index] = group\n assert 
sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)\n\n\n\ndef test_modify_group_name(app):\n if app.group.count() == 0:\n app.group.create(Group(name = \"Group for modification\"))\n old_groups = app.group.get_group_list()\n group = Group(name =\"New name\")\n group.id = old_groups[0].id\n app.group.modify_first_group(group)\n new_groups = app.group.get_group_list()\n old_groups[0] = group\n assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)\n\n\ndef test_modify_group_header(app):\n if app.group.count() == 0:\n app.group.create(Group(name = \"Group for modification\"))\n app.group.modify_first_group(Group(header=\"new header\"))\n\n\n","sub_path":"tests/test_modify_group.py","file_name":"test_modify_group.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"189565701","text":"from flask_appbuilder import Model\nfrom flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime, VARCHAR, DECIMAL\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.dialects import postgresql\n\nclass NotaFiscalGrupo(Model):\n\tid = Column(Integer, primary_key = True)\n\tname = Column(String(50), unique=True, nullable=False)\n\t#notafiscal_id = Column(Integer, ForeignKey('notafiscal.id'), nullable=False)\n\t#notafiscal = relationship('NotaFiscal')\n\n\tdef __repr__(self):\n\t\treturn self.name\n\nclass NotaFiscal(Model):\n\tid = Column(Integer, primary_key=True)\n\tcreated_at=Column(DateTime)\n\treference_month = Column(Integer)\n\treference_year = Column(Integer)\n\tdocument = Column(VARCHAR(length = 14))\n\tdescription = Column(VARCHAR(length=256))\n\tamount = Column(DECIMAL(precision =18, scale = 2))\n\tis_active = Column(VARCHAR(length=3))\n\tdeactive_at = Column(DateTime)\n\tnota_fiscal_grupo_id = Column(Integer, ForeignKey('nota_fiscal_grupo.id'), nullable=False)\n\tnota_fiscal_grupo = relationship(\"NotaFiscalGrupo\")\n\t\n\tdef __repr__(self):\n\t\treturn self.name\n\n\n","sub_path":"notfisc/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"518973238","text":"import cybox.bindings.uri_object_1_2 as uri_binding\n\nfrom cybox.common import DefinedObject, AnyURI\n\nclass URI(DefinedObject):\n _XSI_TYPE = \"URIObjectType\"\n \n TYPE_URL = \"URL\"\n TYPE_GENERAL = \"General URN\"\n TYPE_DOMAIN = \"Domain Name\"\n\n TYPES = (TYPE_URL, TYPE_GENERAL, TYPE_DOMAIN)\n\n def __init__(self):\n self.value = None\n pass\n\n # Properties\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if isinstance(value, AnyURI):\n self._value = value\n else:\n self._value = AnyURI(value)\n\n @property\n def type_(self):\n return self._type\n\n @type_.setter\n def type_(self, type_):\n if type_ not in self.TYPES:\n raise ValueError(\"Invalid URL Type: {0}\".format(type_))\n self._type = type_\n\n # Import/Export\n def to_obj(self):\n uriobject = uri_binding.URIObjectType()\n uriobject.set_anyAttributes_({'xsi:type' : 'URIObj:URIObjectType'})\n uriobject.set_type(self.type_)\n uriobject.set_Value(self.value.to_obj())\n return uriobject\n\n def to_dict(self):\n return {\n 'type': self.type_,\n 'value': self.value.to_dict(),\n 'xsi_type' : self._XSI_TYPE,\n }\n\n @staticmethod\n def from_obj(uri_obj):\n 
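# A note on packetman.py's __main__ block above: threading.Thread(target=GetTarget())
# calls GetTarget immediately on the main thread and hands Thread its return value
# (None), so nothing actually runs on the worker. Pass the callable itself and use
# args for parameters; a minimal sketch:
import threading

def collect(label):
    print('collecting', label)

# wrong: collect('latency') runs here, and Thread gets target=None
# t = threading.Thread(target=collect('latency'))

# right: Thread invokes collect('latency') on the new thread
t = threading.Thread(target=collect, args=('latency',))
t.start()
t.join()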
uri = URI()\n uri.type_ = uri_obj.get_type()\n uri.value = AnyURI.from_obj(uri_obj.get_Value())\n return uri\n\n @staticmethod\n def from_dict(uri_dict):\n uri = URI()\n if 'type' in uri_dict:\n uri.type_ = uri_dict['type']\n if 'value' in uri_dict:\n uri.value = AnyURI.from_dict(uri_dict['value'])\n\n return uri\n","sub_path":"cybox/objects/uri_object.py","file_name":"uri_object.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179649513","text":"# Store address data into register A.\ndef getAddressType1(segment, index):\n seg2Reg = {\n 'local': 'LCL',\n 'argument': 'ARG',\n 'this': 'THIS',\n 'that': 'THAT'\n }\n\n pre = _saveRegisterD()\n post = _setRegisterAandD()\n middle = [\n # Get address and store it into register D.\n '@' + seg2Reg[segment],\n 'D=M',\n '@' + index,\n 'D=D+A',\n ]\n\n return pre + middle + post\n\n\ndef getAddressType2(segment, index):\n seg2Reg = {\n 'pointer': '3',\n 'temp': '5',\n }\n\n pre = _saveRegisterD()\n post = _setRegisterAandD()\n middle = [\n # Get address and store it into register D.\n '@' + seg2Reg[segment],\n 'D=A',\n '@' + index,\n 'D=D+A',\n ]\n\n return pre + middle + post\n\n\ndef getAddressType3(segment, index, symbol):\n pre = _saveRegisterD()\n post = _setRegisterAandD()\n middle = [\n # Get address and store it into register D.\n '@' + symbol + '.' + index,\n 'D=A',\n ]\n\n return pre + middle + post\n\n\ndef _saveRegisterD():\n return [\n # Copy data from register D to R13.\n '@R13',\n 'M=D',\n ]\n\n\ndef _setRegisterAandD():\n return [\n # Copy address from register D to R14.\n '@R14',\n 'M=D',\n # Copy data from register R13 to D.\n '@R13',\n 'D=M',\n # Copy address from register R14 to A.\n '@R14',\n 'A=M',\n ]\n","sub_path":"07/asmAddress.py","file_name":"asmAddress.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181500138","text":"from numpy import pi\nfrom ....Classes.Segment import Segment\n\n\ndef comp_magnetization_dict(self, is_north=True):\n \"\"\"Compute the dictionary of the magnetization direction of the magnets (key=magnet_X, value=angle[rad])\n Mangetization angle with Hole centered on Ox axis\n\n Parameters\n ----------\n self : HoleMLSRPM\n a HoleMLSRPM object\n is_north: True\n True: comp north magnetization, else add pi [rad]\n\n Returns\n -------\n mag_dict: dict\n magnetization dictionary (key=magnet_X, value=angle[rad])\n \"\"\"\n\n # Comp magnet\n point_dict = self._comp_point_coordinate()\n\n mag_dict = dict()\n Z3 = point_dict[\"Z3\"]\n Z4 = point_dict[\"Z4\"]\n Z7 = point_dict[\"Z7\"]\n Z8 = point_dict[\"Z8\"]\n\n Zch = (Z3 + Z4) / 2\n Zcl = (Z7 + Z8) / 2\n S0 = Segment(Zch, Zcl)\n mag_dict[\"magnet_0\"] = S0.comp_normal()\n\n ####Comp_normal direction?\n\n if not is_north:\n mag_dict[\"magnet_0\"] += pi\n\n if self.magnetization_dict_offset is not None:\n for key, value in self.magnetization_dict_offset:\n mag_dict[key] += value\n\n return mag_dict\n","sub_path":"pyleecan/Methods/Slot/HoleMLSRPM/comp_magnetization_dict.py","file_name":"comp_magnetization_dict.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643855929","text":"import networkx as nx\nimport scipy as sc\nimport matplotlib.pylab as p\nfrom matplotlib import pyplot as plt\n\ndef GenRdmAdjList(N = 2, C = 0.5):\n \"\"\"\n \"\"\"\n Ids = range(N)\n ALst = 
[]\n for i in Ids:\n if sc.random.uniform(0,1,1) < C:\n Lnk = sc.random.choice(Ids,2).tolist()\n if Lnk[0] != Lnk[1]:\n ALst.append(Lnk)\n return ALst\n\nMaxN = 30\nC = 0.75\n\nAdjL = sc.array(GenRdmAdjList(MaxN,C))\n\nSps = sc.unique(AdjL)\n\nSizRan = ([-10,10]) #use log10 scale\nSizs = sc.random.uniform(SizRan[0],SizRan[1],MaxN)\n\np.hist(Sizs) #log10 scale\n# p.show()\np.hist(10 ** Sizs) #raw scale\n# p.show()\np.close('all') # close all open plot objects\n\nf1 = p.figure()\npos = nx.circular_layout(Sps)\n\nG = nx.Graph()\n\nG.add_nodes_from(Sps)\nG.add_edges_from(tuple(AdjL))\n\nNodSizs= 1000 * (Sizs-min(Sizs))/(max(Sizs)-min(Sizs)) \n\n\nnx.draw_networkx(G, pos, node_size = NodSizs)\np.show()\nf1.savefig('../results/network.pdf')\n\n","sub_path":"week7/code/DrawFW.py","file_name":"DrawFW.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"34655121","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 Tecnativa - Pedro M. Baeza\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, fields, models\nimport math\n\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n\n equipment_ids = fields.Many2many(\n comodel_name=\"maintenance.equipment\", compute=\"_compute_equipment_ids\",\n string=\"Equipments\",\n )\n\n @api.multi\n @api.depends('invoice_line_ids', 'invoice_line_ids.equipment_ids')\n def _compute_equipment_ids(self):\n for invoice in self:\n invoice.equipment_ids = [\n (6, 0, invoice.mapped('invoice_line_ids.equipment_ids').ids),\n ]\n\n @api.multi\n def action_invoice_cancel(self):\n res = super(AccountInvoice, self).action_invoice_cancel()\n self.mapped('equipment_ids').unlink()\n return res\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n equipment_ids = fields.One2many(\n comodel_name=\"maintenance.equipment\", inverse_name=\"invoice_line_id\",\n string=\"Equipments\",\n )\n\n # def _prepare_equipment_vals_list(self, invoice_line):\n # vals_list = []\n # num = int(math.ceil(invoice_line.quantity))\n # for i in range(num):\n # vals_list.append({\n # 'name': \"{} [{}/{}]\".format(invoice_line.name, i + 1, num),\n # 'category_id': (\n # invoice_line.asset_profile_id.equipment_category_id.id\n # ),\n # 'invoice_line_id': invoice_line.id,\n # 'cost': invoice_line.price_subtotal / invoice_line.quantity,\n # 'partner_id': invoice_line.invoice_id.partner_id.id,\n # })\n # return vals_list\n #\n # @api.multi\n # def asset_create(self):\n # for line in self.filtered('asset_profile_id.equipment_category_id'):\n # # Create equipments\n # equipments = self.env['maintenance.equipment']\n # for vals in self._prepare_equipment_vals_list(line):\n # equipments += equipments.create(vals)\n # # Link assets to equipments\n # # HACK: There's no way to inherit method knowing the created asset\n # prev_assets = self.env['account.asset'].search([])\n # super(AccountInvoiceLine, line).asset_create()\n # current_assets = self.env['account.asset'].search([])\n # asset = current_assets - prev_assets\n # asset.write({'equipment_ids': [(4, x) for x in equipments.ids]})\n","sub_path":"magnus_assets_equipment_link/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"354352000","text":"import services.controlers.loggControler\nimport services.querys.form204_2Query\nfrom 
services.exceptions import *\nimport os\nimport hashlib\nimport re\nimport cgi\nimport urllib\n\nclass Forms204_2Controler:\n\tdef get(self):\n\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\tform204_2List=[]\n\t\ttry:\n\t\t\tdata = form204_2Query.getAll()\n\t\t\tfor form204_2Obj in data:\n\t\t\t\tform204_2=form204_2Obj.toDictReduced()\n\t\t\t\tform204_2List.append(form204_2)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2List\n\t'''\n\tdef getAllWithFilter(self, sessionJson):\n\t\tform204_2Objs = []\n\t\ttry:\n\t\t\t# Verifica tipo de usuario\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\tuserTypeQuery = services.querys.userTypeQuery.UserTypeQuery()\n\t\t\tif int(sessionJson[\"userTypeId\"]) == int(userTypeQuery.getUserTypeByAutoId(0)[\"id\"]):\n\t\t\t\tform204_2Objs = form204_2Query.getAll()\n\t\t\telse:\n\t\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\t\tform204_2Objs = form204_2Query.getByCompany(sessionJson[\"companyIdSession\"])\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2Objs\n\n\tdef getWithFilter(self, sessionJson):\n\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\tform204_2List=[]\n\t\ttry:\n\t\t\tdata = self.getAllWithFilter(sessionJson)\n\t\t\tfor form204_2Obj in data:\n\t\t\t\tform204_2=form204_2Obj.toDictFront()\n\t\t\t\tform204_2List.append(form204_2)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2List\n\t'''\n\tdef getById(self, identifier):\n\t\tform204_2={}\n\t\ttry:\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\tattributesList=[]\n\t\t\tform204_2Obj = form204_2Query.getById(identifier)\n\t\t\tform204_2 = form204_2Obj.toDictFront()\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2\n\n\tdef getByEventId(self, eventId, numOpPeriod):\n\t\tform204_2List=[]\n\t\ttry:\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\tdata = form204_2Query.getByEventId(eventId)\n\t\t\tif numOpPeriod == None:\n\t\t\t\tdata = form204_2Query.getByEventId(eventId)\n\t\t\telse:\n\t\t\t\tdata = form204_2Query.getByEventAndPeriod(eventId, numOpPeriod)\n\t\t\tfor form204_2Obj in data:\n\t\t\t\tform204_2=form204_2Obj.toDictFront()\n\t\t\t\tform204_2List.append(form204_2)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2List\n\n\tdef getByNumOpPeriod(self, eventId, numOpPeriod):\n\t\tform204_2List=[]\n\t\ttry:\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\tdata = form204_2Query.getByEventAndPeriod(eventId, numOpPeriod)\n\t\t\tfor form204_2Obj in data:\n\t\t\t\tform204_2=form204_2Obj.toDictFront()\n\t\t\t\tform204_2List.append(form204_2)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical - 
Forms204_2Controler: getByNumOpPeriod()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn form204_2List\n\n\tdef add(self, eventId, numOpPeriod, system, cityCode, canal, assignedTo, \n\t\t\tmedical, evacuation, other, systemLocation, observations):\n\t\tmessage=\"FORMULARIO_204_2_NO_REGISTRADO\"\n\t\tstate = 300\n\t\ttry:\n\t\t\t#sessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\t#sessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\t# VALIDACIONES\n\t\t\tdoRegister = True\n\t\t\t'''\n\t\t\tif form204_2Query.getExists(name):\n\t\t\t\tdoRegister = False\n\t\t\t\tstate = 203\n\t\t\t\tmessage = message + \": EL ELEMENTO YA EXISTE\"\n\t\t\t'''\n\t\t\t# REGISTRO\n\t\t\tif doRegister == True:\n\t\t\t\tquery = form204_2Query.add(eventId, numOpPeriod, system, cityCode, canal, assignedTo, \n\t\t\t\t\tmedical, evacuation, other, systemLocation, observations)\n\t\t\t\tmessage=\"FORMULARIO_204_2_REGISTRADO\"\n\t\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn message, state\n\n\tdef edit(self, eventId, numOpPeriod, system, cityCode, canal, assignedTo, \n\t\t\tmedical, evacuation, other, systemLocation, observations, identifier):\n\t\tmessage=\"FORMULARIO_204_2_NO_MODIFICADO\"\n\t\tstate = 300\n\t\ttry:\n\t\t\tform204_2Query = services.querys.form204_2Query.Form204_2Query()\n\t\t\tobj = form204_2Query.getById(identifier)\n\t\t\tif eventId is not None:\n\t\t\t\tobj.eventId=eventId\n\t\t\tif numOpPeriod is not None:\n\t\t\t\tobj.numOpPeriod=numOpPeriod\n\t\t\tif system is not None:\n\t\t\t\tobj.system=system\n\t\t\tif cityCode is not None:\n\t\t\t\tobj.cityCode=cityCode\n\t\t\tif canal is not None:\n\t\t\t\tobj.canal=canal\n\t\t\tif assignedTo is not None:\n\t\t\t\tobj.assignedTo=assignedTo\n\t\t\tif medical is not None:\n\t\t\t\tobj.medical=medical\n\t\t\tif evacuation is not None:\n\t\t\t\tobj.evacuation=evacuation\n\t\t\tif other is not None:\n\t\t\t\tobj.other=other\n\t\t\tif systemLocation is not None:\n\t\t\t\tobj.systemLocation=systemLocation\n\t\t\tif observations is not None:\n\t\t\t\tobj.observations=observations\n\t\t\tform204_2Query.edit(obj)\n\t\t\tmessage=\"FORMULARIO_204_2_MODIFICADO\"\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\t\n\t\treturn message, state","sub_path":"services/controlers/forms204_2Controler.py","file_name":"forms204_2Controler.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239359472","text":"\"\"\"\nThe approach here is to capture the element window-wise. we keep our low at 0th index and high at len(arr)-k.\nbecause we can have the window starting position only from len(arr)-k position onwards if the element is\ntowards the right-most. Now we start finding the mid, if the given x- element at mid is greater than mid+kth\nindex value - x, this means that a better window exsists towards our right side of mid, so we move low to \nmid+1 else it means that a better window is towards the left side of the mid, so we move towards the left\nmaking high to mid. 
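# Every method of Forms204_2Controler above repeats the same try/except that
# forwards e.message to LoggControler and falls back to a default value. A
# decorator can centralize the pattern; this sketch logs with the stdlib instead
# of the project's LoggControler, so it illustrates the shape rather than being a
# drop-in for the service code.
import functools
import logging

def logged(default=None):
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except Exception:
                logging.exception('Critical - %s()', fn.__name__)
                return default
        return inner
    return wrap

@logged(default=[])
def get_by_event(event_id):
    raise RuntimeError('boom')

print(get_by_event(7))  # logs the traceback, returns []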
we do this until we find the best window and then at last we have the starting index i.e\nlow and low+k elements as the best window as solution.\n\nTime complexity - O(log n -k)\nSpace complexity - O(1)\nLeetcode running\n\n\"\"\"\ndef findClosestElements(self, arr, k, x):\n if x >= arr[-1]:\n return arr[-k:]\n \n if x <= arr[0]:\n return arr[:k]\n \n low = 0\n high = len(arr)-k \n while low < high:\n mid = low+(high-low) // 2\n \n if x - arr[mid] > arr[mid+k] - x:\n low = mid+1\n else:\n high = mid\n return arr[low:low+k]\n ","sub_path":"K-closest.py","file_name":"K-closest.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"5710467","text":"import cv2 as cv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef show_image(image_path,type = \"matplotlib\"):\n\n image = cv.imread(image_path, 0)\n if type == \"cv\":\n cv.imshow(\"original\",image)\n cv.waitKey(0)\n cv.destroyWindow()\n else:\n plt.imshow(image,cmap = 'gray', interpolation = 'bicubic')\n plt.xticks([])\n plt.yticks([])\n plt.show()\ndef show_cam_video():\n cap = cv.VideoCapture(0)\n #cap.open(0)\n while True:\n ret, frame = cap.read()\n gray = cv.cvtColor(frame, cv.COLORMAP_BONE)\n\n cv.imshow('frame',gray)\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv.destroyAllWindows()\n\ndef record_camera_video():\n\tpass\n\ndef draw_a_line():\n\n\timg = np.zeros((512,512,3),np.uint8)\n\tcv.line(img,(0,0),(511,511),(255,0,0),5)\n\n\n","sub_path":"Python/CV/cv_utilities/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"60255728","text":"\"\"\"\nconv.py\n=========\n\nConvolutional NN modules and custom blocks\n\nCreated by Maxim Ziatdinov (email: ziatdinovmax@gmail.com)\n\"\"\"\nfrom typing import Union, Tuple, List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..utils import get_activation, get_bnorm, get_conv, get_maxpool\n\nfrom warnings import warn, filterwarnings\n\nfilterwarnings(\"ignore\", module=\"torch.nn.functional\")\n\ntt = torch.tensor\n\n\nclass convEncoderNet(nn.Module):\n \"\"\"\n Standard convolutional encoder\n \"\"\"\n def __init__(self,\n input_dim: Tuple[int],\n input_channels: int = 1,\n latent_dim: int = 2,\n layers_per_block: List[int] = None,\n hidden_dim: int = 32,\n batchnorm: bool = True,\n activation: str = \"lrelu\",\n softplus_out: bool = True,\n pool: bool = True,\n ) -> None:\n \"\"\"\n Initializes encoder module\n \"\"\"\n super(convEncoderNet, self).__init__()\n if layers_per_block is None:\n layers_per_block = [1, 2, 2]\n output_dim = (tt(input_dim) // 2**len(layers_per_block)).tolist()\n output_channels = hidden_dim * len(layers_per_block)\n self.latent_dim = latent_dim\n self.feature_extractor = FeatureExtractor(\n len(input_dim), input_channels, layers_per_block, hidden_dim,\n batchnorm, activation, pool)\n self.features2latent = features_to_latent(\n [output_channels, *output_dim], 2*latent_dim)\n self.activation_out = nn.Softplus() if softplus_out else lambda x: x\n\n def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:\n \"\"\"\n Forward pass\n \"\"\"\n x = self.feature_extractor(x)\n encoded = self.features2latent(x)\n mu, sigma = encoded.split(self.latent_dim, 1)\n sigma = self.activation_out(sigma)\n return mu, sigma\n\n\nclass convDecoderNet(nn.Module):\n \"\"\"\n Standard convolutional decoder\n 
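# A brute-force cross-check for the binary-search window in K-closest.py above:
# sort by distance to x (ties broken toward the smaller value), keep k, re-sort.
# Both routines should agree on every input; the helper name is illustrative.
def closest_bruteforce(arr, k, x):
    return sorted(sorted(arr, key=lambda v: (abs(v - x), v))[:k])

print(closest_bruteforce([1, 2, 3, 4, 5], 4, 3))   # [1, 2, 3, 4]
print(closest_bruteforce([1, 2, 3, 4, 5], 4, -1))  # [1, 2, 3, 4]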
\"\"\"\n def __init__(self,\n latent_dim: int,\n output_dim: int,\n output_channels: int = 1,\n layers_per_block: List[int] = None,\n hidden_dim: int = 96,\n batchnorm: bool = True,\n activation: str = \"lrelu\",\n sigmoid_out: bool = True,\n upsampling_mode: str = \"bilinear\",\n ) -> None:\n \"\"\"\n Initializes decoder module\n \"\"\"\n super(convDecoderNet, self).__init__()\n if layers_per_block is None:\n layers_per_block = [2, 2, 1]\n input_dim = (tt(output_dim) // 2**len(layers_per_block)).tolist()\n self.latent2features = latent_to_features(\n latent_dim, [hidden_dim, *input_dim])\n self.upsampler = Upsampler(\n len(output_dim), hidden_dim, layers_per_block, output_channels,\n batchnorm, activation, upsampling_mode)\n self.activation_out = nn.Sigmoid() if sigmoid_out else lambda x: x\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass\n \"\"\"\n x = self.latent2features(x)\n x = self.activation_out(self.upsampler(x))\n return x\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Creates a block of layers each consisting of convolution operation,\n (optional) nonlinear activation and (optional) batch normalization\n \"\"\"\n def __init__(self,\n ndim: int,\n nlayers: int,\n input_channels: int,\n output_channels: int,\n kernel_size: Union[Tuple[int], int] = 3,\n stride: Union[Tuple[int], int] = 1,\n padding: Union[Tuple[int], int] = 1,\n batchnorm: bool = False,\n activation: str = \"lrelu\",\n pool: bool = False,\n ) -> None:\n \"\"\"\n Initializes module parameters\n \"\"\"\n super(ConvBlock, self).__init__()\n if not 0 < ndim < 4:\n raise AssertionError(\"ndim must be equal to 1, 2 or 3\")\n activation = get_activation(activation)\n block = []\n for i in range(nlayers):\n input_channels = output_channels if i > 0 else input_channels\n block.append(get_conv(ndim)(input_channels, output_channels,\n kernel_size=kernel_size, stride=stride, padding=padding))\n if activation is not None:\n block.append(activation())\n if batchnorm:\n block.append(get_bnorm(ndim)(output_channels))\n if pool:\n block.append(get_maxpool(ndim)(2, 2))\n self.block = nn.Sequential(*block)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Defines a forward pass\n \"\"\"\n output = self.block(x)\n return output\n\n\nclass UpsampleBlock(nn.Module):\n \"\"\"\n Upsampling performed using bilinear or nearest-neigbor interpolation\n followed by 1-by-1 convolution, which an be used to reduce a number of\n feature channels\n \"\"\"\n def __init__(self,\n ndim: int,\n input_channels: int,\n output_channels: int,\n scale_factor: int = 2,\n mode: str = \"bilinear\") -> None:\n \"\"\"\n Initializes module parameters\n \"\"\"\n super(UpsampleBlock, self).__init__()\n warn_msg = (\"'bilinear' mode is not supported for 1D and 3D;\" +\n \" switching to 'nearest' mode\")\n if mode not in (\"bilinear\", \"nearest\"):\n raise NotImplementedError(\n \"Use 'bilinear' or 'nearest' for upsampling mode\")\n if not 0 < ndim < 4:\n raise AssertionError(\"ndim must be equal to 1, 2 or 3\")\n if mode == \"bilinear\" and ndim in (3, 1):\n warn(warn_msg, category=UserWarning)\n mode = \"nearest\"\n self.mode = mode\n self.scale_factor = scale_factor\n self.conv = get_conv(ndim)(\n input_channels, output_channels,\n kernel_size=1, stride=1, padding=0)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Defines a forward pass\n \"\"\"\n x = F.interpolate(\n x, scale_factor=self.scale_factor, mode=self.mode)\n return self.conv(x)\n\n\nclass FeatureExtractor(nn.Sequential):\n \"\"\"\n 
Convolutional feature extractor\n \"\"\"\n def __init__(self,\n ndim: int,\n input_channels: int = 1,\n layers_per_block: List[int] = None,\n nfilters: int = 32,\n batchnorm: bool = True,\n activation: str = \"lrelu\",\n pool: bool = True,\n ) -> None:\n \"\"\"\n Initializes feature extractor module\n \"\"\"\n super(FeatureExtractor, self).__init__()\n if layers_per_block is None:\n layers_per_block = [1, 2, 2]\n for i, layers in enumerate(layers_per_block):\n in_filters = input_channels if i == 0 else nfilters * i\n block = ConvBlock(ndim, layers, in_filters, nfilters * (i+1),\n batchnorm=batchnorm, activation=activation,\n pool=pool)\n self.add_module(\"c{}\".format(i), block)\n\n\nclass Upsampler(nn.Sequential):\n \"\"\"\n Convolutional upsampler\n \"\"\"\n def __init__(self,\n ndim: int,\n input_channels: int = 96,\n layers_per_block: List[int] = None,\n output_channels: int = 1,\n batchnorm: bool = True,\n activation: str = \"lrelu\",\n upsampling_mode: str = \"bilinear\",\n ) -> None:\n \"\"\"\n Initializes upsampler module\n \"\"\"\n super(Upsampler, self).__init__()\n if layers_per_block is None:\n layers_per_block = [2, 2, 1]\n\n nfilters = input_channels\n for i, layers in enumerate(layers_per_block):\n in_filters = nfilters if i == 0 else nfilters // i\n block = ConvBlock(ndim, layers, in_filters, nfilters // (i+1),\n batchnorm=batchnorm, activation=activation,\n pool=False)\n self.add_module(\"conv_block_{}\".format(i), block)\n up = UpsampleBlock(ndim, nfilters // (i+1), nfilters // (i+1),\n mode=upsampling_mode)\n self.add_module(\"up_{}\".format(i), up)\n\n out = ConvBlock(ndim, 1, nfilters // (i+1), output_channels,\n 1, 1, 0, activation=None)\n self.add_module(\"output_layer\", out)\n\n\nclass features_to_latent(nn.Module):\n \"\"\"\n Maps features (usually, from a convolutional net/layer) to latent space\n \"\"\"\n def __init__(self, input_dim: Tuple[int], latent_dim: int = 2) -> None:\n super(features_to_latent, self).__init__()\n self.reshape_ = torch.prod(tt(input_dim))\n self.fc_latent = nn.Linear(self.reshape_, latent_dim)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x.view(-1, self.reshape_)\n return self.fc_latent(x)\n\n\nclass latent_to_features(nn.Module):\n \"\"\"\n Maps latent vector to feature space\n \"\"\"\n def __init__(self, latent_dim: int, out_dim: Tuple[int]) -> None:\n super(latent_to_features, self).__init__()\n self.reshape_ = out_dim\n self.fc = nn.Linear(latent_dim, torch.prod(tt(out_dim)).item())\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.fc(x)\n return x.view(-1, *self.reshape_)\n","sub_path":"pyroved/nets/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":9682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130784439","text":"from datetime import datetime\nfrom datetime import timedelta\n\n\ndef extract_model_json(ext_lst, model_json):\n data = {}\n for ext in ext_lst:\n ext_value = model_json.get(ext, None)\n if ext_value:\n data[ext] = ext_value\n return data\n\n\ndef get_date(datester, days):\n date = datetime.strptime(datester, \"%Y-%m-%d\")\n # start_date = (date - timedelta(days=days)).strftime(\"%Y-%m-%d\")\n start_date = date - timedelta(days=days)\n return start_date\n\n","sub_path":"oil_monitoring_system/apps/oilfield/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"632628910","text":"################################################################################\n# Gauss Elimination Script\n#\n# Purpose: This application impelments Gauss Elimination to solve a system.\n# Users can choose to either use pivoting or not. This lab explores Gauss\n# elimination algorithms.\n#\n# Author: Kevin Chen\n# Contact: chenk106@mcmaster.ca\n# GitHub: https://github.com/CNIVEK/GaussEliminationLab\n#\n################################################################################\nimport sys\n\nimport numpy as np\n\nclass Gauss:\n\n # Matrix intialization\n def start(self):\n # self.n = int(input(\"Enter the number of unknowns: \"))\n self.n = 2\n self.A = np.zeros((self.n,self.n+1)) # Matrix creation, n xy n+1 size\n self.R = np.zeros((self.n,self.n+1)) # Multiplication matrix for columns\n self.x = np.zeros(self.n) # Solution array\n\n # print(\"Enter augmented matrix coefficients:\")\n # for i in range(self.n):\n # for j in range (self.n+1):\n # self.A[i][j] = input(\"matrix[\"+ str(i) +\"][\"+ str(j) +\"] = \")\n\n self.A = np.array([[4.03, 2.16, -4.61],\n [6.21, 3.35, -7.19]])\n\n # Applying Gauss Elimination\n def gaussElim(self):\n print(f\"\\nGuass Elimination\\ninitial matrix:\\n {self.A}\")\n for i in range(self.n-1): # loop over columns\n if self.A[i][i] == 0:\n sys.exit(\"pivor is zero, exiting\")\n\n print(f\"\\ntransformation #{i+1}:\")\n\n # Compute multipliers for current column\n for j in range(i+1, self.n):\n self.R[j][i] = self.A[j][i] / self.A[i][i]\n\n print(f\"row{j+1} = row{j+1} - {self.R[j][i]} * row{i+1}\")\n\n # Apply transformation to remaining submatrix\n for k in range(self.n+1):\n for j in range(self.n):\n self.A[j][k] = self.A[j][k] - self.R[j][i] * self.A[i][k]\n\n print(f\"result:\\n {self.A}\")\n\n # Applying Gauss Elimination with Partial Pivoting\n def gaussElimPivot(self):\n print(f\"\\nGuass Elimination with Partial Pivoting\\ninitial matrix:\\n {self.A}\")\n for i in range(self.n-1): # loop over columns\n for p in range(i,self.n): # search for pivot\n if abs(self.A[p][i]) > abs(self.A[i][i]): # compare values for largest\n self.A[[i,p]] = self.A[[p,i]] # interchange rows\n print(f\"\\ninterchange: r{i+1} <=> r{p+1} \\nresult: \\n {self.A}\")\n else:\n pass\n print(f\"\\ntransformation #{i+1}:\")\n\n # Compute multipliers for current column\n for j in range(i+1, self.n):\n self.R[j][i] = self.A[j][i] / self.A[i][i]\n\n print(f\"row{j+1} = row{j+1} - {self.R[j][i]} * row{i+1}\")\n\n # Apply transformation to remaining submatrix\n for k in range(self.n+1):\n for j in range(self.n):\n self.A[j][k] = self.A[j][k] - self.R[j][i] * self.A[i][k]\n\n print(f\"result:\\n {self.A}\")\n\n # Back substitution\n def backSub(self):\n self.x[self.n-1] = self.A[self.n-1][self.n] / self.A[self.n-1][self.n-1]\n for i in range(self.n-2,-1,-1):\n self.x[i] = self.A[i][self.n]\n for j in range(i+1, self.n):\n self.x[i] = self.x[i] - self.A[i][j] * self.x[j]\n self.x[i] = self.x[i] / self.A[i][i]\n\n # Display solution\n def displaySolution(self):\n print(\"\\nSolution is: \")\n for i in range(self.n):\n print(\"x%d = %0.2f\" %(i+1,self.x[i]))\n\ng = Gauss()\ng.start()\nchoice = input(\"\\nEnter 1 for Pivoting, 0 for non-pivoting: \")\nif choice == '1':\n g.gaussElimPivot()\nelse:\n g.gaussElim()\ng.backSub()\ng.displaySolution()","sub_path":"Mathematics/assignment_1/src/GwoP.py","file_name":"GwoP.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"88914599","text":"import smartcar\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\nfrom flask import Flask, redirect, request, jsonify\nfrom flask_cors import CORS\n\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\nimport numpy as np\nimport json\nimport os\nimport threading\n\nclass Zone(object):\n def __init__(self, zone_id, owner, hours_active_weekly = [], points = []):\n self.owner = owner\n self.zone_id = zone_id\n self.hours_active_weekly = hours_active_weekly\n self.points = points\n\n @staticmethod\n def from_dict_firebase(source, zone_id):\n return Zone(source['owner'], zone_id, source['hours_active_weekly'], [(p.latitude, p.longitude) for p in source['points']])\n \n @staticmethod\n def from_dict_frontend(source, zone_id):\n return Zone(source['owner'], zone_id, source['hours_active_weekly'], source['points'])\n \n def to_dict_firebase(self):\n return {u'owner' : self.owner, u'hours_active_weekly' : self.hours_active_weekly, u'points' : [firestore.GeoPoint(p[0], p[1]) for p in self.points]}\n\n def to_dict(self):\n return {'owner' : self.owner, 'id' : zone_id, 'hours_active_weekly' : self.hours_active_weekly, 'points' : self.points}\n\n def __str__(self):\n return json.dumps(self.__dict__)\n\n def __repr__(self):\n return self.__str__()\n \napp = Flask(__name__)\nCORS(app)\n\n#Firestore creds\ncred = credentials.Certificate('../key.json')\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\n\n# global variable to save our access_token\naccess = None\n\nclient = smartcar.AuthClient(\n client_id=os.environ.get('CLIENT_ID'),\n client_secret=os.environ.get('CLIENT_SECRET'),\n redirect_uri=os.environ.get('REDIRECT_URI'),\n scope=['read_vehicle_info read_odometer read_location control_security control_security:lock read_vin'],\n test_mode=True,\n)\n\n@app.route('/login', methods=['GET'])\ndef login():\n auth_url = client.get_auth_url()\n return redirect(auth_url)\n\n@app.route('/exchange', methods=['GET'])\ndef exchange():\n user_id = request.args.get('user_id')\n user = db.collection(u'users').document(user_id).get().to_dict()\n\n code = request.args.get('code')\n \n global access\n access = client.exchange_code(code)\n print(access['access_token'])\n\n user['access_token'] = access['access_token']\n\n db.collection(u'users').document(user_id).set(user)\n\n return '', 200\n\n@app.route('/zones', methods=['GET'])\ndef zones():\n user_id = request.args.get('user_id')\n docs = db.collection(u'zones').where(u'owner', u'==', user_id).get()\n\n resp = []\n\n for doc in docs:\n print(doc.id)\n resp.append(Zone.from_dict_firebase(doc.to_dict()))\n\n print(resp)\n return \"{\" + str(resp) + \"}\"\n\n@app.route('/zone', methods=['POST'])\ndef zone():\n data = json.loads(request.data)\n zone = Zone.from_dict_frontend(data['zone'], data['id'])\n print(zone.zone_id)\n print(zone.to_dict)\n print(zone.to_dict_firebase)\n db.collection(u'zones').document(zone.zone_id).set(zone.to_dict_firebase())\n\n return '', 200\n\n@app.route('/vehicles', methods=['GET'])\ndef vehicles():\n user_id = requests.args.get('user_id')\n \n user = db.collection(u'users').document(user_id).get().to_dict()\n if user['access_token_expire_utc'] - time.time() < 60 * 5:\n True #Refresh token here\n \n access_token = user['access_token']\n\n vehicle_ids = smartcar.get_vehicle_ids(access_token)['vehicles']\n\n response = []\n for i in range(len(vehicle_ids)):\n vehicle = smartcar.Vehicle(vehicle_ids[i], 
access['access_token'])\n\n info = vehicle.info()\n print(info)\n vin = vehicle.vin()\n print(vin)\n odometer = vehicle.odometer()\n print(odometer)\n location = vehicle.location()\n print(location)\n\n data = {\"info\":info, \"vin\":vin, \"odometer\":odometer, \"location\":location}\n response.append(data)\n\n return jsonify(response)\n\ndef vehiclesInZones():\n zones = db.collection(u'zones').get()\n\n for zone in zones:\n if isZoneActive(zone):\n poly = Polygon(zone.to_dict()[\"points\"])\n\n user = db.collection(u'users').document(zone.to_dict()[\"owner\"]).get().to_dict()\n if user['access_token_expire_utc'] - time.time() < 60 * 5:\n True #Refresh token here\n \n access_token = user['access_token']\n vehicles = smartcar.get_vehicle_ids(access_token)['vehicles']\n\n for i in range(len(vehicles)):\n vehicle = smartcar.Vehicle(vehicles[i], access_token)\n location = vehicle.location()\n point = Point(location.latitude, location.longitude)\n if not poly.contains(point):\n True # Do something\n \n threading.Timer(60, vehiclesInZones).start()\n\n\ndef isZoneActive(zone):\n return True\n\nif __name__ == '__main__':\n app.run(port=8000)\n","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"612460499","text":"# pip install streamlit\n\nimport streamlit as st\nimport pandas as pd\nimport xgboost\nimport re\nimport numpy as np\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import decomposition, ensemble\nfrom sklearn.metrics import f1_score\nimport pickle\nfrom functools import partial\nimport pandas as pd\nimport numpy as np\nimport random\nimport re\nimport time\nimport datetime\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import sent_tokenize\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm, neighbors\nfrom sklearn.preprocessing import LabelEncoder\nfrom transformers import AutoTokenizer\nimport torch\nfrom torch.utils.data import TensorDataset, random_split, DataLoader, RandomSampler, SequentialSampler\nfrom transformers import AutoModelForSequenceClassification, AdamW, BertConfig\nfrom transformers import get_linear_schedule_with_warmup\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tensorflow as tf\nimport numpy as np\n\nNELAGT_MODELS = {\n \"Bagging_word_level\": \"For Unigram Tf-Idf feature vectors using Random Forest Classifier\",\n \"nela-gt-svm-uni\": \"\",\n \"article_transformers\":\"\",\n \"nela-gt-titles-svm\":\"\",\n \"Bagging_word_level_title\":\"\",\n \"rnn_lstm\":\"\",\n \"nela-gt-title-roberta\": \"\"\n\n}\nCOVID_MODELS = {\"covid-svm-uni\": \"\", \"Bagging_word_level\":\"\", \"covid-roberta\": \"\"}\nLABELS = ['fake','real']\nPYTORCH_ARTICLE_TRANSFORMERS_MODEL = None\nDEVICE = torch.device(\"cpu\")\n\ndef article_transformer(inp_text):\n \n tokenizer = AutoTokenizer.from_pretrained('textattack/roberta-base-SST-2')\n model = PYTORCH_ARTICLE_TRANSFORMERS_MODEL\n \n REPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,;]')\n BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')\n STOPWORDS = []\n def clean_text(text): \n text = text.lower() # lowercase text\n text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text. 
substitute the matched string in REPLACE_BY_SPACE_RE with space.\n text = BAD_SYMBOLS_RE.sub('', text) # remove symbols which are in BAD_SYMBOLS_RE from text. substitute the matched string in BAD_SYMBOLS_RE with nothing. \n text = ' '.join(word for word in text.split() if word not in STOPWORDS) # remove stopwors from text\n return text\n \n def predict(article, model):\n article = clean_text(article)\n article.replace('\\d+', '')\n input_ids = []\n attention_masks = []\n MAX_LENGTH = 220\n encoded_dict = tokenizer.encode_plus(\n article, # Sentence to encode.\n truncation=True,\n add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n max_length = MAX_LENGTH, # Pad & truncate all sentences.\n pad_to_max_length = True,\n return_attention_mask = True, # Construct attn. masks.\n return_tensors = 'pt', # Return pytorch tensors.\n )\n input_ids.append(encoded_dict['input_ids'])\n attention_masks.append(encoded_dict['attention_mask'])\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n # labels = torch.tensor(categories)\n batch_size = 1\n prediction_data = TensorDataset(input_ids, attention_masks)\n prediction_sampler = SequentialSampler(prediction_data)\n prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)\n model.eval()\n for batch in prediction_dataloader:\n # Add batch to GPU\n batch = tuple(t.to(DEVICE) for t in batch)\n b_input_ids, b_input_mask = batch\n with torch.no_grad():\n # Forward pass, calculate logit predictions\n outputs = model(b_input_ids, token_type_ids=None, \n attention_mask=b_input_mask)\n logits = outputs[0]\n logits = logits.detach().cpu().numpy()\n return np.argmax(logits[0])\n\n def get_split(text1):\n l_total = []\n l_parcial = []\n if len(text1.split())//150 >0:\n n = len(text1.split())//150\n else: \n n = 1\n for w in range(n):\n if w == 0:\n l_parcial = text1.split()[:200]\n l_total.append(\" \".join(l_parcial))\n else:\n l_parcial = text1.split()[w*150:w*150 + 200]\n l_total.append(\" \".join(l_parcial))\n return l_total\n\n def pipeline(article, model):\n splits = get_split(article)\n fake = 0\n real = 0\n for i in splits:\n if predict(i, model):\n fake += 1\n else:\n real += 1\n if real > fake:\n return \"REAL\"\n else:\n return \"FAKE\"\n\n return pipeline(inp_text, model)\n\ndef label_encode(val):\n return LABELS.index(val)\n\ndef clean_text(text):\n REPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,;]')\n BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')\n STOPWORDS = []\n text = text.lower() # lowercase text\n text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text. substitute the matched string in REPLACE_BY_SPACE_RE with space.\n text = BAD_SYMBOLS_RE.sub('', text) # remove symbols which are in BAD_SYMBOLS_RE from text. substitute the matched string in BAD_SYMBOLS_RE with nothing. 
\n text = ' '.join(word for word in text.split() if word not in STOPWORDS) # remove stopwors from text\n return text\n\ndef vectorize(t, input_text):\n out_vector = None\n pickle_path = \"./models/\" + t +\"/tfidf_vect.pkl\"\n\n with open(pickle_path, 'rb') as file:\n pickle_model = pickle.load(file)\n\n out_vector = pickle_model.transform(input_text)\n return out_vector\n\ndef vectorize_title(t, input_text):\n out_vector = None\n pickle_path = \"./models/\" + t +\"/tfidf_vect_title.pkl\"\n\n with open(pickle_path, 'rb') as file:\n pickle_model = pickle.load(file)\n\n out_vector = pickle_model.transform(input_text)\n return out_vector\n\n\ndataset = st.sidebar.selectbox(\n 'Choose a dataset',\n [\"None\"]+ [\"covid\",\"nelagt\"], key=\"dataset\")\n\nmodels = None\nif(dataset and dataset == \"covid\"):\n models = COVID_MODELS\nelif(dataset and dataset == 'nelagt'):\n models = NELAGT_MODELS\n\nif dataset != \"None\":\n option = st.sidebar.selectbox(\n 'Choose a model',\n [\"None\"] + list(models.keys()), key = \"model\")\n\n\n\n\nif(models):\n def load_return_model(t, model_name):\n pickle_path = \"./models/\"+ t +\"/\"+model_name+\".pkl\"\n with open(pickle_path, 'rb') as file:\n pickle_model = pickle.load(file)\n return pickle_model\n\n\n if option != \"None\":\n models[option]\n\n if st.checkbox('Predict Manual Text', key=\"manual_text\"):\n inp_text = st.text_input(\"Input Text\", value='', type='default')\n \n if(inp_text):\n if dataset==\"nelagt\" and option == \"article_transformers\":\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\nela-gt-article-cascadingRoberta-epoch2.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n\n predicted = article_transformer(inp_text)\n predicted\n elif dataset==\"nelagt\" and option == \"rnn_lstm\":\n new_model = tf.keras.models.load_model('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\rnn_lstm_v1.tf')\n pred = new_model.predict_classes(np.array([inp_text]))\n print(pred)\n if pred == 0:\n pred = \"reliable\"\n else:\n pred = \"unreliabe\"\n pred\n elif dataset==\"covid\" and option == \"covid-roberta\":\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\covid\\\\covid-roberta.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n\n predicted = article_transformer(inp_text)\n predicted\n elif dataset == \"covid\" and option == \"nela-gt-title-roberta\":\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\nela-gt-title-roberta.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n\n predicted = article_transformer(inp_text)\n predicted\n else:\n cleaned = [clean_text(inp_text)]\n vectorized = vectorize(dataset, cleaned)\n pickle_model = load_return_model(dataset, option)\n\n Ypredict = pickle_model.predict(vectorized)\n # proba = pickle_model.predict_proba(vectorized)\n # proba\n # explainer = LimeTextExplainer(class_names=[0,2])\n print(Ypredict)\n if dataset == \"covid\":\n if Ypredict[0] == 0:\n Ypredict = \"fake\"\n else:\n Ypredict = \"real\"\n if dataset == \"nelagt\":\n if Ypredict[0] 
== 0:\n Ypredict = \"reliable\"\n else:\n Ypredict = \"unreliable\"\n Ypredict\n \n # exp = explainer.explain_instance(cleaned, proba, num_features=6)\n # exp = explainer.explain_instance(cleaned, pickle_model.predict_proba, num_features=6)\n\n\n if st.checkbox('Predict for n random samples'):\n n_sample = st.text_input(\"n:\", value='', type='default', key='n_sample')\n if n_sample: \n if dataset==\"nelagt\":\n totalData = pd.read_csv('../nela10.csv')\n if(option == \"article_transformers\"):\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\nela-gt-article-cascadingRoberta-epoch2.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n totalData = totalData.sample(n=int(n_sample))\n totalData = totalData.drop(['id','date','source','title','author','url','published','published_utc','collection_utc'],axis=1)\n totalData[\"Predicted\"] = totalData.content.apply(article_transformer)\n totalData\n elif \"title\" in option: #TITLE\n if option == \"nela-gt-title-roberta\":\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\nela-gt-title-roberta.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n totalData = totalData.sample(n=int(n_sample))\n totalData = totalData.drop(['id','date','source','content','author','url','published','published_utc','collection_utc'],axis=1)\n totalData[\"Predicted\"] = totalData.title.apply(article_transformer)\n totalData\n else: \n totalData = totalData.sample(n=int(n_sample))\n totalData = totalData.drop(['id','date','source','content','author','url','published','published_utc','collection_utc'],axis=1)\n totalData.title = totalData.title.apply(clean_text)\n totalData.title = totalData.title.str.replace('\\d+', '')\n pickle_model = load_return_model(\"nelagt\", option)\n totalData[\"Predicted_Reliability\"] = pickle_model.predict(vectorize_title(\"nelagt\", totalData[\"title\"].tolist()))\n totalData[\"Predicted_Reliability\"] = totalData[\"Predicted_Reliability\"].apply(lambda x : \"reliable\" if x == 0 else \"unreliable\")\n totalData\n elif option == \"rnn_lstm\":\n totalData = totalData.sample(n=int(n_sample))\n totalData = totalData.drop(['id','date','source','title','author','url','published','published_utc','collection_utc'],axis=1)\n new_model = tf.keras.models.load_model('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\nelagt\\\\rnn_lstm_v1.tf')\n totalData[\"predicted\"] = new_model.predict_classes(totalData[\"content\"].tolist())\n totalData[\"predicted\"] = totalData[\"predicted\"].apply(lambda x : \"reliable\" if x == 0 else \"unreliable\")\n totalData[\"Reliability\"] = totalData[\"Reliability\"].apply(lambda x : \"reliable\" if x == 0 else \"unreliable\")\n totalData\n else:\n totalData = totalData.sample(n=int(n_sample))\n totalData = totalData.drop(['id','date','source','title','author','url','published','published_utc','collection_utc'],axis=1)\n totalData.content = totalData.content.apply(clean_text)\n totalData.content = totalData.content.str.replace('\\d+', '')\n pickle_model = load_return_model(\"nelagt\", option)\n totalData[\"Predicted_Reliability\"] = pickle_model.predict(vectorize(\"nelagt\", totalData[\"content\"].tolist()))\n 
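# note (added for clarity): the nelagt labels are numeric codes; the mappings below assume 0 -> reliable, 1 -> unreliable\n                    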
totalData[\"Reliability\"] = totalData[\"Reliability\"].apply(lambda x : \"reliable\" if x == 0 else \"unreliable\")\n\n totalData[\"Predicted_Reliability\"] = totalData[\"Predicted_Reliability\"].apply(lambda x : \"reliable\" if x == 0 else \"unreliable\")\n totalData\n elif dataset==\"covid\":\n totalData = pd.read_csv('../Covid_Constraint_English_Train - Sheet1.csv')\n totalData = totalData.sample(n=int(n_sample))\n if option == \"covid-roberta\":\n with open('E:\\\\pramu\\\\projects\\\\final_project_fake_news_classsification\\\\Identification-of-fake-news-in-online-news-media\\\\streamlit\\\\models\\\\covid\\\\covid-roberta.pt','rb') as f:\n PYTORCH_ARTICLE_TRANSFORMERS_MODEL = torch.load(f, map_location = DEVICE)\n totalData[\"Predicted\"] = totalData.tweet.apply(article_transformer)\n totalData[\"Predicted\"] = totalData[\"Predicted\"].apply(lambda x : \"real\" if x == \"FAKE\" else \"fake\")\n totalData\n else:\n totalData.label = totalData.label.apply(label_encode)\n totalData.tweet = totalData.tweet.apply(clean_text)\n totalData.tweet = totalData.tweet.str.replace('\\d+', '')\n pickle_model = load_return_model(\"covid\", option)\n totalData[\"Predicted\"] = pickle_model.predict(vectorize(\"covid\", totalData[\"tweet\"].tolist()))\n totalData[\"label\"] = totalData[\"label\"].apply(lambda x : \"real\" if x == 1 else \"fake\")\n totalData[\"Predicted\"] = totalData[\"Predicted\"].apply(lambda x : \"real\" if x == 1 else \"fake\")\n totalData\n\n\n\n\n\n\n","sub_path":"streamlit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"236160522","text":"# www.NeatChange.com\n# Make a difference in your life !\n#\n# Poplar Oct 23 2017\n# 数字游戏\n\n\"\"\"\n\n时间限制:1秒 空间限制:32768K\n\n\n题目描述:\n小易邀请你玩一个数字游戏,小易给你一系列的整数.你们俩使用这些整数玩游戏.\n每次小易会任意说一个数字出来,然后你需要从这一系列数字中选取一部分出来让它们的和等于小易所说的数字. 
\nFor example: if {2,1,2,7} is the series of numbers you have and Xiaoyi says 11, you can form the combination 2+2+7 = 11.\nIf naughty Xiaoyi wants to trick you and says 6, there is no way to piece together a sum of 6. Now Xiaoyi gives you n numbers and asks you to find the smallest number that cannot be obtained by summing a selection of those n numbers.\n\n\nInput description:\nThe first line contains the count of numbers n (n ≤ 20)\nThe second line contains the n numbers xi (1 ≤ xi ≤ 100000)\n\n\nOutput description:\nOutput the smallest number that cannot be composed by selecting and summing some of the n numbers\n\n\nExample:\n\nInput\n3\n5 1 2\n\nOutput\n4\n\n\"\"\"\n\nn, nums = int(input()), list(map(int, input().split()))\nnums.sort()\noutput = 0\n\nfor i in range(n):\n    if nums[i] > output + 1:\n        break\n    else:\n        output += nums[i]\n\nprint(output+1)\n\n'''\nRuntime: 56ms   Memory used: 5108k\n'''\n\n","sub_path":"Study/Notes/Algorithm/17.数字游戏.py","file_name":"17.数字游戏.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630619978","text":"class Solution(object):\n    def groupAnagrams(self, strs):\n        map = {}\n        for str in strs :\n            key = self.getKey(str)\n            if map.get(key) == None :\n                map[key] = []\n\n            anagrams = map.get(key, [])\n            anagrams.append(str)\n\n        return map.values()\n\n    def getKey(self, chars):\n        list = [0 for idx in range(ord('z') - ord('a') + 1)]\n        for ch in chars :\n            idx = ord(ch) - ord('a')\n            list[idx] = list[idx] + 1\n\n        return \"\".join([str(ch) for ch in list])\n\nsolution = Solution()\nprint(solution.groupAnagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\nprint(solution.groupAnagrams([\"hos\",\"boo\",\"nay\",\"deb\",\"wow\",\"bop\",\"bob\",\"brr\",\"hey\",\"rye\",\"eve\",\"elf\",\"pup\",\"bum\",\"iva\",\"lyx\",\"yap\",\"ugh\",\"hem\",\"rod\",\"aha\", \"nam\",\"gap\",\"yea\",\"doc\",\"pen\",\"job\",\"dis\",\"max\",\"oho\",\"jed\",\"lye\",\"ram\",\"pup\",\"qua\",\"ugh\",\"mir\",\"nap\",\"deb\",\"hog\",\"let\",\"gym\",\"bye\",\"lon\",\"aft\",\"eel\",\"sol\",\"jab\"]))\nprint(solution.groupAnagrams([\"tin\",\"ram\",\"zip\",\"cry\",\"pus\",\"jon\",\"zip\",\"pyx\"]))\n","sub_path":"src/main/python/leetcode/group-anagrams.py","file_name":"group-anagrams.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215515812","text":"from sklearn import datasets\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n### load the data ###\r\nload_data = datasets.load_boston()\r\ndata_X = load_data.data\r\ndata_y = load_data.target\r\nprint(data_X.shape)\r\n# (506, 13): data_X has 13 feature variables\r\n\r\n'''\r\nOnce training is finished we obtain a model, and depending on the model we can read out its attributes and\r\nutilities to get an intuitive view of the result.\r\nFor instance, if linear regression yields the function y=0.3x+1, coef_ gives the model coefficient 0.3\r\nand intercept_ gives the model intercept 1.\r\n'''\r\n\r\n### train the model ###\r\nmodel = LinearRegression()\r\nmodel.fit(data_X, data_y)\r\n# predict the first 4 samples\r\nmodel.predict(data_X[:4, :])\r\n\r\n### attributes and utilities ###\r\nprint(model.coef_)\r\nprint(model.intercept_)\r\n\r\n# model parameters\r\nprint(model.get_params())\r\n\r\n# score the fit on the training data\r\nprint(model.score(data_X, data_y))\r\n","sub_path":"basic_/sklearn_/model_.py","file_name":"model_.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606331623","text":"import click\n\n@click.command()\n@click.option('--builderclass', default='buildApp', help='Name of your app class that returns root widget.')\n@click.option('--rootwidget', default='rootWidget', help='Name of your root widget found in Python file',)\n\ndef build(builderclass, rootwidget):\n    f = open(builderclass.lower() + '.kv', \"a+\")\n    f.write('<'+rootwidget+'>:')\n\n    p = open('yourApp.py','a+')\n    p.write(\"import kivy\\n\"\n            \"from kivy.uix.boxlayout import BoxLayout\\n\"\n            \"from kivy.app import App\\n\"\n            \"\\n\"\n            \"class \"+rootwidget+\"(BoxLayout):\\n\"\n            \"    def 
__init__(self):\\n\"\n            \"        super(\"+rootwidget+\", self).__init__()\\n\"\n            \"\\n\"\n            \"\\n\"\n            \"class \"+ builderclass+\"(App):\\n\"\n            \"    def build(self):\\n\"\n            \"        return \"+ rootwidget+\"()\\n\"\n            \"\\n\"\n            \"\\n\"\n            \"if __name__=='__main__':\\n\"\n            \"    \"+builderclass+\"().run()\")\n\nif __name__=='__main__':\n    build()\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"537162341","text":"n = 3\n\n# recursive - o(2^n) runtime, o(n) space\ndef climbStairs(n):\n    if n == 0 or n == 1:\n        return 1\n    return climbStairs(n-1) + climbStairs(n-2)\n\n# recursive w/ memoization - o(n) runtime, o(n) space\ndef climbStairs(n):\n    memo = {0: 1, 1: 1}\n    res = helper(n, memo)\n    return res\n    \ndef helper(n, memo):\n    if n not in memo:\n        memo[n] = helper(n-1, memo) + helper(n-2, memo)\n    return memo[n]\n\n# dp - o(n) runtime, o(n) space (module-level, so print instead of return)\ndp = {0: 1, 1: 1}\nif n >= 2:\n    for i in range(2, n+1):\n        dp[i] = dp[i-1] + dp[i-2]\nprint(dp[n])\n","sub_path":"easy/70.py","file_name":"70.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"81830467","text":"\nimport time\nimport json\nimport base64\nimport msgpack\nfrom schema import Schema, And, Optional\nfrom datetime import datetime\nfrom algosdk import mnemonic\nfrom algosdk.account import address_from_private_key\nfrom algosdk.error import *\nfrom algosdk.future.transaction import PaymentTxn\nfrom inequality_indexes import *\nfrom algo_query import *\n\n\ndef wait_for_confirmation(algod_client, transaction_id, timeout):\n    \"\"\"Wait until the transaction is confirmed or rejected, or until 'timeout'\n    number of rounds have passed.\n\n    Args:\n        algod_client (AlgodClient): Algod Client\n        transaction_id (str): the transaction to wait for\n        timeout (int): maximum number of rounds to wait\n\n    Returns:\n        (dict): pending transaction information, or throws an error if the\n        transaction is not confirmed or rejected in the next timeout rounds\n    \"\"\"\n    start_round = algod_client.status()[\"last-round\"] + 1\n    current_round = start_round\n\n    while current_round < start_round + timeout:\n        algod_client.status_after_block(current_round)\n        try:\n            pending_txn = algod_client.pending_transaction_info(transaction_id)\n        except Exception:\n            return\n        if pending_txn.get(\"confirmed-round\", 0) > 0:\n            return pending_txn\n        elif pending_txn[\"pool-error\"]:\n            raise Exception(\n                'pool error: {}'.format(pending_txn[\"pool-error\"]))\n        current_round += 1\n    raise Exception(\n        'pending tx not found in timeout rounds, timeout value = : {}'.format(\n            timeout))\n\n\ndef post_ppos_dex_data(algod_client, indexer_client, passphrase,\n                       algo_threshold):\n\n    private_key = mnemonic.to_private_key(passphrase)\n\n    account = {'pk': address_from_private_key(private_key),\n               'sk': private_key}\n\n    CONNECTION_ATTEMPT_DELAY_SEC = 3\n    MAX_CONNECTION_ATTEMPTS = 10\n    MICROALGO_TO_ALGO = 1 / 10 ** 6\n    MICROALGO_TOTAL_SUPPLY = 10 ** 16\n\n    attempts = 1\n    params = None\n    ledger = None\n    while attempts <= MAX_CONNECTION_ATTEMPTS:\n        try:\n            params = algod_client.suggested_params()\n            ledger = algod_client.ledger_supply()\n            break\n        except AlgodHTTPError:\n            print(f\"Algod Client connection attempt \"\n                  f\"{attempts}/{MAX_CONNECTION_ATTEMPTS}\")\n            print(\"Trying to contact Algod Client again...\")\n            time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)\n        finally:\n            attempts += 1\n            if attempts > 
MAX_CONNECTION_ATTEMPTS:\n quit(\"Unable to connect to Algod Client.\")\n\n attempts = 1\n algo_owners = None\n while attempts <= MAX_CONNECTION_ATTEMPTS:\n try:\n algo_owners = get_algo_owners(indexer_client, algo_threshold)\n break\n except IndexerHTTPError:\n print(f\"Indexer Client connection attempt \"\n f\"{attempts}/{MAX_CONNECTION_ATTEMPTS}\")\n print(\"Trying to contact Indexer Client again...\")\n time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)\n finally:\n attempts += 1\n if attempts > MAX_CONNECTION_ATTEMPTS:\n quit(\"Unable to connect to Indexer Client.\")\n\n stakes = [account['amount'] * MICROALGO_TO_ALGO for\n account in algo_owners]\n algo_hhi = herfindahl_hirschman_index(stakes)\n online_stakes = [account['amount'] * MICROALGO_TO_ALGO\n for account in algo_owners\n if account['status'] == 'Online']\n algo_dynamics = ledger['total-money'] / MICROALGO_TOTAL_SUPPLY\n ppos_online_stake = ledger['online-money'] / ledger['total-money']\n ppos_online_accounts = len(online_stakes) / len(algo_owners)\n ppos_gini = gini_index(online_stakes)\n ppos_theil_l = theil_l_index(online_stakes)\n ppos_theil_t = theil_t_index(online_stakes)\n ppos_hhi = herfindahl_hirschman_index(online_stakes)\n ppos_dex = (algo_dynamics\n * ppos_online_stake\n * ppos_online_accounts\n * (1 - ppos_gini))\n\n note = {'algo_threshold': algo_threshold,\n 'accounts': len(algo_owners),\n 'algo_hhi': algo_hhi,\n 'algo_dynamics': algo_dynamics,\n 'ppos_online_stake': ppos_online_stake,\n 'ppos_online_accounts': ppos_online_accounts,\n 'ppos_gini': ppos_gini,\n 'ppos_theil_l': ppos_theil_l,\n 'ppos_theil_t': ppos_theil_t,\n 'ppos_hhi': ppos_hhi,\n 'ppos_dex': ppos_dex,\n 'timestamp': str(datetime.now())}\n\n bytes_note = msgpack.packb(note)\n\n unsigned_txn = PaymentTxn(sender=account['pk'],\n sp=params,\n receiver=account['pk'],\n amt=0,\n note=bytes_note)\n\n signed_txn = unsigned_txn.sign(account['sk'])\n txid = algod_client.send_transaction(signed_txn)\n print(\"Publishing Algorand PPoS Dex data in txID: {}\".format(txid))\n\n try:\n confirmed_txn = wait_for_confirmation(algod_client, txid, 4)\n except Exception as err:\n print(err)\n return\n\n print(\"txID: {}\".format(txid), \" confirmed in round: {}\\n\".format(\n confirmed_txn.get(\"confirmed-round\", 0)))\n print(\"Transaction information:\\n{}\".format(\n json.dumps(confirmed_txn, indent=4)))\n\n\ndef get_ppos_dex_data(indexer_client, ppos_dex_address, algo_threshold,\n start_block=11476070, end_block=None):\n\n CONNECTION_ATTEMPT_DELAY_SEC = 3\n MAX_CONNECTION_ATTEMPTS = 10\n\n attempts = 1\n ppos_dex_txns_note = None\n while attempts <= MAX_CONNECTION_ATTEMPTS:\n try:\n ppos_dex_txns_note = get_address_txns_note(\n indexer_client, ppos_dex_address, start_block, end_block)\n break\n except IndexerHTTPError:\n print(f\"Indexer Client connection attempt \"\n f\"{attempts}/{MAX_CONNECTION_ATTEMPTS}\")\n print(\"Trying to contact Indexer Client again...\")\n time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)\n finally:\n attempts += 1\n if attempts > MAX_CONNECTION_ATTEMPTS:\n quit(\"Unable to connect to Indexer Client.\")\n\n # TODO: make 'algo_hhi' and 'ppos_hhi' mandatory fileds in the schema\n schema = Schema({\n 'algo_threshold': int,\n 'accounts': And(int, lambda n: 0 <= n),\n Optional('algo_hhi'): And(float, lambda n: 0 <= n <= 1),\n 'algo_dynamics': And(float, lambda n: 0 <= n),\n 'ppos_online_stake': And(float, lambda n: 0 <= n <= 1),\n 'ppos_online_accounts': And(float, lambda n: 0 <= n <= 1),\n 'ppos_gini': And(float, lambda n: 0 <= n <= 1),\n 'ppos_theil_l': 
And(float, lambda n: 0 <= n),\n 'ppos_theil_t': And(float, lambda n: 0 <= n),\n Optional('ppos_hhi'): And(float, lambda n: 0 <= n <= 1),\n 'ppos_dex': And(float, lambda n: 0 <= n <= 1),\n 'timestamp': str\n })\n\n ppos_dex_data = []\n for txn_note in ppos_dex_txns_note:\n try:\n data = schema.validate(\n msgpack.unpackb(base64.b64decode(txn_note))\n )\n if data['algo_threshold'] == algo_threshold:\n ppos_dex_data += [data]\n except:\n pass\n\n if not ppos_dex_data:\n quit(f\"Impossible to find valid PPos Dex data published by \"\n f\"{ppos_dex_address} starting from block {start_block}.\")\n\n return ppos_dex_data\n","sub_path":"ppos_dex_data.py","file_name":"ppos_dex_data.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"538402644","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as pl\n\nfrom dynamic_graph.sot.reaching import CubicInterpolationSE3\n\ninterpolation = CubicInterpolationSE3 ('interpolation')\n\ninit = ((1.,0.,0.,0.),\n (0.,1.,0.,1.),\n (0.,0.,1.,0.),\n (0.,0.,0.,1.))\n\ngoal = ((1.,0.,0.,0.),\n (0.,1.,0.,4.),\n (0.,0.,1.,0.),\n (0.,0.,0.,1.))\n\ninterpolation.init.value = init\ninterpolation.goal.value = goal\n\nsamplingPeriod = .01\ninterpolation.setSamplingPeriod (samplingPeriod)\ninterpolation.start (2.)\n\ntimes = []\nvalues = []\nfor t in range (220):\n interpolation.reference.recompute (t)\n reference = interpolation.reference.value\n interpolation.init.value = reference\n times.append (t*samplingPeriod)\n values.append (reference [1][3])\n\nx = np.array (times)\ny = np.array (values)\n\nfig = pl.figure ()\nax = fig.add_subplot (111)\nax.plot (x, y)\n\npl.show ()\n","sub_path":"ball_reaching/test_interpolation.py","file_name":"test_interpolation.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"274973034","text":"\n\ndef box_intersect(box_1, box_2):\n line_1_1 = [box_1[0], box_1[0] + box_1[2]]\n line_1_2 = [box_1[1], box_1[1] + box_1[3]]\n line_2_1 = [box_2[0], box_2[0] + box_2[2]]\n line_2_2 = [box_2[1], box_2[1] + box_2[3]]\n if line_intersect(line_1_1,line_2_1) and line_intersect(line_1_2,line_2_2):\n return True\n else:\n return False\n\ndef line_intersect(line_1, line_2):\n if line_1[0] > line_2[1] or line_1[1] < line_2[0]:\n return False\n else:\n return True\n\ndef to_float(list):\n new_list = []\n for item in list:\n new_list.append(float(item))\n return new_list\n\nTP = 0\nFP = 0\nFN = 0\n\n\nlist_no_nodule = []\nlist_with_nodule = []\n\nwith open('result_per_slice.txt', 'r') as file:\n for line in file.readlines():\n line = line.strip()\n filename = line.split('L')[0].strip()\n try:\n rest = line.split('L')[1].strip()\n except:\n pass\n if rest:\n true_box = rest.split('D')[0].strip().split(' ')\n if true_box != ['']:\n true_box = to_float(true_box)\n # print(true_box)\n try:\n predict_box = rest.split('D')[1].strip().split(' ')\n if predict_box != ['']:\n predict_box = to_float(predict_box)\n # print(predict_box)\n except:\n pass\n\n if 'colored_lung_mask' in filename:\n if predict_box == [''] :\n FN += 1\n else:\n nodule_number = len(true_box)/4\n if nodule_number == 1:\n if box_intersect(true_box, predict_box):\n TP += 1\n else:\n print('no intersection')\n print(true_box)\n print(predict_box)\n FP += 1\n elif nodule_number == 2:\n if len(predict_box)/4 != 2:\n print('fewer prediction')\n print(true_box)\n print(predict_box)\n elif 
box_intersect(true_box[0:4], predict_box[0:4]) and box_intersect(true_box[4:8], predict_box[4:8]):\n TP += 1\n elif box_intersect(true_box[0:4], predict_box[4:8]) and box_intersect(true_box[4:8], predict_box[0:4]):\n TP += 1\n else:\n print('no intersection')\n print(true_box)\n print(predict_box)\n FP += 1\n else:\n print(filename)\n print('nodule_number: %d' % (nodule_number))\n print('truth: '+ str(true_box))\n print('predict: '+ str(predict_box))\n\n elif 'black_slice' in filename:\n if predict_box:\n print('FP: ' + str(predict_box))\n FP += 1\n\nprint('TP: %d, FP: %d, FN: %d'%(TP,FP,FN))\n\n\n","sub_path":"result_per_noudle.py","file_name":"result_per_noudle.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"480385659","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Homing\n@software: PyCharm Community Edition\n@file: PE-zl.py\n@time: 2017/6/11 1:04\n\"\"\"\n\nimport urllib.request\nimport sys\nimport io\nfrom bs4 import BeautifulSoup\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')\nimport urllib.parse\n\ndef get_content(page):\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0 ', 'host':'img01.zhaopin.cn'} # 模拟浏览器\n url ='http://sou.zhaopin.com/jobs/searchresult.ashx?jl='+urllib.parse.quote(\"全国\")+'&kw=pe'+urllib.parse.quote(\"工程师\") + '&sm=0&isadv=0&sg=6142d161a62a4464ad32074d98bc167d&p='+str(page)+'.html'\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n data = opener.open(url, timeout=30)\n html = data.read()\n data.close()\n return html\n\n\n\n\nl = []\n\nfor page in range(1,28):\n soup = BeautifulSoup(get_content(page), 'lxml',from_encoding='utf-8')\n for i in soup.find_all('td', class_=['zwmc', 'gsmc', 'zwyx', 'gzdd', 'gxsj']):\n if len(i.get_text().strip()) > 1:\n l.append(i.get_text().strip())\n i = str(i).strip()\n k = i.find('href')\n m = i.find('htm')\n t = i.find(r'jobs')\n\n if k > 0 and t<0 and len(i.strip())>1 and len(i)>k+6 :\n if m<0 and 'java' not in i :\n l.append('Not exist')\n else:\n l.append(str(i[k+6:m+3]).strip())\n\n\n\nl1 = []\nfor i in l:\n if len(i)>1:\n l1.append(i)\n\n\nfo = open('pe_job_zl.csv', 'w', encoding='utf-8')\nfo.write('岗位')\nfo.write(',')\nfo.write('公司')\nfo.write(',')\nfo.write('链接')\nfo.write(',')\nfo.write('工资')\nfo.write(',')\nfo.write('地区')\nfo.write(',')\nfo.write('发布时间')\nfo.write('\\n')\nfor i in range(len(l1)):\n if i%6 == 0:\n fo.write(l1[i])\n fo.write(' , ')\n if i%6 == 1:\n fo.write(l1[i])\n fo.write(' , ')\n if i%6 ==2:\n fo.write(l1[i])\n fo.write(' , ')\n if i%6 ==3:\n fo.write(l1[i])\n fo.write(' , ')\n if i%6 ==4:\n fo.write(l1[i])\n fo.write(' , ')\n if i%6 ==5:\n fo.write(l1[i])\n fo.write('\\n')\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python/PE-zl.py","file_name":"PE-zl.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181330304","text":"# Verilen aralıkta asal sayı bulma\n\nsayac = 0\nsayi1 = int(input(\"Sayı 1: \"))\nsayi2 = int(input(\"Sayı 2: \"))\n\nprint(sayi1, ' ve ', sayi2, ' Arasındaki Asal Sayılar:')\n\nfor sayi in range(sayi1, sayi2 + 1):\n if sayi > 1:\n for i in range(2, sayi):\n if (sayi % i) == 0:\n break\n else:\n print(sayi)\n sayac = sayac + 1\nprint('Verilen aralıkta {0} adet asal sayı 
var.'.format(sayac))","sub_path":"Algoritma2.py","file_name":"Algoritma2.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"33285785","text":"import os\nimport numpy as np\nimport collections as collections\nimport pandas as pd\n\n#import invisible_cities.database.load_db as db\n#import invisible_cities.io.pmaps_io as pmio\n\nimport krcal.dev.corrections as corrections\nimport csth .utils.hpeak_tables as hptab\n\n\n#-----------------------------------\n# Events table\n#-----------------------------------\n\n\ndef event_list(hits):\n evts = np.unique(hits.event)\n #print(' number of events ', len(evts))\n npks = [len(np.unique(hits.npeak[hits.event == evt])) for evt in evts]\n\n \"\"\"ievts, ipks = [], []\n for i, evt in enumerate(evts):\n for ipk in range(npks[i]):\n hsel = np.logical_and(hits.event == evt, hits.npeak == ipk)\n nhits = int(np.sum(hsel))\n if (nhits > 0):\n ievts.append(evt); ipks.append(ipk)\n \"\"\"\n return hptab.EventList(evts, npks)\n\n\ndef event_table(elist, hits):\n\n evts, npks = elist.event, elist.peak\n\n size = int(np.sum(npks))\n\n etab = hptab.create_event_table(size)\n\n eindex, sindex, hindex = 0, 0, 0\n for evt, npk in zip(evts, npks):\n for ipk in range(npk):\n etab.event [eindex] = int(evt)\n etab.peak [eindex] = int(ipk)\n etab.s1e [eindex] = 1.\n hsel = np.logical_and(hits.event == evt, hits.npeak == ipk)\n etab.time [eindex] = np.unique(hits.time [hsel])[0]\n etab.x0 [eindex] = np.unique(hits.Xpeak[hsel])[0]\n etab.y0 [eindex] = np.unique(hits.Ypeak[hsel])[0]\n etab.nslices[eindex] = len(np.unique(hits.Z[hsel]))\n etab.sid [eindex] = sindex\n nhits = int(np.sum(hsel))\n etab.nhits [eindex] = nhits\n etab.hid [eindex] = hindex\n zij = hits.Z[hsel]\n nslices = len(np.unique(zij))\n e0ij = hits.E[hsel]\n e0 = np.sum(e0ij)\n z = np.sum(e0ij*zij)/e0\n etab.e0 [eindex] = e0\n etab.z0 [eindex] = z\n q0ij = hits.Q[hsel]\n etab.noqhits[eindex] = np.sum(q0ij <= 0.)\n etab.q0 [eindex] = np.sum(q0ij[q0ij >0])\n eindex += 1\n sindex += nslices\n hindex += nhits\n\n return etab\n\n#---------------------------------\n# Slices table\n#---------------------------------\n\ndef slice_table(etab, hits):\n\n nevts = len(etab.event)\n size = np.sum(etab.nslices)\n #print(\" htab total size \", size)\n stab = hptab.create_slice_table(size)\n\n for i in range(nevts):\n evt, pk = etab.event[i], etab.peak[i]\n sindex, nslices = etab.sid[i] , etab.nslices[i]\n hindex, nhits = etab.hid[i] , etab.nhits[i]\n hsel = np.logical_and(hits.event == evt, hits.npeak == pk)\n stab.event[sindex: sindex + nslices] = evt\n stab.peak [sindex: sindex + nslices] = pk\n zij = hits.Z[hsel]\n zi = np.unique(zij)\n zi.sort()\n selslices = hptab.selection_slices_by_z(zij)\n stab.slice[sindex: sindex + nslices] = range(nslices)\n stab.z0 [sindex: sindex + nslices] = zi\n e0ij = hits.E[hsel]\n stab.e0 [sindex: sindex + nslices] = np.array([np.sum(e0ij[sel]) for sel in selslices])\n q0ij = hits.Q[hsel]\n selnoq = q0ij <= 0.\n q0ij[selnoq] = 0.\n ns = np.array([np.sum(q0ij[sel]>0) for sel in selslices], dtype=int)\n stab.nhits[sindex: sindex + nslices] = ns\n q0i = np.array([np.sum(q0ij[sel]) for sel in selslices])\n stab.q0 [sindex: sindex + nslices] = q0i\n q0i[q0i <= 1.] 
= 1.\n q0ij[selnoq] = 1.\n x0ij = hits.X[hsel]\n x0ij[selnoq] = hits.Xpeak[hsel].values[selnoq]\n x0i = np.array([np.sum(q0ij[sel]*x0ij[sel]) for sel in selslices])/q0i\n stab.x0 [sindex: sindex + nslices] = x0i\n y0ij = hits.Y[hsel]\n y0ij[selnoq] = hits.Ypeak[hsel].values[selnoq]\n y0i = np.array([np.sum(q0ij[sel]*y0ij[sel]) for sel in selslices])/q0i\n stab.y0 [sindex: sindex + nslices] = y0i\n r0ij = np.sqrt(x0ij*x0ij + y0ij*y0ij)\n rmaxi = np.array([np.sum(r0ij[sel]) for sel in selslices])\n stab.rmax [sindex: sindex + nslices] = rmaxi\n\n return stab\n\n#-----------------------------\n# Hits table\n#-----------------------------\n\n\ndef hit_table(etab, hits):\n\n nevts = len(etab.event)\n\n #print(' event items ', nevts)\n size = np.sum(etab.nhits)\n\n #print(\" htab total size \", size)\n htab = hptab.create_hit_table(size)\n\n for i in range(nevts):\n evt , pk = etab.event[i], etab.peak[i]\n sindex, nslices = etab.sid[i] , etab.nslices[i]\n hindex, nhits = etab.hid[i] , etab.nhits[i]\n\n hsel = np.logical_and(hits.event == evt, hits.npeak == pk)\n htab.event[hindex: hindex+nhits] = evt\n htab.peak [hindex: hindex+nhits] = pk\n htab.nsipm[hindex: hindex+nhits] = hits.nsipm[hsel]\n q0ij = hits.Q[hsel]\n selnoq = q0ij <= 0.\n q0ij[selnoq] = 0.\n xp = np.unique(hits.Xpeak[hsel])[0]\n yp = np.unique(hits.Ypeak[hsel])[0]\n x0ij = hits.X[hsel]\n y0ij = hits.Y[hsel]\n x0ij[selnoq] = xp\n y0ij[selnoq] = yp\n htab.x0 [hindex: hindex+nhits] = x0ij\n htab.y0 [hindex: hindex+nhits] = y0ij\n htab.z0 [hindex: hindex+nhits] = hits.Z[hsel]\n htab.q0 [hindex: hindex+nhits] = q0ij\n htab.e0 [hindex: hindex+nhits] = hits.E[hsel]\n zij = hits.Z[hsel]\n ij = np.zeros(nhits)\n selslices = hptab.selection_slices_by_z(zij)\n for k, kslice in enumerate(selslices):\n ij [kslice] = k\n htab.slice[hindex: hindex+nhits] = ij\n return htab\n\n\n#-----------------------------\n# calibrate hits\n#-----------------------------\n\n\ndef calibrate_hits(htab, calibrate):\n\n x = htab.x0\n y = htab.y0\n z = htab.z0\n\n size = len(htab.q0)\n e1 = np.ones(size)\n q0 = htab.q0\n\n ec, qc = calibrate(x, y, z, None, e1, q0)\n\n htab.e[:] = ec[:]\n htab.q[:] = qc[:]\n\n return htab\n\n\n#----------------------------------------\n# Update the tables\n#----------------------------------------\n\ndef update_tables(etab, stab, htab):\n\n evts = etab.event\n\n for eindex in range(len(evts)):\n evt , pk = etab.event[eindex], etab.peak[eindex]\n sindex, nslices = etab.sid [eindex], etab.nslices[eindex]\n hindex, nhits = etab.hid [eindex], etab.nhits[eindex]\n\n # sum the charge per slices\n hsel = np.logical_and(htab.event == evt, htab.peak == pk)\n ssel = np.logical_and(stab.event == evt, stab.peak == pk)\n\n qij = htab.q[hsel]\n islices = htab.slice[hsel]\n selslices = hptab.selection_slices_by_slice(islices, nslices)\n qi = np.array([np.sum(qij[sel]) for sel in selslices])\n stab.q [sindex: sindex + nslices] = qi\n etab.q [eindex] = np.sum(qi)\n\n # corrected energy per hit\n qij[qij <= 1.] = 1.\n qi [qi <= 1.] = 1.\n eij = htab.e [hsel]\n e0i = stab.e0[ssel]\n for k, kslice in enumerate(selslices):\n eij [kslice] *= e0i[k] * qij [kslice]/qi[k]\n htab.e [hindex: hindex + nhits] = eij\n\n # sum energy per slice\n ei = np.array([np.sum(eij[sel]) for sel in selslices])\n #print('ei ', ei)\n stab.e [sindex: sindex + nslices ] = ei\n ee = np.sum(ei)\n etab.e [eindex] = ee\n\n # compute the average position per slice\n ei[ei <= 1.] 
= 1.\n if (ee <= 1.):\n ee = 1.\n\n # compute the average position per slice\n x0ij = htab.x0[hsel]\n y0ij = htab.y0[hsel]\n\n xi = np.array([np.sum(x0ij[sel]*eij[sel])/ei[k] for k, sel in enumerate(selslices)])\n yi = np.array([np.sum(y0ij[sel]*eij[sel])/ei[k] for k, sel in enumerate(selslices)])\n stab.x [sindex: sindex + nslices] = xi\n stab.y [sindex: sindex + nslices] = yi\n\n # compute average position per event\n etab.x [eindex] = np.sum(xi*ei)/ee\n etab.y [eindex] = np.sum(yi*ei)/ee\n z0i = stab.z0 [ssel]\n etab.z [eindex] = np.sum(z0i*ei)/ee\n\n # store the maxium hit radius\n etab.rmax [eindex] = np.max(stab.rmax[ssel])\n\n return etab, stab, htab\n\n#------------------------\n# Main driver\n#------------------------\n\n\ndef hpeaks_dfs(hits, calibrate):\n\n elist = event_list(hits)\n\n etab = event_table(elist, hits)\n\n stab = slice_table(etab, hits)\n\n htab = hit_table(etab, hits)\n\n htab = calibrate_hits(htab, calibrate)\n\n etab, stab, htab = update_tables(etab, stab, htab)\n\n edf = hptab.df_from_etable(etab)\n\n sdf = hptab.df_from_stable(stab)\n\n hdf = hptab.df_from_htable(htab)\n\n return edf, sdf, hdf\n","sub_path":"csth/utils/hpeak_hdsts_functions.py","file_name":"hpeak_hdsts_functions.py","file_ext":"py","file_size_in_byte":10318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23296250","text":"from math import inf\n\nclass Solution:\n def valid(self,i,j,d):\n return (0 <= i < self.h-d and 0 <= j < self.w-1+d and (not self.grid[i][j]) and (not self.grid[i+d][j+1-d]))\n\n def recur(self,i,j,d):\n if (i,j,d) in self.active:\n return inf\n if (i,j,d) in self.memo:\n return self.memo[(i,j,d)]\n self.active.add((i,j,d))\n\n min_moves = inf\n if (i,j,d) == (self.h-1, self.w-2,0):\n min_moves = -1\n else:\n if self.valid(i,j+1,d):\n min_moves = min(min_moves, self.recur(i,j+1,d))\n if self.valid(i+1,j,d):\n min_moves = min(min_moves, self.recur(i+1,j,d))\n if self.valid(i+1-d,j+d,d):\n min_moves = min(min_moves, self.recur(i,j,1-d))\n\n self.active.remove((i,j,d))\n self.memo[(i,j,d)] = min_moves+1\n return min_moves+1\n\n def minimumMoves(self, grid):\n self.active = set() # set of (i,j,d) that is being solved rn\n self.memo = {} #(i,j,d) -> num_moves\n # d = 0 -> horizontal, d = 1 -> vertical\n self.grid = grid\n self.h = len(grid)\n self.w = len(grid[0])\n\n ans = self.recur(0,0,0)\n return ans if ans != inf else -1\n\n\n\n","sub_path":"leetcode/hard/minimum_moves_to_reach_target_with_rotations.py","file_name":"minimum_moves_to_reach_target_with_rotations.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76344734","text":"import time\nfrom urllib.parse import urljoin\n\nimport portpicker\nimport requests\nimport zmq\nfrom loguru import logger\n\nfrom .pack import serialize, deserialize\nfrom .utils import send_heartbeat, ZMQ_IN, ZMQ_REVERSE\nfrom ..utils import (\n DEFAULT_ZMQ_IO_THREADS,\n get_server_config,\n HEARTBEAT_INTERVAL,\n HEARTBEAT_TIMEOUT,\n)\n\n\nclass SendSocket:\n def __init__(\n self,\n server_url,\n topic,\n broadcast=None,\n sock_type=None,\n ctx=None,\n conflate=None,\n hwm=5,\n public=True,\n ):\n if ctx is None:\n ctx = zmq.Context.instance(DEFAULT_ZMQ_IO_THREADS)\n\n if sock_type is None:\n assert broadcast is not None, \"broadcast or sock_type should be specified\"\n sock_type = zmq.PUB if broadcast else zmq.PUSH\n\n assert sock_type in [zmq.PUB, zmq.PUSH]\n\n if conflate is None:\n conflate 
= sock_type == zmq.PUB\n\n if public:\n self.socket = _PublicSocket(\n server_url, topic, sock_type, ctx, conflate, hwm\n )\n else:\n self.socket = _PrivateSocket(\n server_url, topic, sock_type, ctx, conflate, hwm\n )\n\n def __del__(self):\n self.close()\n\n def close(self, linger=0):\n if hasattr(self, \"socket\"):\n try:\n self.socket.close(linger)\n except requests.exceptions.ConnectionError:\n pass\n\n def poll(self, timeout=None):\n return self.socket.poll(timeout)\n\n def full(self):\n return self.socket.poll(0) == 0\n\n def send(self, data, timeout=None, compress=False):\n self.socket.send(data, timeout, compress)\n\n\nclass RecvSocket:\n def __init__(\n self,\n server_url,\n topic,\n broadcast=None,\n sock_type=None,\n ctx=None,\n conflate=None,\n hwm=5,\n public=False,\n ):\n if ctx is None:\n ctx = zmq.Context.instance(DEFAULT_ZMQ_IO_THREADS)\n\n if sock_type is None:\n assert broadcast is not None, \"broadcast or sock_type should be specified\"\n sock_type = zmq.SUB if broadcast else zmq.PULL\n\n assert sock_type in [zmq.SUB, zmq.PULL]\n\n if conflate is None:\n conflate = sock_type == zmq.SUB\n\n if public:\n self.socket = _PublicSocket(\n server_url, topic, sock_type, ctx, conflate, hwm\n )\n else:\n self.socket = _PrivateSocket(\n server_url, topic, sock_type, ctx, conflate, hwm\n )\n\n def __del__(self):\n self.close()\n\n def close(self, linger=0):\n if hasattr(self, \"socket\"):\n try:\n self.socket.close(linger)\n except requests.exceptions.ConnectionError:\n pass\n\n def poll(self, timeout=None):\n return self.socket.poll(timeout)\n\n def empty(self):\n return self.socket.poll(0) == 0\n\n def recv(self, timeout=None):\n return self.socket.recv(timeout)\n\n\nclass _PublicSocket:\n def __init__(\n self,\n server_url,\n topic,\n sock_type,\n ctx,\n conflate,\n hwm,\n ):\n self.server_url = server_url\n self.topic = topic\n self.sock_type = sock_type\n\n self._closed = False\n self.wrapper = _SocketWrapper(sock_type, ctx, conflate, hwm)\n\n server_config = get_server_config(self.server_url)\n local_ip = server_config[\"remote_ip\"]\n self.poll_flag = zmq.POLLIN if ZMQ_IN[self.sock_type] else zmq.POLLOUT\n\n port = portpicker.pick_unused_port()\n self.addr = f\"tcp://{local_ip}:{port}\"\n self.wrapper.sock.bind(self.addr)\n\n self.last_heartbeat = 0\n # heartbeat payload\n self.payload = {\n \"type\": self.sock_type,\n \"addr\": self.addr,\n \"topic\": self.topic,\n \"host\": local_ip,\n \"ttl\": HEARTBEAT_TIMEOUT,\n }\n self._send_heartbeat()\n\n def _send_heartbeat(self):\n if time.monotonic() - self.last_heartbeat > HEARTBEAT_INTERVAL:\n try:\n send_heartbeat(self.server_url, self.payload, timeout=5)\n self.last_heartbeat = time.monotonic()\n except Exception as e:\n logger.error(f\"Failed to send heartbeat. 
addr: {self.addr}, error: {e}\")\n\n def __del__(self):\n self.close()\n\n def close(self, linger=0):\n if self._closed:\n return\n self._closed = True\n\n res = requests.delete(\n urljoin(self.server_url, \"/endpoints\"), json={\"addr\": self.addr}\n )\n res.raise_for_status()\n self.wrapper.sock.close(linger)\n\n def poll(self, timeout=None):\n return self.wrapper.sock.poll(timeout=timeout, flags=self.poll_flag)\n\n def send(self, data, timeout=None, compress=False):\n self._send_heartbeat()\n res = self.poll(timeout)\n if res == 0:\n raise TimeoutError(\"perwez send timeout\")\n self.wrapper.send(data, compress)\n\n def recv(self, timeout=None):\n self._send_heartbeat()\n res = self.poll(timeout)\n if res == 0:\n raise TimeoutError(\"perwez recv timeout\")\n return self.wrapper.recv()\n\n\nclass _PrivateSocket:\n def __init__(\n self,\n server_url,\n topic,\n sock_type,\n ctx,\n conflate,\n hwm,\n ):\n self.server_url = server_url\n self.topic = topic\n self.sock_type = sock_type\n\n self._closed = False\n self.wrapper = _SocketWrapper(sock_type, ctx, conflate, hwm)\n\n server_config = get_server_config(self.server_url)\n self.info_sock = ctx.socket(zmq.SUB)\n self.info_sock.subscribe(b\"\")\n self.info_sock.setsockopt(zmq.CONFLATE, 1)\n self.info_sock.connect(server_config[\"zmq_addr\"])\n\n # addr -> endpoint info\n self.info_dict = {}\n self._info_initialized = False\n\n self.poller = zmq.Poller()\n self.poller.register(self.info_sock, zmq.POLLIN)\n self.poll_flag = zmq.POLLIN if ZMQ_IN[self.sock_type] else zmq.POLLOUT\n self.poller.register(self.wrapper.sock, self.poll_flag)\n\n def _init_endpoints(self):\n self._info_initialized = True\n res = requests.get(urljoin(self.server_url, \"/endpoints\"))\n res.raise_for_status()\n self._update_endpoints(res.json())\n\n def _update_endpoints(self, ep_list):\n ep_list = [\n info\n for info in ep_list\n if info[\"type\"] == ZMQ_REVERSE[self.sock_type]\n and info[\"topic\"] == self.topic\n ]\n new_info_dict = {info[\"addr\"]: info for info in ep_list}\n # disconnect\n for addr in self.info_dict:\n if addr not in new_info_dict:\n self.wrapper.sock.disconnect(addr)\n # connect\n for addr in new_info_dict:\n if addr not in self.info_dict:\n self.wrapper.sock.connect(addr)\n self.info_dict = new_info_dict\n\n def __del__(self):\n self.close()\n\n def close(self, linger=0):\n if self._closed:\n return\n self._closed = True\n\n self.info_sock.close(linger)\n self.wrapper.sock.close(linger)\n\n def poll(self, timeout=None):\n if not self._info_initialized:\n self._init_endpoints()\n remain_time = timeout\n while True:\n ts = time.perf_counter()\n events = dict(self.poller.poll(remain_time))\n if len(events) == 0:\n return 0\n\n if self.info_sock in events:\n res = self.info_sock.recv_json()\n self._update_endpoints(res)\n\n if self.wrapper.sock in events:\n return self.poll_flag\n\n if remain_time is not None:\n duration = time.perf_counter() - ts\n remain_time = max(0, remain_time - duration)\n\n def send(self, data, timeout=None, compress=False):\n res = self.poll(timeout)\n if res == 0:\n raise TimeoutError(\"perwez send timeout\")\n self.wrapper.send(data, compress)\n\n def recv(self, timeout=None):\n res = self.poll(timeout)\n if res == 0:\n raise TimeoutError(\"perwez recv timeout\")\n return self.wrapper.recv()\n\n\nclass _SocketWrapper:\n def __init__(\n self,\n sock_type,\n ctx,\n conflate,\n hwm,\n ):\n self.sock = ctx.socket(sock_type)\n self.sock.setsockopt(zmq.IMMEDIATE, 1)\n\n if sock_type == zmq.SUB:\n self.sock.subscribe(b\"\")\n\n 
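# note (added for clarity): ZeroMQ's CONFLATE option keeps only the most recent message instead of queueing up to the HWM\n        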
if conflate:\n self.sock.setsockopt(zmq.CONFLATE, 1)\n else:\n self.sock.set_hwm(hwm)\n\n def send(self, data, compress=False):\n packed = serialize(data, compress=compress)\n self.sock.send(packed)\n\n def recv(self):\n bin_data = self.sock.recv()\n res = deserialize(bin_data)\n return res\n","sub_path":"perwez/perwez/client/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":9020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650683963","text":"# %%\n#######################################\ndef get_etc_shadow_salt(string: str):\n \"\"\"Returns the salt found within the line retreived from the /etc/shadow file\n\n Examples:\n >>> line_from_etc_shadow = 'root:$1$umqC71l2$370xDLmeGD9m4aF/ciIlC.:14425:0:99999:7:::'\\n\n >>> get_etc_shadow_salt(line_from_etc_shadow)\\n\n 'umqC71l2'\n\n References:\n https://linuxize.com/post/etc-shadow-file/\n \"\"\"\n (\n username,\n hash_string,\n last_pw_change,\n min_pw_age,\n max_pw_age,\n warn_period,\n exp_date,\n ) = string.split(\":\")[:-2]\n if \"$\" in hash_string:\n algorithm, salt, thehash = hash_string.split(\"$\")[1:]\n return salt\n\n else:\n return \"There is no $ in the hash_string value.\"\n\n","sub_path":"all_funcs/get_etc_shadow_salt.py","file_name":"get_etc_shadow_salt.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542710211","text":"import bs4 as bs\nimport urllib.request\nimport re\nimport csv\n\n\nchowhoundDB = []\nheaders=[\"recipe Name\", \"ingedrient\", \"cookTime\", \"calories\", \"fat(g)\", \"carbohydrate(g)\", \"protein(g)\", \"cholesterol(mg)\", \"sodium(mg)\", \"Image Link\",\"recipe link\"]\nchowhoundDB.append(headers)\nflags = []\nwith open('Allergies.txt','r') as f:\n for line in f:\n word = line.replace(\"\\n\", \"\")\n flags.append(word.lower())\n\nfor x in range(10000,12000):#31328\n try:\n print(x)\n str1 = \"https://www.chowhound.com/recipes/\" + str(x) + \"/\"\n sauce = urllib.request.urlopen(str1)\n soup = bs.BeautifulSoup(sauce, 'lxml')\n except:\n continue\n currentItem = []\n \n try:\n rawTitle = soup.find('title').text\n rawTitle = rawTitle.rpartition('-')[0]\n currentItem.append(rawTitle)\n except:\n continue\n \n try:\n ing = []\n temp = soup.findAll('div', class_ = \"freyja_box freyja_box81\")\n for i in temp:\n ing.append(i.text.rstrip())\n ing = ing[0].split('\\n')\n ing.pop(0)\n fl = []\n for i in ing:\n for j in flags:\n if i.lower().find(j) != -1:\n fl.append(j) \n mylist = list(dict.fromkeys(fl))\n currentItem.append(mylist)\n except:\n continue\n \n try:\n div = soup.find('div', class_=\"fr_cooktime\")\n time = div.findAll('time')\n s = time[0].text\n s = re.sub(r'\\s+', '', s)\n print(\"raw time is\")\n print(s)\n if s.find(\"mins\") != -1:\n prefix1 = re.findall(r\"(\\d+)mins\", s)\n else:\n prefix1 = re.findall(r\"(\\d+)min\", s)\n \n prefix2 = re.findall(r\"(\\d+)hr\", s)\n print(prefix1)\n print(prefix2)\n s=''\n if not prefix2:\n hr = 0\n else:\n hr = prefix1[0]\n s = s + str(hr) + \" h \"\n if not prefix1:\n mins = 0\n else:\n mins = prefix1[0]\n s = s + str(mins) + \" m \"\n if s == '':\n continue\n print(\"time is\")\n print(s)\n currentItem.append(s)\n except Exception as e:\n print(e)\n continue\n \n try:\n temp = soup.findAll('span', class_ = \"edamam_val\")\n arr = []\n for i in temp:\n arr.append(i.text)\n if '' in arr:\n continue\n calories = arr[0]\n currentItem.append(calories)\n \n fat = arr[1].replace('g', 
'')\n currentItem.append(fat)\n \n carbohydrate = arr[6].replace('g', '')\n currentItem.append(carbohydrate)\n \n protein = arr[7].replace('g', '')\n currentItem.append(protein)\n \n cholesterol = arr[8].replace('mg', '')\n currentItem.append(cholesterol)\n \n sodium = arr[9].replace('mg', '')\n currentItem.append(sodium)\n \n \n \n except:\n continue\n \n try:\n image = soup.findAll('meta', property ='og:image')\n currentItem.append(image[0]['content'])\n except:\n continue\n \n try:\n recipe_link = str1\n currentItem.append(recipe_link)\n except:\n continue\n \n print(\"good item\")\n chowhoundDB.append(currentItem)\n\n \nwith open(\"chow.csv\", \"w\") as f:\n writer = csv.writer(f, delimiter=',')\n for line in chowhoundDB:\n try:\n writer.writerow(line)\n except Exception as e:\n print(e)\n continue ","sub_path":"recipe/scrape_chow.py","file_name":"scrape_chow.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605769176","text":"import sys\nimport time\n\n\nfrom rasa_interface import assert_server_health, parse_utterance\n\ndef send(test_string):\n# String to try parsing\n #test_string = \"table for eight people please\"\n\n\n # Ensure server is running\n try:\n assert_server_health()\n except:\n print(\"Server is not running.\")\n sys.exit()\n else:\n print(\"Server is running.\")\n\n print(\"Sending request to parse string \\\"{}\\\"\".format(test_string))\n print()\n\n # Parse and time it\n start_time = time.clock()\n res = parse_utterance(test_string)\n end_time = time.clock()\n\n print(\"Received result in {} secs.\".format(end_time-start_time))\n print()\n #print(res)\n return res\n","sub_path":"gameBot/sample_bot/test_rasa_interface.py","file_name":"test_rasa_interface.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"375976961","text":"\"\"\"\r\n切换句柄 - 民政部最新行政区划代码数据抓取\r\n\"\"\"\r\nimport sys\r\n\r\nfrom selenium import webdriver\r\nimport time\r\nimport redis\r\nfrom hashlib import md5\r\n\r\nclass MzbSpider:\r\n def __init__(self):\r\n # 设置无界面\r\n self.options = webdriver.ChromeOptions()\r\n self.options.add_argument('--headless')\r\n self.driver = webdriver.Chrome(options=self.options)\r\n\r\n self.driver.get(url='http://www.mca.gov.cn/article/sj/xzqh/2020/')\r\n self.r = redis.Redis(host='localhost',port=6379,db=0)\r\n\r\n def md5_href(self, href):\r\n \"\"\"md5加密的功能函数\"\"\"\r\n m = md5()\r\n m.update(href.encode())\r\n\r\n return m.hexdigest()\r\n\r\n def parse_html(self):\r\n \"\"\"解析提取数据\"\"\"\r\n new_month_a = self.driver.find_element_by_xpath('//*[@id=\"list_content\"]/div[2]/div/ul/table/tbody/tr[1]/td[2]/a')\r\n # 生成指纹\r\n href = new_month_a.get_attribute('href')\r\n finger = self.md5_href(href)\r\n if self.r.sadd('mzb:spiders', finger) == 1:\r\n new_month_a.click()\r\n time.sleep(3)\r\n # 切换句柄\r\n li = self.driver.window_handles\r\n self.driver.switch_to.window(li[1])\r\n # 开始提取数据(单独写了个函数提取数据而已)\r\n self.get_data()\r\n else:\r\n sys.exit('更新完成')\r\n\r\n def get_data(self):\r\n \"\"\"提取具体数据\"\"\"\r\n tr_list = self.driver.find_elements_by_xpath('//tr[@height=\"19\"]')\r\n for tr in tr_list:\r\n item = {}\r\n li = tr.text.split()\r\n item['code'] = li[0]\r\n item['name'] = li[1]\r\n print(item)\r\n # 关闭浏览器\r\n self.driver.quit()\r\n\r\nif __name__ == '__main__':\r\n spider = MzbSpider()\r\n 
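`MzbSpider` above deduplicates pages by storing an MD5 fingerprint of each link in a Redis set: `SADD` reports how many members were actually added, so a return value of 1 means the fingerprint was not in the set yet and the page is new. The same check as a small standalone helper (names hypothetical):

```python
import hashlib

import redis

def is_new_url(conn, set_key, url):
    """Return True the first time `url` is seen, False on repeats.

    conn is a redis.Redis client; SADD only counts members that were
    actually inserted, so 1 means this fingerprint was unseen.
    """
    finger = hashlib.md5(url.encode()).hexdigest()
    return conn.sadd(set_key, finger) == 1

# usage sketch:
# r = redis.Redis(host='localhost', port=6379, db=0)
# if is_new_url(r, 'mzb:spiders', href):
#     ...crawl the new page...
```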
spider.parse_html()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"month05/spider/day05_course/day05_code/07_mzbSpider.py","file_name":"07_mzbSpider.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"654327547","text":"import datetime\nimport json\nimport re\nimport os.path\nimport responses\nfrom unittest import TestCase\n\nfrom freshdesk.v2.api import API\nfrom freshdesk.v2.models import Ticket, Comment, Contact, Customer, Group\n\n\"\"\"\nTest suite for python-freshdesk.\n\nWe test against a dummy helpdesk created for these tests only. It is:\nhttps://pythonfreshdesk.freshdesk.com/\n\"\"\"\n\nDOMAIN = 'pythonfreshdesk.freshdesk.com'\nAPI_KEY = 'MX4CEAw4FogInimEdRW2'\n\n\nclass MockedAPI(API):\n def __init__(self, *args):\n self.resolver = {\n re.compile(r'tickets\\?filter=new_and_my_open&page=1&per_page=100'): self.read_test_file('all_tickets.json'),\n re.compile(r'tickets\\?filter=deleted&page=1&per_page=100'): self.read_test_file('all_tickets.json'),\n re.compile(r'tickets\\?filter=spam&page=1&per_page=100'): self.read_test_file('all_tickets.json'),\n re.compile(r'tickets\\?filter=watching&page=1&per_page=100'): self.read_test_file('all_tickets.json'),\n re.compile(r'tickets\\?page=1&per_page=100'): self.read_test_file('all_tickets.json'),\n re.compile(r'tickets/1$'): self.read_test_file('ticket_1.json'),\n re.compile(r'tickets/1/conversations'): self.read_test_file('conversations.json'),\n re.compile(r'contacts/1$'): self.read_test_file('contact.json'),\n re.compile(r'customers/1$'): self.read_test_file('customer.json'),\n re.compile(r'groups$'): self.read_test_file('groups.json'),\n re.compile(r'groups/1$'): self.read_test_file('group_1.json'),\n }\n super(MockedAPI, self).__init__(*args)\n\n def read_test_file(self, filename):\n path = os.path.join(os.path.dirname(__file__), 'sample_json_data', filename)\n return json.loads(open(path, 'r').read())\n\n def _get(self, url, *args, **kwargs):\n for pattern, data in self.resolver.items():\n if pattern.match(url):\n return data\n\n # No match found, raise 404\n from requests.exceptions import HTTPError\n raise HTTPError('404: mocked_api_get() has no pattern for \\'{}\\''.format(url))\n\n\nclass TestAPIClass(TestCase):\n\n def test_custom_cname(self):\n with self.assertRaises(AttributeError):\n API('custom_cname_domain', 'invalid_api_key')\n\n def test_api_prefix(self):\n api = API('test_domain.freshdesk.com', 'test_key')\n self.assertEqual(api._api_prefix,\n 'https://test_domain.freshdesk.com/api/v2/')\n api = API('test_domain.freshdesk.com/', 'test_key')\n self.assertEqual(api._api_prefix,\n 'https://test_domain.freshdesk.com/api/v2/')\n\n @responses.activate\n def test_403_error(self):\n responses.add(responses.GET,\n 'https://{}/api/v2/tickets/1'.format(DOMAIN),\n status=403)\n\n api = API('pythonfreshdesk.freshdesk.com', 'invalid_api_key')\n from requests.exceptions import HTTPError\n with self.assertRaises(HTTPError):\n api.tickets.get_ticket(1)\n\n\nclass TestTicket(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.api = MockedAPI(DOMAIN, API_KEY)\n cls.ticket = cls.api.tickets.get_ticket(1)\n cls.ticket_json = json.loads(open(os.path.join(os.path.dirname(__file__),\n 'sample_json_data',\n 'ticket_1.json')).read())\n\n def test_str(self):\n self.assertEqual(str(self.ticket), 'This is a sample ticket')\n\n def 
test_repr(self):\n self.assertEqual(repr(self.ticket), '')\n\n def test_get_ticket(self):\n self.assertIsInstance(self.ticket, Ticket)\n self.assertEqual(self.ticket.id, 1)\n self.assertEqual(self.ticket.subject, 'This is a sample ticket')\n self.assertEqual(self.ticket.description_text, 'This is a sample ticket, feel free to delete it.')\n self.assertEqual(self.ticket.cc_emails, ['test2@example.com'])\n self.assertIn('foo', self.ticket.tags)\n self.assertIn('bar', self.ticket.tags)\n\n @responses.activate\n def test_create_ticket(self):\n responses.add(responses.POST,\n 'https://{}/api/v2/tickets'.format(DOMAIN),\n status=200, content_type='application/json',\n json=self.ticket_json)\n\n ticket = self.api.tickets.create_ticket('This is a sample ticket',\n description='This is a sample ticket, feel free to delete it.',\n email='test@example.com',\n priority=1, status=2,\n tags=['foo', 'bar'],\n cc_emails=['test2@example.com'])\n self.assertIsInstance(ticket, Ticket)\n self.assertEqual(ticket.subject, 'This is a sample ticket')\n self.assertEqual(ticket.description_text, 'This is a sample ticket, feel free to delete it.')\n self.assertEqual(ticket.priority, 'low')\n self.assertEqual(ticket.status, 'open')\n self.assertEqual(ticket.cc_emails, ['test2@example.com'])\n self.assertIn('foo', ticket.tags)\n self.assertIn('bar', ticket.tags)\n\n @responses.activate\n def test_create_outbound_email(self):\n j = self.ticket_json.copy()\n values = {\n 'subject': 'This is a sample outbound_email',\n 'description_text': 'This is a sample outbound, feel free to delete it.',\n 'status': 5,\n 'email_config_id': 5000054536,\n }\n j.update(values)\n responses.add(responses.POST,\n 'https://{}/api/v2/tickets/outbound_email'.format(DOMAIN),\n status=200, content_type='application/json',\n json=j)\n\n ticket = self.api.tickets.create_outbound_email('This is a sample outbound_email',\n description='This is a sample outbound, feel free to delete it.',\n email='test@example.com',\n email_config_id=5000054536,\n priority=1,\n tags=['foo', 'bar'],\n cc_emails=['test2@example.com'])\n self.assertIsInstance(ticket, Ticket)\n self.assertEqual(ticket.subject, 'This is a sample outbound_email')\n self.assertEqual(ticket.description_text, 'This is a sample outbound, feel free to delete it.')\n self.assertEqual(ticket.priority, 'low')\n self.assertEqual(ticket.status, 'closed')\n self.assertEqual(ticket.cc_emails, ['test2@example.com'])\n self.assertIn('foo', ticket.tags)\n self.assertIn('bar', ticket.tags)\n\n @responses.activate\n def test_update_ticket(self):\n j = self.ticket_json.copy()\n values = {\n 'subject': 'Test subject update',\n 'priority': 3,\n 'status': 4,\n 'tags': ['hello', 'world']\n }\n j.update(values)\n\n responses.add(responses.GET,\n 'https://{}/api/v2/tickets/1'.format(DOMAIN),\n status=200, content_type='application/json', json=j)\n\n responses.add(responses.PUT,\n 'https://{}/api/v2/tickets/1'.format(DOMAIN),\n status=200, content_type='application/json', json=j)\n\n ticket = self.api.tickets.update_ticket(j['id'], **values)\n self.assertEqual(ticket.subject, 'Test subject update')\n self.assertEqual(ticket.status, 'resolved')\n self.assertEqual(ticket.priority, 'high')\n self.assertIn('hello', ticket.tags)\n self.assertIn('world', ticket.tags)\n\n @responses.activate\n def test_delete_ticket(self):\n responses.add(responses.DELETE,\n 'https://{}/api/v2/tickets/1'.format(DOMAIN),\n status=204)\n self.api.tickets.delete_ticket(1)\n\n def test_ticket_priority(self):\n 
self.assertEqual(self.ticket._priority, 1)\n self.assertEqual(self.ticket.priority, 'low')\n\n def test_ticket_status(self):\n self.assertEqual(self.ticket._status, 2)\n self.assertEqual(self.ticket.status, 'open')\n\n def test_ticket_source(self):\n self.assertEqual(self.ticket._source, 2)\n self.assertEqual(self.ticket.source, 'portal')\n\n def test_ticket_datetime(self):\n self.assertIsInstance(self.ticket.created_at, datetime.datetime)\n self.assertIsInstance(self.ticket.updated_at, datetime.datetime)\n\n def test_new_and_my_open_tickets(self):\n tickets = self.api.tickets.list_new_and_my_open_tickets()\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n self.assertEqual(tickets[0].id, self.ticket.id)\n\n def test_deleted_tickets(self):\n tickets = self.api.tickets.list_deleted_tickets()\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n\n def test_watched_tickets(self):\n tickets = self.api.tickets.list_watched_tickets()\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n self.assertEqual(tickets[0].id, self.ticket.id)\n\n def test_spam_tickets(self):\n tickets = self.api.tickets.list_tickets(filter_name='spam')\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n\n def test_default_filter_name(self):\n tickets = self.api.tickets.list_tickets()\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n self.assertEqual(tickets[0].id, self.ticket.id)\n\n def test_none_filter_name(self):\n tickets = self.api.tickets.list_tickets(filter_name=None)\n self.assertIsInstance(tickets, list)\n self.assertEqual(len(tickets), 1)\n self.assertEqual(tickets[0].id, self.ticket.id)\n\n\nclass TestComment(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = MockedAPI(DOMAIN, API_KEY)\n cls.comments = cls.api.comments.list_comments(1)\n cls.comments_json = json.loads(open(os.path.join(\n os.path.dirname(__file__),\n 'sample_json_data',\n 'conversations.json')).read())\n\n def test_comments_list(self):\n self.assertIsInstance(self.comments, list)\n self.assertEqual(len(self.comments), 2)\n self.assertIsInstance(self.comments[0], Comment)\n\n def test_comment_str(self):\n self.assertEqual(str(self.comments[0]), 'This is a private note')\n\n def test_comment_repr(self):\n self.assertEqual(repr(self.comments[0]), '')\n\n @responses.activate\n def test_create_note(self):\n responses.add(responses.POST,\n 'https://{}/api/v2/tickets/1/notes'.format(DOMAIN),\n status=200, content_type='application/json',\n json=self.comments_json[0])\n\n comment = self.api.comments.create_note(1, 'This is a private note')\n self.assertIsInstance(comment, Comment)\n self.assertEqual(comment.body_text, 'This is a private note')\n self.assertEqual(comment.source, 'note')\n\n @responses.activate\n def test_create_reply(self):\n responses.add(responses.POST,\n 'https://{}/api/v2/tickets/1/reply'.format(DOMAIN),\n status=200, content_type='application/json',\n json=self.comments_json[1])\n\n comment = self.api.comments.create_reply(1, 'This is a reply')\n self.assertIsInstance(comment, Comment)\n self.assertEqual(comment.body_text, 'This is a reply')\n self.assertEqual(comment.source, 'reply')\n\n\nclass TestContact(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = MockedAPI(DOMAIN, API_KEY)\n cls.contact = cls.api.contacts.get_contact('1')\n\n def test_get_contact(self):\n self.assertIsInstance(self.contact, Contact)\n self.assertEqual(self.contact.name, 'Rachel')\n 
self.assertEqual(self.contact.email, 'rachel@freshdesk.com')\n self.assertEqual(self.contact.helpdesk_agent, False)\n self.assertEqual(self.contact.customer_id, 1)\n\n def test_contact_datetime(self):\n self.assertIsInstance(self.contact.created_at, datetime.datetime)\n self.assertIsInstance(self.contact.updated_at, datetime.datetime)\n\n def test_contact_str(self):\n self.assertEqual(str(self.contact), 'Rachel')\n\n def test_contact_repr(self):\n self.assertEqual(repr(self.contact), '')\n\n\nclass TestCustomer(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = MockedAPI(DOMAIN, API_KEY)\n cls.customer = cls.api.customers.get_customer('1')\n cls.contact = cls.api.contacts.get_contact('1')\n\n def test_customer(self):\n self.assertIsInstance(self.customer, Customer)\n self.assertEqual(self.customer.name, 'ACME Corp.')\n self.assertEqual(self.customer.domains, 'acme.com')\n self.assertEqual(self.customer.cf_custom_key, 'custom_value')\n\n def test_contact_datetime(self):\n self.assertIsInstance(self.customer.created_at, datetime.datetime)\n self.assertIsInstance(self.customer.updated_at, datetime.datetime)\n\n def test_contact_str(self):\n self.assertEqual(str(self.customer), 'ACME Corp.')\n\n def test_contact_repr(self):\n self.assertEqual(repr(self.customer), '')\n\n def test_get_customer_from_contact(self):\n self.customer = self.api.customers.get_customer_from_contact(self.contact)\n self.test_customer()\n\n\nclass TestGroup(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = MockedAPI(DOMAIN, API_KEY)\n cls.group = cls.api.groups.get_group(1)\n\n def test_list_groups(self):\n groups = self.api.groups.list_groups()\n self.assertIsInstance(groups, list)\n self.assertEqual(len(groups), 2)\n self.assertEqual(groups[0].id, self.group.id)\n\n def test_group(self):\n self.assertIsInstance(self.group, Group)\n self.assertEqual(self.group.name, 'Entertainers')\n self.assertEqual(self.group.description, 'Singers dancers and stand up comedians')\n\n def test_group_datetime(self):\n self.assertIsInstance(self.group.created_at, datetime.datetime)\n self.assertIsInstance(self.group.updated_at, datetime.datetime)\n\n def test_group_repr(self):\n self.assertEqual(repr(self.group), '')\n","sub_path":"freshdesk/v2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":14821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221104754","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nreadme = open(\"README.rst\").read()\n\nsetup(\n\tname = \"yamlns\",\n\tversion = \"0.8\",\n\tdescription = \"YAML serializable dictionary with dual item and attribute accessors\",\n\tauthor = \"David Garcia Garzon\",\n\tauthor_email = \"voki@canvoki.net\",\n\turl = 'https://github.com/GuifiBaix/python-yamlns',\n\tlong_description = readme,\n\tlicense = 'GNU General Public License v3 or later (GPLv3+)',\n\tpackages=find_packages(exclude=['*_[tT]est*']),\n\tscripts=[\n\t\t'yamlns/nstemplate.py',\n\t\t],\n\tinstall_requires=[\n\t\t'PyYAML>=5.3.1', # security\n\t\t'nose',\n\t\t'rednose',\n\t],\n\tinclude_package_data = True,\n\ttest_suite = 'yamlns',\n#\ttest_runner = 'colour_runner.runner.ColourTextTestRunner',\n\tclassifiers = [\n\t\t'Programming Language :: Python',\n\t\t'Programming Language :: Python :: 3',\n\t\t'Topic :: Software Development :: Libraries :: Python Modules',\n\t\t'Intended Audience :: Developers',\n\t\t'Development Status :: 5 - Production/Stable',\n\t\t'License :: OSI Approved :: GNU General 
Public License v3 or later (GPLv3+)',\n\t\t'Operating System :: OS Independent',\n\t],\n)\n\n","sub_path":"pypi_install_script/yamlns-0.8.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"412484051","text":"# -*- coding: utf-8 -*-\n\nfrom lastuserapp import db\nimport lastuser_core.models as models\nfrom .test_db import TestDatabaseFixture\n\n\nclass TestUserClientPermissions(TestDatabaseFixture):\n\n def test_UserClientPermissions(self):\n \"\"\"\n Test for verifying creation of UserClientPermissions instance\n \"\"\"\n gustav = models.User(username=u'gustav')\n client = self.fixtures.client\n access_permissions = u'siteadmin'\n result = models.UserClientPermissions(user=gustav, client=client, access_permissions=access_permissions)\n db.session.add(result)\n db.session.commit()\n self.assertIsInstance(result, models.UserClientPermissions)\n\n def test_UserClientPermissions_migrate_user(self):\n \"\"\"\n Test for migrating users and transfering their\n client permissions\n \"\"\"\n # scenario 1: when *only* olduser has UserClientPermissions instance\n old_crusoe = self.fixtures.crusoe\n new_crusoe = models.User(username=u'chef-crusoe')\n models.UserClientPermissions.migrate_user(old_crusoe, new_crusoe)\n for each in new_crusoe.client_permissions:\n self.assertIsInstance(each, models.UserClientPermissions)\n self.assertEqual(new_crusoe.client_permissions[0].user, new_crusoe)\n\n # scenario 2: when *both* olduser and newuser have UserClientPermissions instances\n old_oakley = self.fixtures.oakley\n client = self.fixtures.client\n access_permissions_old_oakley = u'siteadmin'\n access_permissions_new_oakley = u'siteeditor'\n old_oakley_userclientperms = models.UserClientPermissions(user=old_oakley, client=client, access_permissions=access_permissions_old_oakley)\n new_oakley = models.User(username=u'oakley-the-stud')\n new_oakley_userclientperms = models.UserClientPermissions(user=new_oakley, client=client, access_permissions=access_permissions_new_oakley)\n db.session.add(old_oakley_userclientperms)\n db.session.add(new_oakley_userclientperms)\n db.session.commit()\n models.UserClientPermissions.migrate_user(old_oakley, new_oakley)\n result = new_oakley.client_permissions[0]\n for each in new_oakley.client_permissions:\n self.assertIsInstance(each, models.UserClientPermissions)\n received_access_permissions = str(result.access_permissions)\n expected_access_permissions = \" \".join([access_permissions_old_oakley, access_permissions_new_oakley])\n self.assertEqual(expected_access_permissions, received_access_permissions)\n\n def test_userclientpermissions_pickername(self):\n \"\"\"\n Test for UserClientPermissions' pickername\n \"\"\"\n finnick = models.User(username=u'finnick', fullname=u\"Finnick Odair\")\n district4 = models.Client(title=u\"District 4\")\n access_permissions = u'siteadmin'\n result = models.UserClientPermissions(user=finnick, client=district4, access_permissions=access_permissions)\n self.assertEqual(result.pickername, finnick.pickername)\n\n def test_userclientpermissions_buid(self):\n \"\"\"\n Test for UserClientPermissions' buid\n \"\"\"\n beetee = models.User(username=u'beetee', fullname=u\"Beetee\")\n district3 = models.Client(title=u'District 3')\n access_permissions = u'siteadmin'\n result = models.UserClientPermissions(user=beetee, client=district3, access_permissions=access_permissions)\n self.assertEqual(result.buid, 
beetee.buid)\n","sub_path":"tests/unit/lastuser_core/test_model_client_UserClientPermissions.py","file_name":"test_model_client_UserClientPermissions.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"59305366","text":"import pandas as pd\nimport pyodbc\nimport sqlalchemy as alch\nimport urllib\n\ns_driver = '{SQL Server}'\ns_server = 'HQNiceDMDB.avp.ru'\ns_database = 'nice_dw'\ncon_string ='DRIVER=%s;SERVER=%s;DATABASE=%s;Trusted_Connection=True;' % (s_driver, s_server, s_database)\nconn = pyodbc.connect(con_string)\ncursor = conn.cursor()\n\nsql_q = '''\nDECLARE @RC int\nDECLARE @StartTime nvarchar(100)\nDECLARE @StopTime nvarchar(100)\nDECLARE @TZ nvarchar(10)\n\nEXECUTE @RC = [dbo].[Feedback_Export]\n @StartTime = '%s'\n ,@StopTime = '%s'\n ,@TZ = '+04'\n''' % (\"2016-08-01 13:00\", \"2016-09-03 13:00\")\n\ndf = pd.read_sql(sql_q, conn)\n\nd_driver = '{SQL Server}'\nd_server = 'szuev3.avp.ru'\nd_database = 'OutsourceData'\nparams = urllib.quote_plus(\"DRIVER=%s;SERVER=%s;DATABASE=%s;Trusted_Connection=True;\" % (d_driver, d_server, d_database))\nengine = alch.create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % params)\ndf.to_sql('STG_NICE', engine, if_exists='append', index=True, index_label=None)\n\n","sub_path":"nice/nice.py","file_name":"nice.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"638838500","text":"import tensorflow as tf\nimport numpy as np\n\nimport os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nmnist = tf.keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\ntrain_images = train_images / 255.\ntest_images = test_images / 255.\n\ntrain_images = train_images.reshape(60000, 28, 28, 1)\ntest_images = test_images.reshape(10000, 28, 28, 1)\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=(3,3),\n activation='relu', input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(64, kernel_size=(3,3),\n activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy']\n)\n\nmodel.fit(\n train_images,\n train_labels,\n epochs=10\n)\n\n\nevaluate = model.evaluate(\n test_images,\n test_labels\n)\n\nprint(evaluate)","sub_path":"intro/week2/conv2d.py","file_name":"conv2d.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"8818825","text":"def is_inside(a = [], b = []):\n check = True\n x = b[0] + b[2]\n y = b[1] - b[3]\n if((b[0] <= a[0] <= x) and (y <= a[1] <= b[1])):\n return check\n else:\n check = False\n return check\n\npoint_list = []\nrectangle_list = []\n\nfor i in range(2):\n p = int(input(\"Enter your position of point: \"))\n point_list.append(p)\nprint(\"Position(x, y) of point: \", point_list)\nprint()\nfor i in range(2):\n r = int(input(\"Enter your position of rectangle: \"))\n rectangle_list.append(r)\nfor i in range(2):\n width_height = int(input(\"Enter your width and height of rectangle: \"))\n rectangle_list.append(width_height)\nprint(\"Position(x, y), width and height 
of rectangle: \", rectangle_list)\nprint()\n\nprint(is_inside(point_list, rectangle_list))\n","sub_path":"ass5/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135231688","text":"from django.utils.translation import gettext as _\n\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom .models import Comment, AdminComment, SoldTimeSlotRate\n\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"comments:comment-detail\", lookup_field='id', read_only=True)\n first_name = serializers.SerializerMethodField(read_only=True)\n admin_reply = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = Comment\n fields = ['id', 'url', 'user', 'admin_reply', 'first_name', 'consultant', 'message', 'created', 'updated', ]\n extra_kwargs = {\n 'id': {'read_only': True},\n 'user': {'read_only': True},\n 'created': {'read_only': True},\n 'updated': {'read_only': True},\n }\n\n def get_admin_reply(self, obj):\n admin_reply = None\n try:\n admin_reply = AdminComment.objects.get(comment=obj)\n except:\n pass\n\n if not admin_reply:\n return None\n\n return AdminCommentSerializer(admin_reply).data\n\n def get_first_name(self, obj):\n return obj.user.get_short_name()\n\n def validate(self, attrs):\n request = self.context.get(\"request\", None)\n user = request.user\n\n if not request:\n raise ValidationError({\"detail\": _(\"Request is None\")})\n\n if not user:\n raise ValidationError({\"detail\": _(\"User is None.\")})\n\n return attrs\n\n def create(self, validated_data):\n request = self.context.get(\"request\", None)\n user = request.user\n\n obj = Comment.objects.create(\n user=user,\n consultant=validated_data['consultant'],\n message=validated_data['message'],\n )\n\n return obj\n\n\nclass AdminCommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = AdminComment\n fields = ['id', 'comment', 'message', 'created', 'updated', ]\n\n\nclass SoldTimeSlotRateSerializer(serializers.ModelSerializer):\n class Meta:\n model = SoldTimeSlotRate\n fields = ['sold_time_slot', 'rate', ]\n\n def validate(self, attrs):\n request = self.context.get('request', None)\n user = request.user\n\n sold_time_slot = attrs['sold_time_slot']\n\n if sold_time_slot.sold_to != user:\n raise ValidationError({\"detail\": _(\"This time slot is not sold to this user\")})\n\n return attrs\n\n def create(self, validated_data):\n sold_time_slot = validated_data['sold_time_slot']\n\n if SoldTimeSlotRate.objects.filter(sold_time_slot=sold_time_slot).exists():\n raise ValidationError({\"detail\": \"Rate exists\"})\n\n obj = SoldTimeSlotRate.objects.create(\n sold_time_slot=sold_time_slot,\n rate=validated_data['rate'],\n )\n\n return obj\n","sub_path":"code/sNeeds/apps/comments/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6742730","text":"import numpy\n\n\nclass Spectra(object):\n \"\"\" This class contains a spectra as a function of energy, radius and time.\n\n The spectra is stored as histogram binned in energy, x, radius, y, and\n time, z. 
This histogram can be flattened to 2d (energy, radius) or 1d\n (energy).\n\n Args:\n name (str): The name of this spectra\n num_decays (float): The number of decays this spectra is created to\n represent.\n\n Attributes:\n _data (:class:`numpy.ndarray`): The histogram of data\n _name (str): The name of this spectra\n _energy_low (float): Lowest bin edge in MeV\n _energy_high (float): Highest bin edge in MeV\n _energy_bins (int): Number of energy bins\n _energy_width (float): Width of a single bin in MeV\n _radial_low (float): Lowest bin edge in mm\n _radial_high (float): Highest bin edge in mm\n _radial_bins (int): Number of raidal bins\n _radial_width (float): Width of a single bin in mm\n _time_low (float): Lowest bin edge in years\n _time_high (float): Highest bin edge in years\n _time_bins (int): Number of time bins\n _time_width (float): Width of a single bin in yr\n _num_decays (float): The number of decays this spectra currently\n represents.\n _raw_events (int): The number of raw events used to generate the\n spectra. Increments by one with each fill independent of\n weight.\n \"\"\"\n def __init__(self, name, num_decays):\n \"\"\" Initialise the spectra data container.\n \"\"\"\n self._energy_low = 0.0 # MeV\n self._energy_high = 10.0 # MeV\n self._energy_bins = 1000\n self._energy_width = (self._energy_high - self._energy_low) / self._energy_bins\n self._radial_low = 0.0 # mm\n self._radial_high = 10000.0 # mm\n self._radial_bins = 1000\n self._radial_width = (self._radial_high - self._radial_low) / self._radial_bins\n self._time_low = 0.0 # years\n self._time_high = 10.0 # years\n self._time_bins = 10\n self._time_width = (self._time_high - self._time_low) / self._time_bins\n self._num_decays = num_decays\n self._raw_events = 0\n self._data = numpy.zeros(shape=(self._energy_bins,\n self._radial_bins,\n self._time_bins),\n dtype=float)\n self._name = name\n\n def fill(self, energy, radius, time, weight=1.0):\n \"\"\" Fill the bin for the `energy` `radius` and `time` with weight.\n\n Args:\n energy (float): Energy value to fill.\n raidus (float): Radial value to fill.\n time (float): Time value to fill.\n weight (float, optional): Defaults to 1.0, weight to fill the bin\n with.\n\n Raises:\n ValueError: If the energy, radius or time is beyond the bin limits.\n \"\"\"\n if not self._energy_low <= energy < self._energy_high:\n raise ValueError(\"Energy out of range\")\n if not self._radial_low <= radius < self._radial_high:\n raise ValueError(\"Radius out of range\")\n if not self._time_low <= time < self._time_high:\n raise ValueError(\"Time out of range\")\n energy_bin = (energy - self._energy_low) / (self._energy_high - self._energy_low) * self._energy_bins\n radial_bin = (radius - self._radial_low) / (self._radial_high - self._radial_low) * self._radial_bins\n time_bin = (time - self._time_low) / (self._time_high - self._time_low) * self._time_bins\n self._data[energy_bin, radial_bin, time_bin] += weight\n\n def project(self, axis):\n \"\"\" Project the histogram along an `axis`.\n\n Args:\n axis (int): To project onto\n\n Returns:\n The projection of the histogram onto the given axis\n \"\"\"\n if axis == 0:\n return self._data.sum(1).sum(1)\n elif axis == 1:\n return self._data.sum(0).sum(1)\n elif axis == 2:\n return self._data.sum(0).sum(0)\n\n def surface(self, axis):\n \"\"\" Project the histogram along two axis, along the `axis`.\n\n Args:\n axis (int): To project away\n\n Returns:\n The 2d surface of the histogram.\n \"\"\"\n return self._data.sum(axis)\n\n def 
sum(self):\n \"\"\" Calculate and return the sum of the `_data` values.\n\n Returns:\n The sum of the values in the `_data` histogram.\n \"\"\"\n return self._data.sum()\n\n def scale(self, num_decays):\n \"\"\" Scale THIS spectra to represent *num_decays* worth of decays over\n the entire unshrunken spectra.\n\n This rescales each bin by the ratio of *num_decays* to\n *self._num_decays*, i.e. it changes the spectra from representing\n *self._num_decays* to *num_decays*. *self._num_decays* is updated\n to equal *num_decays* after.\n\n Args:\n num_decays (float): Number of decays this spectra should represent.\n \"\"\"\n self._data = numpy.multiply(self._data, num_decays / self._num_decays)\n self._num_decays = num_decays\n\n def shrink(self, energy_low=None, energy_high=None, radial_low=None,\n radial_high=None, time_low=None, time_high=None):\n \"\"\" Shrink the data such that it only contains values between energy_low\n and energy_high (for example) by slicing. This updates the internal bin\n information as well as the data.\n\n Args:\n energy_low (float): Optional new low bound of the energy.\n energy_low (float): Optional new high bound of the energy.\n radial_low (float): Optional new low bound of the radius.\n radial_low (float): Optional new high bound of the radius.\n time_low (float): Optional new low bound of the time.\n time_low (float): Optional new high bound of the time.\n\n Notes:\n The logic in this method is the same for each dimension, first\n check the new values are within the existing ones (can only compress).\n Then calculate the low bin number and high bin number (relative to the\n existing binning low). Finally update all the bookeeping and slice.\n \"\"\"\n if(energy_low is not None and energy_low < self._energy_low):\n raise ValueError(\"Energy low is below existing bound\")\n if(energy_high is not None and energy_high > self._energy_high):\n raise ValueError(\"Energy high is above existing bound\")\n if(radial_low is not None and radial_low < self._radial_low):\n raise ValueError(\"Radial low is below existing bound\")\n if(radial_high is not None and radial_high > self._radial_high):\n raise ValueError(\"Radial high is above existing bound\")\n if(time_low is not None and time_low < self._time_low):\n raise ValueError(\"Time low is below existing bound\")\n if(time_high is not None and time_high > self._time_high):\n raise ValueError(\"Time high is above existing bound\")\n\n energy_low_bin = 0\n energy_high_bin = self._energy_bins\n if(energy_low is not None and energy_high is not None):\n energy_low_bin = (energy_low - self._energy_low) / self._energy_width\n energy_high_bin = (energy_high - self._energy_low) / self._energy_width\n self._energy_low = energy_low\n self._energy_high = energy_high\n self._energy_bins = int(energy_high_bin - energy_low_bin)\n\n radial_low_bin = 0\n radial_high_bin = self._radial_bins\n if(radial_low is not None and radial_high is not None):\n radial_low_bin = (radial_low - self._radial_low) / self._radial_width\n radial_high_bin = (radial_high - self._radial_low) / self._radial_width\n self._radial_low = radial_low\n self._radial_high = radial_high\n self._radial_bins = int(radial_high_bin - radial_low_bin)\n\n time_low_bin = 0\n time_high_bin = self._time_bins\n if(time_low is not None and time_high is not None):\n time_low_bin = (time_low - self._time_low) / self._time_width\n time_high_bin = (time_high - self._time_low) / self._time_width\n self._time_low = time_low\n self._time_high = time_high\n self._time_bins = 
int(time_high_bin - time_low_bin)\n\n # Internal bookeeping complete, now slice the data\n self._data = self._data[energy_low_bin:energy_high_bin,\n radial_low_bin:radial_high_bin,\n time_low_bin:time_high_bin]\n\n def add(self, spectrum):\n \"\"\" Adds a spectrum to current spectra object.\n\n Args:\n spectrum (:class:`core.spectra`): Spectrum to add.\n \"\"\"\n if self._energy_low != spectrum._energy_low:\n raise ValueError(\"Lower energy bounds in spectra are not equal.\")\n if self._energy_high != spectrum._energy_high:\n raise ValueError(\"Upper energy bounds in spectra are not equal.\")\n if self._energy_bins != spectrum._energy_bins:\n raise ValueError(\"Number of energy bins in spectra are not equal.\")\n if self._energy_width != spectrum._energy_width:\n raise ValueError(\"Width of energy bins in spectra are not equal.\")\n if self._radial_low != spectrum._radial_low:\n raise ValueError(\"Lower radial bounds in spectra are not equal.\")\n if self._radial_high != spectrum._radial_high:\n raise ValueError(\"Upper radial bounds in spectra are not equal.\")\n if self._radial_bins != spectrum._radial_bins:\n raise ValueError(\"Number of radial bins in spectra are not equal.\")\n if self._radial_width != spectrum._radial_width:\n raise ValueError(\"Width of radial bins in spectra are not equal.\")\n if self._time_low != spectrum._time_low:\n raise ValueError(\"Lower time bounds in spectra are not equal.\")\n if self._time_high != spectrum._time_high:\n raise ValueError(\"Upper time bounds in spectra are not equal.\")\n if self._time_bins != spectrum._time_bins:\n raise ValueError(\"Number of time bins in spectra are not equal.\")\n if self._time_width != spectrum._time_width:\n raise ValueError(\"Width of time bins in spectra are not equal.\")\n self._data += spectrum._data\n self._raw_events += spectrum._raw_events\n self._num_decays += spectrum._num_decays\n","sub_path":"echidna/core/spectra.py","file_name":"spectra.py","file_ext":"py","file_size_in_byte":10522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"153733692","text":"import pandas as pd\nimport pickle\nimport glob, os\nfrom pprint import pprint\n\n# -----------------------------------------------------\n# This file is used to parse the PACS output\n# into a more organized dictionary which will contain\n# the info for mammo and mri breast. Some patients will\n# have mris done and some won't.\n#\n# The dictionary can then be used to run a statistical\n# analyis.\n# -----------------------------------------------------\n\n# -----------------------------------------------------\n# Global Variables\n# -----------------------------------------------------\n\n# --- read and combine breast matches\nglob_path = './csvs/*/*matches*'\n\n# --- load breast mri dictionary\nbreast_mris = pickle.load(open('./breast_mri_dict.pkl', 'rb'))\n\n# ------------------------------------------------------\n# main functions\n# ------------------------------------------------------\ndef process_breast_dataframe(df, brain_mris):\n \"\"\"\n parses the combined df into a dictionary format. \n\n Parameters:\n df - combined dataframe from all matches.csv\n breast_mris - dictionary containing data on breast mris. 
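A short usage sketch for the `Spectra` container above (the spectrum name and fill values are arbitrary): bins are located with `bin = (value - low) / (high - low) * n_bins`, `project(axis)` sums the other two dimensions away, and `scale` rescales every bin by the ratio of new to current decay counts.

```python
# assumes the Spectra class defined above is importable
spec = Spectra("Te130_0vbb", num_decays=1000.0)
spec.fill(energy=2.5, radius=1200.0, time=0.5)             # weight 1.0
spec.fill(energy=2.5, radius=1200.0, time=0.5, weight=0.5)  # same bin

energy_hist = spec.project(0)   # 1d energy spectrum (axes: 0=E, 1=r, 2=t)
assert energy_hist.shape == (spec._energy_bins,)
assert spec.sum() == 1.5

spec.scale(2000.0)              # now represents 2000 decays
assert spec.sum() == 3.0        # every bin doubled
```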
MRI->studydate/accession\n\n Returns:\n breast_dict - dictionary containing mrns to their corresponding accessions, studydates\n \"\"\" \n # --- define dictionary\n breast_dict = {}\n for mrn, acc, study_date in zip(list(df['mrn']), list(df['accession']), list(df['study_date'])):\n \n # --- remove spaces from entries\n mrn = str(mrn).replace(\" \", \"\")\n \n if str.isdigit(mrn):\n mrn = int(mrn)\n acc = acc.replace(\" \", \"\")\n \n # --- check if mrn contains breast mri\n if mrn in brain_mris.keys():\n \n # --- check if mrn exists\n if mrn not in breast_dict.keys():\n breast_dict[mrn] = {\n acc : {\n 'mri_accession' : brain_mris[mrn]['accession'],\n 'mg_study_date' : study_date,\n 'mri_study_date': brain_mris[mrn]['study_date'],\n 'years_apart': calc_years_apart(int(study_date), int(brain_mris[mrn]['study_date']))\n }\n }\n else:\n breast_dict[mrn][acc] = {\n 'mri_accession' : brain_mris[mrn]['accession'],\n 'mg_study_date' : study_date,\n 'mri_study_date': brain_mris[mrn]['study_date'],\n 'years_apart': calc_years_apart(int(study_date), int(brain_mris[mrn]['study_date']))\n }\n else:\n # --- check if mrn exists in dict\n if mrn not in breast_dict.keys():\n breast_dict[mrn] = {\n acc : {\n 'mg_study_date' : study_date\n }\n }\n else:\n breast_dict[mrn][acc] = {\n 'mg_study_date' : study_date\n }\n return breast_dict\n# ------------------------------------------------------\n# helper functions\n# ------------------------------------------------------\n\ndef calc_years_apart(date1, date2):\n \"\"\"\n calculates years and months apart between two dates.\n\n Paramaters:\n date1 - date in integer format YYYYMMDD\n date2 - date in integer format YYYYMMDD\n\n Returns:\n apart - years/months apart in list form: [years, months]\n \"\"\"\n # --- make sure date1 is the more recent date\n if date2 > date1:\n temp = date1\n date1 = date2\n date2 = temp\n\n # --- parse dates into years and months\n year1 = int(str(date1)[:4])\n year2 = int(str(date2)[:4])\n month1 = int(str(date1)[4:6])\n month2 = int(str(date2)[4:6])\n\n # --- calculate months\n if month1 > month2:\n month_count = month1 - month2\n else:\n # --- decrease year by 1\n year1 -= 1\n month_count = (month1 + 12) - month2\n\n # --- calculate years\n year_count = year1 - year2\n\n # --- place into format\n apart = [year_count, month_count]\n\n return apart\n\ndef parse_pacs_matches_csvs(glob_path):\n \"\"\"\n Parses mrn, accession, studydate csv outputs from \n external 'matches.csv' file.\n\n Parameters:\n glob_path - path to root directory containing matches csvs\n\n Returns:\n df - pandas dataframe containing all matches csvs combined\n \"\"\"\n csvs = glob.glob(glob_path)\n csvs = [pd.read_csv(csv) for csv in csvs]\n df = pd.concat(csvs)\n \n return df\n\nif __name__ == '__main__':\n \n # --- process csvs into one huge df\n df = parse_pacs_matches_csvs(glob_path)\n\n # --- parse df into dictionary format\n breast_dict = process_breast_dataframe(df, breast_mris)\n\n # --- output dictionary to pickle\n pickle.dump(breast_dict, open('./breast_data_dict.pkl', 'wb'))\n\n print('breast data pickle created.')\n","sub_path":"parse_breast_data.py","file_name":"parse_breast_data.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"585091802","text":"# -*- coding: utf-8 -*-\n__author__ = 'vladislav'\n\nfrom flask import Blueprint, request, render_template, flash, g, session, redirect, url_for\nfrom datetime import datetime, date, timedelta\nimport 
json\nfrom sqlalchemy import or_, func\n\nfrom gt_gora import db\nfrom gt_gora.forms import DateForm\nfrom gt_gora.gt_tools import day_interval\nfrom models import Kontrol\nfrom forms import KontrolRecordForm, KontrolFilterForm\n\nkontrol = Blueprint('kontrol', __name__, url_prefix='/kontrol')\n\n\n@kontrol.route('/last')\ndef last():\n curr = Kontrol.query.filter(or_(Kontrol.date2 == None, Kontrol.date2 == 0)).order_by(Kontrol.date.desc()).all()\n last = db.session.query(Kontrol.id\n , Kontrol.date\n # , Kontrol.date2\n , func.timediff(Kontrol.date2, Kontrol.date).label(\"dt\")\n , Kontrol.category\n , Kontrol.comment\n , Kontrol.message\n , Kontrol.object\n , Kontrol.obj_id\n , Kontrol.prim) \\\n .filter(Kontrol.category!=2) \\\n .order_by(Kontrol.date.desc()).limit(32).all()\n return render_template('kontrol/kontrol_last.html', ent1=curr, ent2=last)\n\n\n@kontrol.route('/byday', methods=['GET', 'POST'])\ndef by_day():\n form = DateForm()\n day = None\n\n if request.method == 'GET':\n lday = db.session.query(Kontrol.date).order_by(Kontrol.date.desc()).limit(1).scalar()\n day = day_interval(str(lday.date()))\n\n if request.method == 'POST':\n if form.validate():\n sday = request.form['date']\n day = day_interval(sday)\n\n entries = None\n if not day:\n flash(u'Ошибка в дате.', category=\"error\")\n else:\n form.date.data = day[0]\n entries = db.session.query(Kontrol.id\n , Kontrol.date\n # , Kontrol.date2\n , func.timediff(Kontrol.date2, Kontrol.date).label(\"dt\")\n , Kontrol.category\n , Kontrol.comment\n , Kontrol.message\n , Kontrol.object\n , Kontrol.obj_id\n , Kontrol.prim) \\\n .filter(Kontrol.date > day[0]) \\\n .filter(Kontrol.date < day[1]) \\\n .filter(Kontrol.category!=2) \\\n .order_by(Kontrol.date.desc()).all()\n return render_template('kontrol/kontrol_by_day.html', form=form, day=day, entries=entries)\n\n\n@kontrol.route('/statist')\ndef statist():\n return render_template('kontrol/kontrol_stat.html')\n\n\n@kontrol.route('/stat')\ndef stat_json():\n lday = db.session.query(Kontrol.date).order_by(Kontrol.date.desc()).limit(1).scalar()\n llday = datetime.date(lday - timedelta(days=30, ))\n\n by_cat = db.session.query(Kontrol.category, func.count(Kontrol.category).label(\"count\")) \\\n .filter(Kontrol.date > llday).group_by(Kontrol.category).order_by(Kontrol.category).all()\n\n by_obj = db.session.query(Kontrol.object, func.count(Kontrol.obj_id).label(\"count\"), Kontrol.obj_id) \\\n .filter(Kontrol.date > llday).group_by(Kontrol.obj_id).order_by(Kontrol.object).all()\n\n by_mes = db.session.query(Kontrol.message, func.count(Kontrol.mes_id).label(\"count\"), Kontrol.category, Kontrol.mes_id) \\\n .filter(Kontrol.date > llday).group_by(Kontrol.mes_id).order_by(Kontrol.message).all()\n\n l = [\n {\"key\": \"Категории\", \"values\": by_cat}\n , {\"key\": \"Типы\", \"values\": by_mes}\n , {\"key\": \"Обьекты\", \"values\": by_obj}\n ]\n return json.dumps(l)\n\n\n@kontrol.route('/records/', methods=['GET', 'POST'])\ndef by_id(id):\n rec = Kontrol.query.get_or_404(id)\n form = KontrolRecordForm(obj=rec)\n if form.validate_on_submit():\n form.populate_obj(rec)\n if form.prim.data.find(\"del\") >= 0:\n db.session.delete(rec)\n else:\n db.session.add(rec)\n db.session.commit()\n flash(u\"Данные изменены\")\n # return redirect(url_for('kontrol.by_id', id=id))\n return redirect(url_for('kontrol.last', id=id))\n return render_template('kontrol/kontrol_id.html', form=form, e=rec)\n\n\n@kontrol.route('/filter', methods=['GET', 'POST'])\ndef filter_view():\n lst = None\n form = 
KontrolFilterForm()\n if form.validate_on_submit():\n flash(u\"Данные изменены\")\n else:\n flash(u\"GET...\")\n return render_template('kontrol/kontrol_filter.html', form=form, entries=lst)\n\n\n@kontrol.route('/obj/')\ndef obj_id(id):\n rec = db.session.query(Kontrol.id\n , Kontrol.date\n , func.timediff(Kontrol.date2, Kontrol.date).label(\"dt\")\n , Kontrol.category\n , Kontrol.comment\n , Kontrol.message\n , Kontrol.object\n , Kontrol.obj_id\n , Kontrol.prim) \\\n .filter(Kontrol.obj_id == id) \\\n .order_by(Kontrol.date.desc()).limit(100).all()\n return render_template('kontrol/obj_id.html', ent=rec)\n\n@kontrol.route('/type/')\ndef mes_id(id):\n rec = db.session.query(Kontrol.id\n , Kontrol.date\n , func.timediff(Kontrol.date2, Kontrol.date).label(\"dt\")\n , Kontrol.category\n , Kontrol.comment\n , Kontrol.message\n , Kontrol.object\n , Kontrol.obj_id\n , Kontrol.mes_id\n , Kontrol.prim) \\\n .filter(Kontrol.mes_id == id) \\\n .order_by(Kontrol.date.desc()).limit(100).all()\n return render_template('kontrol/mes_id.html', ent=rec)\n\n\n@kontrol.route('/xbyday', methods=['GET', 'POST'])\ndef x_by_day():\n form = DateForm()\n day = None\n\n if request.method == 'GET':\n lday = db.session.query(Kontrol.date).order_by(Kontrol.date.desc()).limit(1).scalar()\n day = day_interval(str(lday.date()))\n\n if request.method == 'POST':\n if form.validate():\n sday = request.form['date']\n day = day_interval(sday)\n\n entries = None\n if not day:\n flash(u'Ошибка в дате.', category=\"error\")\n else:\n form.date.data = day[0]\n entries = db.session.query(Kontrol.id\n , Kontrol.date\n # , Kontrol.date2\n , func.timediff(Kontrol.date2, Kontrol.date).label(\"dt\")\n , Kontrol.category\n , Kontrol.comment\n , Kontrol.message\n , Kontrol.object\n , Kontrol.obj_id\n , Kontrol.mes_id\n , Kontrol.prim) \\\n .filter(Kontrol.date > day[0]) \\\n .filter(Kontrol.date < day[1]) \\\n .filter(Kontrol.category==2) \\\n .order_by(Kontrol.date.desc()).all()\n return render_template('kontrol/kontrol_x_by_day.html', form=form, day=day, entries=entries)\n\n","sub_path":"gt_gora/kontrol/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307114849","text":"import math\nimport tkinter\n\n\ndef parabola(canvas, size):\n for x in range(-size, size):\n y = (x * x) / size\n plot(canvas, x, y)\n\n\ndef circle(canvas, radius, g, h, outline=\"red\"):\n canvas.create_oval(\n g + radius, h + radius, g - radius, h - radius, outline=outline, width=2\n )\n\n\ndef draw_axes(canvas):\n canvas.update()\n x_origin = canvas.winfo_width() / 2\n y_origin = canvas.winfo_height() / 2\n canvas.configure(scrollregion=(-x_origin, -y_origin, x_origin, y_origin))\n canvas.create_line(-x_origin, 0, x_origin, 0, fill=\"black\")\n canvas.create_line(0, y_origin, 0, -y_origin, fill=\"black\")\n\n\ndef plot(canvas, x, y):\n canvas.create_line(\n x, -y, x + 1, -y + 1, fill=\"red\"\n ) # Invert y axis because Canvas y axis is flipped from math\n\n\nmainWindow = tkinter.Tk()\nmainWindow.title(\"Parabola\")\nmainWindow.geometry(\"640x480\")\n\nmainCanvas = tkinter.Canvas(mainWindow, width=640, height=480)\nmainCanvas.grid(row=0, column=0)\ndraw_axes(mainCanvas)\n\nparabola(mainCanvas, 100)\nparabola(mainCanvas, 200)\ncircle(mainCanvas, 50, 0, 0)\ncircle(mainCanvas, 100, 100, 100, 
outline=\"purple\")\n\nmainWindow.mainloop()\n","sub_path":"MoreFunctions/MoreFunctions.py","file_name":"MoreFunctions.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"87135839","text":"import pygame\r\nclass MyBallClass(pygame.sprite.Sprite):\r\n def __init__(self, image_file, location):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(image_file)\r\n self.rect = self.image.get_rect()\r\n self.rect.left, self.rect.top = location\r\n\r\n \r\n\r\nscreen = pygame.display.set_mode([300, 300])\r\nscreen.fill([255, 255, 255]) \r\nima_file = \"ball.png\"\r\nlocation = [10, 10]\r\nball = MyBallClass(ima_file, location)\r\nscreen.blit(ball.image,ball.rect)\r\npygame.display.flip()\r\n \r\n \r\n","sub_path":"2 семестр/pygame/1/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"185724900","text":"#libreries rest_framework\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n#django import\nfrom django.shortcuts import get_object_or_404\n#app torneo\nfrom applications.torneo.models import Tournament\n#app miscelanea\nfrom applications.miscelanea.models import TeamCategory\n#local impotrs\nfrom .serializers import (\n TeamAddSerializer,\n TeamListSerializer,\n TeamStateUpdateSerializer,\n TeamInscribedSerializer,\n TeamInscribedListSerializer,\n InscribedUpdateSerializer,\n TournamentByInscribedSerializador,\n)\n\nfrom .models import Team, Inscribed\n\n\nclass TeamByUserListViewSet(viewsets.ModelViewSet):\n '''\n servicio para listar equipos por usuario delegado\n '''\n serializer_class = TeamListSerializer\n\n def get_queryset(self):\n user=self.request.user\n queryset = Team.objects.by_delegate(user)\n return queryset\n\n\n\nclass TournamentTeamsListViewSet(viewsets.ModelViewSet):\n '''\n servicio para listar equipos de un torneo\n '''\n serializer_class = TeamInscribedListSerializer\n\n def get_queryset(self):\n pk = self.kwargs['pk']\n flat = self.kwargs['flat']\n queryset = Inscribed.objects.team_by_torneo(pk,flat)\n return queryset\n\n\nclass TeamAddView(viewsets.ViewSet):\n \"\"\"servicio para crear un nuevo equipo\"\"\"\n\n def create(self, request):\n serializado = TeamAddSerializer(data=request.data)\n if serializado.is_valid():\n team_category = TeamCategory.objects.get(\n name=serializado.validated_data['team_category'],\n )\n #\n\n Team(\n name=serializado.validated_data['name'],\n origin=serializado.validated_data['origin'],\n #color=serializado.validated_data['color'],\n team_category=team_category,\n delegate=self.request.user,\n created_by=self.request.user,\n modified_by=self.request.user,\n ).save()\n else:\n print(serializado.errors)\n\n return Response()\n\n\nclass TeamStateUpdateViewSet(viewsets.ViewSet):\n '''\n servicio para cambiar estado de equipo\n '''\n def create(self, request):\n serializado = TeamStateUpdateSerializer(data=request.data)\n if serializado.is_valid():\n equipo = Team.objects.get(pk=serializado.validated_data['pk'])\n equipo.enable = serializado.validated_data['enable']\n equipo.modified_by = self.request.user\n equipo.save()\n else:\n print(serializado.errors)\n\n return Response()\n\n\nclass TeamInscribedViewSet(viewsets.ViewSet):\n '''\n servicio para solicitar inscripcion en torneo\n '''\n def create(self, request):\n serializado = TeamInscribedSerializer(data=request.data)\n if 
serializado.is_valid():\n equipo = Team.objects.get(pk=serializado.validated_data['team'])\n torneo = Tournament.objects.get(pk=serializado.validated_data['tournament'])\n #creamos o actualizamos incripcion\n #\n obj, created = Inscribed.objects.update_or_create(\n team=equipo,\n tournament=torneo,\n defaults={\n 'modified_by': self.request.user,\n 'created_by': self.request.user,\n 'rejected':False,\n }\n )\n obj.save()\n if not created:\n obj.rejected_count = obj.rejected_count + 1\n obj.save()\n else:\n print(serializado.errors)\n\n return Response()\n\n\nclass InscribedStateUpdateViewSet(viewsets.ViewSet):\n '''\n servicio para confirmar o rechazar inscripcion de equipo\n '''\n def create(self, request):\n serializado = InscribedUpdateSerializer(data=request.data)\n if serializado.is_valid():\n inscribed = Inscribed.objects.get(pk=serializado.validated_data['pk'])\n flat = serializado.validated_data['flat']\n if flat == '0':\n #confirmar\n inscribed.enable = True\n else:\n #rechazar\n inscribed.enable = False\n inscribed.rejected = True\n\n inscribed.modified_by = self.request.user\n inscribed.save()\n else:\n print (serializado.errors)\n\n return Response()\n\n\nclass TournamentByInscribedViewSet(viewsets.ModelViewSet):\n '''\n servicio para listar torneos en los que participa un delgate\n '''\n serializer_class = TournamentByInscribedSerializador\n\n def get_queryset(self):\n user=self.request.user\n queryset = Inscribed.objects.tournament_inscribed_by_user(user)\n return queryset\n","sub_path":"nliga/applications/equipo/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"484301153","text":"import os\n\npercentage_50 = 0.0\npercentage_99 = 0.0\n\nfor i in range(1, 11):\n\n file = open(str(i) + \".txt\", \"r\")\n print(\"File: \" + str(i) + \".txt\")\n # 50% starts at line 47\n # 99% line is at 54\n\n for line in file:\n line_split = line.split()\n\n if line_split:\n if line_split[0] == \"50%\":\n print(line_split[1])\n percentage_50 += float(line_split[1])\n\n elif line_split[0] == \"99%\":\n print(line_split[1])\n percentage_99 += float(line_split[1])\n else:\n pass\n\n file.close()\n\nprint(\" \")\nprint(\"TOTALS: \")\nprint(\"Current 50%: \" + str(percentage_50))\nprint(\"Current 99%: \" + str(percentage_99))\n\npercentage_50 = percentage_50 / 10\npercentage_99 = percentage_99 / 10\n\nprint(\" \")\nprint(\"AVERAGES: \")\nprint(\"Current 50%: \" + str(percentage_50))\nprint(\"Current 99%: \" + str(percentage_99))","sub_path":"linux box/percentages.py","file_name":"percentages.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450954305","text":"#!/home/user/PycharmProjects/NewWork/bin/python\n# -*- coding: utf-8 -*-\n\n#from django.forms import ModelForm\nfrom django import forms\n# from django_ace import AceWidget\n\nTYPE_OF_LINKS_CHOICES = (\n (0, 'Click', ),\n (1, 'Unsubscribe', ),\n)\n\n\nclass CreativeForm(forms.ModelForm, ):\n # creative = forms.CharField(widget=AceWidget(), )\n\n class Meta:\n from modules.creative.models import Creative\n model = Creative\n fields = ['name', 'creative', ] # 'creative',\n\n\n#class LinksForCreativeForm(forms.Form, ):\n# link_to_real_link = forms.CharField(max_length=128, label=u'Ссылка на линк в Креативе', )\n# real_link = forms.CharField(max_length=256, label=u'Ссылка из креатива', )\n# type_of_link = 
forms.ChoiceField(choices=TYPE_OF_LINKS_CHOICES, required=True, label=u'Link type', )\n","sub_path":"modules/creative/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"331109562","text":"import tensorflow as tf\nimport numpy as np\nimport pyomo.environ as pyo\n\nfrom relumip import AnnModel\nfrom relumip.utils.visualization import plot_results_2d\n\n# Load the trained tensorflow model which will be embedded into the optimization problem.\ntf_model = tf.keras.models.load_model('data/peaks_3x10.h5')\n\n# Create a pyomo model into which the ANN will be embedded.\n# (A ConcreteModel is constructed on creation, so no separate construct() call is needed.)\nmodel = pyo.ConcreteModel()\n\n# All network variables will be added to a user-defined block within the model.\nmodel.ann = pyo.Block()\n\n# The network input and output variables have to be defined by the user.\n# For the network input, finite variable bounds have to be supplied (they can be inferred from the data used to train\n# the model, for example).\nmodel.ann.Input1 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))\nmodel.ann.Input2 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))\nmodel.ann.Output = pyo.Var(bounds=(-10000, 10000), within=pyo.Reals)\n\n# Input and output variables are stored in lists to be passed to the AnnModel.\ninput_vars = [model.ann.Input1, model.ann.Input2]\noutput_vars = [model.ann.Output]\n\n# A solver instance has to be defined for bound tightening. Make sure that an appropriate MIP solver is installed.\nsolver = pyo.SolverFactory('glpk')\n\n# Now the AnnModel instance can be created.\nann_model = AnnModel(tf_model=tf_model, modeling_language='PYOMO')\n\n# Input and output variables are connected to the network.\n# The block dedicated to the ANN model has to be passed as well.\nann_model.connect_network_input(opt_model=model.ann, input_vars=input_vars)\nann_model.connect_network_output(opt_model=model.ann, output_vars=output_vars)\n\n# This call generates the network formulation inside the block.\n# The bound tightening strategy has to be specified; for Pyomo the options are 'MIP' or 'LP' (default).\nann_model.embed_network_formulation(bound_tightening_strategy='LP', solver=solver)\n\n# In this example, no additional model components besides the ANN are considered.\n# We choose to minimize the network output and display the solved model.\nmodel.obj = pyo.Objective(expr=model.ann.Output, sense=pyo.minimize)\nres = solver.solve(model)\nmodel.display()\n\n# To visualize the computed results, a test data set is generated within the ANN input domain and the tensorflow model\n# is evaluated on it. The solution point computed above is extracted and shown on the response surface plot.\nsample_input = 6 * np.random.rand(10000, 2) - 3\nsample_output = tf_model.predict(sample_input)\nsol_point = [input_vars[0].value, input_vars[1].value, output_vars[0].value]\nplot_results_2d(sample_input, sample_output, sol_point=sol_point)\n\n# The model parameters computed during bound tightening can be saved for future use of the same model. 
See the\n# 'load_precomputed_parameters_example.py' file for more information on how to load precomputed parameters.\nann_model.save_param('data/peaks3x10_param')\n\n","sub_path":"examples/pyomo_example.py","file_name":"pyomo_example.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"21604982","text":"from django.db import models\n\n# Create your models here.\n\naccount_choices = (\n    ('savings', 'SAVINGS'),\n    ('current', 'CURRENT'),\n)\n\nclass AccountDetail(models.Model):\n    account_number = models.IntegerField()\n    balance = models.IntegerField()\n    account_type = models.CharField(max_length=8, choices=account_choices, default='savings')\n    email = models.EmailField(verbose_name=\"email\", max_length=60, unique=True)\n\n    def __str__(self):\n        return self.email\n\nclass Deposit(models.Model):\n    amount = models.IntegerField()\n    dep_account = models.IntegerField(default=0)\n    date = models.DateTimeField(blank=True, null=True, auto_now_add=True)\n    dep_email = models.EmailField(verbose_name=\"email\", max_length=60)\n\n    def __str__(self):\n        return self.dep_email\n\nclass Withdraw(models.Model):\n    withdraw_amount = models.IntegerField()\n    withdraw_account = models.IntegerField()\n    email = models.EmailField(verbose_name=\"email\", max_length=60)\n    date = models.DateTimeField(auto_now_add=True, blank=True)\n    def __str__(self):\n        return self.email\n\nclass Transfer(models.Model):\n    email = models.EmailField(verbose_name=\"email\", max_length=60)\n    receiver_account = models.IntegerField()\n    amount = models.IntegerField()\n    date = models.DateTimeField(auto_now_add=True, blank=True)\n    def __str__(self):\n        return self.email\n\nclass History(models.Model):\n    transaction = models.ForeignKey(Transfer, default=11, on_delete=models.SET_DEFAULT)\n    date = models.DateTimeField(auto_now_add=True, blank=True)\n\n\nclass Interest(models.Model):\n    today_interest = models.IntegerField()\n    email = models.EmailField(verbose_name=\"email\", max_length=60)\n    interest_account = models.IntegerField()\n    date = models.DateTimeField(auto_now_add=True, blank=True)\n\n\n","sub_path":"transaction/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"591693916","text":"from math import sqrt\n\n# Codeforces 1294C: split k into three distinct factors a < b < c, each >= 2\nn = int(input())\nfor _ in range(n):\n    a = 0\n    b = 0\n    k = int(input())\n    # the smallest factor of k becomes a\n    for i in range(2, int(sqrt(k)) + 1):\n        if k % i == 0:\n            a = i\n            break\n    if a > 0:\n        # look for a second, larger factor b of the cofactor k // a\n        # (integer division keeps the arithmetic exact for large k)\n        m = k // a\n        temp = int(sqrt(m)) + 1\n        for i in range(a + 1, temp + 1):\n            if m % i == 0:\n                b = i\n                break\n    if a == 0 or b == 0:\n        print(\"NO\")\n    elif k // (a * b) > b:\n        # the remaining cofactor is strictly larger than b, so all three are distinct\n        print(\"YES\")\n        print(a, b, k // (a * b))\n    else:\n        print(\"NO\")\n","sub_path":"1294c.py","file_name":"1294c.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"513533338","text":"# login form for lottery program #\r\n# calling modules\r\nimport datetime\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter.ttk import Combobox\r\nfrom PIL import Image, ImageTk\r\nfrom datetime import *\r\nimport rsaidnumber\r\nimport re\r\nfrom playsound import 
playsound\r\nimport requests\r\nimport smtplib\r\nimport uuid\r\nfrom random import sample\r\n\r\n# setup\r\nroot = Tk()\r\n# window size\r\nroot.geometry('800x600')\r\n# background colour\r\nroot.config(bg='#f9db17')\r\n# window title\r\nroot.title(\"Login\")\r\n# date and time\r\nnow = datetime.now()\r\n# variables\r\nwinnings = [0, 0, 20, 100.50, 2384, 8584, 10000000]\r\nlotto_list1 = []\r\nlotto_list2 = []\r\nlotto_list3 = []\r\ntotal_amount = 0\r\nuser_id = []\r\n\r\n# regular expression for validating email\r\nregex = '^(\\w|\\.|\\_|\\-)+[@](\\w|\\_|\\-|\\.)+[.]\\w{2,3}$'\r\n\r\n\r\nclass AllInOne:\r\n def __init__(self, master):\r\n # for an image\r\n self.canvas = Canvas(master, width=500, height=200, bg='#f9db17', borderwidth=0, highlightthickness=0)\r\n self.canvas.place(x=135, y=5)\r\n self.img = ImageTk.PhotoImage(Image.open('./Images/ITHUBA-NATIONAL-LOTTERY.png'))\r\n self.canvas.create_image(20, 20, anchor=NW, image=self.img)\r\n # label\r\n self.entry_age_lbl = Label(master, text=\"Please Enter Your ID Number:\", fg=\"black\", bg=\"#f9db17\",\r\n font=\"Consolas 12 bold\")\r\n self.entry_age_lbl.place(x=70, y=200)\r\n self.full_name_lbl = Label(master, text=\"Please Enter Your Full Name:\", fg=\"black\", bg=\"#f9db17\",\r\n font=\"Consolas 12 bold\")\r\n self.full_name_lbl.place(x=70, y=170)\r\n self.e_address_lbl = Label(root, text=\"Please Enter Your Email Address:\", fg=\"black\", bg=\"#f9db17\",\r\n font=\"Consolas 12 bold\")\r\n self.e_address_lbl.place(x=70, y=230)\r\n self.physical_lbl = Label(master, text=\"Please Enter Your Physical Address:\", fg=\"black\", bg=\"#f9db17\",\r\n font=\"Consolas 12 bold\")\r\n self.physical_lbl.place(x=70, y=260)\r\n self.t_c = Label(root, text=\"Terms & Conditions:\", bg=\"#f9db17\", font=\"Consolas 12 bold\", fg=\"red\")\r\n self.t_c.place(x=0, y=500)\r\n self.legal_age = Label(master, text=\"1. You Must Be 18 Years or Older To Enter\", bg=\"#f9db17\", fg=\"black\",\r\n font=\"Consolas 10 bold\")\r\n self.legal_age.place(x=0, y=530)\r\n self.legal_age2 = Label(master, text=\"2. You Must Have A Valid ID\", bg=\"#f9db17\", fg=\"black\",\r\n font=\"Consolas 10 bold\")\r\n self.legal_age2.place(x=0, y=550)\r\n self.legal_age3 = Label(master, text=\"3. 
User Must Be A SA Citizen\", bg=\"#f9db17\", fg=\"black\",\r\n                                font=\"Consolas 10 bold\")\r\n        self.legal_age3.place(x=0, y=570)\r\n        # entry label\r\n        self.age_lbl = Entry(master)\r\n        self.age_lbl.place(x=450, y=200)\r\n        self.full_name_lbl2 = Entry(master)\r\n        self.full_name_lbl2.place(x=450, y=170)\r\n        self.physical_lbl2 = Entry(master)\r\n        self.physical_lbl2.place(x=450, y=260)\r\n\r\n        # buttons\r\n        self.confirm_btn = Button(master, borderwidth=\"10\", text=\"Verify\", font=\"Consolas 15 bold\", fg=\"white\",\r\n                                  bg=\"black\", command=self.age_verification)\r\n        self.confirm_btn.place(x=296, y=320)\r\n        self.clear_btn = Button(master, borderwidth=\"10\", text=\"Clear\", font=\"Consolas 15 bold\", fg=\"white\", bg=\"black\",\r\n                                command=self.clear_input)\r\n        self.clear_btn.place(x=70, y=320)\r\n        self.exit_btn = Button(master, borderwidth=\"10\", text=\"Exit\", font=\"Consolas 15 bold\", fg=\"white\", bg=\"black\",\r\n                               command=self.exit_program)\r\n        self.exit_btn.place(x=523, y=320)\r\n\r\n        self.e_address_lbl2 = Entry(master)\r\n        self.e_address_lbl2.place(x=450, y=230)\r\n\r\n    def age_verification(self):\r\n        email = self.e_address_lbl2.get()\r\n        try:\r\n            id_number = rsaidnumber.parse(self.age_lbl.get())\r\n            age = (datetime.today() - id_number.date_of_birth) // timedelta(days=365.25)\r\n            if len(self.full_name_lbl2.get()) == 0 or len(self.physical_lbl2.get()) == 0:\r\n                messagebox.showerror(\"Error\", \"Please Fill In Each Section\")\r\n            elif not re.search(regex, email):\r\n                # validate the email before logging the player in\r\n                messagebox.showinfo(\"Failure\", \"Invalid Email\")\r\n            elif age >= 18:\r\n                # player id: keep the generated id, since list.append() returns None\r\n                player_id = uuid.uuid4()\r\n                user_id.append(player_id)\r\n                # appending text\r\n                f = open(\"details.txt\", \"a+\")\r\n                f.write(\r\n                    self.full_name_lbl2.get() + \" \" + self.age_lbl.get() + \" \" + self.e_address_lbl2.get() + \" \" + self.physical_lbl2.get() + \" \" + \"Logged into App at:\" + str(\r\n                        now) + \"\\n\" + \"Your Player ID Is: \" + str(player_id))\r\n                f.close()\r\n                messagebox.showinfo(\"Success\", \"Let's Play\")\r\n                playsound(\"./Audio/lotto-sound.mp3\")\r\n                root.withdraw()\r\n                self.lotto_window()\r\n            else:\r\n                messagebox.showinfo('Failure', \"You Are Too Young To Play\")\r\n        except ValueError:\r\n            messagebox.showinfo(\"Failure\", \"Please Enter A Valid 13 Digit ID Number\")\r\n\r\n    def clear_input(self):\r\n        self.age_lbl.delete(0, END)\r\n        self.physical_lbl2.delete(0, END)\r\n        self.e_address_lbl2.delete(0, END)\r\n        self.full_name_lbl2.delete(0, END)\r\n\r\n    def exit_program(self):\r\n        return root.destroy()\r\n\r\n    def lotto_window(self):\r\n        # setup\r\n        lotto = Tk()\r\n        # window size\r\n        lotto.geometry('800x500')\r\n        # background colour\r\n        lotto.config(bg='#f9db17')\r\n        # window title\r\n        lotto.title(\"The South African National Lottery\")\r\n\r\n        def play_again():\r\n            lotto_list1.clear()\r\n            lotto_list2.clear()\r\n            lotto_list3.clear()\r\n            lotto_nums1.config(text=\"\", bg=\"#f9db17\")\r\n            lotto_nums2.config(text=\"\", bg=\"#f9db17\")\r\n            lotto_nums3.config(text=\"\", bg=\"#f9db17\")\r\n            lotto_nums4.config(text=\"\", bg=\"#f9db17\")\r\n            lotto_nums5.config(text=\"\", bg=\"#f9db17\")\r\n            lotto_nums6.config(text=\"\", bg=\"#f9db17\")\r\n            num_dis1.config(text=\"\")\r\n            num_dis7.config(text=\"\")\r\n            num_dis13.config(text=\"\")\r\n            num1.config(state=NORMAL)\r\n            num2.config(state=NORMAL)\r\n            num3.config(state=NORMAL)\r\n            num4.config(state=NORMAL)\r\n            num5.config(state=NORMAL)\r\n            num6.config(state=NORMAL)\r\n            num7.config(state=NORMAL)\r\n            num8.config(state=NORMAL)\r\n            num9.config(state=NORMAL)\r\n
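            # buttons 10-49 are re-enabled below in the same way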
num10.config(state=NORMAL)\r\n num11.config(state=NORMAL)\r\n num12.config(state=NORMAL)\r\n num13.config(state=NORMAL)\r\n num14.config(state=NORMAL)\r\n num15.config(state=NORMAL)\r\n num16.config(state=NORMAL)\r\n num17.config(state=NORMAL)\r\n num18.config(state=NORMAL)\r\n num19.config(state=NORMAL)\r\n num20.config(state=NORMAL)\r\n num21.config(state=NORMAL)\r\n num22.config(state=NORMAL)\r\n num23.config(state=NORMAL)\r\n num24.config(state=NORMAL)\r\n num25.config(state=NORMAL)\r\n num26.config(state=NORMAL)\r\n num27.config(state=NORMAL)\r\n num28.config(state=NORMAL)\r\n num29.config(state=NORMAL)\r\n num30.config(state=NORMAL)\r\n num31.config(state=NORMAL)\r\n num32.config(state=NORMAL)\r\n num33.config(state=NORMAL)\r\n num34.config(state=NORMAL)\r\n num35.config(state=NORMAL)\r\n num36.config(state=NORMAL)\r\n num37.config(state=NORMAL)\r\n num38.config(state=NORMAL)\r\n num39.config(state=NORMAL)\r\n num40.config(state=NORMAL)\r\n num41.config(state=NORMAL)\r\n num42.config(state=NORMAL)\r\n num43.config(state=NORMAL)\r\n num44.config(state=NORMAL)\r\n num45.config(state=NORMAL)\r\n num46.config(state=NORMAL)\r\n num47.config(state=NORMAL)\r\n num48.config(state=NORMAL)\r\n num49.config(state=NORMAL)\r\n\r\n def exit_program2():\r\n return root.destroy()\r\n\r\n def create_sets(num):\r\n if len(lotto_list1) < 6 and num not in lotto_list1:\r\n lotto_list1.append(num)\r\n num_dis1.config(text=lotto_list1)\r\n elif len(lotto_list1) == 6 and len(lotto_list2) < 6 and num not in lotto_list2:\r\n lotto_list2.append(num)\r\n num_dis7.config(text=lotto_list2)\r\n elif len(lotto_list1) == 6 and len(lotto_list2) == 6 and len(lotto_list3) < 6 and num not in lotto_list3:\r\n lotto_list3.append(num)\r\n num_dis13.config(text=lotto_list3)\r\n\r\n if len(lotto_list3) == 6:\r\n num1.config(state=DISABLED)\r\n num2.config(state=DISABLED)\r\n num3.config(state=DISABLED)\r\n num4.config(state=DISABLED)\r\n num5.config(state=DISABLED)\r\n num6.config(state=DISABLED)\r\n num7.config(state=DISABLED)\r\n num8.config(state=DISABLED)\r\n num9.config(state=DISABLED)\r\n num10.config(state=DISABLED)\r\n num11.config(state=DISABLED)\r\n num12.config(state=DISABLED)\r\n num13.config(state=DISABLED)\r\n num14.config(state=DISABLED)\r\n num15.config(state=DISABLED)\r\n num16.config(state=DISABLED)\r\n num17.config(state=DISABLED)\r\n num18.config(state=DISABLED)\r\n num19.config(state=DISABLED)\r\n num20.config(state=DISABLED)\r\n num21.config(state=DISABLED)\r\n num22.config(state=DISABLED)\r\n num23.config(state=DISABLED)\r\n num24.config(state=DISABLED)\r\n num25.config(state=DISABLED)\r\n num26.config(state=DISABLED)\r\n num27.config(state=DISABLED)\r\n num28.config(state=DISABLED)\r\n num29.config(state=DISABLED)\r\n num30.config(state=DISABLED)\r\n num31.config(state=DISABLED)\r\n num32.config(state=DISABLED)\r\n num33.config(state=DISABLED)\r\n num34.config(state=DISABLED)\r\n num35.config(state=DISABLED)\r\n num36.config(state=DISABLED)\r\n num37.config(state=DISABLED)\r\n num38.config(state=DISABLED)\r\n num39.config(state=DISABLED)\r\n num40.config(state=DISABLED)\r\n num41.config(state=DISABLED)\r\n num42.config(state=DISABLED)\r\n num43.config(state=DISABLED)\r\n num44.config(state=DISABLED)\r\n num45.config(state=DISABLED)\r\n num46.config(state=DISABLED)\r\n num47.config(state=DISABLED)\r\n num48.config(state=DISABLED)\r\n num49.config(state=DISABLED)\r\n\r\n def generate_nums():\r\n global total_amount\r\n winnings_won1 = []\r\n winnings_won2 = []\r\n winnings_won3 = []\r\n\r\n active = 
0\r\n            active2 = 0\r\n            active3 = 0\r\n\r\n            count_1 = 0\r\n            count_2 = 0\r\n            count_3 = 0\r\n\r\n            # generating random numbers\r\n            gen_nums = sample(range(1, 49), 6)\r\n            gen_nums.sort()  # sorting generated nums\r\n\r\n            # set 1 is mandatory; sets 2 and 3 are optional extra boards\r\n            if len(lotto_list1) < 6:\r\n                messagebox.showerror(\"Error\", \"Please Select 6 Numbers\")\r\n                return\r\n            elif len(lotto_list2) < 6:\r\n                messagebox.showinfo(\"Attention\", \"You Did Not Play The Second Set\")\r\n            elif len(lotto_list3) < 6:\r\n                messagebox.showinfo(\"Attention\", \"You Did Not Play The Third Set\")\r\n\r\n            # display in empty label\r\n            lotto_nums1.configure(text=gen_nums[0], bg=\"white\")\r\n            lotto_nums2.configure(text=gen_nums[1], bg=\"white\")\r\n            lotto_nums3.configure(text=gen_nums[2], bg=\"white\")\r\n            lotto_nums4.configure(text=gen_nums[3], bg=\"white\")\r\n            lotto_nums5.configure(text=gen_nums[4], bg=\"white\")\r\n            lotto_nums6.configure(text=gen_nums[5], bg=\"red\")\r\n\r\n            for x in gen_nums:\r\n                if len(lotto_list1) == 6:\r\n                    active = 1\r\n                    if x in lotto_list1:\r\n                        count_1 += 1\r\n                        winnings_won1.append(lotto_list1)\r\n                if len(lotto_list2) == 6:\r\n                    active2 = 1\r\n                    if x in lotto_list2:\r\n                        count_2 += 1\r\n                        winnings_won2.append(lotto_list2)\r\n                if len(lotto_list3) == 6:\r\n                    active3 = 1\r\n                    if x in lotto_list3:\r\n                        count_3 += 1\r\n                        winnings_won3.append(lotto_list3)\r\n            # one set\r\n            if active == 1 and active2 == 0 and active3 == 0:\r\n                total_amount = winnings[count_1]\r\n                if count_1 <= 1:\r\n                    messagebox.showinfo(\"Bad Luck!\",\r\n                                        str(count_1) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n                                            total_amount))\r\n                    f = open(\"details.txt\", \"a+\")\r\n                    f.write(\r\n                        \"\\n\" + \"Number Of Correct Guesses: \" + str(count_1) + \" Winnings: \" + \"R\" + str(total_amount))\r\n                    f.close()\r\n                    results = messagebox.askquestion(\"Choose\", \"Would You Like To Play Again?\")\r\n                    if results == \"yes\":\r\n                        play_again()\r\n                    else:\r\n                        lotto.withdraw()\r\n                        self.bank_window()\r\n                elif count_1 >= 2:\r\n                    messagebox.showinfo(\"Congratulations!\",\r\n                                        str(count_1) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n                                            total_amount))\r\n                    f = open(\"details.txt\", \"a+\")\r\n                    f.write(\r\n                        \"\\n\" + \"Number Of Correct Guesses: \" + str(count_1) + \" Winnings: \" + \"R\" + str(total_amount))\r\n                    f.close()\r\n                    result = messagebox.askquestion(\"Choose\", \"Would You Like To Convert Your Winnings?\")\r\n                    if result == \"yes\":\r\n                        lotto.withdraw()\r\n                        self.currency_window()\r\n                    else:\r\n                        lotto.withdraw()\r\n                        self.bank_window()\r\n            # two sets (the third set must be inactive here, or the three-set case below is unreachable)\r\n            elif active == 1 and active2 == 1 and active3 == 0:\r\n                total_amount = winnings[count_1] + winnings[count_2]\r\n\r\n                if count_2 <= 1:\r\n                    messagebox.showinfo(\"Bad Luck!\",\r\n                                        str(count_2) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n                                            total_amount))\r\n                    f = open(\"details.txt\", \"a+\")\r\n                    f.write(\"\\n\" + \"Number Of Correct Guesses in Set 1: \" + str(count_1) + \" Number Of Correct Guesses in Set 2: \" +\r\n                            str(count_2) + \" Winnings: \" + \"R\" + str(total_amount))\r\n                    f.close()\r\n                    results = messagebox.askquestion(\"Choose\", \"Would You Like To Play Again?\")\r\n                    if results == \"yes\":\r\n                        play_again()\r\n                    else:\r\n                        lotto.withdraw()\r\n                        self.bank_window()\r\n                elif count_2 >= 2:\r\n                    messagebox.showinfo(\"Congratulations!\",\r\n                                        str(count_1) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n
winnings[count_1]))\r\n messagebox.showinfo(\"Congratulations!\",\r\n str(count_2) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n winnings[count_2]))\r\n messagebox.showinfo(\"Congrats\", \"Your total winnings are: \" + str(total_amount))\r\n f = open(\"details.txt\", \"a+\")\r\n f.write(\"\\n\" + \"Number Of Correct Guesses in Set 1 : \" + str(count_1) + \"Number Of Correct \"\r\n \"Guesses in Set 2: \" +\r\n str(count_2) + \"Winnings: \" + \"R\" + str(total_amount))\r\n f.close()\r\n result = messagebox.askquestion(\"Choose\", \"Would You Like To Convert Your Winnings?\")\r\n if result == \"yes\":\r\n lotto.withdraw()\r\n self.currency_window()\r\n else:\r\n lotto.withdraw()\r\n self.bank_window()\r\n # 3 sets\r\n elif active == 1 and active2 == 1 and active3 == 1:\r\n total_amount = winnings[count_1] + winnings[count_2] + winnings[count_3]\r\n if count_3 <= 1:\r\n messagebox.showinfo(\"Bad Luck!\",\r\n str(count_3) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n total_amount))\r\n f = open(\"details.txt\", \"a+\")\r\n f.write(\"\\n\" + \"Number Of Correct Guesses in Set 1 : \" + str(count_1) + \"Number Of Correct \"\r\n \"Guesses in Set 2: \" +\r\n str(count_2) + \"Number Of Correct Guesses in Set 3: \" + str(\r\n count_3) + \"Winnings: \" + \"R\" + str(total_amount))\r\n f.close()\r\n results = messagebox.askquestion(\"Choose\", \"Would You Like To Play Again?\")\r\n if results == \"yes\":\r\n play_again()\r\n else:\r\n lotto.withdraw()\r\n self.bank_window()\r\n elif count_3 >= 2:\r\n messagebox.showinfo(\"Congratulations!\",\r\n str(count_1) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n winnings[count_1]))\r\n messagebox.showinfo(\"Congratulations!\",\r\n str(count_2) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n winnings[count_2]))\r\n messagebox.showinfo(\"Congratulations!\",\r\n str(count_3) + \" \" + \"Numbers\" + \"\\n\" + \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n winnings[count_3]))\r\n messagebox.showinfo(\"Congratulations!\", \"Your Winnings Are:\" + \" \" + \"R\" + str(\r\n total_amount))\r\n f = open(\"details.txt\", \"a+\")\r\n f.write(\"\\n\" + \"Number Of Correct Guesses in Set 1 : \" + str(count_1) + \"Number Of Correct \"\r\n \"Guesses in Set 2: \" +\r\n str(count_2) + \"Number Of Correct Guesses in Set 3: \" + str(\r\n count_3) + \"Winnings: \" + \"R\" + str(total_amount))\r\n f.close()\r\n result = messagebox.askquestion(\"Choose\", \"Would You Like To Convert Your Winnings?\")\r\n if result == \"yes\":\r\n lotto.withdraw()\r\n self.currency_window()\r\n else:\r\n lotto.withdraw()\r\n self.bank_window()\r\n\r\n # labels\r\n num_lbl = Label(lotto, text=\"Set 1:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n num_lbl.place(x=600, y=5)\r\n num_dis1 = Label(lotto, text=\"\", bg=\"#f9db17\")\r\n num_dis1.place(x=570, y=50)\r\n\r\n num_lbl2 = Label(lotto, text=\"Set 2:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n num_lbl2.place(x=600, y=100)\r\n num_dis7 = Label(lotto, text=\"\", bg=\"#f9db17\")\r\n num_dis7.place(x=570, y=145)\r\n\r\n num_lbl3 = Label(lotto, text=\"Set 3:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n num_lbl3.place(x=600, y=195)\r\n num_dis13 = Label(lotto, text=\"\", bg=\"#f9db17\")\r\n num_dis13.place(x=570, y=240)\r\n\r\n lotto_nums_lbl = Label(lotto, text=\"The Lotto Numbers Are:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n lotto_nums_lbl.place(x=520, y=280)\r\n lotto_nums1 = 
Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums1.place(x=550, y=310)\r\n lotto_nums2 = Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums2.place(x=580, y=310)\r\n lotto_nums3 = Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums3.place(x=610, y=310)\r\n lotto_nums4 = Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums4.place(x=640, y=310)\r\n lotto_nums5 = Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums5.place(x=670, y=310)\r\n lotto_nums6 = Label(lotto, text=\"\", bg=\"#f9db17\", width=4)\r\n lotto_nums6.place(x=700, y=310)\r\n\r\n # buttons\r\n num1 = Button(lotto, text=\"01\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(1))\r\n num1.place(x=5, y=5)\r\n num2 = Button(lotto, text=\"02\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(2))\r\n num2.place(x=70, y=5)\r\n num3 = Button(lotto, text=\"03\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(3))\r\n num3.place(x=135, y=5)\r\n num4 = Button(lotto, text=\"04\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(4))\r\n num4.place(x=200, y=5)\r\n num5 = Button(lotto, text=\"05\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(5))\r\n num5.place(x=265, y=5)\r\n num6 = Button(lotto, text=\"06\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(6))\r\n num6.place(x=330, y=5)\r\n num7 = Button(lotto, text=\"07\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(7))\r\n num7.place(x=395, y=5)\r\n num8 = Button(lotto, text=\"08\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(8))\r\n num8.place(x=5, y=50)\r\n num9 = Button(lotto, text=\"09\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(9))\r\n num9.place(x=70, y=50)\r\n num10 = Button(lotto, text=\"10\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(10))\r\n num10.place(x=135, y=50)\r\n num11 = Button(lotto, text=\"11\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(11))\r\n num11.place(x=200, y=50)\r\n num12 = Button(lotto, text=\"12\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(12))\r\n num12.place(x=265, y=50)\r\n num13 = Button(lotto, text=\"13\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(13))\r\n num13.place(x=330, y=50)\r\n num14 = Button(lotto, text=\"14\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(14))\r\n num14.place(x=395, y=50)\r\n num15 = Button(lotto, text=\"15\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(15))\r\n num15.place(x=5, y=100)\r\n num16 = Button(lotto, text=\"16\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(16))\r\n num16.place(x=70, y=100)\r\n num17 = Button(lotto, text=\"17\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(17))\r\n num17.place(x=135, y=100)\r\n num18 = Button(lotto, text=\"18\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(18))\r\n num18.place(x=200, y=100)\r\n num19 = Button(lotto, text=\"19\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(19))\r\n num19.place(x=265, y=100)\r\n num20 = Button(lotto, text=\"20\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(20))\r\n num20.place(x=330, y=100)\r\n num21 = Button(lotto, text=\"21\", bg=\"black\", 
fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(21))\r\n num21.place(x=395, y=100)\r\n num22 = Button(lotto, text=\"22\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(22))\r\n num22.place(x=5, y=150)\r\n num23 = Button(lotto, text=\"23\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(23))\r\n num23.place(x=70, y=150)\r\n num24 = Button(lotto, text=\"24\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(24))\r\n num24.place(x=135, y=150)\r\n num25 = Button(lotto, text=\"25\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(25))\r\n num25.place(x=200, y=150)\r\n num26 = Button(lotto, text=\"26\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(26))\r\n num26.place(x=265, y=150)\r\n num27 = Button(lotto, text=\"27\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(27))\r\n num27.place(x=330, y=150)\r\n num28 = Button(lotto, text=\"28\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(28))\r\n num28.place(x=395, y=150)\r\n num29 = Button(lotto, text=\"29\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(29))\r\n num29.place(x=5, y=200)\r\n num30 = Button(lotto, text=\"30\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(30))\r\n num30.place(x=70, y=200)\r\n num31 = Button(lotto, text=\"31\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(31))\r\n num31.place(x=135, y=200)\r\n num32 = Button(lotto, text=\"32\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(32))\r\n num32.place(x=200, y=200)\r\n num33 = Button(lotto, text=\"33\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(33))\r\n num33.place(x=265, y=200)\r\n num34 = Button(lotto, text=\"34\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(34))\r\n num34.place(x=330, y=200)\r\n num35 = Button(lotto, text=\"35\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(35))\r\n num35.place(x=395, y=200)\r\n num36 = Button(lotto, text=\"36\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(36))\r\n num36.place(x=5, y=250)\r\n num37 = Button(lotto, text=\"37\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(37))\r\n num37.place(x=70, y=250)\r\n num38 = Button(lotto, text=\"38\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(38))\r\n num38.place(x=135, y=250)\r\n num39 = Button(lotto, text=\"39\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(39))\r\n num39.place(x=200, y=250)\r\n num40 = Button(lotto, text=\"40\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(40))\r\n num40.place(x=265, y=250)\r\n num41 = Button(lotto, text=\"41\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(41))\r\n num41.place(x=330, y=250)\r\n num42 = Button(lotto, text=\"42\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(42))\r\n num42.place(x=395, y=250)\r\n num43 = Button(lotto, text=\"43\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(43))\r\n num43.place(x=5, y=300)\r\n num44 = Button(lotto, text=\"44\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(44))\r\n num44.place(x=70, y=300)\r\n num45 = Button(lotto, text=\"45\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, 
command=lambda: create_sets(45))\r\n        num45.place(x=135, y=300)\r\n        num46 = Button(lotto, text=\"46\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(46))\r\n        num46.place(x=200, y=300)\r\n        num47 = Button(lotto, text=\"47\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(47))\r\n        num47.place(x=265, y=300)\r\n        num48 = Button(lotto, text=\"48\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(48))\r\n        num48.place(x=330, y=300)\r\n        num49 = Button(lotto, text=\"49\", bg=\"black\", fg=\"#f9db17\", borderwidth=5, command=lambda: create_sets(49))\r\n        num49.place(x=395, y=300)\r\n        gen_lotto = Button(lotto, text=\"Generate Numbers\", borderwidth=10, bg=\"black\", fg=\"#f9db17\",\r\n                           font=\"Consolas 12 bold\",\r\n                           command=generate_nums)\r\n        gen_lotto.place(x=280, y=400)\r\n        clear_btn = Button(lotto, text=\"Clear\", borderwidth=10, bg=\"black\", fg=\"#f9db17\", font=\"Consolas 12 bold\",\r\n                           command=play_again)\r\n        clear_btn.place(x=120, y=400)\r\n        exit_btn = Button(lotto, text=\"Exit\", borderwidth=10, bg=\"black\", fg=\"#f9db17\", font=\"Consolas 12 bold\",\r\n                          command=exit_program2)\r\n        exit_btn.place(x=500, y=400)\r\n\r\n        lotto.mainloop()\r\n\r\n    def currency_window(self):\r\n        # setting up window\r\n        currency = Toplevel()\r\n        # window size\r\n        currency.geometry(\"500x500\")\r\n        # window color\r\n        currency.config(bg=\"#f9db17\")\r\n        # window title\r\n        currency.title(\"Currency Convertor\")\r\n        # for an image: draw it on this window and keep a reference so it is not garbage-collected\r\n        canvas = Canvas(currency, width=500, height=200, bg='#f9db17', borderwidth=0, highlightthickness=0)\r\n        canvas.place(x=-15, y=0)\r\n        img2 = PhotoImage(file='./Images/ITHUBA-NATIONAL-LOTTERY.png')\r\n        canvas.create_image(20, 20, anchor=NW, image=img2)\r\n        canvas.image = img2\r\n\r\n        # calling API\r\n        response = requests.get(\"https://v6.exchangerate-api.com/v6/48fdd8d31b8c3c5e6b84fa6f/latest/ZAR\")\r\n        response = response.json()\r\n\r\n        conversion_rate = response[\"conversion_rates\"]\r\n\r\n        currency_options = []\r\n        for i in conversion_rate.keys():\r\n            currency_options.append(i)\r\n\r\n        # exit function\r\n        def exit_program3():\r\n            return root.destroy()\r\n\r\n        # clear function\r\n        def clear_program3():\r\n            display_amount.config(text=\"\", bg=\"#f9db17\")\r\n\r\n        def convert_currency():\r\n            playsound(\"./Audio/counting-money.mp3\")\r\n            amount_entered = float(amount_label.cget('text'))\r\n            formula = round(amount_entered * conversion_rate[currency_2_cb.get()], 2)\r\n            display_amount.config(text=float(formula))\r\n            # log the winnings only once the converted amount has actually been computed\r\n            f = open(\"details.txt\", \"a+\")\r\n            f.write(\"\\n\" + \"Converted Winnings: \" + str(formula))\r\n            f.close()\r\n            messagebox.showinfo(\"Success\", \"Please Enter Your Banking Details In The Next Window\")\r\n            currency.withdraw()\r\n            self.bank_window()\r\n\r\n        amount = Label(currency, text=\"Your Amount Won:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        amount.place(x=5, y=180)\r\n        currency_1 = Label(currency, text=\"From Currency:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        currency_1.place(x=5, y=230)\r\n        currency_2 = Label(currency, text=\"To Currency:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        currency_2.place(x=5, y=280)\r\n        converted_amount = Label(currency, text=\"Converted Amount:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        converted_amount.place(x=5, y=330)\r\n        display_amount = Label(currency, text=\"\", bg=\"#f9db17\")\r\n
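        # shows the result of convert_currency(); cleared again by clear_program3()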
        display_amount.place(x=190, y=330)\r\n        currency_value = Label(currency, text=\"Default Currency is set to Rands(ZAR)\", bg=\"#f9db17\",\r\n                               font=\"Consolas 10 bold\")\r\n        currency_value.place(x=190, y=230)\r\n\r\n        # entry\r\n        amount_label = Label(currency, text=total_amount, bg=\"#f9db17\")\r\n        amount_label.place(x=190, y=180)\r\n\r\n        # combo box\r\n        currency_2_cb = Combobox(currency)\r\n        currency_2_cb['values'] = currency_options\r\n        currency_2_cb['state'] = 'readonly'\r\n        currency_2_cb.set('Select Currency')\r\n        currency_2_cb.place(x=190, y=280)\r\n        display_amount.config(text='')\r\n\r\n        # buttons\r\n        exit_btn2 = Button(currency, borderwidth=\"10\", text=\"Exit\", font=\"Consolas 15 bold\", fg=\"white\", bg=\"black\",\r\n                           command=exit_program3)\r\n        exit_btn2.place(x=400, y=400)\r\n        clear_btn = Button(currency, borderwidth=\"10\", text=\"Clear\", font=\"Consolas 15 bold\", fg=\"white\", bg=\"black\",\r\n                           command=clear_program3)\r\n        clear_btn.place(x=5, y=400)\r\n        convert_btn = Button(currency, borderwidth=\"10\", text=\"Convert\", font=\"Consolas 15 bold\", fg=\"white\",\r\n                             bg=\"black\",\r\n                             command=convert_currency)\r\n        convert_btn.place(x=203, y=400)\r\n\r\n        currency.mainloop()  # to run the program\r\n\r\n    def bank_window(self):\r\n        bank_wndw = Toplevel()\r\n        # setting up the window\r\n        bank_wndw.geometry(\"500x500\")  # window size\r\n        bank_wndw.title(\"Banking Details\")  # window title\r\n        bank_wndw.config(bg=\"#f9db17\")\r\n\r\n        global user_id\r\n\r\n        # bank function\r\n        def bank_number():\r\n            # the account holder's name may contain letters and spaces only\r\n            if not acc_name_entry.get().replace(\" \", \"\").isalpha():\r\n                messagebox.showerror(\"Error\", \"Please Use Letters Only\")\r\n                return\r\n            bank_num = acc_num_entry.get()\r\n            branch = branch_num_entry.get()\r\n            if not (bank_num.isdigit() and branch.isdigit()):\r\n                messagebox.showinfo(\"Invalid\", \"Please Use Digits Only\")\r\n            elif len(bank_num) == 11 and len(branch) == 6:\r\n                f = open(\"details.txt\", \"a+\")\r\n                f.write(\r\n                    acc_name_entry.get() + \" \" + acc_num_entry.get() + \" \" + branch_num_entry.get() + \" \" + combo_box_banks.get() + \"\\n\")\r\n                f.close()\r\n                # creates SMTP session\r\n                s = smtplib.SMTP('smtp.gmail.com', 587)\r\n                sender_email_id = 'jeandre.lotto@gmail.com'\r\n                receiver_email_id = self.e_address_lbl2.get()\r\n                password = \"lifechoices2021\"\r\n                p_id = user_id[-1] if user_id else \"N/A\"\r\n                # start TLS for security\r\n                s.starttls()\r\n                # Authentication\r\n                s.login(sender_email_id, password)\r\n                # message to be sent\r\n                message = \"Congratulations\\n\"\r\n                message = message + \"Your Winnings Are: \" + str(total_amount) + \"\\n\" + \"Your Player ID is: \" + str(\r\n                    p_id) + \"\\n\" + \"Your Banking Details Are: \" + \\\r\n                          acc_name_entry.get() + \" \" + acc_num_entry.get() + \" \" + branch_num_entry.get() + \" \" + \\\r\n                          combo_box_banks.get()\r\n                # sending the mail\r\n                s.sendmail(sender_email_id, receiver_email_id, message)\r\n                # terminating the session\r\n                s.quit()\r\n                playsound(\"./Audio/submit.mp3\")\r\n                messagebox.showinfo(\"Success\", \"Please Check Your Email For Further Instructions\")\r\n            else:\r\n                messagebox.showinfo(\"Failure\",\r\n                                    \"Please Enter An 11 Digit Bank Account Number and A 6 Digit Branch Code\")\r\n\r\n        # clear function\r\n        def clear_input():\r\n            acc_name_entry.delete(0, END)\r\n            acc_num_entry.delete(0, END)\r\n            branch_num_entry.delete(0, END)\r\n\r\n        # exit function\r\n        def exit_program():\r\n            return root.destroy()\r\n\r\n        # for an image: draw it on this window and keep a reference so it is not garbage-collected\r\n        canvas = Canvas(bank_wndw, width=450, height=200, bg='#f9db17', borderwidth=0, highlightthickness=0)\r\n        canvas.place(x=-15, y=5)\r\n        img = ImageTk.PhotoImage(Image.open('./Images/ITHUBA-NATIONAL-LOTTERY.png'))\r\n        canvas.create_image(20, 20, anchor=NW, image=img)\r\n        canvas.image = img\r\n\r\n        # account labels\r\n        acc_name = Label(bank_wndw, text=\"Account Holder:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        acc_name.place(x=50, y=180)\r\n        acc_num = Label(bank_wndw, text=\"Account Number:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        acc_num.place(x=50, y=230)\r\n        branch_num = Label(bank_wndw, text=\"Branch Code:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        branch_num.place(x=50, y=280)\r\n        acc_bank = Label(bank_wndw, text=\"Select Your Bank:\", font=\"Consolas 12 bold\", bg=\"#f9db17\")\r\n        acc_bank.place(x=50, y=330)\r\n\r\n        # account entries\r\n        acc_name_entry = Entry(bank_wndw)\r\n        acc_name_entry.place(x=250, y=180)\r\n        acc_num_entry = Entry(bank_wndw)\r\n        acc_num_entry.place(x=250, y=230)\r\n        branch_num_entry = Entry(bank_wndw)\r\n        branch_num_entry.place(x=250, y=280)\r\n\r\n        # buttons\r\n        submit_btn = Button(bank_wndw, text=\"Submit\", font=\"Consolas 12 bold\", bg=\"black\", fg=\"#f9db17\", borderwidth=10,\r\n                            command=bank_number)\r\n        submit_btn.place(x=177, y=400)\r\n        clear_btn = Button(bank_wndw, text=\"Clear\", font=\"Consolas 12 bold\", bg=\"black\", fg=\"#f9db17\", borderwidth=10,\r\n                           command=clear_input)\r\n        clear_btn.place(x=320, y=400)\r\n        exit_btn3 = Button(bank_wndw, text=\"Exit\", font=\"Consolas 12 bold\", bg=\"black\", fg=\"#f9db17\", borderwidth=10,\r\n                           command=exit_program)\r\n        exit_btn3.place(x=50, y=400)\r\n\r\n        # ComboBox\r\n        combo_box_banks = Combobox(bank_wndw)\r\n        combo_box_banks[\"values\"] = \"FNB\", \"Absa\", \"Standard Bank\", \"Capitec\"\r\n        combo_box_banks.place(x=250, y=330)\r\n        combo_box_banks.set(\"Select Your Bank\")\r\n        combo_box_banks['state'] = 'readonly'\r\n\r\n        bank_wndw.mainloop()\r\n\r\n\r\n# calling the class\r\nAllInOne(root)\r\n\r\n# to run the program\r\nroot.mainloop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":39427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"372269777","text":"import logging\nimport ftrack\nimport ftrack_api\n\n\nlogger = logging.getLogger()\n\n\ndef start_time_on_launch(event):\n    \"\"\"Modify the application environment and start timer for the task.\"\"\"\n    data = event['data']\n\n    username = event['source']['user']['username']\n\n    session = ftrack_api.Session()\n    # Get user from username\n    user = session.query('User where username is \"{}\"'.format(username)).one()\n    taskid = None\n    # Try getting taskid from event selection\n    try:\n        taskid = data['context']['selection'][0]['entityId']\n    except (KeyError, IndexError, TypeError):\n        logger.info('Unable to determine task. Timer not starting')\n\n    if taskid:\n        task = session.query('Task where id is {}'.format(taskid)).one()\n        logger.info('Starting timer for task: ' + task['name'])\n        user.start_timer(task, force=True)\n\n\ndef register(registry, **kw):\n    \"\"\"Register location plugin.\"\"\"\n\n    # Validate that registry is the correct ftrack.Registry. 
If not,\n    # assume that register is being called with another purpose or from a\n    # new or incompatible API and return without doing anything.\n    if registry is not ftrack.EVENT_HANDLERS:\n        # Exit to avoid registering this plugin again.\n        return\n\n    ftrack.EVENT_HUB.subscribe(\n        'topic=ftrack.connect.application.launch',\n        start_time_on_launch\n    )","sub_path":"ftrack-connect-plugins/custom_hook/start_launch_timer.py","file_name":"start_launch_timer.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605117594","text":"import pandas as pd\nimport os\nfrom data_handling.pdf_extractor import PdfExtract\nimport re\nfrom data_handling.utilities import is_path_present, create_in_data_path\n\n\nclass ColMap:\n    def __init__(self, filepath: str, frac_df_keep=None):\n        self.filepath = filepath\n        self.check_path()\n        self.basename: str = os.path.basename(filepath)\n        self.basename = self.basename.split('.')[0]\n        # keep 30% of the rows unless a float fraction is supplied\n        self.frac_df_keep = frac_df_keep if isinstance(frac_df_keep, float) and frac_df_keep else .3\n\n    @property\n    def year(self):\n        year = re.findall(r'\\d+', self.basename)[0]\n        return int(year)\n\n    @property\n    def year_range(self):\n        if self.year < 2017:\n            return '2001-2016'\n        return '2017'\n\n    @property\n    def category(self):\n        category = re.findall(r'(?<=\\d)[A-z]+$', self.basename)[0]\n        return category\n\n    @property\n    def county(self):\n        match = re.match(r'[A-z]+', self.basename)\n        return match.group(0)\n\n    def get_df(self):\n        if hasattr(self, 'df'):\n            return self.df\n        df: pd.DataFrame = pd.read_fwf(self.filepath, header=None,\n                                       index_col=None, encoding='ISO-8859-1', colspecs=self.col_specs)\n        df.columns = self.headers\n        # cache the sampled frame so every call returns the same subset of rows\n        df = df.sample(frac=self.frac_df_keep)\n        setattr(self, 'df', df)\n        return df\n\n    @property\n    def col_specs(self):\n        pf = PdfExtract(self.category, self.year)\n        return [(_range[0], _range[1]) for _range in pf.get_field_dict().values()]\n\n    @property\n    def headers(self):\n        pf = PdfExtract(self.category, self.year)\n        columns = list(pf.get_field_dict().keys())\n        return columns\n\n    def check_path(self):\n        if os.path.exists(self.filepath):\n            return\n        raise FileNotFoundError(f'{self.filepath} is not a valid file path.')\n\n    def write_df(self, dir_name):\n        write_header = False if is_path_present(self._get_file_save_path(dir_name)) else True\n        with open(self._get_file_save_path(dir_name), 'a') as df_file:\n            self.get_df().to_csv(df_file, index=None, header=write_header)\n\n    def _get_file_save_path(self, dir_name):\n        return create_in_data_path(dir_name, self.category+'.csv')\n","sub_path":"data_handling/column_mapper.py","file_name":"column_mapper.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242850261","text":"# -*- coding: utf-8 -*-\n\n\nimport time\nimport json\nimport socket\nimport argparse\nimport platform\nimport subprocess\nfrom ipaddress import ip_address\nfrom collections import defaultdict\nfrom multiprocessing.pool import Pool as ProcPool\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\n'''\npip install argparse\npython3\n'''\n\nPORT_RANGE = [1, 1024]\n\n\n\ndef ip_format(ipaddress):\n    ip_parser = ipaddress.split('-')\n    try:\n        start = ip_address(ip_parser[0])\n        end = ip_address(ip_parser[-1])\n    except ValueError:\n        raise Exception('IP address format error.')\n    if start > end:\n        raise Exception('IP segment format error.')\n    ip_list = []\n    while start <= end:\n
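        # ip_address objects support integer arithmetic, so start += 1 advances to the next host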
        ip_list.append(str(start))\n        start += 1\n    return ip_list\n\n\n\n\ndef ping_test(ip):\n    result = subprocess.call(\"ping -n 4 {} > nul\".format(ip),\n                             shell=True) if platform.system() == \"Windows\" else subprocess.call(\n        \"ping -c 4 {} > /dev/null\".format(ip), shell=True)\n    return False if result else True\n\n\n\ndef port_test(ip, port):\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    try:\n        s.connect((ip, port))\n        s.shutdown(2)\n        return True\n    except socket.error:\n        return False\n    finally:\n        # release the socket whether or not the connection succeeded\n        s.close()\n\n\n\ndef scan(ip, port=None):\n    if ping_test(ip):\n        if port:\n            if port_test(ip, port):\n                return ip, port\n            return None\n        return ip\n\n\n\ndef run(ipaddress, concurrency, mode, function, write, view):\n    ip_list = ip_format(ipaddress)\n    if mode == 'thread':\n        pool = ThreadPool(concurrency)\n    else:\n        pool = ProcPool(concurrency)\n    t1 = time.time()\n    result_list = pool.map(scan, ip_list)\n    t2 = time.time()\n    available_ip_port = list(filter(None, result_list))\n    if view:\n        print('ping time: {}'.format(t2 - t1))\n    if function == 'tcp':\n        ip_port_iter = ((ip, port) for ip in available_ip_port for port in range(PORT_RANGE[0], PORT_RANGE[-1] + 1))\n        t3 = time.time()\n        result_list = pool.starmap(scan, ip_port_iter)\n        t4 = time.time()\n        available_ip_port = defaultdict(list)\n        for result in result_list:\n            if isinstance(result, tuple):\n                available_ip_port[result[0]].append(result[1])\n        if view:\n            print('tcp time: {}'.format(t4 - t3))\n    pool.close()\n    pool.join()\n    print(available_ip_port)\n    if write:\n        with open(write, 'w+') as f:\n            json.dump(available_ip_port, f)\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Port Scanner.')\n    parser.add_argument('-n', '--number', default=1, type=int, help='Number of concurrent workers.')\n    parser.add_argument('-m', '--mode', default='thread', choices=['thread', 'proc'],\n                        help='\"thread\": use a thread pool. \"proc\": use a process pool.')\n    parser.add_argument('-f', '--function', default='ping', choices=['ping', 'tcp'],\n                        help='\"ping\": Scans for available IP. \"tcp\": Scans for available ports.')\n    parser.add_argument('-ip', '--ipaddress', default='127.0.0.1',\n                        help='IP address or IP segment. 
eg: 192.168.0.1-192.168.0.100')\n parser.add_argument('-w', '--write', help='Output json file path.')\n parser.add_argument('-v', '--view', action='store_true', help='Check scanner run time.')\n args = parser.parse_args()\n\n\n run(args.ipaddress, args.number, args.mode, args.function, args.write, args.view)","sub_path":"week03/HostScanner/pmap.py","file_name":"pmap.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"372791233","text":"\n# stdlib imports\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\n# rammi imports\nfrom rammi.resources import BaseResource\n\n# twisted imports\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nclass Planet(BaseResource):\n\n template = 'feeds/list.html'\n\n def __init__(self, db, templates, handler=None):\n BaseResource.__init__(self, db, templates)\n\n self.handler = handler\n\n def getChild(self, key, request):\n \"\"\"\n Might want to cache the Listing object here\n \"\"\"\n return self\n \n @inlineCallbacks\n def control_GET(self, request):\n d = yield self.db.feeds.find(None,\n limit=int(request.args.get('limit',[24])[0]))\n returnValue(dict(feeds=d))\n\n# def render_GET_html(self, result):\n# return self.render_template('feeds/list.html', {'feeds':result})\n\n def render_GET_json(self, result):\n json_obj = {'feeds': [{'title': r['title'],\n 'link': r['link'],\n 'source': r['source'],\n 'summary': r['summary']\n } for r in result]\n }\n\n return json.dumps(json_obj)\n\n def render_GET_text(self, result):\n feeds = '# News\\n'\n for feed in result:\n entry = \"\\n## %(title)s\\n\\n %(source)s\\n%(link)s\\n\\n\"\n feeds += entry % {'title': feed['title'],\n 'source': feed['source'],\n 'link': feed['link']}\n\n return feeds\n","sub_path":"resources/planet.py","file_name":"planet.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"426859690","text":"\"\"\"\nNote that pytest offers a `tmp_path`. \nYou can reproduce locally with\n\n```python\n%load_ext autoreload\n%autoreload 2\nimport os\nimport tempfile\nimport shutil\nfrom pathlib import Path\ntmp_path = Path(tempfile.gettempdir()) / 'pytest-retrieve-authors'\nif os.path.exists(tmp_path):\n shutil.rmtree(tmp_path)\nos.mkdir(tmp_path)\n```\n\"\"\"\n\nimport os\nimport shutil\nimport logging\n\nfrom mkdocs_git_authors_plugin import util\nfrom mkdocs_git_authors_plugin.git import repo\n\n# GitPython\nimport git as gitpython\n\nDEFAULT_CONFIG = {\n \"show_contribution\": False,\n \"show_line_count\": False,\n \"show_email_address\": True,\n \"count_empty_lines\": True,\n \"sort_authors_by_name\": True,\n \"sort_reverse\": False,\n \"sort_authors_by\": \"name\",\n \"authorship_threshold_percent\": 0,\n}\n\n#### Helpers ####\n\n\ndef setup_clean_mkdocs_folder(mkdocs_yml_path, output_path):\n \"\"\"\n Sets up a clean mkdocs directory\n \n outputpath/testproject\n ├── docs/\n └── mkdocs.yml\n \n Args:\n mkdocs_yml_path (Path): Path of mkdocs.yml file to use\n output_path (Path): Path of folder in which to create mkdocs project\n \n Returns:\n testproject_path (Path): Path to test project\n \"\"\"\n\n testproject_path = output_path / \"testproject\"\n\n # Create empty 'testproject' folder\n if os.path.exists(testproject_path):\n logging.warning(\n \"\"\"This command does not work on windows. 
\n Refactor your test to use setup_clean_mkdocs_folder() only once\"\"\"\n )\n shutil.rmtree(testproject_path)\n\n # Copy correct mkdocs.yml file and our test 'docs/'\n shutil.copytree(\"tests/basic_setup/docs\", testproject_path / \"docs\")\n shutil.copyfile(mkdocs_yml_path, testproject_path / \"mkdocs.yml\")\n\n return testproject_path\n\n\ndef setup_commit_history(testproject_path):\n \"\"\"\n Initializes and creates a git commit history\n in a new mkdocs testproject. \n \n We commit the pages one by one in order \n to create some git depth.\n \n Args:\n testproject_path (Path): Path to test project\n \n Returns:\n repo (repo): git.Repo object\n \"\"\"\n assert not os.path.exists(testproject_path / \".git\")\n\n repo = gitpython.Repo.init(testproject_path, bare=False)\n author = \"Test Person \"\n\n # Change the working directory\n cwd = os.getcwd()\n os.chdir(str(testproject_path))\n\n try:\n repo.git.add(\"mkdocs.yml\")\n repo.git.commit(message=\"add mkdocs\", author=author)\n\n repo.git.add(\"docs/first_page.md\")\n repo.git.commit(message=\"first page\", author=author)\n file_name = testproject_path / \"docs/first_page.md\"\n with open(file_name, \"w+\") as the_file:\n the_file.write(\"Hello\\n\")\n repo.git.add(\"docs/first_page.md\")\n repo.git.commit(message=\"first page update 1\", author=author)\n with open(file_name, \"w\") as the_file:\n the_file.write(\"# First Test Page Edited\\n\\nSome Lorem text\")\n repo.git.add(\"docs/first_page.md\")\n repo.git.commit(message=\"first page update 2\", author=author)\n\n repo.git.add(\"docs/second_page.md\")\n repo.git.commit(message=\"second page\", author=author)\n repo.git.add(\"docs/index.md\")\n repo.git.commit(message=\"homepage\", author=author)\n repo.git.add(\"docs/page_with_tag.md\")\n repo.git.commit(message=\"homepage\", author=author)\n os.chdir(str(cwd))\n except:\n os.chdir(str(cwd))\n raise\n\n os.chdir(cwd)\n return repo\n\n\n#### Tests ####\n\n\ndef test_empty_file(tmp_path):\n\n # Change working directory\n cwd = os.getcwd()\n os.chdir(str(tmp_path))\n\n # Create empty file\n file_name = str(tmp_path / \"new-file\")\n open(file_name, \"a\").close()\n\n # Get authors of empty, uncommitted file\n r = gitpython.Repo.init(tmp_path)\n\n repo_instance = repo.Repo()\n repo_instance.set_config(DEFAULT_CONFIG)\n # TODO: should throw an error?\n repo_instance.page(file_name)\n\n authors = repo_instance.get_authors()\n assert authors == []\n\n # Get authors of empty but committed file\n r.index.add([file_name])\n author = gitpython.Actor(\"Tim\", \"abc@abc.com\")\n r.index.commit(\"initial commit\", author=author)\n\n repo_instance.page(file_name)\n authors = repo_instance.get_authors()\n assert authors == []\n\n os.chdir(cwd)\n ## TODO\n # When the first instance of a commit on a page is skipped as an empty line,\n # the second instance will not have the commit metadata available\n\n\ndef test_retrieve_authors(tmp_path):\n \"\"\"\n Builds a fake git project with some commits.\n\n Args:\n tmp_path (PosixPath): Directory of a tempdir\n \"\"\"\n cwd = os.getcwd()\n os.chdir(str(tmp_path))\n\n # Create file\n file_name = str(tmp_path / \"new-file\")\n with open(file_name, \"w\") as the_file:\n the_file.write(\"Hello\\n\")\n\n # Create git repo and commit file\n r = gitpython.Repo.init(tmp_path)\n r.index.add([file_name])\n author = gitpython.Actor(\"Tim\", \"abc@abc.com\")\n r.index.commit(\"initial commit\", author=author)\n\n # Test retrieving author\n repo_instance = repo.Repo()\n repo_instance.set_config(DEFAULT_CONFIG)\n 
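    # process the page so its author statistics can be queried below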
repo_instance.page(file_name)\n\n authors = repo_instance.get_authors()\n assert len(authors) == 1\n # We don't want to test datetime\n authors = util.page_authors(authors, file_name)\n authors[0][\"last_datetime\"] = None\n\n assert authors == [\n {\n \"name\": \"Tim\",\n \"email\": \"abc@abc.com\",\n \"last_datetime\": None,\n \"lines\": 1,\n \"lines_all_pages\": 1,\n \"contribution\": \"100.0%\",\n \"contribution_all_pages\": \"100.0%\",\n }\n ]\n\n # Now add a line to the file\n # From a second author with same email\n with open(file_name, \"a+\") as the_file:\n the_file.write(\"World\\n\")\n r.index.add([file_name])\n author = gitpython.Actor(\"Tim2\", \"abc@abc.com\")\n r.index.commit(\"another commit\", author=author)\n\n repo_instance = repo.Repo()\n repo_instance.set_config(DEFAULT_CONFIG)\n repo_instance.page(file_name)\n authors = repo_instance.get_authors()\n authors = util.page_authors(authors, file_name)\n authors[0][\"last_datetime\"] = None\n\n assert authors == [\n {\n \"name\": \"Tim\",\n \"email\": \"abc@abc.com\",\n \"last_datetime\": None,\n \"lines\": 2,\n \"lines_all_pages\": 2,\n \"contribution\": \"100.0%\",\n \"contribution_all_pages\": \"100.0%\",\n }\n ]\n\n # Then a third commit from a new author\n with open(file_name, \"a+\") as the_file:\n the_file.write(\"A new line\\n\")\n r.index.add([file_name])\n author = gitpython.Actor(\"John\", \"john@abc.com\")\n r.index.commit(\"third commit\", author=author)\n\n repo_instance = repo.Repo()\n repo_instance.set_config(DEFAULT_CONFIG)\n repo_instance.page(file_name)\n authors = repo_instance.get_authors()\n authors = util.page_authors(authors, file_name)\n authors[0][\"last_datetime\"] = None\n authors[1][\"last_datetime\"] = None\n\n assert authors == [\n {\n \"name\": \"John\",\n \"email\": \"john@abc.com\",\n \"last_datetime\": None,\n \"lines\": 1,\n \"lines_all_pages\": 1,\n \"contribution\": \"33.33%\",\n \"contribution_all_pages\": \"33.33%\",\n },\n {\n \"name\": \"Tim\",\n \"email\": \"abc@abc.com\",\n \"last_datetime\": None,\n \"lines\": 2,\n \"lines_all_pages\": 2,\n \"contribution\": \"66.67%\",\n \"contribution_all_pages\": \"66.67%\",\n },\n ]\n os.chdir(cwd)\n\n\ndef test_mkdocs_in_git_subdir(tmp_path):\n \"\"\"\n Sometimes `mkdocs.yml` is not in the root of the repo.\n We need to make sure things still work in this edge case.\n\n tmp_path/testproject\n website/\n ├── docs/\n └── mkdocs.yml\n \"\"\"\n testproject_path = tmp_path / \"testproject\"\n\n shutil.copytree(\n \"tests/basic_setup/docs\", str(testproject_path / \"website\" / \"docs\")\n )\n shutil.copyfile(\n \"tests/basic_setup/mkdocs.yml\", str(testproject_path / \"website\" / \"mkdocs.yml\")\n )\n\n cwd = os.getcwd()\n os.chdir(str(testproject_path))\n\n # Create file\n file_name = str(testproject_path / \"website\" / \"new-file\")\n with open(file_name, \"w\") as the_file:\n the_file.write(\"Hello\\n\")\n\n # Create git repo and commit file\n r = gitpython.Repo.init(testproject_path)\n r.index.add([file_name])\n author = gitpython.Actor(\"Tim\", \"abc@abc.com\")\n r.index.commit(\"initial commit\", author=author)\n\n # Test retrieving author\n repo_instance = repo.Repo()\n repo_instance.set_config(DEFAULT_CONFIG)\n repo_instance.page(file_name)\n\n authors = repo_instance.get_authors()\n assert len(authors) == 1\n # We don't want to test datetime\n authors = util.page_authors(authors, file_name)\n authors[0][\"last_datetime\"] = None\n\n assert authors == [\n {\n \"name\": \"Tim\",\n \"email\": \"abc@abc.com\",\n \"last_datetime\": None,\n 
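            # last_datetime was nulled above to keep the comparison deterministic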
\"lines\": 1,\n \"lines_all_pages\": 1,\n \"contribution\": \"100.0%\",\n \"contribution_all_pages\": \"100.0%\",\n }\n ]\n\n os.chdir(cwd)\n\ndef test_summarize_authors():\n \"\"\"\n Test summary functions. \n TODO\n \"\"\"\n pass\n # authors = [\n # {'name' : 'Tim',\n # 'email' : 'abc@abc.com',\n # 'contribution' : '64.23%'\n # }\n # ]\n\n # # Default case: don't show contribution\n # config = { 'show_contribution' : False }\n # summary = util.Util().summarize(authors, config)\n # assert summary == \"Tim\"\n\n # # Do show contribution,\n # # but hide it because there's only one author\n # config = { 'show_contribution' : True }\n # summary = util.Util().summarize(authors, config)\n # assert summary == \"Tim\"\n\n # # Add another author\n # authors.append({\n # 'name' : 'Tom',\n # 'email' : 'efg@efg.org',\n # 'contribution' : '35.77%'\n # })\n # # Now contribution is displayed\n # summary = util.Util().summarize(authors, config)\n # assert summary == \"Tim (64.23%), Tom (35.77%)\"\n\n# TODO: test authors threshold with commits","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"528414081","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.chrome import ChromeDriverManager\n#from webdriver_manager.IE import IEDriverManager\n''' steps'''\n# Step 1> Open Firefox\nbrowser=webdriver.Chrome(executable_path=ChromeDriverManager().install())\n# Step 2> Enter the URL\nbrowser.get(\"https://www.testandquiz.com/selenium/testing.html\")\nsample_text=browser.find_element_by_xpath(\"//b[normalize-space()='This is sample text.']\").text\nassert sample_text==\"This is sample text.\"\nprint(sample_text)\n\nbrowser.find_element_by_link_text(\"This is a link\").click()\nalert_ele=browser.switch_to.alert\nalert_text=alert_ele.text\nprint(\"alert_text\")\nalert_text.accept()\nprint(\"Link Clicked\")\n\n\n#browser.find_element_by_id(\"fname\")\n'''browser.find_element_by_name(\"txtPassword\").send_keys(\"admin123\")\nbrowser.find_element_by_name(\"Submit\").click()\nbrowser.implicitly_wait(5)\n#time.sleep(5)\n# Step 4>\npage_title=browser.title\nprint(page_title)\nassert page_title==\"OrangeHRM\"'''\nbrowser.close()","sub_path":"UI_Demo/DifferentControlsChecking.py","file_name":"DifferentControlsChecking.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"628888167","text":"import webbrowser\r\nclass Movie():\r\n VALID_RATINGS=[\"R\",\"PG-13\",\"PG\"]\r\n def __init__(self,movie_title,movie_storyline,poster_image,trailer_youtube):\r\n self.title=movie_title\r\n self.storyline=movie_storyline\r\n self.poster_image_url=poster_image\r\n self.trailer_youtube_url=trailer_youtube\r\n\r\n def show_trailer(trailer):\r\n webbrowser.open(trailer)\r\n \r\n","sub_path":"Movie database/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"312270946","text":"import math\nimport struct\nfrom vxiinstrclasses.instrument import *\n\n\n\nclass sdg1032x(instrument):\n \"\"\"This class controls a Siglent 1032X arbitrary waveform generator\"\"\"\n def __init__(self, resourcehost):\n\n instrument.__init__(self, resourcehost)\n\n\n iid = 
self.identify()\n if (iid[0:29] != 'Siglent Technologies,SDG1032X'):\n raise InstrumentError('Instrument Manufacturer/Model Number Mismatch, '+iid)\n\n\n def console(self):\n \"\"\"Enter debugging console\"\"\"\n self._console(\"Siglent SDG1032X\")\n\n def output(self, param, channel=1):\n \"\"\"Set output parameters\"\"\"\n cstr = \"C{channel}:OUTP {param} \".format(channel=channel, param=param)\n self._write(cstr)\n\n def output_on(self, channel=1):\n \"\"\"Turn output on\"\"\"\n self.output('ON', channel=channel)\n\n def output_off(self, channel=1):\n \"\"\"Turn output off\"\"\"\n self.output('OFF', channel=channel)\n\n def output_sourcez(self, channel=1, load=50000):\n \"\"\"Set output source impedance\"\"\"\n lstr=\"LOAD, {load}\".format(load=load)\n self.output(\"{lstr}\".format(lstr=lstr), channel)\n\n def output_polarity_normal(self, channel=1):\n \"\"\"Set output polarity to normal\"\"\"\n self.output(\"PLRT, NOR\", channel)\n\n def output_set_defaults(self, channel=1):\n \"\"\"Set the output defaults\"\"\"\n self.output(\"OFF, LOAD, HZ, PLRT, NOR\", channel)\n\n def output_polarity_invert(self, channel=1):\n \"\"\"Set output polarity to inverted\"\"\"\n self.output(\"PLRT, INVT\", channel)\n\n def basic_wave(self, type, channel=1, freq=1000, amplitude=1.0, offset=0.0, phase=0.0, sym=float('nan'), duty=float('nan')):\n \"\"\"Output a basic waveform\"\"\"\n if(math.isnan(sym) == False):\n self._write(\n \"C{channel}:BSWV WVTP, {type},FRQ, {freq}HZ,AMP, {amplitude}V,OFST, {offset}V,PHSE, {phase},SYM, {sym}\".format(\n channel=channel, type=type, freq=freq, amplitude=amplitude, offset=offset, phase=phase, sym=sym))\n elif (math.isnan(duty) == False):\n self._write(\n \"C{channel}:BSWV WVTP, {type},FRQ, {freq}HZ,AMP, {amplitude}V,OFST, {offset}V,PHSE, {phase},DUTY, {duty}\".format(\n channel=channel, type=type, freq=freq, amplitude=amplitude, offset=offset, phase=phase, duty=duty))\n\n else:\n self._write(\n \"C{channel}:BSWV WVTP, {type},FRQ, {freq}HZ,AMP, {amplitude}V,OFST, {offset}V,PHSE, {phase}\".format(\n channel=channel, type=type, freq=freq, amplitude=amplitude, offset=offset, phase=phase))\n\n\n def sine(self, channel=1, freq=1000, amplitude=1.0, offset=0.0, phase=0.0):\n \"\"\"Output a sine wave\"\"\"\n self.basic_wave(type=\"SINE\",channel=channel, freq=freq, amplitude=amplitude, offset=offset, phase=phase)\n\n def ramp(self, channel=1, freq=1000, amplitude=1.0, offset=0.0, phase=0.0, sym=50):\n \"\"\"Output a ramp wave\"\"\"\n self.basic_wave(type=\"RAMP\", channel=channel, freq=freq, amplitude=amplitude, offset=offset, phase=phase, sym=sym)\n\n def square(self, channel=1, freq=1000, amplitude=1.0, offset=0.0, phase=0.0, duty=50):\n \"\"\"Output a square wave\"\"\"\n self.basic_wave(type=\"SQUARE\", channel=channel, freq=freq, amplitude=amplitude, offset=offset, phase=phase, duty=duty)\n\n def noise(self, channel=1, stdev=0.200, mean=0):\n \"\"\"Output noise\"\"\"\n self._write(\"C{channel}:BSWV WVTP,NOISE,STDEV,{stdev}V,MEAN,{mean}V\".format(channel=channel, stdev=stdev, mean=mean))\n\n def sweep_off(self, channel):\n \"\"\"Turn sweep mode off\"\"\"\n self._write(\"C{channel}:SWWV STATE,OFF\")\n\n def sinesweep(self, channel=1, time=1.0, amplitude=1.0, mode=\"LIN\", start=300.0, stop=3000.0):\n \"\"\"Basic sine sweep\"\"\"\n sstr=\"C{channel}:SWWV STATE,ON,TYPE,SINE,\".format(channel=channel)\n sstr=sstr+\"TIME,{time}S,AMP,MODE,{mode},START,{start}HZ,STOP,{stop}HZ\".format(time=time, mode=mode, start=start, stop=stop)\n self._write(sstr)\n\n def wave_select_by_name(self, 
name, channel=1):\n \"\"\"Select an user waveform by name\"\"\"\n sstr = \"C{channel}:ARWV NAME,{name}\".format(channel=channel, name=name)\n self._write(sstr)\n\n def wave_select_by_index(self, index, channel=1):\n \"\"\"Select builtin arbitrary wave by index\"\"\"\n sstr = \"C{channel}:ARWV INDEX,{index}\".format(channel=channel, index=index)\n self._write(sstr)\n\n\n def wave_get_builtin(self):\n \"\"\"Return a dict with built-in wave names and indexes\"\"\"\n sstr = \"STL? BUILDIN\"\n res = self._ask(sstr)\n res = res[4:]\n res = res.replace(' ', '')\n alist = res.split(',')\n alistlen = len(alist)\n # Get rid of leading 'M'on index\n for i in range(alistlen):\n if i % 2 == 0:\n alist[i] = int(alist[i][1:])\n # Convert the list to a dictionary\n return dict(zip(alist[1::2], alist[::2]))\n\n\n\n def wave_set(self, setup, channel=1):\n \"\"\"Send a waveform setup to the arbitrary waveform generator\n Pass in a dict with the following items\n NAME: Name of user waveform e.g. 'test' (mandatory)\n WAVEDATA: List of signed short integers (mandatory)\n FREQ: frequency in hz (optional, default = 1000)\n TYPE: waveform type (optional, default = 5)\"\"\"\n\n # Mandatory parameters\n if type(setup) is not dict or 'WAVEDATA' not in setup or 'NAME' not in setup:\n return\n\n # Optional parameters\n\n if 'TYPE' not in setup:\n setup['TYPE'] = 5\n\n if 'FREQ' not in setup:\n setup['FREQ'] = 1000\n\n #Convert wave data to bytearray\n wavedata = setup['WAVEDATA']\n wdlen = len(wavedata)\n block = bytearray(wdlen*2)\n for i in range(wdlen):\n struct.pack_into('= 0 and cand_r < r and cand_c >= 0 and cand_c < c:\n if grid[cand_r][cand_c] == person_type and (candidate not in visited):\n q.put(candidate)\n visited.add(candidate)\n \n print(type_string if path_exists else \"neither\")\n ","sub_path":"Kattis/10kindsOfPeople.py","file_name":"10kindsOfPeople.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"559568090","text":"import os\nimport logging\nimport json\nimport requests\nfrom flask import Flask\nfrom flask import request\n\nimport subprocess\nfrom env import MyEnv\n\n\napp = Flask(__name__)\n\nSEARCH_DOMAIN = os.environ.get('SEARCH_DOMAIN')\n\nDIR = '/l2-node/l2'\n\nprint(os.environ)\n\n\n@app.route('/')\ndef health():\n return 'Ok'\n \n\n@app.route('/v1/chain/update',methods=['POST'])\ndef update_chain():\n mount_path = request.args.get(\"mount_path\")\n efs_id = request.args.get(\"efs_id\")\n access_point_id = request.args.get(\"access_point_id\")\n _kill_pids()\n the_path = mount_path or '/metis'\n logging.warning(f'update_chain mount_path:{mount_path}, efs_id:{efs_id}, access_point_id:{access_point_id}')\n _umount_path(the_path)\n if efs_id is not None and access_point_id is not None:\n output=_try_cmd(['mount', '-t', 'efs', '-o', f'tls,accesspoint={access_point_id}',f'{efs_id}:', the_path])\n logging.warning(f'mount to ap: {efs_id},{access_point_id},{output}')\n _try_cmd(['mkdir', '-p', f'{the_path}/.ethereum'])\n _try_cmd(['mount','--bind',f'{the_path}/.ethereum','/root/.ethereum'])\n logging.warning(f\"mount to root ethereum: mount --bind {the_path}/.ethereum /root/.ethereum\")\n if output is None or len(output) <= 0:\n body=request.get_data(as_text=True)\n output=_update_chain(json.loads(body))\n return {\n 'data': output.decode('utf-8')\n }\n\n\ndef _kill_pids():\n output = _try_cmd_string(\"/app/process_kill.sh\")\n logging.warning(output)\n\ndef _umount_path(path):\n _try_cmd(['umount', 
'/root/.ethereum'])\n _try_cmd(['umount', path])\n output = _try_cmd_string(f\"df -h|grep -w {path}\")\n logging.warning(f\"check umount result: {output}\")\n \n\ndef _try_cmd(cmds):\n try:\n return subprocess.check_output(cmds)\n except Exception as e:\n logging.warning(f'exce cmd in update chain error => {cmds}')\n return \"\"\n \n\ndef _try_cmd_string(cmd):\n try:\n return subprocess.check_output(cmd, shell=True)\n except Exception as e:\n logging.warning(f'exce cmd string in update chain error => {cmd}')\n return \"\" \n \n\ndef _update_chain(body):\n logging.warning(f'update_chain to file:{body}')\n if body is not None:\n myEnv = MyEnv('')\n myEnv.SetEnvFile(\"/app/env.sh\")\n myEnv.envs=body\n myEnv.Save()\n logging.warning(f'update_chain to file2:{myEnv.envs}')\n\n output = _try_cmd(['cat','/app/env.sh'])\n logging.warning(output)\n \n output = _try_cmd([f'/app/restart.sh','/app/env.sh'])\n logging.warning(output)\n return output\n\n\n@app.route('/v1/chain/stop',methods=['POST'])\ndef stop_chain():\n _kill_pids()\n mount_path = request.args.get(\"mount_path\")\n the_path = mount_path or '/metis'\n logging.warning('stop_chain...')\n _umount_path(the_path)\n return {\n 'data': \"success\"\n } \n \n\n@app.route('/v1/shell/exec',methods=['POST'])\ndef exec_shell():\n bodys=request.get_data()\n logging.warning(bodys)\n body = json.loads(bodys)\n cmd = body['cmd']\n # tokens=cmd.split(' ')\n # logging.warning(tokens)\n # ls_output = subprocess.check_output(tokens)\n ls_output = _try_cmd_string(cmd)\n logging.warning(ls_output)\n response = {\n 'data': ls_output.decode('utf-8')\n }\n logging.warning(response)\n return response\n\n\n@app.route('/v1/metis/l2/geth')\ndef metis_l2_geth():\n logging.warning('mount to ap')\n efs_id = request.args.get(\"efs_id\")\n access_point_id = request.args.get(\"access_point_id\")\n mount_path = request.args.get(\"mount_path\")\n the_path = mount_path or '/metis'\n ls_output=subprocess.check_output(['mount', '-t', 'efs', '-o', f'tls,accesspoint={access_point_id}',f'{efs_id}:', the_path])\n logging.warning(ls_output)\n response = {\n 'data': ls_output.decode('utf-8')\n }\n logging.warning(response)\n return response\n\n\n@app.route('/v1/batch/submitter')\ndef talk_to_batch_submitter():\n host = f'http://batch_submitter_{SEARCH_DOMAIN}:4567' if SEARCH_DOMAIN else 'http://batch_submitter:4567'\n print(f'Calling batch submitter host: {host}')\n response = requests.get(f'{host}/v1/batch_submitter')\n print(response.content)\n\n return {\n 'data': response.json()\n }, response.status_code\n\n\n@app.route('/v1/dtl')\ndef talk_to_dtl():\n host = f'http://data_transport_layer_{SEARCH_DOMAIN}:7878' if SEARCH_DOMAIN else 'http://data_transport_layer:7878'\n print(f'Calling batch submitter host: {host}')\n response = requests.get(f'{host}/v1/dtl')\n print(response.content)\n\n return {\n 'data': response.json()\n }, response.status_code\n\n\nif __name__ == \"__main__\" :\n _update_chain({'a':'1','b':'2', 'l2v':'10', 'l2r':'http://ip-172-31-12-82.us-east-2.compute.internal:8089/state-dump.latest.json'})","sub_path":"ops/geth-relayer-batch/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"590217855","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n##############################################################################\n# Function: 配置与日志 #\n# Author: why000007@163.com #\n# Date: 09/11/2017 
#\n##############################################################################\n\n\n__all__ = ['file','task']\n\nimport os\nfrom ConfigParser import ConfigParser\nfrom logging.handlers import TimedRotatingFileHandler\nimport logging\n\n###########################################################################################################################\nconf_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\"..\",\"conf.ini\"))\nsys_conf = ConfigParser()\nsys_conf.read(conf_path)\n\n###########################################################################################################################\nhandler = TimedRotatingFileHandler(sys_conf.get('sys', 'log_file'), backupCount=7, interval=1, when=\"D\")\nhandler.setFormatter(logging.Formatter('[%(process)d][%(asctime)s][%(threadName)s][%(levelname)s]-| %(message)s'))\nlogger = logging.getLogger('')\nlogger.setLevel(logging.INFO)\nlogging.getLogger('').addHandler(handler)\n\n\nconsole = logging.StreamHandler()\nconsole.setFormatter(logging.Formatter('[%(process)d][%(asctime)s][%(threadName)s][%(filename)s][line:%(lineno)d] %(levelname)s-| %(message)s'))\nconsole.setLevel(logging.DEBUG)\nlogging.getLogger('').addHandler(console)\n\n###########################################################################################################################\ndef get_conf(key, default):\n if sys_conf.has_option(\"sys\", key):\n return sys_conf.get(\"sys\", key)\n else:\n return default\n\nclass CleanError(Exception):\n def __init__(self, message):\n self.__message = message\n def __str__(self):\n return repr(self.__message)\n\n\n\n\n\n\n","sub_path":"PYTHON_CLEAN_FILE/file/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"453724985","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2021 IBM Corporation\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import deque\nimport time\n\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom geometry_msgs.msg import PoseStamped\nfrom matplotlib import pyplot as plt\nfrom message_filters import ApproximateTimeSynchronizer\nimport message_filters\nimport numpy as np\nimport rospy\nfrom scipy.spatial.transform import Rotation as R\nfrom sensor_msgs.msg import Image, CameraInfo\nfrom std_msgs.msg import ColorRGBA\nimport tf\nimport tf2_ros\nfrom track_people_py.msg import BoundingBox, TrackedBox, TrackedBoxes\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom pointcloud_utils import open3d_utils\n\n\nclass AbsTrackPeople:\n __metaclass__ = ABCMeta\n \n \n def __init__(self, device, minimum_valid_track_time_length):\n # settings for visualization\n self.vis_local = False\n self.vis_global = False\n\n # make sure only one camera is processed\n self.processing_detected_boxes = False\n \n # start initialization\n rospy.init_node('track_people_py', anonymous=True)\n \n self.minimum_valid_track_time_length = minimum_valid_track_time_length\n \n self.device = device\n self.bridge = CvBridge()\n self.detected_boxes_sub = rospy.Subscriber('/track_people_py/detected_boxes', TrackedBoxes, self.detected_boxes_cb)\n self.tracked_boxes_pub = rospy.Publisher('/track_people_py/tracked_boxes', TrackedBoxes, queue_size=1)\n self.visualization_marker_array_pub = rospy.Publisher('/track_people_py/visualization_marker_array', MarkerArray, queue_size=1)\n \n self.frame_id = 0\n self.prev_detect_time_sec = 0\n \n \n @abstractmethod\n def detected_boxes_cb(self, detected_boxes_msg):\n pass\n \n \n def preprocess_msg(self, detected_boxes_msg):\n detect_results = []\n center_bird_eye_global_list = []\n for idx_bbox, bbox in enumerate(detected_boxes_msg.tracked_boxes):\n detect_results.append([bbox.box.xmin, bbox.box.ymin, bbox.box.xmax, bbox.box.ymax])\n center_bird_eye_global_list.append([bbox.center3d.x, bbox.center3d.y, bbox.center3d.z])\n return np.array(detect_results), center_bird_eye_global_list\n \n \n def pub_result(self, detected_boxes_msg, id_list, color_list, tracked_length):\n # publish tracked boxes message\n tracked_boxes_msg = TrackedBoxes()\n tracked_boxes_msg.header = detected_boxes_msg.header\n tracked_boxes_msg.camera_id = detected_boxes_msg.camera_id\n tracked_boxes_msg.pose = detected_boxes_msg.pose\n for idx_bbox, bbox in enumerate(detected_boxes_msg.tracked_boxes):\n if tracked_length[idx_bbox] < self.minimum_valid_track_time_length:\n continue\n tracked_box = TrackedBox()\n tracked_box.header = bbox.header\n tracked_box.track_id = id_list[idx_bbox]\n tracked_box.color = ColorRGBA(color_list[idx_bbox][0], color_list[idx_bbox][1], color_list[idx_bbox][2], 0.0)\n tracked_box.box = bbox.box\n tracked_box.center3d = bbox.center3d\n tracked_boxes_msg.tracked_boxes.append(tracked_box)\n self.tracked_boxes_pub.publish(tracked_boxes_msg)\n\n rospy.loginfo(\"camera ID = \" + detected_boxes_msg.camera_id + \", number of tracked people = \" + str(len(tracked_boxes_msg.tracked_boxes)))\n \n \n def vis_result(self, detected_boxes_msg, id_list, color_list, tracked_length):\n # publish visualization marker array for rviz\n marker_array = MarkerArray()\n for idx_bbox, 
bbox in enumerate(detected_boxes_msg.tracked_boxes):\n if tracked_length[idx_bbox] < self.minimum_valid_track_time_length:\n continue\n marker = Marker()\n marker.header = bbox.header\n marker.ns = \"track-people\"\n marker.id = id_list[idx_bbox]\n marker.type = Marker.CUBE\n marker.action = Marker.ADD\n marker.lifetime = rospy.Duration(0.5)\n marker.scale.x = 0.5\n marker.scale.y = 0.5\n marker.scale.z = 0.2\n marker.pose.position = bbox.center3d\n marker.pose.orientation.x = 0\n marker.pose.orientation.y = 0\n marker.pose.orientation.z = 0\n marker.pose.orientation.w = 1\n marker.color.r = color_list[idx_bbox][0]\n marker.color.g = color_list[idx_bbox][1]\n marker.color.b = color_list[idx_bbox][2]\n marker.color.a = 1.0\n marker_array.markers.append(marker)\n self.visualization_marker_array_pub.publish(marker_array)\n \n # visualize by 2D plot in global map\n if self.vis_global:\n plt_x = []\n plt_y = []\n plt_color = []\n for idx_bbox, bbox in enumerate(detected_boxes_msg.tracked_boxes):\n if tracked_length[idx_bbox] < self.minimum_valid_track_time_length:\n continue\n plt_x.append(bbox.center3d.x)\n plt_y.append(bbox.center3d.y)\n plt_color.append(np.array(color_list[idx_bbox]))\n \n plt.figure(2)\n plt.cla()\n ax = plt.gca()\n ax.set_title(\"tracked people in global, camera=\"+detected_boxes_msg.camera_id)\n ax.grid(True)\n ax.legend()\n ax.set_xlabel('y')\n ax.set_ylabel('x')\n plt.scatter(plt_x, plt_y, c=plt_color)\n plt.scatter([-detected_boxes_msg.pose.position.y], [detected_boxes_msg.pose.position.x], c=[np.array([1.0, 0.0, 0.0])], marker='+')\n ax.set_xlim([-detected_boxes_msg.pose.position.y-20,-detected_boxes_msg.pose.position.y+20])\n ax.set_ylim([detected_boxes_msg.pose.position.x-20,detected_boxes_msg.pose.position.x+20])\n plt.draw()\n plt.pause(0.00000000001)\n","sub_path":"track_people_py/scripts/track_abstract_people.py","file_name":"track_abstract_people.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"209043105","text":"import uuid\nimport sqlalchemy as sa\nimport sqlalchemy_utils as sau\nfrom sqlalchemy.orm import relationship\nfrom models.associations import \\\n profiles_articles_relationship_table, articles_tags_relationship_table\nfrom models.database import Base, Session\n\n\nclass ArticleManager(object):\n \"\"\"\n Manager class for model Article\n \"\"\"\n\n def __init__(self):\n self.session = Session()\n\n pass\n\n\nclass Article(Base, sau.Timestamp):\n \"\"\"\n Article model class\n \"\"\"\n __tablename__ = 'conduit_api_article'\n\n # base fields\n uuid = sa.Column(\n 'uuid',\n sau.UUIDType(binary=False),\n primary_key=True,\n nullable=False,\n unique=True,\n default=uuid.uuid4\n )\n title = sa.Column(\n 'title',\n sa.String(100),\n nullable=False\n )\n slug = sa.Column(\n 'slug',\n sa.String(100),\n nullable=False\n )\n description = sa.Column(\n 'description',\n sa.String(300),\n nullable=False\n )\n body = sa.Column(\n 'body',\n sa.Text,\n nullable=False\n )\n\n author_id = sa.Column(\n 'author-id',\n sau.UUIDType(binary=False),\n sa.ForeignKey('conduit_api_profile.user-id'),\n nullable=False,\n primary_key=False,\n unique=True\n )\n\n # relationship fields\n author = relationship('Profile', back_populates='conduit_api_profile')\n favorited_user = relationship(\n 'Profile',\n secondary=profiles_articles_relationship_table,\n back_populates='conduit_api_articles'\n )\n tags = relationship(\n 'Article',\n secondary=articles_tags_relationship_table,\n 
back_populates='conduit_api_tags'\n )\n\n __manager__ = ArticleManager()\n\n def __repr__(self):\n pass\n\n def __str__(self):\n return 'Conduit API Article Model'\n\n def serialize(self):\n pass\n","sub_path":"src/models/articles.py","file_name":"articles.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"3247403","text":"import datetime\nimport os\nimport shutil\nimport time\nimport zipfile\nimport csv\n#with zipfile.ZipFile('/Users/tanvi/PycharmProjects/assignment/zipfiles/Faileddata_zipfile.zip', 'r') as zip_ref:\n #zip_ref.extractall('/Users/tanvi/PycharmProjects/assignment/data/unzip')\n\ndef iterateUnzipFiles():\n path = \"/Users/tanvi/PycharmProjects/assignment/data/unzip/\"\n file_list=os.listdir(path)\n print(file_list)\n for filename in file_list:\n cleanData(path, filename)\n\n\ndef cleanData(path,filename):\n soureFile = path + filename\n rows = []\n with open (soureFile, 'r') as csvFile:\n reader = csv.reader (csvFile)\n for row in reader:\n dateValue = row[2]\n datev=datetime.datetime.strptime(dateValue,'%Y-%m-%d %H:%M:%S')\n datevaluparsed=datev.date()\n print (datevaluparsed)\n col1 = row[0]\n col2 = row[1]\n col3 = datevaluparsed\n col4 = row[3]\n col5 = row[4]\n col6 = row[5]\n rows.append ({\"intdata\": col1, \"intdata2\": col2, \"datedata\": col3, \"decidata\": col4, \"stringdata\": col5,\n \"datetimedta\": col6})\n writeCleanDataFile (rows, filename)\n\ndef writeCleanDataFile(rows, filename):\n path = \"/Users/tanvi/PycharmProjects/assignment/data/inputs/\"+filename\n print('start writing cleaned file : ', path)\n fieldnames = ['intdata', 'intdata2',\"datedata\",\"decidata\",\"stringdata\",\"datetimedta\"]\n with open (path, 'w', newline='') as csvfile:\n writer = csv.DictWriter (csvfile,fieldnames)\n writer.writerows(rows)\n print ('completed writing cleaned file : ', path)\n\nif __name__ == '__main__':\n iterateUnzipFiles()\n\n","sub_path":"Datacleansing.py","file_name":"Datacleansing.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"78645668","text":"from typing import List, Optional, Union\n\nfrom torchtext.data import Field, Pipeline, RawField, TabularDataset\n\nfrom diagnnose.vocab import attach_vocab\n\n\n# TODO: add all args to arg_parser\ndef import_corpus(\n path: str,\n header: Optional[List[str]] = None,\n header_from_first_line: bool = False,\n to_lower: bool = False,\n vocab_path: Optional[str] = None,\n vocab_from_corpus: bool = False,\n sen_column: str = \"sen\",\n labels_column: str = \"labels\",\n tokenize_columns: Optional[List[str]] = None,\n sep: str = \"\\t\",\n) -> TabularDataset:\n \"\"\" Imports a corpus from a path.\n\n The corpus can either be a raw string or a pickled dictionary.\n Outputs a `Corpus` type, that is used throughout the library.\n\n The raw sentence is assumed to be labeled `sen`.\n Sentences can optionally be labeled, which are assumed to be labeled\n by a `labels` tag.\n\n Parameters\n ----------\n path : str\n Path to corpus file\n header : List[str], optional\n Optional list of attribute names of each column. If not provided\n all lines will be considered to be sentences, with the\n attribute name \"sen\". 
In case the corpus file contains 2 columns\n the header [\"sen\", \"labels\"] will be assumed.\n to_lower : bool, optional\n Transform entire corpus to lower case, defaults to False.\n header_from_first_line : bool, optional\n Use the first line of the corpus as the attribute names of the\n corpus.\n vocab_path : str, optional\n Path to the model vocabulary, which should a file containing a\n vocab entry at each line.\n vocab_from_corpus : bool, optional\n Create a new vocabulary from the tokens of the corpus itself.\n If set to True `vocab_path` does not need to be provided.\n Defaults to False.\n sen_column : str, optional\n Name of the corpus column containing the raw sentences.\n Defaults to `sen`.\n labels_column : str, optional\n Name of the corpus column containing the sentence labels.\n Defaults to `labels`.\n tokenize_columns : List[str], optional\n List of column names that should be tokenized according to the\n provided vocabulary.\n sep : str, optional\n Column separator of corpus file, either a tsv or csv.\n Defaults to '\\t'.\n\n Returns\n -------\n corpus : TabularDataset\n A TabularDataset containing the parsed sentences and optional labels\n \"\"\"\n assert sep in \"\\t,\", \"separator not recognized, should be either `\\t` or `,`\"\n if tokenize_columns is None:\n tokenize_columns = []\n\n if header is None:\n if header_from_first_line:\n with open(path) as f:\n header = next(f).strip().split(sep)\n else:\n with open(path) as f:\n first_line = next(f).strip().split(sep)\n if len(first_line) == 2:\n header = [sen_column, labels_column]\n else:\n header = [sen_column]\n\n assert sen_column in header, \"`sen` should be part of corpus_header!\"\n\n def preprocess(s: str) -> Union[str, int]:\n return int(s) if s.isdigit() else s\n\n pipeline = Pipeline(convert_token=preprocess)\n fields = {}\n for field in header:\n if field == sen_column or field in tokenize_columns:\n fields[field] = Field(\n batch_first=True, include_lengths=True, lower=to_lower\n )\n elif field == labels_column:\n fields[field] = Field(\n use_vocab=True,\n pad_token=None,\n unk_token=None,\n is_target=True,\n preprocessing=pipeline,\n )\n else:\n fields[field] = RawField(preprocessing=pipeline)\n fields[field].is_target = False\n\n corpus_format = \"tsv\" if sep == \"\\t\" else \"cvs\"\n corpus = TabularDataset(\n fields=fields.items(),\n format=corpus_format,\n path=path,\n skip_header=header_from_first_line,\n csv_reader_params={\"quotechar\": None},\n )\n\n # The current torchtext Vocab does not allow a fixed vocab order so should be attached manually.\n if vocab_path is not None or vocab_from_corpus:\n for column in tokenize_columns + [sen_column]:\n attach_vocab(corpus, vocab_path or path, sen_column=column)\n if labels_column in corpus.fields:\n corpus.fields[labels_column].build_vocab(corpus)\n\n return corpus\n","sub_path":"diagnnose/corpus/import_corpus.py","file_name":"import_corpus.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"299266828","text":"from arvore_implementada import arvore_preta_vermelha\n\nr = arvore_preta_vermelha()\n\nprint('Insira os elementos iniciais ( 0 = preto, 1 = vermelho ) :')\n\nfor ele in list(map(int,input().split())):\n r.inserir(ele)\n\nprint('1.Inserir','2.Inorder','3.Procurar','4.Deletar','5.Visualizar',sep='\\n')\n\nwhile True:\n choice = int(input())\n if choice == 1:\n print('Adiciona um Elemento : ',end='')\n r.inserir(int(input()))\n elif choice == 2:\n 
print('Inorder : ',end='')\n r.inorder(r.raiz)\n print()\n elif choice == 3:\n print('Procure um elemento : ',end='')\n print(r.procurar(int(input())))\n elif choice == 4:\n print('Delete um Elemento : ',end='')\n r.deletar(int(input()))\n elif choice == 5:\n if r.raiz == r.ext:\n print('Árvore esta vazia')\n else:\n print('raiz -> ',end='')\n r.display(r.raiz)\n else:\n break\n","sub_path":"menu_run.py","file_name":"menu_run.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119603428","text":"import pandas as pd\nimport numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\nfrom sklearn import linear_model,preprocessing\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.naive_bayes import GaussianNB\n\n\ntrain_data = pd.read_csv(\"train.csv\")\ntest_data = pd.read_csv(\"test.csv\")\n\n# print(train_data.head(10))\n\nprint(train_data.describe())\n# 38% of people are survived is 80\n# age has some missing data\n# lowest age is 4 years old and the maximum age is 80\n\n# create a table to analyze the missing data in titanic\n\"\"\"train_head = train_data.head(10)\nprint(train_head.sort_values(ascending=False,by=[\"Age\"]))\"\"\"\ntotal = train_data.isnull().sum().sort_values(ascending=False)\npercent1 = train_data.isnull().sum()/train_data.isnull().count()*100\npercent2 = (round(percent1,1)).sort_values(ascending=False)\nmissing_data = pd.concat([total,percent2], axis=1, keys=[\"Total\", \"Percent\"])\nprint(missing_data.head(5))\n# Cabin got 77% of data is missing - might drop this data\n# Age got 19.9% of data missing - need to evaluated - solve this problem\n# Embarked got 2% of data is missing - not very important\n\nprint(train_data.columns.values)\n# 11 values with 1 target (Survived)\n# Feature might choose is - Age,Embarked,Sex\n\n# create graph to see the connection between target and feature\n\n# Survived vs Gender\nsurvived = \"survived\"\nnot_survived = \"not survived\"\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,4))\nplt.title(\"Survival base on Gender\",loc=\"center\")\nwoman = train_data[train_data[\"Sex\"] == \"female\"]\nman = train_data[train_data[\"Sex\"] == \"male\"]\n\n\nax = sns.distplot(woman[woman['Survived'] == 1].Age.dropna(), bins=18, label=survived, ax=axes[0], kde=False)\nax = sns.distplot(woman[woman['Survived'] == 0].Age.dropna(), bins=40, label=not_survived, ax=axes[0], kde=False)\nax.legend()\nax.set_title(\"Woman\")\n\nax = sns.distplot(man[man[\"Survived\"] == 1].Age.dropna(), bins=18, label=survived, ax=axes[1], kde=False)\nax = sns.distplot(man[man[\"Survived\"] == 0].Age.dropna(), bins=40, label=survived, ax=axes[1], kde=False)\nax.legend()\nax.set_title(\"Men\")\n\n# Embarked, Pclass vs Sex\nfacetGrid = sns.FacetGrid(train_data, row=\"Embarked\", size=4.5, aspect=1.6)\nfacetGrid.map(sns.pointplot, \"Pclass\", \"Survived\", \"Sex\", palette=None, order=None, hue_order=None)\nfacetGrid.add_legend()\n\n# Pclass\nsns.barplot(x=\"Pclass\",y=\"Survived\",data=train_data)\n\ngrid = sns.FacetGrid(train_data, col=\"Survived\", row=\"Pclass\", size=2.2, aspect=1.6)\ngrid.map(plt.hist, \"Age\", alpha=.5, 
bins=20)\ngrid.add_legend()\n\n# SibSp and Parch\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"relatives\"] = dataset[\"SibSp\"] + dataset[\"Parch\"]\n dataset.loc[dataset['relatives'] > 0, 'not_alone'] = 0\n dataset.loc[dataset['relatives'] == 0, 'not_alone'] = 1\n dataset['not_alone'] = dataset['not_alone'].astype(int)\n\nprint(train_data['not_alone'].value_counts())\n\naxes = sns.factorplot('relatives',\"Survived\", data=train_data, aspect=2.5,)\n\n# Preprocessing Data\ntrain_data = train_data.drop(['PassengerId'], axis=1)\n\n# Dealing with Missing Data\n\n# CABIN\ndeck = {\"A\":1, \"B\":2, \"C\":3, \"D\":4, \"E\":5, \"F\":6, \"G\":7, \"U\":8}\ndata = [train_data, test_data]\n\nfor dataset in data:\n dataset[\"Cabin\"] = dataset[\"Cabin\"].fillna(\"U0\")\n dataset[\"Deck\"] = dataset[\"Cabin\"].map(lambda x:re.compile(\"([a-zA-Z]+)\").search(x).group())\n dataset[\"Deck\"] = dataset[\"Deck\"].map(deck)\n dataset[\"Deck\"] = dataset[\"Deck\"].fillna(0)\n dataset[\"Deck\"] = dataset[\"Deck\"].astype(int)\n\n# drop the cabin feature - not using it anymore - use the deck one\ntrain_data = train_data.drop([\"Cabin\"], axis=1)\ntest_data = test_data.drop([\"Cabin\"], axis=1)\n\n# AGE\ndata = [train_data,test_data]\nfor dataset in data:\n mean = train_data[\"Age\"].mean()\n std = test_data[\"Age\"].std()\n is_null = dataset[\"Age\"].isnull().sum()\n\n # compute random numbers between the mean, std and is_null\n rand_age = np.random.randint(mean - std, mean + std, size=is_null)\n\n # fill NaN values in Age column with random values generated\n age_slice = dataset[\"Age\"].copy()\n age_slice[np.isnan(age_slice)] = rand_age\n dataset[\"Age\"] = age_slice\n dataset[\"Age\"] = train_data[\"Age\"].astype(int)\n\nprint(train_data[\"Age\"].isnull().sum())\n\ncommon_value = \"S\"\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Embarked\"] = dataset[\"Embarked\"].fillna(common_value)\n\n# Coverting Features\n\n# FARE\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Fare\"] = dataset[\"Fare\"].fillna(0)\n dataset[\"Fare\"] = dataset[\"Fare\"].astype(int)\n\n# NAME\ntitles = {\"Mr\":1, \"Miss\":2, \"Mrs\":3, \"Master\":4, \"Rare\":5}\ndata = [train_data,test_data]\nfor dataset in data:\n # extract titles\n dataset[\"Title\"] = dataset.Name.str.extract('([A-Za-z]+)\\.', expand=False)\n # replace titles with a more common title or as Rare\n dataset[\"Title\"] = dataset[\"Title\"].replace([\"Lady\",\"Countess\",\"Capt\",\"Col\",\"Don\", \"Dr\", \"Major\", \"Rev\", \"Sir\", \"Jonkheer\", \"Dona\"], \"Rare\")\n dataset[\"Title\"] = dataset[\"Title\"].replace(\"Mlle\",\"Miss\")\n dataset[\"Title\"] = dataset[\"Title\"].replace(\"Ms\",\"Miss\")\n dataset[\"Title\"] = dataset[\"Title\"].replace(\"Mme\",\"Mrs\")\n # convert title to number\n dataset[\"Title\"] = dataset[\"Title\"].map(titles)\n # filling NaN with 0 => doesn't affect the calculation later\n dataset[\"Title\"] = dataset[\"Title\"].fillna(0)\n\n# drop axis name and replace with title\ntrain_data = train_data.drop([\"Name\"], axis=1)\ntest_data = test_data.drop([\"Name\"], axis=1)\n\n\n# SEX - convert to numb\ngender = {\"male\":0, \"female\":1}\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Sex\"] = dataset[\"Sex\"].map(gender)\n\n# TICKET\n# drop this column because difficult to convert to numb\n#print(train_data[\"Ticket\"].head()) has 681 unique tickets so large to convert to numb'\n\ntrain_data = train_data.drop([\"Ticket\"],axis=1)\ntest_data = 
test_data.drop([\"Ticket\"],axis=1)\n\n# EMBARKED\nport = {\"S\":0,\"C\":1,\"Q\":2}\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Embarked\"] = dataset[\"Embarked\"].map(port)\n\n\n#Changing Currently Value\n\n#Age - convert to Age Group => Easier to graph later\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Age\"] = dataset[\"Age\"].astype(int)\n dataset.loc[dataset[\"Age\"] <= 11, \"Age\"] = 0\n dataset.loc[(dataset[\"Age\"] > 11) & (dataset[\"Age\"] <= 18), \"Age\"] = 1\n dataset.loc[(dataset[\"Age\"] > 18) & (dataset[\"Age\"] <= 22), \"Age\"] = 2\n dataset.loc[(dataset[\"Age\"] > 22) & (dataset[\"Age\"] <= 30), \"Age\"] = 3\n dataset.loc[(dataset[\"Age\"] > 30) & (dataset[\"Age\"] <= 40), \"Age\"] = 4\n dataset.loc[(dataset[\"Age\"] > 40) & (dataset[\"Age\"] <= 50), \"Age\"] = 5\n dataset.loc[(dataset[\"Age\"] > 50) & (dataset[\"Age\"] <= 60), \"Age\"] = 6\n dataset.loc[(dataset[\"Age\"] > 60), \"Age\"] = 7\n\n#print(train_data[\"Age\"].value_counts())\n\n# need to call data variable for every time do a for loop - due to the data at that moment has already been modify => using the latest data\n\n# FARE - create Group value the same as Age\ndata = [train_data,test_data]\nfor dataset in data:\n dataset.loc[(dataset[\"Fare\"] <= 7.91), \"Fare\"] = 0\n dataset.loc[(dataset[\"Fare\"] > 7.91) & (dataset[\"Fare\"] <= 14.454), \"Fare\"] = 1\n dataset.loc[(dataset[\"Fare\"] > 14.454) & (dataset[\"Fare\"] <= 31), \"Fare\"] = 2\n dataset.loc[(dataset[\"Fare\"] > 31) & (dataset[\"Fare\"] <= 99), \"Fare\"] = 3\n dataset.loc[(dataset[\"Fare\"] > 99) & (dataset[\"Fare\"] <= 251), \"Fare\"] = 4\n dataset.loc[(dataset[\"Fare\"] > 251), \"Fare\"] = 5\n dataset[\"Fare\"] = dataset[\"Fare\"].astype(int)\n\n# Create New Feature => Increase high accuracy\n\n#Age vs Class\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Age_Class\"] = dataset[\"Age\"]*dataset[\"Pclass\"]\n\n#Fare/Person\ndata = [train_data,test_data]\nfor dataset in data:\n dataset[\"Fare_per_Person\"] = dataset[\"Fare\"] / (dataset[\"relatives\"] +1)\n dataset[\"Fare_per_Person\"] = dataset[\"Fare_per_Person\"].astype(int)\n\n#print(train_data.head(10))\n\n#Building Algorithm Model\n\nx_train = train_data.drop(\"Survived\",axis=1)\ny_train = train_data[\"Survived\"]\nx_test = test_data.iloc[:,1:]\n#print(test_data.head())\n\n#SGD Model\nsgd = linear_model.SGDClassifier(max_iter=5,tol=None)\nsgd.fit(x_train,y_train)\ny_pred_sgd = sgd.predict(x_test)\nsgd_score = sgd.score(x_train,y_train)\nprint(\"SGD Score:\",sgd_score,round(sgd_score*100,2))\n\n#Random Forest\nrd = RandomForestClassifier(n_estimators=100)\nrd.fit(x_train,y_train)\ny_pred_rd = rd.predict(x_test)\nrd_score = rd.score(x_train,y_train)\nprint(\"Random Forest Score:\",rd_score,round(rd_score*100,2))\n\n#SVC\nsvc = LinearSVC()\nsvc.fit(x_train,y_train)\ny_pred_svc = svc.predict(x_test)\nsvc_score = svc.score(x_train,y_train)\nprint(\"Linear SVC Score:\",svc_score,round(svc_score*100,2))\n\n#KNN\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(x_train,y_train)\ny_pred_knn = knn.predict(x_test)\nknn_score = knn.score(x_train,y_train)\nprint(\"KNN Score:\",knn_score,round(knn_score*100,2))\n\n#Perceptron\npt = Perceptron(max_iter=5)\npt.fit(x_train,y_train)\ny_pred_pt = pt.predict(x_test)\npt_score = pt.score(x_train,y_train)\nprint(\"Perceptron Score:\",pt_score,round(pt_score*100,2))\n\n#Decision Tree Classifier\ntree = DecisionTreeClassifier()\ntree.fit(x_train,y_train)\ny_pred_tree = tree.predict(x_test)\ntree_score = 
tree.score(x_train,y_train)\nprint(\"Decision Tree Classifier Score:\",tree_score,round(tree_score*100,2))\n\n#GaussianNB\nnb = GaussianNB()\nnb.fit(x_train,y_train)\ny_pred_nb = nb.predict(x_test)\nnb_score = nb.score(x_train,y_train)\nprint(\"GaussianNB Score:\",nb_score,round(nb_score*100,2))\n\n#Logistic Regression\nlog = LogisticRegression()\nlog.fit(x_train,y_train)\ny_pred_log = log.predict(x_test)\nlog_score = log.score(x_train,y_train)\nprint(\"Logistic Regression Score:\",log_score,round(log_score*100,2))\n\n#Conclusion - best model\nresults = pd.DataFrame({\"Model\":[\"Support Vector Machines\",\"KNN\",\"Logistic Regression\",\"Random Forest\",\"Naive Bayes Gaussian\",\"Perceptron\",\"SGD\",\"Decision Tree\"],\"Score\":[round(log_score*100,2),round(knn_score*100,2),round(svc_score*100,2),round(rd_score*100,2),round(nb_score*100,2),round(pt_score*100,2),round(sgd_score*100,2),round(tree_score*100,2)]})\nresults_df = results.sort_values(by=\"Model\", ascending=False)\nresults_df = results_df.set_index(\"Model\")\nprint(results_df.head(9))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Portfolio/Titanic/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"72915864","text":"import os.path\nimport datetime\nimport string\n\n\nclass FilePath:\n def __init__(self, *paths):\n self.paths = paths\n\n def add_path(self, *paths):\n return FilePath(self.paths + paths)\n\n def __call__(self, *paths, on_exists=\"ignore\"):\n \"\"\"\n if_exists: ignore=just return the name; error=exception if file exists; rename=give alternative name if file exists\n \"\"\"\n filepath = os.path.join(*(self.paths + paths))\n if on_exists == \"ignore\":\n return filepath\n\n file_exists = os.path.exists(filepath)\n\n if on_exists == \"error\" and file_exists:\n raise ValueError(\"Filename {} already exists\".format(filepath))\n\n if on_exists == \"rename\":\n count = 1\n filebase, fileext = os.path.splitext(filepath)\n while file_exists:\n filepath = \"{}-{}{}\".format(filebase, count, fileext)\n count += 1\n file_exists = os.path.exists(filepath)\n return filepath\n\n raise ValueError(\"Unknown on_exists parameter {}\".format(on_exists))\n\n def timestamp_filename(self, filename, timeformat=\"%m.%d %Hh%Mm%S\"):\n # for format see: http://docs.python.org/3/library/datetime.html#strftime-strptime-behavior\n time_text = datetime.datetime.now().strftime(timeformat)\n filename = string.Template(filename).safe_substitute(time=time_text)\n\n filepath = self.__call__(filename)\n return filepath\n","sub_path":"dstkdev/filepath.py","file_name":"filepath.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576527699","text":"from GameData.cc_logger import ccLogger\n\n\nclass ccSpriteManager:\n\n textures = {}\n sprites = {}\n\n def __init__(cls):\n error = \"ccSpriteManager is an abstract class, can't call __init__!\"\n raise Exception(error)\n\n @classmethod\n def add_texture(cls, texture_name, texture):\n if texture_name in cls.textures:\n ccLogger.warning(texture_name + \" is already loaded.\")\n else:\n cls.textures[texture_name] = texture\n\n @classmethod\n def get_texture(cls, texture_name):\n texture = cls.textures.get(texture_name)\n if texture:\n return texture\n ccLogger.error(texture_name + \" not found.\")\n\n @classmethod\n def add_sprite(cls, sprite_name, sprite):\n if sprite_name 
in cls.sprites:\n ccLogger.warning(sprite_name + \" is already loaded. It will not be overwritten.\")\n else:\n cls.sprites[sprite_name] = sprite\n\n @classmethod\n def get_sprite(cls, sprite_name):\n sprite = cls.sprites.get(sprite_name)\n if sprite:\n return sprite\n ccLogger.error(sprite_name + \" not found.\")\n","sub_path":"alpha_version/GameData/cc_sprite_manager.py","file_name":"cc_sprite_manager.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"440508526","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import font\r\nfrom Create_Loan import *\r\nfrom Loan_History_User import *\r\nfrom Payment_Page import *\r\nfrom View_Payment_Invoices import *\r\nimport pyrebase\r\n\r\ndef home_pg_user(Uid):\r\n\r\n\tfirebase_config = {\r\n\t\t\"apiKey\" : \"AIzaSyCvuNFssXW4k1cHifCRH_-KP6IPuxJHzqk\",\r\n\t \"authDomain\" : \"financelock-c6ea8.firebaseapp.com\",\r\n\t \"databaseURL\" : \"https://financelock-c6ea8-default-rtdb.firebaseio.com\",\r\n\t \"projectId\" : \"financelock-c6ea8\",\r\n\t \"storageBucket\" : \"financelock-c6ea8.appspot.com\",\r\n\t \"messagingSenderId\" : \"523873568466\",\r\n\t \"appId\" : \"1:523873568466:web:b4259e5cb0e1c85059cf0b\",\r\n\t \"measurementId\" : \"Gender-R1R8CZD552\"\r\n\t}\r\n\r\n\tfirebase = pyrebase.initialize_app(firebase_config)\r\n\r\n\tdb = firebase.database()\r\n\r\n\tdef logout():\r\n\t\tperson = db.child('People').child('Customer').child(Uid).get()\r\n\t\tname = person.val()['name']\r\n\t\tresponse = messagebox.askyesno(\"Are You Sure?\", name + \", are you sure you want to logout? \")\r\n\t\tif response == 1:\r\n\t\t\troot1.destroy()\r\n\r\n\tdef next_but():\r\n\t\tif i.get() == 1:\r\n\t\t\tcreate_loan(Uid)\r\n\t\telif i.get() == 2:\r\n\t\t\tview_det(Uid)\r\n\t\telif i.get() == 3:\r\n\t\t\tpayment_page(Uid)\r\n\t\telse:\r\n\t\t\tview_invoices(Uid)\r\n\r\n\tglobal bg_hpu\r\n\r\n\troot1 = Toplevel()\r\n\troot1.title(\"Home Page User\")\r\n\troot1.iconbitmap('C:/Users/sidsu/Desktop/V.I.T/Project/SWE/Project/BG/icon.ico')\r\n\r\n\tbg_hpu = ImageTk.PhotoImage(Image.open(\"BG/bgmain2.jpeg\"))\r\n\tmy_canvas = Canvas(root1, width = 1300, height = 733)\r\n\tmy_canvas.pack(fill = \"both\", expand = True)\r\n\tmy_canvas.create_image(0, 0, image = bg_hpu, anchor = \"nw\")\r\n\tmy_canvas.create_text(635, 150, text = \"Choose your Services\", font = (\"Helvetica\", 25, \"bold\"), fill = \"steel blue\")\r\n\tmy_canvas.create_rectangle(450, 275, 830, 470, outline = \"black\", fill = \"white\")\r\n\r\n\ti = IntVar()\r\n\ti.set(\"1\")\r\n\tr1 = Radiobutton(root1, text = \"Apply for a Loan\", value = 1, variable = i, background = 'white')\r\n\tr2 = Radiobutton(root1, text = \"View Current or Previous Loan Details\", value = 2, variable = i, background = 'white')\r\n\tr3 = Radiobutton(root1, text = \"Payment Services\", value = 3, variable = i, background = 'white')\r\n\tr4 = Radiobutton(root1, text = \"View Payment Invoices\", value = 4, variable = i, background = 'white')\r\n\tr1_win = my_canvas.create_window(515, 310, window = r1)\r\n\tr2_win = my_canvas.create_window(570, 340, window = r2)\r\n\tr3_win = my_canvas.create_window(517, 370, window = r3)\r\n\tr4_win = my_canvas.create_window(530, 400, window = r4)\r\n\r\n\tnext_button = Button (root1, text = \"Next\", command = next_but, background = \"white\", highlightbackground = \"black\", highlightthickness = 2)\r\n\tnext_button_win = my_canvas.create_window(750, 445, window = 
next_button)\r\n\r\n\tgoback_button = Button (root1, text = \"Log Out\", command = logout, background = \"white\", highlightbackground = \"black\", highlightthickness = 2)# command = back)\r\n\tgoback_button_win = my_canvas.create_window(525, 445, window = goback_button)\r\n\troot1.grab_set()\r\n\r\n\t#my_canvas.create_line(0, 400, 1300, 400, fill = \"steel blue\")","sub_path":"Home_Page_User.py","file_name":"Home_Page_User.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563688536","text":"# Karatsuba algorithm for multiplying numbers of n-length\ndef Karatsuba_algo(num1, num2):\n # base case --> Sets the end of reccursion\n n = len(str(num1)) # Gets the length of number\n if (n <= 1):\n return num1*num2\n else:\n # Step 1 ––> get divider (number which splits the input in four parts)\n base = 10\n exponent = n//2\n div_number = pow(base, exponent)\n # Step 2 --> Splits each number into to parts\n a = num1//div_number\n b = num1 % div_number\n c = num2//div_number\n d = num2 % div_number\n # Step 3 --> Do the sub-multiplications using reccursion method\n z_2 = Karatsuba_algo(a, c)\n z_0 = Karatsuba_algo(b, d)\n z_1 = Karatsuba_algo(a + b, c + d) - z_2 - z_0\n\n # Step 3\n result = z_2*pow(div_number, 2) + z_1*div_number + z_0\n return result\n\n\ndef main():\n x = 3141592653589793238462643383279502884197169399375105820974944592\n y = 2718281828459045235360287471352662497757247093699959574966967627\n control = x*y\n Karatsuba_method = Karatsuba_algo(x, y)\n print(\"This should be the result: \" + str(control) +\n \" and here is the result using Karatsuba method: \" + str(Karatsuba_method))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"01_Karatsuba_multiplication_alg.py","file_name":"01_Karatsuba_multiplication_alg.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"507360734","text":"import sys\nfrom PIL import Image\nfrom yolo import YOLO\nfrom imutils.video import VideoStream\nimport time\nimport cv2\nimport os\nimport glob\nimport imutils\nfrom resizevideo import take_and_resize\nimport numpy as np\n\ndef yolodetect(dnn_params, input, output):\n files = glob.glob('output/*.png')\n for f in files:\n\t os.remove(f)\n\n if input == 'cam':\n vs = VideoStream(src=0).start() #or 1\n time.sleep(2.0)\n fps = 0\n yolo = YOLO(dnn_params[\"-mp\"], dnn_params[\"-ap\"], dnn_params[\"-cp\"])\n\n while True:\n start_t = time.time()\n # grab the frame from the threaded video stream and resize it\n # to have a maximum width of 314 pixels\n frame = vs.read()\n #frame = imutils.resize(frame, width=314)\n\n #take frame and return square(for wide angle cameras) in the desired resolution default is (314, 314)\n frame = take_and_resize(frame) #here you can also choose your own ouput resolution like take_and_resize(frame, your resolution) \n \n # loop over the detections\n outBoxes = yolo.detect_image(frame) #Here you can make whatever you want (return[top left x, top left y, bottom right x, bottom right y, class name])\n \n frame = np.asarray(frame)\n if len(outBoxes) > 0:\n for box in outBoxes:\n # extract the bounding box coordinates\n (x, y) = (int(box[0]), int(box[1]))\n (w, h) = (int(box[2]), int(box[3]))\n bbox = [x, y, w, h, box[4]]\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0,255,0), 2)\n text = 'classID = {}'.format(box[4])\n cv2.putText(frame, text, (box[0], box[1] - 5), 
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2)\n\n # show the output frame\n #frame = imutils.resize(frame, width=500)\n cv2.imshow(\"Frame\", frame)\n\n\n key = cv2.waitKey(1) & 0xFF\n\n fps = 1/(time.time() - start_t)\n #print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps))\n\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n\n\n\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n yolo.close_session()\n\n else:\n vs = cv2.VideoCapture(input)\n writer = None\n try:\n\t prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\t else cv2.CAP_PROP_FRAME_COUNT\n\t total = int(vs.get(prop))\n\t print(\"[INFO] {} total frames in video\".format(total))\n # an error occurred while trying to determine the total\n # number of frames in the video file\n except:\n\t print(\"[INFO] could not determine # of frames in video\")\n\t print(\"[INFO] no approx. completion time can be provided\")\n\t total = -1\n\n yolo = YOLO(dnn_params[\"-mp\"], dnn_params[\"-ap\"], dnn_params[\"-cp\"])\n frameIndex = 0\n while True:\n counter = 0\n start_time = time.time()\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n image = Image.fromarray(frame)\n\n outBoxes = yolo.detect_image(image) #Here you can make whatever you want (return[top left x, top left y, bottom right x, bottom right y, class name])\n\n if len(outBoxes) > 0:\n for box in outBoxes:\n # extract the bounding box coordinates\n (x, y) = (int(box[0]), int(box[1]))\n (w, h) = (int(box[2]), int(box[3]))\n bbox = [x, y, w, h, box[4]]\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0,255,0), 2)\n text = 'classID = {}'.format(box[4])\n cv2.putText(frame, text, (box[0], box[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2)\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n cv2.imwrite(\"output/frame-{}.png\".format(frameIndex), frame)\n\n # check if the video writer is None\n if writer is None:\n # initialize our video writer\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(output, fourcc, 30,\n (frame.shape[1], frame.shape[0]), True)\n # write the output frame to disk\n writer.write(frame)\n\n # increase frame index\n frameIndex += 1\n\n # release the file pointers\n print(\"[INFO] cleaning up...\")\n files = glob.glob('output/*.png')\n for f in files:\n os.remove(f)\n yolo.close_session()\n writer.release()\n vs.release()\n","sub_path":"yolodetect.py","file_name":"yolodetect.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"607153660","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author:Wen\r\n\"\"\"\r\nimport get_train_data as g\r\nimport numpy as np\r\nimport cv2\r\nlabel_list = g.label_list\r\ndata_list = g.data_list\r\nfirst_path = g.first_path\r\nclass train_v (object):\r\n \"\"\"\r\n 这个类是数据集的可视化,迭代对象将会输出可视化后的两个数据 , 一个是黑图带label,一个是原图带label的\r\n \"\"\"\r\n def __init__(self,label_list,data_list):\r\n self.label_list = label_list\r\n self.data_list = data_list\r\n def __getitem__(self,index):\r\n \"\"\"\r\n 用于返回可视化结果,注意这里返回的是一张一张的并不是batch,这个脚本可以直接运行\r\n :param index:索引号\r\n :return: 一个是黑图带label,一个是原图带label的,两个都是numpy__array数据类型np.uint8\r\n \"\"\"\r\n label = cv2.imread(self.label_list[index])\r\n data_with_label = cv2.imread(self.data_list[index])\r\n p = 
np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint8)\r\n label = label[:, :, 0]\r\n out_label = p[label]\r\n ret, label = cv2.threshold(label, 0, 255, cv2.THRESH_BINARY)\r\n mask_inv = cv2.bitwise_not(label)\r\n data_with_label = cv2.bitwise_and(data_with_label, data_with_label, mask=mask_inv)\r\n data_with_label = cv2.add(data_with_label, out_label)\r\n return data_with_label , out_label\r\n\r\ndef save_train_image(data, label):\r\n \"\"\"\r\n Save the visualized results.\r\n External usage:\r\n from train_visualization import vis_for_train\r\n from train_visualization import save_train_image\r\n save_train_image(*vis_for_train[i])\r\n That is the whole purpose of this .py file.\r\n :param data: visualized data, original image with label\r\n :param label: black image + label\r\n :return:\r\n \"\"\"\r\n # note: the file name relies on the module-level loop index i\r\n cv2.imwrite(\"./train_image.jpg/{0:06d}_data_label_vision.png\".format(int(i)), data)\r\n cv2.imwrite(\"./train_label.png/{0:06d}_data_label_vision.png\".format(int(i)), label)\r\n\r\nvis_for_train = train_v(label_list,data_list)\r\n\"\"\"\r\nvis_for_train wraps the visualized dataset: vis_for_train[i] directly returns the two visualized images.\r\n\"\"\"\r\n\r\nif __name__ ==\"__main__\":\r\n vis_for_train = train_v(label_list,data_list)\r\n for i in range(3):\r\n i=i+1000\r\n data_vision,label_vision = vis_for_train[i]\r\n save_train_image(*vis_for_train[i])\r\n print(\"ok\")\r\n\r\n\r\n","sub_path":"train_visualization.py","file_name":"train_visualization.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"60570526","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\nclass Timing(models.Model): \n _name = \"hc.timing\" \n _description = \"Timing\" \n\n name = fields.Char(\n string=\"Timing Name\", \n required=\"True\", \n help=\"Human-readable label for this timing definition.\")\n event_ids = fields.One2many(\n comodel_name=\"hc.timing.event.date\", \n inverse_name=\"timing_id\", \n string=\"Event Dates\", \n help=\"When the event occurs.\") \n code_id = fields.Many2one(\n comodel_name=\"hc.vs.timing.abbreviation\", \n string=\"Code\", \n help=\"Timing abbreviation.\") \n repeat_ids = fields.One2many(\n comodel_name=\"hc.timing.repeat\", \n inverse_name=\"timing_id\", \n string=\"Repeats\", \n help=\"When the event is to occur.\") \n\nclass TimingRepeat(models.Model): \n _name = \"hc.timing.repeat\" \n _description = \"Timing Repeat\"\n \n timing_id = fields.Many2one(\n comodel_name=\"hc.timing\", \n string=\"Timing\", \n required=\"True\", \n help=\"Timing associated with this repeat.\") \n bounds_type = fields.Selection(\n string=\"Bounds Type\", \n selection=[\n (\"duration\", \"Duration\"), \n (\"range\", \"Range\"), \n (\"period\", \"Period\")], \n help=\"Type of bounds.\") \n bounds_name = fields.Char(\n string=\"Bounds\", \n compute=\"_compute_bounds_name\",\n store=\"True\",\n help=\"Length/Range of lengths, or (Start and/or end) limit.\") \n bounds_duration = fields.Float(\n string=\"Bounds Duration\", \n help=\"Bounds length of time.\")\n bounds_duration_uom_id = fields.Many2one(\n comodel_name=\"product.uom\", \n string=\"Bounds Duration UOM\", \n domain=\"[('category_id','=','Time (UCUM)')]\", \n help=\"Bounds Duration unit of measure.\") \n bounds_range_low = fields.Float(\n string=\"Bounds Range Low\", \n help=\"Low limit of bounds range.\") \n bounds_range_high = fields.Float(\n string=\"Bounds Range High\", \n help=\"High limit of bounds range.\") \n bounds_period_start_date = fields.Datetime(\n string=\"Bounds Period Start Date\", \n 
help=\"Start of the bounds period.\") \n bounds_period_end_date = fields.Datetime(\n string=\"Bounds Period End Date\", \n help=\"End of the bounds period.\") \n count = fields.Integer(\n string=\"Count\", \n help=\"Number of times to repeat.\") \n count_max = fields.Integer(\n string=\"Count Max\", \n help=\"Maximum number of times to repeat.\") \n duration = fields.Float(\n string=\"Duration\", \n help=\"How long when it happens.\") \n duration_max = fields.Float(\n string=\"Duration Max\", \n help=\"How long when it happens (Max).\")\n duration_unit_id = fields.Many2one(\n comodel_name=\"product.uom\", \n string=\"Duration UOM\",\n domain=\"[('category_id','=','Time (UCUM)')]\", \n help=\"Unit of time (UCUM).\") \n frequency = fields.Integer(\n string=\"Frequency\", \n help=\"Event occurs frequency times per duration.\") \n frequency_max = fields.Integer(\n string=\"Frequency Max\", \n help=\"Event occurs frequency times per duration.\") \n period = fields.Float(\n string=\"Period\", \n help=\"Event occurs frequency times per period.\") \n period_max = fields.Float(\n string=\"Period Max\",\n help=\"Upper limit of period (3-4 hours).\")\n period_unit_id = fields.Many2one(\n comodel_name=\"product.uom\", \n string=\"Period UOM\",\n domain=\"[('category_id','=','Time (UCUM)')]\", \n help=\"Unit of time (UCUM).\") \n day_of_week_ids = fields.Many2many(\n comodel_name=\"hc.vs.days.of.week\", \n relation=\"timing_repeat_day_of_week_rel\", \n string=\"Days of Week\", \n help=\"If one or more days of week is provided, then the action happens only on the specified day(s).\")\n time_of_day_ids = fields.One2many(\n comodel_name=\"hc.timing.repeat.time.of.day\", \n inverse_name=\"repeat_id\", \n string=\"Time Of Days\", \n help=\"Time of day for action.\")\n when_ids = fields.Many2many(\n comodel_name=\"hc.vs.timing.event\", \n relation=\"timing_repeat_when_rel\", \n string=\"When\", \n help=\"Regular life events the event is tied to.\")\n offset = fields.Integer(\n string=\"Offset Minutes\", \n help=\"Minutes from event (before or after).\") \n\n _sql_constraints = [ \n ('bounds_duration_gt_zero',\n 'CHECK(bounds_duration >= 0.0)',\n 'Bounds Duration SHALL be a non-negative value.'),\n\n ('range_low_gt_zero', \n 'CHECK(bounds_range_low >= 0.0)', \n 'Range Low SHALL be a non-negative value.'),\n\n ('range_high_gt_low',\n 'CHECK(bounds_range_high >= bounds_range_low)',\n 'Range High SHALL not be lower than Range Low.'),\n\n ('period_end_gt_start',\n 'CHECK(bounds_period_end_date >= bounds_period_start_date)',\n 'Period End Date SHALL not be lower than Period Start Date.'),\n\n ('count_max_gt_count',\n 'CHECK(count_max >= count)',\n 'Maximum Count SHALL not be lower than Count.'),\n\n ('duration_gt_zero',\n 'CHECK(duration >= 0.0)',\n 'Duration SHALL be a non-negative value.'),\n\n ('duration_max_gt_duration',\n 'CHECK(duration_max >= duration)',\n 'Maximum Duration SHALL not be lower than Duration.'),\n\n ('frequency_gt_zero',\n 'CHECK(frequency >= 0.0)',\n 'Frequency SHALL be a non-negative value.'),\n\n ('frequency_max_gt_frequency',\n 'CHECK(frequency_max >= frequency)',\n 'Maximum Frequency SHALL not be lower than Frequency.'),\n\n ('period_gt_zero',\n 'CHECK(period >= 0.0)',\n 'Period SHALL be a non-negative value.'),\n\n ('period_max_gt_period',\n 'CHECK(period_max >= period)',\n 'Maximum Period SHALL not be lower than Period.') \n ]\n\n @api.depends('bounds_type') \n def _compute_bounds_name(self): \n for hc_timing_repeat in self:\n if hc_timing_repeat.bounds_type == 'duration': \n 
hc_timing_repeat.bounds_name = str(hc_timing_repeat.bounds_duration) + \" \" + str(hc_timing_repeat.bounds_duration_uom_id.name)\n elif hc_timing_repeat.bounds_type == 'period': \n hc_timing_repeat.bounds_name = \"Between \" + str(hc_timing_repeat.bounds_period_start_date) + \" and \" + str(hc_timing_repeat.bounds_period_end_date)\n elif hc_timing_repeat.bounds_type == 'range': \n hc_timing_repeat.bounds_name = \"Between \" + str(hc_timing_repeat.bounds_range_low) + \" and \" + str(hc_timing_repeat.bounds_range_high)\n else: \n # computed stored fields should assign a value on every branch\n hc_timing_repeat.bounds_name = False\n\n# Constraints (reference: http://build.fhir.org/datatypes.html#timing)\n\n# tim-1: On Timing.repeat: if there's a duration, there needs to be duration units (expression on Timing.repeat: duration.empty() or durationUnit.exists())\n# Implemented in the view.\n\n# tim-7: On Timing.repeat: If there's a durationMax, there must be a duration (expression on Timing.repeat: durationMax.empty() or duration.exists())\n# Implemented in the view.\n\n# tim-8: On Timing.repeat: If there's a countMax, there must be a count (expression on Timing.repeat: countMax.empty() or count.exists())\n# Implemented in the view.\n \n\nclass TimingEventDate(models.Model): \n _name = \"hc.timing.event.date\" \n _description = \"Timing Event Date\" \n _inherit = [\"hc.basic.association\"] \n\n timing_id = fields.Many2one(\n comodel_name=\"hc.timing\", \n string=\"Timing\", \n required=\"True\", \n help=\"Timing associated with this Timing Event Date.\") \n event_date = fields.Datetime(\n string=\"Event Date\", \n help=\"Event Date associated with this Timing Event Date.\") \n\nclass TimingRepeatTimeOfDay(models.Model):\n _name = \"hc.timing.repeat.time.of.day\"\n _description = \"Timing Repeat Time Of Day\"\n _inherit = [\"hc.basic.association\"]\n\n repeat_id = fields.Many2one(\n comodel_name=\"hc.timing.repeat\", \n string=\"Repeat\", \n help=\"Repeat associated with this Timing Repeat Time Of Day.\") \n time_of_day = fields.Float(\n string=\"Time Of Day\", \n help=\"Time Of Day associated with this Timing Repeat Time Of Day.\") \n\nclass TimingAbbreviation(models.Model):\n _name = \"hc.vs.timing.abbreviation\"\n _description = \"Timing Abbreviation\"\n _inherit = [\"hc.value.set.contains\"]\n\n name = fields.Char(\n string=\"Name\", \n help=\"Name of this timing abbreviation.\") \n code = fields.Char(\n string=\"Code\", \n help=\"Code of this timing abbreviation.\") \n contains_id = fields.Many2one(\n comodel_name=\"hc.vs.timing.abbreviation\", \n string=\"Parent\", \n help=\"Parent timing abbreviation.\") \n\nclass TimingEvent(models.Model):\n _name = \"hc.vs.timing.event\"\n _description = \"Timing Event\"\n _inherit = [\"hc.value.set.contains\"]\n\n name = fields.Char(\n string=\"Name\", \n help=\"Name of this timing event.\") \n code = fields.Char(\n string=\"Code\", \n help=\"Code of this timing event.\") \n contains_id = fields.Many2one(\n comodel_name=\"hc.vs.timing.event\", \n string=\"Parent\", \n help=\"Parent timing event.\") \n\n\n\n","sub_path":"addons/hc_base/models/hc_timing.py","file_name":"hc_timing.py","file_ext":"py","file_size_in_byte":11318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"461545859","text":"# Copyright (C) 2016, 2017 University of Vienna\n# All rights reserved.\n# BSD license.\n# Author: Ali Baharev \nfrom __future__ import print_function, division\nimport os\nfrom hypothesis import given, settings\nfrom hypothesis.strategies import integers\nfrom networkx import OrderedDiGraph, gnm_random_graph\n\nclass 
MyOrderedDiGraph(OrderedDiGraph):\n \n #@profile\n def subgraph(self, nbunch):\n bunch = self.nbunch_iter(nbunch)\n # create new graph and copy subgraph into it\n H = self.__class__()\n # copy node and attribute dictionaries\n for n in bunch:\n H.node[n]=self.node[n]\n # namespace shortcuts for speed\n H_succ=H.succ\n H_pred=H.pred\n self_succ=self.succ\n self_pred=self.pred\n # add nodes\n for n in H:\n H_succ[n]=H.adjlist_inner_dict_factory()\n H_pred[n]=H.adjlist_inner_dict_factory()\n # add successors\n for u in H_succ:\n Hnbrs=H_succ[u]\n for v,datadict in self_succ[u].items():\n if v in H_succ:\n Hnbrs[v]=datadict\n # add predecessors\n for u in H_pred:\n Hnbrs=H_pred[u]\n for v,datadict in self_pred[u].items():\n if v in H_pred:\n Hnbrs[v]=datadict\n H.graph=self.graph\n return H\n\n#def check_proxy(n, m, seed):\n# check_order(n, m, seed)\n\n#@profile\ndef check_order(n, m, seed):\n g_rnd = gnm_random_graph(n, m, seed=seed, directed=True)\n \n g_orig = MyOrderedDiGraph()\n g_orig.add_edges_from(g_rnd.edges())\n g_orig.add_nodes_from(g_rnd.nodes())\n\n nodes = list(g_orig.nodes())\n \n # still a caveat: must preserve the relative order of the nodes\n g_sub = g_orig.subgraph(nodes)\n\n for n_orig, n_sub in zip(g_orig, g_sub):\n assert n_orig == n_sub\n \n for e_orig, e_sub in zip(g_orig.edges(), g_sub.edges()):\n assert e_orig == e_sub\n\n for n in nodes:\n assert list(g_orig.predecessors(n)) == list(g_sub.predecessors(n))\n assert list(g_orig.successors(n)) == list(g_sub.successors(n))\n \n #print('nodes: %d, edges: %d' % (g_orig.number_of_nodes(), g_orig.number_of_edges())) \n\n\ndef main():\n print('Started generative testing...')\n os.environ['HYPOTHESIS_STORAGE_DIRECTORY'] = '/tmp/ht'\n \n MAX_VALUE = 30\n \n with settings(max_examples=10000):\n decor = given(n = integers(min_value=0, max_value= MAX_VALUE),\n m = integers(min_value=0, max_value=5*MAX_VALUE), \n seed = integers(min_value=1))\n \n decor(check_order)()\n \n print('Done!')\n\n\nif __name__ == '__main__':\n import networkx\n import hypothesis\n print('networkx', networkx.__version__)\n print('hypothesis', hypothesis.__version__)\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216850795","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 6 19:11:55 2021\n\n@author: Fetibek Aliev\n\"\"\"\n\nt_0 = '3.4 0.7 2.0 0.4 2.5 2.6 1.7 0.2 4.0 2.5'\nt_12 ='6.4 8.3 6.8 6.7 7.4 6.4 8.9 4.7 5.3 7.6'\n\nt_avg = 4.5\n\nt_0_List_float = list(map(float, t_0.split()))\n\nt_12_List_float = list(map(float, t_12.split()))\n\nt_avg_list = [(t_0_List_float[i] + t_12_List_float[i])/2\n for i in range(len(t_0_List_float))]\n\nindex = []\n\n# enumerate avoids list.index(), which returns only the first match and\n# would report a wrong position if two averages were equal\nfor pos, avg in enumerate(t_avg_list):\n if avg > t_avg:\n index.append(pos)\n \nfor i in index:\n print(i)","sub_path":"Task35.py","file_name":"Task35.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"125511948","text":"import dbus\nimport evdev\nimport keymap\nfrom time import sleep\nimport keyremap\nimport pprint\n\nHID_DBUS = 'org.yaptb.btkbservice'\nHID_SRVC = '/org/yaptb/btkbservice'\n\n\nclass Kbrd:\n \"\"\"\n Take the events from a physically attached keyboard and send the\n HID messages to the keyboard D-Bus server.\n \"\"\"\n def __init__(self):\n self.target_length = 6\n self.mod_keys = 0b00000000\n self.pressed_keys = []\n 
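# a HID keyboard report can carry at most six simultaneous keycodes,\n # so the list is padded out to target_length with zeroes\n 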
self.pressed_keys.extend([0] * self.target_length)\n self.have_kb = False\n self.dev = None\n self.bus = dbus.SystemBus()\n self.btkobject = self.bus.get_object(HID_DBUS,\n HID_SRVC)\n self.btk_service = dbus.Interface(self.btkobject,\n HID_DBUS)\n self.pp = pprint.PrettyPrinter(indent=4)\n self.wait_for_keyboard()\n\n def wait_for_keyboard(self, event_id=0):\n \"\"\"\n Connect to the input event file for the keyboard.\n Can take a parameter of an integer that gets appended to the end of\n /dev/input/event\n :param event_id: Optional parameter if the keyboard is not event0\n \"\"\"\n while not self.have_kb:\n try:\n # try and get a keyboard - should always be event0 as\n # we're only plugging one thing in\n self.dev = evdev.InputDevice('/dev/input/event{}'.format(\n event_id))\n self.have_kb = True\n except OSError:\n print('Keyboard not found, waiting 3 seconds and retrying')\n sleep(3)\n print('found a keyboard')\n\n def update_mod_keys(self, mod_key, value):\n \"\"\"\n Which modifier keys are active is stored in an 8 bit number.\n Each bit represents a different key. This method takes which bit\n and its new value as input\n :param mod_key: The value of the bit to be updated with new value\n :param value: Binary 1 or 0 depending if pressed or released\n \"\"\"\n\n bit_mask = 1 << (7-mod_key)\n print('update_mod_keys: bit_mask=', bit_mask, value)\n if value: # set bit\n self.mod_keys |= bit_mask\n else: # clear bit\n self.mod_keys &= ~bit_mask\n\n def update_keys(self, norm_key, value):\n\n if value < 1:\n self.pressed_keys.remove(norm_key)\n elif norm_key not in self.pressed_keys:\n self.pressed_keys.insert(0, norm_key)\n len_delta = self.target_length - len(self.pressed_keys)\n if len_delta < 0:\n self.pressed_keys = self.pressed_keys[:len_delta]\n elif len_delta > 0:\n self.pressed_keys.extend([0] * len_delta)\n\n #print('update_keys', norm_key, value, len(self.pressed_keys))\n\n @property\n def state(self):\n \"\"\"\n property with the HID message to send for the current keys pressed\n on the keyboard\n :return: bytes of HID message\n \"\"\"\n # return s\n mod_keys, pressed_keys = keyremap.remap(self.mod_keys, self.pressed_keys)\n #keyremap.remap2(self.mod_keys, self.pressed_keys)\n # s = [0xA1, 0x01, self.mod_keys, 0, *self.pressed_keys]\n # self.pp.pprint(s)\n #self.pp.pprint(['state', self.mod_keys, self.pressed_keys])\n return [0xA1, 0x01, mod_keys, 0, *pressed_keys]\n # return [0xA1, 0x01, self.mod_keys, 0, *self.pressed_keys]\n\n def send_keys(self):\n self.btk_service.send_keys(self.state)\n\n def event_loop(self):\n \"\"\"\n Loop to check for keyboard events and send HID message\n over D-Bus keyboard service when they happen\n \"\"\"\n print('Listening...')\n for event in self.dev.read_loop():\n # only bother if we hit a key and it's an up or down event\n if event.type == evdev.ecodes.EV_KEY and event.value < 2:\n key_str = evdev.ecodes.KEY[event.code]\n print('event_loop', event.code, key_str)\n mod_key = keymap.modkey(key_str)\n if mod_key > -1:\n self.update_mod_keys(mod_key, event.value)\n else:\n self.update_keys(keymap.convert(key_str), event.value)\n self.send_keys()\n\n\nif __name__ == '__main__':\n print('Setting up keyboard')\n kb = Kbrd()\n\n print('starting event loop')\n kb.event_loop()\n","sub_path":"btkeyboard/keyboard/kb_client.py","file_name":"kb_client.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518851680","text":"# -*- coding:utf8 -*-\n# Main 
game file of Operation Bubblegum\n\nimport pygame\nfrom pygame.locals import *\nimport utilitaires, constantes, play_manager, module_sauvegarde\n\ndef jeu_mainloop(fenetre, recuperation_de_sauvegarde, jouer_musique):\n \"\"\"The game's main loop.\n Calls various functions to display the different levels.\n The save-restore flag is passed as True/False\"\"\"\n\n#DECLARATION OF THE KEY GAME VARIABLES\n#NB: Some of them are overwritten when a save file is loaded\n\n #SYSTEM VARIABLES\n running_game = True\n creation_reader = True\n\n #GAME VARIABLES\n niveau_actuel = 0\n ID_narratif_actuel = None\n reponse_choix_precedent = None\n niveau_en_cours = False\n musique_du_niveau = None\n karma_actuel = 0\n\n #GRAPHICS VARIABLES\n background_a_afficher = \"niveau_1\"\n rafraichissement = True\n POS_BOUTON_SAUVER_JEU = constantes.POS_BOUTON_SAUVER_JEU()\n POS_BOUTON_QUITTER_JEU = constantes.POS_BOUTON_QUITTER_JEU()\n POS_BOUTON_CHOIX_1 = constantes.POS_BOUTON_CHOIX_1()\n POS_BOUTON_CHOIX_2 = constantes.POS_BOUTON_CHOIX_2()\n POS_BOUTON_CHOIX_3 = constantes.POS_BOUTON_CHOIX_3()\n\n #READER VARIABLES\n chargement_nouveau_texte = True\n texte = \"(Pour avancer un texte sans choix, appuyez sur 'choix 1'.)\\n\"\n\n#=====================================\n# Save-restore section:\n#=====================================\n\n if(recuperation_de_sauvegarde == True):\n donnees = module_sauvegarde.charger_fichier()\n\n if(donnees != None):\n ID_narratif_actuel = donnees[0] + 1\n niveau_actuel = donnees[1]\n karma_actuel = donnees[2]\n print(\"ID after loading\", ID_narratif_actuel)\n\n#===================================\n# Section Operation Bubblegum game:\n#===================================\n# NB: THIS PART MUST BE AS CLEAR AS POSSIBLE !!!\n\n while(running_game == True):\n\n if(niveau_actuel == 0):\n niveau_actuel = introduction_jeu(fenetre, jouer_musique)\n\n elif(niveau_actuel != 0): #Part of the game containing all the levels\n if(niveau_en_cours == False): #This section updates the background and the buttons as well as some game mechanisms.\n\n if(niveau_actuel == 1): #Here we select the background to display and the music\n utilitaires.background_manager(fenetre, \"principal\")\n musique_du_niveau = \"unwelcome\"\n\n if(pygame.mixer.music.get_busy() == True):\n utilitaires.musique_mute(jouer_musique)\n utilitaires.musique_mute(jouer_musique)\n\n utilitaires.afficher_image(fenetre, \"sauver\", constantes.POS_BOUTON_SAUVER_JEU())\n utilitaires.afficher_image(fenetre, \"quitter\", constantes.POS_BOUTON_QUITTER_JEU())\n utilitaires.afficher_image(fenetre, \"choix_1\", constantes.POS_BOUTON_CHOIX_1())\n utilitaires.afficher_image(fenetre, \"choix_2\", constantes.POS_BOUTON_CHOIX_2())\n utilitaires.afficher_image(fenetre, \"choix_3\", constantes.POS_BOUTON_CHOIX_3())\n\n niveau_en_cours = True #Once the graphic components are in place, start the game loop\n\n if(ID_narratif_actuel == None):\n ID_narratif_actuel = 111000000000\n reponse_choix_precedent = 0\n\n# ------------------------\n# MAIN GAME LOOP\n# ------------------------\n\n while(niveau_en_cours == True):\n if(rafraichissement == True): # Refresh the screen when requested.\n pygame.display.flip()\n rafraichissement = False\n\n for event in pygame.event.get(): # Events to process\n\n if(pygame.mixer.music.get_busy() == False and jouer_musique == True):\n utilitaires.musique_manager(musique_du_niveau)\n\n elif(event.type == 
MOUSEBUTTONDOWN and event.button == 1): #Left-click actions per button\n pos_mouse_DOWN = pygame.mouse.get_pos()\n\n if(pos_mouse_DOWN[0] > POS_BOUTON_CHOIX_1[0] and\n pos_mouse_DOWN[0] < POS_BOUTON_CHOIX_1[0] + 150 and\n pos_mouse_DOWN[1] > POS_BOUTON_CHOIX_1[1] and\n pos_mouse_DOWN[1] < POS_BOUTON_CHOIX_1[1] + 50): #Choice 1 button\n\n if(instructions_reader[2] == False):\n reponse_choix_precedent = 0\n rafraichissement = True\n elif(instructions_reader[2] == True):\n reponse_choix_precedent = 1\n chargement_nouveau_texte = True\n\n elif(pos_mouse_DOWN[0] > POS_BOUTON_CHOIX_2[0] and\n pos_mouse_DOWN[0] < POS_BOUTON_CHOIX_2[0] + 150 and\n pos_mouse_DOWN[1] > POS_BOUTON_CHOIX_2[1] and\n pos_mouse_DOWN[1] < POS_BOUTON_CHOIX_2[1] + 50): #Choice 2 button\n\n if(instructions_reader[2] == False):\n pass\n elif(instructions_reader[2] == True):\n reponse_choix_precedent = 2\n chargement_nouveau_texte = True\n\n elif(pos_mouse_DOWN[0] > POS_BOUTON_CHOIX_3[0] and\n pos_mouse_DOWN[0] < POS_BOUTON_CHOIX_3[0] + 150 and\n pos_mouse_DOWN[1] > POS_BOUTON_CHOIX_3[1] and\n pos_mouse_DOWN[1] < POS_BOUTON_CHOIX_3[1] + 50): #Choice 3 button\n\n if(instructions_reader[2] == False):\n pass\n elif(instructions_reader[2] == True):\n reponse_choix_precedent = 3\n chargement_nouveau_texte = True\n\n elif(pos_mouse_DOWN[0] > POS_BOUTON_SAUVER_JEU[0] and\n pos_mouse_DOWN[0] < POS_BOUTON_SAUVER_JEU[0] + 150 and\n pos_mouse_DOWN[1] > POS_BOUTON_SAUVER_JEU[1] and\n pos_mouse_DOWN[1] < POS_BOUTON_SAUVER_JEU[1] + 50): #Save button\n ID_narratif_save = ID_narratif_actuel -1\n donnees_a_sauvegarder = [ID_narratif_save, niveau_actuel, karma_actuel]\n module_sauvegarde.sauvegarder_fichier(donnees_a_sauvegarder)\n\n elif(pos_mouse_DOWN[0] > POS_BOUTON_QUITTER_JEU[0] and\n pos_mouse_DOWN[0] < POS_BOUTON_QUITTER_JEU[0] + 150 and\n pos_mouse_DOWN[1] > POS_BOUTON_QUITTER_JEU[1] and\n pos_mouse_DOWN[1] < POS_BOUTON_QUITTER_JEU[1] + 50): #Quit button\n niveau_en_cours = False\n running_game = False\n section_de_jeu = \"accueil\"\n\n elif(event.type == KEYDOWN):\n if(event.key == K_p):\n utilitaires.musique_volume(True)\n elif(event.key == K_o):\n utilitaires.musique_volume(False)\n elif(event.key == K_k):\n jouer_musique = utilitaires.musique_mute(jouer_musique)\n\n if(chargement_nouveau_texte == True): # Section handling the reader\n\n instructions_reader = play_manager.text_choices_manager(ID_narratif_actuel, reponse_choix_precedent, karma_actuel)\n # instructions_reader = #Final form: go_to_reader = [ID, new_text, choice_flag, choice_1, choice_2, choice_3, new karma]\n ID_narratif_actuel = instructions_reader[0]\n print(\"Current karma =\", karma_actuel)\n if(instructions_reader[2] == False): #Format the text when there is no choice.\n texte = \"\\t\" + instructions_reader[1] + \"\\n\"*2 + \"\\t1- Continuer\"\n if(instructions_reader[2] == True): #Format the text when there are choices.\n texte = \"\\t\" + instructions_reader[1] + \"\\n\"*2 + \"\\t1-\" + instructions_reader[3] + \"\\n\"*2 + \"\\t2-\" + instructions_reader[4] + \"\\n\"*2 + \"\\t3-\" + instructions_reader[5]\n\n if(creation_reader == True): #If the reader object has not been created yet.\n FenetreReader = play_manager.Reader(texte.expandtabs(4), constantes.POS_READER_JEU(),\n constantes.READER_LARGEUR(), constantes.READER_FONTSIZE(), constantes.READER_HAUTEUR(),\n bg = constantes.READER_BACKGROUND(), fgcolor = constantes.READER_COULEUR_TEXTE(),\n hlcolor = constantes.READER_COULEUR_HIGHLIGHT(), split = True)\n FenetreReader.show()\n 
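# remember that the reader now exists so later passes only update its text\n 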
creation_reader = False\n\n elif(creation_reader == False): #If the reader already exists, just update it.\n FenetreReader.TEXT = texte\n FenetreReader.show()\n\n chargement_nouveau_texte = False\n\n return section_de_jeu, jouer_musique\n\ndef introduction_jeu(fenetre, jouer_musique):\n \"\"\"Runs a short introduction when a new game starts.\"\"\"\n\n #Set up the variables needed for the introduction\n intro_en_cours = True\n rafraichissement = True\n image_intro = 1\n\n #Display the intro background\n utilitaires.background_manager(fenetre, \"intro\")\n\n while(intro_en_cours == True): #While the intro is running\n if(rafraichissement == True): #If a screen refresh was requested\n if(image_intro == 1): #Select the new text to display\n image_a_afficher_intro = \"texte_intro_1\"\n elif(image_intro == 2):\n image_a_afficher_intro = \"texte_intro_2\"\n elif(image_intro == 3):\n image_a_afficher_intro = \"texte_intro_3\"\n elif(image_intro == 4):\n image_a_afficher_intro = \"texte_intro_4\"\n elif(image_intro == 5): #Or, once past the last text, end the intro\n intro_en_cours = False\n\n utilitaires.afficher_image(fenetre, image_a_afficher_intro, constantes.POS_TEXTE_INTRO()) #Update the text\n pygame.display.flip() #Refresh the screen\n image_intro += 1 # Increment image_intro so the next text shows on the next pass\n rafraichissement = False\n\n for event in pygame.event.get():\n if(pygame.mixer.music.get_busy() == False and jouer_musique == True): #If the music ends, restart it\n utilitaires.musique_manager(\"theme_accueil\")\n elif(event.type == MOUSEBUTTONDOWN and event.button == 1): #On left click, advance the text\n rafraichissement = True\n elif(event.type == KEYDOWN): #Sound handling\n if(event.key == K_p):\n utilitaires.musique_volume(True)\n elif(event.key == K_o):\n utilitaires.musique_volume(False)\n elif(event.key == K_k):\n jouer_musique = utilitaires.musique_mute(jouer_musique)\n\n return 1 #Return the first level as the value\n","sub_path":"bubblegum_jeu.py","file_name":"bubblegum_jeu.py","file_ext":"py","file_size_in_byte":11565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"105020949","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom .models import BoardState, Player, PlayerPiece, Piece\nfrom django.http import HttpResponseServerError\nfrom django.template import loader\nfrom .gameStateManager import GameStateManager, Error\nfrom blockoo_engine.player import PlayerDTO\nfrom blockoo_engine.piece import PieceDTO\nfrom blockoo_engine.board import BoardDTO\nimport json\n\ndef index(request):\n return HttpResponse(loader.get_template('index.html').render({}, request)) \n \ndef getBoardState(request, board_id):\n try:\n boardState = BoardState.objects.get(id=board_id)\n\n finalScore = {}\n\n print(boardState.gameIsOver)\n\n if boardState.gameIsOver:\n playerDAOs = Player.objects.filter(board_id=board_id)\n for player in playerDAOs:\n playerScore = 0\n for playerPiece in PlayerPiece.objects.filter(player_id=player.id):\n playerScore = playerScore + len(playerPiece.piece_id)\n \n finalScore[player.color] = playerScore\n\n return asJsonResponse({\"boardState\": boardState, \"finalScore\": finalScore})\n except BoardState.DoesNotExist:\n # responses must be returned, not raised\n return HttpResponseServerError(\"Unable to find that board\")\n \ndef getBoardPlayers(request, board_id):\n 
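# serialize every player of this board along with the ids of the pieces they still hold\n 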
try:\n playerDAOs = Player.objects.filter(board_id=board_id)\n\n playerDTOs = []\n\n for p in playerDAOs:\n playerPieceDAOs = PlayerPiece.objects.filter(player_id=p.id)\n pieceDTOs = []\n for dao in playerPieceDAOs:\n pieceDTOs.append(dao.piece.id)\n \n playerDTOs.append(PlayerDTO(p.id, p.color, pieceDTOs, p.isFinished))\n\n return asJsonList(playerDTOs)\n except BoardState.DoesNotExist:\n return HttpResponseServerError(\"Unable to find players for that board\")\n \ndef placePiece(request, board_id):\n try:\n if request.method != \"POST\":\n return HttpResponseServerError(\"Invalid request\")\n \n boardState = BoardState.objects.get(id=board_id)\n playerDAOs = Player.objects.filter(board_id=board_id)\n\n playersDTOs = []\n\n for p in playerDAOs:\n playerPieceDAOs = PlayerPiece.objects.filter(player_id=p.id)\n pieceDTOs = []\n for dao in playerPieceDAOs:\n pieceDTOs.append(PieceDTO(dao.piece.id))\n \n playersDTOs.append(PlayerDTO(p.id, p.color, pieceDTOs, p.isFinished))\n\n requestJson = json.loads(request.body)\n\n mgr = GameStateManager(boardState, playersDTOs)\n mgr.placePiece(requestJson[\"posX\"], requestJson[\"posY\"], requestJson[\"rotation\"], requestJson[\"mirrored\"], requestJson[\"piece\"], requestJson[\"player\"])\n \n placedPieceDAO = Piece.objects.get(id=requestJson[\"piece\"])\n placedPlayerPieceDAO = PlayerPiece.objects.get(player_id=requestJson[\"player\"], piece_id=placedPieceDAO.id)\n placedPlayerPieceDAO.delete()\n\n newState = \"\"\n for r in mgr.myBoard.arr:\n for c in r:\n newState = newState + c + \"|\"\n newState = newState[:len(newState)-1]\n newState = newState + \"\\n\"\n\n boardState.boardState = newState[:len(newState)-1]\n boardState.activePlayerInGameId = mgr.activePlayerId\n boardState.save()\n\n return asJsonResponse({})\n except Exception as e:\n # Exception has no .message attribute in Python 3; use str(e)\n print(str(e))\n return asJsonResponse({\"isError\": True, \"message\": str(e)})\n \ndef gameReset(request, board_id):\n\n #reset board to fresh state\n newBoardDTO = BoardDTO('')\n boardStateDAO = BoardState.objects.get(id=board_id)\n\n newState = \"\"\n for r in newBoardDTO.arr:\n for c in r:\n newState = newState + c + \"|\"\n newState = newState[:len(newState)-1]\n newState = newState + \"\\n\"\n\n boardStateDAO.boardState = newState[:len(newState)-1]\n boardStateDAO.activePlayerInGameId = 0\n boardStateDAO.gameIsOver = False\n boardStateDAO.save()\n\n #give all players all pieces\n playerDAOs = Player.objects.filter(board_id=board_id)\n pieceDAOs = Piece.objects.all()\n for player in playerDAOs:\n player.isFinished = False\n playerPieceDAOs = PlayerPiece.objects.filter(player_id=player.id)\n for p in playerPieceDAOs:\n p.delete()\n\n for piece in pieceDAOs:\n newPlayerPiece = PlayerPiece()\n newPlayerPiece.player_id = player.id\n newPlayerPiece.piece_id = piece.id\n newPlayerPiece.save()\n player.save()\n\n return asJsonResponse({})\n\ndef finishPlayer(request, board_id):\n boardStateDAO = BoardState.objects.get(id=board_id)\n\n currentPlayerId = boardStateDAO.activePlayerInGameId\n\n currentPlayer = Player.objects.get(id=currentPlayerId)\n currentPlayer.isFinished = True\n currentPlayer.save()\n\n if(currentPlayerId < 3):\n boardStateDAO.activePlayerInGameId = currentPlayerId + 1\n else:\n boardStateDAO.activePlayerInGameId = 0\n\n playerIsStillActive = False\n for player in Player.objects.filter(board_id=board_id):\n playerIsStillActive = not player.isFinished or playerIsStillActive\n\n boardStateDAO.gameIsOver = not playerIsStillActive\n\n print(f'Player {currentPlayer.color}')\n 
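# debug trace: confirm the finished flag persisted on the player\n 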
print(currentPlayer.isFinished)\n\n boardStateDAO.save()\n\n return asJsonResponse({})\n\ndef endGame(request, board_id):\n boardStateDAO = BoardState.objects.get(id=board_id)\n \n \ndef asJsonResponse(obj):\n rtv = json.loads(json.dumps(obj, default=lambda obj: obj.__dict__))\n if '_state' in rtv:\n rtv.pop('_state')\n return JsonResponse(rtv, safe=False)\n \ndef asJsonList(obj):\n rtv = []\n for o in obj:\n appended = json.loads(json.dumps(o.__dict__))\n # strip the Django model-state key from each item, not from the list\n if '_state' in appended:\n appended.pop('_state')\n rtv.append(appended)\n \n return JsonResponse(rtv, safe=False)","sub_path":"blockoo_app/blockoo_site/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"231729997","text":"from slither.slither import Slither\nfrom parser import Parser\nimport parser\nimport collections\n\n\nSLITHER_UNDECLARED_CONSTRUCTOR_NAME = 'slitherConstructorVariables'\n\n\n# This whole class is a prototype, a proper parser should be used\n\n\nclass Instrumentator:\n\n def instrument(self, contract_path, contract_name, predicates, instrument_for_echidna):\n self.contract_path = contract_path\n self.contract_name = contract_name\n\n slither = Slither(self.contract_path)\n self.contract_info = slither.get_contract_from_name(self.contract_name)\n if self.contract_info is None:\n raise Exception('Check config file for contract name')\n\n with open(self.contract_path) as contract_file:\n contract = contract_file.read()\n contract = contract.replace('}', '\\n}')\n self.contract_lines = contract.split('\\n')\n\n self.__pre_process_contract()\n\n parser = Parser()\n\n echidna_function = ''\n\n for index, predicate_string in enumerate(predicates):\n predicate = parser.parse(predicate_string)\n\n functions_to_instrument = self.__get_functions_to_instrument(predicate)\n\n self.__instrument_new_variables(predicate, functions_to_instrument, instrument_for_echidna)\n\n if instrument_for_echidna:\n echidna_function += '(' + predicate.solidity_repr + ')\\n&& '\n else:\n assert_string = f'assert({predicate.solidity_repr}); // VERIMAN ASSERT FOR PREDICATE NO. 
{index + 1}'\n self.__insert_in_functions(functions_to_instrument, assert_string, self.__insert_at_end_of_functions)\n\n if instrument_for_echidna:\n echidna_function = 'function echidna_invariant() public returns(bool) {\\nreturn ' \\\n + echidna_function.rsplit('\\n&& ', 1)[0]\\\n + ';\\n}'\n self.__insert_in_contract(echidna_function)\n\n contract = '\\n'.join(self.contract_lines)\n with open(self.contract_path, 'w') as contract_file:\n contract_file.write(contract)\n\n\n def __pre_process_contract(self):\n pragma = ''\n pragma_found = False\n inside_contract = False\n constructor_found = False\n self.first_line_of_contract = 0\n self.last_line_of_contract = len(self.contract_lines)\n\n for index, line in enumerate(self.contract_lines):\n line_no_spaces = line.replace(' ', '')\n\n if not pragma_found and 'pragmasolidity' in line_no_spaces:\n pragma_found = True\n pragma = line_no_spaces.split('pragmasolidity')[1].split(';')[0]\n\n if line.lstrip().startswith('contract '):\n was_inside_contract = inside_contract\n inside_contract = line_no_spaces.startswith('contract' + self.contract_name)\n\n if inside_contract:\n for i in range(index, len(self.contract_lines) - 1):\n contract_line = self.contract_lines[i]\n if '{' in contract_line:\n self.first_line_of_contract = i # TODO test\n break\n elif was_inside_contract:\n self.last_line_of_contract = index # TODO test\n break\n\n if inside_contract and (line_no_spaces.startswith('constructor(') or\n line_no_spaces.startswith('function' + self.contract_name + '(')):\n constructor_found = True\n break\n\n if not constructor_found:\n if pragma.startswith('^0.4'):\n constructor_string = f'function {self.contract_name}() public {{\\n}}\\n'\n elif pragma.startswith('^0.5'):\n constructor_string = f'constructor() public {{\\n}}\\n'\n else:\n # TODO handle cases like \"pragma solidity >=0.4.0 <0.6.0;\"\n raise Exception(\"Unknown pragma in contract\")\n\n self.__insert_in_contract(constructor_string)\n\n self.__add_inherited_functions()\n\n\n def __should_be_instrumented(self, func):\n return func.visibility == 'public' and not func.is_shadowed\n\n\n def __add_inherited_functions(self):\n for func in self.contract_info.functions_inherited:\n\n if self.__should_be_instrumented(func):\n function_declaration_start = 'function ' + func.name\n new_function = ''\n\n in_desired_contract = False\n in_desired_function = False\n for index, line in enumerate(self.contract_lines):\n in_desired_contract = in_desired_contract or 'contract ' + func.contract_declarer.name in line\n in_desired_function = in_desired_contract and (in_desired_function or function_declaration_start in line)\n if in_desired_contract and in_desired_function:\n new_function += line\n if '{' in line:\n break\n\n new_function = function_declaration_start + new_function.split(function_declaration_start, 1)[1]\n new_function = new_function.split('{', 1)[0] + '{\\n'\n\n if func.return_type is not None:\n new_function += 'return '\n\n new_function += f'super.{func.name}('\n\n for param in func.parameters:\n new_function += param.name + ','\n\n new_function = new_function.rsplit(',', 1)[0] + ');\\n}\\n'\n\n self.__insert_in_contract(new_function)\n\n\n def __instrument_new_variables(self, predicate, functions_to_instrument, instrument_for_echidna):\n if predicate.operator == parser.PREVIOUSLY:\n if instrument_for_echidna:\n initialization_code = f'bool {predicate.solidity_vars[0]};'\n update_code = f'{predicate.solidity_vars[0]}={predicate.values[0].solidity_repr};'\n\n 
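# The since operator is encoded with two generated contract variables:\n # q latches to true the first time its expression holds, and p_since_q\n # stays true only while p keeps holding after that latch; both are\n # refreshed at the end of every instrumented public function.\n 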
self.__insert_in_contract(initialization_code)\n self.__insert_in_functions(functions_to_instrument, update_code, self.__insert_at_end_of_functions)\n\n for term in predicate.values:\n self.__instrument_new_variables(term, functions_to_instrument, instrument_for_echidna)\n\n\n def __get_functions_to_instrument(self, predicate):\n functions_to_instrument = set()\n\n for variable_name in predicate.related_vars:\n\n if self.__is_solidity_property(variable_name):\n functions_to_instrument = set(self.contract_info.functions_entry_points)\n else:\n variable = self.contract_info.get_state_variable_from_name(variable_name)\n functions_writing_variable = self.contract_info.get_functions_writing_to_variable(variable)\n\n for func in functions_writing_variable:\n if self.__should_be_instrumented(func):\n functions_to_instrument.add(func)\n\n functions_to_instrument = functions_to_instrument.union(self.__get_public_callers(functions_writing_variable))\n\n if len(functions_to_instrument) == len(self.contract_info.functions_entry_points):\n break\n\n # The initial state also needs to be checked:\n is_constructor_considered = False\n for func in functions_to_instrument:\n if func.name == 'constructor': # FIXME temporary, issue with Slither\n is_constructor_considered = True\n break\n\n if not is_constructor_considered:\n if self.contract_info.constructor is not None:\n functions_to_instrument.add(self.contract_info.constructor)\n else:\n for func in self.contract_info.functions:\n if func.name == SLITHER_UNDECLARED_CONSTRUCTOR_NAME:\n functions_to_instrument.add(func)\n break\n\n # We can think of all non-state-changing functions as equivalent:\n for func in self.contract_info.functions:\n if not func in functions_to_instrument and self.__should_be_instrumented(func) and func.name != 'constructor': # FIXME temporary, issue with Slither\n functions_to_instrument.add(func)\n break\n\n return functions_to_instrument\n\n\n def __is_solidity_property(self, variable_name):\n parts = variable_name.split('.')\n return parts[0] in ['block', 'msg', 'tx', 'this', 'now']\n\n\n def __get_public_callers(self, functions):\n result = set()\n\n for func in functions:\n callers, queue = set(), collections.deque([func])\n\n while queue:\n func_to_check = queue.popleft()\n\n for neighbour in func_to_check.reachable_from_functions:\n if neighbour not in callers:\n queue.append(neighbour)\n callers.add(neighbour)\n\n if self.__should_be_instrumented(neighbour):\n result.add(neighbour)\n\n return result\n\n\n def __insert_in_contract(self, code_to_insert):\n self.contract_lines.insert(self.first_line_of_contract + 1, code_to_insert)\n self.last_line_of_contract += 1\n\n\n def __insert_in_functions(self, functions, 
code_string, insert_in_function):\n remaining_functions = list(functions)\n open_blocks = 0\n in_function = False\n current_function = None\n\n constructors_in_list = list(filter(lambda func: func.name == 'constructor' or\n func.name == SLITHER_UNDECLARED_CONSTRUCTOR_NAME, remaining_functions))\n fallbacks_in_list = list(filter(lambda func: func.name == 'fallback', remaining_functions))\n\n if len(constructors_in_list) > 1 or len(fallbacks_in_list) > 1:\n raise Exception('Invalid set of functions to instrument')\n\n for index in range(self.first_line_of_contract, self.last_line_of_contract):\n line = self.contract_lines[index]\n open_blocks = open_blocks + line.count('{') - line.count('}')\n\n if open_blocks <= 2:\n line_stripped = line.lstrip()\n line_no_spaces = line.replace(' ', '')\n\n if line_stripped.startswith('function ') \\\n or line_no_spaces.startswith('function()') \\\n or line_no_spaces.startswith('constructor('):\n\n func_found = None\n\n if (line_no_spaces.startswith('constructor(') or line_no_spaces.startswith('function' + self.contract_name)) \\\n and len(constructors_in_list) > 0:\n func_found = constructors_in_list[0]\n constructors_in_list = []\n elif line_no_spaces.startswith('function()') and len(fallbacks_in_list) > 0:\n func_found = fallbacks_in_list[0]\n fallbacks_in_list = []\n else:\n for func in remaining_functions:\n if line_no_spaces.startswith('function' + func.name + '('):\n func_found = func\n break\n\n found = func_found is not None\n if found:\n remaining_functions.remove(func_found)\n current_function = func_found\n\n in_function = found\n\n if in_function:\n function_done = insert_in_function(code_string, index, open_blocks, current_function)\n if function_done and len(remaining_functions) == 0:\n break\n else:\n in_function = not function_done\n\n if len(remaining_functions) > 0:\n raise Exception('One or more functions couldn\\'t be instrumented')\n\n\n def __insert_at_beginning_of_functions(self, code_string, index, open_blocks, current_function):\n function_done = False\n line = self.contract_lines[index]\n\n if open_blocks <= 2 and '{' in line:\n self.contract_lines[index] = line.replace('{', '{\\n' + code_string, 1)\n function_done = True\n\n return function_done\n\n\n def __insert_at_end_of_functions(self, code_string, index, open_blocks, current_function):\n function_done = False\n line = self.contract_lines[index]\n\n if 'return ' in line:\n if not 'return VERIMAN_' in line:\n store_return_values = ''\n return_variables = ''\n for type in current_function.return_type:\n new_var_for_return_value = Parser.create_variable_name('return_value')\n store_return_values += f'{type} {new_var_for_return_value},'\n return_variables += new_var_for_return_value + ','\n store_return_values = store_return_values.rsplit(',', 1)[0]\n return_variables = return_variables.rsplit(',', 1)[0]\n\n if len(current_function.return_type) > 1:\n store_return_values = '(' + store_return_values + ')'\n return_variables = '(' + return_variables + ')'\n\n return_value = line.split('return ', 1)[1].split(';', 1)[0]\n\n assignment_line = f'{store_return_values}={return_value};'\n new_return_line = f'return {return_variables}'\n\n self.contract_lines[index] = line.replace(f'return {return_value}', f'{assignment_line}\\n{code_string}\\n{new_return_line}')\n else:\n self.contract_lines[index] = line.replace('return ', f'{code_string}\\nreturn ')\n\n function_done = open_blocks <= 2\n\n if 'return;' in line:\n self.contract_lines[index] = line.replace('return;', 
f'{code_string}\\nreturn;')\n function_done = open_blocks <= 2\n\n if not function_done and open_blocks == 1 and '}' in line:\n solidity_lines = line.split(';')\n finishes_with_return = 'return ' in solidity_lines[len(solidity_lines) - 1]\n if not finishes_with_return:\n self.contract_lines[index] = (code_string + '\\n}').join(line.rsplit('}', 1))\n function_done = True\n\n return function_done","sub_path":"instrumentator.py","file_name":"instrumentator.py","file_ext":"py","file_size_in_byte":15348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"600120837","text":"#!/usr/bin/python3\n# file: coroutine_4_yield_多任务.py\n# Created by Guang at 19-7-21\n# description:\n#\n# -*- coding: utf8 -*-\nfrom greenlet import greenlet\nimport time\n\n\ndef test1():\n while True:\n print(\"---A--\")\n gr2.switch()\n time.sleep(0.5)\n\n\ndef test2():\n while True:\n print(\"---B--\")\n gr1.switch()\n time.sleep(0.5)\n\n\ngr1 = greenlet(test1)\ngr2 = greenlet(test2)\n\n# switch execution to gr1\ngr1.switch()\n","sub_path":"multitask_coroutine/coroutine_5_greenlet_多任务.py","file_name":"coroutine_5_greenlet_多任务.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"20428557","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 08 12:08:01 2015\n\n@author: Rob\n\"\"\"\n\nfrom databaker.constants import *\n\ndef per_file(tabs):\n return [\"Table 1\", \"Table 3\"]\n \ndef per_tab(tab):\n \n anchor = tab.excel_ref(\"A6\")\n req_cols = anchor.fill(RIGHT).is_not_blank()\n req_rows = anchor.fill(DOWN) - tab.filter(contains_string(\"Latest three months\")).expand(DOWN)\n \n obs = req_rows.waffle(req_cols).is_not_blank()\n \n tab.excel_ref(\"B1\").dimension(\"Type\", CLOSEST, ABOVE)\n #req_cols.dimension(\"SA / NSA\", DIRECTLY, ABOVE)\n tab.excel_ref(\"3:5\").is_not_blank().children().dimension(\"Area\", DIRECTLY, ABOVE)\n req_rows.is_not_blank().dimension(\"Year\", CLOSEST, ABOVE)\n anchor.shift(RIGHT).fill(DOWN).dimension(\"Month\", DIRECTLY, LEFT)\n \n obs = obs - tab.excel_ref('E1').fill(DOWN) \n \n yield obs\n ","sub_path":"OTT/OTTArea.py","file_name":"OTTArea.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479550908","text":"from unittest import TestCase\r\nimport sys\r\nimport os\r\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\r\nfrom music import Music\r\nfrom sound_context import SoundContext\r\nfrom pyaudio import PyAudio\r\nimport numpy as np\r\n\r\nclass MusicTest(TestCase):\r\n\r\n def test_init(self):\r\n sr = 44100\r\n m = Music(sr)\r\n\r\n def test_play(self):\r\n sr = 44100\r\n m = Music(sr)\r\n\r\n sp1 = np.zeros(sr)\r\n sp1[440] = 10\r\n sc1 = SoundContext(sr, sr, sp1)\r\n\r\n sp2 = np.zeros(sr)\r\n sp2[554] = 10\r\n sc2 = SoundContext(sr, sr, sp2)\r\n\r\n sp3 = np.zeros(sr)\r\n sp3[659] = 10\r\n sc3 = SoundContext(sr, sr, sp3)\r\n\r\n sp4 = np.zeros(sr)\r\n sp4[880] = 10\r\n sc4 = SoundContext(sr, sr, sp4)\r\n\r\n m.add_element(0, int(2.5 * sr), sc1)\r\n m.add_element(int(0.5 * sr), int(2 * sr), sc2)\r\n m.add_element(int(1 * sr), int(1.5 * sr), sc3)\r\n m.add_element(int(1.5 * sr), int(1 * sr), sc4)\r\n p = PyAudio()\r\n m.play(p)\r\n p.terminate()","sub_path":"test/music_test.py","file_name":"music_test.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"260551647","text":"from calendar import HTMLCalendar\nfrom itertools import groupby\nfrom datetime import date\nfrom django.utils.html import conditional_escape as esc\n\n\n# http://uggedal.com/journal/creating-a-flexible-monthly-calendar-in-django/\n# helped immensely\nclass RunCalendar(HTMLCalendar):\n def __init__(self, runs):\n super(RunCalendar, self).__init__()\n self.runs = self.group_by_day(runs)\n\n def formatday(self, day, weekday):\n if day != 0:\n cssclass = self.cssclasses[weekday]\n if date.today() == date(self.year, self.month, day):\n cssclass += ' today'\n if day in self.runs:\n cssclass += ' filled'\n body = ['']\n for run in self.runs[day]:\n body.append('
')\n body.append(esc(run.run_length))\n if run.run_length > 1:\n body.append(esc(' miles '))\n else:\n body.append(esc(' mile '))\n body.append(esc('(' + run.run_duration + ')'))\n return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))\n return self.day_cell(cssclass, day)\n return self.day_cell('noday', ' ')\n\n def formatmonth(self, year, month):\n self.year, self.month = year, month\n return super(RunCalendar, self).formatmonth(year, month)\n\n def group_by_day(self, runs):\n field = lambda run: run.run_date.day\n return dict(\n [(day, list(items)) for day, items in groupby(runs, field)]\n )\n\n def day_cell(self, cssclass, body):\n return '%s' % (cssclass, body)","sub_path":"pycharmtest/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408110021","text":"import cv2\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n# img=cv2.imread('/Users/wangshan/Desktop/image/yue1.jpeg')\n# ptx = img[100,100,2]\n# print(ptx)\n# print(img.shape)\n# print(img.size , img.dtype)\n# img.itemset((100,100,2) ,100)\n# print(img.item(100,100,2))\n# BLUE=[255,0,0]\n# # img[:,:,2] = 0\n# # img[:,:,1] = 0\n# constant= cv2.copyMakeBorder(img,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE)\n# cv2.imshow('aa' , constant)\n# # k= cv2.waitKey(0)\n# # time.sleep(3)\n# # cv2.destroyAllWindows()\n#\n# k = cv2.waitKey(0)\n# if k == 27: # wait for ESC key to exit\n# cv2.destroyAllWindows()\n# elif k == ord('s'): # wait for 's' key to save and exit\n# cv2.destroyAllWindows()\n\n\n# img1=cv2.imread('/Users/wangshan/Desktop/image/yue1.jpeg')\n# img2=cv2.imread('/Users/wangshan/Desktop/image/yue2.jpeg')\n# print(img1.shape)\n# print(img2.shape)\n#\n# img = cv2.addWeighted(img1 , 0.8 , img2 , 0.2 , 0)\n#\n# cv2.imshow('img' , img)\n#\n#\n# k = cv2.waitKey(0)\n# if k == 27: # wait for ESC key to exit\n# cv2.destroyAllWindows()\n# elif k == ord('s'): # wait for 's' key to save and exit\n# cv2.destroyAllWindows()\n\n\n\n\nimg1=cv2.imread('/Users/wangshan/Desktop/image/yue1.jpeg')\nimg2=cv2.imread('/Users/wangshan/Desktop/image/logo.png')\nlogorows , logocols , logochannels = img2.shape\nprint(img2.shape)\nroi = img1[0:logorows , 0 : logocols]\nimg2gray = cv2.cvtColor(img2 , cv2.COLOR_BGR2GRAY)\nret , mask = cv2.threshold(img2gray , 10 , 255 , cv2.THRESH_BINARY)\nmask_inv = cv2.bitwise_not(mask)\nimg1_bg = cv2.bitwise_and(roi , roi ,mask = mask_inv)\nimg2_fg = cv2.bitwise_and(img2 , img2 ,mask = mask)\ndes = cv2.add(img1_bg , img2_fg)\nimg1[0:logorows , 0:logocols] = des\nplt.subplot(121),plt.imshow(img1_bg,cmap = 'gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(122),plt.imshow(img2_fg,cmap = 'gray')\nplt.title('Edge Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(121),plt.imshow(des,cmap = 'gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(121),plt.imshow(img1,cmap = 'gray')\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\nplt.show()\ne1 = cv2.getTickCount()\n# your code execution\ne2 = cv2.getTickCount()\ntime = (e2 - e1)/ cv2.getTickFrequency()\nprint(time)","sub_path":"opencv/corehand.py","file_name":"corehand.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"58312612","text":"import glob\nimport json\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\nfrom collections 
import deque\n\nstats_files = []\n\nfor x in glob.glob('./monitoring/**/*.stats.json', recursive=True):\n stats_files.append(x)\n\nepisode_rewards = []\n\n\nfor stats_file in stats_files:\n json_data=open(stats_file).read()\n data = json.loads(json_data)\n episode_rewards.append((data['episode_rewards'], stats_file))\n\nmean_stats = []\n\nfor stats, stats_file in episode_rewards:\n all_datapoints = deque(maxlen=100)\n mean_stat_datapoint = []\n for datapoint in stats:\n all_datapoints.append(datapoint)\n mean_stat_datapoint.append(np.mean(all_datapoints))\n mean_stats.append((mean_stat_datapoint, stats, stats_file))\n\nlegends = []\n# for stats, stats_file in episode_rewards:\n# mean = np.mean(stats)\n# if mean > 65:\n# plt.plot(stats, label=mean)\n# legends.append(mean)\n# else:\n# print(\"Mean to low : {}\".format(mean))\n\nfor mean_stats, stats, stats_file in mean_stats:\n mean = mean_stats[len(mean_stats) - 1]\n print(\"Mean too low. Mean:{}\\tEpisodes:{}\\tFile:{}\".format(mean, len(mean_stats), stats_file))\n if mean > 194 and len(mean_stats) < 3000:\n plt.plot(mean_stats, label=\"{}\".format(stats_file))\n legends.append(stats_file)\n # else:\n # print(\"Mean too low. Mean:{}\\tEpisodes:{}\".format(mean, len(mean_stats)))\n\nplt.legend(legends)\nplt.tight_layout()\nplt.show()","sub_path":"read_stats.py","file_name":"read_stats.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"275935217","text":"import pygame\r\nimport random\r\nfrom os import path\r\n\r\n\"\"\"Created by Antoine Rebelo in February 2020.\r\n Python mutation of Space Invaders\r\n ___________Version 1.0____________\"\"\"\r\n\r\n# Create paths to image and soundfolders for easy access\r\nimgDir = path.join(path.dirname(__file__), 'img')\r\nsoundDir = path.join(path.dirname(__file__), 'sounds')\r\n\r\n# Define constants and colours\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nSIZE = (800, 500)\r\nCENTER_RESET = (400, 450)\r\nALIEN_CENTER = (400, 75)\r\nSCORE_POSITION = (125, 450)\r\nLEVEL_POSITION = (675, 450)\r\nLIVES_POSITION = (400, 425)\r\n\r\n# Initialise Pygame, set up the window and running conditions\r\npygame.init()\r\npygame.mixer.init()\r\nscreen = pygame.display.set_mode(SIZE)\r\nwindow = screen.get_rect()\r\npygame.display.set_caption(\"Space invaders\")\r\nfps = pygame.time.Clock()\r\nFONT = pygame.font.Font('freesansbold.ttf', 18)\r\nbackground = pygame.image.load(path.join(imgDir, 'space.jpg'))\r\n\r\n# Load Image files for player and aliens\r\nplayerImage = pygame.image.load(path.join(imgDir, \"player.png\"))\r\nalienImage = pygame.image.load(path.join(imgDir, \"alien.png\"))\r\n\r\n# Create data structures for small and large\r\n# explosion animations and loads them\r\nexplosionAnimation = {}\r\nexplosionAnimation['large'] = []\r\nexplosionAnimation['small'] = []\r\nfor i in range(9):\r\n filename = 'regularExplosion0{}.png'.format(i)\r\n img = pygame.image.load(path.join(imgDir, filename)).convert()\r\n img.set_colorkey(BLACK)\r\n imgLarge = pygame.transform.scale(img, (75, 75))\r\n explosionAnimation['large'].append(imgLarge)\r\n imgSmall = pygame.transform.scale(img, (32, 32))\r\n explosionAnimation['small'].append(imgSmall)\r\n\r\n# Load sound files\r\nshootSound = pygame.mixer.Sound(path.join(soundDir, \"bulletSound.wav\"))\r\ndeathSound = pygame.mixer.Sound(path.join(soundDir, \"Explosion.wav\"))\r\n\r\n# Win/Lose conditions, score counter and level 
counter\r\nlevel = 1\r\nscore = 0\r\nbounty = 15\r\nrunning = False\r\nvictory = False\r\nfailure = False\r\n\r\n\r\n# Classes representing all actor- and projectile objects\r\nclass Player(pygame.sprite.Sprite):\r\n \"\"\"Class representing the player object\r\n used to shoot down the awful aliens.\"\"\"\r\n\r\n # Constructor for Player object\r\n def __init__(self):\r\n # Call superclass from Sprite\r\n pygame.sprite.Sprite.__init__(self)\r\n # Scales the image down\r\n self.image = pygame.transform.scale(playerImage, (45, 35))\r\n self.image.set_colorkey(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = CENTER_RESET\r\n self.speedx = 0\r\n # Delay for autofire so you can't laserbeam-spam\r\n self.delay = 300\r\n self.lastShot = pygame.time.get_ticks()\r\n # Timer and check for hiding yourself when you lose a life\r\n self.hidden = False\r\n self.hideTimer = pygame.time.get_ticks()\r\n self.lives = 3\r\n\r\n def update(self):\r\n # If you lost a life, you hide out of bounds for 1 second\r\n if self.hidden and pygame.time.get_ticks() - self.hideTimer > 1000:\r\n self.hidden = False\r\n self.rect.center = CENTER_RESET\r\n self.speedx = 0\r\n key = pygame.key.get_pressed()\r\n if key[pygame.K_LEFT]:\r\n self.speedx = -4\r\n if key[pygame.K_RIGHT]:\r\n self.speedx = 4\r\n if key[pygame.K_SPACE]:\r\n self.shoot()\r\n self.rect.x += self.speedx\r\n\r\n # Going out of bounds is cheating\r\n if self.rect.right > SIZE[0]:\r\n self.rect.right = SIZE[0]\r\n elif self.rect.left < 0:\r\n self.rect.left = 0\r\n\r\n def shoot(self):\r\n \"\"\"Method for shooting a small bullet\r\n that travels upwards 5 pixels per frame\"\"\"\r\n\r\n # Autofire check\r\n now = pygame.time.get_ticks()\r\n if now - self.lastShot > self.delay:\r\n self.lastShot = now\r\n bullet = Bullet(self.rect.centerx, self.rect.top, -5)\r\n allSprites.add(bullet)\r\n bullets.add(bullet)\r\n # Pew pew\r\n shootSound.play()\r\n\r\n def hide(self):\r\n # Hide player during explosion, respawn in the middle\r\n self.hidden = True\r\n self.hideTimer = pygame.time.get_ticks()\r\n self.rect.center = (SIZE[0] / 2, SIZE[1] + 200)\r\n\r\n\r\nclass Alien(pygame.sprite.Sprite):\r\n \"\"\"Class representing the alien objects\r\n that will spawn in a group of invaders\"\"\"\r\n\r\n # Constructor for alien class\r\n def __init__(self, x, y, level):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.transform.scale(alienImage, (30, 25))\r\n self.image.set_colorkey(BLACK)\r\n self.rect = self.image.get_rect()\r\n # Randomise speed so all aliens move differently\r\n speed = random.choice([(-2, -1), (1, 2)])\r\n # Have to skip 0 so they don't stand still\r\n self.speedVector = [random.randint(*speed) * level, 35]\r\n self.rect.centerx = x\r\n self.rect.centery = y\r\n # Randomised autofire delay so all shoot differently\r\n # and also so no one has laserbeams\r\n self.delay = random.randint(500, 2000) + random.randint(500, 7000)\r\n self.lastShot = pygame.time.get_ticks()\r\n\r\n def shoot(self):\r\n \"\"\"Identical shooting method as the player\"\"\"\r\n\r\n # Autofire check\r\n now = pygame.time.get_ticks()\r\n if now - self.lastShot > self.delay:\r\n self.lastShot = now\r\n bullet = alienBullet(self.rect.centerx, self.rect.bottom + 5, 5)\r\n allSprites.add(bullet)\r\n alienBullets.add(bullet)\r\n # PEW PEW\r\n shootSound.play()\r\n\r\n def update(self):\r\n # Move downwards a few pixels when they\r\n # reach either side of the window\r\n if self.rect.left < 20 or self.rect.right > 780:\r\n self.speedVector[0] = 
-self.speedVector[0]\r\n self.rect.y += self.speedVector[1]\r\n\r\n self.rect.x += self.speedVector[0]\r\n self.shoot()\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n \"\"\"Bullet class used by the player when firing weapons.\"\"\"\r\n\r\n # Constructor\r\n def __init__(self, x, y, speed):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.Surface((5, 5))\r\n self.image.fill(RED)\r\n self.rect = self.image.get_rect()\r\n # Spawn at the top of the player ship\r\n self.rect.bottom = y\r\n self.rect.centerx = x\r\n self.speedy = speed\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n\r\n # Destroy the sprite if it goes out of bounds\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n if self.rect.top > 500:\r\n self.kill()\r\n\r\n\r\nclass alienBullet(pygame.sprite.Sprite):\r\n \"\"\"Bullet class used by aliens, almost identical\"\"\"\r\n\r\n # Constructor\r\n def __init__(self, x, y, speed):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.Surface((5, 5))\r\n self.image.fill(GREEN)\r\n self.rect = self.image.get_rect()\r\n # Spawn at bottom of alien ship\r\n self.rect.bottom = y\r\n self.rect.centerx = x\r\n self.speedy = speed\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n\r\n # Die if goes off screen\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n if self.rect.top > 500:\r\n self.kill()\r\n\r\n\r\nclass Explosion(pygame.sprite.Sprite):\r\n \"\"\"Explosion class instantiated everytime\r\n an actor dies from a bullet, animated\r\n from a dictionary using 9 images\"\"\"\r\n\r\n # Constructor\r\n def __init__(self, center, size):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.size = size\r\n # Access the dictionary\r\n self.image = explosionAnimation[self.size][0]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n # Speed of animation check\r\n self.frame = 0\r\n self.lastUpdate = pygame.time.get_ticks()\r\n self.frameRate = 50\r\n\r\n def update(self):\r\n \"\"\"Checks the frame if it can still animate,\r\n kills itself if final image is used\"\"\"\r\n now = pygame.time.get_ticks()\r\n if now - self.lastUpdate > self.frameRate:\r\n self.lastUpdate = now\r\n self.frame += 1\r\n if self.frame == len(explosionAnimation[self.size]):\r\n # Boom\r\n self.kill()\r\n else:\r\n center = self.rect.center\r\n self.image = explosionAnimation[self.size][self.frame]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n\r\n\r\n# Create sprite groups for my actors for easier manipulation\r\nallSprites = pygame.sprite.Group()\r\nenemies = pygame.sprite.Group()\r\nbullets = pygame.sprite.Group()\r\nplayerGroup = pygame.sprite.Group()\r\nalienBullets = pygame.sprite.Group()\r\n\r\n# Add player to sprite group\r\nplayer = Player()\r\nplayerGroup.add(player)\r\nallSprites.add(player)\r\n\r\n\r\ndef resetGame():\r\n \"\"\"Method for adding all actors to\r\n their relevant Sprite group and\r\n populating the screen with aliens\"\"\"\r\n\r\n global player\r\n\r\n allSprites.empty()\r\n enemies.empty()\r\n playerGroup.add(player)\r\n allSprites.add(player)\r\n repopulate()\r\n\r\n\r\ndef repopulate():\r\n \"\"\"adds 6*6 aliens on the screen at random\r\n locations on the X axis and toggles the\r\n correct difficulty based on the current level.\"\"\"\r\n\r\n global level\r\n for i in range(6):\r\n for j in range(6):\r\n x = random.randint(60, window.width-60)\r\n y = (j + 1) * window.height/2/7\r\n n = Alien(x, y, level)\r\n allSprites.add(n)\r\n enemies.add(n)\r\n\r\n\r\ndef showFailure():\r\n \"\"\"Text to show when you 
lose.\"\"\"\r\n FONT = pygame.font.Font('freesansbold.ttf', 35)\r\n screen.blit(background, (0, 0))\r\n failureText = FONT.render(\"FAILURE!! Final score: \" + str(score), True, RED)\r\n failureBox = failureText.get_rect()\r\n failureBox.center = (window.width/2, window.height/2)\r\n screen.blit(failureText, failureBox)\r\n\r\n\r\ndef showVictory():\r\n \"\"\"Text to show when you win.\"\"\"\r\n FONT = pygame.font.Font('freesansbold.ttf', 35)\r\n screen.blit(background, (0, 0))\r\n victoryText = FONT.render(\"You have VICTORY! \\\r\n Final score: \" + str(score), True, GREEN)\r\n victoryBox = victoryText.get_rect()\r\n victoryBox.center = (window.width/2, window.height/2)\r\n screen.blit(victoryText, victoryBox)\r\n\r\n\r\ndef showWelcome():\r\n \"\"\"Text to show as welcome screen,\r\n contains rules and instructions\"\"\"\r\n\r\n FONT = pygame.font.Font('freesansbold.ttf', 20)\r\n screen.blit(background, (0, 0))\r\n welcomeText = FONT.render(\"Welcome to Space Invaders!\", True, WHITE)\r\n controlText = FONT.render(\"Control your spaceship with left \\\r\n & right keys, shoot with Spacebar.\", True, WHITE)\r\n levelText = FONT.render(\"There are 3 waves of dangerous aliens to beat.\\\r\n Good luck!\", True, WHITE)\r\n continueText = FONT.render(\"Press SPACE to begin the game!\", True, WHITE)\r\n\r\n welcomeBox = welcomeText.get_rect()\r\n continueBox = continueText.get_rect()\r\n levelBox = levelText.get_rect()\r\n controlBox = controlText.get_rect()\r\n\r\n welcomeBox.center = (window.width/2, window.height/2)\r\n levelBox.center = (window.width/2, window.height/2 + 50)\r\n controlBox.center = (window.width/2, window.height/2 + 25)\r\n continueBox.center = (window.width/2, window.height/2 + 75)\r\n screen.blit(welcomeText, welcomeBox)\r\n screen.blit(controlText, controlBox)\r\n screen.blit(levelText, levelBox)\r\n screen.blit(continueText, continueBox)\r\n\r\n\r\ndef updateScore(points):\r\n \"\"\"Just updates the score\"\"\"\r\n global score\r\n\r\n if points == 0:\r\n score = 0\r\n else:\r\n score += points\r\n\r\n\r\ndef drawScore(points, level, playerLives):\r\n \"\"\"Displays the score, level number and\r\n amount of lives player has left,\r\n at the bottom of the window\"\"\"\r\n\r\n levelText = FONT.render(\"LEVEL: \" + str(level), True, WHITE)\r\n levelRect = levelText.get_rect()\r\n levelRect.center = LEVEL_POSITION\r\n scoreText = FONT.render(\"SCORE: \" + str(points), True, WHITE)\r\n scoreRect = scoreText.get_rect()\r\n scoreRect.center = SCORE_POSITION\r\n livesText = FONT.render(\"LIVES LEFT: \" + str(playerLives), True, WHITE)\r\n livesRect = livesText.get_rect()\r\n livesRect.center = LIVES_POSITION\r\n\r\n screen.blit(livesText, livesRect)\r\n screen.blit(levelText, levelRect)\r\n screen.blit(scoreText, scoreRect)\r\n\r\n\r\ndef victoryScreen():\r\n \"\"\"Displays the victory screen and\r\n awaits input before closing\"\"\"\r\n global running\r\n global victory\r\n\r\n showVictory()\r\n while victory:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n exit()\r\n if event.key == pygame.K_SPACE:\r\n victory = False\r\n pygame.display.flip()\r\n\r\n\r\ndef failureScreen():\r\n \"\"\"Displays the losing screen and\r\n awaits input before closing\"\"\"\r\n global running\r\n global failure\r\n\r\n showFailure()\r\n while failure:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == 
pygame.K_ESCAPE:\r\n exit()\r\n if event.key == pygame.K_SPACE:\r\n failure = False\r\n pygame.display.flip()\r\n\r\n\r\ndef welcomeScreen():\r\n \"\"\"Displays the welcome screen\r\n and awaits input before closing\"\"\"\r\n global running\r\n\r\n welcome = True\r\n screen.blit(background, (0, 0))\r\n showWelcome()\r\n while welcome:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n exit()\r\n if event.key == pygame.K_SPACE:\r\n running = True\r\n welcome = False\r\n pygame.display.flip()\r\n\r\n\r\ndef main():\r\n \"\"\"Main game loop. Controls the game states\r\n and acts accordingly, controls and checks for collisions between\r\n actors and objects, and updates all sprites and animations \"\"\"\r\n global running\r\n global victory\r\n global failure\r\n global level\r\n\r\n resetGame()\r\n welcomeScreen()\r\n while running:\r\n # Keep game at 60 fps\r\n fps.tick(60)\r\n # Background Image\r\n screen.blit(background, (0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n exit()\r\n\r\n # Enemies hit with a bullet explode beautifully\r\n hits = pygame.sprite.groupcollide(bullets, enemies, True, True)\r\n for hit in hits:\r\n deathSound.play()\r\n # 15 points per alien, times the current level\r\n updateScore(bounty * level)\r\n expl = Explosion(hit.rect.center, 'small')\r\n allSprites.add(expl)\r\n if not enemies:\r\n # If all enemies are killed on level 3\r\n # You have won the game. Congratulations\r\n if level == 3:\r\n running = False\r\n victory = True\r\n # If all enemies are killed, to go next level\r\n level += 1\r\n resetGame()\r\n\r\n # If enemies reach the player, you instantly lose\r\n collision = pygame.sprite.groupcollide(\r\n playerGroup,\r\n enemies,\r\n True, True,\r\n pygame.sprite.collide_circle)\r\n if collision:\r\n deathSound.play()\r\n expl = Explosion(hit.rect.center, 'large')\r\n allSprites.add(expl)\r\n player.kill()\r\n\r\n # If player is hit by enemy bullets your HP is decreased\r\n # starting from 3 to 0, then you explode and lose\r\n shotDown = pygame.sprite.groupcollide(playerGroup, alienBullets,\r\n False,\r\n True,\r\n pygame.sprite.collide_circle\r\n )\r\n for hit in shotDown:\r\n deathSound.play()\r\n expl = Explosion(hit.rect.center, 'large')\r\n player.lives -= 1\r\n player.hide()\r\n allSprites.add(expl)\r\n\r\n # To make sure the explosion animation\r\n # fully finishes when you die\r\n if player.lives == 0 and not expl.alive():\r\n running = False\r\n failure = True\r\n\r\n # Update all the graphics and sprites on the screen\r\n drawScore(score, level, player.lives)\r\n allSprites.update()\r\n allSprites.draw(screen)\r\n pygame.display.flip()\r\n\r\n while victory:\r\n victoryScreen()\r\n while failure:\r\n failureScreen()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":17482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"237844679","text":"from database import executesql\n\ncount = 0\nrow_data = []\nrow_count = 0\n\ndef getsessions(database):\n return database.sessions.find()\n\ndef savesession(session, sqldb):\n if \"_id\" in session:\n id = str(session[\"_id\"])\n else:\n return\n\n if (\"buid\" in session) and session[\"buid\"] != None:\n buid = str(session[\"buid\"][0])\n else:\n 
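Two fragments of main() above deserve a second look: the aliens-reach-the-player branch reuses `hit` from the earlier bullet loop, which is unbound on a frame where no bullet connected, and the level-3 victory branch falls through into `level += 1` and `resetGame()` after flagging victory. A hedged rework of those two spots, using only the globals and classes already defined in the record:

```python
# Victory check: without an else, clearing level 3 still advances the level
# and resets the game before the loop condition is re-tested.
if not enemies:
    if level == 3:
        running = False
        victory = True
    else:
        level += 1
        resetGame()

# Body contact: iterate over the actual colliding player sprites instead of
# reusing `hit`, which is unbound when the bullet loop found no hits.
collision = pygame.sprite.groupcollide(
    playerGroup, enemies, True, True, pygame.sprite.collide_circle)
for player_sprite in collision:
    deathSound.play()
    allSprites.add(Explosion(player_sprite.rect.center, 'large'))
```

The final lives check also leans on `expl` surviving from an earlier iteration; initializing `expl = None` before the loop and testing `expl is None or not expl.alive()` makes that dependency explicit and safe.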
return\n\n if \"session_start\" in session:\n start_session = str(session[\"session_start\"])\n else:\n start_session = \"\"\n\n if \"session_end\" in session:\n end_session = str(session[\"session_end\"])\n else:\n end_session = \"\"\n\n global count\n count += 1\n\n global row_data\n row_data.append((id, start_session, end_session, buid))\n\n if count == 1000:\n sessionsql = '''INSERT INTO sessions (\n _id,\n start_session, \n end_session, \n buid) \n VALUES (\\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\")'''\n global row_count\n row_count += 1\n print(\"session - row \" + str(row_count))\n executesql(sessionsql, row_data, sqldb)\n row_data = []\n global count\n count = 0","sub_path":"op = op code/datatransfer mongodb to sql/models_sessions.py","file_name":"models_sessions.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"547707162","text":"# Created by Artem Manchenkov\n# artyom@manchenkoff.me\n#\n# Copyright © 2019\n#\n# Сервер для обработки сообщений от клиентов\n#\n\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import ServerFactory, connectionDone\nfrom twisted.protocols.basic import LineOnlyReceiver\n\n\nclass ServerProtocol(LineOnlyReceiver):\n factory: 'Server'\n login: str = None\n\n def connectionMade(self):\n self.factory.clients.append(self)\n\n def connectionLost(self, reason=connectionDone):\n self.factory.clients.remove(self)\n\n def lineReceived(self, line: bytes):\n content = line.decode()\n\n if self.login is not None:\n content = f\"Message from {self.login}: {content}\"\n self.factory.messages.append(content.encode())\n for user in self.factory.clients:\n if user is not self:\n user.sendLine(content.encode())\n else:\n\n if content.startswith(\"login:\"):\n self.login = content.replace(\"login:\", \"\")\n if self.login in self.factory.nicknames:\n self.sendLine(\"Login used, try another\".encode())\n self.transport.loseConnection()\n else:\n self.factory.nicknames.append(self.login)\n self.sendLine(\"Welcome!\".encode())\n self.send_history()\n else:\n self.sendLine(\"Invalid login\".encode())\n\n def send_history(self):\n if len(self.factory.messages) == 0:\n return\n elif len(self.factory.messages) < 11:\n for item in self.factory.messages:\n self.sendLine(item)\n else:\n i = len(self.factory.messages) - 10\n while i < len(self.factory.messages):\n self.sendLine(self.factory.messages[i])\n i += 1\n\n\nclass Server(ServerFactory):\n protocol = ServerProtocol\n clients: list\n nicknames: list\n messages: list\n\n def startFactory(self):\n self.clients = []\n self.nicknames = []\n self.messages = []\n print(\"Server started\")\n\n def stopFactory(self):\n print(\"Server stopped\")\n\n\nreactor.listenTCP(1234, Server())\nreactor.run()\n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"323135150","text":"from math import exp\n\n\ndef factorial(k):\n if k < 2:\n return 1\n else:\n return factorial(k-1) * k\n\n\ndef poisspdf(lbd, k):\n return (exp(-lbd) * lbd**k)/factorial(k)\n\n\nlbd1,lbd2 = [float(ii) for ii in input().split()]\n\n\n# remember: E(x) = lbd, Var(x) = lbd = E(X^2)-E(X)^2\n# thus E(X^2) = lbd+ lbd^2\n# remember linearity of expectation:\n# E(Ca) = E(160+40X^2) = 160+40*E(X^2)\n#\n# hence\n# E(Ca) = 160+40*(lbd+lbd^2)\n\nexpectedCa = round((160 + 40 * (lbd1 + lbd1**2)), 3)\nexpectedCb = 
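Back in models_sessions.py, savesession declares `global count` a second time after `count += 1`; Python 3 rejects the function at compile time (SyntaxError: name 'count' is assigned to before global declaration), and any rows still buffered when the import ends short of a full 1000-row batch are never written. A sketch that declares the globals once and exposes an explicit flush; executesql is the import already at the top of that record, and flush_rows is a hypothetical helper the caller would invoke once after the loop:

```python
def savesession(session, sqldb):
    global count, row_data  # declare once, before any use or assignment

    # ... field extraction exactly as in the original ...
    count += 1
    row_data.append((id, start_session, end_session, buid))
    if count == 1000:
        flush_rows(sqldb)

def flush_rows(sqldb):
    """Write whatever is buffered; call once more after the import loop."""
    global count, row_data, row_count
    if not row_data:
        return
    sessionsql = ('INSERT INTO sessions (_id, start_session, end_session, buid) '
                  'VALUES ("%s", "%s", "%s", "%s")')
    row_count += 1
    print("session - row " + str(row_count))
    executesql(sessionsql, row_data, sqldb)
    row_data = []
    count = 0
```

(On a smaller note, send_history in server.py above reduces to `for item in self.factory.messages[-10:]: self.sendLine(item)`; a negative slice already handles the short-history case.)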
round((128 + 40 * (lbd2 + lbd2**2)), 3)\n\nprint(expectedCa)\nprint(expectedCb)\n","sub_path":"day_5_poisson_dist_2.py","file_name":"day_5_poisson_dist_2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"518329383","text":"#!/bin/python3\nimport PySimpleGUIQt as sg\nimport core\nimport config\n\nwalld = core.Walld(config.API, config.MAIN_FOLDER)\n\nmenu_def = ['BLANK', ['spin_dice', '---', '&Save', 'Save as...', 'Category',walld.get_categories(),\\\n'Resolution', ['16:9::res_', '16:10::res_', '21:9::res_'], 'E&xit', '!master']]\n\ntray = sg.SystemTray(menu=menu_def, filename=r'temp/kk.x-icon')\n\ndef make_flip(item):\n if \"cat_\" in item:\n print('cat in item')\n place = 5\n elif \"res_\" in item:\n place = 7\n if '*' in item:\n menu_def[1][place][menu_def[1][place].index(item)] = item[1:]\n walld.change_option(item[1:])\n else:\n menu_def[1][place][menu_def[1][place].index(item)] = '*' + item\n walld.change_option(item, add=True)\n\ndef restore_settings():\n for i in walld.get_settings()['categories']:\n menu_def[1][5][menu_def[1][5].index(i)] = \"*\" + i\n for i in walld.get_settings()['resolutions']:\n menu_def[1][7][menu_def[1][7].index(i)] = \"*\" + i\n tray.Update(menu=menu_def)\n\ndef tray_start():\n restore_settings()\n while True: # The event loop\n menu_item = tray.Read()\n if menu_item == 'Exit':\n break\n\n elif menu_item == 'Save as...':\n apath = sg.PopupGetFile('hi',save_as=True, file_types=(('PNG files', '*.png' ),('JPEG files', '*.jpg')))\n walld.save_image(apath)\n\n elif menu_item == '__ACTIVATED__':\n walld.spin_dice()\n\n elif 'cat_' in menu_item:\n make_flip(menu_item)\n tray.Update(menu=menu_def)\n #ТУТ НУЖЕН ЦИКЛ FOR ДЛЯ ПРОШЕРСТЕНИЯ ВСЕХ ЛИСТОВ\n elif 'res_' in menu_item:\n make_flip(menu_item)\n tray.Update(menu=menu_def)\n\n elif menu_item == 'spin_dice':\n walld.spin_dice()\n\n elif menu_item == 'Save':\n walld.save_image()\n\ntray_start()\n","sub_path":"tray.py","file_name":"tray.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640329129","text":"# # import numpy as np\n# #\n# # import matplotlib\n# # #matplotlib.use('Agg')\n# # import matplotlib.pyplot as plt\n# #\n# # import matplotlib.pyplot as plt\n# # import numpy as np\n# #\n# #\n# # def f(t):\n# # s1 = np.sin(2*np.pi*t)\n# # e1 = np.exp(-t)\n# # return np.multiply(s1, e1)\n# #\n# # t1 = np.arange(0.0, 5.0, 0.1)\n# # t2 = np.arange(0.0, 5.0, 0.02)\n# #\n# # fig, ax = plt.subplots()\n# # plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')\n# # plt.text(3.0, 0.6, 'f(t) = exp(-t) sin(2 pi t)')\n# # ttext = plt.title('Fun with text!')\n# # ytext = plt.ylabel('Damped oscillation')\n# # xtext = plt.xlabel('time (s)')\n# #\n# # plt.setp(ttext, size='large', color='r', style='italic')\n# # plt.setp(xtext, size='medium', name=['Courier', 'DejaVu Sans Mono'],\n# # weight='bold', color='g')\n# # plt.setp(ytext, size='medium', name=['Helvetica', 'DejaVu Sans'],\n# # weight='light', color='b')\n# # plt.show()\n# #\n# # import matplotlib.pyplot as plt\n# # import matplotlib.cbook as cbook\n# #\n# # fname = cbook.get_sample_data('msft.csv', asfileobj=False)\n# # fname2 = cbook.get_sample_data('data_x_x2_x3.csv', asfileobj=False)\n# #\n# # # test 1; use ints\n# # plt.plotfile(fname, (0, 5, 6))\n# #\n# # # test 2; use names\n# # plt.plotfile(fname, ('date', 'volume', 'adj_close'))\n# #\n# # # test 3; use semilogy for volume\n# # 
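In tray.py above, make_flip leaves `place` unbound whenever the item contains neither "cat_" nor "res_", so an unexpected menu string raises NameError at `menu_def[1][place]`. (The Russian comment in the event loop translates as "a FOR loop is needed here to walk through all the lists".) A defensive sketch against the same menu_def/walld globals:

```python
def make_flip(item):
    # Map the item's tag to its submenu index; bail out early instead of
    # falling through with `place` unbound.
    if "cat_" in item:
        place = 5
    elif "res_" in item:
        place = 7
    else:
        return

    submenu = menu_def[1][place]
    if item.startswith('*'):                    # enabled -> disable
        submenu[submenu.index(item)] = item[1:]
        walld.change_option(item[1:])
    else:                                       # disabled -> enable
        submenu[submenu.index(item)] = '*' + item
        walld.change_option(item, add=True)
```

`startswith('*')` is a slightly stricter reading than the original `'*' in item`, which would also match an asterisk anywhere inside a category name.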
plt.plotfile(fname, ('date', 'volume', 'adj_close'),\n# # plotfuncs={'volume': 'semilogy'})\n# #\n# # # test 4; use semilogy for volume\n# # plt.plotfile(fname, (0, 5, 6), plotfuncs={5: 'semilogy'})\n# #\n# # # test 5; single subplot\n# # plt.plotfile(fname, ('date', 'open', 'high', 'low', 'close'), subplots=False)\n# #\n# # # test 6; labeling, if no names in csv-file\n# # plt.plotfile(fname2, cols=(0, 1, 2), delimiter=' ',\n# # names=['$x$', '$f(x)=x^2$', '$f(x)=x^3$'])\n# #\n# # # test 7; more than one file per figure--illustrated here with a single file\n# # plt.plotfile(fname2, cols=(0, 1), delimiter=' ')\n# # plt.plotfile(fname2, cols=(0, 2), newfig=False,\n# # delimiter=' ') # use current figure\n# # plt.xlabel(r'$x$')\n# # plt.ylabel(r'$f(x) = x^2, x^3$')\n# #\n# # # test 8; use bar for volume\n# # plt.plotfile(fname, (0, 5, 6), plotfuncs={5: 'bar'})\n# #\n# # plt.show()\n#\n# import numpy as np\n# arr = [1,2,3,4,5,6]\n# #求均值\n# arr_mean = np.mean(arr)\n# #求方差\n# arr_var = np.var(arr)\n# #求标准差\n# arr_std = np.std(arr,ddof=1)\n# print(\"平均值为:%f\" % arr_mean)\n# print(\"方差为:%f\" % arr_var)\n# print(\"标准差为:%f\" % arr_std)\n#\n# num_client = 100\n# num_params = 450\n# dict_users = {i: np.array([]) for i in range(num_params)}\n#\n# #print(len(dict_users))\n#\n# for i in range(num_params):\n# dict_users[i] = np.random.randint(0,1,size=[3,6,5,5])\n# print(dict_users[i])\n#\n#\n# a = {i: np.array([]) for i in range(num_client)}\n# for i in range(num_client):\n# a[i] = np.random.randint(0,100,size=[3,6,5,5])\n#\n#\n# print(a[0][0][0][0][0])\n# x = []\n# res_var = []\n# res_std = []\n# for j in range(3):\n# for k in range(6):\n# for m in range(5):\n# for n in range(5):\n# if(len(x)!=0):\n# res_var.append(np.var(x))\n# res_std.append(np.std(x,ddof=1))\n# x = []\n# for i in range(num_client):\n# x.append(a[i][j][k][m][n])\n# print(a[0].flatten(),len(a[0].flatten()))\n# print(res_var,len(res_var))\n# print(res_std,len(res_std))\n#\n# mean_var = np.mean(res_var)\n# print(mean_var)\n# mean_std = np.mean(res_std)\n# print(mean_std)\n#\n# import tensorflow as tf\n# img1 = tf.constant(value=[[[[1],[2],[3],[4]],[[1],[2],[3],[4]],[[1],[2],[3],[4]],[[1],[2],[3],[4]]]],dtype=tf.float32)\n# img2 = tf.constant(value=[[[[1],[1],[1],[1]],[[1],[1],[1],[1]],[[1],[1],[1],[1]],[[1],[1],[1],[1]]]],dtype=tf.float32)\n# img = tf.concat(values=[img1,img2],axis=3)\n# sess=tf.Session()\n# #sess.run(tf.initialize_all_variables())\n# sess.run(tf.global_variables_initializer())\n# print(img)\n# print(\"out1=\",type(img))\n#\n#\n# #转化为numpy数组\n# img_numpy=img.eval(session=sess)\n# print(img_numpy)\n# print(img_numpy.flatten(),len(img_numpy.flatten()))\n# print(\"out2=\",type(img_numpy))\n#\n#\n# #转化为tensor\n# img_tensor= tf.convert_to_tensor(img_numpy)\n# print(img_tensor)\n# print(\"out2=\",type(img_tensor))\n#\n# #print(dict_users[0])\n#\n\"\"\"\nCopyright (c) 2017, Gavin Weiguang Ding\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n3. 
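The commented-out block in new_test.py above computes per-parameter variance and standard deviation across 100 clients with four nested index loops and a manual accumulator; as written it also skips the last parameter, because `x` is only flushed at the start of the next iteration (hence a result length of 449 rather than the 450 parameters). Stacking the client tensors lets NumPy do the whole computation at once; a sketch with the same shapes:

```python
import numpy as np

num_clients = 100
rng = np.random.default_rng(0)
# One (3, 6, 5, 5) weight tensor per client, as in the snippet above.
a = {i: rng.integers(0, 100, size=(3, 6, 5, 5)) for i in range(num_clients)}

# Shape (num_clients, 3, 6, 5, 5); axis 0 indexes clients.
stacked = np.stack([a[i] for i in range(num_clients)])

res_var = np.var(stacked, axis=0)           # per-parameter variance
res_std = np.std(stacked, axis=0, ddof=1)   # per-parameter sample std

print(res_var.size)                   # 450 -- no off-by-one
print(res_var.mean(), res_std.mean())
```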
Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.patches import Circle\n\nNumDots = 4\nNumConvMax = 8\nNumFcMax = 20\nWhite = 1.\nLight = 0.7\nMedium = 0.5\nDark = 0.3\nDarker = 0.15\nBlack = 0.\n\n\ndef add_layer(patches, colors, size=(24, 24), num=5,\n top_left=[0, 0],\n loc_diff=[3, -3],\n ):\n # add a rectangle\n top_left = np.array(top_left)\n loc_diff = np.array(loc_diff)\n loc_start = top_left - np.array([0, size[0]])\n for ind in range(num):\n patches.append(Rectangle(loc_start + ind * loc_diff, size[1], size[0]))\n if ind % 2:\n colors.append(Medium)\n else:\n colors.append(Light)\n\n\ndef add_layer_with_omission(patches, colors, size=(24, 24),\n num=5, num_max=8,\n num_dots=4,\n top_left=[0, 0],\n loc_diff=[3, -3],\n ):\n # add a rectangle\n top_left = np.array(top_left)\n loc_diff = np.array(loc_diff)\n loc_start = top_left - np.array([0, size[0]])\n this_num = min(num, num_max)\n start_omit = (this_num - num_dots) // 2\n end_omit = this_num - start_omit\n start_omit -= 1\n for ind in range(this_num):\n if (num > num_max) and (start_omit < ind < end_omit):\n omit = True\n else:\n omit = False\n\n if omit:\n patches.append(\n Circle(loc_start + ind * loc_diff + np.array(size) / 2, 0.5))\n else:\n patches.append(Rectangle(loc_start + ind * loc_diff,\n size[1], size[0]))\n\n if omit:\n colors.append(Black)\n elif ind % 2:\n colors.append(Medium)\n else:\n colors.append(Light)\n\n\ndef add_mapping(patches, colors, start_ratio, end_ratio, patch_size, ind_bgn,\n top_left_list, loc_diff_list, num_show_list, size_list):\n\n start_loc = top_left_list[ind_bgn] \\\n + (num_show_list[ind_bgn] - 1) * np.array(loc_diff_list[ind_bgn]) \\\n + np.array([start_ratio[0] * (size_list[ind_bgn][1] - patch_size[1]),\n - start_ratio[1] * (size_list[ind_bgn][0] - patch_size[0])]\n )\n\n\n\n\n end_loc = top_left_list[ind_bgn + 1] \\\n + (num_show_list[ind_bgn + 1] - 1) * np.array(\n loc_diff_list[ind_bgn + 1]) \\\n + np.array([end_ratio[0] * size_list[ind_bgn + 1][1],\n - end_ratio[1] * size_list[ind_bgn + 1][0]])\n\n\n patches.append(Rectangle(start_loc, patch_size[1], -patch_size[0]))\n colors.append(Dark)\n patches.append(Line2D([start_loc[0], end_loc[0]],\n [start_loc[1], end_loc[1]]))\n colors.append(Darker)\n patches.append(Line2D([start_loc[0] + patch_size[1], end_loc[0]],\n [start_loc[1], end_loc[1]]))\n colors.append(Darker)\n patches.append(Line2D([start_loc[0], end_loc[0]],\n [start_loc[1] - 
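add_layer_with_omission above swaps the middle rectangles for dots when a layer has more feature maps than num_max; the window arithmetic is terse, so here is a trace with the values the script later uses for its 16-map conv layers (num=16, num_max=8, num_dots=4):

```python
num, num_max, num_dots = 16, 8, 4
this_num = min(num, num_max)                       # at most 8 shapes drawn
start_omit = (this_num - num_dots) // 2 - 1        # (8-4)//2 - 1 = 1
end_omit = this_num - (this_num - num_dots) // 2   # 8 - 2 = 6

# The drawing loop tests `start_omit < ind < end_omit`:
dots = [ind for ind in range(this_num) if start_omit < ind < end_omit]
print(dots)   # [2, 3, 4, 5] -> exactly num_dots circles in the middle
```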
patch_size[0], end_loc[1]]))\n colors.append(Darker)\n patches.append(Line2D([start_loc[0] + patch_size[1], end_loc[0]],\n [start_loc[1] - patch_size[0], end_loc[1]]))\n colors.append(Darker)\n\n\n\ndef label(xy, text, xy_off=[0, 4]):\n plt.text(xy[0] + xy_off[0], xy[1] + xy_off[1], text,\n family='sans-serif', size=8)\n\n\nif __name__ == '__main__':\n\n fc_unit_size = 2\n layer_width = 40\n flag_omit = True\n\n patches = []\n colors = []\n\n fig, ax = plt.subplots()\n\n\n ############################\n # conv layers\n size_list = [(32, 32), (28, 28), (14, 14), (10, 10), (5, 5)]\n num_list = [3, 6, 6, 16, 16]\n x_diff_list = [0, layer_width, layer_width, layer_width, layer_width]\n text_list = ['Inputs'] + ['Feature\\nmaps'] * (len(size_list) - 1)\n loc_diff_list = [[3, -3]] * len(size_list)\n\n num_show_list = list(map(min, num_list, [NumConvMax] * len(num_list)))\n top_left_list = np.c_[np.cumsum(x_diff_list), np.zeros(len(x_diff_list))]\n\n for ind in range(len(size_list)-1,-1,-1):\n if flag_omit:\n add_layer_with_omission(patches, colors, size=size_list[ind],\n num=num_list[ind],\n num_max=NumConvMax,\n num_dots=NumDots,\n top_left=top_left_list[ind],\n loc_diff=loc_diff_list[ind])\n else:\n add_layer(patches, colors, size=size_list[ind],\n num=num_show_list[ind],\n top_left=top_left_list[ind], loc_diff=loc_diff_list[ind])\n label(top_left_list[ind], text_list[ind] + '\\n{}@{}x{}'.format(\n num_list[ind], size_list[ind][0], size_list[ind][1]))\n\n ############################\n # in between layers\n start_ratio_list = [[0.4, 0.5], [0.4, 0.8], [0.4, 0.5], [0.4, 0.8]]\n end_ratio_list = [[0.4, 0.5], [0.4, 0.8], [0.4, 0.5], [0.4, 0.8]]\n patch_size_list = [(5, 5), (2, 2), (5, 5), (2, 2)]\n ind_bgn_list = range(len(patch_size_list))\n text_list = ['Convolution', 'Max-pooling', 'Convolution', 'Max-pooling']\n\n for ind in range(len(patch_size_list)):\n add_mapping(\n patches, colors, start_ratio_list[ind], end_ratio_list[ind],\n patch_size_list[ind], ind,\n top_left_list, loc_diff_list, num_show_list, size_list)\n label(top_left_list[ind], text_list[ind] + '\\n{}x{} kernel'.format(\n patch_size_list[ind][0], patch_size_list[ind][1]), xy_off=[26, -65]\n )\n\n\n ############################\n # fully connected layers\n size_list = [(fc_unit_size, fc_unit_size)] * 3\n num_list = [120, 84, 10]\n num_show_list = list(map(min, num_list, [NumFcMax] * len(num_list)))\n x_diff_list = [sum(x_diff_list) + layer_width, layer_width, layer_width]\n top_left_list = np.c_[np.cumsum(x_diff_list), np.zeros(len(x_diff_list))]\n loc_diff_list = [[fc_unit_size, -fc_unit_size]] * len(top_left_list)\n text_list = ['Hidden\\nunits'] * (len(size_list) - 1) + ['Outputs']\n\n for ind in range(len(size_list)):\n if flag_omit:\n add_layer_with_omission(patches, colors, size=size_list[ind],\n num=num_list[ind],\n num_max=NumFcMax,\n num_dots=NumDots,\n top_left=top_left_list[ind],\n loc_diff=loc_diff_list[ind])\n else:\n add_layer(patches, colors, size=size_list[ind],\n num=num_show_list[ind],\n top_left=top_left_list[ind],\n loc_diff=loc_diff_list[ind])\n label(top_left_list[ind], text_list[ind] + '\\n{}'.format(\n num_list[ind]))\n\n text_list = ['Flatten\\n', 'Fully\\nconnected', 'Fully\\nconnected']\n\n for ind in range(len(size_list)):\n label(top_left_list[ind], text_list[ind], xy_off=[-10, -65])\n\n ############################\n for patch, color in zip(patches, colors):\n patch.set_color(color * np.ones(3))\n if isinstance(patch, Line2D):\n ax.add_line(patch)\n else:\n patch.set_edgecolor(Black * np.ones(3))\n 
ax.add_patch(patch)\n\n plt.tight_layout()\n plt.axis('equal')\n plt.axis('off')\n plt.show()\n fig.set_size_inches(8, 2.5)\n\n fig_dir = './'\n fig_ext = '.png'\n fig.savefig(os.path.join(fig_dir, 'convnet_fig' + fig_ext),\n bbox_inches='tight', pad_inches=0)","sub_path":"FedAvg/new_test.py","file_name":"new_test.py","file_ext":"py","file_size_in_byte":13547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"165309443","text":"\n\n\n################################################################################\n# Zadanie 4\n\n\n\ndef revers_sentence(inStr):\n outStr = ''\n for x in range(len(inStr)-1,-1,-1):\n if (x == len(inStr)-1) or (inStr[x+1] == ' '):\n outStr = outStr + inStr[x].upper()\n else:\n outStr = outStr + inStr[x].lower()\n return outStr\n\n\n\n# a = input('\\n>')\n# n = count_char(a,'A')\n# print(n)\n\n\n\n################################################################################\n# Zadanie 5\n\n\n\ndef count_char(inStr,char):\n inStr = inStr.lower()\n char = char.lower()\n outCount = 0 #\n for x in inStr: # This could be achieved by simple inStr.count(char)\n if x == char: # but in that case why build a new function in the first place\n outCount += 1 #\n return outCount\n\n\n\n# a = input('\\n>')\n# rev = revers_sentence(a)\n# print(rev)\n\n\n\n################################################################################\n# Zadanie 6\n\n\n\ndef list_filter(int_values,*dividers):\n outList = []\n for x in int_values:\n if type(x) != int:\n continue\n divChk = True\n for y in dividers:\n if (type(y) != int) or (y < 2):\n continue\n x = int(x)\n y = int(y)\n if (x % y) == 0:\n divChk = False\n if divChk:\n outList.append(x)\n return outList\n\n\n\n# result = list_filter([1,0,8,15,20,11], 20)\n# print(result) # [1,8,15,11]\n# result = list_filter([1,8,'aa',1.1,15,20,11], 20,4,0)\n# print(result) # [1,15,11]\n# result = list_filter([1,8,15,20,11], 2, 5, 31,1,1.1,'aa')\n# print(result) # [1,11]\n\n\n\n################################################################################\n# Zadanie 7\n\n\n\nimport random\n\ndef get_random_elements(inList,randVal = 1):\n if (type(randVal) != int) or (randVal < 1) or (randVal > len(inList)):\n raise ValueError('No way to do this!')\n outList = []\n for x in range(randVal):\n randItem = inList[random.randint(0,len(inList)-1)]\n outList.append(randItem)\n inList.remove(randItem)\n return outList\n\n\n\n# n = get_random_elements([1,2,6,3,7]) # [2]\n# print(n)\n# n = get_random_elements([1,2,6,3,7],3) # [6,2,7]\n# print(n)\n# n = get_random_elements([1,2,6,3,7],16) # Wyjątek!\n# print(n)\n\n\n\n################################################################################\n# Zadanie 8\n\n\n\nfrom flask import Flask, request\nfrom jd_library import jd_htmlops\nfrom my_phonebook import pb\n\n\n\napp = Flask(__name__)\n\n\n\n@app.route('/pbk', methods=['GET', 'POST'])\ndef pbk():\n if request.method == 'POST':\n nameFilter = request.form['nameFilter']\n numFilter = request.form['numFilter']\n else:\n nameFilter = ''\n numFilter = ''\n tabContent = ''\n for x in pb:\n if (nameFilter.lower() in x['nickname'].lower()) and (numFilter in x['number']):\n tabContent += f'''\n \n {x['nickname']}\n {x['number']}\n \n '''\n if tabContent == '':\n tabContent = '''\n \n No record!\n \n '''\n siteContent = f'''\n

Moja książka telefoniczna:

\n
\n \n \n \n \n \n \n \n \n \n {tabContent}\n
NicknameNumber
\n \n \n \n
\n \n
\n '''\n outHtml = jd_htmlops.buildHtmlPage('My Phone Book', siteContent)\n return outHtml\n\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"336910210","text":"import logging.config\nfrom time import sleep\n\nimport lib\nimport telebot\n\nimport settings\n\nlogging.config.dictConfig(settings.logger_config)\nlogger = logging.getLogger('app_logger')\n\nbot = telebot.TeleBot(settings.TELEGRAM_BOT_TOKEN)\n\n\n@bot.message_handler(commands=['registration'])\ndef register_user(message):\n bot.send_message(message.chat.id, 'Укажите email который использовался при регистрации в Pyrus')\n bot.register_next_step_handler(message, process_registration)\n\n\ndef process_registration(message):\n user = lib.fetch_user_info(message.text.lower())\n if not user:\n bot.send_message(message.chat.id, f'Пользователь с email {message.text} не найден')\n return\n if dict(user).get('chat_id') == str(message.chat.id):\n bot.send_message(message.chat.id, f'Пользователь с email {message.text} уже зарегистрирован')\n return\n\n query_ok = lib.register_chat_id(message.text.lower(), message.chat.id)\n if query_ok:\n bot.send_message(message.chat.id, 'Ура! Вы зарегистрированы!\\nГотовьтесь получать штрафы')\n\n\nif __name__ == '__main__':\n while True:\n try:\n bot.polling()\n except Exception as e:\n logger.error(e, exc_info=True)\n sleep(15)\n","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"436650730","text":"import re\n\nimport pymel.core as pc\nfrom Maya_Rigging.Core import JointStretchNetworkLib as js\nfrom Maya_Rigging.Core.BuildModuleJointSkeletonLib import orientJoints\nfrom Maya_Rigging.Core import BuildWorld as bw\nfrom Maya_Rigging.Core.Setup.BuildSpaceSwitchSetup import buildSpaceSwitchSetup\nfrom Maya_Rigging.Core.Setup.BuildTwistJointSetup import buildTwistJointSetup\nfrom Maya_Rigging.Utils import CharUtilsLib as chUL\nfrom Maya_Rigging.Utils import CurveUtilLib as cuUL\nfrom Maya_Rigging.Utils import List\n\ndef buildQuadLegSetup(name,\n side,\n hipJoint,\n hockJoint,\n ankleJoint,\n ballJoint,\n stretchType,\n ikFkType,\n stretch,\n midLock,\n volume,\n world,\n scale,\n controlColor):\n FK = [] \t\t\t#array to hold fk joint chain\n IK = []\t\t\t\t#array to hold fk joint chain\n list = []\t\t\t#array to hold actual joint chain\n temp = ''\t\t\t#hold name unused joint to delete\n middleIndex = 0\t\t#hold middle index of ik joint chain\n modPos = []\t\t\t#hold mid postion for pole vectoe control\n hockIkHandle = []\n ankleIkHandle = []\t#hold all created ik handle\n ballIkHandle = []\n toeIkHandle = []\n fkContrlList = []\t#array to hold fk control joint chain\n fkCon = []\t\t\t#array to hold newly creted fk control\n ikSpaceGrp = ''\n parentGrp = []\t\t#holds name of parent grp of given object\n allChild = []\n ballJointChild = []\n childTemp = []\n cleanGrp = []\n\n partGrp = pc.group(em=True, n=(name + side + 'legParts_grp'))\n chUL.lockAndHide(partGrp, 'locknHide', 'trans rot scale')\n\n if world:\n cleanGrp = bw.build_world(name, scale)\n pc.parent(partGrp, cleanGrp[0])\n\n # get child joint of ball joint\n endJoint = chUL.getChildJoint(ballJoint)\n\n # checking fingers if its there then upparent for creating ik fk joints....\n allChild = 
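Two notes on the records that just closed. In answers.py, the demo calls under "Zadanie 4" actually exercise count_char and those under "Zadanie 5" exercise revers_sentence (the comment blocks appear swapped), and get_random_elements both mutates its input and reimplements sampling-without-replacement by remove-and-pick, which `random.sample` covers directly. (For reference, the Russian prompts in telegram_bot.py read, in English: "Enter the email you used when registering in Pyrus", "No user with email ... was found", "The user with email ... is already registered", and "Hooray! You are registered! Get ready to receive fines".) A hedged rewrite of the sampler:

```python
import random

def get_random_elements(values, k=1):
    """Return k distinct elements without mutating `values`."""
    if not isinstance(k, int) or not 1 <= k <= len(values):
        raise ValueError('No way to do this!')
    return random.sample(values, k)

print(get_random_elements([1, 2, 6, 3, 7]))     # e.g. [2]
print(get_random_elements([1, 2, 6, 3, 7], 3))  # e.g. [6, 2, 7]
```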
pc.listRelatives(ankleJoint, c=True)\n for i in range(len(allChild)):\n if allChild[i]!=ballJoint:\n childTemp = pc.parent(allChild[i], w=True)\n ballJointChild.append(childTemp[0])\n\n # duplicate joint chain twice in order to create ik fk switch\n FK = chUL.dupId(hipJoint,'prefix','fk')\n IK = chUL.dupId(hipJoint,'prefix','ik') \n\n # parent back fingers joint if its there\n if len(allChild)>1:\n for i in range(0,len(allChild)-1):\n pc.parent(ballJointChild[i], ankleJoint)\n\n for x in range(len(FK)):\n if pc.attributeQuery('twistJoints', n=FK[x], ex=True): # delete twistAttr if exists.....\n try:\n pc.deleteAttr(FK[x], attribute='twistJoints')\n pc.deleteAttr(IK[x], attribute='twistJoints')\n except:\n pass\n\n # hold all joint chain in array\n IK = chUL.findJointArray(('ik_' + hipJoint), ('ik_' + ballJoint))\n FK = chUL.findJointArray(('fk_' + hipJoint), ('fk_' + ballJoint))\n list = chUL.findJointArray(hipJoint, ballJoint)\n\n pc.select(cl=True)\n\n # create controls\n ankleCtrl = cuUL.curveControl('cube1', 'joint', controlColor)\n ankleCtrl[0] = pc.rename(ankleCtrl[0], name + side + 'foot_ctrl')\n cuUL.resizeCurves(None, 0, 0, 1, 2)\n flexCtrl = cuUL.curveControl('sphere', 'joint', controlColor)\n flexCtrl[0] = pc.rename(flexCtrl[0], name + side + 'flex_ctrl')\n cuUL.resizeCurves(None, 1, 1, 1, 0.7)\n kneeCtrl = cuUL.curveControl('cone', 'curve', controlColor)\n kneeCtrl[0] = pc.rename(kneeCtrl[0], name + side + 'knee_ctrl')\n switchCtrl = cuUL.curveControl('pin1', 'curve', controlColor)\n switchCtrl[0] = pc.rename(switchCtrl[0], name + side + 'legSwitches_ctrl')\n chUL.fixFacingAxis('Z', 0)\n\n # snap controls to to respective joints\n anklePos = pc.xform(ankleJoint, q=True, ws=True, rp=True)\n pc.setAttr((ankleCtrl[0] + '.t'), (anklePos[0], anklePos[1], anklePos[2]))\n tempCon = pc.aimConstraint(ballJoint, ankleCtrl[0], offset=[0,0,0], weight=1, aimVector=[0,0,1], upVector=[0,1,0], worldUpType='vector', worldUpVector=[0,1,0], skip=['x','z'])\n pc.delete(tempCon)\n tempLoc = pc.spaceLocator()\n pc.setAttr((tempLoc + '.t'), (anklePos[0],anklePos[1],anklePos[2]))\n tempClt = pc.cluster(ankleCtrl[0])\n pc.parent(tempClt[1], tempLoc)\n pc.pointConstraint(ballJoint, tempLoc, w=1)\n pc.scale(tempLoc,(scale,scale,scale))\n pc.select(ankleCtrl[0], r=True)\n pc.runtime.DeleteHistory()\n pc.delete(tempLoc)\n\n # snap flexCtrl[0]\n chUL.Snap(ankleJoint, flexCtrl[0])\n pc.makeIdentity(flexCtrl[0], apply=True, t=0, r=1, s=0)\n pc.parent(flexCtrl[0], ankleCtrl[0])\n flexCtrlZero = chUL.quickZeroOut(flexCtrl[0])\n\n chUL.Snap(ballJoint, switchCtrl[0])\n pc.setAttr((switchCtrl[0]+'.r'), (0,0,0))\n pc.select(cl=True)\n \n pc.parent(ankleCtrl[0], partGrp)\n pc.parent(kneeCtrl[0], partGrp)\n pc.parent(switchCtrl[0], partGrp)\n\n pc.parentConstraint(ballJoint, switchCtrl[0], mo=True, weight=1)\n chUL.lockAndHide(switchCtrl[0], 'locknHide', 'trans rot scale vis')\n\n # get middle index of ik joint for pole control placement\n tempIK = chUL.findJointArray(('ik_' + hipJoint), ('ik_' + hockJoint))\n middleIndex = ((len(tempIK))/2)\n modPos = chUL.zoofindPolePosition(hipJoint, IK[middleIndex], hockJoint, 0.7)\n pc.select(kneeCtrl[0], r=True)\n pc.setAttr((kneeCtrl[0]+'.t'),(modPos[0],modPos[1],modPos[2]))\n chUL.fixFacingAxis('Z', 1)\n pc.select(cl=True)\n # create ik fk connections from given arrey\n chUL.fkIkConnect(list,IK, FK, ikFkType, switchCtrl[0])\n pc.select(cl=True)\n\n # fk controllers and rename them respectively\n fkCon = chUL.fkControl(FK[0], 'circleCross', 1, controlColor)\n fkContrlList = 
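One portability catch in BuildQuadLegSetup.py above: `middleIndex = ((len(tempIK))/2)` is later used as a list index (`IK[middleIndex]`, and again for the guide curve). Under Python 3, which recent Maya versions run, `/` yields a float and the indexing raises TypeError; floor division keeps the index an int on both interpreter lines:

```python
# Floor division yields an int index under Python 2 and 3 alike.
middleIndex = len(tempIK) // 2
modPos = chUL.zoofindPolePosition(hipJoint, IK[middleIndex], hockJoint, 0.7)
```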
chUL.listHierarchy(fkCon[0])\n for f in fkContrlList:\n f = pc.rename(f, (name + side + f))\n\n # check stretch condition and create connections\n hockJointPos = pc.xform(hockJoint, q=True, worldSpace=True, rotatePivot=True)\n if stretch == 1:\n js.stretch_network(name, side, ('ik_' + hipJoint), ('ik_' + hockJoint), ankleCtrl[0], stretchType, midLock, kneeCtrl[0])\n chUL.stretchTypeConnect(list, IK, FK, stretchType, switchCtrl[0]) \n\n # some stretch corrrection purely name specific might not run on different case remember that..\n if stretch == 1:\n # select -r (name + side + 'ik_' + hipJoint + 'End_loc')\n # select -tgl (name + side + 'ik_' + ankleJoint + 'End_loc')\n tempSel = [(name + side + 'ik_' + hipJoint + 'End_loc'),(name + side + 'ik_' + hockJoint + 'End_loc')]\n chUL.lockAndHide(tempSel[0], 'unLock','trans')\n chUL.lockAndHide(tempSel[1],'unLock','trans')\n\n transConnection = pc.listConnections((tempSel[0] + '.tx'), d=False, s=True, plugs=True, skipConversionNodes=True)\n transNodePath = transConnection[0] \n match = re.search('[^.]*',str(transNodePath))\n transNode = []\n if match:\n transNode.append(match.group())\n elif transNodePath:\n transNode.append(transNodePath)\n pc.delete(transNode)\n \n transConnection = pc.listConnections((tempSel[1] + '.tx'), d=False, s=True, plugs=True, skipConversionNodes=True)\n transNodePath = transConnection[0] \n match = re.search('[^.]*',str(transNodePath))\n transNode = []\n if match:\n transNode.append(match.group()) \n elif transNodePath:\n transNode.append(transNodePath) \n pc.delete(transNode)\n \n ikSpaceGrp = pc.group(em=True, n=(name + side + 'stretchLocCon_grp'))\n pc.setAttr((ikSpaceGrp+'.t'),(hockJointPos[0],hockJointPos[1],hockJointPos[2]))\n pc.select(cl=True)\n \n pointConA = pc.pointConstraint(ikSpaceGrp, tempSel[0], offset=[0,0,0], weight=1)\n pointConA = pc.pointConstraint(ikSpaceGrp, tempSel[1], offset=[0,0,0], weight=1)\n\n\n # creating twist joint setup if attribute exists on given joint\n size = (len(list))-1\n twistJoints = []\n sknJoints = []\n ankleRots = pc.listRelatives(ankleJoint, parent=True)\n for l in list:\n if l == ankleRots[0]:\n twistJoints = buildTwistJointSetup(name,\n side,\n l,\n stretchType,\n ikFkType,\n 'child',\n ankleJoint,\n switchCtrl[0],\n stretch,\n volume,\n scale,\n controlColor)\n else:\n twistJoints = buildTwistJointSetup(name,\n side,\n l,\n stretchType,\n ikFkType,\n 'parent',\n ankleJoint,\n switchCtrl[0],\n stretch,\n volume,\n scale,\n controlColor)\n sknJoints.extend(twistJoints)\n\n\n # createik handle for ik joint chain\n hockIkHandle = pc.ikHandle(name=(name + side + 'hock_ikhandle'), startJoint=('ik_' + hipJoint), endEffector=('ik_' + hockJoint), solver='ikRPsolver')\n ankleIkHandle = pc.ikHandle(name=(name + side + 'ankle_ikhandle'), startJoint=('ik_' + hockJoint), endEffector=('ik_' + ankleJoint), solver='ikRPsolver')\n ballIkHandle = pc.ikHandle(name=(name + side + 'ball_ikhandle'), startJoint=('ik_' + ankleJoint), endEffector=('ik_' + ballJoint), solver='ikRPsolver')\n toeIkHandle = pc.ikHandle(name=(name + side + 'toe_ikhandle'), startJoint=('ik_' + ballJoint), endEffector=('ik_' + endJoint), solver='ikRPsolver')\n\n # add attribute to ankle control and create reverse foot setup for leg\n pc.addAttr(ankleCtrl[0], ln='FOOT', at='bool', keyable=True)\n pc.setAttr((ankleCtrl[0] + '.FOOT'), keyable=False, channelBox=True)\n pc.addAttr(ankleCtrl[0], ln='roll', at='double', min=-10, max=10, keyable=True)\n pc.addAttr(ankleCtrl[0], ln='rollAngle', at='double', dv=50, keyable=True)\n 
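The stretch-correction block above recovers a node name from a plug string with `re.search('[^.]*', ...)` plus match bookkeeping, twice over. PyMEL already hands back Attribute objects from `listConnections(..., plugs=True)`, so the owning node is one call away; a sketch over the same variables:

```python
transConnection = pc.listConnections((tempSel[0] + '.tx'),
                                     d=False, s=True, plugs=True,
                                     skipConversionNodes=True)
if transConnection:
    # Attribute.node() returns the node that owns the plug -- no regex needed.
    pc.delete(transConnection[0].node())
```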
pc.setAttr((ankleCtrl[0] + '.rollAngle'), keyable=False, channelBox=True)\n pc.addAttr(ankleCtrl[0], ln='tap', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='clawRoll', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='bank', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='ballTwist', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='toePivot', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='toePivotSide', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='heelPivot', at='double', keyable=True)\n pc.addAttr(ankleCtrl[0], ln='heelPivotSide', at='double', keyable=True)\n\n rollBallGrp = pc.group(em=True, n=(name + side + 'clawRoll_grp'))\n tapGrp = pc.group(em=True, n=(name + side + 'tap_grp'))\n toePivotGrp = pc.group(em=True, n=(name + side + 'toePivot_grp'))\n ballPivotGrp = pc.group(em=True, n=(name + side + 'ballPivot_grp'))\n heelPivotGrp = pc.group(em=True, n=(name + side + 'heelPivot_grp'))\n inPivotBankGrp = pc.group(em=True, n=(name + side + 'inPivotBank_grp'))\n outPivotBankGrp = pc.group(em=True, n=(name + side + 'outPivotBank_grp'))\n\n #snapping to right postion\n heelPos = []\n bankInPos = []\n bankOutPos = []\n\n anklePos = pc.xform(ankleJoint, q=True, ws=True, rp=True)\n ballPos = pc.xform(ballJoint, q=True, ws=True, rp=True)\n endPos = pc.xform(endJoint, q=True, ws=True, rp=True)\n\n tempPivots = ''\n\n if not pc.attributeQuery('heelPos', n=ankleJoint, ex=True):\n tempPivots = chUL.makeFootHeelPivots(ankleJoint,ballJoint)\n heel = pc.getAttr(ankleJoint+'.heelPos')\n bankIn = pc.getAttr(ballJoint + '.bankInPos')\n bankOut = pc.getAttr(ballJoint + '.bankOutPos')\n heelPos = pc.xform(heel, q=True, ws=True, rp=True)\n bankInPos = pc.xform(bankIn, q=True, ws=True, rp=True)\n bankOutPos = pc.xform(bankOut, q=True, ws=True, rp=True)\n else:\n heel = pc.getAttr(ankleJoint+'.heelPos')\n bankIn = pc.getAttr(ballJoint + '.bankInPos')\n bankOut = pc.getAttr(ballJoint + '.bankOutPos')\n heelPos = pc.xform(heel, q=True, ws=True, rp=True)\n bankInPos = pc.xform(bankIn, q=True, ws=True, rp=True)\n bankOutPos = pc.xform(bankOut, q=True, ws=True, rp=True)\n try:\n pc.delete(tempPivots)\n except:\n pass\n\n pc.setAttr((rollBallGrp + '.t'), (ballPos[0],ballPos[1],ballPos[2]))\n pc.setAttr((tapGrp + '.t'), (ballPos[0],ballPos[1],ballPos[2]))\n pc.setAttr((toePivotGrp + '.t'), (endPos[0],endPos[1],endPos[2]))\n pc.setAttr((ballPivotGrp + '.t'), (ballPos[0],ballPos[1],ballPos[2]))\n pc.setAttr((heelPivotGrp + '.t'), (heelPos[0],heelPos[1],heelPos[2]))\n pc.setAttr((inPivotBankGrp + '.t'), (bankInPos[0],bankInPos[1],bankInPos[2]))\n pc.setAttr((outPivotBankGrp + '.t'), (bankOutPos[0],bankOutPos[1],bankOutPos[2]))\n\n #parent to the respective grp and zeroout the grp if it has any rotation value\n pc.parent(heelPivotGrp, ankleCtrl[0])\n pc.setAttr((heelPivotGrp + '.r'), (0,0,0))\n pc.parent(toePivotGrp,heelPivotGrp)\n pc.setAttr((toePivotGrp + '.r'), (0,0,0))\n pc.parent(ballPivotGrp, toePivotGrp)\n pc.setAttr((ballPivotGrp + '.r'), (0,0,0))\n pc.parent(inPivotBankGrp, ballPivotGrp)\n pc.setAttr((inPivotBankGrp + '.r'), (0,0,0))\n pc.parent(outPivotBankGrp, inPivotBankGrp)\n pc.setAttr((outPivotBankGrp + '.r'), (0,0,0))\n pc.parent(tapGrp,outPivotBankGrp)\n pc.setAttr((tapGrp + '.r'), (0,0,0))\n pc.parent(rollBallGrp, outPivotBankGrp)\n pc.setAttr((rollBallGrp + '.r'), (0,0,0))\n pc.parent(flexCtrlZero[0],rollBallGrp)\n pc.setAttr((flexCtrlZero[0] + '.r'),(0,0,0))\n\n #connnecting foot attribute\n footRollRange = pc.createNode('setRange', 
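The run of near-identical pc.addAttr calls for the foot attributes can be folded into a loop, which keeps the attribute list in one place as the rig grows; only `roll` and `rollAngle` carry extra flags. A sketch against the same control and attribute names:

```python
pc.addAttr(ankleCtrl[0], ln='roll', at='double', min=-10, max=10, keyable=True)
pc.addAttr(ankleCtrl[0], ln='rollAngle', at='double', dv=50, keyable=True)
for attr in ('tap', 'clawRoll', 'bank', 'ballTwist',
             'toePivot', 'toePivotSide', 'heelPivot', 'heelPivotSide'):
    pc.addAttr(ankleCtrl[0], ln=attr, at='double', keyable=True)
```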
n=(name + side + 'heelRoll_range'))\n pc.setAttr((footRollRange + '.oldMinX'), -10)\n\n rollBallRMV = pc.createNode('remapValue', n=(name + side + 'clawRoll_rmv'))\n pc.setAttr((rollBallRMV + '.value[1].value_FloatValue'),0)\n pc.setAttr((rollBallRMV + '.value[2].value_Position'),0.5)\n pc.setAttr((rollBallRMV + '.value[2].value_FloatValue'),1)\n pc.setAttr((rollBallRMV + '.value[2].value_Interp'),1)\n pc.setAttr((rollBallRMV + '.inputMax'),10)\n\n rollToeRMV = pc.createNode('remapValue', n=(name + side + 'toeRoll_rmv'))\n pc.setAttr((rollToeRMV + '.value[2].value_Position'),0.5)\n pc.setAttr((rollToeRMV + '.value[2].value_Interp'),1)\n pc.setAttr((rollToeRMV + '.inputMax'),10)\n\n hellRollMDL = pc.createNode('multDoubleLinear', n=(name + side + 'hellRoll_mdl'))\n pc.setAttr((hellRollMDL + '.input2'),-1)\n\n rollBallAdl = pc.createNode('addDoubleLinear', n=(name + side + 'clawRoll_adl'))\n toeRollAdl = pc.createNode('addDoubleLinear', n=(name + side + 'toeRoll_adl'))\n heelRollAdl = pc.createNode('addDoubleLinear', n=(name + side + 'heelRoll_adl'))\n\n pc.connectAttr((ankleCtrl[0] + '.rollAngle'), (hellRollMDL + '.input1'))\n pc.connectAttr((hellRollMDL + '.output'), (footRollRange + '.minX'))\n pc.connectAttr((ankleCtrl[0] +'.roll'), (footRollRange + '.valueX'))\n\n pc.connectAttr((ankleCtrl[0] + '.roll'), (rollBallRMV + '.inputValue'))\n pc.connectAttr((ankleCtrl[0] + '.rollAngle'), (rollBallRMV + '.outputMax'))\n\n pc.connectAttr((ankleCtrl[0] +'.roll'), (rollToeRMV + '.inputValue'))\n pc.connectAttr((ankleCtrl[0] +'.rollAngle'), (rollToeRMV + '.outputMax'))\n\n pc.connectAttr((footRollRange + '.outValueX'), (heelRollAdl + '.input1'))\n pc.connectAttr((rollBallRMV + '.outValue'), (rollBallAdl + '.input1'))\n pc.connectAttr((rollToeRMV + '.outValue'), (toeRollAdl + '.input1'))\n\n pc.connectAttr((ankleCtrl[0] + '.clawRoll'), (rollBallAdl + '.input2'))\n pc.connectAttr((ankleCtrl[0] + '.toePivot'), (toeRollAdl + '.input2'))\n pc.connectAttr((ankleCtrl[0] + '.heelPivot'), (heelRollAdl + '.input2'))\n\n\n pc.connectAttr((ankleCtrl[0] + '.tap'), (tapGrp + '.rx'))\n pc.connectAttr((rollBallAdl + '.output'), (rollBallGrp + '.rx'))\n pc.connectAttr((ankleCtrl[0] + '.bank'), (inPivotBankGrp + '.rz'))\n pc.transformLimits(inPivotBankGrp, rz=[0,45], erz=[1,0])\n pc.connectAttr((ankleCtrl[0] + '.bank'), (outPivotBankGrp + '.rz'))\n pc.transformLimits(outPivotBankGrp, rz=[-45,0], erz=[0,1])\n pc.connectAttr((ankleCtrl[0] + '.ballTwist'), (ballPivotGrp + '.ry'))\n pc.connectAttr((toeRollAdl + '.output'), (toePivotGrp + '.rx'))\n pc.connectAttr((ankleCtrl[0] + '.toePivotSide'), (toePivotGrp + '.ry'))\n pc.connectAttr((heelRollAdl + '.output'), (heelPivotGrp + '.rx'))\n pc.connectAttr((ankleCtrl[0] + '.heelPivotSide'), (heelPivotGrp + '.ry'))\n\n #now create flex leg setup..\n legFlexJointsPos = chUL.getQuadFlexLegJointPos(hipJoint, ankleJoint)\n flexJnt = []\n\n #creating spring joints\n for i in range(4):\n tempPos = legFlexJointsPos[i]\t\n flexJnt.append(pc.joint(p=[(tempPos[0]),(tempPos[1]),(tempPos[2])], n=(name+side+'legFlex'+str(i+1)+'_jnt')))\n\n\n # orienting joints and parent..\n orientJoints(flexJnt, 'xyz', 'zdown')\n hipParentJoint = chUL.getParent(hipJoint)\n try:\n pc.parent(flexJnt[0], hipParentJoint)\n except:\n pass\n pc.setAttr((flexJnt[0] + '.visibility'),0)\n chUL.lockAndHide(flexJnt[0],'lock','vis')\n\n # creating leg offset cntrl setup\n pc.select(cl=True)\n legOffsetCtrl = cuUL.curveControl('cube1', 'joint', controlColor)\n legOffsetCtrl[0] = pc.rename(legOffsetCtrl[0], name + 
side + 'legOffset_ctrl')\n\n chUL.Snap(hipJoint,legOffsetCtrl[0])\n pc.setAttr((legOffsetCtrl[0]+'.r'),(0,0,0))\n\n pc.parent(legOffsetCtrl[0],partGrp)\n chUL.lockAndHide(legOffsetCtrl[0],'locknHide','rot scale vis')\n\n #looking for first fk control unlock trans create point cons with offset control and lock it again \n fkConParent = chUL.getParent(fkContrlList[0]) \n chUL.lockAndHide(fkConParent[0],'unLock','trans')\n pc.parentConstraint(legOffsetCtrl[0], flexJnt[0], mo=True, skipRotate=['x','y','z'], weight=1)\n pc.parentConstraint(legOffsetCtrl[0], IK[0], mo=True, skipRotate=['x','y','z'], weight=1)\n pc.parentConstraint(legOffsetCtrl[0], fkConParent, mo=True, skipRotate=['x','y','z'], weight=1)\n pc.parentConstraint(legOffsetCtrl[0], hipJoint, mo=True, skipRotate=['x','y','z'],weight=1)\n chUL.lockAndHide(fkConParent[0],'lock','trans')\n\n #create flex ik handle\n flexIkHandle = pc.ikHandle(name=(name + side + 'flex_ikhandle'), startJoint=flexJnt[0], endEffector=flexJnt[3], solver='ikRPsolver')\n pc.parent(flexIkHandle[0], rollBallGrp)\n pc.setAttr((flexIkHandle[0] + '.visibility'),0)\n chUL.lockAndHide(flexIkHandle[0],'lock','trans rot scale vis')\n pc.poleVectorConstraint (kneeCtrl[0],flexIkHandle[0])\n pc.parentConstraint(flexJnt[3], flexCtrlZero[0], mo=True, skipTranslate=['x','y','z'], weight=1)\n\n #parent ik handle into foot control and lock the transforms\n pc.parent(hockIkHandle[0], flexCtrl[0])\n if stretch == 1: \n pc.parent(ikSpaceGrp, flexCtrl[0])\n pc.setAttr((hockIkHandle[0] + '.visibility'), 0)\n chUL.lockAndHide(hockIkHandle[0],'lock','trans rot scale vis')\n\n pc.parent(ankleIkHandle[0],rollBallGrp)\n pc.setAttr((ankleIkHandle[0] + '.visibility'), 0)\n chUL.lockAndHide(ankleIkHandle[0],'lock','trans rot scale vis')\n\n pc.parent(toeIkHandle[0],tapGrp)\n pc.setAttr((toeIkHandle[0] + '.visibility'),0)\n chUL.lockAndHide(toeIkHandle[0],'lock','trans rot scale vis')\n\n pc.parent(ballIkHandle[0], outPivotBankGrp)\n pc.setAttr((ballIkHandle[0] + '.visibility'),0)\n chUL.lockAndHide(ballIkHandle[0],'lock','trans rot scale vis')\n\n #create pole vector constraint for ikhandle\n pc.poleVectorConstraint(kneeCtrl[0], hockIkHandle[0])\n\n #create ikfk visibility connections\n pc.addAttr(switchCtrl[0], ln='autoVis', at='bool', keyable=True)\n pc.setAttr((switchCtrl[0] + '.autoVis'),1)\n pc.addAttr(switchCtrl[0], ln='fkVis', at='bool', keyable=True)\n pc.addAttr(switchCtrl[0], ln='ikVis', at='bool', keyable=True)\n pc.setAttr((switchCtrl[0] + '.autoVis'), keyable=False, channelBox=True)\n\n fkIkCnd = pc.createNode('condition', n=(name + side + 'fkIkVis_cnd'))\n fkIkRev = pc.createNode('reverse', n=(name + side + 'fkIkVis_rev'))\n pc.connectAttr((switchCtrl[0] + '.FK_IK'), (fkIkRev + '.inputX'))\n pc.connectAttr((fkIkRev + '.outputX'), (fkIkCnd + '.colorIfFalseR'))\n pc.connectAttr((switchCtrl[0] + '.FK_IK'), (fkIkCnd + '.colorIfFalseG'))\n pc.connectAttr((switchCtrl[0] + '.autoVis'), (fkIkCnd + '.firstTerm'))\n pc.connectAttr((switchCtrl[0] + '.fkVis'), (fkIkCnd + '.colorIfTrueG'))\n pc.connectAttr((switchCtrl[0] + '.ikVis'), (fkIkCnd + '.colorIfTrueR'))\n\n pc.connectAttr((fkIkCnd + '.outColorR'), (fkContrlList[0] + '.visibility'))\n pc.connectAttr((fkIkCnd + '.outColorR'), (FK[0] + '.visibility'))\n pc.connectAttr((fkIkCnd + '.outColorG'), (ankleCtrl[0] + '.visibility'))\n pc.connectAttr((fkIkCnd + '.outColorG'), (kneeCtrl[0] + '.visibility'))\n pc.connectAttr((fkIkCnd + '.outColorG'), (IK[0] + '.visibility'))\n\n #zero out all controllers and cleanup animation controller\n 
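A close read of the visibility network above: with autoVis at its default of 1, the condition node outputs colorIfFalse, so outColorR = 1 - FK_IK drives the FK controls and outColorG = FK_IK drives the IK controls, which is the expected automatic behaviour. In manual mode (autoVis = 0) the colorIfTrue inputs take over, and there `ikVis` feeds outColorR (the FK side) while `fkVis` feeds outColorG (the IK side), which reads as cross-wired. If that is unintended, swapping the two connections restores fkVis to FK and ikVis to IK:

```python
# Hypothetical fix -- only if the cross-wiring above is not deliberate.
pc.connectAttr((switchCtrl[0] + '.fkVis'), (fkIkCnd + '.colorIfTrueR'), force=True)
pc.connectAttr((switchCtrl[0] + '.ikVis'), (fkIkCnd + '.colorIfTrueG'), force=True)
```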
ankleConGrp = chUL.quickZeroOut(ankleCtrl[0])\n kneeConGrp = chUL.quickZeroOut(kneeCtrl[0])\n legOffsetConGrp = chUL.quickZeroOut(legOffsetCtrl[0])\n pc.select(cl=True)\n\n #create guide curve\n guideCurve = chUL.curveGuide(IK[middleIndex], kneeCtrl[0])\n pc.connectAttr((fkIkCnd + '.outColorG'), (guideCurve + '.visibility'))\n\n chUL.lockAndHide(ankleCtrl[0], 'locknHide', 'scale vis')\n chUL.lockAndHide(kneeCtrl[0], 'locknHide', 'rot scale vis')\n\n if stretchType == 'translate':\n for f in fkContrlList:\n chUL.lockAndHide(f, 'locknHide', 'scale vis')\n\n if stretchType == 'scale':\n for f in fkContrlList:\n chUL.lockAndHide(f, 'locknHide', 'trans vis')\n\n parentJoint = pc.listRelatives(hipJoint, parent=True)\n parentGrp = pc.listRelatives(fkContrlList[0], parent=True)\n chUL.lockAndHide(parentGrp[0], 'unLock', 'trans rot')\n pc.parent(parentGrp[0], partGrp)\n\n #add leg rig info for future update...\n charRigInfo = chUL.getcharRigInfoNode(name)\n pc.addAttr(charRigInfo, ln=(side+'legRig'), dt='string')\n pc.setAttr((charRigInfo + '.' + (side+'legRig')), (ankleCtrl[0]+' '+kneeCtrl[0]+' '+switchCtrl[0]+' '+fkContrlList[0]), type='string')\n\n #create space switch\n if world:\n if pc.attributeQuery('spineRig', n=charRigInfo, ex=True):\n spineRigPart = pc.getAttr(charRigInfo+'.spineRig')\n spineRigArray = List.seperate(spineRigPart)\n buildSpaceSwitchSetup(ankleCtrl[0], ankleConGrp[0],[spineRigArray[2],spineRigArray[0],(name+'worldB_ctrl')], ['hip','root','world'], 1)\n buildSpaceSwitchSetup(kneeCtrl[0], kneeConGrp[0],[ankleCtrl[0],spineRigArray[0],(name+'worldB_ctrl')], ['foot','root','world'], 1)\n pc.setAttr((ankleCtrl[0]+'.parent'), 2)\n\n #parent skeleton\n pc.select(cl=True)\n chUL.parentSkeletonTo(hipJoint, cleanGrp[1])\n\n # createskinJointset\n set = chUL.createSkinJointSet(name)\n sknJoints.extend(list);\n sknJoints.extend(sknJoints)\n chUL.addSkinJointToSet(set, sknJoints)\n\n #scale controls to global value\n pc.select(kneeCtrl,flexCtrl,legOffsetCtrl,switchCtrl,fkContrlList, r=True)\n cuUL.resizeCurves(None, 1, 1, 1, scale)\n pc.select(cl=True)\n \n if parentJoint[0] != '':\n pc.parentConstraint(parentJoint[0], parentGrp[0], mo=True, weight=1)\n pc.parentConstraint(parentJoint[0], legOffsetConGrp[0], mo=True, weight=1)\n\n #anim control cleanup\n chUL.lockAndHide(parentGrp[0],'lock','trans rot')\n chUL.lockAndHide(ankleConGrp[0],'lock','trans rot scale vis')\n chUL.lockAndHide(kneeConGrp[0],'lock','trans rot scale vis')\n chUL.lockAndHide(legOffsetCtrl[0],'locknHide','rot scale vis')\n chUL.lockAndHide(legOffsetConGrp[0],'lock','trans rot scale vis')\n chUL.lockAndHide(flexCtrl[0], 'locknHide', 'trans scale vis')\n chUL.lockAndHide(flexCtrlZero[0],'lock','trans rot scale vis')\n pc.delete(fkContrlList[len(fkContrlList)-1]) #deleting end fk control see line no.138 for detail","sub_path":"Core/Setup/BuildQuadLegSetup.py","file_name":"BuildQuadLegSetup.py","file_ext":"py","file_size_in_byte":24815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552312920","text":"# author: Vladimir Gligorov (vladimir.gligorov@cern.ch)\n# date: 02/2019\n#\n\nimport os, sys\nimport argparse\nimport ROOT\nfrom ROOT import gStyle\nfrom ROOT import gROOT\nfrom ROOT import TStyle\nfrom ROOT import gPad\n\nfrom array import array\n\nsys.path.append('../')\nfrom common.LHCbStyle import *\n\n\ndef getTrackers():\n return [\"Upstream\"] #]\n\n\nnbins = 3\n\nf = ROOT.TFile.Open(\"../../../plotsfornote_root/momentum_resolution.root\", 
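Near the skin-set creation above, `sknJoints.extend(sknJoints)` appends the list to itself, so every joint is registered twice (it looks like a leftover from the earlier `sknJoints.extend(twistJoints)` pattern), and `list` / `set` shadow the builtins for the rest of the function. A hedged cleanup, with `jointChain` standing in for the variable the record calls `list`:

```python
skin_set = chUL.createSkinJointSet(name)        # avoid shadowing `set`
skin_joints = sknJoints + jointChain            # each joint exactly once
chUL.addSkinJointToSet(skin_set, skin_joints)
```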
\"read\")\n\nsetLHCbStyle()\n\ntrackers = getTrackers()\n\ncanvas = ROOT.TCanvas(\"perbinp\", \"perbinp\", 800, 800)\ncanvas.Divide(2, 4)\ncanvas.cd()\n\npaves = {}\n\nfor tracker in trackers:\n for i in range(1, nbins + 1):\n canvas.cd(i)\n plot = f.Get(tracker + \"/momentum_resolution_py;\" + str(i))\n plot.DrawCopy(\"\")\n paves[str(i)] = ROOT.TPaveText(0.65, 0.65, 0.9, 0.9, \"NDC\")\n paves[str(i)].SetFillStyle(0)\n paves[str(i)].SetFillColor(0)\n paves[str(i)].SetBorderSize(0)\n paves[str(i)].AddText(\"Bin \" + str(i))\n paves[str(i)].Draw()\n\ncanvas.SaveAs(\"../../../plotsfornote/\" + tracker + \"MomResByBinOfP.pdf\")\n","sub_path":"checker/plotting/tracking/draw_momentum_resolution_per_bin.py","file_name":"draw_momentum_resolution_per_bin.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410262480","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nGeneral description\n-------------------\n\nYou should have understood the basic_example to understand this one.\n\nThis is an example to show how the label attribute can be used with tuples to\nmanage the results of large energy system. Even though, the feature is\nintroduced in a small example it is made for large system.\n\nIn small energy system you normally address the node, you want your results\nfrom, directly. In large systems you may want to group your results and collect\nall power plants of a specific region or pv feed-in of all regions.\n\nTherefore you can use named tuples as label. In a named tuple you need to\nspecify the fields:\n\n>>> label = namedtuple('solph_label', ['region', 'tag1', 'tag2'])\n\n>>> pv_label = label('region_1', 'renewable_source', 'pv')\n>>> pp_gas_label = label('region_2', 'power_plant', 'natural_gas')\n>>> demand_label = label('region_3', 'electricity', 'demand')\n\nYou always have to address all fields but you can use empty strings or None as\nplace holders.\n\n>>> elec_bus = label('region_4', 'electricity', '')\n>>> print(elec_bus)\nsolph_label(region='region_4', tag1='electricity', tag2='')\n\n>>> elec_bus = label('region_4', 'electricity', None)\n>>> print(elec_bus)\nsolph_label(region='region_4', tag1='electricity', tag2=None)\n\nNow you can filter the results using the label or the instance:\n\n>>> for key, value in results.items(): # Loop results (keys are tuples!)\n... if isinstance(key[0], solph.Sink) & (key[0].label.tag2 == 'demand'):\n... print(\"elec demand {0}: {1}\".format(key[0].label.region,\n... 
value['sequences'].sum()))\n\nelec demand region_1: 3456\nelec demand region_2: 2467\n...\n\nIn the example below a subclass is created to define ones own string output.\nBy default the output of a namedtuple is `field1=value1, field2=value2,...`:\n\n>>> print(str(pv_label))\nsolph_label(region='region_1', tag1='renewable_source', tag2='pv')\n\nWith the subclass we created below the output is different, because we defined\nour own string representation:\n\n>>> new_pv_label = Label('region_1', 'renewable_source', 'pv')\n>>> print(str(new_pv_label))\nregion_1_renewable_source_pv\n\nYou still will be able to get the original string using `repr`:\n\n>>> print(repr(new_pv_label))\nLabel(tag1='region_1', tag2='renewable_source', tag3='pv')\n\nThis a helpful adaption for automatic plots etc..\n\nAfterwards you can use `format` to define your own custom string.:\n>>> print('{0}+{1}-{2}'.format(pv_label.region, pv_label.tag2, pv_label.tag1))\nregion_1+pv-renewable_source\n\nData\n----\nbasic_example.csv\n\n\nInstallation requirements\n-------------------------\n\nThis example requires the version v0.3.x of oemof. Install by:\n\n pip install 'oemof>=0.3,<0.4'\n\nOptional to see the plots:\n\n pip install matplotlib\n\n\"\"\"\n\n# ****************************************************************************\n# ********** PART 1 - Define and optimise the energy system ******************\n# ****************************************************************************\n\n###############################################################################\n# imports\n###############################################################################\nfrom collections import namedtuple\n\n# Default logger of oemof\nfrom oemof.tools import logger\nfrom oemof.tools import helpers\n\nimport oemof.solph as solph\nimport oemof.outputlib as outputlib\n\nimport logging\nimport os\nimport pandas as pd\nimport pprint as pp\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n plt = None\n \n# Subclass of the named tuple with its own __str__ method.\n\n\nclass Label(namedtuple('solph_label', ['tag1', 'tag2', 'tag3'])):\n __slots__ = ()\n\n def __str__(self):\n \"\"\"The string is used within solph as an ID, so it hast to be unique\"\"\"\n return '_'.join(map(str, self._asdict().values()))\n\n\nsolver = 'cbc' # 'glpk', 'gurobi',....\ndebug = False # Set number_of_timesteps to 3 to get a readable lp-file.\nnumber_of_time_steps = 24*7*8\nsolver_verbose = False # show/hide solver output\n\n# initiate the logger (see the API docs for more information)\nlogger.define_logging(logfile='oemof_example.log',\n screen_level=logging.INFO,\n file_level=logging.DEBUG)\n\nlogging.info('Initialize the energy system')\ndate_time_index = pd.date_range('1/1/2012', periods=number_of_time_steps,\n freq='H')\n\nenergysystem = solph.EnergySystem(timeindex=date_time_index)\n\n# Read data file\nfilename = os.path.join(os.path.dirname(__file__), 'basic_example.csv')\ndata = pd.read_csv(filename)\n\n##########################################################################\n# Create oemof object\n##########################################################################\n\nlogging.info('Create oemof objects')\n\n# The bus objects were assigned to variables which makes it easier to connect\n# components to these buses (see below).\n\n# create natural gas bus\nbgas = solph.Bus(label=Label('bus', 'gas', None))\n\n# create electricity bus\nbel = solph.Bus(label=Label('bus', 'electricity', None))\n\n# adding the buses to the energy 
system\nenergysystem.add(bgas, bel)\n\n# create excess component for the electricity bus to allow overproduction\nenergysystem.add(solph.Sink(label=Label('sink', 'electricity', 'excess'),\n inputs={bel: solph.Flow()}))\n\n# create source object representing the natural gas commodity (annual limit)\nenergysystem.add(solph.Source(\n label=Label('source', 'gas', 'commodity'), outputs={bgas: solph.Flow(\n nominal_value=29825293, summed_max=1)}))\n\n# create fixed source object representing wind pow er plants\nenergysystem.add(solph.Source(\n label=Label('ee_source', 'electricity', 'wind'), outputs={bel: solph.Flow(\n actual_value=data['wind'], nominal_value=1000000, fixed=True)}))\n\n# create fixed source object representing pv power plants\nenergysystem.add(solph.Source(\n label=Label('source', 'electricity', 'pv'), outputs={bel: solph.Flow(\n actual_value=data['pv'], nominal_value=582000, fixed=True)}))\n\n# create simple sink object representing the electrical demand\nenergysystem.add(solph.Sink(\n label=Label('sink', 'electricity', 'demand'), inputs={bel: solph.Flow(\n actual_value=data['demand_el'], fixed=True, nominal_value=1)}))\n\n# create simple transformer object representing a gas power plant\nenergysystem.add(solph.Transformer(\n label=Label('power plant', 'electricity', 'gas'),\n inputs={bgas: solph.Flow()},\n outputs={bel: solph.Flow(nominal_value=10e10, variable_costs=50)},\n conversion_factors={bel: 0.58}))\n\n# create storage object representing a battery\nstorage = solph.components.GenericStorage(\n nominal_storage_capacity=10077997,\n label=Label('storage', '', 'battery'),\n inputs={bel: solph.Flow(nominal_value=10077997/6)},\n outputs={bel: solph.Flow(nominal_value=10077997/6,\n variable_costs=0.001)},\n loss_rate=0.00, initial_storage_level=None,\n inflow_conversion_factor=1, outflow_conversion_factor=0.8,\n)\n\nenergysystem.add(storage)\n\n##########################################################################\n# Optimise the energy system and plot the results\n##########################################################################\n\nlogging.info('Optimise the energy system')\n\n# initialise the operational model\nmodel = solph.Model(energysystem)\n\n# This is for debugging only. It is not(!) necessary to solve the problem and\n# should be set to False to save time and disc space in normal use. 
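A quick check of why the restore section below can look up `energysystem.groups["storage__battery"]`: the Label.__str__ defined above joins the three fields with underscores, and the battery's middle tag is an empty string, hence the double underscore:

```python
lbl = Label('storage', '', 'battery')
print(str(lbl))    # storage__battery  (empty tag2 leaves '__' in the middle)
print(repr(lbl))   # Label(tag1='storage', tag2='', tag3='battery')
```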
For\n# debugging the timesteps should be set to 3, to increase the readability of\n# the lp-file.\nif debug:\n    filename = os.path.join(\n        helpers.extend_basic_path('lp_files'), 'basic_example.lp')\n    logging.info('Store lp-file in {0}.'.format(filename))\n    model.write(filename, io_options={'symbolic_solver_labels': True})\n\n# if solver_verbose is true, solver messages will be displayed\nlogging.info('Solve the optimization problem')\nmodel.solve(solver=solver, solve_kwargs={'tee': solver_verbose})\n\nlogging.info('Store the energy system with the results.')\n\n# The processing module of the outputlib can be used to extract the results\n# from the model and transfer them into a homogeneous structured dictionary.\n\n# add results to the energy system to make it possible to store them.\nenergysystem.results['main'] = outputlib.processing.results(model)\nenergysystem.results['meta'] = outputlib.processing.meta_results(model)\n\n# The default path is the '.oemof' folder in your $HOME directory.\n# The default filename is 'es_dump.oemof'.\n# You can omit the attributes (as None is the default value) for testing cases.\n# You should use unique names/folders for valuable results to avoid\n# overwriting.\n\n# store energy system with results\nenergysystem.dump(dpath=None, filename=None)\n\n# ****************************************************************************\n# ********** PART 2 - Processing the results *********************************\n# ****************************************************************************\n\nlogging.info('**** The script can be divided into two parts here.')\nlogging.info('Restore the energy system and the results.')\nenergysystem = solph.EnergySystem()\nenergysystem.restore(dpath=None, filename=None)\n\n# define an alias for shorter calls below (optional)\nresults = energysystem.results['main']\nprint(energysystem.groups.keys())\nstorage = energysystem.groups[\"storage__battery\"]\n\nprint('********* LABEL *********')\nprint(repr(storage.label))\nprint(str(storage.label))\n\n# print a time slice of the state of charge\nprint('')\nprint('********* State of Charge (slice) *********')\nprint(results[(storage, None)]['sequences']['2012-02-25 08:00:00':\n                                            '2012-02-26 15:00:00'])\nprint('')\nprint(str(storage.label))\nprint(type(storage))\n# get all variables of a specific component/bus\n# If you use the class the columns/index will be classes.\ncustom_storage = outputlib.views.node(results, storage)\n\n# If you use a string the columns/index will be strings.\nelectricity_bus = outputlib.views.node(results, \"bus_electricity_None\")\n\n# plot the time series (sequences) of a specific component/bus\nif plt is not None:\n    custom_storage['sequences'].plot(kind='line', drawstyle='steps-post')\n    plt.show()\n    electricity_bus['sequences'].plot(kind='line', drawstyle='steps-post')\n    plt.show()\n\n# print the solver results\nprint('********* Meta results *********')\npp.pprint(energysystem.results['meta'])\nprint('')\n\n# print the sums of the flows around the electricity bus\nprint('********* Main results *********')\nprint(electricity_bus['sequences'].sum(axis=0))\n","sub_path":"oemof_examples/oemof.solph/v0.3.x/basic_example/basic_example_tuple_as_label.py","file_name":"basic_example_tuple_as_label.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"88869167","text":"def readInt(file):\n\tline = file.readline()\n\treturn int(line)\nif __name__ == \"__main__\":\n\twith open('pancakes.in') 
as f:\n\t\tn = readInt(f)\n\t\tfor i in range(1, n + 1):\n\t\t\tstack = f.readline()\n\t\t\tstack = stack[:-1] + \"+\"\n\t\t\thadHappy = False\n\t\t\thadSad = False\n\t\t\tres = 0\n\t\t\tfor character in list(stack):\n\t\t\t\tif character == '+':\n\t\t\t\t\tif hadSad:\n\t\t\t\t\t\tres = res + 1\n\t\t\t\t\t\tif hadHappy:\n\t\t\t\t\t\t\tres = res + 1\n\t\t\t\t\thadSad = False\n\t\t\t\t\thadHappy = True\n\t\t\t\telse:\n\t\t\t\t\thadSad = True\n\t\t\tprint (\"Case #\"+ str(i) + \": \" + str(res))","sub_path":"solutions_5634697451274240_0/Python/nonreviad/revengeofthepancakes.py","file_name":"revengeofthepancakes.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"213351270","text":"# Traversal means visiting every item of the data once; traversal works on iterable objects\n# while loop / for .. in loop\n\nkillers = ['李白','兰陵王','韩信','赵信','阿珂','孙悟空']\n\n# The essence of the for .. in loop is to keep calling the next method to fetch the next item\nfor killer in killers:\n    print(killer)\n\ni = 0\nwhile i < len(killers):\n    print(killers[i])\n    i += 1\n","sub_path":"06.列表的使用/04.列表的遍历.py","file_name":"04.列表的遍历.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"6435440","text":"from init_script import *\nfrom fpga_lib.analysis import fit\n\nclass T1(FPGAExperiment):\n    points = IntParameter(101)\n    t_max = FloatParameter(1e5)\n    ro_drive_amp = FloatParameter(0)\n    fit_func = 'exp_decay'\n    fit_fmt = {'tau': ('%.1f us', 1e3)}\n    loop_delay = IntParameter(1e6)\n    n_repeat = IntParameter(1)\n    bin_size = IntParameter(0)\n\n    def sequence(self):\n        with scan_length(0, self.t_max, self.points, axis_scale=self.n_repeat) as dynlen:\n            sync()\n            system.cool_qubit()\n            sync()\n            qubit.flip()\n            sync()\n            if self.ro_drive_amp:\n                for _ in range(self.n_repeat):\n                    readout.constant_pulse(dynlen, self.ro_drive_amp, sigma_t=100)\n                    delay(1000)\n            else:\n                for _ in range(self.n_repeat):\n                    delay(dynlen)\n            readout()\n            delay(self.loop_delay)\n\n    def update(self):\n        qubit.t1 = self.fit_params['tau']\n","sub_path":"fpga_exp/T1.py","file_name":"T1.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"529990942","text":"import os\nimport re\n\n# Get the bz model name, table name and camel-case name\ngroup = {}\npath = 'model/'\nfor root, dirs, files in os.walk(path):\n    for ff in files:\n        text_io = open(os.path.join(root,ff), 'r', encoding='utf-8')\n        for line in text_io:\n            if(line.__contains__('@TableName')):\n                # print(line)\n                pattern = re.compile('\"(.*)\"')\n                tableName = pattern.findall(line)[0]\n                arr = []\n                for ar in tableName.split('_'):\n                    arr.append(ar.capitalize())\n                # print(''.join(arr))\n                group[''.join(arr)] = [ff[:-5], tableName]\n    # print(group)\n\n# Get the generator mapper files\n# mapperPath = 'F:\\\\mapper\\\\'\nmapperPath = 'mapper/'\nfor root, dirs, files in os.walk(mapperPath):\n    for ff in files:\n        if(ff.endswith('.java')):\n            fs = open(os.path.join(root, ff), 'r+', encoding='utf-8')\n            fsd = ''\n            for ll in fs:\n                if(ll.__contains__('public interface')):\n                    if re.search(ll.split( )[2], ll):\n                        print(ll.replace(ll.split( )[2], ff[:-5]))\n                        # ll = re.sub(ll.split( )[2], ff[:-5], ll)\n                        # fs.write(ll.replace(ll.split( )[2], ff[:-5]))\n                        # fs.write(ll.replace(ll.split( )[2], ff[:-5]))\n                        # print(ll.split( )[2], ff[:-5])\n\n    # print(ff[:-11], group.get(ff[:-11]))\n    # if(group.get(ff[:-11])):\n    #     os.rename(os.path.join(root, ff), os.path.join(root, group.get(ff[:-11])[0] + \"Dao.java\"))\n    # print(ff[:-5])\n\n    # 
elif(ff.endswith('.xml')):\n    #     # print(ff)\n    #     fs = open(os.path.join(root,ff), encoding='utf-8')\n    #     for ds in fs:\n    #         if(ds.__contains__(ff[:-5])):\n    #             # ds.replace(ff[:-5], group[ff[:-5]][1])\n    #             if(group.get(ff[:-10])):\n    #                 print(ff[:-10], group.get(ff[:-10])[0]+'Dao')\n    #                 os.rename(os.path.join(root, ff), os.path.join(root, group.get(ff[:-10])[0] + \"Dao.java\"))\n    #             print(ds)\n\n\n\n\n\n","sub_path":"daoxml.py","file_name":"daoxml.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"292050146","text":"\"\"\"\nCommon helper functions\n\"\"\"\n\nimport os\nimport re\nimport time\nimport uuid\nimport hashlib\nimport base64\nimport random\nimport string\nimport datetime\nimport xlwt\nfrom functools import wraps\nfrom inspect import signature\nfrom luk.config.default import DOWN_DIR\n\n# pylint: disable=all\n\n\ndef fmt_datetime(dt, fmt='%F %T'):\n    return dt.strftime(fmt)\n\n\ndef get_current_time(fmt=\"%F %T\"):\n    \"\"\" Get the current time as a string\"\"\"\n    now = datetime.datetime.now()\n    return fmt_datetime(now, fmt)\n\n\ndef get_uuid(length=16):\n    uid = str(uuid.uuid1()).encode('utf8')\n    return hashlib.md5(uid).hexdigest()[:length]\n\n\ndef get_code(length=6, code_type='int'):\n    \"\"\" Get an SMS verification code\n    :param length: length of the returned code\n    :param code_type: type of the returned code\n    \"\"\"\n    alpha = string.digits if code_type == 'int' else string.ascii_lowercase\n    code = random.sample(alpha, length)\n    code = ''.join(code)\n    return code\n\n\ndef sha1_hash(text):\n    \"\"\" SHA-1 hash\"\"\"\n    text = to_bytes(text)\n    return hashlib.sha1(text).hexdigest()\n\n\ndef encode(text):\n    \"\"\" base64 encode\"\"\"\n    text = to_bytes(text)\n    return base64.b64encode(text)\n\n\ndef decode(text):\n    \"\"\" base64 decode\"\"\"\n    text = to_bytes(text)\n    return base64.b64decode(text)\n\n\ndef get_time():\n    \"\"\" Get the current timestamp\"\"\"\n    return int(time.time())\n\n\ndef to_bytes(text):\n    \"\"\" Convert to bytes\"\"\"\n    if isinstance(text, str):\n        text = text.encode('utf8')\n    return text\n\n\ndef to_string(text):\n    \"\"\" Convert to string\"\"\"\n    if isinstance(text, bytes):\n        text = text.decode('utf8', 'ignore')\n    return text\n\n\ndef line_to_camel(text):\n    \"\"\" Convert snake_case to camelCase\"\"\"\n    pattern = re.compile(r'(_\\w)')\n    sub = re.sub(pattern, lambda _map: _map.group(1)[1].upper(), text)\n    return sub\n\n\ndef camel_to_line(text):\n    \"\"\" Convert camelCase to snake_case\"\"\"\n    pattern = re.compile(r'([a-z]|\\d)([A-Z])')\n    sub = re.sub(pattern, r'\\1_\\2', text).lower()\n    return sub\n\n\ndef make_excel(title, headers, datas, filename='info.xls'):\n    \"\"\" Create an Excel file\"\"\"\n    wb = xlwt.Workbook()\n    ws = wb.add_sheet(title)\n    for idx, header in enumerate(headers):\n        ws.write(0, idx, header)\n\n    for row, data in enumerate(datas, 1):\n        for col, d in enumerate(data):\n            ws.write(row, col, d)\n\n    if not os.path.exists(DOWN_DIR):\n        os.mkdir(DOWN_DIR)\n    path = os.path.join(DOWN_DIR, filename)\n    wb.save(path)\n    return path\n\n\ndef typeassert(func):\n    \"\"\" Type checking\n    \n    @typeassert\n    def hello(x: int, y: str) -> list:\n        ret = [x, y]\n        return ret\n    \"\"\"\n    sig = signature(func)\n    annotations = func.__annotations__\n    return_type = annotations.pop('return', None)\n    bound_types = sig.bind_partial(**annotations).arguments\n    @wraps(func)\n    def wrapper(*args, **kw):\n        bound_values = sig.bind(*args, **kw)\n        for name, value in bound_values.arguments.items():\n            if name in bound_types:\n                if not isinstance(value, bound_types[name]):\n                    raise TypeError(\n                        \"Argument '{}' must be {}\".format(name, bound_types[name])\n                    )\n        \n        ret = func(*args, **kw)\n        if return_type:\n            if not isinstance(ret, 
return_type):\n                raise TypeError(\n                    \"Return Type must be {}\".format(return_type)\n                )\n        return ret\n    return wrapper\n\n\ndef url_route(url):\n    \"\"\" Parse a URL and produce the OSS relative path\"\"\"\n    route = re.sub(r'(http.*?\\.com/)', '', url)\n    return route\n\nif __name__ == '__main__':\n    print(camel_to_line('hello'))","sub_path":"luk/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"133326425","text":"import datetime\n# month_list = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\ndays_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n# days_of_week = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']\n\ndef overlap(month, year):\n    # daydict = [[] for i in range(7)]\n    sundays = []\n    # fridays = []\n    # lc = []\n    # off = []\n\n    if is_leap(year):\n        days_in_month[1] = 29\n\n    num_days = days_in_month[month-1]\n    day_one = datetime.date(year, month, 1)\n    start_day = day_one.isoweekday()\n\n    start = start_day\n    if start_day >= 7:\n        start_day = 0\n        start = 0\n    # print(start)\n    first_sun = 1 if start == 0 else 1 + (7 - start)\n    first_fri = 7 if start == 6 else 1 + (5 - start)\n\n    # lc.append(first_fri)\n    # lc.append(first_fri + 10)\n\n    lc = first_fri + 10\n\n    for d in range(first_sun, num_days + 1, 7):\n        sundays.append(d)\n\n    off = sundays[-2]\n    # for d in range(first_fri, num_days + 1, 7):\n    #     fridays.append(d)\n\n    # for day in range(1, num_days+1):\n    #     daydict[start_day].append(day)\n    #     start_day += 1\n    #     if start_day >= 7:\n    #         start_day = 0\n    # print(daydict)\n\n    # print(sundays)\n    # print(fridays)\n\n    # print(lc, off)\n    if off < lc:\n        return 1\n    else:\n        return 0\n\ndef is_leap(year):\n    if year % 4 == 0:\n        if year % 100 == 0:\n            if year % 400 == 0:\n                return True\n            else:\n                return False\n        else:\n            return True\n    else:\n        return False\n\n\nt = int(input())\nfor case in range(t):\n    m1, y1 = list(map(int, input().split()))\n    m2, y2 = list(map(int, input().split()))\n\n    ov = 0\n\n    # input_year = input()\n    # year = int(input_year)\n    # input_month = input()\n    # month = int(input_month)\n    if y2 == y1:\n        for month in range(m1, m2 + 1):\n            ov += overlap(month, y1)\n\n    else:\n\n        for month in range(m1, 13):\n            # print(month, y1)\n            ov += overlap(month, y1)\n\n        for year in range(y1+1, y2):\n            for month in range(1, 13):\n                # print(month, year)\n                ov += overlap(month, year)\n\n        for month in range(1, m2+1):\n            ov += overlap(month, y2)\n            # print(month, y2)\n    \n    print(ov)\n\n\n","sub_path":"cal.py","file_name":"cal.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"378625006","text":"import os\nHERE = os.path.dirname(__file__)\n\nTEST_RUNNER = 'django_nose.runner.NoseTestSuiteRunner'\n\nSECRET_KEY = 'anything'\n\nDATABASES = {\n    'default': {\n        'NAME': ':memory:',\n        'ENGINE': 'django.db.backends.sqlite3',\n    }\n}\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n        'LOCATION': 'unique-snowflake'\n    }\n}\n\nTEMPLATE_DIRS = (\n    os.path.join(HERE, 'templates'),\n)\n\nINSTALLED_APPS = (\n    'fancy_cache',\n    'django_nose',\n)\n\n\nROOT_URLCONF = 'fancy_tests.tests.urls'\n","sub_path":"fancy_tests/tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"553401590","text":"\"\"\"\nContains a bunch of constants and program-wide values.\n\"\"\"\n\nWIZARD = True\n\nDEFAULT_MAP_X_WIDTH = 80 #Default width of the entire screen\nDEFAULT_MAP_Y_WIDTH = 24 #Default height of the entire screen\n\nDEFAULT_DIMENSIONS = (DEFAULT_MAP_X_WIDTH, DEFAULT_MAP_Y_WIDTH)\n\nMAP_DIMENSIONS = (60, 17)\nMESSAGES_DIMENSIONS = (80, 7)\nSTATUS_DIMENSIONS = (20, 17) #column 1 intentionally left blank\n\nTURN_TICKS = 72\n\nfrom kb import kp\n\nDIRECTION_SWITCH = {\n    kp.NW: (-1, -1),\n    kp.W: (-1, 0),\n    kp.SW: (-1, 1),\n    kp.S: (0, 1),\n    kp.SE: (1, 1),\n    kp.E: (1, 0),\n    kp.NE: (1, -1),\n    kp.N: (0, -1)}\n\nRUN_DIRECTION_SWITCH = {\n    kp.RNW: (-1, -1),\n    kp.RW: (-1, 0),\n    kp.RSW: (-1, 1),\n    kp.RS: (0, 1),\n    kp.RSE: (1, 1),\n    kp.RE: (1, 0),\n    kp.RNE: (1, -1),\n    kp.RN: (0, -1)}\n\nimport symbol\n\nTRANSPARENT_GLYPH = symbol.Glyph(' ', (0, 0, 0))\n\n# Those symbols which represent the interior of a room, spaces which can be\n# easily seen through.\n# Put this in level instead!\n# OPEN_GLYPHS = set(['.'])\n\n# Those symbols which represent a square which is not easily seen through, but\n# which is somewhat transparent (like corridor spaces).\n# Put this in level instead!\n# SEMI_OPEN_GLYPHS = set(['#'])\n\nimport objid\n\nclass MapVars:\n    def __init__(self, mapDims):\n        self.dimensions = mapDims\n\nmv = MapVars((DEFAULT_MAP_X_WIDTH, DEFAULT_MAP_Y_WIDTH))\n\ndef getID():\n    \"\"\"\n    Returns an ID, guaranteed to be unique.\n    \"\"\"\n    return IDFactory.get()\n\nIDFactory = objid.ObjectIDFactory()\n\nfrom dude import MonsterFactory\n\ndef getMonsterFactory():\n    \"\"\"\n    Returns the primary monster factory.\n    \"\"\"\n    return monFactory\n\nmonFactory = MonsterFactory()\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"53250826","text":"#Python Template for ES2B4 Assignment - 2\n\n#Name: Vigneshwar Syamasundar\n#ID : U1527115\n#Assignment Section : A2_3\n\nfrom assignment2_a22fibImport import fibonacciSeries\n\nListElements = list(range(0,5010,10))\nprint(ListElements)\nn = (len(ListElements)) # The length of the sorted list of elements\nF = (fibonacciSeries(n)) # The fibSeries computed from n\np = len(F) # The length of the fibSeries series\nk = F[p-1] # The last element in the fibSeries\nprint(ListElements)\nprint(F)\ndef fibSA(x, p = p, k = k, F = F, offset = 0):\n\tprint(\"Remaining list = \" , ListElements[offset:])\n\tif (k == 0 ):\n\t\tprint(x, \"is not in the list\")\n\t\treturn\n\telif (F[p-2]+offset) > n -1:\n\t    p = p - 1\n\t    k = F[p-1]\n\t    fibSA(x , p = p, k = k, F = F, offset = offset)\n\telse:\n\t    ListElementsAtIndex = ListElements[F[p-2]+offset]\n\t    print(ListElementsAtIndex , \"compared to\", x)\n\t    if x == ListElementsAtIndex:\n\t        print(\"\")\n\t        print(\"x found at position \", (F[p-2]+offset))\n\t        print(\" \")\n\t    elif x < ListElementsAtIndex:\n\t        print(\"x must be lower\")\n\t        p = p - 1\n\t        k = F[p-1]\n\t        print(\"smaller p = \",p)\n\t        print(\"smaller k = \",k)\n\t        print(\"\")\n\t        fibSA(x , p = p, k = k, F = F, offset = offset)\n\t    elif x > ListElementsAtIndex:\n\t        print(\"x must be higher\")\n\t        offset += F[p-2]\n\t        p = p - 2\n\t        if p > 0:\n\t        \tk = F[p-1]\n\t        else:\n\t        \tk = 0\n\t        print(\"bigger p = \",p)\n\t        print(\"bigger k = \",k) \n\t        print(offset)\n\t        print(\"\")\n\t        fibSA(x , p = p, k = k, F = F, offset = 
offset)\n\nfibSA(5000)\n\n","sub_path":"assignment2_a2_3.py","file_name":"assignment2_a2_3.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"164250935","text":"import urllib.request\nimport urllib.parse\n\nurl = 'https://www.mygalgame.com/yufaqinggaolengmhuichangdeguojitianmixueyuanshenghuo.html'\n\n# POST data must be url-encoded bytes, not a plain dict\ndata = urllib.parse.urlencode({'e_secret_key': 'A665'}).encode('utf-8')\nreq = urllib.request.Request(url, data)\nreq.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0')\nresponse = urllib.request.urlopen(req)\nhtml = response.read().decode('utf-8')\n\nwith open('3.txt', 'w', encoding='utf-8') as f:\n    f.write(html)\n","sub_path":"图片/galgame.py","file_name":"galgame.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"239319961","text":"from confluent_kafka import Producer\n\n\ndef main():\n    producer = Producer({\n        'bootstrap.servers': 'localhost:9092',\n        'queue.buffering.max.messages': 500000\n    })\n    for i in range(10):\n        producer.produce('test-topic', (f'Hello {i}'))\n        producer.poll(0)\n    producer.flush()\n\n\nif __name__ == '__main__':\n    main()\n    print('Completed.')\n","sub_path":"python/basic_kafka/simple/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"583029314","text":"import pandas as pd\nimport pytest\n\nfrom trane.ops.aggregation_ops import (\n    AvgAggregationOp,\n    CountAggregationOp,\n    MajorityAggregationOp,\n    MaxAggregationOp,\n    MinAggregationOp,\n    SumAggregationOp,\n)\nfrom trane.utils.table_meta import TableMeta\n\n\n@pytest.fixture\ndef df():\n    df = pd.DataFrame({\"col\": [1, 2, 3, 4, 5]})\n    return df\n\n\n@pytest.fixture\ndef meta():\n    meta = TableMeta(\n        {\n            \"tables\": [\n                {\n                    \"fields\": [\n                        {\n                            \"name\": \"col\",\n                            \"type\": 
TableMeta.SUPERTYPE[TableMeta.TYPE_FLOAT],\n \"subtype\": TableMeta.TYPE_FLOAT,\n },\n ],\n },\n ],\n },\n )\n return meta\n\n\n@pytest.mark.parametrize(\n \"agg_operation,expected_output\",\n [\n (CountAggregationOp, 5),\n (SumAggregationOp, 15),\n (AvgAggregationOp, 3.00),\n (MaxAggregationOp, 5),\n (MinAggregationOp, 1),\n (MajorityAggregationOp, str(1)),\n ],\n)\ndef test_agg_ops(df, meta, agg_operation, expected_output):\n op = agg_operation(\"col\")\n op.op_type_check(meta)\n output = op(df)\n assert output == expected_output\n\n\n@pytest.mark.parametrize(\n \"agg_operation\",\n [\n (SumAggregationOp),\n (AvgAggregationOp),\n (MaxAggregationOp),\n (MinAggregationOp),\n (MajorityAggregationOp),\n ],\n)\ndef test_sum_agg_none(agg_operation):\n op = agg_operation(\"col\")\n df = pd.DataFrame()\n output = op(df)\n assert output is None\n","sub_path":"tests/ops/test_aggregation_ops.py","file_name":"test_aggregation_ops.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"561982725","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\ndef jacobi_symbol(m, n, s = 1):\r\n '''\r\n recursive method for jacobi symbol in form (m / n)\r\n '''\r\n #print(m, n, s)\r\n \r\n #base cases\r\n if m == 1:\r\n return 1\r\n elif m % n == 0:\r\n return 0\r\n\r\n #property 1\r\n if m % n != m:\r\n return jacobi_symbol(m % n, n, s)\r\n\r\n #property 2\r\n elif m == 2:\r\n if n % 8 in (1, 7):\r\n return s\r\n elif n % 8 in (3, 5):\r\n return -s\r\n\r\n #property 3\r\n elif m % 2 == 0:\r\n return jacobi_symbol(2, n) * jacobi_symbol(m // 2, n, s)\r\n\r\n #property 4\r\n elif m % 4 == n % 4 == 3:\r\n return jacobi_symbol(n, m, -s)\r\n else:\r\n return jacobi_symbol(n, m, s)\r\n \r\n\r\nif __name__ == '__main__':\r\n for (m, n) in [\r\n #book example\r\n #(7411, 9283), # -1\r\n \r\n ##wiki examples\r\n #(1001, 9907), # -1\r\n #(19, 45), # 1\r\n \r\n #hw2\r\n (136, 457),\r\n (34333, 532789),\r\n (467827, 112233441),\r\n ]:\r\n s = jacobi_symbol(m, n)\r\n print('\\t'.join(map(str, (m, n, s))))","sub_path":"hw2/jacobi_symbol.py","file_name":"jacobi_symbol.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"370411028","text":"import torch\nfrom tqdm import tqdm, trange\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nfrom matplotlib import animation\n#Writer = animation.writers['pillow']\n#writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm, trange\nfrom ipywidgets import interact, fixed\nfrom IPython.display import HTML, display\n\nimport seaborn as sns\nffmpeg_path = 'C://Users//thoma//Documents//ffmpeg//FFmpeg//bin'#//bin'\n#C:\\Users\\thoma\\Documents\\ffmpeg\\FFmpeg\\bin\n#plt.rcParams['animation.ffmpeg_path'] = ffmpeg_path\n\n\nN = 100\nmin_x, max_x = -3, 1\n\nclass AnimatepEnsemble(object):\n def __init__(self,X_obs,y_obs,X_true,y_true,p=0.00, decay=0.001, non_linearity=torch.nn.LeakyReLU, n_models=10, model_list=None,u_iters=100, l2=1, n_std=4, title=\"\",dataset_lenght=None):\n #super(AnimateBootstrapEnsemble, self).__init__(p, decay, non_linearity, n_models, model_list,dataset_lenght)\n\n self.losses = []\n self.n_std = n_std\n self.u_iters = u_iters\n self.l2 = l2\n self.title = title\n \n #\n self.X_obs = X_obs\n self.y_obs = y_obs\n self.X_true = X_true\n self.y_true = y_true\n \n ## plot items\n self.fig, self.ax0 = 
plt.subplots(1,1)\n self.ax0.set_ylim([15, -15])\n \n self.ax0.plot(self.X_obs, self.y_obs, ls=\"none\", marker=\"o\", color=\"0.1\", alpha=0.5, label=\"observed\")\n self.ax0.plot(self.X_true, self.y_true, ls=\"-\", color=\"r\", label=\"true\")\n self.ln_mean, = self.ax0.plot([], [], ls=\"-\", color=\"b\", label=\"mean\")\n \n self.loss_text = self.ax0.set_title('', fontsize=15)\n \n self.fill_stds = []\n for i in range(self.n_std):\n fill_t = self.ax0.fill_between(\n [], [], [],\n color=\"b\",\n alpha=0.5**(i+1)\n )\n self.fill_stds.append(fill_t)\n \n self.ax0.legend(loc=\"upper left\")\n \n\n\n def init_plot(self):\n self.ln_mean.set_data([], [])\n self.loss_text.set_text('')\n return self.ln_mean, self.loss_text\n \n def animate_plot(self, i,iters=50):\n for j in range(iters):\n loss = self.fit_ensemble(self.X_obs,self.y_obs)\n self.losses.append(loss)\n \n #self.loss_text.set_text('{}, loss[{}]={:.3f}'.format(self.title, (i+1)*100, loss))\n \n y_mean, y_std = self.ensemble_uncertainity_estimate(\n self.X_true, self.u_iters, l2=self.l2,\n range_fn=range\n )\n \n self.ln_mean.set_data(self.X_true, y_mean)\n for i in range(self.n_std):\n self.fill_stds[i].remove()\n self.fill_stds[i] = self.ax0.fill_between(\n self.X_true,\n y_mean - y_std * ((i+1)/2),\n y_mean + y_std * ((i+1)/2),\n color=\"b\",\n alpha=0.5**(i+1)\n )\n return [self.ln_mean, self.loss_text] + self.fill_stds\n \n \n def train(self, iters, interval=100):\n anim = animation.FuncAnimation(\n self.fig, self.animate_plot, init_func=self.init_plot,\n frames=range(iters), interval=interval, blit=True)\n return HTML(anim.to_html5_video())","sub_path":"uncertanty/src/base_animators.py","file_name":"base_animators.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591208350","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom django.core import serializers\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.hashers import make_password\nfrom django.http import HttpResponse\nfrom apps.users.models import User, SeccionPorProfesor, AlumnoPorSeccion\n\nfrom .forms import UserSignupForm, UserAuthenticationForm\nfrom apps.carreras.models import Carrera, Semestre\n\n\ndef signup(request):\n form = UserSignupForm(request.POST or None)\n carreras = Carrera.objects.all()\n\n if form.is_valid():\n form.save()\n return redirect('/signin/?r=success')\n\n return render(request, 'users/signup.html', locals())\n\n\ndef signout(request):\n logout(request)\n return redirect('/')\n\n\ndef signin(request):\n form = UserAuthenticationForm(request.POST or None)\n\n if form.is_valid():\n login(request, form.get_user())\n\n if request.user.is_superuser:\n return redirect('/admin/')\n elif request.user.type_user == 'alumno':\n return redirect('alumno_profile', username=request.user.username)\n elif request.user.type_user == 'profesor':\n return redirect('profesor_profile', username=request.user.username)\n else:\n return redirect('/')\n\n return render(request, 'users/signin.html', locals())\n\n\n@login_required(login_url='/signin/')\ndef first_user_auth(request):\n carreras = Carrera.objects.all()\n\n return render(request, 'users/complete_profile.html', locals())\n\n\n@login_required(login_url='/signin/')\ndef go_to_user_profile(request, type_user, username):\n if type_user == 'alumno':\n return 
redirect('alumno_profile', username=username)\n    elif type_user == 'profesor':\n        return redirect('profesor_profile', username=username)\n\n\n@login_required(login_url='/signin/')\ndef alumno_profile(request, username):\n    user = User.objects.get(username=username)\n    carreras = Carrera.objects.all()\n\n    # Sections the student is enrolled in\n    secciones = AlumnoPorSeccion.objects.filter(alumno_id=user)\n\n    return render(request, 'users/alumno_profile.html', locals())\n\n\n@login_required(login_url='/signin/')\ndef update_alumno_profile(request, id_user):\n    user = User.objects.get(id=id_user)\n\n    if request.method == 'POST':\n        user.first_name = request.POST.get('first_name')\n        user.last_name = request.POST.get('last_name')\n        user.email = request.POST.get('email')\n        user.career = Carrera.objects.get(id=request.POST.get('career'))\n        if (request.POST.get('password') != ''):\n            user.set_password(request.POST.get('password'))\n        user.save()\n\n    return redirect('alumno_profile', username=user.username)\n\n\n@login_required(login_url='/signin/')\ndef update_profesor_profile(request, id_user):\n    user = User.objects.get(id=id_user)\n\n    if request.method == 'POST':\n        user.first_name = request.POST.get('first_name')\n        user.last_name = request.POST.get('last_name')\n        user.email = request.POST.get('email')\n        user.semblance = request.POST.get('semblance')\n        user.seminar_description = request.POST.get('seminar_description')\n        if (request.POST.get('password') != ''):\n            user.set_password(request.POST.get('password'))\n        user.save()\n\n    return redirect('profesor_profile', username=user.username)\n\n\n@login_required(login_url='/signin/')\ndef profesor_profile(request, username):\n    user = User.objects.get(username=username)\n    materias = user.classes.all()\n    secciones = SeccionPorProfesor.objects.filter(profesor_id=user)\n\n    return render(request, 'users/profesor_profile.html', locals())\n\n\n@login_required(login_url='/signin/')\ndef show_classes_by_career(request, username):\n    user = User.objects.get(username=username)\n\n    if user.type_user == 'alumno':\n        classes_by_career = Semestre.objects.filter(id=user.career.id)\\\n            .order_by(\"semestre\")\n\n    return render(request, 'users/materias_por_semestre.html', locals())\n\n\n# Returns the list of professors for a given course\n@login_required(login_url='/signin/')\ndef get_profesors_by_class(request, cve_materia):\n\n    secciones = SeccionPorProfesor.objects.filter(materia_id__clave=cve_materia)\n    results = {}\n\n    for seccion in secciones:\n        first_name = seccion.profesor_id.first_name\n        last_name = seccion.profesor_id.last_name\n        section = seccion.seccion\n        profesor_id = seccion.profesor_id.id\n        ID = seccion.id\n        results[seccion.profesor_id.username+\"_\"+section] = ({'seccion': section, 'profesor_fn': first_name,\n            'profesor_ln': last_name, 'profesor_id': profesor_id, 'ID': ID})\n    #data = serializers.serialize('json', results)\n\n    #users = User.objects.filter(classes__clave=cve_materia)\n    #results = []\n    #for user in users:\n    #    results.append(user)\n    #data = serializers.serialize('json', results)\n    return HttpResponse(json.dumps(results), content_type=\"application/json\")\n\n\n@login_required(login_url='/signin/')\ndef show_classes_by_profesor(request, username):\n    user = User.objects.get(username=username)\n    sec_dir = []\n\n    for clase in user.classes.all():\n        secciones_por_materia = SeccionPorProfesor.objects.filter(materia_id=clase.id).filter(profesor_id=user.id)\n\n        if secciones_por_materia:\n            for seccion in secciones_por_materia:\n                sec_dir.append(seccion)\n\n    if 
user.type_user == 'profesor':\n        classes = user.classes\n\n    #assert False\n\n    return render(request, 'users/clases_por_profesor.html', locals())\n\n@login_required(login_url='/signin/')\ndef seccion_profesor(request, username, materia, seccion):\n    return render(request, 'users/seccion_profesor.html', locals())\n\n\n@login_required(login_url='/signin/')\ndef save_alumno_seccion(request, seccionID, alumno):\n\n    alumno_fk = User.objects.get(username=alumno)\n    seccion_fk = SeccionPorProfesor.objects.get(id=seccionID)\n\n    # Check that the student does not register twice for the same course\n    existe_alumno = AlumnoPorSeccion.objects.filter(seccion_id__materia_id=seccion_fk.materia_id).filter(alumno_id=alumno_fk)\n\n    # Alternative: check that the student is not already registered in this exact section\n    # existe_alumno = AlumnoPorSeccion.objects.filter(seccion_id=seccion_fk).filter(alumno_id=alumno_fk)\n\n    if not existe_alumno:\n        alumno_registrado = AlumnoPorSeccion(seccion_id=seccion_fk,alumno_id=alumno_fk)\n        alumno_registrado.save()\n        results = {'status': True}\n    else:\n        results = {'status': False}\n\n    return HttpResponse(json.dumps(results), content_type=\"application/json\")\n","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"152544040","text":"import json\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    param = {}\n    param['username'] = \"Ученик Яндекс.Лицея\"\n    param['title'] = 'Домашняя страница'\n    return render_template('index.html', **param)\n\n\n@app.route('/odd_even')\ndef odd_even():\n    return render_template('odd_even.html', number=3)\n\n\n@app.route('/news')\ndef news():\n    with open(\"news.json\", \"rt\", encoding=\"utf8\") as f:\n        news_list = json.loads(f.read())\n    print(news_list)\n    return render_template('news.html', news=news_list)\n\n\n@app.route('/queue')\ndef queue():\n    return render_template('queue.html')\n\n\nif __name__ == '__main__':\n    app.run(port=8080, host='127.0.0.1')\n","sub_path":"2nd_year/WEB8. Шаблоны. 
flask-wtf/Samples/01_/01_.py","file_name":"01_.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221053107","text":"import telebot\nimport requests\nimport json\nimport API\nimport re\nimport datetime\nfrom telebot import types\n\nbot = telebot.TeleBot(API.token)\ndef user_keyboard():\n start_key = types.ReplyKeyboardMarkup()\n start_key.row('Найти бар поблизости')\n start_key.row('Оставить отзыв', 'Поставить оценку бару')\n return start_key\n\n@bot.message_handler(commands=['start'])\ndef help_com(message):\n bot.send_message(message.from_user.id, \"Выберите пункт\", reply_markup=user_keyboard())\n\n\n@bot.message_handler(content_types=['text'])\ndef key_answer(message):\n if message.text == 'Найти бар поблизости':\n bot.send_message(message.chat.id, 'Не забудь ввести радиус поиска (в метрах)')\n elif re.match(r'\\d{' + str(len(message.text)) + '}', message.text) != None:\n latitude = int(message.text) / (1000 * 111)\n longitude = int(message.text) / (1000 * 62.6)\n global part_of_requeuest\n part_of_requeuest = str(longitude) + ',' + str(latitude)\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_geo = types.KeyboardButton(text=\"Отправить местоположение\", request_location=True)\n button_home = types.KeyboardButton(text=\"Главное меню\")\n keyboard.add(button_geo)\n keyboard.add(button_home)\n bot.send_message(message.chat.id, 'Жду твое гео🗿', reply_markup=keyboard)\n elif message.text == 'Оставить отзыв':\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Оставить отзыв в чате\", url=\"https://t.me/beer_feedback\")\n keyboard.add(url_button)\n bot.send_message(message.chat.id, \"Чтобы оставить отзыв, просто ткни на кнопку!\", reply_markup=keyboard)\n elif message.text == 'Поставить оценку бару':\n bot.send_message(message.chat.id, \"Находится в разработке...\")\n bot.send_message(message.chat.id, \"Поддержи проект, чтобы мы перестали пить пиво и занялись проектом\")\n elif message.text == 'Главное меню':\n bot.send_message(message.from_user.id, \"Выберите пункт\", reply_markup=user_keyboard())\n else:\n bot.send_message(message.chat.id, \"Чел, не пори дичь. 
Го выпьем пивка 🍻\")\n\n\n@bot.message_handler(content_types=['location'])\ndef find_bar(message):\n if message.location is not None:\n start_key = types.ReplyKeyboardMarkup()\n start_key.row('Найти бар поблизости')\n start_key.row('Оставить отзыв', 'Поставить оценку бару')\n req = 'https://search-maps.yandex.ru/v1/?apikey=' + API.yand + '&text=бар&lang=ru_RU&ll=' + str(\n message.location.longitude) + ',' + str(message.location.latitude) + '&spn=' + part_of_requeuest + '&rspn=1'\n user_name = str(message.from_user.username)\n response = requests.get(req)\n js_bars = response.text\n js_bars = json.loads(js_bars)\n cord = str(message.location.latitude) + ',' + str(message.location.longitude)\n file = open('logfile.txt','w')\n file.write('Date: ' + str(datetime.datetime.today()) + ' User: ' + user_name + '\\n' + 'Link: ' + req + '\\n')\n file.close()\n if len(js_bars['features']) != 0:\n for i in range(0, len(js_bars['features'])):\n bar_stat = ''\n bar_hours = ''\n bar_link = ''\n bar_category = ''\n bar_name = js_bars['features'][i]['properties']['name']\n bar_address = js_bars['features'][i]['properties']['CompanyMetaData']['address'] + ';'\n bar_stat = bar_name + '\\n' + bar_address + '\\n'\n try:\n bar_hours = js_bars['features'][i]['properties']['CompanyMetaData']['Hours']['text']\n bar_stat += bar_hours + '\\n'\n except:\n bar_hours = 'Часы работы не указаны'\n try:\n bar_link = js_bars['features'][i]['properties']['CompanyMetaData']['url']\n bar_stat += bar_link + '\\n'\n except:\n bar_link = 'Ссыка на сайт бара не указана'\n try:\n for j in range(0, len(js_bars['features'][i]['properties']['CompanyMetaData']['Categories'])):\n bar_category += js_bars['features'][i]['properties']['CompanyMetaData']['Categories'][j][\n 'name'] + ';'\n bar_stat += bar_category\n except:\n bar_category = 'Категория у бара не указана'\n bar_stat += bar_category\n\n bot.send_message(message.from_user.id, str(bar_stat))\n lat = js_bars['features'][i]['geometry']['coordinates'][1]\n lng = js_bars['features'][i]['geometry']['coordinates'][0]\n bot.send_venue(message.chat.id, latitude=float(lat), longitude=float(lng), title=bar_name,\n address=bar_address)\n else:\n bot.send_message(message.from_user.id, \"Нет баров поблизости\", reply_markup=start_key)\n bot.send_message(message.from_user.id, \"Выберите пункт\", reply_markup=start_key)\n\n\nbot.polling(none_stop=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"624986527","text":"def swap(arr,s,e):\n arr[s],arr[e] = arr[e],arr[s]\n\ndef FindMin(arr,s,e):\n minval = s\n for i in range(s,e):\n if arr[minval] > arr[i]:\n minval = i\n return minval\n\ndef arrangeArrayAccToIndex(arr,indexArr):\n for i in range(len(arr)):\n minIndex = FindMin(indexArr,i,len(arr))\n \n if(minIndex!=i):\n swap(indexArr,minIndex,i)\n swap(arr,minIndex,i)\n print(arr)\n\n \n\n\n\nif __name__ == \"__main__\":\n arr = [50, 40, 70, 60, 90]\n indexArr = [3, 0, 4, 1, 2]\n\n arrangeArrayAccToIndex(arr,indexArr)","sub_path":"arrangeArrayAccToIndex.py","file_name":"arrangeArrayAccToIndex.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"162049960","text":"# coding: utf-8\nfrom __future__ import absolute_import\nfrom flask import Blueprint\nfrom flask.ext.login import login_required\nfrom flask_restful import Api, Resource, reqparse, abort\nfrom 
.documents import OngDocument\n\nong_blueprint = Blueprint('ong', __name__)\napi = Api(ong_blueprint)\n\n\n@api.resource('/ong/', '/ong/')\nclass OngResource(Resource):\n\n    @login_required\n    def get(self, id=None):\n        parser = reqparse.RequestParser()\n        parser.add_argument('limit', type=int)\n        args = parser.parse_args(strict=True)\n        limit = args.get('limit')\n        if limit is not None:\n            ongs = OngDocument.objects[:limit]\n            return [ong.to_dict() for ong in ongs]\n        elif id is not None:\n            ong = OngDocument.objects.get_or_404(id=id)\n            return ong.to_dict()\n\n        abort(400, message=\"You must provide limit or id\")\n\n    @login_required\n    def post(self):\n        parser = reqparse.RequestParser()\n        parser.add_argument('name', type=str, required=True, help='You must provide name')\n        args = parser.parse_args()\n        name = args.get('name')\n        if name is None:\n            abort(400, message=\"You must provide name\")\n\n        ong = OngDocument(name=name).save()\n        return ong.to_dict(), 201\n\n    @login_required\n    def delete(self, id=None):\n        if id is not None:\n            ong_document = OngDocument.objects.get_or_404(id=id)\n            ong_document.delete()\n            return None, 204\n        abort(400, message=\"You must provide an id\")\n\n    @login_required\n    def put(self, id=None):\n        if id is None:\n            abort(400, message=\"You must provide an id\")\n\n        parser = reqparse.RequestParser()\n        parser.add_argument('name', type=str)\n        parser.add_argument('description', type=str)\n        parser.add_argument('purpose', type=str)\n        parser.add_argument('phone1', type=str)\n        parser.add_argument('phone2', type=str)\n        parser.add_argument('email', type=str)\n        parser.add_argument('site', type=str)\n        parser.add_argument('address', type=dict)\n        parser.add_argument('addressNumber', type=str)\n        parser.add_argument('logoUrl', type=str)\n        args = parser.parse_args()\n\n        name = args.get(\"name\", None)\n        description = args.get(\"description\", None)\n        purpose = args.get(\"purpose\", None)\n        phone1 = args.get(\"phone1\", None)\n        phone2 = args.get(\"phone2\", None)\n        email = args.get(\"email\", None)\n        address = args.get(\"address\", None)\n        addressNumber = args.get(\"addressNumber\", None)\n\n        ong_document = OngDocument.objects.get_or_404(id=id)\n\n        if name is not None:\n            ong_document.name = name\n\n        if description is not None:\n            ong_document.description = description\n\n        if purpose is not None:\n            ong_document.purpose = purpose\n\n        if phone1 is not None:\n            ong_document.phone1 = phone1\n\n        if phone2 is not None:\n            ong_document.phone2 = phone2\n\n        if email is not None:\n            ong_document.email = email\n\n        if address is not None:\n            ong_document.address = address\n\n        if addressNumber is not None:\n            ong_document.addressNumber = addressNumber\n\n        ong_document.save()\n        return ong_document.to_dict(), 201\n\n\n    # if site is not None:\n    #     ong_document.site = site\n    #\n    # if logoUrl is not None:\n    #     ong_document.logoUrl = logoUrl\n    # site = args.get(\"site\", None)\n    # logoUrl = args.get(\"logoUrl\", None)\n\n\n","sub_path":"ong/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"225862681","text":"# coding=UTF-8\nimport plotly.offline as plot\nimport plotly.graph_objs as go\nimport numpy as np\n\ndef CreData():\n    # Build the arrays for the sine function\n    x = np.arange(-np.pi,np.pi,0.01)\n    y = np.sin(x)\n    dataD=dict(x=x,y=y)\n    return dataD\n\ndef main():\n    data=CreData()\n    # Data trace\n    trace=go.Scatter(\n        x=data['x'],\n        y=data['y'],\n        mode='lines',\n        line=go.Line(\n            width=5,\n            color='#AA2233'\n        )\n    )\n    # Layout\n    layout=go.Layout(\n        showlegend=True,\n        plot_bgcolor='#A9A9A9'\n    )\n    # Figure object\n    fig = go.Figure(data=[trace],\n        layout=layout\n    )\n    plot.plot(fig,filename=\"start.html\")\n\nif __name__ == '__main__':\n    main()","sub_path":"Start.py","file_name":"Start.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"440222572","text":"import turtle\n\n# List can be made empty\n# It can also be 'stored' in a variable\npens = []\n\n# Let's say I want to create 10 pens\n# We can use a loop\nfor _ in range(10):\n    pens.append(turtle.Turtle())\n\n# Look at what the list looks like now\n# You can see in the console that it says some weird string of the object\n# But we can see that we have 10 pens :D\nprint(pens)\n\n# Example 1\n# Let's make all the pens move in one direction and see what happens\nfor pen in pens:  # This for loop is saying for each pen in pens\n    pen.forward(100)\n\nturtle.exitonclick()  # For VSCODE so it does not immediately close","sub_path":"content/topic_5_list/multiple_pens_example_1.py","file_name":"multiple_pens_example_1.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"280522986","text":"# https://codeforces.com/contest/691/problem/A\n\nn=int(input())\na=list(map(int,input().split()))\n\nif n == 1:\n    if a[0] == 0:\n        print ('NO')\n    else:\n        print ('YES')\nelse:\n    count = 0\n    for i in range(n):\n        if a[i] == 0:\n            count += 1\n    if count == 1:\n        print ('YES')\n    else:\n        print ('NO')\n","sub_path":"691A-Fashion in Berland.py","file_name":"691A-Fashion in Berland.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"27262804","text":"import numpy\nimport sys\nimport time\nfrom matplotlib import pyplot as plt\n\n\nif (len(sys.argv) != 2):\n    print(\"usage: python %s N\" % sys.argv[0])\n    quit()\n\nn = int(sys.argv[1])\na = numpy.zeros((n, n))  # Matrix A\nb = numpy.zeros((n, n))  # Matrix B\nc = numpy.zeros((n, n))  # Matrix C\n\n# Initialize the matrices to some values.\nfor i in range(n):\n    for j in range(n):\n        a[i, j] = i * n + j\n        b[i, j] = j * n + i\n        c[i, j] = 0\n\n\ndef product(m1, m2):\n    size = len(m1)\n    result = [[0] * size for i in range(size)]\n    for i in range(size):\n        for j in range(size):\n            for k in range(size):\n                result[i][j] += m1[i][k] * m2[k][j]\n    return result\n\n\nbegin = time.time()\n\n######################################################\n# Write code to calculate C = A * B                 #\n# (without using numpy libraries e.g., numpy.dot()) #\n######################################################\nc = product(a, b)\n\nend = time.time()\n# print(\"time: %.6f sec\" % (end - begin))\n\n# to draw the graph\n# (N, time)\nx = [3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 67, 100, 200, 400, 500, 700, 1000]\ny = [0.000033, 0.000061, 0.000099, 0.000215, 0.000325, 0.000374, 0.000539, 0.001361, 0.005425, 0.017417,\n     0.040709, 0.082543, 0.197933, 0.631375, 5.699142, 45.014128, 88.360380, 258.377851, 773.905043]\n\nplt.title('N-time Graph')\nplt.xlabel('N-Axis')\nplt.ylabel('time-Axis')\n\nplt.plot(x, y)\nplt.show()\n\n\n\"\"\"\n# Print C for debugging. 
Comment out the print before measuring the execution time.\ntotal = 0\nfor i in range(n):\n for j in range(n):\n # print c[i, j]\n total += c[i][j]\n# Print out the sum of all values in C.\n# This should be 450 for N=3, 3680 for N=4, and 18250 for N=5.\nprint(\"sum: %.6f\" % total)\n\"\"\"\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"6269557","text":"import numpy as np\nimport pandas as pd\nfrom IPython.display import display, clear_output\nimport ipywidgets as widgets\n\ndef load_and_describe_data(csv_file_path_name):\n \"\"\"Function to load data into a Pandas DataFrame and\n describe the data\"\"\"\n df = pd.read_csv(csv_file_path_name)\n df.columns = [col_name.lower() for col_name in df.columns]\n print('*'*10 + ' info ' + '*'*10)\n print(df.info())\n print('*'*10 + ' columns ' + '*'*10)\n print(df.columns)\n print('*'*10 + ' number of duplicated rows ' + '*'*10)\n print(df.duplicated().sum())\n print('*'*10 + ' number of nulls in columns ' + '*'*10 )\n print(df.isnull().sum())\n print('*'*10 + ' describe ' + '*'*10)\n print(df.describe())\n return df\n\n\ndef check_for_constant_columns(df):\n columns_with_one_unique_val = []\n for col in df.columns:\n if df[col].nunique() == 1:\n #print(df[col].nunique())\n #print(col)\n columns_with_one_unique_val.append(col)\n return columns_with_one_unique_val\n\n\ndef view_column_value_counts(df):\n def print_value_counts(df, column_name):\n print(df[column_name].value_counts())\n \n #https://stackoverflow.com/questions/53791590/jupyter-ipywidgets-how-to-refresh-plot-using-dropdown-menu\n w_val_counts = widgets.Dropdown(\n options=df.columns,\n value=df.columns[0],\n description='Columns',\n )\n display(w_val_counts)\n\n def on_change(change):\n if change['name'] == 'value' and (change['new'] != change['old']):\n clear_output()\n display(w_val_counts)\n print_value_counts(df, change['new'])\n\n w_val_counts.observe(on_change)","sub_path":"utils/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259637764","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Add columns tests suite.\"\"\"\n\nimport pandas as pd\nfrom io import StringIO\nimport pytest\nfrom .context import xsniper\n\n@pytest.fixture\ndef set_csv_file():\n \"\"\"Instantiate CSVFile object as a setup for every test.\"\"\"\n\n in_memory_csv = StringIO(\"\"\"\\\nheader1,header2,header3\ncell1,cell2,cell3\ncell4,cell5,cell6\"\"\")\n return in_memory_csv\n\ndef test_add_single_column():\n \"\"\"Should add the desired column.\"\"\"\n\n test_csv = StringIO(\"\"\"\\\nheader1,header3\ncell1,cell3\ncell4,cell6\"\"\")\n\n test_single_column = StringIO(\"\"\"\\\nheader2\ncell2\ncell5\"\"\")\n got = xsniper.CSVFile(test_csv)\n df = pd.read_csv(test_single_column, index_col=None, header=0)\n want = df['header2']\n\n got.add_columns(want)\n assert got.get_single_column('header2').equals(want)\n\ndef test_add_several_columns():\n \"\"\"Should add several columns.\"\"\"\n\n test_csv = StringIO(\"\"\"\\\nheader1,header3\ncell1,cell3\ncell4,cell6\"\"\")\n\n test_single_column = StringIO(\"\"\"\\\nheader2,header4\ncell2,cell7\ncell5,cell8\"\"\")\n df = pd.read_csv(test_single_column, index_col=None, header=0)\n h2 = df['header2']\n h4 = df['header4']\n want = pd.concat([h2, h4], ignore_index=False, axis=1)\n\n got = 
xsniper.CSVFile(test_csv)\n got.add_columns(want)\n assert got.get_columns('header2', 'header4').equals(want)\n","sub_path":"tests/test_add_columns.py","file_name":"test_add_columns.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"527930820","text":"import sys\n\nfrom jin.makeconfig import Maker\nfrom jin.parseconfig import Parser\nfrom jin.composer import Composer\nfrom jin.runner import Runner\n\nVERSION = \"Jin Simplified Warp (1.0.31) (Beta)\"\nUSAGE = \"\"\"usage: jin [--version] [--help] []\n\nCommands:\n\nstarting a new project\n init Creates a new Jin project in the current directory\n\ntesting changes\n [push|empty] dry Simulates a commit (dry run) and shows a log of the changes\n\nmanaging a project\n push Updates remote (remote_location) with host (host_location) changes\n + match-host Deletes any files and folders in the remote that are not in the host\n + watch Watches the project directory for changes and pushes to remote automatically\n\n empty Removes all files and folders from remote (remote_location)\n\n status Shows the most recent action in the project\"\"\"\n\ndef checkArgs():\n arguments = sys.argv[1:]\n\n if len(arguments) <= 0:\n print(USAGE)\n sys.exit(0)\n\n for arg in arguments:\n if arg in (\"-h\", \"--help\"):\n print(USAGE)\n elif arg in (\"-v\", \"--version\"):\n print(VERSION)\n elif arg in (\"init\"):\n maker = Maker(arguments)\n maker.createNewConfig()\n return\n elif arg in (\"push\", \"empty\"):\n parser = Parser()\n composer = Composer(arguments, parser.config)\n runner = Runner(composer).run()\n return\n elif arg in (\"status\"):\n try:\n with open(\".jin/jin.log\", \"r\") as log:\n print(f\"Jin Log (most recent action):\\n\\n{log.read()}\")\n except:\n print(\"No actions have been commited yet.\")\n else:\n print(USAGE)\n sys.exit(1)\n","sub_path":"jin/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"135462605","text":"import json\nfrom .batalha.iniciativa import batalha\nfrom .jogadores.criacao_personagem import personagem\nfrom .jogadores.checar import checar\nfrom .jogadores.print import printar\nfrom .funcoes.restore import restaurar\nfrom .funcoes.adicionar import add\nfrom .funcoes.lvlup import lvlup\nfrom .funcoes.addinv import additem\nfrom .funcoes.useit import useitem\nfrom .funcoes.creators.classcreator import ccreator\nfrom .funcoes.creators.racecreator import rcreator\nfrom .funcoes.creators.magiccreator import mcreator\nfrom .funcoes.creators.shieldcreator import screator\nfrom .funcoes.creators.weaponcreator import wcreator\nfrom .funcoes.creators.armorcreator import acreator\nfrom .funcoes.editores import editor\nfrom .funcoes.equip import equipar\nfrom .funcoes.addarma import addeq\ndef menu(command):\n with open('Beta/data/nomes.json') as f:\n nomes=json.load(f)\n #cs=command split\n cs=command.split(sep=',')\n if command == \"cp\" or cs[0]=='cp':\n if len(cs)==1: personagem(0,0)\n elif len(cs)==2: personagem(cs[1],1)\n else: print('Escreva :cp,tipo de personagem')\n elif command == \"co\":\n n=batalha()\n #for no in n:\n # restaurar(no,'f',1)\n elif command == \"c\" or cs[0]=='c':\n if len(cs)==1: checar(0,0)\n elif len(cs)==2: checar(cs[1],1)\n else: print('Escreva :c,oq vc deseja checar')\n elif command == \"e\" or cs[0]=='e':\n if len(cs)==1: editor(0,0)\n elif len(cs)==2: editor(cs[1],1)\n else: 
print('Escreva :e,tipo de coisa q vc deseja editar')\n elif command == \"p\" or cs[0]=='p':\n if len(cs)==1: printar(0,0)\n elif len(cs)==2: printar(cs[1],1)\n else: print('Escreva :p,oq vc deseja printar')\n elif command == \"r\" or cs[0]=='r':\n if len(cs)==1: restaurar(0,0,0)\n elif len(cs)==2:\n if cs[1]=='a':\n for n in nomes:\n restaurar(n,'f',1)\n else: print('Digitou errado')\n elif len(cs)==3: restaurar(cs[1],cs[2],1)\n else: print('Escreva :r,quem vc deseja restaurar,oq vc deseja restaurar')\n elif command == \"ait\" or cs[0]=='add':\n if len(cs)==1: additem(0,0,0)\n elif len(cs)==2: print('Digitou errado')\n elif len(cs)==3: additem(cs[1],cs[2],1)\n else: print('Escreva :a,qual item vc deseja adicionar,quem vai ter o item adicionado')\n elif command==\"lu\" or cs[0]=='tl':\n if len(cs)==3: lvlup(cs[1],cs[2],2)\n else: lvlup(0,0,0)\n elif command == \"a\":\n add()\n elif command == \"cm\":\n mcreator()\n elif command == \"cc\":\n ccreator()\n elif command == \"cr\":\n rcreator()\n elif command == \"cs\":\n screator()\n elif command == \"cw\":\n wcreator()\n elif command == \"ca\":\n acreator()\n elif command == \"eq\":\n equipar()\n elif command == \"adde\":\n addeq()\n elif command == \"uit\" or cs[0]=='use':\n if len(cs)==1: useitem(0,0,0)\n elif len(cs)==2: print('Digitou errado')\n elif len(cs)==3: useitem(cs[1],cs[2],1)\n else: print('Escreva :use,qual item vc deseja usar,quem vai usar o item')\n else: print('Não existe essa opção')\n","sub_path":"Beta/scripts/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"321242412","text":"import sys\nn = int(sys.stdin.readline())\nfactor = 2\nprint(\"Prime Factors of\",n,\" are as follows \")\nwhile(factor*factor<=n):\n while (n%factor==0):\n print(factor,end=\" \")\n n/=factor\n factor+=1\nif n>1:\n print(int(n))\nelse:\n print()\n\n\n","sub_path":"factors.py","file_name":"factors.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"340789307","text":"# -*- coding: utf-8 -*-\n\n\n# You can diff these tests with those for ``ExitStack`` in the hope of showing\n# how good our asynchronous counterpart is.\n\n\nimport pytest\n\nfrom aiotk import AsyncExitStack\nfrom unittest import mock\n\n\nclass AutoClose(object):\n \"\"\"Example synchronous context manager.\"\"\"\n\n def __init__(self, h, v=None, suppress=False):\n self._h = h\n self._v = v\n self._suppress = suppress\n\n async def __aenter__(self):\n return self._v\n\n async def __aexit__(self, etype, val, tb):\n self._h.close(etype, val, tb)\n return self._suppress\n\n\n@pytest.mark.asyncio\nasync def test_exit_stack_noop():\n async with AsyncExitStack():\n pass\n\n\n@pytest.mark.asyncio\nasync def test_exit_stack():\n\n handle = mock.MagicMock()\n async with AsyncExitStack() as stack:\n await stack.enter_context(AutoClose(handle))\n handle.close.assert_called_once_with(None, None, None)\n\n\n@pytest.mark.asyncio\nasync def test_exit_stack_exception_propagate():\n\n h1 = mock.MagicMock()\n h2 = mock.MagicMock()\n v1 = mock.MagicMock()\n v2 = mock.MagicMock()\n error = ValueError('FUUU')\n\n with pytest.raises(ValueError) as exc:\n async with AsyncExitStack() as stack:\n v = await stack.enter_context(AutoClose(h1, v=v1))\n assert v is v1\n v = await stack.enter_context(AutoClose(h2, v=v2))\n assert v is v2\n raise error\n assert exc.value is error\n\n 
h2.close.assert_called_once_with(ValueError, error, mock.ANY)\n h1.close.assert_called_once_with(ValueError, error, mock.ANY)\n\n\n@pytest.mark.asyncio\nasync def test_exit_stack_exception_suppress():\n\n h1 = mock.MagicMock()\n h2 = mock.MagicMock()\n error = ValueError('FUUU')\n\n async with AsyncExitStack() as stack:\n await stack.enter_context(AutoClose(h1))\n await stack.enter_context(AutoClose(h2, suppress=True))\n raise error\n\n h2.close.assert_called_once_with(ValueError, error, mock.ANY)\n h1.close.assert_called_once_with(None, None, None)\n\n\n@pytest.mark.asyncio\nasync def test_exit_stack_exception_substitution():\n\n h1 = mock.MagicMock()\n h2 = mock.MagicMock()\n e1 = ValueError('FUUU')\n e2 = KeyError('oops')\n\n class FailingAutoClose(object):\n \"\"\".\"\"\"\n\n def __init__(self, h):\n self._h = h\n\n def __enter__(self):\n return self\n\n def __exit__(self, etype, val, tb):\n assert etype is ValueError\n assert val is e1\n assert tb\n self._h.close(etype, val, tb)\n raise e2\n\n with pytest.raises(KeyError) as exc:\n async with AsyncExitStack() as stack:\n await stack.enter_context(AutoClose(h1))\n await stack.enter_context(FailingAutoClose(h2))\n raise e1\n assert exc.value is e2\n\n h2.close.assert_called_once_with(ValueError, e1, mock.ANY)\n h1.close.assert_called_once_with(KeyError, e2, mock.ANY)\n","sub_path":"tests/test_stack_async.py","file_name":"test_stack_async.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"623609028","text":"from stagesepx.cutter import VideoCutter\nfrom stagesepx.classifier import SVMClassifier\nfrom stagesepx.reporter import Reporter\n\n# cut\nvideo_path = '../test.mp4'\ncutter = VideoCutter()\nres = cutter.cut(video_path)\nstable, unstable = res.get_range()\ndata_home = res.pick_and_save(stable, 5)\n\n# classify\ncl = SVMClassifier()\ncl.load(data_home)\ncl.train()\n\n# 注意,如果在classify方法指定了范围\n# 那么分析时只会分析处于范围内的帧!\n# 例如,这里只传入了stable的范围,那么非stable范围内的帧都会被忽略掉,标记为 -1\nclassify_result = cl.classify(\n video_path,\n stable,\n # 步长,可以自行设置用于平衡效率与颗粒度\n # 默认为1,即每帧都检测\n step=1\n)\n\n# draw\nr = Reporter()\nr.add_dir_link(data_home)\n\n# 你可以将 thumbnail 直接嵌入到report中\nfor index, each in enumerate(unstable):\n r.add_thumbnail(f'unstable stage {index}', res.thumbnail(each))\n\nr.draw(classify_result)\n","sub_path":"example/cut_and_classify.py","file_name":"cut_and_classify.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"611614011","text":"from django.utils.importlib import import_module\nfrom django.utils.module_loading import module_has_submodule\nfrom collections import namedtuple\n\n\nclass Fixture(namedtuple(\"Fixture\", \"app name export func\")):\n __slots__ = ()\n\n def __hash__(self):\n return hash((self.app, self.name))\n\n def __eq__(self, other):\n return self[:2] == other[:2]\n\n @property\n def models(self):\n return self.func.models\n\n @property\n def requires(self):\n return self.func.requires\n\n @property\n def label(self):\n return \"%s.%s\" % (self.app, self.name)\n\n def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)\n\nclass CircularDependencyError(Exception):\n \"\"\"\n Raised when there is a circular dependency in fixture requirements.\n \"\"\"\n pass\n\n\ndef unique_seq(l):\n seen = set()\n for e in l:\n if e not in seen:\n seen.add(e)\n yield e\n\n\ndef calculate_requirements(available_fixtures, fixture, 
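# The tests above target aiotk's AsyncExitStack; since Python 3.7 the
# standard library ships contextlib.AsyncExitStack with the same idea.
# A minimal runnable sketch of the LIFO unwinding being tested:

import asyncio
import contextlib

class AutoClose:
    def __init__(self, name, log):
        self.name, self.log = name, log

    async def __aenter__(self):
        return self

    async def __aexit__(self, etype, val, tb):
        self.log.append(self.name)      # record close order
        return False                    # do not suppress exceptions

async def main():
    log = []
    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(AutoClose("a", log))
        await stack.enter_async_context(AutoClose("b", log))
    assert log == ["b", "a"]            # contexts exit in reverse order

asyncio.run(main())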
seen=None):\n if seen is None:\n seen = set([fixture])\n models = list(reversed(fixture.models))\n requirements = []\n for requirement in fixture.requires:\n app_label, fixture_name = requirement.rsplit(\".\", 1)\n fixture_func = available_fixtures[(app_label, fixture_name)]\n if fixture_func in seen:\n raise CircularDependencyError\n r, m = calculate_requirements(\n available_fixtures,\n fixture_func,\n seen | set([fixture_func])\n )\n requirements.extend([req for req in r if req not in requirements])\n models.extend(reversed(m))\n requirements.append(fixture)\n return requirements, list(unique_seq(reversed(models)))\n\n\ndef get_available_fixtures(apps):\n fixtures = {}\n for app in apps:\n try:\n fixture_gen = import_module(\".fixture_gen\", app)\n except ImportError:\n if module_has_submodule(import_module(app), \"fixture_gen\"):\n raise\n continue\n for obj in fixture_gen.__dict__.values():\n if not getattr(obj, \"__fixture_gen__\", None):\n continue\n fixture = Fixture(app.rsplit(\".\", 1)[-1], obj.__name__, obj.export, obj)\n fixtures[fixture] = fixture\n return fixtures\n\ndef fixture_generator(*models, **kwargs):\n \"\"\"\n Define function as a fixture generator\n \"\"\"\n requires = kwargs.pop(\"requires\", [])\n export = kwargs.pop(\"export\", False)\n if kwargs:\n raise TypeError(\"fixture_generator got an unexpected keyword argument: %r\", iter(kwargs).next())\n def decorator(func):\n func.models = models\n func.requires = requires\n func.export = func.__name__ if export is True else export\n func.__fixture_gen__ = True\n return func\n return decorator\n","sub_path":"fixture_generator/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239506079","text":"import pyutk\n\nclass Evaluator:\n class Eval:\n def __init__(self) -> None:\n self.N = 0\n self.d = 0\n\n self.gl2 = 0.0\n self.gauss = 0.0\n self.heavi = 0.0\n \n def __add__(self, other):\n rslt = Evaluator.Eval()\n rslt.N = self.N\n rslt.d = self.d\n rslt.gl2 = self.gl2 + other.gl2\n rslt.gauss = self.gauss + other.gauss\n rslt.heavi = self.heavi + other.heavi\n\n return rslt\n \n def __truediv__(self, N):\n rslt = Evaluator.Eval()\n rslt.N = self.N\n rslt.d = self.d\n rslt.gl2 = self.gl2 / N\n rslt.gauss = self.gauss / N\n rslt.heavi = self.heavi / N\n\n return rslt\n \n def __init__(self, config):\n self.gauss = pyutk.IntegrationTest()\n self.heavi = pyutk.IntegrationTest()\n\n samples = pyutk.Sobol(config.dim, 32).sample(config.nInt)\n \n self.gauss.BuildGaussianDatabase (\"tmp_gauss.txt\", config.dim, config.mInt, config.seed, {}, samples)\n self.heavi.BuildHeavisideDatabase(\"tmp_heavi.txt\", config.dim, config.mInt, config.seed, {}, samples)\n\n def EvalPointset(self, pts):\n gl2 = pyutk.GL2Discrepancy().compute(pts)\n gauss = self.gauss.compute(pts).mean\n heavi = self.heavi.compute(pts).mean\n\n eval = Evaluator.Eval()\n eval.N = pts.shape[0]\n eval.d = pts.shape[1]\n eval.gl2 = gl2\n eval.gauss = gauss\n eval.heavi = heavi\n\n return eval\n","sub_path":"examples/Evaluation/Python/Eval.py","file_name":"Eval.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"572296482","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# \t\t#\n# \tA k-means clustering solver. 
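# Minimal sketch of the dependency walk in calculate_requirements above,
# assuming a plain {name: [requirements]} dict instead of Fixture
# objects: depth-first, carrying the set of names on the current path so
# a cycle raises instead of recursing forever.

class CircularDependencyError(Exception):
    pass

def resolve(graph, name, path=frozenset()):
    if name in path:
        raise CircularDependencyError(name)
    order = []
    for dep in graph.get(name, []):
        for req in resolve(graph, dep, path | {name}):
            if req not in order:        # keep first occurrence only
                order.append(req)
    order.append(name)
    return order

graph = {"app.users": [], "app.posts": ["app.users"],
         "app.feed": ["app.posts", "app.users"]}
assert resolve(graph, "app.feed") == ["app.users", "app.posts", "app.feed"]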
There is am example problem included, which is commented out.\t#\n# \t\t#\n#\tAuthor: Tyler Hooks \t#\n# \t\t#\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nimport math\nimport random\nimport re\n\ndef get_coordinates(coords):\n\tcoordinates = []\n\tfor coordinate in re.findall('\\(\\d\\.*\\d*,\\s*\\d\\.*\\d*\\)', coords):\n\t\tcoordinate = re.findall('\\d\\.*\\d*', coordinate)\n\t\tcoordinate_list = []\n\t\tfor c in coordinate:\n\t\t\tcoordinate_list.append(float(c))\n\t\tcoordinates.append(tuple(coordinate_list))\n\treturn coordinates\n\t\t\n\ndef get_centroids(coordinates, count):\n\treturn random.sample(coordinates, count)\n\ndef get_distance(centroids, coordinate):\n\tdistances = []\n\tfor c in centroids:\n\t\tdistance = math.sqrt(math.pow(c[0] - coordinate[0], 2) + math.pow(c[1] - coordinate[1], 2))\n\t\tdistances.append(distance)\n\treturn distances\n\t\ndef get_membership(centroids, coordinates):\n\tclusters = {}\n\tfor c in range(len(centroids)):\n\t\tclusters[c] = []\n\tfor coordinate in coordinates:\n\t\tdistances = get_distance(centroids, coordinate)\n\t\tcluster = distances.index(min(distances))\n\t\tclusters[cluster].append(coordinate)\n\treturn clusters\n\ndef get_new_centroids(clusters):\n\tnew_centroids = []\n\tfor cluster in clusters:\n\t\tx = []\n\t\ty = []\n\t\tfor c in clusters[cluster]:\n\t\t\tx.append(c[0])\n\t\t\ty.append(c[1])\n\t\tnew_centroid = (sum(x)/len(clusters[cluster]), sum(y)/len(clusters[cluster]))\n\t\tnew_centroids.append(new_centroid)\n\treturn new_centroids\n\ntry:\n\tcoords = input(\"Please enter the coordinates: \")\n\tcount = int(input(\"How many clusters do you wish to have? \"))\n\t\n\t# Example problem. \n\t# coords = \"(1, 3), (3, 3) (4, 3), (5, 3) (1, 2), (4, 2), (1, 1) (2, 1)\"\n\t# count = 2\n\tcoordinates = get_coordinates(coords)\n\tcentroids = get_centroids(coordinates, count)\n\tclusters = get_membership(centroids, coordinates)\n\tnew_centroids = get_new_centroids(clusters)\n\tfor iteration in range(5):\n\t\tif centroids == new_centroids or iteration == 4:\n\t\t\tfor c in clusters:\n\t\t\t\tprint(f\"c{c}: {clusters[c]}\")\n\t\t\t# print(f\"Stopped on iteration {iteration + 1}.\")\n\t\t\tbreak\n\t\tclusters = get_membership(centroids, coordinates)\n\t\tcentroids = new_centroids\n\t\tnew_centroids = get_new_centroids(clusters)\n\t\t\t\n\t\nexcept Exception as e:\n\tprint(f\"Error: {e}\")\n","sub_path":"clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"579780079","text":"import os\n\nfrom setuptools import setup\n\n\ndef get_version():\n # workaround to resolve import issue\n basedir = os.path.dirname(os.path.realpath(__file__))\n version_line = None\n with open(os.path.join(basedir, \"hdtop/__init__.py\")) as fp:\n for line in fp:\n if line.startswith(\"__version__\"):\n version_line = line\n break\n\n l_quote = version_line.find('\"')\n r_quote = version_line.rfind('\"')\n return version_line[l_quote + 1 : r_quote]\n\n\nsetup(\n name=\"hdtop\",\n version=get_version(),\n description=\"Top-liked monitoring console for hadoop.\",\n author=\"Tzing\",\n author_email=\"tzingshih@gmail.com\",\n url=\"https://github.com/tzing/hdtop\",\n packages=[\"hdtop\"],\n python_requires=\">=3.6\",\n entry_points={\n \"console_scripts\": [\"hdtop = hdtop.__main__:main\"],\n },\n install_requires=[\"httpx\", \"urwid\"],\n classifiers=[\n \"Development Status :: 
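# Hedged NumPy sketch of the same k-means loop (the script above is pure
# Python; NumPy here is an assumption for brevity). Note the original's
# get_new_centroids divides by len(clusters[cluster]) and would raise
# ZeroDivisionError if a centroid attracts no points; this sketch keeps
# the previous centroid in that case.

import numpy as np

def kmeans(points, centroids, iterations=5):
    points = np.asarray(points, dtype=float)
    centroids = np.asarray(centroids, dtype=float)
    for _ in range(iterations):
        # squared distance of every point to every centroid
        d = ((points[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
        labels = d.argmin(axis=1)
        new = centroids.copy()
        for k in range(len(centroids)):
            members = points[labels == k]
            if len(members):            # empty cluster: keep old centroid
                new[k] = members.mean(axis=0)
        if np.allclose(new, centroids): # converged early
            break
        centroids = new
    return centroids, labels

pts = [(1, 3), (3, 3), (4, 3), (5, 3), (1, 2), (4, 2), (1, 1), (2, 1)]
centers, labels = kmeans(pts, [(1, 3), (5, 3)])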
2 - Pre-Alpha\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"302977612","text":"\"\"\"\nYou are given a list of character sequences as a comma separated string. Write a function which returns another string containing all the character sequences except the first and the last ones, separated by spaces. If the input string is empty, or the removal of the first and last items would cause the string to be empty, return a null value.\n\"\"\"\n\ndef array(string):\n string = string.replace(\" \", \"\").split(\",\")\n if len(string) <= 2:\n return None\n else:\n string.pop(0)\n string.pop(-1)\n if len(string) == 0:\n return None\n else:\n string = \" \".join(map(str, string))\n return string\n","sub_path":"Remove First and Last Character Part Two - CW.py","file_name":"Remove First and Last Character Part Two - CW.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"326692337","text":"import cv2\nimport os\nimg = cv2.imread(os.path.join(\"Resources\", \"img.jpg\"))\nimgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#cv2.imshow('NormalImage', img)\nimgBlur = cv2.GaussianBlur(imgGrey,(7,7),0)\ncv2.imshow(\"Blur\", imgBlur)\ncv2.waitKey(0)\n \n\n","sub_path":"PythonProjects/OpenCV/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"165729793","text":"'''\nCreated on 2011-12-13\n\n@author: fimbul\n'''\nclass Excptn():\n def foo(self):\n try:\n a = 5 / 0\n except Exception as e:\n print(\"Exception Encountered!\")\n print(\"The exception is %s :\", e)\n import traceback\n traceback.print_exc()\n finally:\n print(\"This is finally...\")\nif __name__ == '__main__':\n e = Excptn()\n e.foo()","sub_path":"src/module_study/exception_.py","file_name":"exception_.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"516269683","text":"# -*- coding: utf8 -*-\nfrom tokenizer import Tokenizer\nfrom tags import CloseTag\nfrom error import UnbalancedError\n\n\nclass HtmlHelper(Exception):\n\n @classmethod\n def truncate(cls, value, target_len=200, ellipsis='...'):\n \"\"\"Returns a copy of str truncated to target_len characters,\n preserving HTML markup (which does not count towards the length).\n Any tags that would be left open by truncation will be closed at\n the end of the returned string. 
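# Sketch of the version lookup in setup.py above, assuming the same
# single-sourced `__version__ = "x.y.z"` convention; a regex fails loudly
# when the assignment is missing, whereas the original would crash on
# `version_line` still being None.

import re

def get_version(source):
    match = re.search(r'^__version__\s*=\s*"([^"]+)"', source, re.M)
    if match is None:
        raise RuntimeError("no __version__ assignment found")
    return match.group(1)

assert get_version('__version__ = "1.2.3"\n') == "1.2.3"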
Optionally append ellipsis if\n the string was truncated.\"\"\"\n # open tags are pushed on here, then popped when\n # the matching close tag is found\n stack = []\n # string to be returned\n retval = []\n # number of characters (not counting markup) placed in retval so far\n length = 0\n tokens = Tokenizer(value)\n tok = tokens.next_token()\n while tok != tokens.token_end:\n if not length < target_len:\n retval.append(ellipsis)\n break\n if tok.__class__.__name__ == 'OpenTag':\n stack.append(tok)\n retval.append(tok.as_string())\n elif tok.__class__.__name__ == 'CloseTag':\n if stack[-1].tag == tok.tag:\n stack.pop()\n retval.append(tok.as_string())\n else:\n raise UnbalancedError(tok.as_string())\n elif tok.__class__.__name__ == 'SelfClosingTag':\n retval.append(tok.as_string())\n else:\n retval.append(tok)\n length += 1\n tok = tokens.next_token()\n while len(stack) > 0:\n tok = CloseTag(stack.pop().tag)\n retval.append(tok.as_string())\n return ''.join(retval)\n","sub_path":"app/helpers/html/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427087393","text":"import threading\nimport mysql.connector\nfrom mysql.connector import Error\nimport time\nclass ConcurrencyTesting(threading.Thread):\n def __init__(self,thread_name,query):\n threading.Thread.__init__(self)\n self.thread_name=thread_name\n self.query=query\n\n def run(self):\n print(\"Thread Name: \"+self.thread_name +\" Start\")\n runQuery(self.thread_name,self.query)\n print(\"Thread Name: \"+self.thread_name +\" Stop\")\n\n\n\ndef runQuery(thread_name,query):\n try:\n conn = mysql.connector.connect(host='localhost',\n database='concurrency',\n user='root',\n password='12345')\n if conn.is_connected():\n cursor = conn.cursor()\n cursor.execute(query)\n result= cursor.fetchall()\n for i in result:\n Table1_Col1=i[0]\n Table1_Col2=i[1]\n Table1_Col3=i[2]\n Table1_Col4=i[3]\n Table2_Col1=i[4]\n Table2_Col2=i[5]\n Table2_Col3=i[6]\n Table2_Col4=i[7]\n print(\"Now Executing \"+thread_name+\" Result are: \"+str(Table1_Col1)+\" \"+str(Table1_Col2)+\" \"+str(Table1_Col3)+\" \"+str(Table1_Col4)+\n \" \"+str(Table2_Col1)+\" \"+str(Table2_Col2)+\" \"+str(Table2_Col3)+\" \"+str(Table2_Col4))\n\n\n\n except Error as e:\n print(e)\n\n finally:\n conn.close()\n\n\ncross_product=\"select *from Table1,Table2 where Table1.t1_one=Table2.t2_two\"\njoin=\"select *from Table1 inner join Table2 on Table1.t1_one=Table2.t2_two\"\nt1= ConcurrencyTesting(\"Thread 1(Cross)\",cross_product)\nt2=ConcurrencyTesting(\"Thread 2(Join)\",join)\nt1.start()\nt2.start()\n\n\n\n\n\n","sub_path":"Python/MySQL with Python/concurrency_testing.py","file_name":"concurrency_testing.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"477616822","text":"# -*- encoding: utf-8 -*-\n# @TIME : 2019/10/11 17:19\n# @Author : 成昭炜\n\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport torchvision\n\n\nclass TransformParameters:\n\n def __init__(self, fill_mode=\"nearest\",\n interpolation=\"linear\",\n cval=0,\n relative_translation=True):\n # 边界填充模式\n self.fill_mode = fill_mode\n self.cval = cval\n # 插值模式\n self.interpolation = interpolation\n self.relative_translation = relative_translation\n\n def cvBorderMode(self):\n if self.fill_mode == \"constant\":\n return cv2.BORDER_CONSTANT\n elif self.fill_mode == \"nearest\":\n return 
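# Minimal sketch of the open-tag bookkeeping behind truncate() above,
# assuming pre-tokenized ("open"/"close", name) pairs: a close tag must
# match the top of the stack, and whatever is still open at the cut-off
# is closed in reverse order. (In the original, a stray close tag with
# an empty stack would raise IndexError on stack[-1] before reaching the
# intended UnbalancedError.)

def close_open_tags(tags):
    stack = []
    for kind, name in tags:
        if kind == "open":
            stack.append(name)
        elif not stack or stack[-1] != name:
            raise ValueError("unbalanced close tag: %s" % name)
        else:
            stack.pop()
    return ["</%s>" % name for name in reversed(stack)]

tags = [("open", "div"), ("open", "b"), ("close", "b"), ("open", "em")]
assert close_open_tags(tags) == ["</em>", "</div>"]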
cv2.BORDER_REPLICATE\n elif self.fill_mode == \"reflect\":\n return cv2.BORDER_REFLECT_101\n elif self.fill_mode == \"wrap\":\n return cv2.BORDER_WRAP\n\n def cvInterpolation(self):\n # 最邻近插值\n if self.interpolation == \"nearest\":\n return cv2.INTER_NEAREST\n # 线性插值\n elif self.interpolation == \"linear\":\n return cv2.INTER_LINEAR\n # 三次样条插值\n elif self.interpolation == \"cubic\":\n return cv2.INTER_CUBIC\n # 区域插值\n elif self.interpolation == \"area\":\n return cv2.INTER_AREA\n # lanczos插值\n elif self.interpolation == \"lanczos4\":\n return cv2.INTER_LANCZOS4\n\n\ndef random_vector(mini, maxi, default):\n mini = np.array(mini)\n maxi = np.array(maxi)\n assert mini.shape == maxi.shape\n return default(mini, maxi)\n\n\ndef rotation(angle):\n \"\"\"\n 构造一个二维旋转矩阵\n :param angle:\n :return:\n \"\"\"\n return np.array([\n [np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]\n ])\n\n\ndef translation(trans):\n \"\"\"\n 构造一个二维平移矩阵\n :param trans:\n :return:\n \"\"\"\n return np.array([\n [1, 0, trans[0]],\n [0, 1, trans[1]],\n [0, 0, 1]\n ])\n\n\ndef shear(angle):\n \"\"\"\n 构造一个二维shear矩阵\n :param angle:\n :return:\n \"\"\"\n return np.array([\n [1, -np.sin(angle), 0],\n [0, np.cos(angle), 0],\n [0, 0, 1]\n ])\n\n\ndef scaling(factor):\n \"\"\"\n 构造二维缩放矩阵\n :param factor:\n :return:\n \"\"\"\n return np.array([\n [factor[0], 0, 0],\n [0, factor[1], 0],\n [0, 0, 1]\n ])\n\n\ndef flip(flip_x, flip_y):\n _x = int(flip_x)\n _y = int(flip_y)\n return np.array([\n [(-1)**_x, 0, _x],\n [0, (-1)**_y, _y],\n [0, 0, 1]\n ])\n\n\nclass RandomAffine(object):\n\n default = np.random\n\n def __init__(self, min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5):\n\n self.rotation = (min_rotation, max_rotation)\n self.min_translation = min_translation\n self.max_translation = max_translation\n self.shear = (min_shear, max_shear)\n self.min_scaling = min_scaling\n self.max_scaling = max_scaling\n self.flip_x_chance = flip_x_chance\n self.flip_y_chance = flip_y_chance\n\n self.transform_parameters = TransformParameters()\n\n @staticmethod\n def get_params(rot, min_translation, max_translation, sh,\n min_scaling, max_scaling, flip_x_chance, flip_y_chance):\n rot = RandomAffine.default.uniform(rot[0], rot[1])\n trans = random_vector(min_translation, max_translation, RandomAffine.default.uniform)\n sh = RandomAffine.default.uniform(sh[0], sh[1])\n scale = random_vector(min_scaling, max_scaling, RandomAffine.default.uniform)\n fl = (RandomAffine.default.uniform(0, 1) < flip_x_chance,\n RandomAffine.default.uniform(0, 1) < flip_y_chance)\n\n return rot, trans, sh, scale, fl\n\n @staticmethod\n def transform_coordinate(transform, aabb):\n xmin, ymin, xmax, ymax = aabb\n\n points = transform.dot([\n [xmin, xmax, xmin, xmax],\n [ymin, ymax, ymax, ymin],\n [1, 1, 1, 1],\n ])\n\n min_corner = points[0:2, :].min(axis=1)\n max_corner = points[0:2, :].max(axis=1)\n\n return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]\n\n def __call__(self, img, target):\n rot, trans, sh, scale, fl = self.get_params(self.rotation,\n self.min_translation,\n self.max_translation,\n self.shear,\n self.min_scaling,\n self.max_scaling,\n self.flip_x_chance,\n self.flip_y_chance)\n\n random_rotation = rotation(rot)\n random_translation = translation(trans)\n random_shear = shear(sh)\n random_scaling = scaling(scale)\n random_flip 
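# Sketch of how the matrices above compose and how transform_coordinate
# maps an axis-aligned box: push all four corners through the 3x3
# homogeneous transform, then take per-axis min/max for the new AABB.

import numpy as np

def rotation(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

def translation(t):
    return np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])

def transform_aabb(transform, box):
    xmin, ymin, xmax, ymax = box
    corners = np.array([[xmin, xmax, xmin, xmax],
                        [ymin, ymax, ymax, ymin],
                        [1, 1, 1, 1]])
    pts = transform @ corners
    lo, hi = pts[:2].min(axis=1), pts[:2].max(axis=1)
    return [lo[0], lo[1], hi[0], hi[1]]

T = translation((10, 5)) @ rotation(np.pi / 2)  # rotate 90 deg, then shift
print(transform_aabb(T, [0, 0, 4, 2]))          # ~ [8.0, 5.0, 10.0, 9.0]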
= flip(fl[0], fl[1])\n\n transform = np.linalg.multi_dot([\n random_rotation,\n random_translation,\n random_shear,\n random_scaling,\n random_flip\n ])\n\n height = img.shape[0]\n width = img.shape[1]\n\n transform[0, 2] *= width\n transform[1, 2] *= height\n\n params = self.transform_parameters\n\n img = cv2.warpAffine(img, transform[:2, :], dsize=(width, height), flags=params.cvInterpolation(),\n borderMode=params.cvBorderMode(),\n borderValue=params.cval)\n\n for idx in range(target.shape[0]):\n target[idx, 1:5] = self.transform_coordinate(transform, target[idx, 1:5])\n\n return img, target\n\n\nclass Resize(object):\n\n def __init__(self, min_side=400, max_side=400):\n self.min_side = min_side\n self.max_side = max_side\n\n def __call__(self, img, target=None):\n width = img.shape[1]\n height = img.shape[0]\n\n small_side = min(width, height)\n\n scale = self.min_side/small_side\n\n large_side = max(width, height)\n\n if large_side*scale > self.max_side:\n scale = self.max_side/large_side\n\n img = cv2.resize(img, None, fx=scale, fy=scale)\n\n if target is not None:\n target[:, 1:5] *= scale\n\n return img, target, scale\n\n\ndef img2matrix(img):\n return np.asarray(img.convert(\"RGB\"))\n\n\ndef matrix2img(matrix):\n return Image.fromarray(matrix, \"RGB\")\n\n\ndef normalizer_image(x):\n x = x.astype(np.float32)\n x /= 255.0\n return x\n\n\ndef stupid_process():\n toTensor = torchvision.transforms.ToTensor()\n\n def _process(image):\n image = matrix2img(image)\n image = toTensor(image)\n return image\n return _process\n\n\ndef train_augment(visual_effect, handle_target, transform, preprocess, resize):\n\n def _train_augment(image, target):\n\n image = visual_effect(image)\n image = img2matrix(image)\n if handle_target:\n target = handle_target(target)\n image, target = transform(image, target)\n image, target, _ = resize(image, target)\n\n image = preprocess(image)\n return image, target\n\n return _train_augment\n\n\ndef val_augment(preprocess, handle_target, resize):\n\n def _val_augment(image, target):\n orgin = image\n image = img2matrix(image)\n if handle_target:\n target = handle_target(target)\n image, _, scale = resize(image, None)\n image = preprocess(image)\n return image, (scale, target, orgin)\n return _val_augment\n\n\ndef simple_augment():\n\n train_augment = torchvision.transforms.Compose([\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\n val_augment = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\n return train_augment, val_augment\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"utils/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"21379019","text":"import numpy as np\n\n\nclass MemoryScheduler(object):\n USE_INPLACE = True\n def __init__(self, layer):\n self.layer = layer\n self.buffers = {\n \"input_output\" : 0,\n \"residual\": 0,\n \"im2col\": 0, \n \"kernel\": 0}\n self.peakmem = 0 #currently we only support MoblieNet-like models which only have 1 by-pass for a block\n self.flash = 0\n self.bias = 0\n self.scale = 0\n\n self.layermem = []\n \n # public functions \n def allocateMemory(self):\n # varaiables to maintain tensors of previous layer \n previous_output_add = 'front' # input is 
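# Sketch of the scale rule in the Resize class above: grow the short
# side to min_side, but never let the long side exceed max_side.

def resize_scale(width, height, min_side=400, max_side=400):
    scale = min_side / min(width, height)
    if max(width, height) * scale > max_side:
        scale = max_side / max(width, height)   # long side caps the scale
    return scale

assert resize_scale(800, 600) == 400 / 800
assert resize_scale(400, 400) == 1.0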
place at &buffer0[0]\n last_is_residual = False \n\n \n ## For detailed memory\n for i in range(len(self.layer)):\n layermem = { }\n self.layermem.append(layermem)\n\n # go through all layers and figure out the placement of each tensors\n for i in range(len(self.layer)):\n ''' find the life cycle of the output '''\n output_idx = self.layer[i]['output_idx']\n # scan if the output is used for residual\n output_residual = False\n ## For detailed memory\n residual_index = 0\n for j in range(i+2, len(self.layer)):\n if self.layer[j]['input_idx'] == output_idx:\n output_residual = True\n residual_index = j\n break\n if self.layer[j]['op'] == 'ADD' and self.layer[j]['input2_idx'] == output_idx:\n output_residual = True\n residual_index = j\n break\n \n ## For detailed memory\n output_size = self.__flatsize(self.layer[i], \"output\")\n for j in range(i+1, residual_index):\n self.layermem[j]['residual'] = output_size\n\n ''' assign the output address '''\n if output_residual:\n self.layer[i]['output_buf_add'] = 'residual' # place it in the residual buf\n else:\n if previous_output_add == 'end':\n if self.layer[i]['op'] == 'DEPTHWISE_CONV_2D' and self.USE_INPLACE:\n self.layer[i]['output_buf_add'] = 'end' # place it inplace\n else:\n self.layer[i]['output_buf_add'] = 'front' # place it inplace\n else:\n if self.layer[i]['op'] == 'DEPTHWISE_CONV_2D' and self.USE_INPLACE:\n self.layer[i]['output_buf_add'] = 'front' # place it inplace\n else:\n self.layer[i]['output_buf_add'] = 'end' # place it inplace\n\n ''' assign the input address and enlarge buffer '''\n input_size = self.__flatsize(self.layer[i], \"input\")\n if self.layer[i]['op'] == 'DEPTHWISE_CONV_2D' and self.USE_INPLACE:\n # we just need two channels for the inplace implementation\n output_size = self.layer[i]['input_h'] * self.layer[i]['input_w'] * 2\n else:\n output_size = self.__flatsize(self.layer[i], \"output\")\n\n # 1. enlarge the input_output buffer\n if output_residual:\n self.__enlargeBuffer(\"input_output\", input_size)\n else:\n self.__enlargeBuffer(\"input_output\", input_size + output_size)\n # 2. 
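# Hedged sketch of the residual-lifetime scan above, assuming layers as
# plain dicts with input_idx/input2_idx/output_idx: an output read by any
# layer beyond the immediately following one must survive in the residual
# buffer until that consumer runs.

def needs_residual(layers, i):
    out = layers[i]["output_idx"]
    for j in range(i + 2, len(layers)):          # i+1 reads it in place
        if layers[j].get("input_idx") == out:
            return j
        if layers[j].get("op") == "ADD" and layers[j].get("input2_idx") == out:
            return j
    return None                                  # dies after next layer

layers = [
    {"op": "CONV_2D", "output_idx": 0},
    {"op": "CONV_2D", "input_idx": 0, "output_idx": 1},
    {"op": "CONV_2D", "input_idx": 1, "output_idx": 2},
    {"op": "ADD", "input_idx": 2, "input2_idx": 0, "output_idx": 3},
]
assert needs_residual(layers, 0) == 3   # layer 0's output feeds the ADD by-pass
assert needs_residual(layers, 1) is None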
assign address and enlarge residual buffer if needed (e.g., ADD)\n if self.layer[i]['op'] == 'ADD':# two inputs\n self.layer[i]['input_buf_add'] = previous_output_add\n self.layer[i]['input2_buf_add'] = 'residual'\n\n input2_size = self.__flatsize(self.layer[i], \"input\") # same as input\n self.__enlargeBuffer(\"residual\", input_size)\n\n ## For detailed memory\n self.layermem[i]['activation'] = input_size + output_size\n\n else:# one input\n self.layer[i]['input_buf_add'] = previous_output_add\n\n ## For detailed memory\n self.layermem[i]['activation'] = input_size + output_size\n \n ''' update previous output address '''\n previous_output_add = self.layer[i]['output_buf_add']\n\n # now we have the buffer size for input/output tensors\n # go through all layers again to (1) assign specific address for each tensor and (2) enlarge intermediate buffers\n for i in range(len(self.layer)):\n # (1) assign specific address for each tensor\n self.layer[i]['input_buf_add_offset'] = self.__getBufferAddress(self.layer[i]['input_buf_add'], self.__flatsize(self.layer[i], \"input\"))\n if self.layer[i]['op'] == 'DEPTHWISE_CONV_2D' and self.USE_INPLACE:\n if self.layer[i]['output_buf_add'] == 'front':\n self.layer[i]['output_buf_add_offset'] = self.__getBufferAddress('end', self.__flatsize(self.layer[i], \"output\"))\n else:\n self.layer[i]['output_buf_add_offset'] = self.__getBufferAddress('front', self.__flatsize(self.layer[i], \"output\"))\n else:\n self.layer[i]['output_buf_add_offset'] = self.__getBufferAddress(self.layer[i]['output_buf_add'], self.__flatsize(self.layer[i], \"output\"))\n if self.layer[i]['op'] == 'ADD':# two inputs\n self.layer[i]['input2_buf_add_offset'] = self.__getBufferAddress(self.layer[i]['input2_buf_add'], self.__flatsize(self.layer[i], \"input2\"))\n else:\n # (2) enlarge intermediate buffers\n if self.layer[i]['op'] == 'DEPTHWISE_CONV_2D':\n if self.USE_INPLACE:\n im2col_size = 2 * (self.layer[i]['input_h'] + 2 * self.layer[i]['padding']) * (self.layer[i]['input_w'] + 2 * self.layer[i]['padding'])\n kernel_size = 0\n else:\n im2col_size = 2 * self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] * self.layer[i]['input_c'] # 16 bit\n kernel_size = 2 * (self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] + 1) * self.layer[i]['input_c'] # 16 bit\n weight_size = self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] * self.layer[i]['input_c'] \n\n self.__enlargeBuffer('im2col', im2col_size)\n self.__enlargeBuffer('kernel', kernel_size)\n\n ## For detailed memory\n self.layermem[i]['runtime'] = kernel_size + im2col_size\n self.layermem[i]['weight'] = weight_size \n self.layermem[i]['bias'] = 4 * self.layer[i]['output_c'] # bias\n self.layermem[i]['scale'] = 8 * self.layer[i]['output_c'] # shift and multiplier\n\n self.__increaseFlash(weight_size)\n self.__increaseFlash(3 * 4 * self.layer[i]['output_c'])# 32-bit bias, shift, multiplier\n elif self.layer[i]['op'] == 'CONV_2D': \n im2col_size = 2 * 2 * self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] * self.layer[i]['input_c'] # 16 bit\n if self.layer[i]['kernel_h'] == 1:\n kernel_size = 0\n else:\n kernel_size = 2 * self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] * self.layer[i]['input_c'] * self.layer[i]['output_c'] # 16 bit\n weight_size = self.layer[i]['kernel_h'] * self.layer[i]['kernel_w'] * self.layer[i]['input_c'] * self.layer[i]['output_c']\n \n self.__enlargeBuffer('im2col', im2col_size)\n self.__enlargeBuffer('kernel', kernel_size)\n ## For detailed memory\n self.layermem[i]['runtime'] = kernel_size + 
im2col_size\n self.layermem[i]['weight'] = weight_size \n self.layermem[i]['bias'] = 4 * self.layer[i]['output_c'] # bias\n self.layermem[i]['scale'] = 8 * self.layer[i]['output_c'] # shift and multiplier\n\n self.__increaseFlash(weight_size)\n self.__increaseFlash(3* 4 * self.layer[i]['output_c'])# 32-bit bias, shift, multiplier\n elif self.layer[i]['op'] == 'FULLY_CONNECTED': \n weight_size = self.layer[i]['input_c'] * self.layer[i]['output_c'] \n \n ## For detailed memory\n self.layermem[i]['weight'] = weight_size\n self.layermem[i]['bias'] = 4 * self.layer[i]['output_c'] # bias\n\n self.__increaseFlash(weight_size)\n self.__increaseFlash(4 * self.layer[i]['output_c'])# 32-bit bias\n \n self.peakmem = self.buffers['im2col'] + self.buffers['kernel'] + self.buffers['input_output'] + self.buffers['residual'] \n\n def dumpLayerMem(self):\n # header\n print(\"-------------------------------------------- Schedule Details --------------------------------------------\")\n print(\"----------------------| SRAM || Flash |\")\n print(\"----------------------| activation | runtime | residual | sum || weight | bias | scale |\")\n\n string = \"-------Schedule-------|\"\n maxActive = self.buffers['input_output'] \n maxRuntime = self.buffers['im2col'] + self.buffers['kernel']\n maxResidual = self.buffers['residual'] \n maxWeight = self.__sumKey(self.layermem,'weight')\n maxBias = self.__sumKey(self.layermem,'bias')\n maxScale = self.__sumKey(self.layermem,'scale')\n string += str(maxActive).ljust(14) + \"|\"\n string += str(maxRuntime).ljust(11) + \"|\"\n string += str(maxResidual).ljust(12) + \"|\"\n string += str(maxActive+maxRuntime+maxResidual).ljust(8) + \"||\"\n string += str(maxWeight).ljust(12) + \"|\"\n string += str(maxBias).ljust(10) + \"|\"\n string += str(maxScale).ljust(10) + \"|\"\n print(string)\n for i in range(len(self.layermem)):\n string = \"\"\n string += str(i) + \":\" + self.layer[i]['op']\n string = string.ljust(22) + \"|\"\n SRAM = 0\n if \"activation\" in self.layermem[i]:\n substr = str(self.layermem[i]['activation']) + \" (\" + \"{:.0%}\".format(self.layermem[i]['activation']/maxActive) + \")\"\n string += substr.ljust(14) + \"|\"\n SRAM += self.layermem[i]['activation']\n if \"runtime\" in self.layermem[i]:\n substr = str(self.layermem[i]['runtime']) + \" (\" + \"{:.0%}\".format(self.layermem[i]['runtime']/maxRuntime) + \")\"\n string += substr.ljust(11) + \"|\"\n SRAM += self.layermem[i]['runtime']\n else:\n #SRAM end\n string = string.ljust(49) + \"|\"\n if \"residual\" in self.layermem[i]:\n substr = str(self.layermem[i]['residual']) + \"(\" + \"{:.0%}\".format(self.layermem[i]['residual']/maxResidual) + \")\"\n string += substr.ljust(12) + \"|\"\n SRAM += self.layermem[i]['residual']\n else:\n #SRAM end\n string = string.ljust(62) + \"|\"\n\n string += str(SRAM)\n string = string.ljust(71) + \"||\"\n\n if \"weight\" in self.layermem[i]:\n substr = str(self.layermem[i]['weight']) + \" (\" + \"{:.0%}\".format(self.layermem[i]['weight']/maxWeight) + \")\"\n string += str(substr).ljust(12) + \"|\"\n if \"bias\" in self.layermem[i]: \n substr = str(self.layermem[i]['bias']) + \" (\" + \"{:.0%}\".format(self.layermem[i]['bias']/maxBias) + \")\"\n string += str(substr).ljust(10) + \"|\"\n if \"scale\" in self.layermem[i]:\n substr = str(self.layermem[i]['scale']) + \" (\" + \"{:.0%}\".format(self.layermem[i]['scale']/maxScale) + \")\"\n string += str(substr).ljust(10) + \"|\"\n print(string)\n pass\n\n def __sumKey(self, layers, key):\n result = 0\n for l in range(len(layers)): 
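# Sketch of the buffer bookkeeping pattern used throughout the scheduler
# above: each named buffer grows to its largest single demand (a running
# max, never a sum), while peak SRAM is the sum of the per-buffer
# high-water marks, since all four buffers coexist at runtime.

def plan(demands):
    buffers = {"input_output": 0, "residual": 0, "im2col": 0, "kernel": 0}
    for name, size in demands:
        buffers[name] = max(buffers[name], size)  # enlarge, never shrink
    return buffers, sum(buffers.values())

demands = [("input_output", 9216), ("im2col", 1152),
           ("input_output", 12544), ("kernel", 640)]
buffers, peak = plan(demands)
assert buffers["input_output"] == 12544 and peak == 14336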
\n if key in layers[l]: \n result += layers[l][key]\n\n return result\n\n def getBuffers(self):\n return self.buffers\n \n # Maximum binary size: This should be updated if any change in the inference side\n # TODO: Combine with code generation to get more accurate result\n BINARAY_PRESERVE = 110 * 1024\n def profileResult(self):\n return self.peakmem , self.flash + self.BINARAY_PRESERVE\n\n # private functions\n def __increaseFlash(self, size):\n self.flash += size\n\n def __getBufferAddress(self, location, tensorSize):\n if location == 'front':\n return 0\n elif location == 'end':\n return self.buffers['input_output'] - tensorSize\n elif location == 'residual':\n return 0\n else:\n assert 1 == 0, \"unexpected tensor location\"\n\n def __flatsize(self, params, target_str):\n ret_size = 0\n\n if target_str == \"input\":\n if params['input_dim'] == 3:\n ret_size = params['input_h'] * params['input_w'] * params['input_c']\n elif params['input_dim'] == 2:\n ret_size = params['input_h'] * params['input_c']\n elif target_str == \"input2\":\n if params['input2_dim'] == 3:\n ret_size = params['input2_h'] * params['input2_w'] * params['input_c']\n elif params['input2_dim'] == 2:\n ret_size = params['input2_h'] * params['input_c']\n elif target_str == \"output\":\n if params['output_dim'] == 3:\n ret_size = params['output_h'] * params['output_w'] * params['output_c']\n elif params['output_dim'] == 2:\n ret_size = params['output_h'] * params['output_c']\n\n return ret_size\n\n def __enlargeBuffer(self, buf_str, size): \n self.buffers[buf_str] = max(self.buffers[buf_str], size)\n ","sub_path":"mcunet/tinyengine/MemoryScheduler.py","file_name":"MemoryScheduler.py","file_ext":"py","file_size_in_byte":14133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"125531550","text":"import pytest\n\nfrom evm.vm.gas_meter import (\n GasMeter,\n)\nfrom evm.exceptions import (\n ValidationError,\n OutOfGas,\n)\n\n\n@pytest.fixture\ndef gas_meter():\n return GasMeter(10)\n\n\n@pytest.mark.parametrize(\n \"value,is_valid\",\n (\n (-1, False),\n (0, True),\n (10, True),\n (2**256, False),\n ('a', False),\n )\n)\ndef test_start_gas_on_instantiation(value, is_valid):\n if is_valid:\n meter = GasMeter(value)\n assert meter.start_gas == value\n assert meter.gas_remaining == value\n assert meter.gas_refunded == 0\n else:\n with pytest.raises(ValidationError):\n GasMeter(value)\n\n\n@pytest.mark.parametrize(\n \"consume,reason,is_valid\",\n (\n (-1, \"Reason\", False),\n (0, \"Reason\", True),\n (1, \"Reason\", True),\n )\n)\ndef test_consume_gas_rejects_negative_values(gas_meter, consume, reason, is_valid):\n if is_valid:\n gas_meter.consume_gas(consume, reason)\n assert gas_meter.gas_remaining == gas_meter.start_gas - consume\n else:\n with pytest.raises(ValidationError):\n gas_meter.consume_gas(consume, reason)\n\n\n@pytest.mark.parametrize(\n \"return_amt,is_valid\",\n (\n (-1, False),\n (0, True),\n (1, True),\n )\n)\ndef test_return_gas_rejects_negative_values(gas_meter, return_amt, is_valid):\n if is_valid:\n gas_meter.return_gas(return_amt)\n assert gas_meter.gas_remaining == (gas_meter.start_gas + return_amt)\n else:\n with pytest.raises(ValidationError):\n gas_meter.return_gas(return_amt)\n\n\n@pytest.mark.parametrize(\n \"refund,is_valid\",\n (\n (-1, False),\n (0, True),\n (1, True),\n )\n)\ndef test_refund_gas_rejects_negative_values(gas_meter, refund, is_valid):\n if is_valid:\n gas_meter.refund_gas(refund)\n assert gas_meter.gas_refunded == refund\n 
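# Minimal sketch of the metering contract exercised by the tests above
# (an assumption -- the real GasMeter lives in evm.vm.gas_meter, with
# fuller validation): consuming past zero raises, returned gas goes back
# to the meter, and refunds accumulate in their own counter.

class OutOfGas(Exception):
    pass

class GasMeter:
    def __init__(self, start_gas):
        if not isinstance(start_gas, int) or start_gas < 0:
            raise ValueError("start_gas must be a non-negative integer")
        self.start_gas = self.gas_remaining = start_gas
        self.gas_refunded = 0

    def consume_gas(self, amount, reason):
        if amount > self.gas_remaining:
            raise OutOfGas(reason)
        self.gas_remaining -= amount

    def return_gas(self, amount):
        self.gas_remaining += amount

    def refund_gas(self, amount):
        self.gas_refunded += amount

meter = GasMeter(10)
meter.consume_gas(5, "storage write")
meter.return_gas(5)
meter.refund_gas(5)
assert (meter.gas_remaining, meter.gas_refunded) == (10, 5)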
else:\n with pytest.raises(ValidationError):\n gas_meter.refund_gas(refund)\n\n\n@pytest.mark.parametrize(\n \"consume,reason,is_valid\",\n (\n (10, \"Reason\", True),\n (11, \"Reason\", False),\n )\n)\ndef test_consume_gas_spends_or_raises_exception(gas_meter, consume, reason, is_valid):\n assert gas_meter.gas_remaining == 10\n if is_valid:\n gas_meter.consume_gas(consume, reason)\n assert gas_meter.gas_remaining == 0\n else:\n with pytest.raises(OutOfGas):\n gas_meter.consume_gas(consume, reason)\n\n\ndef test_consumption_return_refund_work_correctly(gas_meter):\n assert gas_meter.gas_remaining == 10\n assert gas_meter.gas_refunded == 0\n gas_meter.consume_gas(5, \"Reason\")\n assert gas_meter.gas_remaining == 5\n gas_meter.return_gas(5)\n assert gas_meter.gas_remaining == 10\n gas_meter.refund_gas(5)\n assert gas_meter.gas_refunded == 5\n","sub_path":"tests/core/gas_meter/test_gas_meter.py","file_name":"test_gas_meter.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112627671","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/torchrs/module/module.py\n# Compiled at: 2014-09-19 06:03:51\n__author__ = 'Binh Vu '\nimport os, json\nfrom ..util import getlogger\nfrom ..git.git import Git\nfrom .postprocess import PostProcess\n\nclass Module(object):\n\n def __init__(self, location):\n self.location = os.path.abspath(location)\n if not os.path.exists(self.location):\n raise Exception(('Module directory does not exists ({0})').format(self.location))\n self.logger = getlogger('torch.module')\n\n @staticmethod\n def isTorchModule(conf):\n return conf['host'].find('.git') != -1\n\n def getDependencies(self):\n modules = []\n with open(os.path.join(self.location, 'composer.json')) as (f):\n composer = json.loads(f.read())\n if 'dependencies' not in composer:\n composer['dependencies'] = {}\n for package, conf in composer['dependencies'].iteritems():\n conf['name'] = package\n if Module.isTorchModule(conf):\n modules.append(conf)\n\n return modules\n\n def verify(self):\n if not Git.isGitRepo(self.location):\n return (False, 'Not a git repo')\n if os.listdir(self.location) == []:\n return (False, 'Empty directory')\n if not os.path.exists(os.path.join(self.location, 'composer.json')):\n return (False, 'Missing composer.json ' + os.path.join(self.location, 'composer.json'))\n return (True, '')\n\n def updateModuleStatus(self, gitconfig):\n return (\n True, '')\n\n def updateModuleConfig(self, modules):\n if os.path.exists(os.path.join(self.location, '__init__.py')):\n mode = 'r+b'\n else:\n mode = 'w+b'\n with open(os.path.join(self.location, '__init__.py'), mode) as (f):\n content = f.read()\n content = content.split('\\n')\n if content[0].find('# Auto generated by torchrs') != 0:\n self.logger.debug('Not detect torch config, generating ...')\n update = [\n '# Auto generated by torchrs',\n 'import sys',\n ('sys.path.append(\"{0}\")').format(modules[0]),\n '# Auto generated by torchrs']\n else:\n update = []\n content = ('\\n').join(update + content)\n self.logger.debug('Update __init__.py')\n f.seek(0)\n f.write(content)\n self.logger.debug('Update .gitignore')\n if not os.path.exists(os.path.join(self.location, '.gitignore')):\n mode = 'w+b'\n else:\n mode = 'r+b'\n with open(os.path.join(self.location, '.gitignore'), mode) as (f):\n content = 
f.read()\n content = content.split('\\n')\n ignoreFiles = {'/modules': 1, \n '*.pyc': 1}\n for line in content:\n for key in ignoreFiles:\n if line.strip() == key:\n ignoreFiles[key] = 0\n\n for key, value in ignoreFiles.iteritems():\n if value == 1:\n f.write(key + '\\n')\n\n def postProcess(self, config, extra):\n processor = PostProcess(self)\n if 'postexecute' not in config:\n with open(os.path.join(self.location, 'composer.json')) as (f):\n composer = json.loads(f.read())\n if 'postexecute' in composer:\n config['postexecute'] = composer['postexecute']\n processor.execute(config, extra)","sub_path":"pycfiles/torchrs-0.2.3-py2.7/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"222119831","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import NavSatFix\nimport time\nimport json\n\n\nWPTime = 0.5 # How long between two WPs?\n\n\n# Internal variables\nwaypoints = []\ngates = []\ncurrent_location = {\"lon\": 0, \"lat\": 0, \"alt\": 0}\nstopped = False\nlastWP = time.time()\n\ntime.sleep(5) # Wait for mavros to launch\n\n# Callback of global position of rover - add waypoint to list if WPTime has passed\ndef cbPosition(msg):\n global lastWP,stopped,current_location,WPTime,waypoints\n if stopped: return\n current_location = {\"lat\": msg.latitude, \"lon\": msg.longitude, \"alt\": msg.altitude}\n if time.time() - lastWP > WPTime:\n waypoints.append(current_location)\n lastWP = time.time()\n\n\nrospy.init_node('WPRecorder') # ROS setup\n\n# Register callback\nsub_position = rospy.Subscriber(\"/mavros/global_position/global\", NavSatFix, cbPosition, queue_size=1)\n\n\nprint (\"Enter [g] to add a gate at the current location\")\nprint (\"Enter [q] to stop the script and save the waypoints\")\nprint()\ninputCmd = raw_input()\n\n# If input is 'g', we append a entry to gates array\nwhile inputCmd != 'q':\n if inputCmd == 'g': gates.append(current_location)\n inputCmd = raw_input()\n\nstopped = True\n\n# Construct json and output it\njsonRaw = {\"Waypoints\": waypoints, \"Gates\": gates}\njsonString = json.dumps(jsonRaw, indent=4)\n\nprint(jsonString)\n\n\n# Store json to file\ntext_file = open(\"/home/julien/RoboDroneRace/KnightsOfNyquistRDR/Path.json\", \"w\")\ntext_file.write(jsonString)\ntext_file.close()\nrospy.signal_shutdown('Quit')\n","sub_path":"catkin_ws/src/10-waypointcontrol/wp_controller/src/wp_recorder_node.py","file_name":"wp_recorder_node.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385478129","text":"\"\"\"Apipu\"\"\"\ndef main():\n \"\"\"Main Function\"\"\"\n position = []\n slope = []\n while True:\n text = input()\n if text == \"Finish\":\n break\n position.append(text)\n for i in position:\n num_x = int(i.split(\",\")[0])\n num_y = int(i.split(\",\")[1])\n if num_x != 0:\n slope.append(num_y/num_x)\n else:\n slope.append(\"vertical\")\n slope = list(set(slope))\n print(len(slope))\nmain()\n","sub_path":"Python5/X-Burner.py","file_name":"X-Burner.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"545431450","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport piexif\nimport threading\nfrom PIL import Image\nfrom optparse import OptionParser\n\ntry:\n import Queue as queue\nexcept ImportError:\n import 
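# Sketch of the throttling rule in cbPosition above: append a waypoint
# only when at least WPTime seconds have passed since the last one. The
# injectable clock is an assumption added so the check is deterministic.

import time

class WaypointRecorder:
    def __init__(self, interval=0.5, clock=time.time):
        self.interval, self.clock = interval, clock
        self.waypoints, self._last = [], clock()

    def on_fix(self, lat, lon, alt):
        now = self.clock()
        if now - self._last > self.interval:
            self.waypoints.append({"lat": lat, "lon": lon, "alt": alt})
            self._last = now

ticks = iter([0.0, 0.1, 0.7])
rec = WaypointRecorder(interval=0.5, clock=lambda: next(ticks))
rec.on_fix(48.1, 11.5, 520)   # 0.1 s since start: dropped
rec.on_fix(48.2, 11.6, 521)   # 0.7 s since start: recorded
assert len(rec.waypoints) == 1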
queue\n\nq = queue.Queue()\nlock = threading.Lock()\n\ndef addtime(time, exif=None):\n if not exif:\n exif = piexif.load(piexif.dump({}))\n exif['Exif'][piexif.ExifIFD.DateTimeOriginal] = time\n exif['Exif'][piexif.ExifIFD.DateTimeDigitized] = time\n return exif\n\ndef fuckexif(name, time):\n try:\n img = Image.open(name)\n info = img._getexif()\n except Exception as e:\n if time and '_getexif' in str(e) and os.path.splitext(name)[1].lower() in ('.jpg', '.jpeg'):\n info = addtime(time)\n img.save(name, format=\"JPEG\", exif=piexif.dump(info))\n img.close()\n return True\n lock.acquire()\n print(\"%s: %s\" % (name, e))\n lock.release()\n return False\n if info:\n exif = piexif.load(img.info['exif'])\n exif['GPS'] = {}\n etime = exif['Exif'].get(piexif.ExifIFD.DateTimeOriginal) or exif['Exif'].get(piexif.ExifIFD.DateTimeDigitized)\n if not etime and time:\n exif = addtime(time, exif)\n elif time:\n exif = addtime(time)\n else:\n img.close()\n return True\n exif = piexif.dump(exif)\n img.save(name, exif=exif)\n img.close()\n return True\n\ndef fuck(time):\n while True:\n try:\n path = q.get_nowait()\n except queue.Empty:\n break\n lock.acquire()\n print(\"Fucking %s...\" % path)\n lock.release()\n fuckexif(path, time)\n return\n\ndef main():\n parser = OptionParser(usage=\"Usage: %prog -p [-t <1999:00:00 00:00:00>]\")\n parser.add_option(\"-p\", \"--path\", dest=\"path\", type=\"string\", help=\"specify picture a path or a directory to remove Exif info\")\n parser.add_option(\"-t\", \"--time\", dest=\"time\", type=\"string\", help=\"specify time to the file without Exif info\") \n opts, args = parser.parse_args()\n path = opts.path\n time = opts.time\n if not path:\n parser.print_help()\n os.sys.exit(1)\n if os.path.isfile(path):\n print(\"Fucking %s...\" % path)\n fuckexif(path, time)\n return\n for path,dirs,files in os.walk(path):\n for file in files:\n q.put(os.path.join(path, file))\n size = q.qsize()\n for i in range(1 if size < 50 else 25):\n threading.Thread(target=fuck, args=(time,)).start()\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Exif/delexif.py","file_name":"delexif.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278372408","text":"# view\n# coding = utf-8\nimport time, datetime\n\nimport xlrd # excel读工具\nfrom django import forms\nfrom django.utils.translation import gettext as _\nfrom django.core.exceptions import ValidationError\nfrom device.models import *\nfrom django.http import HttpResponseRedirect, JsonResponse\nimport sys\nfrom django.shortcuts import render, HttpResponse\nimport json\nfrom django.shortcuts import render_to_response\nfrom django.core import serializers\nfrom django.db.models import Q\nfrom django.core.mail import EmailMessage\nfrom django.template import loader\nfrom device.settings import EMAIL_HOST_USER\nfrom django.core.paginator import *\nimport operator\nimport django\n\nif django.VERSION >= (1, 7): # 自动判断版本\n django.setup()\n\nst = datetime.datetime.now()\nprint(\"st\", st)\nst = str(st)\nst = datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f')\n\n\"\"\"这是傻比需求,怎么写\"\"\"\n\ndef outer(key):\n def runtime(func):\n \"\"\"\n 时间装饰器用于记录程序运行时间\n :param func:\n :return:\n \"\"\"\n def int_time(*args, **kwargs):\n\n start_time = datetime.datetime.now() # 程序开始时间\n res = func(*args, **kwargs)\n over_time = datetime.datetime.now() # 程序结束时间\n total_time = (over_time-start_time).total_seconds()\n print(\"函数%s\"%key,':程序共计%s秒' % 
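# Sketch of the fan-out pattern in delexif.py above: preload a Queue,
# then let worker threads drain it with get_nowait() until Empty, taking
# the lock only around shared state. (The doubling task is a stand-in
# for the per-file EXIF rewrite.)

import queue
import threading

def drain(q, results, lock):
    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            break                       # queue exhausted: worker exits
        with lock:
            results.append(item * 2)

q = queue.Queue()
for i in range(100):
    q.put(i)

results, lock = [], threading.Lock()
workers = [threading.Thread(target=drain, args=(q, results, lock))
           for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()
assert sorted(results) == [i * 2 for i in range(100)]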
total_time)\n return res\n return int_time\n return runtime\n\n\n\ndef login_req(request):\n return render_to_response('login.html')\n\n\ndef login(request):\n username = \"admin\"\n password = \"admin\"\n\n # us = user.objects.filter(username=username)\n # pwd = user.objects.filter(password=password)\n\n if username == \"admin\":\n if password == \"admin\":\n return HttpResponseRedirect('/index/')\n print('密码不正确')\n else:\n print('用户名错误')\n\n\n# 注册函数\ndef logon(request):\n try:\n username = request.POST.get('username')\n password = request.POST.get('password')\n us = user.objects.values_list(\"username\")\n if us == username: print('注册的用户名重复')\n user.objects.create(username=username, password=password, date=st)\n return 1\n except:\n import traceback;\n traceback.print_exc()\n\n\n# 测试\ndef showdata(request): # request\n rack = Rack.objects.all()\n a = []\n sun = 0\n for r in rack:\n data = {\n \"id\": r.id,\n \"Place_id\": r.rack_id,\n }\n a.append(data)\n sun += 1\n totalpages = sun\n\n return render_to_response('logon2.html', {\"totalpages\": totalpages, \"currpage\": \"10\", \"totalrecords\": \"14\",\n \"rows\": a}) # 返回到logon页面渲染数据\n\n\ndef result(request):\n # 返回Unumber和pdu\n print(\"this is result\")\n seltype = request.GET.get('seltype') # 所属类型\n value = request.GET.get('value') # 类型\n content = request.GET.get('content') # 机器名\n if seltype == \"1\":\n \"下拉框时返回数据的来源\"\n if value == \"A\":\n res = Device.objects.filter(device_type='网络设备', device_subtype=content, state=0, rack_id=9999)\n elif value == \"B\":\n res = Device.objects.filter(device_type='存储设备', device_subtype=content, state=0, rack_id=9999)\n elif value == \"C\":\n res = Device.objects.filter(device_type='服务器', device_subtype=content, state=0, rack_id=9999)\n elif seltype == \"2\":\n \"模糊查询的数据来源\"\n res = Device.objects.filter(\n Q(ci__contains=content) | Q(sn__contains=content) | Q(device_type__contains=content) | Q(\n device_subtype__contains=content) | Q(asset__contains=content), state=0, rack_id=9999)\n elif seltype == \"3\":\n ic = []\n s = 0\n res = Device.objects.filter(state=0, rack_id=9999)\n else:\n print(\"类型为空\")\n pass\n\n ic = []\n page = 0\n\n for i in res:\n page += 1\n if i.rack_id_id is None: i.rack_id_id = \"\"\n if i.sn is None: i.sn = \"\"\n if i.device_name is None: i.device_name = \"\"\n if i.size is None: i.size = \"\"\n if i.ip_kvm is None: i.ip_kvm = \"\"\n if i.ci is None: i.ci = \"\"\n data = {\"ci\": i.ci, \"sid\": page, \"rack_id\": i.rack_id_id, \"sn\": i.sn, \"device_name\": str(i.device_name),\n \"size\": str(i.size), \"ip_kvm\": str(i.ip_kvm), \"device_name\": str(i.device_name),\n \"device_subtype\": str(i.device_subtype)}\n ic.append(data)\n\n # print(\"ic = \", ic)\n data = json.dumps(ic)\n return HttpResponse(data, content_type='application/json') # 返回jsno数据到页面\n\n\ndef save(request):\n ic = []\n data = {}\n type = request.GET.get(\"type\")\n if type == \"0\":\n # 此处保存机柜选择\n rack = request.GET.get(\"rack\")\n U = request.GET.get(\"U\")\n pdul = request.GET.get(\"pdul\")\n pdur = request.GET.get(\"pdur\")\n sn = request.GET.get(\"sn\")\n rackid = Rack.objects.filter(rack_id=rack)\n rack_id = []\n for i in rackid:\n rack_id = i.id\n if rack_id:\n Device.objects.filter(rack_id_id=9999, sn=sn).update(u_num=U, rack_id_id=rack_id, state=1)\n if U:\n u = [int(x) for x in U.split(',')]\n U_rack.objects.filter(u_num__in=u, rack_id_id=rack_id).update(state=1)\n if pdul and pdul!=\"undefined\":\n Pdu_left.objects.filter(position_left=pdul, pdu_id_id=rack_id).update(state=1)\n if pdur and 
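# Sketch of the `outer`/`runtime` decorator factory above, with
# functools.wraps added so the wrapped function keeps its own name and
# docstring (the original omits it), and perf_counter used for timing.

import functools
import time

def timed(label):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = func(*args, **kwargs)
            print("%s took %.6f s" % (label, time.perf_counter() - start))
            return result
        return wrapper
    return decorator

@timed("busy loop")
def work(n):
    return sum(range(n))

assert work(10_000) == 49_995_000 and work.__name__ == "work"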
pdul!=\"undefined\":\n Pdu_right.objects.filter(position_right=pdur, pdu_id_id=rack_id).update(state=1)\n # elif type == \"1\":\n # # x86服务器布线时候保存\n # d = request.GET.get(\"d\")\n # sn = request.GET.get(\"sn\")\n # switch = \"no\"\n # if d != '--*--':\n # a = request.GET.get(\"a\")\n # b = request.GET.get(\"b\")\n # c = request.GET.get(\"c\")\n # e = request.GET.get(\"e\")\n # port.objects.filter(id=e, port_num=d).update(state=1) # 交换机的端口状态\n # port.objects.filter(id=b).update(state=1) # 服务器的端口状态\n # portlist.objects.create(server_interface=a, switchboard=c, switchboard_port=d, sn=sn)\n # switch = \"yes\"\n #\n # d1 = request.GET.get(\"d1\")\n # if d1 != '--*--':\n # a1 = request.GET.get(\"a1\")\n # b1 = request.GET.get(\"b1\")\n # c1 = request.GET.get(\"c1\")\n # e1 = request.GET.get(\"e1\")\n # port.objects.filter(id=e1).update(state=1)\n # port.objects.filter(id=b1).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a1, switchboard2=c1, switchboard_port2=d1)\n # else:\n # portlist.objects.create(server_interface=a1, switchboard=c1, switchboard_port=d1, sn=sn)\n # switch = \"yes\"\n #\n # d2 = request.GET.get(\"d2\")\n # if d2 != '--*--':\n # a2 = request.GET.get(\"a2\")\n # b2 = request.GET.get(\"b2\")\n # c2 = request.GET.get(\"c2\")\n # port.objects.filter(id=b2).update(state=1)\n # e2 = request.GET.get(\"e2\")\n # port.objects.filter(id=e2, port_num=d2).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a2, switchboard2=c2, switchboard_port2=d2)\n # else:\n # portlist.objects.create(server_interface=a2, switchboard=c2, switchboard_port=d2, sn=sn)\n # switch = \"yes\"\n #\n # d3 = request.GET.get(\"d3\")\n # if d3 != '--*--':\n # a3 = request.GET.get(\"a3\")\n # b3 = request.GET.get(\"b3\")\n # c3 = request.GET.get(\"c3\")\n # e3 = request.GET.get(\"e3\")\n # port.objects.filter(id=e3, port_num=d3).update(state=1)\n # port.objects.filter(id=b3).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a3, switchboard2=c3, switchboard_port2=d3)\n # else:\n # portlist.objects.create(server_interface=a3, switchboard=c3, switchboard_port=d3, sn=sn)\n # switch = \"yes\"\n #\n # d4 = request.GET.get(\"d4\")\n # if d4 != '--*--':\n # a4 = request.GET.get(\"a4\")\n # b4 = request.GET.get(\"b4\")\n # c4 = request.GET.get(\"c4\")\n # e4 = request.GET.get(\"e4\")\n # port.objects.filter(id=e4, port_num=d4).update(state=1)\n # port.objects.filter(id=b4).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a4, switchboard2=c4, switchboard_port2=d4)\n # else:\n # portlist.objects.create(server_interface=a4, switchboard=c4, switchboard_port=d4, sn=sn)\n # switch = \"yes\"\n #\n # d5 = request.GET.get(\"d5\")\n # if d5 != '--*--':\n # a5 = request.GET.get(\"a5\")\n # b5 = request.GET.get(\"b5\")\n # c5 = request.GET.get(\"c5\")\n # e5 = request.GET.get(\"e5\")\n # port.objects.filter(id=e5, port_num=d5).update(state=1)\n # port.objects.filter(id=b5).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a5, switchboard2=c5, switchboard_port2=d5)\n # else:\n # portlist.objects.create(server_interface=a5, switchboard=c5, switchboard_port=d5, sn=sn)\n # switch = \"yes\"\n #\n # d6 = request.GET.get(\"d6\")\n # if d6 != '--*--':\n # a6 = request.GET.get(\"a6\")\n # b6 = request.GET.get(\"b6\")\n # c6 = request.GET.get(\"c6\")\n # e6 = request.GET.get(\"e6\")\n # port.objects.filter(id=e6, port_num=d6).update(state=1)\n # 
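# Sketch of the U-position handling in save() above: the request sends U
# slots as a comma-separated string ("12,13"), which is split and mapped
# to ints before the matching U_rack rows are marked occupied. A guarded
# parser fails loudly on malformed input instead of raising mid-update.

def parse_u_positions(raw):
    if not raw:
        return []
    try:
        return [int(part) for part in raw.split(",")]
    except ValueError:
        raise ValueError("malformed U list: %r" % raw)

assert parse_u_positions("12,13") == [12, 13]
assert parse_u_positions("") == []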
port.objects.filter(id=b6).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a6, switchboard2=c6, switchboard_port2=d6)\n # else:\n # portlist.objects.create(server_interface=a6, switchboard=c6, switchboard_port=d6, sn=sn)\n # switch = \"yes\"\n #\n # d7 = request.GET.get(\"d7\")\n # if d7 != '--*--':\n # a7 = request.GET.get(\"a7\")\n # b7 = request.GET.get(\"b7\")\n # c7 = request.GET.get(\"c7\")\n # e7 = request.GET.get(\"e7\")\n # port.objects.filter(id=e7, port_num=d7).update(state=1)\n # port.objects.filter(id=b7).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a7, switchboard2=c7, switchboard_port2=d7)\n # else:\n # portlist.objects.create(server_interface=a7, switchboard=c7, switchboard_port=d7, sn=sn)\n # switch = \"yes\"\n #\n # d8 = request.GET.get(\"d8\")\n # if d8 != '--*--':\n # a8 = request.GET.get(\"a8\")\n # b8 = request.GET.get(\"b8\")\n # c8 = request.GET.get(\"c8\")\n # e8 = request.GET.get(\"e8\")\n # port.objects.filter(id=e8, port_num=d8).update(state=1)\n # port.objects.filter(id=b8).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a8, switchboard2=c8, switchboard_port2=d8)\n # else:\n # portlist.objects.create(server_interface=a8, switchboard=c8, switchboard_port=d8, sn=sn)\n # switch = \"yes\"\n # d9 = request.GET.get(\"d9\")\n # if d9 != '--*--':\n # a9 = request.GET.get(\"a9\")\n # b9 = request.GET.get(\"b9\")\n # c9 = request.GET.get(\"c9\")\n # e9 = request.GET.get(\"e9\")\n # port.objects.filter(id=e9, port_num=d9).update(state=1)\n # port.objects.filter(id=b9).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a9, switchboard2=c9, switchboard_port2=d9)\n # else:\n # portlist.objects.create(server_interface=a9, switchboard=c9, switchboard_port=d9, sn=sn)\n # switch = \"yes\"\n #\n # d10 = request.GET.get(\"d10\")\n # if d10 != '--*--':\n # a10 = request.GET.get(\"a10\")\n # b10 = request.GET.get(\"b10\")\n # c10 = request.GET.get(\"c10\")\n # e10 = request.GET.get(\"e10\")\n # port.objects.filter(id=e10, port_num=d10).update(state=1)\n # port.objects.filter(id=b10).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a10, switchboard2=c10, switchboard_port2=d10)\n # else:\n # portlist.objects.create(server_interface=a10, switchboard=c10, switchboard_port=d10, sn=sn)\n # switch = \"yes\"\n #\n # d11 = request.GET.get(\"d11\")\n # if d11 != '--*--':\n # a11 = request.GET.get(\"a11\")\n # b11 = request.GET.get(\"b11\")\n # c11 = request.GET.get(\"c11\")\n # e11 = request.GET.get(\"e11\")\n # port.objects.filter(id=e11, port_num=d11).update(state=1)\n # port.objects.filter(id=b11).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a11, switchboard2=c11, switchboard_port2=d11)\n # else:\n # portlist.objects.create(server_interface=a11, switchboard=c11, switchboard_port=d11, sn=sn)\n # switch = \"yes\"\n #\n # d12 = request.GET.get(\"d12\")\n # if d12 != '--*--':\n # a12 = request.GET.get(\"a12\")\n # b12 = request.GET.get(\"b12\")\n # c12 = request.GET.get(\"c12\")\n # e12 = request.GET.get(\"e12\")\n # port.objects.filter(id=e12, port_num=d12).update(state=1)\n # port.objects.filter(id=b12).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a12, switchboard2=c12, switchboard_port2=d12)\n # else:\n # portlist.objects.create(server_interface=a7, switchboard=c7, switchboard_port=d7, sn=sn)\n # switch = 
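# The commented-out wiring code above repeats one near-identical block
# fourteen times (d, d1, ..., d14). A hedged refactor sketch: derive the
# parameter names from the index and loop once. The a/b/c/d/e names
# follow the originals; `save_link` is a hypothetical stand-in for the
# port/portlist updates.

def wire_ports(params, save_link, count=14):
    for i in [""] + [str(n) for n in range(1, count + 1)]:
        d = params.get("d" + i)
        if not d or d == "--*--":
            continue                    # this port slot is unused
        save_link(interface=params.get("a" + i),
                  server_port_id=params.get("b" + i),
                  switchboard=params.get("c" + i),
                  switch_port=d,
                  switch_port_id=params.get("e" + i))

links = []
wire_ports({"d": "G0/1", "a": "eth0", "d3": "G0/2", "a3": "eth1"},
           lambda **kw: links.append(kw))
assert len(links) == 2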
\"yes\"\n #\n # d13 = request.GET.get(\"d13\")\n # if d13 != '--*--':\n # a13 = request.GET.get(\"a13\")\n # b13 = request.GET.get(\"b13\")\n # c13 = request.GET.get(\"c13\")\n # e13 = request.GET.get(\"e13\")\n # port.objects.filter(id=e13, port_num=d13).update(state=1)\n # port.objects.filter(id=b13).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface2=a13, switchboard2=c13, switchboard_port2=d13)\n # else:\n # portlist.objects.create(server_interface=a13, switchboard=c13, switchboard_port=d13, sn=sn)\n # switch = \"yes\"\n #\n # d14 = request.GET.get(\"d14\")\n # if d14 != '--*--':\n # a14 = request.GET.get(\"a14\")\n # b14 = request.GET.get(\"b14\")\n # c14 = request.GET.get(\"c14\")\n # e14 = request.GET.get(\"e14\")\n # port.objects.filter(id=e14, port_num=d14).update(state=1)\n # port.objects.filter(id=b14).update(state=1)\n # if switch == \"yes\":\n # portlist.objects.update(server_interface_kvm=a14, switchboard_kvm=c14, switchboard_port_kvm=d14)\n # else:\n # portlist.objects.create(server_interface_kvm=a14, switchboard_kvm=c14, switchboard_port_kvm=d14, sn=sn)\n elif type == \"2\":\n # 显示工单数据前先保存到数据库\n device_subtype = request.GET.get(\"device_subtype\")\n sn = request.GET.get(\"sn\")\n rack_id = request.GET.get(\"rack_id\")\n unum = request.GET.get(\"unum\")\n managing_ip = request.GET.get(\"managing_ip\" or 0)\n host_ip = request.GET.get(\"host_ip\", 0)\n lpdu = request.GET.get(\"lpdu\", 0)\n rpdu = request.GET.get(\"rpdu\", 0)\n device_name = request.GET.get(\"device_name\", \"\")\n port.objects.filter(state=2).update(state=1)\n if device_subtype == \"SAN\":\n Sanlist.objects.filter(sn=sn).update(device_subtype=device_subtype, sn=sn, rack_id=rack_id, unum=unum,\n managing_ip=managing_ip, host_ip=host_ip, device_name=device_name,\n position_left=lpdu, position_right=rpdu) # 将u数,ip数据保存到工单表(一条完整数据的3分之一)\n lists = Sanlist.objects.filter(sn=sn)\n for l in lists:\n data = {\n \"device_subtype\": l.device_subtype, \"sn\": l.sn, \"rack_id\": l.rack_id, \"unum\": l.unum,\n \"managing_ip\": l.managing_ip, \"host_ip\": l.host_ip,\n \"device_name\": l.device_name, \"lpdu\": lpdu, \"rpdu\": rpdu, \"server_interface\": l.server_interface,\n \"switchbodard_port\": l.switchboard_port,\n \"switchboard_G19\": l.switchboard_G19, \"switchboard_H01\": l.switchboard_G19, \"manage\": l.manage\n\n }\n else:\n portlist.objects.filter(sn=sn).update(device_subtype=device_subtype, sn=sn, rack_id=rack_id, unum=unum,\n managing_ip=managing_ip, host_ip=host_ip, device_name=device_name,\n position_left=lpdu,\n position_right=rpdu) # 将u数,ip数据保存到工单表(一条完整数据的3分之一)\n kvmips = IP.objects.filter(ip_addr=managing_ip)\n kvmip_netmask = ''\n for ip in kvmips: kvmip_netmask = ip.netmask\n hostips = IP.objects.filter(ip_addr=host_ip)\n hostip_netmask = ''\n hostip_vlan = ''\n hostip_gateway = ''\n for ip in hostips:\n hostip_netmask = ip.netmask\n hostip_vlan = ip.vlan_id\n hostip_gateway = ip.gateway\n # 获取相交换机位置\n match_rack_id = Rack.objects.filter(rack_id=rack_id).values(\"match_rack_id\", \"id\")\n match_rack_ids = Rack.objects.filter(rack_id=match_rack_id[0]['match_rack_id']).values(\"match_rack_id\", \"id\")\n if device_subtype == \"SAN\":\n slist = Sanlist.objects.filter(sn=sn)\n for l in slist:\n data = {\n \"device_subtype\": l.device_subtype, \"sn\": l.sn, \"size\": l.size, \"rack_id\": l.rack_id,\n \"unum\": l.unum,\n \"managing_ip\": l.managing_ip, \"kvm_netmask\": kvmip_netmask,\n \"host_ip\": l.host_ip, \"host_netmask\": hostip_netmask, \"vlan_id\": hostip_vlan,\n 
\"device_name\": l.device_name, \"lpdu\": lpdu, \"rpdu\": rpdu,\n \"gateway\": hostip_gateway, \"server_interface\": l.server_interface,\n \"server_interface\":l.server_interface,\"switchboard_port\":l.switchboard_port,\n \"manage\":l.manage,\"switchboard_G19\":l.switchboard_G19,\"switchboard_G01\":l.switchboard_G01,\n \"switchboard_H01\":l.switchboard_H01,\"switchboard_H01_tor\":l.switchboard_H01_tor\n\n\n }\n ic.append(data)\n else:\n podata = portlist.objects.filter(sn=sn)\n for l in podata:\n # 交换机U位和相邻的交换机 心跳交换机 mpo的\n tor_u = Device.objects.filter(device_subtype=\"TOR\", rack_id_id=match_rack_id[0]['id']).values(\"u_num\")\n tors_u = Device.objects.filter(device_subtype=\"TOR\", rack_id_id=match_rack_ids[0]['id']).values(\"u_num\")\n tor_hb_u = Device.objects.filter(device_subtype=\"HB-DF\", rack_id_id=match_rack_id[0]['id']).values(\"u_num\")\n tors_hb_u = Device.objects.filter(device_subtype=\"HB-DF\", rack_id_id=match_rack_ids[0]['id']).values(\n \"u_num\")\n tor_mp_u = Device.objects.filter(device_subtype=\"MPO\", rack_id_id=match_rack_id[0]['id']).values(\"u_num\")\n tors_mp_u = Device.objects.filter(device_subtype=\"MPO\", rack_id_id=match_rack_ids[0]['id']).values(\"u_num\")\n\n switchs = Device.objects.filter(device_name=l.switchboard)\n for switch in switchs:\n uposition1 = switch.u_num\n\n switchs = Device.objects.filter(device_name=l.switchboard2)\n for switch in switchs:\n uposition2 = switch.u_num\n ###########\n data = {\n \"tor_u\": tor_u[0]['u_num'], \"tors_u\": tors_u[0]['u_num'], \"tor_hb_u\": tor_hb_u[0]['u_num'],\n \"tors_hb_u\": tors_hb_u[0]['u_num'], \"tor_mp_u\": tor_mp_u[0]['u_num'],\n \"tors_mp_u\": tors_mp_u[0]['u_num'],\n\n \"device_subtype\": l.device_subtype, \"sn\": l.sn, \"size\": l.size, \"rack_id\": l.rack_id, \"unum\": l.unum,\n \"managing_ip\": l.managing_ip, \"kvm_netmask\": kvmip_netmask,\n \"host_ip\": l.host_ip, \"host_netmask\": hostip_netmask, \"vlan_id\": hostip_vlan,\n \"device_name\": l.device_name, \"lpdu\": lpdu, \"rpdu\": rpdu,\n \"match_rack_id\": match_rack_id[0]['match_rack_id'],\n \"uposition1\": uposition1, \"uposition2\": uposition2,\n \"gateway\": hostip_gateway, \"server_interface\": l.server_interface,\n\n \"switchboard\": l.switchboard, \"switchboard_port\": l.switchboard_port,\n \"server_interfaces\": l.server_interfaces, \"switchboard_ports\": l.switchboard_ports,\n\n \"server_interface2_2\": l.server_interface2_2, \"switchboard_port2_2\": l.switchboard_port2_2,\n \"server_interface2\": l.server_interface2, \"switchboard2\": l.switchboard2,\n \"switchboard_port2\": l.switchboard_port2, \"server_interface_manage\": l.server_interface_manage,\n \"switchboard_manage\": l.switchboard_manage,\n \"switchboard_port_manage\": l.switchboard_port_manage, \"server_interface_kvm\": l.server_interface_kvm,\n \"switchboard_kvm\": l.switchboard_kvm,\n \"switchboard_port_kvm\": l.switchboard_port_kvm,\n\n \"switchboard_hb\": l.switchboard_hb, \"server_interface_hb\": l.server_interface_hb,\n \"server_interface_hb2\": l.server_interface_hb2,\n \"switchboard_port_hb2\": l.switchboard_port_hb2, \"server_interface_san\": l.server_interface_san,\n \"switchboard_port_san\": l.switchboard_port_san,\n \"switchboards_san\": l.switchboards_san, \"server_interfaces_san\": l.server_interfaces_san,\n \"switchboard_ports_san\": l.switchboard_ports_san, \"switchboards2\": l.switchboards2,\n \"server_interfaces2\": l.server_interfaces2, \"switchboard_ports2\": l.switchboard_ports2,\n \"server_interfacees2\": l.server_interfacees2, \"switchboard_portes2\": 
l.switchboard_portes2,\n \"switchboard_port_hb\": l.switchboard_port_hb,\n }\n ic.append(data)\n\n elif type == \"11\":\n devicename = request.GET.get(\"devicename\")\n if devicename == \"加密机\":\n # 加密机布线的时候保存\n jsdata = request.GET.get(\"jsdata\")\n jsdata = jsdata.split(\",\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jsname = jsname.split(\",\")\n jstor = request.GET.get(\"jstor\")\n jstor = jstor.split(\",\")\n # 保存到portlist需要sn update port表的数据 device的state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n portlist.objects.create(sn=sn, switchboard_port=jsdata[0], server_interface=jsname[0], switchboard=jstor[0])\n elif devicename == \"Ceph服务器\":\n jsdata = request.GET.get(\"jsdata\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jstor = request.GET.get(\"jstor\")\n\n jsdata2 = request.GET.get(\"jsdata2\")\n jsname2 = request.GET.get(\"jsname2\")\n jstor2 = request.GET.get(\"jstor2\")\n\n jsdata_o = request.GET.get(\"jsdata_o\", \"\")\n jsname_o = request.GET.get(\"jsname_o\", \"\")\n jstor_o = request.GET.get(\"jstor_o\", \"\")\n\n jsdata_kvm = request.GET.get(\"jsdata_kvm\", \"\")\n jsname_kvm = request.GET.get(\"jsname_kvm\", \"\")\n jstor_kvm = request.GET.get(\"jstor_kvm\", \"\")\n # 保存到portlist需要sn update port表的数据 device的state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n portlist.objects.create(sn=sn, switchboard_port=jsdata, server_interface=jsname, switchboard=jstor,\n server_interface2=jsname2, switchboard2=jstor2, switchboard_port2=jsdata2,\n switchboard_port_manage=jsdata_o, server_interface_manage=jsname_o,\n switchboard_manage=jstor_o,\n switchboard_port_kvm=jsdata_kvm, server_interface_kvm=jsname_kvm,\n switchboard_kvm=jstor_kvm,\n\n )\n elif devicename == \"小型机\":\n size = request.GET.get(\"size\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsdata = request.GET.get(\"jsdata\")\n jsdata = jsdata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jsname = jsname.split(\",\")\n jstor = request.GET.get(\"jstor\")\n jstor = jstor.split(\",\")\n\n jsdata2 = request.GET.get(\"jsdata2\")\n jsdata2 = jsdata2.split(\",\")\n jsname2 = request.GET.get(\"jsname2\")\n jsname2 = jsname2.split(\",\")\n jstor2 = request.GET.get(\"jstor2\")\n jstor2 = jstor2.split(\",\")\n\n jsdata_hb = request.GET.get(\"jsdata_hb\")\n jsdata_hb = jsdata_hb.split(\",\")\n jsname_hb = request.GET.get(\"jsname_hb\")\n jsname_hb = jsname_hb.split(\",\")\n jstor_hb = request.GET.get(\"jstor_hb\")\n jstor_hb = jstor_hb.split(\",\")\n\n jsdata_o = request.GET.get(\"jsdata_o\", \"\")\n jsdata_o = jsdata_o.split(\",\")\n jsname_o = request.GET.get(\"jsname_o\", \"\")\n jsname_o = jsname_o.split(\",\")\n jstor_o = request.GET.get(\"jstor_o\", \"\")\n jstor_o = jstor_o.split(\",\")\n\n jsdataes_nas = request.GET.get(\"jsdataes_nas\", \"\")\n jsdataes_nas = jsdataes_nas.split(\",\")\n jsnamees_nas = request.GET.get(\"jsnamees_nas\", \"\")\n jsnamees_nas = jsnamees_nas.split(\",\")\n jstores_nas = request.GET.get(\"jstores_nas\", \"\")\n jstores_nas = jstores_nas.split(\",\")\n\n jsdata_kvm = request.GET.get(\"jsdata_kvm\", \"\")\n jsname_kvm = request.GET.get(\"jsname_kvm\", \"\")\n jstor_kvm = request.GET.get(\"jstor_kvm\", \"\")\n # 保存到portlist需要sn update 
port表的数据 device的state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n if size == \"4\":\n portlist.objects.create(sn=sn,\n switchboard_port=jsdata[0], server_interface=jsname[0], switchboard=jstor[0],\n server_interfaces=jsname[1], switchboard_ports=jsdata[1],\n server_interface2=jsname2[0], switchboard2=jstor2[0],\n switchboard_port2=jsdata2[0],\n server_interface2_2=jsname2[1], switchboard_port2_2=jsdata2[1],\n switchboard_hb=jstor_hb[0], server_interface_hb=jsname_hb[0],\n switchboard_port_hb=jsdata_hb[0],\n server_interface_hb2=jsname_hb[1], switchboard_port_hb2=jsdata_hb[1],\n switchboard_ports_san=jsdata_o[0], server_interfaces_san=jsname_o[0],\n switchboards_san=jstor_o[0],\n switchboard_port_san=jsdata_o[1], server_interface_san=jsname_o[1],\n switchboard_ports2=jsdataes_nas[0], server_interfaces2=jsnamees_nas[0],\n switchboards2=jstores_nas[0],\n switchboard_portes2=jsdataes_nas[1], server_interfacees2=jsnamees_nas[1],\n switchboard_port_kvm=jsdata_kvm, server_interface_kvm=jsname_kvm,\n switchboard_kvm=jstor_kvm,\n )\n elif size == \"2\":\n portlist.objects.create(sn=sn,\n switchboard_port=jsdata[0], server_interface=jsname[0], switchboard=jstor[0],\n server_interface2=jsname2[0], switchboard2=jstor2[0],\n switchboard_port2=jsdata2[0],\n switchboard_hb=jstor_hb[0], server_interface_hb=jsname_hb[0],\n switchboard_port_hb=jsdata_hb[0],\n server_interface_hb2=jsname_hb[1], switchboard_port_hb2=jsdata_hb[1],\n switchboard_ports_san=jsdata_o[0], server_interfaces_san=jsname_o[0],\n switchboards_san=jstor_o[0],\n switchboard_ports2=jsdataes_nas[0], server_interfaces2=jsnamees_nas[0],\n switchboards2=jstores_nas[0],\n switchboard_port_kvm=jsdata_kvm, server_interface_kvm=jsname_kvm,\n switchboard_kvm=jstor_kvm,\n )\n elif devicename == \"SAN\":\n jsdata = request.GET.get(\"jsdata\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jstor = request.GET.get(\"jstor\")\n jstor = jstor.split(\",\")\n\n jsdata2 = request.GET.get(\"jsdata2\")\n jsname2 = request.GET.get(\"jsname2\")\n jstor2 = request.GET.get(\"jstor2\")\n jstor2 = jstor2.split(\",\")\n\n jsdata_kvm = request.GET.get(\"jsdata_kvm\", \"\") # 从H01中选一个作为管理口\n jsname_kvm = request.GET.get(\"jsname_kvm\", \"\")\n jstor_kvm = request.GET.get(\"jstor_kvm\", \"\")\n jstor_kvm = jstor_kvm.split(\",\")\n\n jsdata_G01 = request.GET.get(\"jsdata_G01\", \"\") # 从G01中选一个作为管理口\n jsname_G01 = request.GET.get(\"jsname_G01\", \"\")\n jstor_G01 = request.GET.get(\"jstor_G01\", \"\")\n jstor_G01 = jstor_G01.split(\",\")\n\n # 保存到portlist需要sn update port表的数据 device的state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n\n is_ok = Sanlist.objects.filter(sn=sn).values(\"id\")\n if not is_ok:\n Sanlist.objects.create(sn=sn, server_interface=jsname, switchboard_port=jsdata, switchboard_G19=jstor[0])\n Sanlist.objects.create(sn=sn, server_interface=jsname2, switchboard_port=jsdata2, switchboard_H01=jstor2[0],manage=0)\n Sanlist.objects.create(sn=sn, server_interface=jsname_kvm, switchboard_port=jsdata_kvm,switchboard_H01_tor=jstor_kvm[0], manage=1)\n Sanlist.objects.create(sn=sn, server_interface=jsname_G01, switchboard_port=jsdata_G01, switchboard_G01=jstor_G01[0])\n else:\n Sanlist.objects.filter(sn=sn).update(sn=sn, server_interface=jsname, switchboard_port=jsdata,switchboard_G19=jstor[0])\n 
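# NOTE: each filter(sn=sn).update(...) call (the one above and the three below) touches every Sanlist row for this SN, not a single link row; the rows stay distinguishable only through the switchboard_* field each call sets (assumed intent).\n 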
Sanlist.objects.filter(sn=sn).update(sn=sn, server_interface=jsname2, switchboard_port=jsdata2, switchboard_H01=jstor2[0], manage=0)\n Sanlist.objects.filter(sn=sn).update(sn=sn, server_interface=jsname_kvm, switchboard_port=jsdata_kvm, switchboard_H01_tor=jstor_kvm[0], manage=1)\n Sanlist.objects.filter(sn=sn).update(sn=sn, server_interface=jsname_G01, switchboard_port=jsdata_G01,switchboard_G01=jstor_G01[0])\n elif devicename == \"TOR\":\n jsdata = request.GET.get(\"jsdata\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jstor = request.GET.get(\"jstor\")\n\n jsdata2 = request.GET.get(\"jsdata2\")\n jsname2 = request.GET.get(\"jsname2\")\n jstor2 = request.GET.get(\"jstor2\")\n\n # saving to portlist needs the sn; update the port rows and the device state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n portlist.objects.create(sn=sn, switchboard_port=jsdata, server_interface=jsname, switchboard=jstor,\n server_interface2=jsname2, switchboard2=jstor2, switchboard_port2=jsdata2,\n )\n elif devicename == \"X86服务器\":\n jsdata = request.GET.get(\"jsdata\")\n jidata = request.GET.get(\"jiddata\")\n jidata = jidata.split(\",\")\n jsname = request.GET.get(\"jsname\")\n jstor = request.GET.get(\"jstor\")\n\n jsdata2 = request.GET.get(\"jsdata2\")\n jsname2 = request.GET.get(\"jsname2\")\n jstor2 = request.GET.get(\"jstor2\")\n\n jsdata_kvm = request.GET.get(\"jsdata_kvm\", \"\")\n jsname_kvm = request.GET.get(\"jsname_kvm\", \"\")\n jstor_kvm = request.GET.get(\"jstor_kvm\", \"\")\n # saving to portlist needs the sn; update the port rows and the device state\n sn = request.GET.get(\"sn\")\n Device.objects.filter(sn=sn, state=0).update(state=1)\n port.objects.filter(id__in=jidata).update(state=2)\n portlist.objects.create(sn=sn, switchboard_port=jsdata, server_interface=jsname, switchboard=jstor,\n server_interface2=jsname2, switchboard2=jstor2, switchboard_port2=jsdata2,\n switchboard_port_kvm=jsdata_kvm, server_interface_kvm=jsname_kvm,\n switchboard_kvm=jstor_kvm,\n )\n data = json.dumps(ic)\n return HttpResponse(data, content_type='application/json') # return the JSON data to the page\n\n\ndef index(request):\n return render_to_response('index.html',\n {\"totalpages\": '10', \"currpage\": \"10\", \"totalrecords\": \"14\"}) # render the page with its paging data\n\n@outer('index_adddecice')\ndef index_adddecice(request, pindex):\n\n ## the home page defaults to page one\n if pindex == '':\n pindex = 1\n\n \"\"\"Return data for devices waiting to be racked\"\"\"\n ic = []\n s = 1\n device = Device.objects.filter(state=0, rack_id=9999)\n\n for i in device:\n data2 = {\n \"sid\": s,\n \"rack_id\": i.rack_id_id,\n \"ci\": i.ci,\n \"sn\": i.sn,\n \"device_name\": str(i.device_name),\n \"size\": str(i.size),\n \"ip_kvm\": str(i.ip_kvm),\n \"device_subtype\": str(i.device_subtype)\n }\n s += 1\n if s == 21:\n s = 1\n ic.append(data2)\n\n paginator = Paginator(ic, 20)\n page = paginator.page(int(pindex))\n ## keep the previous/next page operations within bounds\n prePage = 1\n nextPage = page.paginator.num_pages\n if page.has_previous():\n prePage = page.number - 1\n if page.has_next():\n nextPage = page.number + 1\n\n context = {'items': page, 'prePage': prePage, 'nextPage': nextPage}\n return render(request, 'addDeviceList.html', context)\n\n\ndef index_rmvDevice(request):\n \"\"\"Take devices off the rack\"\"\"\n type = request.GET.get(\"type\", \"\")\n data = []\n ic = []\n s = 0\n # if type==\"look\":\n datas=(\"TOR\",\"SAN\",\"HB-DF\",\"小型机\",\"加密机\",\"X86服务器\",\"Ceph服务器\")\n rmdevice = Device.objects.filter(state=1)\n 
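# build one display row per racked device (state=1); the rack's numeric FK is resolved to its human-readable rack_id below\n 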
for i in rmdevice:\n s += 1\n rid = Rack.objects.filter(id=i.rack_id_id).values(\"rack_id\")\n remdata = {\n \"sid\": s,\n \"ci\": i.ci,\n \"sn\": i.sn,\n \"rack_id\":rid[0]['rack_id'],\n \"device_name\": str(i.device_name),\n \"size\": str(i.size),\n \"ip_kvm\": str(i.ip_kvm),\n \"device_subtype\": str(i.device_subtype)\n }\n data.append(remdata)\n\n # elif type == \" del\":\n # id=request.GET.get(\"id\",\"\")\n # sn = request.GET.get(\"sn\",\"\")\n # Device.objects.filter(sn=sn,state=1).update(state=2)#设置2的属性为删除状态\n\n return render(request, 'rmvDevice.html', context={'items': data})\n\n\ndef req(request):\n \"\"\"Return the racks that are still available\"\"\"\n type = request.GET.get('type')\n rowDatas = request.GET.get('rowDatas')\n device_subtype = request.GET.get(\"device_subtype\")\n size = request.GET.get(\"size\")\n rack = Rack.objects.filter(state=0, rack_id=rowDatas) # rack_id=9999 marks an uninitialized rack\n rd = 0\n a_f = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\")\n b_df = (\"A10\", \"B10\", \"C10\", \"D10\", \"E10\", \"F10\")\n for j in rack: rd = j.id # rack id, used as the Rack foreign key\n a = []\n sun = 0\n\n if type == \"1\":\n # return available racks\n rack_list = Rack.objects.filter(u_rack__state__contains=0)\n rack_list.query.group_by = ['id']\n if device_subtype==\"X86服务器\" or device_subtype==\"小型机\" or device_subtype==\"Ceph服务器\" or device_subtype==\"心跳交换机\":\n\n for r in rack_list:\n unumberdata = []\n state_ok = False\n if r.rack_id[0] in a_f:\n unumber = U_rack.objects.filter(rack_id_id=r.id,state=0)\n if device_subtype==\"心跳交换机\" and r.rack_id in b_df:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n elif device_subtype==\"X86服务器\":\n for un in unumber:\n if 16 <= un.u_num < 44:\n unumberdata.append(un.u_num)\n if len(unumberdata) >= 2: state_ok = True\n if state_ok:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n else:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n elif device_subtype ==\"TOR\":\n is_list = Device.objects.filter(state=1,device_subtype=\"TOR\").values(\"rack_id_id\")\n is_ok=[]\n for i in is_list:\n is_ok.append(i['rack_id_id'])\n for r in rack_list:\n if r.tor and r.id not in is_ok:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n elif device_subtype ==\"SAN\":\n g_h = (\"G\",\"H\")\n for r in rack_list:\n if r.rack_id[0] in g_h:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n elif device_subtype==\"加密机\":\n p = (\"P\",)\n for r in rack_list:\n unumberdata = []\n state_ok = False\n if r.rack_id[0] in p:\n unumber = U_rack.objects.filter(rack_id_id=r.id,state=0)\n for un in unumber:\n if 4 <= un.u_num < 44:\n unumberdata.append(un.u_num)\n if len(unumberdata) >= 3: state_ok = True\n if state_ok:\n data = {'id': sun, 'rack_id': r.rack_id, 'U_num': r.U_num, 'U_used': r.U_used}\n a.append(data)\n sun += 1\n elif type == \"2\":\n \"data = u_num-u_use \"\n rack_id = request.GET.get('a') # rack id\n u_num = request.GET.get('b')\n position_left = request.GET.get('c')\n position_right = request.GET.get('d')\n id = Rack.objects.filter(rack_id=rack_id)\n rack_id = []\n for i in id:\n rack_id = i.id\n if rack_id:\n \"release the previously selected slots when a rack is picked again\"\n if u_num: U_rack.objects.filter(u_num=u_num, rack_id=rack_id).update(state=0)\n if position_left: Pdu_left.objects.filter(position_left=position_left, pdu_id_id=rack_id).update(state=0)\n if position_right: 
Pdu_right.objects.filter(position_right=position_right, pdu_id_id=rack_id).update(state=0)\n\n Unum = U_rack.objects.filter(state=0, rack_id=rd).order_by(\"-u_num\")\n for u in Unum:\n data = {'U_num': u.u_num}\n a.append(data)\n elif type == \"3\":\n \"\"\"左pdu的数据返回\"\"\"\n Pdu = Pdu_left.objects.filter(state=0, pdu_id_id=rd).order_by(\"-position_left\")\n for r in Pdu:\n data = {\"position_left\": r.position_left}\n a.append(data)\n sun += 1\n elif type == \"4\":\n \"\"\"\n 右pdu的数据返回\n \"\"\"\n Pdu = Pdu_right.objects.filter(state=0, pdu_id_id=rd).order_by(\"-position_right\")\n for r in Pdu:\n data = {\"position_right\": r.position_right}\n a.append(data)\n sun += 1\n else:\n print(\"----------异常情况------------\")\n\n json_str = json.dumps(a)\n print(device_subtype,\" :json_str = \" ,json_str)\n return HttpResponse({json_str: 'dataList'})\n\n\ndef resultdevice(request):\n \"\"\"\n 服务器布线的数据输出 传设备子类型判断是否是X86服务器type 传sn号\n if x % 3 == 0:\n :param request:\n :return: json\n \"\"\"\n rrs = 0\n result = []\n\n sn = request.GET.get('sn')\n rackid = request.GET.get('rackid') # rack表的位置编号\n type = request.GET.get('type')\n content = request.GET.get('content') # 下拉框选中的文本\n dev = Device.objects.filter(sn=sn) # 选中服务器的数据\n size = request.GET.get('size', \"\")\n devicename = request.GET.get('devicename', \"\")\n rackids = Rack.objects.filter(rack_id=rackid)\n data = {}\n for d in dev:\n data = {\n \"device_id\": d.id, # 拿到该服务器的id\n \"device_type\": d.device_type,\n \"device_subtype\": d.device_subtype,\n }\n\n portdata = port.objects.filter(device_id_id=data[\"device_id\"], state__in=[0,2]) # 查端口表里面的是服务器id并且为state=0 这个是取的服务器的id # rackid = A01交换机的名字\n if type == \"1\":\n if portdata:\n for p in portdata:\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n jdata = []\n jdata2 = []\n jdata_kvm=[]\n for r in rackids: # 本机柜的交换机名字\n pdata['tor'] = r.tor\n rid = r.id\n match_rack_id = Rack.objects.filter(rack_id=r.match_rack_id).values(\"tor\") # 相邻机柜的交换机id\n pdata['match_rack_id'] = match_rack_id[0]['tor'] # 相邻交换机名称\n\n # 从端口表获取相应的数据\n devs = Device.objects.filter(rack_id_id=rid, device_subtype='TOR').values(\"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=devs[0]['id'], state=0) # Ceph服务器的交换机的端口\n for ps in pls:\n if int(ps.number)<17:\n jdata_kvm.append(ps.port_num)\n elif 16 < int(ps.number) < 33:\n jdata.append(ps.port_num)\n elif 32 < int(ps.number) < 48:\n jdata2.append(ps.port_num)\n pdata['port_number_kvm']=jdata_kvm\n pdata['port_number'] = jdata\n pdata['port_number2'] = jdata2\n result.append(pdata)\n\n # elif type == \"22\":\n # # 第3-7数据 返回到交换机端口的\n # rrs += 1\n # if content != \"--*--\":\n # d2 = Device.objects.filter(device_subtype='TOR', device_name=content) # 拿交换机的机柜id\n # dd2 = {}\n # for dd2s in d2:\n # dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n # plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n # for p in plist:\n # if int(dd2['rackid']) % 2 == 0: # 如果他可以被2整除那么他就一定是A002 或者A004 那么他就是33-48\n # if p.number:\n # if int(p.number) > 32 and int(p.number) < 49:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # else:\n # if p.number:\n # if int(p.number) > 16 and int(p.number) < 33:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # elif type == \"2\":\n # # 第3++条数据\n # if portdata:\n # for p in portdata:\n # rrs += 1\n # if rrs < 3: continue\n # if p.port_type == \"kvm\": break\n # pdata = 
{\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n # for r in rackids:\n # # 是相邻的机柜的rack_id 来拿该机柜的id\n # ra_id = Rack.objects.filter(rack_id=r.match_rack_id)\n # for r2 in ra_id:\n # rs2 = {\"id\": r2.id}\n # d2 = Device.objects.filter(rack_id_id=rs2['id'], device_subtype='TOR')\n # for d22 in d2:\n # pdata[\"rack_ids\"] = d22.device_name\n # pdata[\"dev_id\"] = d22.id\n # # 是该机柜的rack_id\n # ras = Rack.objects.filter(rack_id=rackid)\n # for ras2 in ras:\n # rasdict = {\"id\": ras2.id}\n # d2 = Device.objects.filter(rack_id_id=rasdict['id'], device_subtype='TOR')\n # for d22 in d2:\n # pdata[\"rack_id\"] = d22.device_name\n # pdata[\"dev_id\"] = d22.id\n # result.append(pdata)\n # elif type == \"3\":\n # # 第八条数据的来源\n # portdata = port.objects.filter(device_id_id=data[\"device_id\"], state=0)\n # if portdata:\n # for p in portdata:\n # if p.port_type == \"kvm\":\n # pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n # for r in rackids:\n # # 是相邻的机柜的rack_id 来拿该机柜的id\n # ra_id = Rack.objects.filter(rack_id=r.match_rack_id)\n # for r2 in ra_id:\n # rs2 = {\"id\": r2.id}\n # d2 = Device.objects.filter(rack_id_id=rs2['id'], device_subtype='TOR') # 拿交换机的机柜id\n # for d22 in d2:\n # pdata[\"rack_ids\"] = d22.device_name\n # pdata[\"dev_ids\"] = d22.id\n # # 是该机柜的rack_id\n # ras = Rack.objects.filter(rack_id=rackid)\n # for ras2 in ras:\n # rasdict = {\"id\": ras2.id}\n # d2 = Device.objects.filter(rack_id_id=rasdict['id'], device_subtype='TOR')\n # for d22 in d2:\n # pdata[\"rack_id\"] = d22.device_name\n # pdata[\"dev_id\"] = d22.id\n # result.append(pdata)\n # elif type == \"4\":\n # if content != \"--*--\":\n # # 返回所有的状态可用的number q前端返回的device的id = dev_id\n # dev_id = request.GET.get('dev_id', \"\") # 返回device的id\n # kvm = request.GET.get('kvm', \"\") # 返回kvm\n # dev_ids = Device.objects.filter(id=dev_id)\n # for d in dev_ids:\n # dd = {\"rackid\": d.rack_id_id} # 拿到当前数据的机柜id\n # plist = port.objects.filter(state=0, device_id_id=dev_id)\n # for p in plist:\n # if kvm:\n # if int(p.number) < 17:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # elif int(dd['rackid']) % 2 == 0: # 如果他可以被2整除那么他就一定是A002 或者A004 那么他就是33-48\n # if int(p.number) > 32 and int(p.number) < 49:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # else:\n # if int(p.number) > 16 and int(p.number) < 33:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # elif type == \"5\":\n # # 返回所有的状态可用的number q前端返回的device的id = dev_id\n # dev_id = request.GET.get('dev_id') # 返回device的id\n # dev_ids = Device.objects.filter(id=dev_id)\n # for d in dev_ids:\n # dd = {\"rackid\": d.rack_id_id} # 拿到当前数据的机柜id\n # plist = port.objects.filter(state=0, device_id_id=dev_id)\n # for p in plist:\n # if int(dd['rackid']) % 2 == 0: # 如果他可以被2整除那么他就一定是A002 或者A004 那么他就是33-48\n # if p.number:\n # if int(p.number) > 32 and int(p.number) < 49:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # else:\n # if p.number:\n # if int(p.number) > 16 and int(p.number) < 33:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # elif type == \"6\":\n # if content != \"--*--\":\n # # 返回所有的状态可用的number q前端返回的device的id = dev_id\n # dev_id = request.GET.get('dev_id') # 返回device的id\n # kvm = request.GET.get('kvm') # 返回kvm\n # dev_ids 
= Device.objects.filter(id=dev_id)\n # for d in dev_ids:\n # dd = {\"rackid\": d.rack_id_id} # 拿到当前数据的机柜id\n # plist = port.objects.filter(state=0, device_id_id=dev_id)\n #\n # for p in plist:\n # if kvm:\n # if int(p.number) < 17:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # elif int(dd['rackid']) % 2 == 0: # 如果他可以被2整除那么他就一定是A002 或者A004 那么他就是33-48\n # if int(p.number) > 32 and int(p.number) < 49:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n # else:\n # if int(p.number) > 16 and int(p.number) < 33:\n # pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n # result.append(pdata)\n elif type == \"7\":\n if devicename == \"加密机\":\n for p in portdata: # 这台加密机在端口表中的所有数据\n jdata = []\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n for r in rackids: # 本机柜的交换机名字\n pdata['tor'] = r.tor\n rid = r.id\n # 从端口表获取相应的数据\n devs = Device.objects.filter(sn=sn).values(\"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=devs[0]['id'], state=0) # 加密机的交换机\n for p in pls:\n jdata.append(p.port_num)\n pdata['port_num_j'] = jdata\n result.append(pdata)\n if devicename == \"小型机\":\n for p in portdata:\n jdata = []\n jdata_border = []\n jdata_border2 = []\n jdata_border_hb = []\n jdata_mo = []\n jdata_mo2 = []\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n for r in rackids: # 本机柜的交换机名字\n pdata['tor'] = r.tor\n rid = r.id\n match_rack_id = Rack.objects.filter(rack_id=r.match_rack_id).values(\"tor\", \"id\",\n \"hb_df\") # 相邻机柜的交换机id\n pdata['match_rack_id'] = match_rack_id[0]['tor'] # 相邻交换机名称\n pdata['hb_df'] = r.hb_df # 心跳布线架\n\n devs = Device.objects.filter(rack_id_id=rid, device_subtype='MPO').values(\n \"device_name\") # 通过机柜的id查device的id 本机柜设备表里面的id\n pdata['device_name'] = devs[0]['device_name']\n devs_border = Device.objects.filter(rack_id_id=match_rack_id[0]['id'], device_subtype='MPO').values(\n \"device_name\") # 相邻机柜的设备表里面的id\n pdata['device_name_border'] = devs_border[0]['device_name']\n\n dev = Device.objects.filter(rack_id_id=rid, device_subtype='TOR').values(\"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=dev[0]['id'], state=0) # Ceph服务器的交换机的端口\n for ps in pls:\n if int(ps.number) < 17:\n jdata.append(ps.port_num)\n elif 16 < int(ps.number) < 33:\n jdata_border2.append(ps.port_num)\n\n dev = Device.objects.filter(rack_id_id=match_rack_id[0]['id'], device_subtype='TOR').values(\"id\") # 相邻\n pls = port.objects.filter(device_id_id=dev[0]['id'], state=0)\n for ps in pls:\n if 33 > int(ps.number) > 16:\n jdata_border.append(ps.port_num)\n\n dev = Device.objects.filter(rack_id_id=rid, device_subtype='HB-DF').values(\"id\") # 相邻\n pls = port.objects.filter(device_id_id=dev[0]['id'], state=0)\n for ps in pls:\n jdata_border_hb.append(ps.port_num)\n\n dev = Device.objects.filter(rack_id_id=rid, device_subtype='MPO').values(\"id\") # 相邻\n pls = port.objects.filter(device_id_id=dev[0]['id'], state=0)\n for ps in pls:\n jdata_mo.append(ps.port_num)\n\n dev = Device.objects.filter(rack_id_id=match_rack_id[0]['id'], device_subtype='MPO').values(\"id\") # 相邻\n pls = port.objects.filter(device_id_id=dev[0]['id'], state=0)\n for ps in pls:\n jdata_mo2.append(ps.port_num)\n\n pdata['port_number'] = jdata\n pdata['port_number2'] = jdata_border # 是相邻本机柜的可用端口\n pdata['port_number22'] = jdata_border2\n pdata['port_number_hb'] = jdata_border_hb\n 
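# MPO patch-panel ports, for the local rack and the paired rack (collected by the MPO queries above)\n 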
pdata['port_number_mo'] = jdata_mo\n pdata['port_number_mo2'] = jdata_mo2\n\n result.append(pdata)\n if devicename == \"Ceph服务器\":\n for p in portdata: # 这台加密机在端口表中的所有数据\n jdata = []\n jdata2 = []\n jdata_border =[]\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n for r in rackids: # 本机柜的交换机名字\n pdata['tor'] = r.tor\n rid = r.id\n match_rack_id = Rack.objects.filter(rack_id=r.match_rack_id).values(\"tor\") # 相邻机柜的交换机id\n pdata['match_rack_id'] = match_rack_id[0]['tor'] # 相邻交换机名称\n\n # 从端口表获取相应的数据\n devs = Device.objects.filter(rack_id_id=rid, device_subtype='TOR').values(\"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=devs[0]['id'], state=0) # Ceph服务器的交换机的端口\n for ps in pls:\n if 17>int(ps.number):\n jdata_border.append(ps.port_num)\n elif 16 < int(ps.number) < 33:\n jdata.append(ps.port_num)\n elif 32 < int(ps.number) < 48:\n jdata2.append(ps.port_num)\n pdata['port_number'] = jdata\n pdata['port_number2'] = jdata2\n pdata['port_number3'] = jdata_border\n result.append(pdata)\n if devicename == \"SAN\":\n for p in portdata:\n jdata = []\n jdata2 = []\n jdata_border = []\n jdata2_border = []\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n # SAN默认是rackid连G19\n rack_san_G19 = Rack.objects.filter(rack_id=\"G19\").values(\"mtp_san\", \"id\")\n rack_san_G01 = Rack.objects.filter(rack_id=\"G01\").values(\"tor\", \"id\")\n rack_san_H01 = Rack.objects.filter(rack_id=\"H01\").values(\"tor\",\"mtp_san\", \"id\")\n\n devs_G19_san = Device.objects.filter(rack_id_id=rack_san_G19[0]['id'], device_subtype='SAN').values(\"device_name\", \"id\")\n devs_H01_san = Device.objects.filter(rack_id_id=rack_san_H01[0]['id'], device_subtype='SAN').values(\"device_name\", \"id\")\n devs_H01_tor = Device.objects.filter(rack_id_id=rack_san_H01[0]['id'], device_subtype='TOR').values(\"device_name\", \"id\")\n devs_G01_tor = Device.objects.filter(rack_id_id=rack_san_G01[0]['id'], device_subtype='TOR').values( \"device_name\", \"id\")\n\n pdata['rack_san_G19_san'] = devs_G19_san[0]['device_name']\n pdata['rack_san_G01_tor'] = devs_G01_tor[0]['device_name']\n pdata['rack_san_H01_san'] = devs_H01_san[0]['device_name']\n pdata['rack_san_H01_tor'] = devs_H01_tor[0]['device_name']\n\n pls_G19 = port.objects.filter(device_id_id=devs_G19_san[0]['id'], state=0)\n for ps in pls_G19:\n jdata2.append(ps.port_num)\n pls_H01 = port.objects.filter(device_id_id=devs_H01_san[0]['id'], state=0)\n for ps in pls_H01:\n jdata_border.append(ps.port_num)\n\n pls_G01 = port.objects.filter(device_id_id=devs_G01_tor[0]['id'], state=0)\n for ps in pls_G01:\n jdata.append(ps.port_num)\n pls_H01 = port.objects.filter(device_id_id=devs_H01_tor[0]['id'], state=0)\n for ps in pls_H01:\n jdata2_border.append(ps.port_num)\n\n\n pdata['port_number'] = jdata # 是本机柜的G01可用端口\n pdata['port_number2'] = jdata2 # 是本机柜的G19可用端口\n pdata['port_number_border'] = jdata_border\n pdata['port_number2_border'] = jdata2_border\n result.append(pdata)\n if devicename == \"TOR\":\n for p in portdata: # 这台加密机在端口表中的所有数据\n jdata = []\n jdata2 = []\n if p.port_type == \"upstream\":\n pdata = {\"id\": p.id, \"port_num\": p.port_num, \"port_type\": p.port_type, \"number\": p.number}\n for r in rackids: # 本机柜的交换机名字\n rid = r.id\n rnet = r.mtp_net\n match_rack_id = Rack.objects.filter(rack_id=r.match_rack_id).values(\"id\",\"mtp_net\") # 相邻机柜的交换机id\n devs = Device.objects.filter(rack_id_id=rid, device_subtype='MPO').values(\"device_name\") # 通过机柜的id查device的id 
本机柜设备表里面的id\n pdata['device_name'] = devs[0]['device_name']\n devs_border = Device.objects.filter(rack_id_id=match_rack_id[0]['id'], device_subtype='MPO').values(\"device_name\") # 相邻机柜的设备表里面的id\n pdata['device_name_border'] = devs_border[0]['device_name']\n\n # 从端口表获取相应的数据\n devs = Device.objects.filter(rack_id_id=rid, device_subtype='MPO').values(\"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=devs[0]['id'], state=0) # 交换机的端口\n for ps in pls:\n if ps.port_type == \"MTP-NET\":\n jdata.append(ps.port_num)\n devs = Device.objects.filter(rack_id_id=match_rack_id[0]['id'], device_subtype='MPO').values(\n \"id\") # 交换机的id\n pls = port.objects.filter(device_id_id=devs[0]['id'], state=0) # 交换机的端口\n for ps in pls:\n if ps.port_type == \"MTP-NET\":\n jdata2.append(ps.port_num)\n pdata['port_number'] = jdata\n pdata['port_number2'] = jdata2\n result.append(pdata)\n elif type == \"8\":\n if devicename == \"小型机\":\n jdata = []\n manage = request.GET.get('manage', \"\")\n hbdf = request.GET.get('hbdf')\n if manage == \"1\":\n if content == \"--*--\":\n return HttpResponse({'dataList'})\n if hbdf:\n d2 = Device.objects.filter(device_subtype='HB-DF', device_name=content) # 拿交换机的机柜id\n else:\n d2 = Device.objects.filter(device_subtype='TOR', device_name=content) # 拿交换机的机柜id\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n rs = Rack.objects.filter(id=dd2['rackid']).values(\"rack_id\")\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n for p in plist:\n pdata = {}\n if hbdf:\n if rs[0]['rack_id'] == rackid:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n else:\n if rs[0]['rack_id'] == rackid:\n if int(p.number) > 16 and int(p.number) < 33:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n else:\n if int(p.number) > 32 and int(p.number) < 49:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n elif manage == \"0\":\n if content == \"--*--\":\n return HttpResponse({'dataList'})\n d2 = Device.objects.filter(device_subtype='MPO', device_name=content) # 拿交换机的机柜id\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n rs = Rack.objects.filter(id=dd2['rackid']).values(\"rack_id\")\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n for p in plist:\n if int(p.number) < 7:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n if devicename == \"Ceph服务器\":\n d2 = Device.objects.filter(device_subtype='TOR', device_name=content) # 拿交换机的机柜id\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n\n rs = Rack.objects.filter(id=dd2['rackid']).values(\"rack_id\")\n\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n for p in plist:\n\n if rs[0]['rack_id'] == rackid:\n if int(p.number) > 16 and int(p.number) < 33:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n\n\n else:\n if int(p.number) > 32 and int(p.number) < 49:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n if devicename == \"SAN\":\n if content == \"--*--\":\n return HttpResponse({'dataList'})\n\n d2 = Device.objects.filter(device_subtype='SAN', device_name=content) # 拿交换机的机柜id\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n rs = 
Rack.objects.filter(id=dd2['rackid']).values(\"id\")\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n for p in plist:\n pdata = {}\n if rs[0]['id'] == 111:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n else:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n if devicename == \"TOR\":\n d2 = Device.objects.filter(device_subtype='MPO', device_name=content) # rack id of the selected switch\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n rs = Rack.objects.filter(id=dd2['rackid']).values(\"rack_id\", \"mtp_net\")\n for r in rackids: # switch name of the local rack\n rid = r.id\n rnet = r.mtp_net\n match_rack_id = Rack.objects.filter(rack_id=r.match_rack_id).values(\"id\", \"mtp_net\") # switch of the adjacent rack\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n\n for p in plist:\n if rs[0]['mtp_net'] == p.port_type or p.port_type == match_rack_id[0]['mtp_net']:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n if devicename == \"X86服务器\":\n if content == \"--*--\":\n return HttpResponse({'dataList'})\n d2 = Device.objects.filter(device_subtype='TOR', device_name=content) # rack id of the selected switch\n dd2 = {}\n for dd2s in d2:\n dd2 = {\"device_id\": dd2s.id, \"rackid\": dd2s.rack_id_id}\n\n rs = Rack.objects.filter(id=dd2['rackid']).values(\"rack_id\")\n\n plist = port.objects.filter(state=0, device_id_id=dd2['device_id'])\n for p in plist:\n\n if rs[0]['rack_id'] == rackid:\n if int(p.number) > 16 and int(p.number) < 33:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n\n else:\n if int(p.number) > 32 and int(p.number) < 49:\n pdata = {\"id\": p.id, \"number\": p.number, \"port_num\": p.port_num}\n result.append(pdata)\n\n json_str = json.dumps(result)\n print(\"json_str:\", json_str)\n return HttpResponse({json_str: 'dataList'})\n\n\ndef getorder(request):\n sn = request.GET.get(\"sn\")\n a = list(portlist.objects.filter(sn=sn).values())\n json_str = json.dumps(a, default=str)\n\n return HttpResponse({json_str: 'dataList'})\n\n\n### automatically allocate a KVM IP\ndef getkvmIp(request):\n kvmIp = 0\n kvmIpList = IP.objects.filter(netarea='KVM').filter(state=0)\n if kvmIpList:\n kvmIp = kvmIpList[0].ip_addr\n id = kvmIpList[0].id\n IP.objects.filter(id=id).update(state='1')\n return JsonResponse({'data': kvmIp})\n\n\ndef getIp(request):\n netarea = request.GET.get('p1')\n vlan = request.GET.get('p2')\n ipList = IP.objects.filter(netarea=netarea).filter(vlan_id=vlan).filter(state=0)\n ip = []\n if ipList:\n ip = ipList[0].ip_addr\n id = ipList[0].id\n IP.objects.filter(id=id).update(state='1')\n\n return JsonResponse({'data': ip})\n\n\ndef getVlan(request):\n vlans = []\n try:\n netarea = request.GET.get('netarea')\n data = IP.objects.filter(netarea=netarea).values('vlan_id').distinct().order_by('vlan_id')\n\n for ip in data:\n vlans.append(ip['vlan_id'])\n except Exception:\n pass\n\n return JsonResponse({'data': vlans})\n\n\ndef send_html_mail(request):\n subject = u'设备上架工单'\n sns = request.GET.get(\"valus\").split(',')\n emergency = request.GET.get(\"u\")\n\n PRI = request.GET.get(\"u2\")\n stat_time = request.GET.get(\"u3\")\n end_time = request.GET.get(\"u4\")\n import datetime\n\n stat_time = datetime.datetime.strptime(stat_time.strip(), '%Y-%m-%d')\n end_time = datetime.datetime.strptime(end_time.strip(), '%Y-%m-%d')\n # Device.objects.filter(sn=sn).update(state=1)\n\n datas = []\n\n for sn in sns:\n dev_type = Device.objects.filter(sn=sn).values(\"device_subtype\")\n 
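# SAN devices keep one Sanlist row per cabled link; every other subtype lives in a single portlist row (assumed from the two branches below)\n 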
if dev_type and dev_type[0]['device_subtype'] == \"SAN\":\n Sanlist.objects.filter(sn=sn).update(emergency=emergency, stat_time=stat_time, PRI=PRI, end_time=end_time)\n datalist = Sanlist.objects.filter(sn=sn)\n\n for l in datalist:\n kvmips = IP.objects.filter(ip_addr=l.managing_ip)\n kvmip_netmask = ''\n for ip in kvmips:\n kvmip_netmask = ip.netmask\n hostips = IP.objects.filter(ip_addr=l.host_ip)\n hostip_netmask = ''\n hostip_vlan = ''\n hostip_gateway = ''\n for ip in hostips:\n hostip_netmask = ip.netmask\n hostip_vlan = ip.vlan_id\n hostip_gateway = ip.gateway\n\n switchs = Device.objects.filter(device_name=l.switchboard)\n for switch in switchs:\n rackid = switch.rack_id_id\n uposition1 = switch.u_num\n racks = Rack.objects.filter(id=rackid)\n for rack in racks:\n rack_name1 = rack.rack_id\n uposition2 = \"\"\n switchs = Device.objects.filter(device_name=l.switchboard2)\n for switch in switchs:\n rackid = switch.rack_id_id\n uposition2 = switch.u_num\n racks = Rack.objects.filter(id=rackid)\n for rack in racks:\n rack_name2 = rack.rack_id\n\n switchs = Device.objects.filter(device_name=l.switchboard_kvm)\n for switch in switchs:\n rackid = switch.rack_id_id\n uposition3 = switch.u_num\n racks = Rack.objects.filter(id=rackid)\n for rack in racks:\n rack_name3 = rack.rack_id\n\n data = {\n \"device_subtype\": l.device_subtype, \"sn\": l.sn, \"size\": l.size, \"rack_id\": l.rack_id,\n \"unum\": l.unum,\n \"managing_ip\": l.managing_ip, \"kvm_netmask\": kvmip_netmask,\n \"host_ip\": l.host_ip, \"host_netmask\": hostip_netmask, \"vlan_id\": hostip_vlan,\n \"device_name\": l.device_name, \"lpdu\": l.position_left, \"rpdu\": l.position_right,\n \"gateway\": hostip_gateway, \"server_interface\": l.server_interface, \"switchboard_port\": l.switchboard_port,\n \"manage\": l.manage, \"switchboard_G19\": l.switchboard_G19, \"switchboard_G01\": l.switchboard_G01,\n \"switchboard_H01\": l.switchboard_H01, \"switchboard_H01_tor\": l.switchboard_H01_tor\n }\n datas.append(data)\n\n else:\n portlist.objects.filter(sn=sn).update(emergency=emergency, stat_time=stat_time, PRI=PRI, end_time=end_time)\n datalist = portlist.objects.filter(sn=sn)\n for l in datalist:\n # U positions of the switch, the adjacent rack's switch, the heartbeat switch and the MPO frame\n\n switchs = Device.objects.filter(device_name=l.switchboard)\n for switch in switchs:\n uposition1 = switch.u_num\n\n switchs = Device.objects.filter(device_name=l.switchboard2)\n for switch in switchs:\n uposition2 = switch.u_num\n data = {\n \"device_subtype\": l.device_subtype, \"sn\": l.sn, \"rack_id\": l.rack_id, \"unum\": l.unum,\n \"managing_ip\": l.managing_ip,\n \"host_ip\": l.host_ip,\n \"server_interface\": l.server_interface,\n \"switchboard\": l.switchboard,\n \"switchboard_port\": l.switchboard_port,\n \"server_interface2\": l.server_interface2,\n \"switchboard2\": l.switchboard2,\n \"switchboard_port2\": l.switchboard_port2,\n \"server_interface_kvm\": l.server_interface_kvm,\n \"switchboard_kvm\": l.switchboard_kvm,\n \"switchboard_port_kvm\": l.switchboard_port_kvm,\n \"lpdu\": l.position_left, \"rpdu\": l.position_right, \"device_name\": l.device_name,\n \"emergency\": l.emergency, \"PRI\": l.PRI, \"stat_time\": l.stat_time, \"end_time\": l.end_time,\n\n \"server_interfaces\": l.server_interfaces, \"switchboard_ports\": l.switchboard_ports,\n \"server_interface2_2\": l.server_interface2_2, \"switchboard_port2_2\": l.switchboard_port2_2,\n \"server_interface_manage\": l.server_interface_manage,\n \"switchboard_manage\": l.switchboard_manage,\n \"switchboard_port_manage\": l.switchboard_port_manage,\n\n \"switchboard_hb\": l.switchboard_hb, \"server_interface_hb\": l.server_interface_hb,\n \"server_interface_hb2\": l.server_interface_hb2,\n \"switchboard_port_hb2\": l.switchboard_port_hb2, \"server_interface_san\": l.server_interface_san,\n \"switchboard_port_san\": l.switchboard_port_san,\n \"switchboards_san\": l.switchboards_san, \"server_interfaces_san\": l.server_interfaces_san,\n \"switchboard_ports_san\": l.switchboard_ports_san, \"switchboards2\": l.switchboards2,\n \"server_interfaces2\": l.server_interfaces2, \"switchboard_ports2\": l.switchboard_ports2,\n \"server_interfacees2\": l.server_interfacees2, \"switchboard_portes2\": l.switchboard_portes2,\n \"switchboard_port_hb\": l.switchboard_port_hb,\n }\n datas.append(data)\n\n recipient_list = ['18719871010@163.com']\n html_content = loader.render_to_string('workList.html', {\"data\": datas})\n msg = EmailMessage(subject, html_content, EMAIL_HOST_USER, recipient_list)\n msg.attach_file(r'D:\Z-GitHub\PAB_SH\device\static\css\images\pinganLogo.jpg')\n msg.content_subtype = \"html\"\n msg.send()\n\n return JsonResponse({'data': datas})\n\n\ndef getHostname(request):\n type = request.GET.get('type')\n identify = 1\n if type == 'X86服务器':\n identify = 2\n hostnames = Hostname.objects.filter(state=0)\n # list = Hostname.objects.filter(areas='ESX_VM').filter(dev_identification=identify, state=0).order_by('hostname')\n hostname = ''\n if hostnames:\n hostname = hostnames[0].hostname\n id = hostnames[0].id\n Hostname.objects.filter(id=id).update(state='1')\n return JsonResponse({'data': hostname})\n\n\ndef deviceInfoManage(request):\n context = {'str': '上传excel'}\n return render(request, 'deviceInfoManage.html', context)\n\n\ndef validate_excel(value):\n if value.name.split('.')[-1] not in ['xls', 'xlsx']:\n raise ValidationError(_('Invalid File Type: %(value)s'), params={'value': value}, )\n\n\nclass UploadExcelForm(forms.Form):\n excel = forms.FileField(validators=[validate_excel])\n\n\ndef post(request):\n form = UploadExcelForm(request.POST, request.FILES)\n if form.is_valid():\n # read the uploaded excel file\n wb = xlrd.open_workbook(filename=None, file_contents=request.FILES['excel'].read())\n for sheet in wb.sheets(): # iterate over every sheet\n row = sheet.nrows\n for i in range(1, row): # iterate over every data row\n line = sheet.row_values(i)\n ci = line[0]\n device_type = line[1]\n device_subtype = line[2]\n asset = line[3]\n sn = line[4]\n unit_type = line[5]\n cpu = line[6]\n mem = line[7]\n disk = '(SSD)' + line[10] + ' (SAS)' + line[11]\n warranty_start = line[12].split('-')[0]\n warranty_end = line[12].split('-')[1]\n owner = line[13]\n size = str(line[14]).split('.')[0]\n maintenance = line[15]\n contacts = line[16]\n contacts_mail = line[17]\n contacts_tel = line[18]\n # insert a new record into the device table\n devs = Device.objects.filter(ci=ci)\n if not devs.exists():\n Device.objects.create(ci=ci, device_type=device_type, device_subtype=device_subtype,\n asset=asset, sn=sn, unit_type=unit_type, cpu=cpu, mem=mem,\n warranty_start=warranty_start, warranty_end=warranty_end,\n owner=owner, size=size, maintenance=maintenance, state='0',\n contacts=contacts, contacts_mail=contacts_mail, is_init='0',\n contacts_tel=contacts_tel, rack_id_id='9999', disk=disk)\n # fetch the id of the record just inserted into the device table\n devs = Device.objects.filter(ci=ci)\n id = 1\n for dev in devs:\n # parse the device ports\n wcardNum = line[8].split('*')[0]\n wportNum = line[8].split('*')[1]\n qportNum = line[9].split('*')[1].split('(')[0]\n number = 0\n for networkcard in range(1, 
int(wcardNum) + 1):\n for ports in range(1, int(wportNum) + 1):\n number += 1\n port_num = 'PN0' + str(networkcard) + '_0' + str(ports)\n\n port.objects.create(device_id_id=dev.id, port_type='业务', port_num=port_num,\n number=number, state='0')\n\n for ports in range(1, int(qportNum) + 1):\n number += 1\n port_num = 'KVM_0' + str(ports)\n port.objects.create(device_id_id=dev.id, port_type='KVM', port_num=port_num,\n number=number, state='0')\n\n return HttpResponse(\"上传完成\")\n\ndef wiringReview(request):\n sn = request.GET.get('sn')\n devType = request.GET.get('devType')\n ic = []\n if (devType != 'SAN'):\n podata = portlist.objects.filter(sn=sn)\n for l in podata:\n data = {\n \"server_interface\": l.server_interface,\n \"switchboard\": l.switchboard, \"switchboard_port\": l.switchboard_port,\n \"server_interfaces\": l.server_interfaces, \"switchboard_ports\": l.switchboard_ports,\n\n \"server_interface2_2\": l.server_interface2_2, \"switchboard_port2_2\": l.switchboard_port2_2,\n \"server_interface2\": l.server_interface2, \"switchboard2\": l.switchboard2,\n \"switchboard_port2\": l.switchboard_port2, \"server_interface_manage\": l.server_interface_manage,\n \"switchboard_manage\": l.switchboard_manage,\n \"switchboard_port_manage\": l.switchboard_port_manage, \"server_interface_kvm\": l.server_interface_kvm,\n \"switchboard_kvm\": l.switchboard_kvm,\n \"switchboard_port_kvm\": l.switchboard_port_kvm,\n\n \"switchboard_hb\": l.switchboard_hb, \"server_interface_hb\": l.server_interface_hb,\n \"server_interface_hb2\": l.server_interface_hb2,\n \"switchboard_port_hb2\": l.switchboard_port_hb2, \"server_interface_san\": l.server_interface_san,\n \"switchboard_port_san\": l.switchboard_port_san,\n \"switchboards_san\": l.switchboards_san, \"server_interfaces_san\": l.server_interfaces_san,\n \"switchboard_ports_san\": l.switchboard_ports_san, \"switchboards2\": l.switchboards2,\n \"server_interfaces2\": l.server_interfaces2, \"switchboard_ports2\": l.switchboard_ports2,\n \"server_interfacees2\": l.server_interfacees2, \"switchboard_portes2\": l.switchboard_portes2,\n \"switchboard_port_hb\": l.switchboard_port_hb,\n }\n ic.append(data)\n else:\n g19list = Sanlist.objects.filter(sn=sn, switchboard_G19='SHDCC-SAN-G19')\n for list in g19list:\n data1 = {\n \"G19server_interface\": list.server_interface, \"G19switchboard_port\": list.switchboard_port,\n }\n ic.append(data1)\n h01list = Sanlist.objects.filter(sn=sn, switchboard_H01='SHDCC-SAN-H01')\n for list in h01list:\n data2 = {\n \"H01server_interface\": list.server_interface, \"H01switchboard_port\": list.switchboard_port,\n }\n ic.append(data2)\n torh01list = Sanlist.objects.filter(sn=sn, switchboard_H01_tor='SHDCC-TOR-G01')\n for list in torh01list:\n data3 = {\n \"torh01server_interface\": list.server_interface, \"torh01switchboard_port\": list.switchboard_port,\n }\n ic.append(data3)\n g01list = Sanlist.objects.filter(sn=sn, switchboard_G01='SHDCC-TOR-H01')\n for list in g01list:\n data4 = {\n \"g01server_interface\": list.server_interface, \"g01switchboard_port\": list.switchboard_port,\n }\n ic.append(data4)\n\n data = json.dumps(ic)\n return HttpResponse(data, content_type='application/json')\n\n","sub_path":"device/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":85393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445160525","text":"import apibinding.api_actions as api_actions\nimport account_operations\nimport apibinding.inventory as inventory\nimport 
zstackwoodpecker.operations.resource_operations as res_ops\nimport net_operations as net_ops\nimport vm_operations as vm_ops\n\n\ndef create_vpc_vrouter(name, virtualrouter_offering_uuid, resource_uuid=None, system_tags=None, use_tags=None, session_uuid=None):\n action = api_actions.CreateVpcVRouterAction()\n action.timeout = 300000\n action.name = name\n action.virtualRouterOfferingUuid = virtualrouter_offering_uuid\n action.resourceUuid = resource_uuid\n action.systemTags = system_tags\n action.userTags = use_tags\n evt = account_operations.execute_action_with_session(action, session_uuid)\n return evt.inventory\n\n\ndef remove_all_vpc_vrouter():\n cond = res_ops.gen_query_conditions('applianceVmType', '=', 'vpcvrouter')\n vr_vm_list = res_ops.query_resource(res_ops.APPLIANCE_VM, cond)\n if vr_vm_list:\n for vr_vm in vr_vm_list:\n nic_uuid_list = [nic.uuid for nic in vr_vm.vmNics if nic.metaData == '4']\n for nic_uuid in nic_uuid_list:\n net_ops.detach_l3(nic_uuid)\n vm_ops.destroy_vm(vr_vm.uuid)\n","sub_path":"zstackwoodpecker/zstackwoodpecker/operations/vpc_operations.py","file_name":"vpc_operations.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"633091548","text":"\nfrom . import BaseTestCase\n\n\nclass APITestCase(BaseTestCase):\n\n def test_not_found(self):\n response = self.client.get('/api/resource_name/not_found_method/')\n self.assertStatus(response, 404)\n expected_message = {\n 'error': \"The endpoint resource_name/not_found_method doesn't exist\"\n }\n self.assertEqual(expected_message, response.json)\n","sub_path":"tests/unit/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545353175","text":"from intents.base import QueryTask, Help\nimport teamin\nfrom common.cntime import CNTime\n\nimport re\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass IntentQtaskAbout(QueryTask):\n NAME = 'QTASK_ABOUT'\n\n def __init__(self, request, intent):\n self.request = request\n self.intent = intent\n\n def Go(self):\n self.initSlots()\n query = self.request.Message()\n pskey = self.bestCandidate()\n if not pskey:\n return Help.Response()\n\n kw = self.parseKeywords(query, pskey)\n\n btc = teamin.BizTaskCount(self.request.AgentName, self.request.AgentUID)\n\n # 双重条件:人物+关键词,时间+关键词\n if hasattr(self, 'cond_p'):\n kw = '与{}有关,{}'.format(self.cond_p, kw)\n (count, finished, expired), weburl = btc.SpecifyKeywords(query, kw)\n self.intent.set_interval(0, 0, weburl)\n elif hasattr(self, 'cond_t'):\n (count, finished, expired), weburl = btc.SpecifyKeywordsTime(query, kw, self.cond_t)\n self.intent.set_interval(0, 0, weburl)\n\n return self.Response(count, finished, expired, weburl)\n\n def initSlots(self):\n slots = self.intent.slots.filter(type__startswith='user_').all()\n for slot in slots:\n f = slot.type[5:]\n if f == 'q':\n self.slot_q = slot\n elif f == 'tsk':\n self.slot_tsk = slot\n elif f == 'p':\n self.slot_p = slot\n elif f == 't':\n self.slot_t = slot\n elif f == 't2':\n self.slot_t2 = slot\n\n if hasattr(self, 'slot_p'):\n self.cond_p = self.slot_p.original_word\n if hasattr(self, 'slot_t'):\n self.cond_t = CNTime(self.slot_t.original_word).guess_time()\n if hasattr(self, 'slot_t2'):\n t2 = CNTime(self.slot_t2.original_word).guess_time()\n self.cond_t = CNTime.Merge(self.cond_t, t2)\n\n def patterns(self):\n return { \n 'a': 
'[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][D:user_p][W:4-99][D:kw_rel][W:0-8]',\n 'b': '[D:kw_plz][D:user_q][D:kw_inc][D:user_p][W:4-99][D:kw_rel][D:kw_de][D:user_tsk]',\n 'c': '[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][D:user_p][W:0-8]',\n 'd': '[D:kw_plz][D:user_q][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][D:user_p][D:kw_de][D:user_tsk]',\n 'e': '[D:kw_plz][D:user_q][D:kw_inc][W:0-2][D:user_t][W:0-8][D:user_t2][W:4-99][D:kw_rel][D:kw_de][D:user_tsk]',\n 'f': '[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][W:0-2][D:user_t][W:0-8][D:user_t2][W:4-99][D:kw_rel][W:0-8]',\n 'g': '[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][D:user_t][W:4-99][D:kw_rel][W:0-8]',\n 'h': '[D:kw_plz][D:user_q][D:kw_inc][D:user_t][W:4-99][D:kw_rel][D:kw_de][D:user_tsk]',\n 'i': '[D:kw_plz][D:user_q][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][W:0-2][D:user_t][W:0-8][D:user_t2][D:kw_de][D:user_tsk]',\n 'j': '[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][W:0-2][D:user_t][W:0-8][D:user_t2][W:0-8]',\n 'k': '[D:kw_plz][D:user_q][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][D:user_t][D:kw_de][D:user_tsk]',\n 'l': '[D:kw_plz][D:user_q][D:user_tsk][D:kw_sp][D:kw_inc][W:8-99][D:kw_rel][D:kw_de][D:user_t][W:0-8]',\n }\n\n def bestCandidate(self):\n c = 0\n d = 0\n rmds = []\n candidate = None\n for v in self.intent.candidates.filter():\n c2 = float(v.intent_confidence)\n if c2 < c:\n continue\n d2 = len(re.findall(r'\\[.+?\\]', v.match_info))\n if c2 > c or not candidate:\n c = c2\n d = d2\n rmds = re.findall(r'\\[D:user_(p|t|t2)\\]', v.match_info)\n candidate = v\n continue\n rmds2 = re.findall(r'\\[D:user_(p|t|t2)\\]', v.match_info)\n if len(rmds2) > len(rmds) or \\\n (len(rmds2) == len(rmds) and d2 > d) or \\\n (len(rmds2) == len(rmds) and d2 == d and len(v.match_info) > len(candidate.match_info)):\n c = c2\n d = d2\n rmds = rmds2\n candidate = v\n\n\n self.kws = {}\n key = ''\n if candidate and candidate.match_info:\n tgt = candidate.match_info\n kws = re.findall(r'kw_(\\w+):([^\\|]+)', tgt)\n for k, v in kws:\n v = (v, len(v.encode('gbk')))\n if k in self.kws:\n if type(self.kws[k]) == tuple:\n self.kws[k] = [self.kws[k]]\n self.kws[k].append(v)\n else:\n self.kws[k] = v\n value = ''\n for k, v in self.patterns().items():\n if not tgt.startswith(v):\n continue\n if len(v) < len(value):\n continue\n key = k \n value = v \n kw_plz = '[D:kw_plz]'\n if not tgt.startswith(kw_plz):\n tgt = kw_plz + tgt\n for k, v in self.patterns().items():\n if not tgt.startswith(v):\n continue\n if len(v) < len(value):\n continue\n key = k \n value = v \n if key:\n logger.debug('{} matched {} use {}'.format(self.NAME, key, tgt))\n return key\n\n def parseKeywords(self, oquery, pskey):\n bq = oquery.encode('gbk')\n\n kw_sp = self.kws.get('sp')\n kw_inc = self.kws.get('inc')\n s, e = None, None\n\n if pskey in ['a', 'b']:\n s = self.slot_p.offset + self.slot_p.length\n elif pskey in ['c']:\n s = self.slot_tsk.offset + self.slot_tsk.length + kw_sp[1] + kw_inc[1]\n elif pskey in ['d']:\n s = self.slot_q.offset + self.slot_q.length + kw_inc[1]\n e = self.slot_p.offset\n elif pskey in ['e', 'f']:\n s = self.slot_t2.offset + self.slot_t2.length\n elif pskey in ['g', 'h']:\n s = self.slot_t.offset + self.slot_t.length\n elif pskey in ['i', 'k']:\n s = self.slot_q.offset + self.slot_q.length + kw_inc[1]\n e = self.slot_t.offset\n elif pskey in ['j', 'l']:\n s = self.slot_tsk.offset + self.slot_tsk.length + kw_sp[1] + kw_inc[1]\n e = self.slot_t.offset\n\n ret = ''\n if s and e:\n 
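# slot offsets/lengths are byte positions in the GBK encoding (see the encode('gbk') calls above), so slice the bytes first and decode afterwards\n 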
ret = bq[s:e].decode('gbk')\n elif s is not None:\n ret = bq[s:].decode('gbk')\n elif e is not None:\n ret = bq[:e].decode('gbk')\n\n if ret and ret[0] not in ['与', '和', '跟']:\n ret = '与' + ret\n return ret\n","sub_path":"intents/qtask_about.py","file_name":"qtask_about.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"594646339","text":"# -*- coding: utf-8 -*-#\n'''\n# Name: t3\n# Description: \n# Author: super\n# Date: 2019-07-09\n'''\n\nimport numpy as np\n\nxarr = np.array([1.1,1.2,1.3,1.4,1.5])\nyarr = np.array([2.1,2.2,2.3,2.4,2.5])\ncond = np.array([True, False, True, True, False])\nresult = [(x if c else y) for x,y,c in zip(xarr, yarr, cond)]\nprint(result)\n\nresult1 = np.where(cond, xarr, yarr)\nprint(result1)","sub_path":"pythonBasis/numpyTest/t3.py","file_name":"t3.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"249620951","text":"#! python3\n# identifyPhotoFolder.py - Walks an entire drive and prints the absolute\n# path of any folder that contains photos\n\nimport os\nfrom PIL import Image\n\n\nfor foldername, subfolders, filenames in os.walk('C:\\\\'):\n numPhotofiles = 0\n numNonPhotofiles = 0\n for filename in filenames:\n # Check if file extension isn't .png or .jpg.\n if not (filename.endswith('.jpg') or filename.endswith('.png')):\n numNonPhotofiles += 1\n continue # skip to next filename\n\n # Open image file using Pillow.\n try:\n im = Image.open(foldername + '/' + filename)\n width, height = im.size\n except (IOError, ValueError):\n continue\n # Check if width & height are larger than 500.\n if (width > 500) and (height > 500):\n # Image is large enough to be considered a photo.\n numPhotofiles += 1\n else:\n # Image is too small to be a photo.\n numNonPhotofiles += 1\n\n # If more than half of files were photos,\n # print the absolute path of the folder\n if numPhotofiles > ((numPhotofiles + numNonPhotofiles) / 2):\n print(os.path.abspath(foldername))","sub_path":"ch17/practice-projects/identify-photo-folders/identifyPhotoFolder.py","file_name":"identifyPhotoFolder.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"597651719","text":"import operator\nfrom datetime import datetime\nfrom functools import reduce\n\nfrom django.shortcuts import render, redirect\nfrom django.db.models import Q\nfrom django.contrib.postgres.search import SearchVector\nfrom django.core.paginator import (\n Paginator,\n EmptyPage,\n PageNotAnInteger\n)\n\nfrom ..models import (\n Product,\n Pen,\n Knife,\n VacationSettings,\n Image\n)\n\n\ndef product(request, id):\n vacation_settings = VacationSettings.load()\n product = Product.objects.get(id=id)\n images = Image.objects.filter(product=product)\n end = vacation_settings.end_date\n begin = datetime.now().date()\n weeks = (end-begin).days//7\n vacation_message = 'Expect a minimum shipping delay of {} weeks.'.format(\n weeks)\n context = {\n \"product\": product,\n \"images\": images,\n \"vacation_message\": vacation_message\n }\n return render(request, 'shop/product.html', context)\n\n\n# perform filter and render products page\ndef search(request):\n field = request.GET.get('filter')\n value = request.GET.get('value')\n page = request.GET.get('page')\n # TODO: datetime handling will need changing in production\n
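 # Dispatch on the ?filter= query param; each branch builds a queryset and a matching headline.\n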
 if field == 'recent':\n all_products = Product.objects.filter(status='A').order_by(\n '-created_at').prefetch_related('image')\n headline = 'All Products'\n elif field == 'all':\n all_products = Product.objects.filter(status='A', knife__isnull=True).order_by(\n '-updated_at').prefetch_related('image')\n headline = 'All Pens'\n elif field == 'shop':\n if value == 'other':\n make_list = ['pelikan', 'parker', 'montblanc', \"waterman's\"]\n query = reduce(operator.or_, (Q(make__iexact=x)\n for x in make_list))\n all_products = Product.objects.filter(status=\"A\", knife__isnull=True).exclude(query).exclude(\n pen__country__iexact=\"it\").exclude(pen__country__iexact=\"de\").prefetch_related('image')\n headline = \"Other Pens\"\n elif value == \"italian\":\n all_products = Product.objects.filter(\n pen__country__iexact=\"it\", status=\"A\", knife__isnull=True).prefetch_related('image')\n headline = \"Italian Pens\"\n elif value == \"german\":\n germans = [\"montblanc\", \"pelikan\"]\n query = reduce(operator.or_, (Q(make__iexact=x) for x in germans))\n all_products = Product.objects.filter(\n pen__country__iexact=\"de\", status=\"A\", knife__isnull=True).exclude(query).prefetch_related(\"image\")\n headline = \"Other German Pens\"\n else:\n all_products = Product.objects.filter(make__iexact=value, status=\"A\", knife__isnull=True).order_by(\n '-updated_at').prefetch_related('image')\n headline = \"Pens Manufactured by {}\".format(value.capitalize())\n elif field == \"price\":\n if value == \"high\":\n all_products = Product.objects.filter(price__gt=600, status=\"A\", knife__isnull=True).order_by(\n '-updated_at').prefetch_related('image')\n phrase = \"over $600\"\n elif value == \"low\":\n all_products = Product.objects.filter(price__lt=200, status=\"A\", knife__isnull=True).order_by(\n '-updated_at').prefetch_related('image')\n phrase = \"under $200\"\n else:\n all_products = Product.objects.filter(price__lt=int(value), price__gt=int(\n value)-200, status=\"A\", knife__isnull=True).order_by('-updated_at').prefetch_related('image')\n phrase = \"between ${} and ${}\".format(str(int(value)-200), value)\n headline = \"Pens {}\".format(phrase)\n elif field == \"other\":\n if value == \"knife\":\n all_products = Product.objects.filter(status=\"A\").exclude(\n knife__isnull=True).order_by('-updated_at').prefetch_related('image')\n headline = \"Knives\"\n elif value == \"sold\":\n all_products = Product.objects.filter(\n status=\"S\").order_by('-updated_at')\n headline = \"Sold Items\"\n elif field == \"search\":\n all_fields = get_all_search_fields()\n all_products = Product.objects.annotate(\n search=SearchVector(*all_fields),\n ).filter(search=value, status=\"A\").order_by('-updated_at').prefetch_related('image')\n headline = \"Search Results For: '{}'\".format(value)\n
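 # Paginate at 24 products per page; non-integer or out-of-range pages are handled by the except clauses below.\n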
 paginator = Paginator(all_products, 24)\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n products = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n products = paginator.page(paginator.num_pages)\n context = {\n \"products\": products,\n \"headline\": headline,\n \"filter\": field,\n \"value\": value\n }\n return render(request, 'shop/products.html', context)\n\n\ndef get_all_search_fields():\n product_fields = Product._meta.get_fields()\n product_field_names = [field.name for field in product_fields if field.get_internal_type(\n ) == \"CharField\" or field.get_internal_type() == \"TextField\"]\n pen_fields = Pen._meta.get_fields()\n pen_field_names = [\"pen__\" + field.name for field in pen_fields if field.get_internal_type(\n ) == \"CharField\" or field.get_internal_type() == \"TextField\"]\n knife_fields = Knife._meta.get_fields()\n knife_field_names = [\"knife__\" + field.name for field in knife_fields if field.get_internal_type(\n ) == \"CharField\" or field.get_internal_type() == \"TextField\"]\n return product_field_names + pen_field_names + knife_field_names\n","sub_path":"apps/shop/views/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563111478","text":"# coding=utf-8\n\n\nfrom pylab import *\n\n\ndef showBar():\n n = 15\n X = np.arange(n)\n # Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n Y1 = (1) * np.random.uniform(0.5, 1.0, n)\n Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)\n bar(X, +Y1, facecolor='#9999ff', edgecolor='white')\n bar(X, -Y2, facecolor='#ff9999', edgecolor='black')\n for x, y in zip(X, Y1):\n text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va='bottom')\n ylim(-1.25, +1.25)\n show()\n\n\ndef showSin():\n X = np.linspace(-np.pi, np.pi, 256, endpoint=True)\n C, S = np.cos(X), np.sin(X)\n # Canvas size\n # figure(figsize=(15,12), dpi=80)\n # Line colors and widths\n plot(X, C, color=\"blue\", linewidth=4, linestyle=\"-\",label=\"cosine\")\n plot(X, S, color=\"red\", linewidth=2.5, linestyle=\"-\", label=\"sine\")\n # plot(X,C)\n # plot(X,S)\n # Widen the margins around the data\n # xlim(X.min()*1.5, X.max()*1.5)\n # ylim(C.min()*1.1, C.max()*1.1)\n # X and Y axis ticks\n xticks([i/2*np.pi for i in range(-2,3)],[ r'%s$\pi$'% (i/2) for i in range(-2,3)])\n yticks([-1, 0, +1])\n\n # ax = gca()\n # # Remove the top and right borders\n # ax.spines['right'].set_color('none')\n # ax.spines['top'].set_color('none')\n # # Anchor the bottom spine at y=0\n # ax.xaxis.set_ticks_position('bottom')\n # ax.spines['bottom'].set_position(('data',0))\n # # Anchor the left spine at x=0\n # ax.yaxis.set_ticks_position('left')\n # ax.spines['left'].set_position(('data',0))\n\n show()\n\ndef showMy():\n X = range(-20,21)\n plot(X,[i*i*i + 8*i*i -100*i for i in X])\n ax = gca()\n ax.spines['right'].set_color('none')#remove the right border\n ax.spines['top'].set_color('none')\n # ax.xaxis.set_ticks_position('bottom')\n ax.spines['bottom'].set_position(('data',0))#anchor the bottom spine at 0\n # ax.yaxis.set_ticks_position('left')\n ax.spines['left'].set_position(('data',0))\n show()\nif __name__ == \"__main__\":\n # showBar()\n # showSin()\n showMy()\n","sub_path":"jingle/test/MatplotlibTest.py","file_name":"MatplotlibTest.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"49106843","text":"# JupyterHub configuration\n#\n## If you update this file, do not forget to delete the `jupyterhub_data` volume before restarting the jupyterhub service:\n##\n## docker volume rm jupyterhub_jupyterhub_data\n##\n## or, if you changed the COMPOSE_PROJECT_NAME to <project_name>:\n##\n## docker volume rm <project_name>_jupyterhub_data\n##\n\nimport os\n\n
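## Below: generic hub settings, the GitHub OAuth authenticator, the Docker spawner and per-user resource limits.\n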
## Generic\nc.JupyterHub.admin_access = True\nc.Spawner.default_url = '/lab'\n\n## Authenticator\n\nc.JupyterHub.log_level = 10\nfrom oauthenticator.github import LocalGitHubOAuthenticator\nc.JupyterHub.authenticator_class = LocalGitHubOAuthenticator\n\nc.LocalGitHubOAuthenticator.create_system_users = True\n\nc.Authenticator.allowed_users = allowed_users = set()\nc.JupyterHub.admin_users = admin = set()\n\nimport sys\n\njoin = os.path.join\n\nhere = os.path.dirname(__file__)\nroot = os.environ.get('OAUTHENTICATOR_DIR', here)\nsys.path.insert(0, root)\n\nwith open(join(root, 'userlist')) as f:\n for line in f:\n line = line.strip()\n if not line:\n continue\n parts = line.split()\n name = parts[0]\n allowed_users.add(name)\n if len(parts) > 1 and parts[1] == 'admin':\n admin.add(name)\n\nc.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']\n\n\n## Docker spawner\nc.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'\nc.DockerSpawner.image = os.environ['DOCKER_JUPYTER_CONTAINER']\nc.DockerSpawner.network_name = os.environ['DOCKER_NETWORK_NAME']\n# See https://github.com/jupyterhub/dockerspawner/blob/master/examples/oauth/jupyterhub_config.py\nc.JupyterHub.hub_ip = os.environ['HUB_IP']\n\n# user data persistence\n# see https://github.com/jupyterhub/dockerspawner#data-persistence-and-dockerspawner\nnotebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/jovyan'\nc.DockerSpawner.notebook_dir = notebook_dir\nc.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }\n\n# Other stuff\nc.Spawner.cpu_limit = 1\nc.Spawner.mem_limit = '1G'\n\n\n## Services\n#c.JupyterHub.services = [\n# {\n# 'name': 'cull_idle',\n# 'admin': True,\n# 'command': 'python /srv/jupyterhub/cull_idle_servers.py --timeout=3600'.split(),\n# },\n#]\n","sub_path":"jupyterhub/jupyterhub_config.py","file_name":"jupyterhub_config.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175806430","text":"from math import sin, cos, atan2, radians, sqrt\n\n# Approx. radius of earth in km (Required for distance calculation)\nR = 6373.0\n\n# Fares' service id : 0=Lyft, 1=XL, 2=Lux Black, 3=Lux Black XL\n# Rates are : base fare, cost per mile, cost per min, max fare & min fare respectively\nfares = [[2.29,1.58,0.32,450.0,7.19], [3.46,2.56,0.45,450.0,9.43], [6.29,3.37,0.59,700.0,13.47], [12.58,4.05,0.72,700.0,22.45]]\n\n\n# Fare calculation\ndef calculate_rate(lat_src, lon_src, lat_dest, lon_dest, wait_time, service_id):\n\n
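 # Warn (but still compute the fare) when any endpoint falls outside the NYC bounding box.\n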
 # NYC Coordinate range. Source : https://on.nyc.gov/2s80jgV\n if not (-74.257159 <= lon_src <= -73.699215 and -74.257159 <= lon_dest <= -73.699215 and 40.495992 <= lat_src <= 40.915568 and 40.495992 <= lat_dest <= 40.915568):\n print(\"WARNING : Coordinates specified may not be in NYC\")\n\n # Distance calculation (haversine formula)\n delta_lat = radians(lat_dest) - radians(lat_src)\n delta_lon = radians(lon_dest) - radians(lon_src)\n a = sin(delta_lat / 2)**2 + cos(radians(lat_src)) * cos(radians(lat_dest)) * sin(delta_lon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n distance = (R*c)*0.621371 # km to miles\n\n # Fare definitions\n base_fare = fares[service_id][0]\n cost_distance = (distance*fares[service_id][1])\n cost_wait = wait_time*fares[service_id][2]\n min_fare = fares[service_id][4]\n max_fare = fares[service_id][3]\n\n total_cost = base_fare + cost_distance + cost_wait\n\n # Max and Min fare conditions\n if total_cost > max_fare:\n return max_fare\n elif total_cost < min_fare:\n return min_fare\n else:\n return round(total_cost, 2)\n\n\n# Insert your parameters below (source lat, source lon, destination lat, destination lon, trip wait time, service id)\nprint(\"Total cost = \", calculate_rate(40.5, -73.8, 40.7, -73.9, 2, 0))","sub_path":"LyftNYCRate.py","file_name":"LyftNYCRate.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"344088782","text":"import pygame\n\nclass Player(object):\n '''\n A class representing the player\n '''\n\n _spritePath = ''\n\n def __init__(self, **kwargs):\n self.name = kwargs.get('name', 'Player')\n self.max_hearts = kwargs.get('max_hearts', 3)\n self.hearts = kwargs.get('hearts', self.max_hearts)\n self._spritePath = kwargs.get('sprite', 'assets/sprites/earth.png')\n self.sprite = pygame.image.load(self._spritePath).convert()\n\n self.posX = 480\n self.posY = 300\n self.score = 0\n self.kills = 0\n\n self.ships = []","sub_path":"classes/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"460068927","text":"import os\nimport sys\nimport cooler\nimport numpy as np\nfrom .utils import redircwd_back_projroot, cool_to_raw\nfrom .utils import remove_zeros, sampling_hic\n\n\ndef save_to_compressed(hic, idx, output_path, output_name):\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n output = os.path.join(output_path, output_name)\n hic = np.asarray(hic)\n print(output)\n np.savez_compressed(output, hic=hic, compact=idx)\n\n\n\ndef run(raw_hic='Rao2014-GM12878-DpnII-allreps-filtered.10kb.cool',\n chromosome_list=['22'],\n genomic_distance=2000000,\n lr_size=40,\n hr_size=40,\n downsample_factor=16):\n methods_name = 'deephic'\n root_dir = redircwd_back_projroot(project_name='refine_resolution')\n experiment_name = '_'.join(\n [methods_name, str(genomic_distance), str(lr_size), str(hr_size)])\n data_cat = raw_hic.split('-')[0] + '_' + raw_hic.split('-')[1] + '_' + raw_hic.split('-')[2] + '-' + str(downsample_factor) + '_' + raw_hic.split('.')[1]\n input_path = os.path.join(\n root_dir, 'data', 'input_'+experiment_name, data_cat)\n # preprocessing_output_path = os.path.join( root_dir, 'data', 'input_' + experiment_name, data_cat)\n\n cell_type = raw_hic.split('-')[1]\n\n [hic_m, hi_res] = cool_to_raw(raw_path=os.path.join(\n root_dir, 'data', 'raw'), raw_hic=raw_hic)\n\n # low_res = int(np.sqrt(downsample_factor)*hi_res)\n\n for chro in chromosome_list:\n
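 # For each chromosome, save the balanced matrix at native resolution (hr) and a downsampled copy (lr).\n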
 # high resolution (native 10kb), saved as chrN_H.npz\n name_hr = f'chr{chro}_H.npz'\n chromosome = 'chr' + chro\n if chromosome not in hic_m.chromnames:\n continue\n mat_hr = hic_m.matrix(balance=True).fetch(chromosome)\n [mat_hr, idx] = remove_zeros(mat_hr)\n save_to_compressed(mat_hr, idx, output_path=os.path.join(\n input_path, 'hr'), output_name=name_hr)\n # downsampled low resolution, saved as chrN_L.npz\n name_lr = f'chr{chro}_L.npz'\n mat_lr = sampling_hic(mat_hr, downsample_factor, fix_seed=True)\n save_to_compressed(mat_lr, idx, output_path=os.path.join(\n input_path, 'lr'), output_name=name_lr)\n\nif __name__ == '__main__':\n run()","sub_path":"software/prepare_deephic_seq.py","file_name":"prepare_deephic_seq.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614534347","text":"from turtle import *\nt=Turtle()\nw=Screen()\n#w.bgcolor(\"Yellow\")\n#w.bgpic(\"school.gif\")\nw.title(\"My first turtle program\")\nt.shape(\"turtle\")\nt.pensize(10)\nt.speed(6)\nt.hideturtle()\n#'fastest':0\n#'fast':10\n#'normal':6\n#'slow':3\n#'slowest':1\n#t.shape(\"circle\")\n'''t.color(\"red\",\"green\")\nt.forward(200)\nt.left(90)\nt.forward(200)\nt.left(90)\nt.forward(200)\nt.left(90)\nt.forward(200)\nt.left(90)'''\nt.pencolor(\"Yellow\")\nt.fillcolor(\"red\")\nt.begin_fill()\nfor i in range(4):\n t.fd(100)\n t.left(90)\nt.end_fill()\n # t.bk(500)\n\n\n\ndone()\n\n","sub_path":"Turtleintro2.py","file_name":"Turtleintro2.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"200602895","text":"import re\nimport sys\nimport os\nimport subprocess\nimport uuid\n\nfrom zipfile import ZipFile\nfrom dotenv import load_dotenv, find_dotenv\nfrom pymongo import MongoClient\nfrom pathlib import Path\n\n\nPATH_ROOT = Path().absolute().parent.parent\nsys.path.append(str(PATH_ROOT))\n\nfrom scdf.src.scripts.bilhetagem import processar_interceptacao_telefonica\nfrom scdf.src.scripts.index_files import index_files\nfrom scdf.src.scripts.parse_emails import parse_emails\nfrom scdf.src.scripts.recursive_folders import recursive_folders\nfrom scdf.src.scripts.tika_textos import tika_textos\nfrom scdf.src.scripts.processar_arquivos import insert_words\n\nTIKA_CLASS = tika_textos()\nRECURSIVE_CLASS = recursive_folders()\nload_dotenv(find_dotenv())\nmongo_url = os.getenv(\"mongo_url\")\n\n\ndef inserir_investigacao(id_inv, id_responsavel, myclient):\n mydb_master = myclient[\"SCDF\"]\n col = mydb_master[\"investigacoes\"]\n inv = col.find_one(\n {\n \"id_investigacao\": id_inv,\n \"id_responsavel\": id_responsavel,\n }\n )\n if not inv:\n col.insert_one(\n {\n \"id_investigacao\": id_inv,\n \"id_responsavel\": id_responsavel,\n }\n )\n\n\ndef indice_arquivos(filepaths, id_inv, path_inicial, mydb):\n i = index_files(filepaths)\n i.save_paths_file(\n path_inicial + \"indice_arquivos_investigacao_\" + str(id_inv),\n id_inv,\n list_paths=filepaths,\n csv_file=True,\n mydb=mydb,\n )\n\n\ndef processar_arquivo_texto(filepaths, id_inv, mydb):\n mascaras = {\n \"RG\": r\"\\d{1,2}\\.\\d{6}\\-\\w|\\d{1,2}\\.\\d{3}\\.\\d{3}\\-\\w\",\n \"CPF\": r\"\\d{3}\\.\\d{3}\\.\\d{3}\\-\\d{2}\",\n \"Email\": r\"[^@]+@[^@]+\\.[^@\\s\\.]+\",\n \"Telefone\": r\"[\\s\\.\\,]\\d{8,9}[\\s\\.\\,]|[\\s\\.\\,]\\d{4,5}[\\-\\.\\s]\\d{4}[\\s\\.\\,]\",\n \"Data\": r\"\\d{2}[\\./\\\\]\\d{2}[\\./\\\\]\\d{4}\",\n }\n mycol = mydb[\"indice_palavras_documentos_\" + id_inv]\n col_palavras_interesse = mydb[\"palavras_interesse_\" + id_inv]\n
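 # Extract text with Tika, index its words, then scan for RG/CPF/email/phone/date patterns.\n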
 for filepath in filepaths:\n if filepath[-4:] == \"docx\" or filepath[-3:] == \"pdf\" or filepath[-3:] == \"doc\":\n try:\n texto = TIKA_CLASS.process_file(filepath)\n insert_words(texto, filepath, mycol)\n for nome_r, reg in mascaras.items():\n lista_regex = re.findall(reg, texto)\n for l in lista_regex:\n col_palavras_interesse.insert_one(\n {\n \"arquivo\": filepath,\n \"tipo_expressão\": nome_r,\n \"resultado_encontrado\": l,\n }\n )\n except Exception as e:\n print(\"Erro {} em processar arquivo \".format(e), filepath)\n\n\ndef processar_emails(file_list, id_inv, destination_path, myclient):\n if len(file_list) > 0:\n PARSER_EMAILS = parse_emails(file_list, id_inv, destination_path, myclient)\n df = PARSER_EMAILS.email_to_excel(myclient[\"SCDF_\" + id_inv])\n PARSER_EMAILS.relatorio_geral(df, myclient[\"SCDF_\" + id_inv])\n\n\ndef unzip_files(filepaths):\n for file in filepaths:\n with ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(\"/\".join(file.split(\"/\")[:-1]))\n\n\n# def vetorizacao_textos(filepath, path_inicial):\n# df = pd.read_csv(filepath)\n# paths = []\n# for _, row in df.iterrows():\n# if row[\"TIPO_ARQUIVO\"] == \"txt\":\n# paths.append(row[\"PATH_ARQUIVO\"])\n# if len(paths):\n# w = word2vec_textos()\n# w.create_model(\n# filepath=path_inicial + \"word2vec_model.bin\", path_multiple=paths\n# )\n\n\nif __name__ == \"__main__\":\n\n path_inicial = sys.argv[1]\n id_responsavel = sys.argv[2]\n id_inv = sys.argv[3]\n if id_inv == \"0\":\n id_inv = str(uuid.uuid4()).split(\"-\")[0]\n\n myclient = MongoClient(mongo_url)\n mydb = myclient[\"SCDF_\" + id_inv]\n\n destination_path = sys.argv[4]\n print(\"Id da investigação registrado: \", id_inv)\n inserir_investigacao(id_inv, id_responsavel, myclient)\n arq_bilhetagem = None\n col_a_bil = None\n col_b_bil = None\n if len(sys.argv) > 5:\n arq_bilhetagem = sys.argv[5]\n if len(sys.argv) == 8:\n col_a_bil = sys.argv[6]\n col_b_bil = sys.argv[7]\n\n # print(\"Descompactando os arquivos\")\n # arquivos_descompactar = [\n # f for f in RECURSIVE_CLASS.find_files(path_inicial) if f[-3:] == \"zip\"\n # ]\n # unzip_files(arquivos_descompactar)\n\n list_files = RECURSIVE_CLASS.find_files(path_inicial)\n\n # PROCESS EMAILS\n print(\"Processando os emails e gerando relatório\")\n processar_emails(\n [i for i in list_files if i[-3:] == \"msg\"], id_inv, destination_path, myclient\n )\n\n # PROCESS PDF AND WORD FILES\n print(\"Processando os arquivos de texto\")\n processar_arquivo_texto(\n list_files,\n id_inv,\n mydb,\n )\n indice_arquivos(list_files, id_inv, path_inicial, mydb)\n\n # # VECTORIZATION\n # vetorizacao_textos(path_inicial+'indice_arquivos_investigacao_'+str(id_inv)+'.csv',path_inicial)\n\n # PROCESS CALL RECORDS AND GENERATE THE REPORT\n if arq_bilhetagem:\n print(\"Processando arquivo de bilhetagem\")\n if col_a_bil:\n processar_interceptacao_telefonica(\n arq_bilhetagem,\n id_inv,\n path_inicial,\n colunaOrig=col_a_bil,\n colunaDest=col_b_bil,\n )\n else:\n processar_interceptacao_telefonica(\n arq_bilhetagem,\n id_inv,\n path_inicial,\n colunaOrig=\"Origem/IMEI\",\n colunaDest=\"Destino/IMEI\",\n )\n","sub_path":"src/.ipynb_checkpoints/upload_investigacao-checkpoint.py","file_name":"upload_investigacao-checkpoint.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646999309","text":"from utilities.config import FORMATTER_STRING, OUTPUT_DIR, LOGGING_FILE\nimport logging.config\nimport sys\nimport os\n\n\ndef init_logger():\n # Create 
the path for the log file\n path = os.path.join(OUTPUT_DIR, LOGGING_FILE)\n # Create logger\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n # Create Formatter\n formatter = logging.Formatter(FORMATTER_STRING)\n # create a file handler and add it to logger\n file_handler = logging.FileHandler(path)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n log.addHandler(file_handler)\n # create a stream handler and add it to logger\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setLevel(logging.ERROR)\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return log","sub_path":"utilities/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"372507275","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\nclass Read_txt:\n def __init__(self):\n self.ip=''\n def read_txt(self):\n with open(\"../config/IP\", \"r\") as f:\n content=f.read()\n for i in content:\n self.ip+=i\n ip=self.ip.split(\"=\")[1]\n return ip\n\n\n","sub_path":"woniubossV2/read_file/read_txt.py","file_name":"read_txt.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167694673","text":"import matplotlib.pyplot as plt\r\n\r\n# # 1.2.4\r\n#\r\n# x1 = [5, 20, 50, 100, 200]\r\n# y1_1 = [0.535431222073, 0.143490816141, 0.0524036567181, 0.047132848027, 0.0469374509791]\r\n# y1_2 = [0.705598679074, 0.556377195394, 0.48541936981, 0.43740992187, 0.440685272087]\r\n#\r\n# plt.title('Average Cross Entropy of the Data with Different Numbers of Hidden Units')\r\n# plt.xlabel('Numbers of Hidden Units')\r\n# plt.ylabel('Average Cross Entropy')\r\n#\r\n# plt.plot(x1, y1_1, color=\"red\", linewidth=2.5, linestyle=\"-\", label=\"Training Dataset\")\r\n# plt.plot(x1, y1_1, 'ro', color='black')\r\n# plt.plot(x1, y1_2, color=\"blue\", linewidth=2.5, linestyle=\"-\", label=\"Validation Dataset\")\r\n# plt.plot(x1, y1_2, 'ro', color='black')\r\n# plt.legend(loc='upper right')\r\n# plt.axis([0, 250, 0, 0.75])\r\n# plt.show()\r\n#\r\n# # 1.2.6\r\n#\r\n# x2 = [0.1, 0.01, 0.001]\r\n# y2_1 = [0.0250880538617, 0.0524036567181, 0.383153297005]\r\n# y2_2 = [0.835180016243, 0.48541936981, 0.522567099928]\r\n#\r\n# plt.title('Average Cross Entropy of the Data with Different Learning Rates')\r\n# plt.xlabel('Learning Rate')\r\n# plt.ylabel('Average Cross Entropy')\r\n#\r\n# plt.plot(x2, y2_1, color=\"red\", linewidth=2.5, linestyle=\"-\", label=\"Training Dataset\")\r\n# plt.plot(x2, y2_1, 'ro', color='black')\r\n# plt.plot(x2, y2_2, color=\"blue\", linewidth=2.5, linestyle=\"-\", label=\"Validation Dataset\")\r\n# plt.plot(x2, y2_2, 'ro', color='black')\r\n# plt.legend(loc='upper right')\r\n# plt.axis([0, 0.12, 0, 1])\r\n# plt.show()\r\n\r\n# 1.2.6.1\r\n\r\nx_3 = []\r\nfor i in range(1, 101):\r\n x_3.append(i)\r\n\r\n# print len(x_3)\r\n\r\ny_0_0 = map(float, open('train_0.txt').read().splitlines())\r\ny_0_1 = map(float, open('valida_0.txt').read().splitlines())\r\ny_1_0 = map(float, open('train_1.txt').read().splitlines())\r\ny_1_1 = map(float, open('valida_1.txt').read().splitlines())\r\ny_2_0 = map(float, open('train_2.txt').read().splitlines())\r\ny_2_1 = map(float, open('valida_2.txt').read().splitlines())\r\n\r\nplt.title('Average Cross Entropy of the Data with Different Learning Rates')\r\nplt.xlabel('Number of Epoch')\r\nplt.ylabel('Average Cross 
Entropy')\r\n\r\nplt.plot(x_3, y_0_0, color=\"blue\", linewidth=2.5, linestyle=\"-\", label=\"Training Dataset with Lambda=0.1\")\r\nplt.plot(x_3, y_0_1, color=\"blue\", linewidth=2.5, linestyle=\"--\", label=\"Validation Dataset with Lambda=0.1\")\r\nplt.plot(x_3, y_1_0, color=\"green\", linewidth=2.5, linestyle=\"-\", label=\"Training Dataset with Lambda=0.01\")\r\nplt.plot(x_3, y_1_1, color=\"green\", linewidth=2.5, linestyle=\"--\", label=\"Validation Dataset with Lambda=0.01\")\r\nplt.plot(x_3, y_2_0, color=\"purple\", linewidth=2.5, linestyle=\"-\", label=\"Training Dataset with Lambda=0.001\")\r\nplt.plot(x_3, y_2_1, color=\"purple\", linewidth=2.5, linestyle=\"--\", label=\"Validation Dataset with Lambda=0.001\")\r\n# plt.plot(x2, y2_1, 'ro', color='black')\r\n# plt.plot(x2, y2_2, color=\"blue\", linewidth=2.5, linestyle=\"-\", label=\"Validation Dataset\")\r\n# plt.plot(x2, y2_2, 'ro', color='black')\r\nplt.legend(loc='upper right')\r\nplt.axis([0, 100, 0, 2.3])\r\nplt.show()\r\n","sub_path":"Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"249099171","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_namecoin', '0022_name_is_valid_name'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='name',\n name='kind',\n field=models.CharField(max_length=256, blank=True, help_text='Kind', null=True, verbose_name='Kind', db_index=True),\n ),\n migrations.AlterField(\n model_name='name',\n name='name',\n field=models.CharField(max_length=256, blank=True, help_text='Name', null=True, verbose_name='Name', db_index=True),\n ),\n ]\n","sub_path":"django_namecoin/migrations/0023_auto_20160521_1056.py","file_name":"0023_auto_20160521_1056.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123460828","text":"import os\nimport sys\n\nstatic = {}\n\ndef append_paths():\n\tfor root, dirs, files in os.walk('static'):\n\t\tfor filename in files:\n\t\t\tfilepath = os.path.join(root, filename)\n\n\t\t\tif root not in static:\n\t\t\t\tstatic[root] = []\n\n\t\t\tstatic[root].append(filepath)\n\nappend_paths()","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179708121","text":"import os\n\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy_utils\n\nfrom settings import BUNDLES\nfrom settings import DATABASE\n\nCONNECTOR_SQLITE = \"sqlite\"\nCONNECTOR_MYSQL = \"mysql\"\nCONNECTOR_ORACLE = \"oracle\"\nCONNECTOR_POSTGRESQL = \"postgresql\"\nCONNECTOR_BASE_SLASH = \"://\"\nCONNECTOR_SLASH = \"/\"\nCONNECTOR_DOUBLE_SLASH = \"//\"\nCONNECTOR_COLON = \":\"\nCONNECTOR_AT = \"@\"\nROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]\nDOT_DB = \".db\"\nDOT_MODELS = \".models\"\nDOT = \".\"\nSQLALCHEMY_DATABASE_URI = \"SQLALCHEMY_DATABASE_URI\"\nMESSAGE_CONNECTOR = \"CONNECTOR\"\nMESSAGE_USERNAME = \"USERNAME\"\nMESSAGE_PASSWORD = \"PASSWORD\"\nMESSAGE_HOST = \"HOST\"\nMESSAGE_PORT = \"PORT\"\nMESSAGE_NAME = \"NAME\"\nMESSAGE_CONNECTOR_NOT_FOUND = \"CONNECTOR not found\"\n\n
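# DatabaseManager builds the SQLAlchemy connection URI from the DATABASE settings dict; sqlite, mysql, oracle and postgresql connectors are supported.\n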
class DatabaseManager(object):\n \"\"\"\n This class is responsible for loading the database configuration and applying it\n to the Flask application.\n\n The database configuration needs the following keys to work:\n\n -USERNAME\n -PASSWORD\n -HOST\n -PORT\n -NAME\n\n \"\"\"\n database = None\n\n @classmethod\n def start_app(cls, app):\n if not sqlalchemy_utils.database_exists(cls._get_connector()):\n raise Exception(\"Database doesn't exist. Create it and try again\")\n app.config['SQLALCHEMY_DATABASE_URI'] = cls._get_connector()\n cls.database = SQLAlchemy(app)\n\n @classmethod\n def create(cls, app):\n app.config['SQLALCHEMY_DATABASE_URI'] = cls._get_connector()\n cls.database = SQLAlchemy(app)\n cls._load_models()\n cls.database.create_all()\n\n @classmethod\n def drop(cls):\n cls._load_models()\n cls.database.drop_all()\n\n @classmethod\n def get_session(cls):\n return cls.database.session\n\n @classmethod\n def _get_connector(cls):\n connector = DATABASE.get(MESSAGE_CONNECTOR, CONNECTOR_SQLITE).lower()\n if connector == CONNECTOR_SQLITE:\n return cls._get_sqlite_connector()\n elif connector == CONNECTOR_MYSQL or connector == CONNECTOR_ORACLE or connector == CONNECTOR_POSTGRESQL:\n return cls._get_common_connector()\n else:\n raise Exception(MESSAGE_CONNECTOR_NOT_FOUND)\n\n @staticmethod\n def _get_sqlite_connector():\n return CONNECTOR_SQLITE + CONNECTOR_BASE_SLASH + CONNECTOR_DOUBLE_SLASH + ROOT_DIR + CONNECTOR_SLASH + \\\n DATABASE.get(MESSAGE_NAME, CONNECTOR_SQLITE) + DOT_DB\n\n @staticmethod\n def _get_common_connector():\n username = DATABASE.get(MESSAGE_USERNAME)\n password = DATABASE.get(MESSAGE_PASSWORD)\n host = DATABASE.get(MESSAGE_HOST)\n port = DATABASE.get(MESSAGE_PORT)\n name = DATABASE.get(MESSAGE_NAME)\n\n return CONNECTOR_MYSQL + CONNECTOR_BASE_SLASH + username + CONNECTOR_COLON + password + CONNECTOR_AT \\\n + host + CONNECTOR_COLON + port + CONNECTOR_SLASH + name\n\n @staticmethod\n def _load_models():\n for bundle in BUNDLES:\n package = bundle[0] + DOT_MODELS\n files_path = ROOT_DIR + CONNECTOR_SLASH + package.replace(DOT, CONNECTOR_SLASH)\n files = [file_ for file_ in os.listdir(files_path) if \"__\" not in file_ and \".pyc\" not in file_]\n # import every model module so SQLAlchemy sees the table definitions\n for model in files:\n __import__(package + DOT + model.replace(\".py\", \"\"))\n\n","sub_path":"managers/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"375012855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport serial\nimport serial.tools.list_ports\n\nport_list = list(serial.tools.list_ports.comports())\n\nif len(port_list) <= 0:\n print (\"No serial port could be found!\")\n\nelse:\n port_list_0 = list(port_list[0])\n\n port_serial = port_list_0[0]\n\n ser = serial.Serial(port_serial, 115200, timeout=0.1)\n print (ser)\n print (\"Link...\", ser.name)\n data = ''\n while 1:\n num = ser.inWaiting() > 0\n if ser.inWaiting():\n print (ser.readline())\n#\n#\n# class Ser(object):\n# def __init__(self):\n# # Open the serial port\n# self.port = serial.Serial(port='3', baudrate=115200, timeout=2)\n# print (self.port)\n#\n# # Full flow for sending a command\n# def send_cmd(self, cmd):\n# self.port.write(cmd)\n# response = self.port.readall()\n# response = self.convert_hex(response)\n# return response\n#\n# # Helper that converts a response to hex strings\n# def convert_hex(self, string):\n# res = []\n# result = []\n# for item in string:\n# res.append(item)\n# for i in res:\n# result.append(hex(i))\n# return 
result\n","sub_path":"fish/serial-arduino.py","file_name":"serial-arduino.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"182006274","text":"import pygame\nfrom Main.DrumPage.Buttons.ToggleButtons import toggleBtn\nfrom Main.SynthPage.SoundGenerator import makeScale\n\n#self.game.mainmenu.synthMain.displaynotes\nclass DisplayNotes():\n def __init__(self,game):\n self.game= game\n self.notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']\n\n self.notesButtons = [0] * len(self.notes)\n for i in range(len(self.notes)):\n self.notesButtons[i] = toggleBtn(self.game,1)\n\n self.notesButtons[0].toggleState=1\n\n self.activeNote = \"C\"\n\n\n\n\n\n\n def __call__(self, *args, **kwargs):\n keys = pygame.key.get_pressed()\n for i in range(len(self.notes)):\n self.notesButtons[i].button(self.notes[i],\n self.game.displayW * .1,\n self.game.displayH * .1 + i * self.game.buttonSize *1.5,\n self.game.buttonSize,\n self.game.buttonSize,\n self.game.lightpurple,\n self.game.bright_gray,\n onStr = self.notes[i],\n hoverStr = self.notes[i],\n whenClicked = (lambda: self.noteclicked(i)) #pass a function with argument as a\n #function call back\n )\n\n if self.game.mainmenu.synthMain.displayscale.activeScale != -1:\n for i in range(len(self.notesButtons)):\n PL = [(self.game.displayW * .1,\n self.game.displayH * .1 + i * self.game.buttonSize *1.5),\n\n\n\n (self.game.displayW * .1 +self.game.buttonSize,\n self.game.displayH * .1 + i * self.game.buttonSize * 1.5),\n\n\n\n (self.game.displayW * .1 +self.game.buttonSize,\n self.game.displayH * .1 + i * self.game.buttonSize * 1.5 + self.game.buttonSize),\n\n (self.game.displayW * .1,\n self.game.displayH * .1 + i * self.game.buttonSize * 1.5 + self.game.buttonSize),\n ]\n\n\n note = self.game.mainmenu.synthMain.displaynotes.notes[i]\n\n self.rootNote = self.game.mainmenu.synthMain.displayroot.rootNote\n\n self.rootDict = dict()\n\n scale = self.game.mainmenu.synthMain.scales[self.game.mainmenu.synthMain.displayscale.activeScale]\n ms = makeScale(self.rootNote, scale)\n\n for i in ms:\n self.rootDict[i] = 1\n\n for i in self.game.mainmenu.synthMain.displaynotes.notes:\n if i not in self.rootDict:\n self.rootDict[i] = -1\n\n\n\n\n if self.rootDict[note] == 1:\n pygame.draw.polygon(self.game.gameDisplay, self.game.black,PL,1 )\n\n\n self.keyPressed(self.rootDict,keys)\n\n else:\n self.keyPressed(None,keys)\n\n\n\n\n def noteclicked(self,num):\n for i in range(len(self.notesButtons)):\n if i != num:\n self.notesButtons[i].toggleState=0\n\n self.notesButtons[num].toggleState =1\n\n self.activeNote = self.notes[num]\n\n\n def keyPressed(self, dix,keys):\n\n\n notesums=-1\n activeNote = -1\n if dix is None:\n notesums = -1\n\n count =0\n for i in self.notes:\n\n if i == self.activeNote:\n activeNote=count\n break\n\n count +=1\n\n\n if keys[pygame.K_w] or keys[pygame.K_s]:\n self.notesButtons[activeNote].toggleState = 0\n\n if dix== None: # no scale -> free to go to any note\n\n if keys[pygame.K_w]:\n #print activeNote\n newNote = (activeNote-1) % len(self.notes)\n\n else:\n newNote =(activeNote+1) % len(self.notes)\n\n\n\n\n #pass any\n\n else:\n newNote=-1\n if keys[pygame.K_w]:\n\n while newNote ==-1:\n activeNote -= 1\n newNote = (activeNote) % len(self.notes)\n\n if dix[self.notes[newNote]] ==-1:\n newNote =-1\n\n else:\n\n while newNote == -1:\n activeNote +=1\n newNote = (activeNote) % len(self.notes)\n\n #print newNote, self.notes[newNote], 
dix[self.notes[newNote]]\n\n #print newNote\n if dix[self.notes[newNote]] ==-1:\n newNote = -1\n\n\n\n self.notesButtons[newNote].toggleState=1\n\n self.activeNote = self.notes[newNote]\n\n\n\n\n","sub_path":"Main/SynthPage/SynthDisplayNotes.py","file_name":"SynthDisplayNotes.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"83949370","text":"\nfrom sage.misc.temporary_file import tmp_filename\nfrom sage.plot.colors import rainbow\nimport os\nimport sys\n\ndef gen_html_code(JSONgraph):\n\n try :\n js_code_file = open(path_To_Project_Repo+\"/JS_Graph_Sage/src/HTML/base_html.html\", 'r')\n except :\n print(\"Repository \"+path_To_Project_Repo+\" not found, update it with _update_JS_Repo(path)\")\n sys.exit(1)\n js_code = js_code_file.read().replace(\"// GRAPH_DATA_HEREEEEEEEEEEE\", JSONgraph)\n js_code_file.close()\n\n # Add d3.js script depending on whether d3js package is installed.\n #d3js_filepath = os.path.join(SAGE_SHARE, 'd3js', 'd3.min.js')\n #if os.path.exists(d3js_filepath):\n # with open(d3js_filepath, 'r') as d3js_code_file:\n # d3js_script = ''\n #else:\n \n\n # Writes the temporary .html file\n try :\n filename = path_To_Project_Repo+'/JS_Graph_Sage/obj/result.html'\n except :\n print(\"Repository \"+path_To_Project_Repo+\" not found, update it with _update_JS_Repo(path)\")\n sys.exit(1)\n f = open(filename, 'w')\n f.write(js_code)\n f.close()\n\n return filename\n\n\ndef graph_to_JSON(G,\n vertex_partition=[],\n vertex_colors=None,\n edge_partition=[],\n layout=None,\n charge=-120,\n link_distance=100,\n link_strength=2,\n gravity=.04,\n vertex_size=12,\n edge_thickness=4):\n\n directed = G.is_directed()\n multiple_edges = G.has_multiple_edges()\n\n # Associate an integer to each vertex\n v_to_id = {v: i for i, v in enumerate(G)}\n\n # Vertex colors\n if vertex_colors is not None:\n vertex_partition = list(vertex_colors.values())\n len_vertex_partition = len(vertex_partition)\n color = {i: len_vertex_partition for i in range(G.order())}\n for i, l in enumerate(vertex_partition):\n for v in l:\n color[v_to_id[v]] = i\n\n # Vertex list\n # Data for vertex v must be at position v_to_id[v] in list nodes\n nodes = [{\"name\": str(v), \"group\": str(color[v_to_id[v]])} for v in G]\n\n # Edge colors.\n edge_color_default = \"#aaa\"\n color_list = rainbow(len(edge_partition))\n edge_color = {}\n for i, l in enumerate(edge_partition):\n for e in l:\n u, v, label = e if len(e) == 3 else e+(None,)\n edge_color[u, v, label] = color_list[i]\n if not directed:\n edge_color[v, u, label] = color_list[i]\n\n # Edge list\n edges = []\n seen = {} # How many times has this edge been seen ?\n\n for u, v, l in G.edge_iterator():\n\n # Edge color\n color = edge_color.get((u, v, l), edge_color_default)\n\n # Computes the curve of the edge\n curve = 0\n\n # Loop ?\n if u == v:\n seen[u, v] = seen.get((u, v), 0) + 1\n curve = seen[u, v] * 10 + 10\n\n # For directed graphs, one also has to take into account\n # edges in the opposite direction\n elif directed:\n if G.has_edge(v, u):\n seen[u, v] = seen.get((u, v), 0) + 1\n curve = seen[u, v] * 15\n else:\n if multiple_edges and len(G.edge_label(u, v)) != 1:\n
 # Multiple edges. The first one has curve 15, then\n # -15, then 30, then -30, ...\n seen[u, v] = seen.get((u, v), 0) + 1\n curve = (1 if seen[u, v] % 2 else -1) * (seen[u, v] // 2) * 15\n\n elif not directed and multiple_edges:\n # Same formula as above for multiple edges\n if len(G.edge_label(u, v)) != 1:\n seen[u, v] = seen.get((u, v), 0) + 1\n curve = (1 if seen[u, v] % 2 else -1) * (seen[u, v] // 2) * 15\n\n # Adding the edge to the list\n # The source (resp. target) is the index of u (resp. v) in list nodes\n edges.append({\"source\": v_to_id[u],\n \"target\": v_to_id[v],\n \"strength\": 0,\n \"color\": color,\n \"curve\": curve,\n \"name\": str(l)})\n\n loops = [e for e in edges if e[\"source\"] == e[\"target\"]]\n edges = [e for e in edges if e[\"source\"] != e[\"target\"]]\n\n # Defines the vertices' layout if possible\n if layout is not None: \n Gpos = G.graphplot(layout=layout)._pos\n else :\n Gpos = G.get_pos()\n pos = []\n\n if Gpos is not None:\n charge = 0\n link_strength = 0\n gravity = 0\n\n for v in G:\n x, y = Gpos[v]\n pos.append([float(x), float(-y)])\n \n # Encodes the data as a JSON string\n from json import JSONEncoder\n string = JSONEncoder().encode({\"nodes\": nodes,\n \"links\": edges,\n \"loops\": loops,\n \"pos\": pos,\n \"directed\": G.is_directed(),\n \"charge\": int(charge),\n \"link_distance\": int(link_distance),\n \"link_strength\": int(link_strength),\n \"gravity\": float(gravity),\n \"vertex_size\": int(vertex_size),\n \"edge_thickness\": int(edge_thickness)})\n return string\n\n\n\nimport re, webbrowser, time\ndef show_CustomJS(G, layout=None):\n global current_server, graph_client_dict\n\n if not current_server:\n graph_client_dict[1] = G\n launch_connection()\n WaitServer()\n else :\n client_dictionnary_verification(G)\n graph_client_dict[current_server.id_counter + 1] = G\n\n JSONgraph = graph_to_JSON(G, layout=layout)\n webbrowser.open('file://'+os.path.realpath(gen_html_code(JSONgraph)))\n\n\ndef WaitServer():\n global current_server\n\n while current_server is None :\n time.sleep(0.5)\n \n\nclass DataGraph(object):\n def __init__(self, data):\n self.__dict__ = json.loads(data)\n\n\nimport json\nfrom sage.graphs import graph\ndef ConstructGraphFromJSONObject(JSONObject):\n posdict={}\n\n G = None\n\n if JSONObject.directed :\n G = DiGraph()\n else :\n G = Graph()\n\n #Add nodes\n for node in JSONObject.nodes:\n G.add_vertex(node.get(\"name\"))\n\n #Fill the dictionary of node coordinates\n for n in JSONObject.nodes:\n posdict[n.get(\"name\")] = (n.get(\"x\"),n.get(\"y\"))\n\n G.set_pos(posdict)\n\n #Add edges\n for l in JSONObject.links:\n G.add_edge(l.get(\"source\"),l.get(\"target\"))\n\n #Add loops\n if len(JSONObject.loops)>0:\n G.allow_loops(True)\n for l in JSONObject.loops:\n G.add_edge(l.get(\"source\"),l.get(\"target\"))\n\n return G\n\n\n
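# NOTE: the commented-out helpers below are an earlier file-based JSON round-trip; they are kept for reference only.\n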
# def ConstructGraphFromJSON(pathRepo=path_To_JSON_Repo,\n# nameJSON=JSON_name):\n# string = GetBackJSON(nameJSON=nameJSON)\n\n# return ConstructGraphFromJSONString(string)\n\n\n\n# def GetBackJSON(pathRepo=path_To_JSON_Repo,\n# nameJSON=JSON_name):\n\n# filename = pathRepo+nameJSON\n\n# try :\n# f = open(filename, 'r')\n# except :\n# print ('File '+pathRepo+nameJSON+' does not exist')\n# print ('default : path = \'Mes Documents/Git/JS_Graph_Sage/obj/\' -> _update_JSON_Repo(path) to update')\n# print (' name JSON = \'Graph_JSON\' -> _update_JSON_name(name) to update')\n# sys.exit(1)\n\n# if f.mode == 'r':\n# lines = f.readlines()\n\n# return lines[0]\n","sub_path":"src/Python/customJsGraph.py","file_name":"customJsGraph.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"593559367","text":"import math\n\n\ndef printPrime(n):\n count=0\n list=[2]\n\n\n if n<=2:\n return []\n\n def isPrime(num):\n for j in range(2,int(math.sqrt(num))+1):\n if num%j==0:\n return False\n return True\n\n for num in range(3,n):\n if isPrime(num):\n count=count+1\n list.append(num)\n\n return list\n\nn=10\nprint(printPrime(n))","sub_path":"D/printPrime_practice.py","file_name":"printPrime_practice.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"249102926","text":"load(\n \"@io_bazel_rules_dotnet//dotnet/private:common.bzl\",\n \"as_iterable\",\n)\nload(\n \"@io_bazel_rules_dotnet//dotnet/private:providers.bzl\",\n \"DotnetResourceList\",\n)\nload(\"@io_bazel_rules_dotnet//dotnet/private:rules/common.bzl\", \"collect_transitive_info\")\n\ndef _map_resource(d):\n return d.result.path + \",\" + d.identifier\n\ndef emit_assembly_common(\n kind,\n dotnet,\n name,\n srcs,\n deps = None,\n out = None,\n resources = None,\n executable = True,\n defines = None,\n unsafe = False,\n data = None,\n keyfile = None,\n subdir = \"./\",\n target_framework = \"\"):\n \"\"\"See dotnet/toolchains.rst#binary for full documentation. Emits actions for assembly build.\n\n The function is used by all frameworks.\n\n Args:\n kind: String \"core\", \"net\", \"mono\"\n dotnet: DotnetContext provider\n name: name of the assembly\n srcs: source files (as passed from rules: list of labels/targets)\n deps: list of DotnetLibrary. Dependencies as passed from rules\n out: output file name if provided. Otherwise name is used\n resources: list of DotnetResourceList provider\n executable: bool. True for executable assembly, False otherwise\n defines: list of string. Defines to pass to a compiler\n unsafe: /unsafe flag (False - default - /unsafe-, otherwise /unsafe+)\n data: list of targets (as passed from rules). Additional dependencies of the target\n keyfile: File to be used for signing if provided\n subdir: specific subdirectory to be used for target location. Default ./\n \"\"\"\n\n if name == \"\" and out == None:\n fail(\"either name or out must be set\")\n\n if not out:\n result = dotnet.declare_file(dotnet, path = subdir + name)\n else:\n result = dotnet.declare_file(dotnet, path = subdir + out)\n\n if dotnet.debug:\n pdbext = \".mdb\" if kind == \"mono\" else \".pdb\"\n pdb = dotnet.declare_file(dotnet, path = result.basename + pdbext, sibling = result)\n else:\n pdb = None\n\n direct_inputs = []\n\n # The goal is to match msbuild as much as reasonable. 
Inspired by rules_csharp (https://github.com/Brightspace/rules_csharp)\n # https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/compiler-options/listed-alphabetically\n args = dotnet.actions.args()\n\n # General command lines parameters\n args.add(result.path, format = \"/out:%s\")\n if executable:\n target = \"exe\"\n else:\n target = \"library\"\n args.add(target, format = \"/target:%s\")\n\n args.add(\"/checked-\")\n args.add(\"/nostdlib+\")\n args.add(\"/utf8output\")\n args.add(\"/deterministic+\")\n args.add(\"/filealign:512\")\n args.add(\"/nologo\")\n args.add(\"/highentropyva+\")\n args.add(\"/langversion:latest\")\n\n # Debug parameters\n if pdb:\n if kind != \"mono\":\n args.add(\"-debug:full\")\n args.add(\"-pdb:\" + pdb.path)\n args.add(\"/optimize-\")\n args.add(\"/define:TRACE;DEBUG\")\n else:\n args.add(\"-debug\")\n elif kind != \"mono\":\n args.add(\"/debug-\")\n args.add(\"/optimize+\")\n args.add(\"/define:TRACE;RELEASE\")\n if unsafe:\n args.add(\"/unsafe\")\n\n # Keyfile\n if keyfile:\n args.add(\"-keyfile:\" + keyfile.files.to_list()[0].path)\n direct_inputs.append(keyfile.files.to_list()[0])\n\n # Defines\n if defines and len(defines) > 0:\n args.add_all(defines, format_each = \"/d:%s\")\n\n # Resources\n for r in resources:\n if r[DotnetResourceList].result and len(r[DotnetResourceList].result) > 0:\n args.add_all(r[DotnetResourceList].result, format_each = \"/resource:%s\", map_each = _map_resource)\n res_l = [t.result for t in r[DotnetResourceList].result]\n direct_inputs += res_l\n\n # Source files\n attr_srcs = [f for t in srcs for f in as_iterable(t.files)]\n args.add_all(attr_srcs)\n direct_inputs += attr_srcs\n\n # Generate the source file for target framework\n if target_framework != \"\":\n f = dotnet._ctx.actions.declare_file(result.basename + \"._tf_.cs\", sibling = result)\n content = \"\"\"\n [assembly:System.Runtime.Versioning.TargetFramework(\"{}\")]\n \"\"\".format(target_framework)\n dotnet._ctx.actions.write(f, content)\n args.add(f)\n direct_inputs.append(f)\n\n # References - also needs to include transitive dependencies\n (transitive_refs, transitive_runfiles, transitive_deps) = collect_transitive_info(deps)\n\n args.add_all(transitive_refs, format_each = \"/r:%s\")\n\n args.set_param_file_format(\"multiline\")\n\n # Prepare and execute action\n paramfilepath = name + \".param\"\n paramfile = dotnet.declare_file(dotnet, path = paramfilepath)\n dotnet.actions.write(output = paramfile, content = args)\n\n direct_inputs.append(paramfile)\n\n # select runner and action_args\n if kind != \"net\":\n runner = dotnet.runner\n action_args = [dotnet.mcs.path, \"/noconfig\", \"@\" + paramfile.path]\n direct_inputs.append(runner)\n direct_inputs.append(dotnet.mcs)\n else:\n runner = dotnet.mcs\n action_args = [\"/noconfig\", \"@\" + paramfile.path]\n direct_inputs.append(runner)\n\n inputs = depset(direct = direct_inputs, transitive = [transitive_refs])\n dotnet.actions.run(\n inputs = inputs,\n outputs = [result] + ([pdb] if pdb else []),\n executable = runner,\n arguments = action_args,\n mnemonic = \"Compile\" + kind,\n progress_message = (\n \"Compiling \" + kind + \" \" + dotnet.label.package + \":\" + dotnet.label.name\n ),\n )\n\n # Collect runfiles\n direct_runfiles = []\n direct_runfiles.append(result)\n if pdb:\n direct_runfiles.append(pdb)\n\n data_l = [f for t in data for f in as_iterable(t.files)]\n direct_runfiles += data_l\n\n # Final result\n return dotnet.new_library(\n dotnet = dotnet,\n name = name,\n deps = deps,\n 
transitive = transitive_deps,\n runfiles = depset(direct = direct_runfiles, transitive = [transitive_runfiles]),\n result = result,\n pdb = pdb,\n transitive_refs = depset(direct = [result], transitive = [transitive_refs]),\n )\n","sub_path":"dotnet/private/actions/assembly_common.bzl","file_name":"assembly_common.bzl","file_ext":"bzl","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"404159564","text":"\"\"\"This script has different methods to convert the hdf5 hit table from pyBAR into a CERN ROOT Ttree.\r\n\"\"\"\r\nimport tables as tb\r\nimport numpy as np\r\nimport ctypes\r\nimport progressbar\r\nfrom ROOT import TFile, TTree\r\nfrom ROOT import gROOT, AddressOf\r\n\r\n\r\ndef init_hit_struct():\r\n gROOT.ProcessLine(\r\n \"struct HitInfo{\\\r\n ULong64_t event_number;\\\r\n UInt_t trigger_number;\\\r\n UChar_t relative_BCID;\\\r\n UShort_t LVL1ID;\\\r\n UChar_t column;\\\r\n UShort_t row;\\\r\n UChar_t tot;\\\r\n UShort_t BCID;\\\r\n UShort_t TDC;\\\r\n UChar_t trigger_status;\\\r\n UInt_t service_record;\\\r\n UShort_t event_status;\\\r\n };\")\r\n from ROOT import HitInfo\r\n return HitInfo()\r\n\r\n\r\ndef get_root_type_descriptor(numpy_type_descriptor):\r\n ''' Converts the numpy type descriptor to the ROOT type descriptor.\r\n Parameters\r\n ----------\r\n numpy_type_descriptor: np.dtype\r\n '''\r\n return{\r\n 'int64': 'L',\r\n 'uint64': 'l',\r\n 'int32': 'I',\r\n 'uint32': 'i',\r\n 'int16': 'S',\r\n 'uint16': 's',\r\n 'int8': 'B',\r\n 'uint8': 'b',\r\n }[str(numpy_type_descriptor)]\r\n\r\n\r\ndef get_c_type_descriptor(numpy_type_descriptor):\r\n ''' Converts the numpy type descriptor to the ctype descriptor.\r\n Parameters\r\n ----------\r\n numpy_type_descriptor: np.dtype\r\n '''\r\n return{\r\n 'int64': ctypes.c_longlong,\r\n 'uint64': ctypes.c_ulonglong,\r\n 'int32': ctypes.c_int,\r\n 'uint32': ctypes.c_uint,\r\n 'int16': ctypes.c_short,\r\n 'uint16': ctypes.c_ushort,\r\n 'int8': ctypes.c_byte,\r\n 'uint8': ctypes.c_ubyte,\r\n }[str(numpy_type_descriptor)]\r\n\r\n\r\ndef init_tree_from_table(table, chunk_size=1, tree_entry=None):\r\n ''' Initializes a ROOT tree from a HDF5 table.\r\n Takes the HDF5 table column names and types and creates corresponding branches. 
If a chunk size is specified the branches will have the length of the chunk size and\r\n an additional parameter is returned to change the chunk size at a later stage.\r\n If a tree_entry is defined (a ROOT c-struct) the new tree has the branches set to the corresponding tree entry address.\r\n\r\n Parameters\r\n ----------\r\n table: tables.Table\r\n chunk_size: int\r\n tree_entry: ROOT c-struct or None\r\n '''\r\n if(chunk_size > 1 and tree_entry is not None):\r\n raise NotImplementedError()\r\n\r\n tree = TTree('Table', 'Converted HDF5 table')\r\n n_entries = None\r\n if chunk_size > 1:\r\n n_entries = ctypes.c_int(chunk_size) if chunk_size > 1 else 1\r\n tree.Branch('n_entries', ctypes.addressof(n_entries), 'n_entries/I') # needs to be added, otherwise one cannot access chunk_size_tree\r\n\r\n for column_name in table.dtype.names:\r\n tree.Branch(column_name, 'NULL' if tree_entry is None else AddressOf(tree_entry, column_name), column_name + '[n_entries]/' + get_root_type_descriptor(table.dtype[column_name]) if chunk_size > 1 else column_name + '/' + get_root_type_descriptor(table.dtype[column_name]))\r\n\r\n return tree, n_entries\r\n\r\n\r\ndef convert_hit_table(input_filename, output_filename):\r\n ''' Creates a ROOT Tree by looping over all entries of the table.\r\n In each iteration all entries are type cast to int and appended to the ROOT Tree. This is straightforward but rather slow (45 kHz Hits).\r\n The ROOT Tree has its addresses pointing to the hit struct members. The struct is defined in ROOT.\r\n\r\n Parameters\r\n ----------\r\n input_filename: string\r\n The file name of the hdf5 hit table.\r\n\r\n output_filename: string\r\n The filename of the created ROOT file\r\n\r\n '''\r\n with tb.open_file(input_filename, 'r') as in_file_h5:\r\n hits = in_file_h5.root.Hits\r\n\r\n myHit = init_hit_struct()\r\n out_file_root = TFile(output_filename, 'RECREATE')\r\n tree, _ = init_tree_from_table(hits, 1, myHit)\r\n\r\n progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.ETA()], maxval=hits.shape[0], term_width=80)\r\n progress_bar.start()\r\n\r\n update_progressbar_index = max(hits.shape[0] // 1000, 1)\r\n\r\n for index, hit in enumerate(hits):\r\n myHit.event_number = int(hit['event_number'])\r\n myHit.trigger_number = int(hit['trigger_number'])\r\n myHit.relative_BCID = int(hit['relative_BCID'])\r\n myHit.LVL1ID = int(hit['LVL1ID'])\r\n myHit.column = int(hit['column'])\r\n myHit.row = int(hit['row'])\r\n myHit.tot = int(hit['tot'])\r\n myHit.BCID = int(hit['BCID'])\r\n myHit.TDC = int(hit['TDC'])\r\n myHit.trigger_status = int(hit['trigger_status'])\r\n myHit.service_record = int(hit['service_record'])\r\n myHit.event_status = int(hit['event_status'])\r\n tree.Fill()\r\n if (index % update_progressbar_index == 0): # increase the progress bar update speed, otherwise progress_bar.update(index) is called too often\r\n progress_bar.update(index)\r\n progress_bar.finish()\r\n\r\n out_file_root.Write()\r\n out_file_root.Close()\r\n\r\n\r\n
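# The chunked variant below points each ROOT branch at a contiguous numpy buffer and fills a whole chunk per Fill() call.\r\n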
def convert_hit_table_fast(input_filename, output_filename):\r\n ''' Creates a ROOT Tree by looping over chunks of the hdf5 table. Some pointer magic is used to increase the conversion speed. Is 40x faster than convert_hit_table.\r\n\r\n Parameters\r\n ----------\r\n input_filename: string\r\n The file name of the hdf5 hit table.\r\n\r\n output_filename: string\r\n The filename of the created ROOT file\r\n\r\n '''\r\n\r\n with tb.open_file(input_filename, 'r') as in_file_h5:\r\n hits_table = in_file_h5.root.Hits\r\n\r\n out_file_root = TFile(output_filename, 'RECREATE')\r\n\r\n tree, chunk_size_tree = init_tree_from_table(hits_table, chunk_size)\r\n\r\n progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.ETA()], maxval=hits_table.shape[0], term_width=80)\r\n progress_bar.start()\r\n\r\n for index in range(0, hits_table.shape[0], chunk_size):\r\n hits = hits_table.read(start=index, stop=index + chunk_size)\r\n\r\n column_data = {} # columns have to be in an additional python data container to prevent the garbage collector from deleting\r\n\r\n for branch in tree.GetListOfBranches(): # loop over the branches\r\n if branch.GetName() != 'n_entries':\r\n column_data[branch.GetName()] = np.ascontiguousarray(hits[branch.GetName()]) # a copy has to be made to get the correct memory alignment\r\n branch.SetAddress(column_data[branch.GetName()].data) # get the column data pointer by name and tell its address to the tree\r\n\r\n if index + chunk_size > hits_table.shape[0]: # decrease tree leaf size for the last chunk\r\n chunk_size_tree.value = hits_table.shape[0] - index\r\n\r\n tree.Fill()\r\n progress_bar.update(index)\r\n\r\n out_file_root.Write()\r\n out_file_root.Close()\r\n\r\n\r\n
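# A per-event variant was sketched below but never finished; it is kept commented out for reference.\r\n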
# def convert_table_event_based(input_filename, output_filename):\r\n#     ''' Creates a ROOT Tree by looping over chunks of the hdf5 table. Some pointer magic is used to increase the conversion speed. It is about 40x faster than convert_hit_table.\r\n#\r\n#     Parameters\r\n#     ----------\r\n#     input_filename: string\r\n#         The file name of the hdf5 hit table.\r\n#\r\n#     output_filename: string\r\n#         The file name of the created ROOT file\r\n#\r\n#     '''\r\n#\r\n#     from analysis import analysis_utils\r\n#\r\n#     with tb.open_file(input_filename, 'r') as in_file_h5:\r\n#         hits_table = in_file_h5.root.Hits\r\n#\r\n#         out_file_root = TFile(output_filename, 'RECREATE')\r\n#\r\n#         tree, n_entries = init_tree_from_table(hits_table, chunk_size)\r\n#\r\n#         for index in range(0, hits_table.shape[0], chunk_size):\r\n#             hits = hits_table.read(start=index, stop=index + chunk_size)\r\n#\r\n#             column_data = {}  # columns have to be kept in an additional python data container to prevent the garbage collector from deleting them\r\n#             column_data_pointer = {}\r\n#             for branch in tree.GetListOfBranches():  # loop over the branches\r\n#                 if branch.GetName() != 'n_entries':\r\n#                     column_data[branch.GetName()] = hits[branch.GetName()].view(np.recarray).copy()  # a copy has to be made to get the correct memory alignment\r\n#\r\n#             hit_index = 0  # needed to access the correct position in the hit array\r\n#             for event_info in analysis_utils.get_n_cluster_in_events(hits['event_number']):\r\n#                 n_hits = event_info[1]\r\n#                 n_entries.value = n_hits\r\n#                 event_hits_pointer = hits[hit_index:]\r\n#                 # for branch in tree.GetListOfBranches():  # loop over the branches\r\n#                 #     if branch.GetName() != 'n_entries':\r\n#                 #         branch.SetAddress(column_data[branch.GetName()].ctypes.data_as(ctypes.POINTER(get_c_type_descriptor(column_data[branch.GetName()].dtype))))  # get the column data pointer by name and tell its address to the tree\r\n#                 hit_index += n_hits\r\n#                 tree.Fill()\r\n#                 break\r\n#             #\r\n#             # progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.ETA()], maxval=hits_table.shape[0])\r\n#             # progress_bar.start()\r\n#             #\r\n#             # for index in range(0, hits_table.shape[0], chunk_size):\r\n#             #     hits = hits_table.read(start=index, stop=index + chunk_size)\r\n#             #\r\n#             #     column_data = {}  # columns have to be kept in an additional python data container to prevent the garbage collector from deleting them\r\n#             #\r\n#             #     for branch in tree.GetListOfBranches():  # loop over the branches\r\n#             #         if branch.GetName() != 'chunk_size_tree':\r\n#             #             column_data[branch.GetName()] = hits[branch.GetName()].view(np.recarray).copy()  # a copy has to be made to get the correct memory alignment\r\n#             #             branch.SetAddress(column_data[branch.GetName()].data)  # get the column data pointer by name and tell its address to the tree\r\n#             #\r\n#             #     if index + chunk_size > hits_table.shape[0]:  # decrease tree leaf size for the last chunk\r\n#             #         chunk_size_tree.value = hits_table.shape[0] - index\r\n#             #\r\n#             #     tree.Fill()\r\n#             #     progress_bar.update(index)\r\n#\r\n#         out_file_root.Write()\r\n#         out_file_root.Close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    chunk_size = 50000  # choose this parameter as large as possible to increase speed, but not too large, otherwise the program crashes\r\n#     convert_hit_table('test.h5', 'output.root')\r\n    convert_hit_table_fast('test.h5', 'output_fast.root')\r\n","sub_path":"host/pybar/utils/convert_table_root_tree.py","file_name":"convert_table_root_tree.py","file_ext":"py","file_size_in_byte":10863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"157345380","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nimport pandas as pd\nfrom mainApp.forms import UnitForm\nfrom mainApp.models import Units, create_from_DF\nfrom mainApp.serializers.unitserializer import UnitSerializer\nfrom mainApp.viewapi.logs import createLog\nimport csv, io\nimport math\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.response import TemplateResponse\nfrom mainApp.middlewares import checkInUrl\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom itertools import tee\n
\n@login_required(login_url='/login/')\ndef unitPagination_page(request, num=1, limit=10):\n    \"\"\"\n    Render the unit list page with pagination and a record limit (no data loaded yet)\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    return TemplateResponse(request, 'adminuet/unit.html', {'page': num, 'limit': limit})\n\n@login_required(login_url='/login/')\n@api_view(['GET'])\ndef unit_getList(request):\n    \"\"\"\n    Get the list of units (unused)\n    Returns every row in unit\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    if request.method == 'GET':\n        unitList = Units.objects.all()\n        unitSerializer = UnitSerializer(unitList, many = True)\n        return Response(unitSerializer.data)\n\n@login_required(login_url='/login/')\n@api_view(['GET'])\ndef unit_getListForOffset(request, offset, limit):\n    \"\"\"\n    Get the units starting at position offset, limited to limit records\n    Returns the unit rows for that offset\n    and the number of pages when dividing by limit\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    if request.method == 'GET':\n        units = Units.objects.order_by('-unitID').all()\n        unitList = units[offset:offset + limit]\n        unitCount = units.count()\n        unitSerializer = UnitSerializer(unitList, many = True)\n        page = math.ceil(unitCount/limit)\n        data = {\n            'data': unitSerializer.data,\n            'numberOfPage': page,\n        }\n        # Write a log entry\n        createLog(request, 'VIEW - Trường', '')\n        return Response(data)\n
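\n# A hedged sketch (the helper name is an assumption and is not used elsewhere):\n# the pagination arithmetic above boils down to ceil(total/limit) pages,\n# using the math module already imported at the top of this file:\ndef _page_count_sketch(total_records, limit):\n    # mirrors unit_getListForOffset(): ceil(total/limit) pages at `limit` rows per page\n    return math.ceil(total_records / limit)\n# e.g. _page_count_sketch(95, 10) == 10 and _page_count_sketch(100, 10) == 10\n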
\n@login_required(login_url='/login/')\n@api_view(['GET','POST'])\ndef unit_form(request, unit_id=0):\n    \"\"\"\n    Shared form for both Create and Update\n    Create uses POST\n    Update uses GET first to fetch the data of the current row\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    try: \n        if request.method == 'GET':\n            if unit_id == 0:\n                unitForm = UnitForm()\n            else:\n                unit = Units.objects.get(pk=unit_id)\n                unitForm = UnitForm(instance = unit)\n            return TemplateResponse(request, 'adminuet/unitform.html', {'form': unitForm})\n        else:\n            contentLog = 'UPDATE - Trường'\n            contentMsg = 'Cập nhật thành công.'\n            if unit_id == 0:\n                unitForm = UnitForm(request.POST)\n                contentLog = 'INSERT - Trường'\n                contentMsg = 'Thêm mới thành công.'\n            else:\n                unit = Units.objects.get(pk=unit_id)\n                unitForm = UnitForm(request.POST, instance = unit)\n            if unitForm.is_valid():\n                unitNameNew = unitForm['unitName'].value()\n                if not checkUnitNameExist(unitNameNew.strip()):\n                    unitForm.save()\n                    # Write a log entry\n                    createLog(request, contentLog, unitNameNew)\n                    messages.success(request, contentMsg)\n                else:\n                    messages.error(request, 'Vui lòng thay đổi tên đơn vị. Đơn vị này đã tồn tại.')\n                    return redirect('/adminuet/unit-form/'+str(unit_id))\n            return redirect('/adminuet/unit/')\n    except Exception:\n        messages.error(request, \"Thao tác thất bại.\")\n        return redirect('/adminuet/unit/')\n\ndef checkUnitNameExist(unitname):\n    \"\"\"\n    Check whether a unit with this name already exists in the DB\n    \"\"\"\n    if Units.objects.filter(unitName=unitname).exists():\n        return True\n    return False\n\n@login_required(login_url='/login/')\ndef unit_delete(request, unit_id):\n    \"\"\"\n    Delete a unit\n    Deletes the row with the given id\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    try:\n        unit = Units.objects.get(pk=unit_id)\n        name = unit.unitName\n        unit.delete()\n        # Write a log entry\n        createLog(request, 'DELETE - Trường', name)\n        messages.success(request, \"Xóa thành công.\")\n    except (Exception) as error:\n        print(error)\n        messages.error(request, \"Thao tác thất bại.\")\n    return redirect('/adminuet/unit/')\n
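\n# A hedged illustration with invented sample rows (UET/HUS are placeholders):\n# import_page() below expects a CSV whose header matches the one written by\n# export_page(), and ingests it exactly the way the view does (io and pandas\n# are already imported at the top of this file):\n_sample_csv_sketch = io.StringIO(\n    'stt,unitName,unitDescription\\n'\n    '1,UET,University of Engineering and Technology\\n'\n    '2,HUS,University of Science\\n'\n)\n_df_sketch = pd.read_csv(_sample_csv_sketch).set_index('stt')  # same call import_page() makes\n# _df_sketch.loc[1, 'unitName'] -> 'UET'\n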
\n@login_required(login_url='/login/')\ndef import_page(request):\n    \"\"\"\n    Read a CSV file and import it into the DB\n    Imports the columns stt, unitName, unitDescription from the CSV file into the Units model\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    template = 'adminuet/unitimport.html'\n    if request.method == 'GET':\n        return TemplateResponse(request, template)\n    try:\n        csv_file = request.FILES['document']\n    except (Exception) as error:\n        messages.error(request,'Lỗi: Chưa chọn tệp dữ liệu.')\n        return TemplateResponse(request, template)\n    if not csv_file.name.endswith('.csv'):\n        messages.error(request,'Lỗi: Sai định dạng tệp. Vui lòng chọn lại tệp')\n        return TemplateResponse(request, template)\n    try:\n        df = pd.read_csv(csv_file).set_index(\"stt\")\n        create_from_DF(df=df, model=Units, searching_cols=['unitName'])\n        # Write a log entry\n        message = str(len(df)) + ' bản ghi'\n        createLog(request, 'IMPORT - Trường', message)\n    except (Exception) as error:\n        messages.error(request,'Lỗi: Dữ liệu không đúng định dạng.')\n        return TemplateResponse(request, template)\n    return redirect('/adminuet/unit/')\n\n\n@login_required(login_url='/login/')\ndef export_page(request):\n    \"\"\"\n    Export the list of units to a CSV file\n    \"\"\"\n    if checkInUrl(request, 'unit') is False:\n        listFunction = request.user.list_function()\n        return HttpResponseRedirect(reverse(listFunction[0]))\n    try:\n        nameFileExport = 'attachment; filename=\"{}.csv\"'.format(\"ListUnit\")\n        list_unit = Units.objects.all()\n        rows = ([i+1, unit.unitName, unit.unitDescription] for unit, i in zip(list_unit, range(list_unit.count())))\n        response = HttpResponse(content_type='text/csv')\n        response['Content-Disposition'] = nameFileExport\n        writer = csv.writer(response)\n        rows, rowCopy = tee(rows)\n        # Write a log entry\n        createLog(request, 'EXPORT - Trường', str(len(list(rowCopy))) + ' bản ghi')\n        writer.writerow(['stt', 'unitName', 'unitDescription'])\n        [writer.writerow([row[0], row[1], row[2]]) for row in rows]\n        return response\n    except (Exception) as error:\n        print(error)\n        messages.error(request, \"Thao tác thất bại.\")\n        return redirect('/adminuet/unit/')","sub_path":"mainApp/viewapi/unitview.py","file_name":"unitview.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"192627637","text":"import numpy as np\nimport math\nimport utm\nfrom scipy.spatial.distance import squareform,pdist\nfrom sklearn.metrics.pairwise import pairwise_distances\n\ndef distance(a,b):\n    return math.sqrt(math.pow((a[0]-b[0]),2)+math.pow((a[1]-b[1]),2))\n\ndef find_endpoint(same_class):\n    same_class=np.array(same_class)\n    time = same_class[:, 2]\n    time = list(map(float, time))\n    if time==[]:\n        return [],[]\n    else:\n        max_time_id=time.index(np.max(time))\n        min_time_id=time.index(np.min(time))\n        origin_point=same_class[min_time_id,:]\n        destination_point=same_class[max_time_id,:]\n        return list(origin_point),list(destination_point)\n\n\ndef find_endpoint_for_head_tail(same_class):\n    same_class=np.array(same_class)\n    point=list(same_class[:,0:2])\n    dis=pairwise_distances(np.array(point), metric='euclidean')\n    max_index=np.where(dis==np.max(dis))\n    row_id=max_index[0][0]\n    col_id=max_index[1][0]\n    if same_class[row_id,2] < same_class[col_id,2]:\n        ori_id=row_id\n        des_id=col_id\n    else:\n        ori_id=col_id\n        des_id=row_id\n    origin_point = same_class[ori_id, :]\n    destination_point = same_class[des_id, :]\n    return list(origin_point),list(destination_point)\n\n\ndef delete_near_point(all_point,min_dis):\n    point = list(all_point[:, 0:2])\n    all_point_list=list(all_point[:, 0:4])\n    delete_id=[]\n    dele=[]\n    dis = squareform(pdist(np.array(point), 'euclidean'))\n    a=np.where(dis math.cos(5*math.pi/180):\n        key_point.append(list(destination_of_class))\n    else:\n        key_point.append(list(middle_of_class))\n        key_point.append(list(destination_of_class))\n\n    if len(slope) % 2 == 1:\n        if list(all_point[len(slope)-1]) not in key_point:\n            key_point.append(list(all_point[len(slope)-1]))\n        if list(all_point[len(slope)]) not in key_point:\n            
key_point.append(list(all_point[len(slope)]))\n return key_point\n\n\ndef calculate_slope(all_point):\n slope=[]\n for j in range(len(all_point)-1):\n point0=all_point[j]\n point1=all_point[j+1]\n point0_x=float(point0[0])\n point0_y=float(point0[1])\n point1_x=float(point1[0])\n point1_y=float(point1[1])\n if point0_x==point1_x:\n class_slope=10000\n else:\n class_slope = (point1_y - point0_y) / (point1_x - point0_x)\n slope.append(class_slope)\n return slope\n\n\ndef insert_for_long(after_concat):\n all_point=[]\n for i in range(len(after_concat)-1):\n point0=after_concat[i]\n point1=after_concat[i+1]\n all_point.append(list(point0))\n dis=distance(point0,point1)\n if dis >= 10:\n insert=[(point0[0]+point1[0])/2,(point0[1]+point1[1])/2,point0[2]]\n all_point.append(list(insert))\n if i==len(after_concat)-2:\n all_point.append(point1)\n return all_point\n\n\ndef utm2jw(all_point,zone_number,zone_letter):\n X = []\n Y = []\n jw = []\n origin_of_class=[]\n for i in all_point:\n X.append(i[0])\n Y.append(i[1])\n jw.append(utm.to_latlon(i[0], i[1], zone_number, zone_letter))\n\n new_jw = list(set(jw))\n new_jw.sort(key=jw.index)\n return new_jw,X,Y\n","sub_path":"Automap/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"572762359","text":"from decimal import Decimal\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import AccountingCustomerParty\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import AccountingSupplierParty\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Address\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import AddressLine\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import AllowanceCharge\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import BuyersItemIdentification\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Contact\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Country\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Delivery\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import DeliveryAddress\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import FinancialInstitution\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import FinancialInstitutionBranch\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import InvoiceLine\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Item\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import ItemInstance\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import LegalMonetaryTotal\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import LotIdentification\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import OrderLineReference\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import OrderReference\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Party\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PartyName\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PartyTaxScheme\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PayeeFinancialAccount\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PaymentMeans\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PaymentTerms\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import 
PostalAddress\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Price\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import SellersItemIdentification\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TaxCategory\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TaxScheme\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TaxSubtotal\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TaxTotal\nfrom ubl.models.common.ubl_common_basic_components_2_1 import AccountTypeCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import AllowanceChargeReasonCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import AllowanceTotalAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Amount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import BaseQuantity\nfrom ubl.models.common.ubl_common_basic_components_2_1 import BuildingName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import BuildingNumber\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CityName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CompanyId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CountrySubentity\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CurrencyCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CustomerAssignedAccountId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CustomizationId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Description\nfrom ubl.models.common.ubl_common_basic_components_2_1 import ElectronicMail\nfrom ubl.models.common.ubl_common_basic_components_2_1 import ExemptionReason\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Id\nfrom ubl.models.common.ubl_common_basic_components_2_1 import IdentificationCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import InvoiceTypeCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import InvoicedQuantity\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Line\nfrom ubl.models.common.ubl_common_basic_components_2_1 import LineExtensionAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import LineId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import LineStatusCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import LotNumberId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import MultiplierFactorNumeric\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Name\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Note\nfrom ubl.models.common.ubl_common_basic_components_2_1 import PayableAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import PaymentMeansCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Percent\nfrom ubl.models.common.ubl_common_basic_components_2_1 import PostalZone\nfrom ubl.models.common.ubl_common_basic_components_2_1 import PriceAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import ProfileId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import RegistrationName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import SalesOrderId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import SalesOrderLineId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import StreetName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import SupplierAssignedAccountId\nfrom ubl.models.common.ubl_common_basic_components_2_1 
import TaxAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import TaxExclusiveAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import TaxTypeCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import TaxableAmount\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Telefax\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Telephone\nfrom ubl.models.common.ubl_common_basic_components_2_1 import UblversionId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Uuid\nfrom ubl.models.maindoc.ubl_invoice_2_1 import Invoice\nfrom xsdata.models.datatype import XmlDate\nfrom xsdata.models.datatype import XmlTime\n\n\nobj = Invoice(\n ublversion_id=UblversionId(\n value=\"2.0\"\n ),\n customization_id=CustomizationId(\n value=\"urn:oasis:names:specification:ubl:xpath:Invoice-2.0:sbs-1.0-draft\"\n ),\n profile_id=ProfileId(\n value=\"bpid:urn:oasis:names:draft:bpss:ubl-2-sbs-invoice-notification-draft\"\n ),\n id=Id(\n value=\"A00095678\"\n ),\n copy_indicator=False,\n uuid=Uuid(\n value=\"849FBBCE-E081-40B4-906C-94C5FF9D1AC3\"\n ),\n issue_date=XmlDate(2005, 6, 21),\n invoice_type_code=InvoiceTypeCode(\n value=\"SalesInvoice\"\n ),\n note=[\n Note(\n value=\"sample\"\n ),\n ],\n tax_point_date=XmlDate(2005, 6, 21),\n order_reference=OrderReference(\n id=Id(\n value=\"AEG012345\"\n ),\n sales_order_id=SalesOrderId(\n value=\"CON0095678\"\n ),\n uuid=Uuid(\n value=\"6E09886B-DC6E-439F-82D1-7CCAC7F4E3B1\"\n ),\n issue_date=XmlDate(2005, 6, 20)\n ),\n accounting_supplier_party=AccountingSupplierParty(\n customer_assigned_account_id=CustomerAssignedAccountId(\n value=\"CO001\"\n ),\n party=Party(\n party_name=[\n PartyName(\n name=Name(\n value=\"Consortial\"\n )\n ),\n ],\n postal_address=PostalAddress(\n street_name=StreetName(\n value=\"Busy Street\"\n ),\n building_name=BuildingName(\n value=\"Thereabouts\"\n ),\n building_number=BuildingNumber(\n value=\"56A\"\n ),\n city_name=CityName(\n value=\"Farthing\"\n ),\n postal_zone=PostalZone(\n value=\"AA99 1BB\"\n ),\n country_subentity=CountrySubentity(\n value=\"Heremouthshire\"\n ),\n address_line=[\n AddressLine(\n line=Line(\n value=\"The Roundabout\"\n )\n ),\n ],\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n ),\n party_tax_scheme=[\n PartyTaxScheme(\n registration_name=RegistrationName(\n value=\"Farthing Purchasing Consortium\"\n ),\n company_id=CompanyId(\n value=\"175 269 2355\"\n ),\n exemption_reason=[\n ExemptionReason(\n value=\"N/A\"\n ),\n ],\n tax_scheme=TaxScheme(\n id=Id(\n value=\"VAT\"\n ),\n tax_type_code=TaxTypeCode(\n value=\"VAT\"\n )\n )\n ),\n ],\n contact=Contact(\n name=Name(\n value=\"Mrs Bouquet\"\n ),\n telephone=Telephone(\n value=\"0158 1233714\"\n ),\n telefax=Telefax(\n value=\"0158 1233856\"\n ),\n electronic_mail=ElectronicMail(\n value=\"bouquet@fpconsortial.co.uk\"\n )\n )\n )\n ),\n accounting_customer_party=AccountingCustomerParty(\n customer_assigned_account_id=CustomerAssignedAccountId(\n value=\"XFB01\"\n ),\n supplier_assigned_account_id=SupplierAssignedAccountId(\n value=\"GT00978567\"\n ),\n party=Party(\n party_name=[\n PartyName(\n name=Name(\n value=\"IYT Corporation\"\n )\n ),\n ],\n postal_address=PostalAddress(\n street_name=StreetName(\n value=\"Avon Way\"\n ),\n building_name=BuildingName(\n value=\"Thereabouts\"\n ),\n building_number=BuildingNumber(\n value=\"56A\"\n ),\n city_name=CityName(\n value=\"Bridgtow\"\n ),\n postal_zone=PostalZone(\n value=\"ZZ99 1ZZ\"\n ),\n 
country_subentity=CountrySubentity(\n value=\"Avon\"\n ),\n address_line=[\n AddressLine(\n line=Line(\n value=\"3rd Floor, Room 5\"\n )\n ),\n ],\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n ),\n party_tax_scheme=[\n PartyTaxScheme(\n registration_name=RegistrationName(\n value=\"Bridgtow District Council\"\n ),\n company_id=CompanyId(\n value=\"12356478\"\n ),\n exemption_reason=[\n ExemptionReason(\n value=\"Local Authority\"\n ),\n ],\n tax_scheme=TaxScheme(\n id=Id(\n value=\"UK VAT\"\n ),\n tax_type_code=TaxTypeCode(\n value=\"VAT\"\n )\n )\n ),\n ],\n contact=Contact(\n name=Name(\n value=\"Mr Fred Churchill\"\n ),\n telephone=Telephone(\n value=\"0127 2653214\"\n ),\n telefax=Telefax(\n value=\"0127 2653215\"\n ),\n electronic_mail=ElectronicMail(\n value=\"fred@iytcorporation.gov.uk\"\n )\n )\n )\n ),\n delivery=[\n Delivery(\n actual_delivery_date=XmlDate(2005, 6, 20),\n actual_delivery_time=XmlTime(11, 30, 0, 0, 0),\n delivery_address=DeliveryAddress(\n street_name=StreetName(\n value=\"Avon Way\"\n ),\n building_name=BuildingName(\n value=\"Thereabouts\"\n ),\n building_number=BuildingNumber(\n value=\"56A\"\n ),\n city_name=CityName(\n value=\"Bridgtow\"\n ),\n postal_zone=PostalZone(\n value=\"ZZ99 1ZZ\"\n ),\n country_subentity=CountrySubentity(\n value=\"Avon\"\n ),\n address_line=[\n AddressLine(\n line=Line(\n value=\"3rd Floor, Room 5\"\n )\n ),\n ],\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n )\n ),\n ],\n payment_means=[\n PaymentMeans(\n payment_means_code=PaymentMeansCode(\n value=\"20\"\n ),\n payment_due_date=XmlDate(2005, 7, 21),\n payee_financial_account=PayeeFinancialAccount(\n id=Id(\n value=\"12345678\"\n ),\n name=Name(\n value=\"Farthing Purchasing Consortium\"\n ),\n account_type_code=AccountTypeCode(\n value=\"Current\"\n ),\n currency_code=CurrencyCode(\n value=\"GBP\"\n ),\n financial_institution_branch=FinancialInstitutionBranch(\n id=Id(\n value=\"10-26-58\"\n ),\n name=Name(\n value=\"Open Bank Ltd, Bridgstow Branch \"\n ),\n financial_institution=FinancialInstitution(\n id=Id(\n value=\"10-26-58\"\n ),\n name=Name(\n value=\"Open Bank Ltd\"\n ),\n address=Address(\n street_name=StreetName(\n value=\"City Road\"\n ),\n building_name=BuildingName(\n value=\"Banking House\"\n ),\n building_number=BuildingNumber(\n value=\"12\"\n ),\n city_name=CityName(\n value=\"London\"\n ),\n postal_zone=PostalZone(\n value=\"AQ1 6TH\"\n ),\n country_subentity=CountrySubentity(\n value=\"London \"\n ),\n address_line=[\n AddressLine(\n line=Line(\n value=\"5th Floor\"\n )\n ),\n ],\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n )\n ),\n address=Address(\n street_name=StreetName(\n value=\"Busy Street\"\n ),\n building_name=BuildingName(\n value=\"The Mall\"\n ),\n building_number=BuildingNumber(\n value=\"152\"\n ),\n city_name=CityName(\n value=\"Farthing\"\n ),\n postal_zone=PostalZone(\n value=\"AA99 1BB\"\n ),\n country_subentity=CountrySubentity(\n value=\"Heremouthshire\"\n ),\n address_line=[\n AddressLine(\n line=Line(\n value=\"West Wing\"\n )\n ),\n ],\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n )\n ),\n country=Country(\n identification_code=IdentificationCode(\n value=\"GB\"\n )\n )\n )\n ),\n ],\n payment_terms=[\n PaymentTerms(\n note=[\n Note(\n value=\"Payable within 1 calendar month from the invoice date\"\n ),\n ]\n ),\n ],\n allowance_charge=[\n AllowanceCharge(\n 
charge_indicator=False,\n allowance_charge_reason_code=AllowanceChargeReasonCode(\n value=\"17\"\n ),\n multiplier_factor_numeric=MultiplierFactorNumeric(\n value=Decimal(\"0.10\")\n ),\n amount=Amount(\n value=Decimal(\"10.00\"),\n currency_id=\"GBP\"\n )\n ),\n ],\n tax_total=[\n TaxTotal(\n tax_amount=TaxAmount(\n value=Decimal(\"17.50\"),\n currency_id=\"GBP\"\n ),\n tax_evidence_indicator=True,\n tax_subtotal=[\n TaxSubtotal(\n taxable_amount=TaxableAmount(\n value=Decimal(\"100.00\"),\n currency_id=\"GBP\"\n ),\n tax_amount=TaxAmount(\n value=Decimal(\"17.50\"),\n currency_id=\"GBP\"\n ),\n tax_category=TaxCategory(\n id=Id(\n value=\"A\"\n ),\n tax_scheme=TaxScheme(\n id=Id(\n value=\"UK VAT\"\n ),\n tax_type_code=TaxTypeCode(\n value=\"VAT\"\n )\n )\n )\n ),\n ]\n ),\n ],\n legal_monetary_total=LegalMonetaryTotal(\n line_extension_amount=LineExtensionAmount(\n value=Decimal(\"100.00\"),\n currency_id=\"GBP\"\n ),\n tax_exclusive_amount=TaxExclusiveAmount(\n value=Decimal(\"90.00\"),\n currency_id=\"GBP\"\n ),\n allowance_total_amount=AllowanceTotalAmount(\n value=Decimal(\"10.00\"),\n currency_id=\"GBP\"\n ),\n payable_amount=PayableAmount(\n value=Decimal(\"107.50\"),\n currency_id=\"GBP\"\n )\n ),\n invoice_line=[\n InvoiceLine(\n id=Id(\n value=\"A\"\n ),\n invoiced_quantity=InvoicedQuantity(\n value=Decimal(\"100\"),\n unit_code=\"KGM\"\n ),\n line_extension_amount=LineExtensionAmount(\n value=Decimal(\"100.00\"),\n currency_id=\"GBP\"\n ),\n order_line_reference=[\n OrderLineReference(\n line_id=LineId(\n value=\"1\"\n ),\n sales_order_line_id=SalesOrderLineId(\n value=\"A\"\n ),\n line_status_code=LineStatusCode(\n value=\"NoStatus\"\n ),\n order_reference=OrderReference(\n id=Id(\n value=\"AEG012345\"\n ),\n sales_order_id=SalesOrderId(\n value=\"CON0095678\"\n ),\n uuid=Uuid(\n value=\"6E09886B-DC6E-439F-82D1-7CCAC7F4E3B1\"\n ),\n issue_date=XmlDate(2005, 6, 20)\n )\n ),\n ],\n tax_total=[\n TaxTotal(\n tax_amount=TaxAmount(\n value=Decimal(\"17.50\"),\n currency_id=\"GBP\"\n ),\n tax_evidence_indicator=True,\n tax_subtotal=[\n TaxSubtotal(\n taxable_amount=TaxableAmount(\n value=Decimal(\"100.00\"),\n currency_id=\"GBP\"\n ),\n tax_amount=TaxAmount(\n value=Decimal(\"17.50\"),\n currency_id=\"GBP\"\n ),\n tax_category=TaxCategory(\n id=Id(\n value=\"A\"\n ),\n percent=Percent(\n value=Decimal(\"17.5\")\n ),\n tax_scheme=TaxScheme(\n id=Id(\n value=\"UK VAT\"\n ),\n tax_type_code=TaxTypeCode(\n value=\"VAT\"\n )\n )\n )\n ),\n ]\n ),\n ],\n item=Item(\n description=[\n Description(\n value=\"Acme beeswax\"\n ),\n ],\n name=Name(\n value=\"beeswax\"\n ),\n buyers_item_identification=BuyersItemIdentification(\n id=Id(\n value=\"6578489\"\n )\n ),\n sellers_item_identification=SellersItemIdentification(\n id=Id(\n value=\"17589683\"\n )\n ),\n item_instance=[\n ItemInstance(\n lot_identification=LotIdentification(\n lot_number_id=LotNumberId(\n value=\"546378239\"\n ),\n expiry_date=XmlDate(2010, 1, 1)\n )\n ),\n ]\n ),\n price=Price(\n price_amount=PriceAmount(\n value=Decimal(\"1.00\"),\n currency_id=\"GBP\"\n ),\n base_quantity=BaseQuantity(\n value=Decimal(\"1\"),\n unit_code=\"KGM\"\n )\n )\n ),\n ]\n)\n","sub_path":"ubl/samples/UBL-Invoice-2.0-Example-NS1.py","file_name":"UBL-Invoice-2.0-Example-NS1.py","file_ext":"py","file_size_in_byte":24538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"393241352","text":"#!/usr/bin/python\n\n#------------------------------------------------------------------------\n# REVISION 
HISTORY\n# add new entries here at the top tagged by date and initials\n# JAP 20140521: revised processing of multi-row display\n# added selection and display of state ranges\n# JAP 20140520: revised event/fluent processing to handle institution parameter\n# suppressed printing of null events\n# allow line breaks after left parenthesis in fluents\n# JAP 20130801: reorganized fluent table layout: initiated at top\n# terminated at the bottom\n# GDB 201305??: added input file argument and consequent changes\n# GDB/JAP?????: changed main loop to iterate over occurred not holdsat\n# JAP 20130??: first version (approx)\n\nfrom __future__ import print_function\nimport re\nimport sys\nimport ply.lex as lex\nfrom collections import defaultdict\nimport string\nfrom itertools import izip\nfrom itertools import count\nimport argparse\nfrom math import ceil\n\nclass myLexer():\n\n # Build the lexer\n # def build(self,**kwargs):\n # self.lexer = lex.lex(object=self, **kwargs)\n\n def __init__(self):\n self.lexer = lex.lex(module=self)\n\n reserved = { }\n\n tokens = ['NAME','NUMBER','LPAR','RPAR','COMMA']\n\n # Tokens\n\n t_COMMA = r','\n t_LPAR = r'\\('\n t_RPAR = r'\\)'\n\n def t_NAME(self,t):\n r'[a-z][a-zA-Z_0-9]*'\n return t\n \n def t_NUMBER(self,t):\n r'\\d+'\n # t.value = int(t.value)\n return t\n\n t_ignore = \" \\t\\r\"\n\n # Comments\n def t_COMMENT(self,t):\n r'%.*'\n pass\n # No return value. Token discarded\n\n def t_newline(self,t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\n def t_error(self,t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\ndef pyvizError(s):\n print(s,file=sys.stderr)\n\nobserved = defaultdict(list)\nholdsat = defaultdict(list)\ninitiated = defaultdict(list)\nterminated = defaultdict(list)\noccurred = defaultdict(list)\n\ndef chunks(l, n):\n \"\"\" Yield successive n-sized chunks from l.\n \"\"\"\n for i in xrange(0, len(l), n):\n yield l[i:i+n]\n\n# command line arguments\n\ndef parse_range(astr):\n# code acquired from\n# http://stackoverflow.com/questions/4248399/page-range-for-printing-algorithm\n result=set()\n for part in astr.split(','):\n x=part.split('-')\n result.update(range(int(x[0]),int(x[-1])+1))\n return sorted(result)\n\ndef arb(s): return s\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-a\", \"--answer-set\", type=arb,\n help=\"specify answer set (default 1)\")\n# GDB 20130430 \nparser.add_argument(\"-i\", \"--answerset-file\", type=arb, \n help=\"specify answer set file\")\n# JAP 20130801\nparser.add_argument(\"-w\", \"--width\", type=int,\n help=\"specify number of states per row (default all)\")\n\n# JAP 20140520\nparser.add_argument(\"-s\", \"--states\", type=arb,\n help=\"specify which states to display (default all)\")\n\nargs=parser.parse_args()\ndisplayWidth = 0\n\nanswer_set=\"1\"\nif args.answer_set:\n answer_set = args.answer_set\n# GDB 20130430 \nif args.answerset_file:\n f = open(args.answerset_file,'r')\n# JAP 20130801\nif args.width:\n displayWidth = args.width\n if displayWidth<0: displayWidth = 0\n\ndocument = \"\"\n\nif args.answerset_file:\n document = f #document + f.read(-1)\nelse:\n document = sys.stdin #document + sys.stdin.read(-1)\n#debug('line:',document)\n\nglobal found\nfound=False\nmylex=myLexer()\nfor line in document:#sys.stdin:\n if re.match(\"Answer: {n}\".format(n=answer_set),line): break\nfor line in document:#sys.stdin:\n # split line into terms so that process can group output about each\n # mechanism found\n found = True\n for term in re.split(' ',line):\n 
mylex.lexer.input(term)\n l = [tok.value for tok in mylex.lexer]\n # print(\"tok.value = \",l)\n if l==[]: continue # skip blanks\n if l[0] in ['holdsat','observed','initiated','terminated','occurred']:\n # some rather tacky dead-reckoning\n what = string.join(l[2:-5],'').replace('_','\\_').replace(',',', ').replace('(','(\\\\allowbreak{}') # allows line breaks after a left paren\n where = l[-4]\n when = int(l[-2])\n # print(what,where,when)\n if l[0]=='holdsat':\n # processHoldsat(l)\n holdsat[when].append(what+\": \"+where)\n elif l[0]=='observed':\n # processObserved(l)\n observed[when].append(what+\": \"+where)\n elif l[0]=='initiated':\n # processInitiated(l)\n initiated[when].append(what+\": \"+where)\n elif l[0]=='terminated':\n # processTerminated(l)\n terminated[when].append(what+\": \"+where)\n elif l[0]=='occurred':\n # processOccurred(l)\n occurred[when].append(what+\": \"+where) if what!='null' else False\n else:\n print(\"% skipping \\\"{term}\\\"\"\n .format(term=string.join(l,'')))\n break # stop after specified answer set\nif not found:\n print(\"Answer set {n} not found\\n\".format(n=answer_set))\n exit(-1)\n\n# establish how many states there are\nevent_count=max(len(occurred),len(observed))\ninst_count=max(len(holdsat),len(initiated),len(terminated))\nselected_states = parse_range(args.states) if args.states else set(range(0,max(event_count,inst_count)+1))\nnstates=len(selected_states)\ndisplayWidth=nstates if displayWidth==0 else displayWidth\n\nprint(\"% events = {e}\\n% states = {s}\\n% nstates = {n}\\n% display width = {w}\"\n .format(e=event_count,s=inst_count,n=nstates,w=displayWidth))\n\nlabels = {}\nstates = {}\n\n# set up transition labels and state tables\n\nfor t in range(0,max(event_count,inst_count)+1):\n # events for each transition\n labels[t] = (\"{\\\\begin{tabular}{>{\\centering}m{5cm}}\\n\"\n + string.join(observed[t-1],\"\\\\\\\\\\n\")\n + \"\\\\\\\\\\n\\\\em \"\n + string.join(occurred[t-1],\"\\\\\\\\\\n \")\n + \"\\n\\\\end{tabular}}\")\n # fluents for each state\n states[t] = (\"\\\\begin{minipage}{5cm}\"\n \"\\\\raggedright\"\n \"\\everypar={\\hangindent=1em\\hangafter=1}\\n\"\n # do initiated fluents first\n + string.join(\n [\"\" if (t>0) and\n ((x in holdsat[t-1]) or (x in initiated[t-1])) else\n \"\\\\textbf{\"+x+\"}\\\\\\\\\\n\" # bold new fluents\n for x in sorted(holdsat[t]) if x not in terminated[t]],'')\n + string.join(\n [\"\\\\textbf{\"+x+\"}\\\\\\\\\\n\" for x in sorted(initiated[t-1])],'') #20150928 TL: changed to initiated[t-1]. 
\n # then inertial ones\n + string.join(\n [x+\"\\\\\\\\\\n\" if (t>0) and\n ((x in holdsat[t-1]) or (x in initiated[t-1])) else\n \"\" #\\\\textbf{\"+x+\"}\\\\\\\\\\n\" # bold new fluents\n for x in sorted(holdsat[t]) if x not in terminated[t]],'')\n # finally terminated ones\n + string.join(\n [\"\\\\sout{\"+x+\"}\\\\\\\\\\n\" for x in sorted(terminated[t])],'')\n + \"\\\\end{minipage}\\n};\")\n\nstates_by_row = list(chunks(list(selected_states),displayWidth))\nprint(\"% states_by_row = {n}\\n\".format(n=states_by_row))\nprint(\"\\\\begin{longtable}{@{}l@{}}\")\nfor r in states_by_row:\n print(\"% start row={r} of {m}\\n\".format(r=r,m=states_by_row))\n print(\"\\\\resizebox{\\\\textwidth}{!}{\\n\")\n print(\"\\\\begin{tikzpicture}\\n\"\n \"[\\nstart chain=trace going right,\")\n # set up state chains\n print(\"% state chains for {i} through {j}\"\n .format(i=r[0],j=r[-1]))\n for t in r:\n print(\"start chain=state{i} going down,\"\n .format(i=t))\n print(\"node distance=1cm and 5.2cm\\n]\")\n for (k,t) in enumerate(r): \n print(\"% start row={r}, state={t}\"\n .format(r=r,t=t))\n print(\"{{{{ [continue chain=trace]\\n\"\n \"\\\\node[circle,draw,on chain=trace]\"\n \"(i{i}) {{$S_{{{i}}}$}};\"\n .format(i=t))\n if (t==r[0]): # first element of row\n if (r==states_by_row[0]): # first element of first row\n # dummy node to left of S0\n print(\"\\draw[color=white](i{i})+(180:5.6cm) --\"\n \"node[above]{{}}(i{i});\"\n .format(i=t))\n # provenance of trace\n print(\"\\\\draw(i{i})+(-3,0)node[rotate=90,anchor=south]\"\n \"{{Answer set={a}, source={f}}};\"\n .format(\n i=t,\n a=answer_set,\n f=args.answerset_file.replace('_','\\_') if args.answerset_file\n else 'stdin'))\n else:\n print(\"\\draw[-latex,dashed](i{i})+(180:5.2cm) --\"\n \"node[above]{l}(i{i});\"\n .format(i=t,l=labels[t]))\n else:\n # transitions + event labels\n print(\"% label for s_{i} -- s_{j}\\n\"\n \"\\draw[-latex,{style}](i{i}) -- % t={j}\\n\"\n \"node[above]{l}\\n(i{j});\"\n # r[k-1] is index of preceding node in chain\n .format(i=r[k-1],j=t,l=labels[t],\n # highlight the discontinuity\n style='thin' if t-1==r[k-1] else 'dotted')) \n print(\"}\") # close first brace of continue chain\n # check there is some state to display\n if max(len(holdsat[t]),len(initiated[t]),len(terminated[t]))>0:\n print(\"{{ [continue chain=state{i} going below]\\n\"\n \"\\\\node [on chain=state{i},below=of i{i},\"\n \"rectangle,draw,inner frame sep=0pt] (s{i}) {{\\n\"\n \"% instant {i}\\n\".format(i=t)\n + states[t] # insert state table\n + \"\\n} % end node and chain\\n\"\n + \"\\draw (i{i}) -- (s{i});\\n\".format(i=t))\n print(\"}\") # close second brace of continue chain\n print(\"% \\pause % uncomment here to animate\\n\")\n # last element, intermediate row\n if ((t==r[-1]) and (r!=states_by_row[-1])):\n print(\"{{ [continue chain=trace]\\n\"\n \"\\\\node[on chain=trace] (i{j}) {{}};\\n\"\n \"\\\\draw[-latex,dashed](i{i}) -- \\n\".format(i=t,j=t+1)\n + \"node[above]\"\n + \"{}\" # labels[t+1]\n + \"(i{j});\\n}}\".format(i=t,j=t+1))\n # last element, last row\n if ((t==r[-1]) and (r==states_by_row[-1])):\n # prints nodes and final arc in white so final line scaling\n # matches preceding lines\n print(\"% fill nodes {a} to {b}\\n\"\n .format(a=t+1,b=t+(displayWidth-len(r))))\n for x in range(t+1,t+(displayWidth-len(r))+1):\n print(\"% dummy node {x} to complete row\".format(x=x))\n print(\"{{{{ [continue chain=trace]\\n\"\n \"\\\\node[color=white,circle,draw,on chain=trace]\"\n \"(i{i}) {{$S_{{{i}}}$}};}}}}\"\n .format(i=x))\n x = 
t+(displayWidth-len(r))\n            print(\"% dummy arc to complete row\")\n            print(\"{{ [continue chain=trace]\\n\"\n                  \"\\\\node[on chain=trace] (i{j}) {{}};\\n\"\n                  \"\\\\draw[color=white,-latex,dashed](i{i}) -- (i{j});\\n}}\"\n                  .format(i=x,j=x+1))\n    # bottom of row loop\n    print(\"% end row={r} of {m}\\n\".format(r=r,m=states_by_row))\n    print(\"\\\\end{tikzpicture}\\n\"\n          \"% close resizebox\\n}\\\\\\\\\\n\")\nprint(\"\\\\end{longtable}\\n\")\n\nif args.answerset_file: f.close()\n \n","sub_path":"examples/libraries/pyviz/pyviz-chain.py","file_name":"pyviz-chain.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"328451412","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab as pl\n\n\nCONTRAST_THRESHOLD = 80\nHISTOGRAM_SIZE = 256\nmini = 0\nmaxi = 255\ndiff = 255\nhistogram = np.zeros(HISTOGRAM_SIZE, dtype = np.uint)\ncapture = cv2.VideoCapture(0)\n\nwhile(capture.isOpened()):\n\tret, frame = capture.read()\n\theight, width, channels = frame.shape\n\tgray_image = np.zeros([height,width], dtype=np.uint8)\n\n\tfor i in range(0,height):\n\t\tfor j in range(0,width):\n\t\t\tgray_image[i,j] = np.uint8(0.3*frame[i,j,2] + 0.59*frame[i,j,1] + 0.11*frame[i,j,0])\t# B,G,R\n\t\t\thistogram[gray_image[i,j]] = histogram[gray_image[i,j]] + 1\n\t\t\tif(gray_image[i,j] < mini):\n\t\t\t\tgray_image[i,j] = 0\n\t\t\telif(gray_image[i,j] > maxi):\n\t\t\t\tgray_image[i,j] = 255\n\t\t\telse :\n\t\t\t\tgray_image[i,j] = np.uint8((255*(gray_image[i,j]-mini)/diff))\n\n\tx = 0\n\twhile(x < HISTOGRAM_SIZE and histogram[x] < CONTRAST_THRESHOLD):\n\t\tx = x + 1\n\tmini = x\n\tx = HISTOGRAM_SIZE - 1\n\twhile(x > mini and histogram[x] < CONTRAST_THRESHOLD):\n\t\tx = x - 1\n\tmaxi = x\n\tdiff = maxi - mini\n\t#pl.imshow(gray_image)\n\t#pl.pause(0.00001)\n\t#pl.draw()\n\tgray_image = gray_image/256 #normalising - only for cv2.imshow(), if pl.draw() is used, this is not needed\n\tcv2.imshow('frame',gray_image)\n\n\tif cv2.waitKey(1) & 0xFF == 27 :\n\t\tbreak\n\ncapture.release()\ncv2.destroyAllWindows()","sub_path":"working_contrast_enhancement.py","file_name":"working_contrast_enhancement.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"208827239","text":"# -*- coding:utf-8 -*-\n# This module is responsible for the display logic; it plays the role of the view layer\n\nimport datetime\n\nfrom flask import render_template,flash,redirect,session,url_for,request,g\nfrom flask_login import login_user,logout_user,current_user,login_required\nfrom models import User,Post,ROLE_USER,ROLE_ADMIN\nfrom forms import LoginForm,RegisterForm,AboutmeForm,PublishBlogForm\nfrom app import app,db,lm\nfrom string import strip\nfrom utils import PER_PAGE\n\n# Load a user from the database for flask-login\n@lm.user_loader\ndef load_user(user_id):\n    return User.query.get(int(user_id))\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\tuser = 'Maxbin'\n\tposts = [\n\t    {\n\t\t'author':{'nickname':'John'},\n\t\t'body':'Beautiful day in Portland!'\n\t    },\n\t    {\n\t\t'author':{'nickname':'Susan'},\n\t\t'body':'The Avengers movie was so cool!'\n\t    }\n\t\t]\n\treturn render_template(\"index.html\",\n\t\t\t\ttitle = 'Home',\n\t\t\t\tuser = user,\n\t\t\t\tposts = posts)\n
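\n# A hedged sketch of the password check used below: User.verify_password() is\n# defined in models (not shown in this file). A typical werkzeug-backed shape,\n# given here purely as an assumption, would be:\n#\n# from werkzeug.security import generate_password_hash, check_password_hash\n#\n# class User(db.Model):\n#     ...\n#     def set_password(self, raw):\n#         self.password_hash = generate_password_hash(raw)  # store a salted hash, never plaintext\n#     def verify_password(self, raw):\n#         return check_password_hash(self.password_hash, raw)\n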
\n@app.route('/login',methods=['GET','POST'])\ndef login():\n\t# If the user is already authenticated, skip the login form\n\tif current_user.is_authenticated:\n\t\treturn redirect('index')\n\t# Validate the login form\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(nickname=request.form.get('user_name')).first()\n\t\tif user is not None and user.verify_password(request.form.get('password')):\n\t\t\tlogin_user(user,request.form.get('remember_me'))\n\t\t\tuser.last_seen = datetime.datetime.now()\n\t\t\ttry:\n\t\t\t\tdb.session.add(user)\n\t\t\t\tdb.session.commit()\n\t\t\texcept:\n\t\t\t\tflash(\"The Database error!\")\n\t\t\t\treturn redirect('/login')\n\t\t\tflash('Your name: '+request.form.get('user_name'))\n\t\t\tflash('remember_me? '+str(request.form.get('remember_me')))\n\t\t\treturn redirect(url_for(\"user\",name=current_user.id))\n\t\telse:\n\t\t\tflash(\"Invalid username or password!\")\n\t\t\treturn redirect('/login')\n\treturn render_template('login.html',\n\t\t\t\ttitle = 'Sign In',\n\t\t\t\tform = form)\n\n@app.route('/User/<name>')\ndef user(name):\n\treturn redirect(url_for('index'))\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('index'))\n\n@app.route('/register',methods=['GET','POST'])\ndef register():\n\tform=RegisterForm()\n\tif request.method == 'POST':\n\t\tif form.validate_on_submit():\n\t\t\tuser = User()\n\t\t\tuser_name = request.form.get('user_name')\n\t\t\tuser_email = request.form.get('user_email')\n\t\t\tuser_password = request.form.get('user_password')\n\t\t\tregister_check = User.query.filter(db.or_(\n\t\t\tUser.nickname==user_name,User.email==user_email)).first()\n\t\t\tif register_check:\n\t\t\t\tflash(\"error: The user's name or email already exists!\")\n\t\t\t\treturn redirect('/register')\n\t\t\tif len(user_name) and len(user_email):\n\t\t\t\tuser.nickname = user_name\n\t\t\t\tuser.email = user_email\n\t\t\t\tuser.password = user_password\n\t\t\t\tuser.role = ROLE_USER\n\t\t\t\ttry:\n\t\t\t\t\tdb.session.add(user)\n\t\t\t\t\tdb.session.commit()\n\t\t\t\texcept:\n\t\t\t\t\tflash(\"The Database error!\")\n\t\t\t\t\treturn redirect('/register')\n\t\t\t\tflash(\"Register successful!\")\n\t\t\t\treturn redirect('/index')\n\treturn render_template(\n\t\t\t'register.html',\n\t\t\ttitle = 'Register',\n\t\t\tform = form)\n\n@app.route('/user/<int:user_id>',defaults={'page':1},methods=['GET','POST'])\n@app.route('/user/<int:user_id>/page/<int:page>',methods=['GET','POST'])\n@login_required\ndef users(user_id,page):\n\tform = AboutmeForm()\n\tcurrent_user.last_seen = datetime.datetime.now()\n\tif user_id != current_user.id:\n\t\tflash(\"Sorry, you can only view your own profile!\",\"error\")\n\t\tredirect(\"/index\")\n\tpagination = Post.query.filter_by(\n\t\tuser_id = current_user.id\n\t\t).order_by(\n\t\tdb.desc(Post.timestamp)\n\t\t).paginate(page,PER_PAGE,False)\n\n\treturn render_template(\n\t\t\t\"user.html\",\n\t\t\tform=form,\n\t\t\tpagination=pagination)\n
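\n# A hedged compatibility note on the paginate() call above: the positional\n# (page, per_page, error_out) form matches Flask-SQLAlchemy 2.x; under\n# Flask-SQLAlchemy 3.x the same query would be written with keyword arguments:\n#\n# pagination = (Post.query.filter_by(user_id=current_user.id)\n#               .order_by(db.desc(Post.timestamp))\n#               .paginate(page=page, per_page=PER_PAGE, error_out=False))\n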
\n@app.route('/publish/<int:user_id>',methods=['GET','POST'])\n@login_required\ndef publish(user_id):\n\tform = PublishBlogForm()\n\tif form.validate_on_submit():\n\t\tposts = Post()\n\t\tblog_body = request.form.get(\"body\")\n\t\tif not len(strip(blog_body)):\n\t\t\tflash(\"The content is necessary!\")\n\t\t\treturn redirect(url_for(\"publish\",user_id=user_id))\n\t\tposts.body = blog_body\n\t\tposts.timestamp = datetime.datetime.now()\n\t\tposts.user_id = user_id\n\n\t\ttry:\n\t\t\tdb.session.add(posts)\n\t\t\tdb.session.commit()\n\t\texcept:\n\t\t\tflash(\"Database error!!\")\n\t\t\treturn redirect(url_for(\"publish\",user_id=user_id))\n\t\tflash(\"Publish Successful!!\")\n\t\treturn redirect(url_for(\"users\",user_id=user_id))\n\treturn render_template(\n\t\t\t\"publish.html\",\n\t\t\tform=form)\n\n@app.route('/user/aboutme/<int:user_id>',methods=['GET','POST'])\n@login_required\ndef about_me(user_id):\n\tuser = User.query.filter(User.id == user_id).first()\n\tif request.method == \"POST\":\n\t\tcontent = request.form.get(\"describe\")\n\t\tif len(content) and len(content)<=140:\n\t\t\tuser.about_me=content\n\t\t\ttry:\n\t\t\t\tdb.session.add(user)\n\t\t\t\tdb.session.commit()\n\t\t\texcept:\n\t\t\t\tflash(\"Database error!\")\n\t\t\t\treturn redirect(url_for(\"users\",user_id=user_id))\n\t\telse:\n\t\t\tflash(\"Sorry, the description must be between 1 and 140 characters.\")\n\treturn redirect(url_for(\"users\",user_id=user_id))\n\n\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"75606293","text":"\"\"\"\n    Chris Jakins\n    1000802309\n    HW8 CSE3313\n    4/11\n\"\"\"\nimport numpy as np\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom skimage.feature import match_template\n\n\n#\n# SETUP\n#\n\nfilenameSmall = \"ERBwideColorSmall.jpg\"\nfilenameTemplate = \"ERBwideTemplate.jpg\"\n\n#\n# Functions\n#\n\n# function taken from \n# stackoverflow.com/questions/12201577/\n# how-can-i-convert-an-rgb-image-into-grayscale-in-python\ndef rgb2gray(img):\n    r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n    gray = 0.2989 * r + 0.587 * g + 0.114 * b\n\n    return gray\n\n#\n# Main\n#\n\nimg = mpimg.imread(filenameSmall)\ntemplate = mpimg.imread(filenameTemplate)\n\nimg = rgb2gray(img)\ntemplate = rgb2gray(template)\n\nplt.imshow(img, cmap = \"gray\")\nplt.show()\n\nplt.imshow(template, cmap = \"gray\")\nplt.show()\n\n# this code taken from \n# http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_template.html\nresult = match_template(img, template)\nij = np.unravel_index(np.argmax(result), result.shape)\nx, y = ij[::-1]\n\nprint(x, y)\n\n# black out the matched region; rows span template.shape[0], columns template.shape[1]\n# (a slice assignment such as img[y:y+h, x:x+w] = 0 would be faster)\nfor i in range(y, y + template.shape[0]):\n    for j in range(x, x + template.shape[1]):\n        img[i][j] = 0\n\nplt.imshow(img, cmap = \"gray\")\nplt.show()\n","sub_path":"spring2018/3313/hw8/cfj2309/templateMatching.py","file_name":"templateMatching.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"400469265","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport simplejson as json\n\nfrom alipay.aop.api.response.AlipayResponse import AlipayResponse\nfrom alipay.aop.api.domain.IndexBigbuyItem import IndexBigbuyItem\nfrom alipay.aop.api.domain.IndexBlockBanner import IndexBlockBanner\n\n\nclass KoubeiMemberDataItemBigbuyQueryResponse(AlipayResponse):\n\n    def __init__(self):\n        super(KoubeiMemberDataItemBigbuyQueryResponse, self).__init__()\n        self._big_buy_item_list = None\n        self._current_time = None\n        self._gmt_end = None\n        self._gmt_start = None\n        self._index_block_banner = None\n        self._promo_schema = None\n\n    @property\n    def big_buy_item_list(self):\n        return self._big_buy_item_list\n\n    @big_buy_item_list.setter\n    def big_buy_item_list(self, value):\n        if isinstance(value, list):\n            self._big_buy_item_list = list()\n            for i in value:\n                if isinstance(i, IndexBigbuyItem):\n                    self._big_buy_item_list.append(i)\n                else:\n                    self._big_buy_item_list.append(IndexBigbuyItem.from_alipay_dict(i))\n    @property\n    def current_time(self):\n        return self._current_time\n\n    @current_time.setter\n    def current_time(self, value):\n        self._current_time = value\n    
@property\n def gmt_end(self):\n return self._gmt_end\n\n @gmt_end.setter\n def gmt_end(self, value):\n self._gmt_end = value\n @property\n def gmt_start(self):\n return self._gmt_start\n\n @gmt_start.setter\n def gmt_start(self, value):\n self._gmt_start = value\n @property\n def index_block_banner(self):\n return self._index_block_banner\n\n @index_block_banner.setter\n def index_block_banner(self, value):\n if isinstance(value, IndexBlockBanner):\n self._index_block_banner = value\n else:\n self._index_block_banner = IndexBlockBanner.from_alipay_dict(value)\n @property\n def promo_schema(self):\n return self._promo_schema\n\n @promo_schema.setter\n def promo_schema(self, value):\n self._promo_schema = value\n\n def parse_response_content(self, response_content):\n response = super(KoubeiMemberDataItemBigbuyQueryResponse, self).parse_response_content(response_content)\n if 'big_buy_item_list' in response:\n self.big_buy_item_list = response['big_buy_item_list']\n if 'current_time' in response:\n self.current_time = response['current_time']\n if 'gmt_end' in response:\n self.gmt_end = response['gmt_end']\n if 'gmt_start' in response:\n self.gmt_start = response['gmt_start']\n if 'index_block_banner' in response:\n self.index_block_banner = response['index_block_banner']\n if 'promo_schema' in response:\n self.promo_schema = response['promo_schema']\n","sub_path":"alipay/aop/api/response/KoubeiMemberDataItemBigbuyQueryResponse.py","file_name":"KoubeiMemberDataItemBigbuyQueryResponse.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"12708296","text":"# Parsimony algorithm for tree reconciliation\n# Eliot Bush 11/2016\n\n## test trees\n\n# cospeciation with loss\ngTreeA = ('i',('h',('f',('a',(),()),('b',(),())),('d',(),())),('e',(),()))\nspTreeA = ('I',('H',('G',('F',('A',(),()),('B',(),())),('C',(),())),('D',(),())),('E',(),()))\ntipMapA = set([('a','A'),('b','B'),('d','D'),('e','E')])\n\n# cospeciation, duplication\ngTreeB = ('g',('e',('a',(),()),('b',(),())),('f',('c1',(),()),('c2',(),())))\nspTreeB = ('G',('E',('A',(),()),('B',(),())),('C',(),()))\ntipMapB = set([('a','A'),('b','B'),('c1','C'),('c2','C')])\n\n# cospeciation, transfer, loss\ngTreeC = ('k',('h',('a',(),()),('c',(),())),('j',('m',('b',(),()),('d',(),())),('i',('e',(),()),('f',(),()))))\nspTreeC = ('K',('H',('G',('A',(),()),('B',(),())),('C',(),())),('J',('I',('E',(),()),('F',(),())),('D',(),())))\ntipMapC = set([('a','A'),('b','B'),('c','C'),('d','D'),('e','E'),('f','F')]) # host trans\n\n# deep duplication with loss\ngTreeD = ('j',('i',('f',('a',(),()),('b',(),())),('h',('g1',('d1',(),()),('e1',(),())),('c',(),()))),('g2',('d2',(),()),('e2',(),())))\nspTreeD = ('I',('F',('A',(),()),('B',(),())),('H',('G',('D',(),()),('E',(),())),('C',(),())))\ntipMapD = set([('a','A'),('b','B'),('c','C'),('d1','D'),('e1','E'),('d2','D'),('e2','E')])\n\n# cospeciation with loss\ngTreeE = ('h',('f',('a',(),()),('b',(),())),('g',('c',(),()),('e',(),())))\nspTreeE = ('Z',('W',('A',(),()),('B',(),())),('Y',('X',('D',(),()),('E',(),())),('C',(),())))\ntipMapE = set([('a','A'),('b','B'),('c','C'),('e','E')])\n\n\n## functions\n\ndef getTips(tree):\n '''Return list of tips of tree.'''\n if tree[1]==():\n return [tree[0]]\n else:\n return getTips(tree[1]) + getTips(tree[2])\n\ndef getInternalNodes(tree):\n '''Return list of internal nodes in tree.'''\n if tree[1]==():\n return []\n else:\n return getInternalNodes(tree[1])+getInternalNodes(tree[2]) + 
[tree[0]] \n\ndef getAllNodes(tree):\n    '''Return all nodes in tree.'''\n    if tree[1]==():\n        return [tree[0]]\n    else:\n        return getAllNodes(tree[1])+getAllNodes(tree[2]) + [tree[0]] \n\ndef subtree(node,tree):\n    '''Return subtree of tree, rooted at node.'''\n    if tree[0] == node:\n        return tree\n    elif tree[1] == ():\n        return None # node isn't present\n    else:\n        lt=subtree(node,tree[1])\n        rt=subtree(node,tree[2])\n\n        if lt != None: return lt\n        else: return rt\n\ndef initializeDpTables(gTree,spTree,tipMap):\n    '''Given gene and species trees, create the dp tables in the form of\ndicts. We create one which has gene tree nodes vs. species tree\nnodes, and another which has gene tree nodes vs. species tree\nbranches. Populate both with scores for the gene tree tips. Return as a dict\nof dicts, where the key 'n' has the node dict as value, and the key\n'b' has the branch dict as value.\n    '''\n    nodeDpD = {}\n    branchDpD = {}\n    # do gTree tips\n    for gNode in getTips(gTree):\n        for spNB in getAllNodes(spTree):\n            # regard spNB as a node\n            if (gNode,spNB) in tipMap:\n                nodeDpD[(gNode,spNB)] = (0,('t',))\n            else:\n                nodeDpD[(gNode,spNB)] = (float('inf'),())\n\n            # regard spNB as a branch (gTree tips map to branches w/ inf cost)\n            branchDpD[(gNode,spNB)] = (float('inf'),())\n\n    dpD = {'n':nodeDpD,'b':branchDpD}\n\n    return dpD\n\ndef printDp(gTree,spTree,dpD):\n    '''Print out the dp table nicely.'''\n    L=extractDp(gTree,spTree,dpD)\n    printDpHelper(L)\n\ndef extractDp(gTree,spTree,dpD):\n    '''Return dp table scores as a list of lists.'''\n\n    colNames=['']+getAllNodes(spTree) + getAllNodes(spTree)\n\n    L=[colNames]\n\n    for gNode in getTips(gTree) + getInternalNodes(gTree):\n        rowL=[gNode]\n        for spNB in getAllNodes(spTree):\n            if (gNode,spNB) in dpD['n']:\n                rowL.append(str(dpD['n'][(gNode,spNB)][0])) # regard as node\n            else:\n                rowL.append('')\n        for spNB in getAllNodes(spTree):\n            if (gNode,spNB) in dpD['b']:\n                rowL.append(str(dpD['b'][(gNode,spNB)][0])) # regard as branch\n            else:\n                rowL.append('')\n        L.append(rowL)\n    return L\n\ndef printDpHelper(L,indent=0):\n    '''Given tabular data in a list of lists (where sublists are rows),\nprint nicely so columns line up. Indent is an optional number of blank\nspaces to put in front of each row.'''\n    # get max width for each column\n    colMax=[]\n    for col in range(len(L[0])):\n        mx=0\n        for row in L:\n            if len(row[col]) > mx:\n                mx = len(row[col])\n        colMax.append(mx)\n\n    # print\n    for row in L:\n        for col in range(len(row)):\n            row[col]=row[col]+' ' * (colMax[col]-len(row[col]))\n\n    for row in L:\n        printStr = \" \"*indent + \" | \".join(row)\n        print(printStr.rstrip())\n\ndef getAncestors(spNode,spTree):\n    '''Get list of ancestor nodes. Assume spNode is in spTree.'''\n\n    if spNode == spTree[0]:\n        return []\n    elif spTree[1] == ():\n        # hit a tip and spNode doesn't match\n        return None\n    else:\n        lt=getAncestors(spNode,spTree[1])\n        rt=getAncestors(spNode,spTree[2])\n\n        # one or both must be None\n        if lt!=None:\n            return [spTree[0]] + lt\n        elif rt!=None:\n            return [spTree[0]] + rt\n        else:\n            return None\n\ndef getDescendants(node,tree):\n    '''Return a list of all nodes in tree descending from node. Includes\nnode itself.'''\n    st=subtree(node,tree)\n    return getAllNodes(st)\n
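\n# A quick sanity check of the helpers above on test tree A, worked out by hand\n# from spTreeA (comment-only, so module behavior is unchanged):\n#\n#   getAncestors('A', spTreeA)   -> ['I', 'H', 'G', 'F']\n#   getDescendants('G', spTreeA) -> ['A', 'B', 'F', 'C', 'G']\n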
The restriction is,\nit must not go to a branch in this same lineage, either forward\nor backward.'''\n\n    # these functions get nodes, but we can think of the branch\n    # leading to each of those nodes\n    allBrL=getAllNodes(spTree)\n    parBrL=getAncestors(spNode,spTree)\n    decBrL=getDescendants(spNode,spTree)\n\n    branchL=[]\n    for br in allBrL:\n        if br not in parBrL and br not in decBrL:\n            branchL.append(br)\n\n    return branchL\n\ndef isTip(node,tree):\n    '''Returns boolean indicating whether node is a tip.'''\n    if tree[1] == ():\n        if node==tree[0]: return True\n        else: return False\n    else:\n        return isTip(node,tree[1]) or isTip(node,tree[2])\n\ndef lossCountHelper(tree,node):\n    '''Count the number of nodes we pass to get to node starting at\nroot. Return tuple with that count, and tuple of loss nodes.'''\n    if tree[0] == node:\n        return (0,()) # we're not passing a node here\n    elif tree[1]==():\n        return (-float('inf'),())\n    else:\n        lt=lossCountHelper(tree[1],node)\n        rt=lossCountHelper(tree[2],node)\n\n        if lt[0] > rt[0]:\n            lossT= lt[1] + (tree[0],) # add this one\n            return (lt[0]+1,lossT)\n        else:\n            lossT= rt[1] + (tree[0],)\n            return (rt[0]+1,lossT)\n\ndef lossCount(tree,parent,child,nb):\n    '''Given tree, and a node placement of the parent at parent and a\nchild at child, figure out how many loss events are implied. nb is 'n'\nor 'b' and tells us whether parent represents a node or the branch\nleading to that node. Return tuple with count, and tuple of loss\nnodes.\n    '''\n    if parent == child:\n        return (0,())\n    else:\n        subtr=subtree(parent,tree)\n        if nb=='n':\n            # parent is a node, we don't want to count the root of\n            # subtr itself, since that's parent\n            lt = lossCountHelper(subtr[1],child)\n            rt = lossCountHelper(subtr[2],child)\n            if lt[0] > rt[0]: return lt\n            else: return rt\n        else:\n            # parent is branch leading to node, so we should count it\n            return lossCountHelper(subtr,child)\n        \ndef findMinCostRootPlacement(root,spTree,dpD):\n    '''Given filled-out dp tables, return the minimum cost placement for\nthe root node root.\n    '''\n    rootPlacement = ()\n    minCost=float('inf')\n    for spNB in getAllNodes(spTree):\n        # regard spNB as node\n        if dpD['n'][(root,spNB)][0] < minCost:\n            rootPlacement = (root,spNB,'n')\n            minCost = dpD['n'][(root,spNB)][0]\n        # now regard spNB as branch\n        if dpD['b'][(root,spNB)][0] < minCost:\n            rootPlacement = (root,spNB,'b')\n            minCost = dpD['b'][(root,spNB)][0]\n\n    return rootPlacement,minCost\n\ndef reconcile(gTree,spTree,tipMap,dup,trans,loss):\n    '''Reconcile a gene tree with a species tree.'''\n\n    dpD=initializeDpTables(gTree,spTree,tipMap)\n\n    # fill dp table\n    for gNode in getInternalNodes(gTree):\n        for spNB in getAllNodes(spTree):\n            # regard spNB as node\n            dpD['n'][(gNode,spNB)]=scoreInternalGeneNodeToSpNode(gTree,spTree,dpD,loss,gNode,spNB)\n            # now regard spNB as branch\n            dpD['b'][(gNode,spNB)]=scoreInternalGeneNodeToSpBranch(gTree,spTree,dpD,dup,trans,loss,gNode,spNB)\n    \n    rootPlacement,minCost = findMinCostRootPlacement(gTree[0],spTree,dpD)\n    \n    return dpD,rootPlacement,minCost\n\ndef scorePlacement(gSubNode1,sp1,gSubNode2,sp2,nLoss1,nLoss2,loss,event,dpD,dupTransScore,minCostSol):\n    '''Score the placement of two gene nodes onto species\nbranch/nodes. Considers that sp1,sp2 could each be either node or\nbranch. Adds in the cost of associated losses, and packages\noutput. event specifies what event marker should be put in the package\n(e.g. 'c' for cospeciation, 'd' for duplication etc. 
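A quick worked example of the tree utilities above, with outputs checked by hand against the test case A trees (illustrative only; it assumes the definitions above are in scope):

    getAncestors('F', spTreeA)                # -> ['I', 'H', 'G']
    getDescendants('F', spTreeA)              # -> ['A', 'B', 'F']  (includes 'F' itself)
    getPossibleTranferBranches('F', spTreeA)  # -> ['C', 'D', 'E']
    lossCount(spTreeA, 'I', 'A', 'n')         # -> (3, ('F', 'G', 'H'))

In the last call, placing a parent at node I and a child at node A passes nodes F, G and H on the way down, so three losses are implied.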
dupTransScore\ncarries any additional costs for duplication or horizontal transfer\n '''\n for k1 in ('n','b'):\n d1=dpD[k1]\n for k2 in ('n','b'):\n d2=dpD[k2]\n sc = dupTransScore + nLoss1[0]*loss + d1[(gSubNode1,sp1)][0] + nLoss2[0]*loss + d2[(gSubNode2,sp2)][0]\n sol = (event,(gSubNode1,sp1,k1,nLoss1[1]),(gSubNode2,sp2,k2,nLoss2[1]))\n if sc < minCostSol[0]: minCostSol = (sc,sol)\n return minCostSol\n\ndef scoreInternalGeneNodeToSpNode(gTree,spTree,dpD,loss,gNode,spNode):\n '''Given that we map gNode to spNode, calculate the costs.'''\n\n # if it's a species tip, return infinite cost\n if isTip(spNode,spTree):\n return (float('inf'),())\n\n # spNode is internal\n gSubNodeT = (subtree(gNode,gTree)[1][0],subtree(gNode,gTree)[2][0])\n\n # look at two subtrees below spNode on spTree. Get list of all\n # nodes in each, store as tuple of two lists.\n spSubTree=subtree(spNode,spTree)\n spSubNodeLT=(getAllNodes(spSubTree[1]),getAllNodes(spSubTree[2]))\n \n minCostSol = (float('inf'),())\n\n # co speciation. \n for i in (0,1):\n gSubNode1=gSubNodeT[i] # mechanism for alternating \n gSubNode2=gSubNodeT[(i+1)%2] # which node gSubNode1 refers to\n\n # consider each possible placement of gSubNode1 into\n # spSubNodeLT[0] and gSubNode2 into spSubNodeLT[1]\n\n for descend1 in spSubNodeLT[0]:\n nLoss1 = lossCount(spTree,spNode,descend1,'n')\n for descend2 in spSubNodeLT[1]:\n nLoss2 = lossCount(spTree,spNode,descend2,'n')\n\n minCostSol=scorePlacement(gSubNode1,descend1,gSubNode2,descend2,nLoss1,nLoss2,loss,'c',dpD,0,minCostSol)\n return minCostSol\n\ndef scoreInternalGeneNodeToSpBranch(gTree,spTree,dpD,dup,trans,loss,gNode,spBranch):\n '''Given that we map gNode to spBranch, calculate the costs.'''\n\n gSubNodeT = (subtree(gNode,gTree)[1][0],subtree(gNode,gTree)[2][0])\n # look at two subtrees below spNode on spTree. Get list of all\n # nodes in each, store as tuple of two lists.\n if isTip(spBranch,spTree):\n spSubNodeLT=([spBranch],[spBranch]) # need to have current node in both\n else: \n spSubTree=subtree(spBranch,spTree)\n spSubNodeLT=(getAllNodes(spSubTree[1])+[spBranch],getAllNodes(spSubTree[2])+[spBranch]) # add current node to both\n \n minCostSol = (float('inf'),())\n\n transBrL = getPossibleTranferBranches(spBranch,spTree)\n\n for i in (0,1):\n gSubNode1=gSubNodeT[i] # mechanism for alternating \n gSubNode2=gSubNodeT[(i+1)%2] # which node gSubNode1 refers to\n\n # horiz transfer. One goes to another branch, one stays in descendant lineage\n for transBr in transBrL:\n for descend in getDescendants(spBranch,spTree):\n # gSubNode1 stays in lineage, gSubNode2 h transfers\n dnLoss = lossCount(spTree,spBranch,descend,'b')# losses on descend placement\n # no loss on trans, so pass in (0,())\n minCostSol=scorePlacement(gSubNode1,descend,gSubNode2,transBr,(0,()),dnLoss,loss,'h',dpD,trans,minCostSol)\n\n # duplication case.\n for descend1 in spSubNodeLT[0]:\n d1nLoss = lossCount(spTree,spBranch,descend1,'b')\n for descend2 in spSubNodeLT[1]:\n d2nLoss = lossCount(spTree,spBranch,descend2,'b')\n minCostSol=scorePlacement(gSubNode1,descend1,gSubNode2,descend2,d1nLoss,d2nLoss,loss,'d',dpD,dup,minCostSol)\n \n return minCostSol\n\ndef pstr(g,s,nb):\n '''Make placement string describing a gene node placement on a species\nnode or branch.'''\n if nb == 'n':\n return \"(node \"+g+\",node \"+s+\")\"\n else:\n return \"(node \"+g+\",branch \"+s+\")\"\n \ndef bt(placement,dpD):\n '''Backtrack through dp tables, returning a list of events. 
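Putting the pieces together, a minimal usage sketch with unit event costs (this mirrors the wrapper at the end of the file; under this cost model cospeciations are free while duplications, transfers and losses are charged dup, trans and loss respectively):

    dpD, rootPlacement, minCost = reconcile(gTreeA, spTreeA, tipMapA, dup=1, trans=1, loss=1)
    print(minCost)                        # total cost of the best reconciliation
    for event in bt(rootPlacement, dpD):  # backtrack to recover the event list
        print(event)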
Placement\nis a 3 tuple (geneNode, speciesPosition, nb), where nb is either 'n' or 'b' to indicate whether we should regard speciesPosition as a node or a branch.'''\n gNode,spNB = placement[:2] # gene node,species node or branch\n nb = placement[2] # node or a branch on species tree\n if dpD[nb][(gNode,spNB)][1][0] == 't':\n # we're on a tip\n return []\n else:\n eventL=[]\n eType,e1,e2 = dpD[nb][(gNode,spNB)][1]\n e1g,e1s,e1nb,e1losses=e1 # note, e1Tr is a placeholder, except in case of transfer\n e2g,e2s,e2nb,e2losses=e2 # where it gives the actual transfer branch\n if eType == 'c':\n eventL.append(\"Cospeciation: \"+ pstr(gNode,spNB,nb)+ \"-->\" + pstr(e1g,e1s,e1nb) + \" \" + pstr(e2g,e2s,e2nb))\n for tempgene in e1losses+e2losses:\n eventL.append(\" gene loss at node \"+str(tempgene))\n elif eType == 'h':\n eventL.append(\"Horizontal transfer: \"+ pstr(gNode,spNB,nb)+ \"-->\" + \"normal descent to \"+pstr(e1g,e1s,e1nb) + \" - transfer to \" + pstr(e2g,e2s,e2nb))\n for tempgene in e1losses+e2losses:\n eventL.append(\" gene loss at node \"+str(tempgene))\n elif eType == 'd':\n eventL.append(\"Duplication: \"+ pstr(gNode,spNB,nb)+ \"-->\" + pstr(e1g,e1s,e1nb) + \" \" + pstr(e2g,e2s,e2nb))\n for tempgene in e1losses+e2losses:\n eventL.append(\" gene loss at node \"+str(tempgene))\n\n # recurse.\n L1=bt((e1g,e1s,e1nb),dpD)\n L2=bt((e2g,e2s,e2nb),dpD)\n\n return eventL + L1 + L2\n \ndef printReconciliation(dpD,rootPlacement,minCost):\n '''Print out a reconciliation nicely.'''\n \n print(\"Minimum cost:\",minCost)\n \n eventL=bt(rootPlacement,dpD)\n \n print(\"\\n\".join(eventL))\n\n\ndef runCases():\n '''This is my own wrapper, not given to students.'''\n\n # A\n dpD,rootPlacement,minCost = reconcile(gTreeA,spTreeA,tipMapA,1,1,1)\n print('testA')\n printReconciliation(dpD,rootPlacement,minCost)\n\n\n # B\n dpD,rootPlacement,minCost = reconcile(gTreeB,spTreeB,tipMapB,1,1,1)\n print()\n print('testB')\n printReconciliation(dpD,rootPlacement,minCost)\n \n # C\n dpD,rootPlacement,minCost = reconcile(gTreeC,spTreeC,tipMapC,1,1,1)\n print()\n print('testC')\n printReconciliation(dpD,rootPlacement,minCost)\n\n # D\n dpD,rootPlacement,minCost = reconcile(gTreeD,spTreeD,tipMapD,1,3,1)\n print()\n print('testD')\n printReconciliation(dpD,rootPlacement,minCost)\n\n # E\n dpD,rootPlacement,minCost = reconcile(gTreeE,spTreeE,tipMapE,1,1,1)\n print()\n print('testE')\n printReconciliation(dpD,rootPlacement,minCost)\n\n\n","sub_path":"Fall2018/Bio188/hw7/reconcile.py","file_name":"reconcile.py","file_ext":"py","file_size_in_byte":16112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181499738","text":"\nsvg='''\n\nXXXXXXXXX\n\n'''\nfrom numpy import *\ndef interp(start, end, coef):\n return (end - start)*coef + start\n\ndef pie_slice(origin, radius, angle_start, angle_stop, n):\n or_x, or_y = origin\n angle_start = pi * angle_start/180\n angle_stop = pi * angle_stop/180\n pts = []\n for i in range(n+1):\n angle = interp(angle_start, angle_stop, i/n)\n pt = array([or_x + radius*cos(angle), or_y + radius*sin(angle)])\n pts.append(pt)\n pts.append(array([or_x, or_y]))\n return pts\n\n# vals = [0]+[a for a in range(1,11)]\n# vals = [0]+[1/a for a in range(3,13)]\n# vals = [1/a for a in range(3,14)]\nvals = [1/a for a in range(1,11)]+[0]\n\n\n\nvals.reverse()\n# print('vals')\n# for a in vals:\n# print(' ',a)\n\n\nvals = [360*a/sum(vals) for a in vals]\nacc = 0.0\nvall_accs = []\nfor a in vals:\n vall_accs.append(acc+a)\n acc += a\nval_pairs = 
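A hand-checked example of pie_slice above: it interpolates n+1 points along the arc and then closes the wedge back at the origin (values approximate):

    pie_slice((0, 0), 1, 0, 90, 2)
    # -> roughly [array([1., 0.]), array([0.7071, 0.7071]), array([0., 1.]), array([0., 0.])]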
[(vall_accs[a], vall_accs[a+1]) for a in range(len(vall_accs)-1)]\n\ncolors = [\n    'c60000', # red\n    'D86100', # orange\n    'E0C600', # yellow\n    '7FE000', # yellow green\n    '10FF00', # green green\n    '00E0AF', # cyan\n    '0086E0', # blue\n    '8485E0', # purple\n    'A33AFF', # blue purple\n    'E500FF', # magenta\n]\ncolors.reverse()\n# print('colors', len(colors))\n\nfor a in val_pairs:\n    print(' ',a)\n# pies = [(pie_slice((100,100), 100,angle, vall_accs[i-1],10), colors[i]) for i,(angle) in enumerate(vall_accs)]\npies = [(pie_slice((20,120), 90, a1, a2, 50), colors[i]) for i,(a1,a2) in enumerate(val_pairs)]\npolygons = ''\nfor pie,color in pies:\n    polygon_str = ' \\n'.join([\"%f, %f\"%(p1[0], p1[1]) for p1 in pie])\n    polygon = ('<polygon points=\"%s\" fill=\"#%s\" />\\n'%(polygon_str, color)) # NOTE: the polygon tag is a reconstruction; the markup was stripped from this copy\n    polygons += polygon + '\\n\\n'\n\n# polygon = ('\n'%(polygon_str))\n\n\nsvg1, svg2 = svg.split('XXXXXXXXX')\n\nopen('pies.nogit.svg', 'w').write(svg1+ polygons +svg2)","sub_path":"angles-meme.py","file_name":"angles-meme.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"36038537","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport tornado.web\nimport tornado.template\nimport tornado.auth\n\nclass BaseHandler(tornado.web.RequestHandler):\n    def initialize(self):\n        user = self.get_current_user()\n        if user:\n            self.email = user['email']\n        self.__template_loader = tornado.template.Loader(self.settings['template_dir'])\n\n    def render(self, template_name, **kwargs):\n        self.write(self.__template_loader.load(template_name).generate(**kwargs))\n    \n    def get_current_user(self):\n        user_json = self.get_secure_cookie('user')\n        if not user_json: return None\n        return tornado.escape.json_decode(user_json)\n\nclass MainHandler(BaseHandler):\n    def get(self):\n        if self.get_current_user():\n            self.redirect('/feed')\n        else:\n            self.render('main.html')\n\n","sub_path":"reader/handler/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"148818232","text":"import pandas as pd\nimport numpy as np\nfrom builtins import isinstance\nimport pickle\n\nimport tensorflow as tf\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.callbacks import LearningRateScheduler, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras import backend as K\n\nBASE_DIR = 'C:\\\\Users\\\\Karan Sarkar\\\\Google Drive\\\\HFI\\\\'\nTRAIN_DATA_FILE = BASE_DIR + 'chpw_member_level.csv'\nMODEL_FILE = BASE_DIR + 'diag2rc2.pickle'\n\n\ndf = pd.read_csv(TRAIN_DATA_FILE)\nEMBEDDING_MATRIX_FILE = BASE_DIR + 'embedding_matrix.pickle'\npickle_in = open(EMBEDDING_MATRIX_FILE,\"rb\")\ndiagnosis_embeddings, diag2idx, idx2diag = pickle.load(pickle_in)\n\ndef name2idx(name):\n    return df.columns.get_loc(name) + 1\n\ndef create_dicts(column, length):\n    tokens = set()\n    for row in df.itertuples():\n        codes = row[column]\n        if(isinstance(codes, str)):\n            for code in row[column].split():\n                tokens.add(code[:length])\n    code2idx = {}\n    idx2code = {}\n    num_codes = len(tokens)\n    for i, code in enumerate(tokens):\n        code2idx[code[:length]] = i\n        idx2code[i] = code[:length]\n    return (code2idx, idx2code, num_codes)\n    \ndef one_hot_encode(code2idx, num_codes, column):\n    print(num_codes)\n    data = np.zeros((len(df.index), diagnosis_embeddings.shape[1])) # width must match the embedding dimension summed below (num_codes would not broadcast)\n    idx = 0\n    for row in df.itertuples():\n        codes = row[column]\n        if(isinstance(codes, str)):\n            for code in row[column].split():\n                data[idx, 
:] += diagnosis_embeddings[diag2idx[code], :]\n        idx += 1\n    return data\n\n(code2idx, idx2code, num_codes) = create_dicts(name2idx('diag_codes'), 3) # NOTE: the required length argument was missing here; a 3-character code prefix is an assumed choice\nx_train = one_hot_encode(code2idx, num_codes, name2idx('diag_codes'))\n\n\ndef auc(y_true, y_pred):\n    auc = tf.metrics.auc(y_true, y_pred)[1]\n    K.get_session().run(tf.local_variables_initializer())\n    return auc\n\n\nearlyStopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='min')\nreduce_lr_loss = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=0, verbose=1, epsilon=1e-4, mode='min', cooldown=1)\n\nmodel = Sequential()\nmodel.add(Dense(500, activation='relu'))\nmodel.add(Dense(500, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', auc])\nmodel.fit(x_train, np.array(df['rc2']), validation_split=0.1, epochs=20, verbose = 1, callbacks=[earlyStopping])\n\nfilehandler = open(MODEL_FILE,\"wb\")\npickle.dump(((code2idx, idx2code, num_codes), model), filehandler)\nfilehandler.close()\n\n","sub_path":"NLP/FinalProject/diag2rc2B.py","file_name":"diag2rc2B.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"38430404","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nimport json\n\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom ..models import Driver\nfrom ..models import Employee\nfrom ..models import Job\nfrom ..models import Salary\nfrom ..serializers import DriverSerializer\nfrom ..serializers import EmployeeSerializer\nfrom ..serializers import JobSerializer\nfrom ..serializers import SalarySerializer\n\n\n@csrf_exempt\ndef api_get_job(request):\n    if request.user.is_authenticated:\n        if request.method == \"GET\":\n            job = Job.objects.all().order_by('number')\n            serializer = JobSerializer(job, many=True)\n            return JsonResponse(serializer.data, safe=False)\n    return JsonResponse('Error', safe=False) \n\n@csrf_exempt\ndef api_get_employee(request):\n    if request.user.is_authenticated:\n        if request.method == \"GET\":\n            data = get_employee_list('a')\n            return JsonResponse(data, safe=False)\n        \n        elif request.method == \"POST\":\n            req = json.loads( request.body.decode('utf-8') )\n            job = req['job']\n\n            if job == 'driver':\n                today = datetime.now()\n                date_compare = today + timedelta(days=30)\n\n                employee = Driver.objects.filter(employee__status='a').order_by('truck__number', 'employee__hire_date', 'employee__first_name', 'employee__last_name')\n                serializer = DriverSerializer(employee, many=True)\n                data = {\n                    'other': [],\n                    'driver': serializer.data,\n                    'date_compare': date_compare\n                }    \n            else:\n                employee = Employee.objects.filter(status='a', job__job_title=job).order_by('hire_date', 'first_name', 'last_name')\n                serializer = EmployeeSerializer(employee, many=True)\n                data = {\n                    'other': serializer.data,\n                    'driver': [],\n                }\n            return JsonResponse(data, safe=False)\n    return JsonResponse('Error', safe=False)\n\n@csrf_exempt\ndef api_get_former_employee(request):\n    if request.user.is_authenticated:\n        if request.method == \"GET\":\n            data = get_employee_list('t')\n            return JsonResponse(data, safe=False)\n    return JsonResponse('Error', safe=False)\n\n# Salary\n@csrf_exempt\ndef api_get_employee_salary(request):\n    if request.user.is_authenticated:\n        if request.method == \"GET\":\n            employee = Salary.objects.filter(employee__status='a', 
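A toy check of the featurization built in one_hot_encode above, which (despite its name) sums the embedding vectors of each member's diagnosis codes; the embeddings and codes here are made-up values:

    import numpy as np
    emb = np.array([[1., 0.],   # hypothetical 2-d embedding for code 'A'
                    [0., 1.]])  # hypothetical 2-d embedding for code 'B'
    idx = {'A': 0, 'B': 1}
    feature = sum(emb[idx[c]] for c in 'A B A'.split())
    print(feature)              # [2. 1.]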
to_date=None).order_by('employee__job__number', 'employee__hire_date', 'employee__first_name', 'employee__last_name')\n serializer = SalarySerializer(employee, many=True)\n return JsonResponse(serializer.data, safe=False)\n return JsonResponse('Error', safe=False)\n\n@csrf_exempt\ndef api_get_salary_history(request):\n if request.user.is_authenticated:\n if request.method == \"POST\":\n req = json.loads(request.body.decode('utf-8'))\n emp_id = req['emp_id']\n\n salary = Salary.objects.filter(employee__pk=emp_id).order_by('-from_date', '-pk')\n serializer = SalarySerializer(salary, many=True)\n \n return JsonResponse(serializer.data, safe=False)\n return JsonResponse('Error', safe=False)\n\n\n# Methods\ndef get_employee_list(status):\n employee = Employee.objects.filter(status=status)\n\n other = employee.filter(~Q(job__job_title='driver')).order_by('job__number', 'hire_date', 'first_name', 'last_name')\n other_serializer = EmployeeSerializer(other, many=True)\n\n driver = Driver.objects.filter(employee__in=employee).order_by('truck__number', 'employee__hire_date', 'employee__first_name', 'employee__last_name')\n driver_serializer = DriverSerializer(driver, many=True)\n\n data = {\n 'other': other_serializer.data,\n 'driver': driver_serializer.data\n }\n return data","sub_path":"ndd-app/employee/views/employee_data_view.py","file_name":"employee_data_view.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"163386382","text":"import json\nimport pandas as pd\n\nimport remotespark.utils.configuration as conf\nimport remotespark.utils.constants as constants\nfrom remotespark.utils.utils import coerce_pandas_df_to_numeric_datetime\n\nfrom .command import Command\nfrom .dataframeparseexception import DataFrameParseException\n\n\nclass SQLQuery(object):\n def __init__(self, query, samplemethod=None, maxrows=None,\n samplefraction=None, only_columns=False):\n if samplemethod is None:\n samplemethod = conf.default_samplemethod()\n if maxrows is None:\n maxrows = conf.default_maxrows()\n if samplefraction is None:\n samplefraction = conf.default_samplefraction()\n\n assert samplemethod == 'take' or samplemethod == 'sample'\n assert isinstance(maxrows, int)\n assert 0.0 <= samplefraction <= 1.0\n\n self.query = query\n self.samplemethod = samplemethod\n self.maxrows = maxrows\n self.samplefraction = samplefraction\n self.only_columns = only_columns\n\n def to_command(self, kind):\n if kind == constants.SESSION_KIND_PYSPARK:\n return self._pyspark_command()\n elif kind == constants.SESSION_KIND_SPARK:\n return self._scala_command()\n elif kind == constants.SESSION_KIND_SPARKR:\n return self._r_command()\n else:\n raise ValueError(\"Kind '{}' is not supported.\".format(kind))\n\n def execute(self, session):\n (success, records_text) = self._get_records(session)\n if not success:\n raise DataFrameParseException(records_text)\n if records_text == \"\":\n # If there are no records, show some columns at least.\n records_text = self._get_columns(session)\n return self._columns_to_dataframe(records_text)\n else:\n return self._records_to_dataframe(records_text)\n\n def to_only_columns_query(self):\n \"\"\"Given a SQL query, return a new version of that SQL query which only gets\n the columns for that query.\"\"\"\n return SQLQuery(self.query, self.samplemethod, self.maxrows,\n self.samplefraction, True)\n\n def _get_records(self, session):\n return self.to_command(session.kind).execute(session)\n\n def 
_get_columns(self, session):\n        (success, out) = self.to_only_columns_query().to_command(session.kind).execute(session)\n        if success:\n            return out\n        else:\n            raise DataFrameParseException(out)\n\n    @staticmethod\n    def _columns_to_dataframe(columns_text):\n        return pd.DataFrame.from_records([], columns=columns_text.split('\\n'))\n\n    @staticmethod\n    def _records_to_dataframe(records_text):\n        strings = records_text.split('\\n')\n        try:\n            df = pd.DataFrame([json.loads(s) for s in strings])\n            coerce_pandas_df_to_numeric_datetime(df)\n            return df\n        except ValueError:\n            raise DataFrameParseException(\"Cannot parse object as JSON: '{}'\".format(strings))\n\n    def _pyspark_command(self):\n        command = 'sqlContext.sql(\"\"\"{}\"\"\")'.format(self.query)\n        if self.only_columns:\n            command = '{}.columns'.format(command)\n        else:\n            command = '{}.toJSON()'.format(command)\n            if self.samplemethod == 'sample':\n                command = '{}.sample(False, {})'.format(command, self.samplefraction)\n            if self.maxrows >= 0:\n                command = '{}.take({})'.format(command, self.maxrows)\n            else:\n                command = '{}.collect()'.format(command)\n        command = 'for {} in {}: print({})'.format(constants.LONG_RANDOM_VARIABLE_NAME,\n                                                   command,\n                                                   constants.LONG_RANDOM_VARIABLE_NAME)\n        return Command(command)\n\n    def _scala_command(self):\n        command = 'sqlContext.sql(\"\"\"{}\"\"\")'.format(self.query)\n        if self.only_columns:\n            command = '{}.columns'.format(command)\n        else:\n            command = '{}.toJSON'.format(command)\n            if self.samplemethod == 'sample':\n                command = '{}.sample(false, {})'.format(command, self.samplefraction)\n            if self.maxrows >= 0:\n                command = '{}.take({})'.format(command, self.maxrows)\n            else:\n                command = '{}.collect'.format(command)\n        return Command('{}.foreach(println)'.format(command))\n\n    def _r_command(self):\n        raise NotImplementedError()\n\n    # Used only for unit testing\n    def __eq__(self, other):\n        return self.query == other.query and \\\n               self.samplemethod == other.samplemethod and \\\n               self.maxrows == other.maxrows and \\\n               self.samplefraction == other.samplefraction and \\\n               self.only_columns == other.only_columns\n\n    def __ne__(self, other):\n        return not (self == other)","sub_path":"remotespark/livyclientlib/sqlquery.py","file_name":"sqlquery.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"171136207","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 17 22:24:36 2018\r\n\r\n@author: tamos\r\n\r\nThis code calculates Verlinde's apparent dark matter distribution from the \r\navailable baryonic mass distribution. The calculations are for the Coma cluster\r\nusing the data from Terukina and some other data sets from the literature. \r\n\r\n\"\"\"\r\n\r\n###############################################################################\r\n###                    Defining the needed functions                       ###\r\n###############################################################################\r\n\r\n## Importing the needed packages:\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nfrom matplotlib.ticker import ScalarFormatter\r\nimport matplotlib.ticker as ticker\r\nfrom scipy.integrate import quad\r\n\r\n## Defining the needed functions:\r\n\r\ndef cal1(Mv,cv): ## Eq. 
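Looking back at the SQLQuery class above: its command builders just chain Spark calls onto the query string, so the generated snippet can be written out directly. A sketch for a 'take' query (VAR stands in for the module's constants.LONG_RANDOM_VARIABLE_NAME, and default configuration is assumed to be available):

    query = SQLQuery("SELECT * FROM t", samplemethod='take', maxrows=10)
    command = query.to_command(constants.SESSION_KIND_PYSPARK)
    # the wrapped PySpark snippet is:
    # for VAR in sqlContext.sql("""SELECT * FROM t""").toJSON().take(10): print(VAR)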
(6) in Wilcox et al.\r\n\trvir=(3.E3*Mv/(400.*np.pi))**(1.0/3.0)\r\n\t#Calculates the cluster's virial radius from its mass\r\n\trs=rvir/cv\r\n\t#Calculates the scale radius\r\n\tmc=np.log(1.0+cv)-cv/(1.0+cv)\r\n\trhos=100.*cv*cv*cv/3.E3/mc\r\n\t#Here calculate the characteristic density rho_s\r\n\tphi0=0.5*rhos*rs*rs*0.71**(2.0/3.0)*1.691\r\n\t\r\n\treturn rs,phi0\r\n\r\ndef cal2(beta,phiinfty,phi0): ## Eq. (8) in Wilcox et al. \r\n\tcaptb=2.0*beta*phi0\r\n\t#This is equal to beta * rhos * rs^2, the factor in equations 2.17 & 2.18\r\n\tif beta==0.0:\r\n\t\txc=0.0\r\n\telif ((captb/phiinfty)-1.0)>0.0:\r\n\t\t#print (captb/phiinfty)-1.0\r\n\t\txc=(captb/phiinfty)-1.0\r\n\t\t#This is equation 2.19 rearranged from phiinf-beta*rhos*rs^2/(1+rc) to rc = beta*rhos*rs^2/phiinf - 1, calculating the critical radius below which there is no MG\r\n\telse:\r\n\t\txc=0.0\r\n\tif xc==0.0:\r\n\t\tcaptc=0.0\r\n\telse:\r\n\t\tcaptc=-captb*np.log(1.0+xc)+phiinfty*xc\r\n\t\t#This is equation 2.18, -beta*rhos*rs^2*ln(1+rc)+phiinf*rc\r\n\treturn xc,captc\r\n\r\ndef F_phi(r,Mv,cv,beta,phiinfty): ## Not sure where this equation comes from\r\n\trs,phi0 = cal1(Mv,cv)\r\n\txc,captc = cal2(beta,phiinfty,phi0)\r\n\tx=r/rs\r\n\t#Rescale the radius\r\n\tif (x<=xc):\r\n\t\tF_phix=0.0\r\n\t\t#If outside the critical radius then the MG force is 0\r\n\telse:\r\n\t\tF0=8.985*phi0/rs*0.71**(2.0/3.0)\r\n\t\tF_phix=F0*(2.0*beta*beta*(1.0/(x*(1.0+x))-np.log(1.0+x)/x/x)-beta*captc/phi0/x/x)\r\n\tif (F_phix>0.0):\r\n\t\tF_phix=0.0\r\n\t\t#Force cannot be positive\r\n\treturn F_phix\r\n\r\n########################### The mass profiles #################################\r\n'''\r\ndef lensing_M(r, cv, Mv): ## Eq. 7 in Wilcox et al. \r\n\trvir=(3.E3*Mv/(400.*np.pi))**(1.0/3.0)\r\n\trads=rvir/cv\r\n\trhos = Mv/4./np.pi/rads**3/(np.log(1.0+cv) - cv/(cv+1.0))\r\n\treturn 4. * np.pi * rhos * rads**3 * (np.log(1.0 + (r/rads)) - (r/rads)/(1.0 + (r/rads)))\r\n '''\r\n#### An alternative way for calculating lensing_M to check for accuracy:\r\n\r\ndef lensing_M(r, cv, Mv):\r\n    G = (6.67408E-11*1.989E30)/(2.938E67) ## in units of Mpc^3 s^-2 M_sol^-1\r\n    H_0 = 2.26856e-18\r\n    rho_c = (3.*H_0**2.)/(8.*np.pi*G) \r\n    #sigma_c = (200./3.)*(cv**3.)/(np.log(1+cv)-cv/(1+cv))\r\n    rvir = ((3.*Mv*1E14)/(4.*np.pi*100*rho_c))**(1.0/3.0) ## Using a 100 for delta_c here \r\n    rads=rvir/cv\r\n    rhos = Mv/4./np.pi/rads**3/(np.log(1.0+cv) - cv/(cv+1.0))\r\n    return 4. * np.pi * rhos * rads**3 * (np.log(1.0 + (r/rads)) - (r/rads)/(1.0 + (r/rads)))\r\n    \r\n\r\ndef ne(r,n_0,b_1,r_1):\r\n\treturn n_0*(1.0+r**2/r_1**2)**b_1\r\n\r\ndef dne(r,n_0,b_1,r_1):\r\n\treturn 2*b_1*n_0*r*(r**2/r_1**2+1)**(b_1-1)/r_1**2\r\n\r\ndef T_gas(r,T_0,A,r_0,b_0):\r\n\treturn T_0*(1+A*(r/r_0))**b_0\r\n\t\r\ndef dT_gas(r,T_0,A,r_0,b_0):\r\n\treturn A*b_0*T_0*(A*r/r_0+1)**(b_0-1)/r_0\r\n\r\ndef dnedT_gas(r,n_0,b_1,r_1,T_0,A,r_0,b_0): ## Eq. (14). 
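A quick self-consistency check of lensing_M above: the NFW normalisation rhos is defined from Mv, so the enclosed mass evaluated at the virial radius must return Mv again. A sketch reusing the same constants and the best-fit Coma values used below:

    import numpy as np
    G = (6.67408E-11*1.989E30)/(2.938E67)   # Mpc^3 s^-2 M_sol^-1, as in lensing_M
    H_0 = 2.26856e-18
    rho_c = (3.*H_0**2.)/(8.*np.pi*G)
    Mv, cv = 24.6, 2.64
    rvir = ((3.*Mv*1E14)/(4.*np.pi*100*rho_c))**(1.0/3.0)
    assert np.isclose(lensing_M(rvir, cv, Mv), Mv)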
Used the chain rule for the log integrals\r\n\treturn dne(r,n_0,b_1,r_1)*T_gas(r,T_0,A,r_0,b_0) + ne(r,n_0,b_1,r_1)*dT_gas(r,T_0,A,r_0,b_0) ## product rule: d(ne*T)/dr = dne*T + ne*dT, written with the helpers above\r\n\r\n########## From Terukina: ########\r\n# rho_{gas} = mp*n_{gas}*mu\r\n# Where mu = 0.59\r\n \r\n# Also: n_{e} = (2 + mu)/5 * n_{gas}\r\n\r\n##################################\r\n\r\ndef g(r):\r\n\t## NOTE: Mv is not defined in this script; only the g_c/non_thermal_Mc variants below (which use Mvc) are actually called\r\n\talpha_nt = 0.3\r\n\tbeta_nt = 0.5\r\n\tn_nt = 0.8\r\n\tn_M = 0.2\r\n\tz_rs = 0.3\r\n\tr_500 = 1.2\r\n\treturn alpha_nt*(1+z_rs)**beta_nt * (r/r_500)**n_nt * (Mv[0]/3.)**n_M\r\n\r\n## So the reason that there is no G, k & mp here is most likely because k, mp & G\r\n## are absorbed by writing the equation as follows: G x Mthermal x mp/r_200 = -kT/mu(r/r_200) x ( dlnne/dlnr + dlnT/dlnr)\r\n## this allows writing both sides of the equation in keV\r\n\r\ndef thermal_M(r,n_0,b_1,r_1,T_0,A,r_0,b_0):\r\n\treturn -T_gas(r,T_0,A,r_0,b_0) * r * 0.59 * (r/ne(r,n_0,b_1,r_1) * dne(r,n_0,b_1,r_1) + r/T_gas(r,T_0,A,r_0,b_0) * dT_gas(r,T_0,A,r_0,b_0)) / 2.\r\n\r\n## Most likely the reason for the missing constants, is that they're already included in the\r\n## definition of k & m\r\n\r\ndef non_thermal_M(r,n_0,b_1,r_1,T_0,A,r_0,b_0):\r\n\treturn - r**2 * g(r) / (1-g(r)) * dnedT_gas(r,n_0,b_1,r_1,T_0,A,r_0,b_0) / (5 * ne(r,n_0,b_1,r_1) * 0.59)\r\n\r\n\r\n\r\n###############################################################################\r\n##                     Initial mass profile calculations                    ##\r\n###############################################################################\r\n\r\ndef g_c(r):\r\n\talpha_nt = 0.3\r\n\tbeta_nt = 0.5\r\n\tn_nt = 0.8\r\n\tn_M = 0.2\r\n\tz_rs = 0.3\r\n\tr_500 = 1.2\r\n\treturn alpha_nt*(1+z_rs)**beta_nt * (r/r_500)**n_nt * (Mvc[0]/3.)**n_M\r\n\r\ndef non_thermal_Mc(r,n_0,b_1,r_1,T_0,A,r_0,b_0):\r\n\treturn - r**2 * g_c(r) / (1-g_c(r)) * dnedT_gas(r,n_0,b_1,r_1,T_0,A,r_0,b_0) / (5 * ne(r,n_0,b_1,r_1) * 0.59)\r\n\r\n\r\n## To convert the radius into proper units:\r\n\r\n'''\r\nrvir=(3.E3*Mv_sol/(400.*np.pi))**(1.0/3.0) ## in Mpc\r\nr=np.linspace(0.3,2,100)  ## The Radius ## \r\nr_mpc = r* rvir # in Mpc\r\nr_m = r_mpc *3.086e+22 # in m\r\n'''\r\n\r\n# Bestfit parameters from Terukina's paper:\r\n\r\n#rc=np.linspace(0.3,2,100) \r\n\r\nrc = np.linspace(0.1,1,100) ## In Mpc\r\n\r\n# Mvc = 24.6\r\n# cvc = 2.64\r\nn_0c = 2.34E-3\r\nb_1c = -0.915\r\n#b_1c = -0.8\r\n#b_1c = -1.0 ## Using the values from fig 4\r\nr_1c = 0.299\r\n#r_1c = 0.355\r\n#r_1c = 0.34 ## Using the values from fig4\r\n#r_1c = 0.254\r\nT_0c = 11.3\r\n#T_0c = 8.6 ## Using the values from fig4\r\nAc = 0.082 \r\nr_0c = 3.9\r\nb_0c =-5.3\r\n#T0c = 8.6\r\n#betac = \r\n#phiinftyc =\r\n\r\nmpc = 3.085677581E22 ## Megaparsecs in meters\r\ncvc=[2.64, 2.64+0.72 ,2.64-0.7] ###############################\r\n#cvc=[3.5, 6.07,1,71] ## using values from figure 5\r\nMvc=[24.6, 24.6+13.3 , 24.6-6.1] ############# What #############\r\n#Mvc= [8.92/0.7, 28.97/0.7,3.75/0.7] ## using values from figure 5\r\n#n_0 = [2.34E-3, 2.55E-3,2.15E-3]\r\nn_0 = [2.34,2.55E-3,2.15E-3 ] # using the value from refs [25-27]\r\n#cvb=[2.64,2.64+1.75,2.64-1.78] ############################\r\n#Mvb=[11.3,11.3+2.466,11.3-2.191] ############################\r\n\r\n\r\narray1 = []\r\narray2 = []\r\narray3 = []\r\narray4 = []\r\narray5 = [] ## These are the two arrays for the n_0 1 sigma variation on the thermal+non-thermal masses\r\narray6 = [] ## \r\narray7 = []\r\n\r\nfor i in range(len(rc)):\r\n    array1.append(lensing_M(rc[i], cvc[1], Mvc[1]))\r\n    array2.append(lensing_M(rc[i], cvc[2], 
Mvc[2]))\r\n array3.append(thermal_M(rc[i],n_0c,b_1c,r_1c,T_0c,Ac,r_0c,b_0c))\r\n array4.append(thermal_M(rc[i],n_0c,b_1c,r_1c,T_0c,Ac,r_0c,b_0c) + non_thermal_Mc(rc[i],n_0c,b_1c,r_1c,T_0c,Ac,r_0c,b_0c)/5)\r\n #array5.append(thermal_M(rc[i],n_0c,b_1c,r_1c,T_0c,Ac,r_0c,b_0c) + - rc[i]**2 * F_phi(rc[i],Mvc[0],cvc[0],beta,phiinfty))\r\n array5.append(thermal_M(rc[i],n_0[1],b_1c,r_1c,T_0c,Ac,r_0c,b_0c) + non_thermal_Mc(rc[i],n_0[1],b_1c,r_1c,T_0c,Ac,r_0c,b_0c)/5)\r\n array6.append(thermal_M(rc[i],n_0[2],b_1c,r_1c,T_0c,Ac,r_0c,b_0c) + non_thermal_Mc(rc[i],n_0[2],b_1c,r_1c,T_0c,Ac,r_0c,b_0c)/5) \r\n array7.append(lensing_M(rc[i], cvc[0], Mvc[0]))\r\n \r\nfig = plt.figure(figsize=(11.5,9.5))\r\nax = fig.add_subplot(111)\r\n\r\nplt.xlim(min(rc),max(rc))\r\nplt.ylim(0.01,100)\r\nax.fill_between(rc, array1, array2, facecolor='blue', alpha=0.4, interpolate=True, lw=0)\r\nax.set_yscale('log')\r\nax.set_xscale('log')\r\nax.loglog(rc,array3, label = 'Thermal mass')\r\nax.loglog(rc,array4, '--',label = 'Thermal + non-thermal')\r\n#ax.loglog(rc,array5, ':', label = 'Thermal + non-thermal + (1-$\\sigma$)') ## Not sure why it doesn't have much effect\r\n#ax.loglog(rc,array6, ':', label = 'Thermal + non-thermal - (1-$\\sigma$)') ## Not sure why it doesn't have much effect??\r\nax.set_xlabel('r (Mpc)')\r\nax.set_ylabel('M(