diff --git "a/6381.jsonl" "b/6381.jsonl" new file mode 100644--- /dev/null +++ "b/6381.jsonl" @@ -0,0 +1,669 @@ +{"seq_id":"15085448","text":"from django.views import generic\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Event\nfrom .serializers import EventSerializer\nfrom rest_framework import authentication, permissions\nfrom rest_framework.views import APIView\nfrom django.http import HttpResponse\nfrom datetime import datetime\n\nclass LogEvent(APIView):\n \"\"\"\n API endpoint that logs Events.\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n def post(self, request, format=None):\n try:\n insert_event = Event(\n event_type = request.POST[\"event_type\"],\n created_at = datetime.utcnow(),\n host = request.get_host(),\n referrer = request.META.get(\"HTTP_REFERER\"),\n job_id = request.POST[\"job_id\"],\n request_url = request.path,\n ip_address = request.META.get(\"REMOTE_ADDR\"),\n #company_id = request.POST[\"company_id\"],\n user_agent = request.META.get(\"HTTP_USER_AGENT\"),\n #applicant_email = request.user.email,\n session_id = request.session.session_key\n )\n insert_event.save()\n return HttpResponse(status=201)\n except Exception as e:\n return HttpResponse(e,status=400)\n","sub_path":"events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"76113943","text":"# -*- coding: utf-8 -*-\n\"\"\"\n2D Poisson Solver \n\"\"\"\n\n# src-ch7/laplace_Diriclhet1.py\nimport numpy as np\nimport scipy \nimport scipy.linalg\nimport scipy.sparse\nimport scipy.sparse.linalg\n#import matplotlib; matplotlib.use('Qt4Agg')\nimport matplotlib.pylab as plt\nfrom matplotlib import colors, ticker, cm\nfrom math import pi\n\nfrom Constants import MU0\nfrom Mesh import MESHGRID\n\n\n#set infinite-long wire to position (wx, wy) with infinitesimal radius\nI = 1.0 # wire current in A\n# According to Ampere's law in integral form\n# B(r|r>r0) = mu0*I/(2*pi*r)\n#The earth's magnetic field is about 0.5 gauss. 
\nwidth, height, nx, ny = 50.0, 50.0, 101, 101\nmesh = MESHGRID(width, height, nx, ny)\nmesh.init_mesh()\n\nmn = (nx-2)*(ny-2)\nb = np.zeros(mn) #RHS\n\n#scalar broadcasting version:\nA = scipy.sparse.diags([1, 1, -4, 1, 1],\n [-(nx-2), -1, 0, 1, (ny-2)], \n shape=(mn, mn)).toarray()\n\n# update A matrix\nfor i in range(1, nx-2):\n A[(nx-2)*i-1, (ny-2)*i] = 0\n A[(ny-2)*i, (nx-2)*i-1] = 0\n \nb[0+(nx-2)*int(ny/2-1)+30] = MU0*I/(2.0*pi)\nb[-1-(nx-2)*int(ny/2-1)-30] = -MU0*I/(2.0*pi)\n \nphi=scipy.linalg.solve(A,b)\nphi = phi.reshape((nx-2, ny-2))\nphi = np.pad(phi, pad_width=1, mode='constant', constant_values=0)\n\ndef curl(A):\n \"\"\"\n calculate the curl of A, which is a matrix of Fz in z direction\n Boundary of A is set to zero\n Using center difference, only A[1:-2, 1:-2] is calculated\n \"\"\"\n U = np.zeros_like(A)\n V = np.zeros_like(A)\n bf = np.zeros_like(A)\n nx, ny = A.shape\n for i in range(1, nx-2):\n for j in range(1, ny-2):\n U[i,j] = (A[i, j+1] - A[i, j-1])/mesh.delx/2.0\n V[i,j] = -(A[i+1, j] - A[i-1, j])/mesh.dely/2.0\n bf = np.sqrt(np.power(U, 2) + np.power(V, 2))\n U = np.divide(U, bf, where=bf>0, out=np.zeros_like(bf))\n V = np.divide(V, bf, where=bf>0, out=np.zeros_like(bf))\n return U, V, bf\n\nU, V, bf = curl(phi)\n\nphi = abs(phi)\nprint('B field min = %.2e max = %.2e' % (phi.min(), phi.max()))\n\n#\nfig, ax = plt.subplots(1, 2, figsize=(6,3))\n#ax[0].plot(mesh.posx, mesh.posy,\n# marker='.', markersize=3,\n# color='black', linestyle='None')\n# Alternatively, you can manually set the levels\n# and the norm:\nlev_exp = np.arange(np.floor(np.log10(phi[np.nonzero(phi)].min())),\n np.ceil(np.log10(phi.max())), 0.1)\nlevs = np.power(10, lev_exp)\ncs = ax[0].contour(mesh.posx, mesh.posy,\n phi, levs, norm=colors.LogNorm())\n#ax.clabel(cs, cs.levels)\n#fig.colorbar(cs)\n#ax.quiver(mesh.posx, mesh.posy, vx, vy)\n#ax.plot(pos1[0], pos1[1],\n# color='red', marker='o', markersize=15)\n#ax.plot(pos2[0], pos2[1],\n# color='red', marker='o', markersize=15)\n\n#ax[1].plot(mesh.posx, mesh.posy,\n# marker='.', markersize=3,\n# color='black', linestyle='None')\n\nlev_exp = np.arange(np.floor(np.log10(bf[np.nonzero(bf)].min())),\n np.ceil(np.log10(bf.max())), 0.1)\nlevs = np.power(10, lev_exp)\ncs = ax[1].contour(mesh.posx, mesh.posy, bf, levs, norm=colors.LogNorm())\n\nfig, ax = plt.subplots(1, 1, figsize=(6,6))\nax.streamplot(mesh.posx, mesh.posy, V, U)\n","sub_path":"TwoWires.py","file_name":"TwoWires.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"583817653","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 15:48:24 2020\n\nThe script defines a Convolutional Neural Network (CNN) and performs training\nand validation using specified images. 
The weights obtained while training\nare saved to a checkpoint file to facilitate testing of the CNN.\n\n@author: Giovanni D'Addario & Alice Purdy\n\"\"\"\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport time\n\nfrom astropy.io import fits\nfrom cnn import cnn_model\nfrom tensorflow.keras import layers, models, optimizers\n\n\nstart = time.perf_counter() # set a time counter for execution time\n\n# set paths to directories with files for training and validating the CNN\ntrain_data_dir = './train_dir'\nval_data_dir = './val_dir'\n\n# get shape of images from first training image (assumed same for all \n # validation/training images)\nfile_shape = np.shape(fits.getdata(train_data_dir + '/' + os.listdir(train_data_dir)[0]))\n\n# create 4D arrays to which the images will be assigned: arrays will be fed\n # into the CNN\ntrain_data = np.zeros(shape=(len(os.listdir(train_data_dir)), file_shape[0], \n file_shape[1], file_shape[2]))\nval_data = np.zeros(shape=(len(os.listdir(val_data_dir)), file_shape[0],\n file_shape[1], file_shape[2]))\n\n# assign training data to 4D array and create array of training labels\ntrain_label_file = 'train_dir_colour_flag.csv' # file with file name/ label pairs\ntrain_label_data = np.genfromtxt(train_label_file, delimiter=',',\n skip_header=1, dtype=None)\n# create array to store labels of the training images\ntrain_labels = np.zeros(shape=(len(train_label_data)))\n# loop through file name/ label pairs: assing label to label array and data \n # from file to 4D data array, guaranteeing that entries with the same index\n # in both arrays correspond to the same file\nfor index, row in enumerate(train_label_data):\n filename, label = row\n data = fits.getdata(train_data_dir+'/'+filename.decode(\"utf-8\"))\n train_data[index] = data\n train_labels[index] = int(label)\nprint('Normalisation check:', np.min(train_data), np.max(train_data)) \n# assign validation data to 4D array and create array of validation labels\nval_label_file = 'val_dir_colour_flag.csv' # file with file name/ label pairs\nval_label_data = np.genfromtxt(val_label_file, delimiter=',', skip_header=1, dtype=None)\n# create array to store labels of the validation images\nval_labels = np.zeros(shape=(len(val_label_data)))\n# loop through file name/ label pairs: assing label to label array and data \n # from file to 4D data array, guaranteeing that entries with the same index\n # in both arrays correspond to the same file\nfor index, row in enumerate(val_label_data):\n filename, label = row\n data = fits.getdata(val_data_dir+'/'+filename.decode(\"utf-8\"))\n val_data[index] = data\n val_labels[index] = int(label)\n\nprint('Training data shape: ', np.shape(train_data))\n\n#def cnn_model():\n# \"\"\"\n# Defines a CNN model.\n# \n# Returns:\n# model : keras model: linear stack of layes\n# \"\"\"\n# model = models.Sequential()\n# model.add(layers.Conv2D(100, (4,4), activation='relu', \n# data_format='channels_first', input_shape=file_shape))\n# model.add(layers.MaxPool2D((2,2), data_format='channels_first'))\n# model.add(layers.Conv2D(200, (4,4), data_format='channels_first', activation='relu'))\n# model.add(layers.MaxPool2D((2,2), data_format='channels_first'))\n# model.add(layers.Conv2D(200, (4,4), data_format='channels_first', activation='relu'))\n# model.add(layers.MaxPool2D((2,2), data_format='channels_first'))\n# model.add(layers.Conv2D(200, (4,4), data_format='channels_first', activation='relu'))\n# model.add(layers.Flatten())\n# model.add(layers.Dense(200, 
activation='relu'))\n# model.add(layers.Dense(2)) \n# model.compile(optimizer=optimizers.Adam(lr=1e-5), \n# loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n# \t\n# return model\n\n#print('model defined')\n\n# create checkpoint to save model during and at the end of training. \ncheckpoint_path = \"./checkpoint/cp.ckpt\" # defines checkpoint path\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# create a model instance\nmodel = cnn_model()\nprint(model.summary())\n\n# create a callback that saves the model's weights\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, \n save_weights_only=True,\n verbose=1)\nprint('callback created')\n\n# train the model with the callback\nhistory = model.fit(train_data, train_labels, epochs=40,\n validation_data=(val_data, val_labels),\n \t\t\t\t\tcallbacks=[cp_callback])\n\nprint('model trained')\n\nprint('Accuracy ', history.history['acc'])\nprint('Val accuracy: ', history.history['val_acc'])\n#fig = plt.figure(figsize=(10,10))\n#plt.plot(history.history['acc'], c='blue', label='Training accuracy')\n#plt.plot(history.history['val_acc'], c='red', \n# label = 'Validation accuracy')\n#plt.xlabel('Epoch')\n#plt.ylabel('Accuracy')\n#plt.ylim([0, 1])\n#plt.legend(loc='lower right')\n#plt.savefig('CentralVsSatellite_Reduced_Accuracy')\n#print('complete')\n# elapsed time\n#print()\n#predictions = model.predict(val_data)\n#results = []\n#for line in predictions:\n#\tresults.append(np.argmax(line))\n#print('validation predictions:', results)\n#print()\n#train_pred = model.predict(train_data)\n#train_res = []\n#for entry in train_pred:\n#\ttrain_res.append(np.argmax(entry))\n#print('training predictions:', train_res)\n#\nelapsed = time.perf_counter() - start\nprint('\\nTraining: elapsed %.3f seconds.' 
% elapsed)\n","sub_path":"Logs/GalaxyColour/Logbook/Run_002-1808/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"465614984","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008-2009 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import get\n\nimport glob\n\nWorkDir=\"iscan-firmwares-%s\" % get.srcVERSION()\n\ndef install():\n\n # Get plugins list\n plugins = shelltools.ls(\".\")\n\n # Install the firmware files\n for pl in plugins:\n files = shelltools.ls(\"%s/usr/share/iscan\" % pl)\n if len(files) > 0:\n pisitools.insinto(\"/usr/share/iscan\", \"%s/usr/share/iscan/%s\" % (pl, files[0]))\n\n # Install the libraries\n libs = [f.rpartition(get.curDIR()+'/')[-1] for f in glob.glob(\"%s/*/usr/lib/iscan/*\" % get.curDIR())\n if not shelltools.isLink(f)]\n\n for l in libs:\n pisitools.dolib_so(l, \"/usr/lib/iscan\")\n pisitools.dosym(shelltools.baseName(l), \"/%s\" % l.split(\"/\", 1)[1].split(\".\")[0]+'.so')\n pisitools.dosym(shelltools.baseName(l), \"/%s\" % l.split(\"/\", 1)[1].split(\".\")[0]+'.so.2')\n\n # Dodoc one of the plugins doc files, it's all same.\n for d in shelltools.ls(\"iscan-plugin-gt-f520/usr/share/doc/iscan-plugin-gt-f520-1.0.0\"):\n pisitools.dodoc(\"iscan-plugin-gt-f520/usr/share/doc/iscan-plugin-gt-f520-1.0.0/%s\" % d)\n\n\n\n","sub_path":"pardus/tags/2009.2/hardware/firmware/iscan-firmware/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257093823","text":"# -*- coding: utf-8 -*-\nimport socket\nimport sys\n\n# 正引き\ndef foward_lookup(domain):\n try:\n return socket.gethostbyname(domain)\n except:\n return False\n\n# 逆引き\ndef reverse_lookup(ip):\n try:\n return socket.gethostbyaddr(ip)[0]\n except:\n return False\n\ndef main():\n host = \"172.217.25.78\"\n host = host.strip()\n domain = reverse_lookup(host)\n print(domain)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"nslook.py","file_name":"nslook.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26518025","text":"from models.release import DiscogsRelease\n\nimport logging\nfrom config.logger import setup_logger\nlogger = logging.getLogger(__name__)\nlogger = setup_logger(logger)\n\n\nclass AddToWatchlistTask(object):\n\n def __init__(self, release_id, db):\n\n self.release = DiscogsRelease(release_id)\n self.db = db\n\n def execute(self):\n\n logger.info('processing release {}'.format(self.release.id))\n\n self.release.set_attributes()\n\n self.db.insert_into_releases(self.release, 'watchlist')\n","sub_path":"code/tasks/insert_release.py","file_name":"insert_release.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"146590011","text":"import dnslib\r\nfrom socket import *\r\n\r\nport = 12001\r\nserverSocket = socket(AF_INET, SOCK_DGRAM)\r\nserverSocket.bind(('0.0.0.0', port))\r\nprint(\"the server is ready to receive\")\r\n\r\n# this is the list for root servers\r\nlist_rootdns = [(\"b.root-servers.net.\", \"199.9.14.201\"),\r\n 
(\"c.root-servers.net.\", \"192.33.4.12\"),\r\n (\"k.root-servers.net.\", \"193.0.14.129\"),\r\n (\"i.root-servers.net.\", \"192.36.148.17\"),\r\n (\"l.root-servers.net.\", \"199.7.83.42\"),\r\n (\"h.root-servers.net.\", \"198.97.190.53\"),\r\n (\"f.root-servers.net.\", \"192.5.5.241\"),\r\n (\"g.root-servers.net.\", \"192.112.36.4\"),\r\n (\"m.root-servers.net.\", \"202.12.27.33\"),\r\n (\"e.root-servers.net.\", \"192.203.230.10\"),\r\n (\"a.root-servers.net.\", \"198.41.0.4\"),\r\n (\"d.root-servers.net.\", \"199.7.91.13\"),\r\n (\"j.root-servers.net.\", \"192.58.128.30\")]\r\ncache = {}\r\n\r\n\r\ndef query(msg, cname_ans, ip): # cname_ans: answer record for cname type\r\n info = dnslib.DNSRecord.parse(msg)\r\n info.header.rd = 0\r\n msg = bytes(info.pack())\r\n serverSocket.sendto(msg, (ip, 53))\r\n receive = serverSocket.recv(2048)\r\n receive = dnslib.DNSRecord.parse(receive)\r\n\r\n if receive.header.a == 0: # find no answer, continue to query\r\n if receive.header.ar > 1: # except OPT information\r\n newip = str(receive.ar[0].rdata)\r\n return query(msg, cname_ans, newip)\r\n nameserver = receive.auth[0].rdata\r\n newmsg = dnslib.DNSRecord.question(str(nameserver), \"A\")\r\n newmsg = dnslib.DNSRecord.parse(newmsg)\r\n newmsg = bytes(newmsg.pack()) # type: # from DNSRecord to bytes\r\n newinfo = query(newmsg, cname_ans, list_rootdns[0][1])\r\n newip = str(newinfo.rr[0].rdata) # for name server, there is not cname record in answer(we can get A type directly)\r\n return query(msg, cname_ans, newip)\r\n else: # find answers, return answers or query for CNAME\r\n if receive.rr[0].rtype == 5: # type == CNAME\r\n newmsg = dnslib.DNSRecord.question(str(receive.rr[0].rdata), \"A\")\r\n newmsg = bytes(newmsg.pack())\r\n cname_ans.append(receive.rr[0])\r\n return query(newmsg, cname_ans, list_rootdns[0][1])\r\n else: # type == A (1)\r\n tmp = receive.rr\r\n receive.rr = cname_ans\r\n receive.rr.extend(tmp)\r\n receive.header.a = len(receive.rr)\r\n receive = bytes(receive.pack())\r\n return receive\r\n\r\n\r\nif __name__ == \"__main__\":\r\n while True:\r\n msg, clientAddress = serverSocket.recvfrom(2048)\r\n info = dnslib.DNSRecord.parse(msg)\r\n id = info.header.id\r\n name = info.q.qname\r\n if info.q.qname in cache: # query the cache\r\n ans = cache[info.q.qname] # only consider A type\r\n else:\r\n ans = query(msg, [], list_rootdns[0][1])\r\n ans = dnslib.DNSRecord.parse(ans)\r\n ans.q.qname = name\r\n ans = bytes(ans.pack())\r\n cache[info.q.qname] = ans\r\n ans = dnslib.DNSRecord.parse(ans)\r\n ans.header.id = id\r\n ans = bytes(ans.pack())\r\n\r\n serverSocket.sendto(ans, clientAddress)\r\n\r\n\r\n\r\n","sub_path":"labCode/LocalDNSServer.py","file_name":"LocalDNSServer.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"40577135","text":"import sys\n\nimport unittest\nfrom contextlib import contextmanager\nfrom io import StringIO\n\nfrom studio.torch import summary\n\n\n@contextmanager\ndef capture_output():\n new_out = StringIO()\n old_out = sys.stdout\n try:\n sys.stdout = new_out\n yield sys.stdout\n finally:\n sys.stdout = old_out\n\n\nclass ReporterTest(unittest.TestCase):\n\n @unittest.skip('weird unicode vs string error, probably' +\n 'due to python3 migration')\n def test_summary_report(self):\n r = summary.Reporter(log_interval=2, smooth_interval=2)\n with capture_output() as out:\n r.add(0, 'k', 0.1)\n r.add(1, 'k', 0.2)\n r.report()\n r.add(2, 'k', 0.3)\n r.report()\n 
self.assertEqual(out.getvalue(), \"Step 2: k = 0.25000\\n\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"studio/torch/summary_test.py","file_name":"summary_test.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"538586442","text":"#!/usr/bin/env python\n#coding:utf8\n# Author : tuxpy\n# Email : q8886888@qq.com\n# Last modified : 2015-03-13 17:28:38\n# Filename : config/config.py\n# Description : \n\nfrom ConfigParser import ConfigParser\n\nCFG_PATH = 'config/config.cfg'\n\nclass Config():\n def __init__(self):\n self.cfg = ConfigParser()\n self.cfg.read(CFG_PATH)\n\n def __get_config(self, section, int_list = None):\n _config = {}\n int_list = int_list or []\n for k, v in self.cfg.items(section):\n if k in int_list:\n v = int(v)\n _config[k] = v\n\n return _config\n\n @property\n def redis_config(self):\n return self.__get_config('redis', ['db', 'port'])\n\n @property\n def weixin_config(self):\n return self.__get_config('weixin')\n\nconfig = Config()\nredis_config = config.redis_config\nweixin_config = config.weixin_config\n\n","sub_path":"web_server/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"220417279","text":"from app import nutritional_values\nimport statistics\n\n\nclass Cat(object):\n def __init__(self, weight, age, activity):\n self.activity = activity\n self.kcal_needs_per_kg = nutritional_values.kcal_by_activity[self.activity]\n self.weight = weight\n self.age = age\n self.age_modif = nutritional_values.age_modifier[self.age]\n\n # RER - Resting Energy Requirement (by weight only)\n # '0.67' value taken from a nutritional guide. 
See nutritional_values.py for reference\n self.rer_formula_by_weight = self.weight * 0.67\n\n # DER - Daily Energy Requirement (by weight and activity)\n self.der = self.calculate_der()\n self.der_avg = statistics.mean([self.der[\"min\"], self.der[\"max\"]])\n self.der_avg = round(self.der_avg, 0)\n\n # MER - Metabolic Energy Requirement (total, by weight, activity and age)\n self.mer = self.calculate_mer()\n self.mer_avg = statistics.mean([self.mer[\"min\"], self.mer[\"max\"]])\n self.mer_avg = round(self.mer_avg, 0)\n\n def calculate_der(self):\n der = {}\n\n der[\"min\"] = self.rer_formula_by_weight * self.kcal_needs_per_kg[\"min\"]\n der[\"min\"] = round(der[\"min\"], 2)\n\n der[\"max\"] = self.rer_formula_by_weight * self.kcal_needs_per_kg[\"max\"]\n der[\"max\"] = round(der[\"max\"], 2)\n\n return der\n\n def calculate_mer(self):\n mer = {}\n\n mer[\"min\"] = self.der[\"min\"] * self.age_modif[\"min\"]\n mer[\"min\"] = round(mer[\"min\"], 2)\n mer[\"max\"] = self.der[\"max\"] * self.age_modif[\"max\"]\n mer[\"max\"] = round(mer[\"max\"], 2)\n\n return mer\n","sub_path":"app/cat_calc.py","file_name":"cat_calc.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"535019143","text":"from dataclasses import replace\nfrom random import random, getrandbits\n\nfrom numpy import around, ndarray, array\nfrom pygame import Rect\nfrom pygame.surface import Surface\nfrom pygame.transform import flip\n\nfrom src.model.constants import PLAYER_SPRITE, GRAVITY, SCREEN_SIZE, PLAYER_SIZE, PLAYER_MAX_V\nfrom src.model.dataclasses import MenuParticle\nfrom src.model.utils import clamp_vec\n\n\ndef spawn_menu_particle(particles: list[MenuParticle]) -> list[MenuParticle]:\n \"\"\"\n Spawns a menu particle at a random position on top of the screen.\n\n :param particles: menu particle data\n :return: updated menu particles list\n \"\"\"\n particles_: list[MenuParticle] = particles.copy()\n\n pos: ndarray = array((\n random() * SCREEN_SIZE[0] - PLAYER_SIZE[0],\n -PLAYER_SIZE[1]\n ))\n v: ndarray = array((1 - random() * 2, 0))\n\n particles_.append(MenuParticle(\n pos,\n Rect(tuple(pos), tuple(PLAYER_SIZE)),\n v,\n bool(getrandbits(1))\n ))\n return particles_\n\n\ndef update_display_menu_particles(particles: list[MenuParticle], screen: Surface, delta: float) -> list[MenuParticle]:\n \"\"\"\n Updates then display on screen a menu particles list.\n\n :param particles: menu particles list\n :param screen: screen surface\n :param delta: delta between two frames\n :return: updated menu particles list\n \"\"\"\n particles_: list[MenuParticle] = particles.copy()\n for i, _ in enumerate(particles_):\n\n particles_[i] = _update_particle(particles_[i], delta)\n _display_menu_particles(particles_[i], screen)\n\n if particles_[i].rect.top > SCREEN_SIZE[1]:\n particles_.pop(i)\n\n return particles_\n\n\ndef _display_menu_particles(particle: MenuParticle, screen: Surface) -> None:\n \"\"\"\n Displays a menu particle on screen.\n It has the appearance of a player sprite.\n\n :param particle: menu particle data\n :param screen: screen surface\n \"\"\"\n sprite: Surface = PLAYER_SPRITE if not particle.flipped else flip(PLAYER_SPRITE, True, False)\n screen.blit(sprite, around(particle.rect.topleft))\n\n\ndef _update_particle(particle: MenuParticle, delta: float) -> MenuParticle:\n \"\"\"\n Update a menu particle position, making it fall.\n\n :param particle: menu particle data\n :param delta: delta between two frames\n :return: 
updated menu particle data\n \"\"\"\n v: ndarray = particle.velocity + GRAVITY * delta\n v = clamp_vec(v, PLAYER_MAX_V)\n pos: ndarray = particle.pos + v\n\n rect: Rect = Rect(particle.rect)\n rect.topleft = around(pos)\n\n return replace(particle, pos=pos, rect=rect, velocity=v)\n","sub_path":"src/menu/menu_particles.py","file_name":"menu_particles.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"99035542","text":"#!/usr/bin/env python3\n\n# utilities for dealing with LRLPs\nimport argparse\nimport sys\nimport os\nimport re\nimport os.path\nfrom zipfile import ZipFile as zf\nimport xml.etree.ElementTree as ET\nimport gzip\nfrom io import TextIOWrapper\n\ndef main():\n import codecs\n parser = argparse.ArgumentParser(description=\"Print monolingual data\")\n parser.add_argument(\"--infile\", \"-i\", nargs='+', type=argparse.FileType('r'), default=[sys.stdin,], help=\"input zip file(s) (each contains a multi file)\")\n # parser.add_argument(\"--outfile\", \"-o\", nargs='?', type=argparse.FileType('w'), default=sys.stdout, help=\"output file (single text file)\")\n parser.add_argument(\"--outfile\", \"-o\", help=\"output file (single text file)\")\n parser.add_argument(\"--xml\", \"-x\", action='store_true', help=\"process ltf xml files\")\n parser.add_argument(\"--tokenize\", action='store_true', help=\"use tokens (only applies if -x)\")\n\n\n try:\n args = parser.parse_args()\n except IOError as msg:\n parser.error(str(msg))\n\n of = codecs.open(args.outfile, 'w', 'utf-8')\n for infile in args.infile:\n archive = zf(infile)\n for info in archive.infolist():\n if info.file_size < 20:\n continue\n # plain processing assumes rsd structure\n if not args.xml and os.path.dirname(info.filename) != 'rsd':\n continue\n # print info.filename\n with TextIOWrapper(archive.open(info, 'r')) as ifh:\n if args.xml:\n xobj = ET.parse(ifh)\n if args.tokenize:\n of.writelines([ ' '.join([ y.text for y in x.findall(\".//TOKEN\") ])+\"\\n\" for x in xobj.findall(\".//SEG\") ])\n else:\n of.writelines([ x.text+\"\\n\" for x in xobj.findall(\".//ORIGINAL_TEXT\") ])\n else:\n lines = ifh.readlines()\n for line in lines:\n of.write(line.decode('utf8'))\n\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"monoextract.py","file_name":"monoextract.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"182640061","text":"import numpy as np\nfrom scipy.stats import truncnorm\n\n\nnp.random.seed(seed=12345)\nrng = np.random.default_rng(12345)\n\n\ndef normal(sigma, size, mu=0):\n return rng.normal(mu, sigma, size)\n\n\ndef laplace(scale, size, loc=0):\n return rng.laplace(loc, scale, size)\n\n\ndef trunc_normal(b, scale, size, loc=0, a=0):\n a = -b if a == 0 else a\n return truncnorm.rvs(a, b, loc, scale, size)\n\n\nif __name__ == '__main__':\n tmp = normal(1, 2)\n print(tmp, tmp.shape)\n tmp = laplace(1, 2)\n print(tmp, tmp.shape)\n tmp = laplace(1, 2)\n print(tmp, tmp.shape)\n tmp = trunc_normal(0.1, 1, 2)\n print(tmp, tmp.shape)\n tmp = trunc_normal(0.1, 1, 2)\n print(tmp, tmp.shape)\n","sub_path":"perturbation.py","file_name":"perturbation.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580999804","text":"import requests\n\nfrom api.login_api import Login\nfrom data_for_test import COLLECTION_URL, LOGIN_URL\n\n\nclass 
Collections:\n def __init__(self):\n\n self.collections_url = COLLECTION_URL\n self.login = Login()\n\n def get_collections(self, data):\n json_data = {\"mobile\":\"13012345678\",\"code\":\"888888\"}\n token = self.login.get_login\\\n (json_data).json().get(\"data\").get(\"token\")\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + token\n }\n\n return requests.post(self.collections_url, json=data, headers=headers)","sub_path":"app_dark_horse/api/collections_api.py","file_name":"collections_api.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"326174617","text":"import setuptools\n\nwith open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"verilog_timings_parser\",\n version=\"0.0.1\",\n packages=setuptools.find_packages(),\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author=\"Antmicro Ltd.\",\n author_email=\"contact@antmicro.com\",\n entry_points={\n 'console_scripts': ['verilog-timings-to-liberty=verilog_timings_parser.convert_verilog_timings_to_liberty:main'] # noqa: E501\n },\n install_requires=[\n 'quicklogic_timings_importer @ git+https://github.com/antmicro/quicklogic-timings-importer#egg=quicklogic_timings_importer', # noqa: E501\n 'ply',\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"236059358","text":"# An example of a function that takes several arguments\nprint(\"hello\", \"world\", \"wide\")\n\n# The max() function\nresult = max(42, 17)\nresult = max(3.59, 8.24, 9.71, 6.53)\nresult = max(\"abcwdef\")\nresult = max(92, 3.8, 47, 6.32, 9.87, 4)\nprint(result)\n\n# The min() function\nresult = min(26, 18, 57, 35)\nresult = min(9.7, 8.4, 7.2, 52.8)\nresult = min(\"wxyaz\")\nprint(result)\n\n# The len() function\nresult = len(\"pepperoni\")\nprint(result)\n\n# The int() function\nresult = int(\"32\")\nresult = int(3.9999)\nresult = int(-2.3)\nresult = int(0.5)\nresult = int(-0.5)\nprint(result)\n\n# The round() function\noutcome = round(3.5827, 3)\nprint(outcome)\n\n# the float() function\nresult = float(32)\nresult = float(\"3.14\")\nprint(result)\n\n# the str() function\nresult = str(32)\nresult = str(3.14)\nprint(result)\n\nprint(\"pi is: \" + str(3.14))\n\nimport random\n\n# the random() function\nnum = random.random()\nprint(num)\n\n# the randint() function\nvalue = random.randint(5, 10)\nprint(value)\n\nselection = random.choice([18, 23, 9, 35])\nprint(selection)\n\n","sub_path":"03Functions/builtinfunctions.py","file_name":"builtinfunctions.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473883639","text":"from __future__ import absolute_import\n\nfrom django.conf.urls import patterns, url, include\n\nfrom . 
import views\n\n\nlists_patterns = patterns(\n '',\n url(r'^$', views.TalkListListView.as_view(), name='list'),\n url(r'^create/$', views.TalkListCreateView.as_view(), name='create'),\n #url(r'^check/$', views.check.as_view(), name='check'),\n url(r'^update/(?P[-\\w]+)/$', views.TalkListUpdateView.as_view(), name='update'),\n url(r'^d/(?P[-\\w]+)/$', views.TalkListDetailView.as_view(), name='detail'),\n)\n\nurlpatterns = patterns(\n '',\n url(r'^lists/', include(lists_patterns, namespace='lists')),\n)\n","sub_path":"talks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"17596904","text":"# Domain types attributes are all strings. Here is functions used to convert from other types to Domain attributes and vice versa. \n# Functions here should only be used for the above stated purpose.\n\nfrom typing import List\n\ndef str_to_bool(v: str) -> bool:\n if v:\n if v.lower() == \"true\":\n return True\n return False\n\ndef str_to_int(v: str) -> int:\n if v:\n return int(v)\n return int()\n\ndef str_to_int_list(v: str) -> List[int]:\n retVal = []\n if v:\n split = v.split(\",\")\n for e in split:\n retVal.append(int(e))\n return retVal\n\ndef str_to_float(v: str) -> float:\n if v:\n return float(v)\n return float()\n\ndef object_to_str(o: object) -> str:\n if o == None:\n return \"\"\n s = str(o)\n l = s.lower()\n if l == \"false\" or l == \"true\":\n return l\n return s\n\ndef get_dict_value(name: str, dict, default: str = \"\") -> str:\n if name in dict:\n return object_to_str(dict[name])\n return default\n","sub_path":"python_src/dewco/domain/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"130328540","text":"#coding=utf-8\nimport time\nimport pandas as pd\nfrom lxml import etree\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n'''\n智联招聘 \n爬取范围:东莞 python\n爬取字段:岗位、公司、薪资、地区\n时间:2020.4.12\n'''\n\ndef get_data(url):\n # 设置浏览器\n option = webdriver.ChromeOptions()\n # 不加载图形界面\n # option.add_argument('--headless')\n # 不加载图片, 提升速度\n # option.add_argument('blink-settings=imagesEnabled=false')\n option.add_experimental_option('excludeSwitches', ['enable-automation'])\n user_agent = (\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36\"\n )\n option.add_argument('User-Agent=%s' % user_agent)\n # 加载本地cookie配置\n option.add_argument('--user-data-dir=C:\\\\Users\\\\KinYohi\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data')\n\n # 启动浏览器\n browser = webdriver.Chrome(executable_path=\"./chromedriver.exe\", options=option)\n # 防止JS检测\n browser.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument',{'source': 'Object.defineProperty(navigator, \"webdriver\", {get: () => undefined})'})\n browser.get(url)\n time.sleep(10)\n WebDriverWait(browser, 10)\n data = etree.HTML(browser.page_source)\n job, company, salary, area = [], [], [], []\n box = data.xpath('//div[@id=\"listContent\"]')\n for i in box:\n # 岗位字段\n job = i.xpath('div/div/a/div[1]/div[1]/span/@title')\n # 公司字段\n company = i.xpath('div/div/a/div[1]/div[2]/a/text()')\n # 薪资字段\n salary = i.xpath('div/div/a/div[2]/div[1]/p/text()')\n # 地区字段\n area = i.xpath('div/div/a/div[2]/div[1]/ul/li[1]/text()')\n\n for i in range(0, len(job)):\n print([job[i], company[i], salary[i], area[i]])\n 
result.append([job[i], company[i], salary[i], area[i]])\n browser.close()\n\nif __name__ == '__main__':\n result = []\n urls = ['https://sou.zhaopin.com/?p={}&jl=779&kw=python&kt=3'.format(i) for i in range(1, 3)]\n for url in urls:\n print('Scraping %s' % url)\n data = get_data(url)\n if data != None:\n result.append(data)\n df = pd.DataFrame(result, columns=['岗位', '公司', '薪资', '地区'])\n print(df)\n df.to_excel('python.xls', index=False, encoding='utf-8')\n print('Success')","sub_path":"智联招聘/zhilian.py","file_name":"zhilian.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"205417033","text":"import os\nimport sys\ndef h(options, buildout):\n c = os.getcwd()\n os.chdir(options['compile-directory'])\n if not 'freebsd' in sys.platform.lower():\n os.system(\"autoreconf -ifv\")\n if sys.platform.startswith('cygwin'):\n r = open('config.h.in').readlines()\n r.append('\\n\\n#define _POSIX_C_SOURCE 200112L\\n\\n')\n open('config.h.in', 'w').writelines(r)\ndef p(options, buildout):\n if sys.platform.startswith('cygwin'):\n c = os.getcwd()\n os.chdir(options['compile-directory'])\n r = open('config.h').readlines()\n r.append('\\n\\n#define _POSIX_C_SOURCE 200112L\\n\\n')\n open('config.h', 'w').writelines(r)\n \ndef q(options, buildout):\n import pdb;pdb.set_trace()\n\n","sub_path":"h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"363885610","text":"from random import shuffle\nfrom typing import Any, Tuple\n\ndef min_max(l: list[Any], a: int, b: int) -> Tuple[int, int]:\n partie = l[a:b] # On prend la sous-partie nécessaire\n\n Min, Max = 0, 0\n for i in range(len(partie)): # Pour chaque élément de la liste\n if partie[i] > partie[Max]: # On regarde si c'est le maximum\n Max = i\n \n if partie[i] < partie[Min]: # On regarde si c'est le minimum\n Min = i\n \n return Min+a, Max+a # On ajoute a car on a décalé les indices de -a en prenant une partie\n\n\ndef permute(l: list[Any], a: int, b: int) -> None:\n l[a], l[b] = l[b], l[a] # Un assignement double permet de faire cela en une ligne\n\n\ndef tri(l: list[Any]) -> list:\n for i in range(len(l)//2): # Pour une moitié de liste (on traite les éléments 2 par 2)\n Min, Max = min_max(l, i, -1-i) # On cherche le min et le max\n if Min == len(l)-1-i and Max == i:\n l[Min], l[Max] = l[Max], l[Min]\n elif Min == len(l)-1-i:\n l[Min], l[i] = l[i], l[Min]\n l[Max], l[len(l)-1-i] = l[len(l)-1-i], l[Max]\n else:\n l[Max], l[len(l)-1-i] = l[len(l)-1-i], l[Max]\n l[Min], l[i] = l[i], l[Min]\n \n return l # On retourne la liste triée\n\nliste = [*range(100)]\nshuffle(liste)\ntri(liste)\nprint(liste)","sub_path":"ds2/tri_liste.py","file_name":"tri_liste.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"139419262","text":"# -*- coding: utf-8 -*-\nimport threading\nfrom Tkinter import *\nimport time\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect(('127.0.0.1', 9999))\n\nkey_status = [0, 0, 0, 0]\n\n\ndef key_press(event):\n print(\"key press \", event.keycode)\n if event.char == u'\\uf700':\n key_status[0] = 1\n elif event.char == u'\\uf701':\n key_status[1] = 1\n elif event.char == u'\\uf702':\n key_status[2] = 1\n elif event.char == u'\\uf703':\n key_status[3] = 1\n\n\ndef key_release(event):\n print(\"key up \", 
event.keycode)\n if event.char == u'\\uf700':\n key_status[0] = 0\n elif event.char == u'\\uf701':\n key_status[1] = 0\n elif event.char == u'\\uf702':\n key_status[2] = 0\n elif event.char == u'\\uf703':\n key_status[3] = 0\n\n\ndef tick():\n while True:\n key1 = key_status[0] == 1\n key2 = key_status[1] == 1\n key3 = key_status[2] == 1\n key4 = key_status[3] == 1\n\n direction = 5\n\n if key1 and key2:\n label[\"text\"] = \"fuck up \"\n print(\"fuck up\")\n elif key3 and key4:\n label[\"text\"] = \"fuck up \"\n print(\"fuck up\")\n elif key1 and key3:\n label[\"text\"] = \"前左\"\n direction = 7\n elif key1 and key4:\n label[\"text\"] = \"前右\"\n direction = 9\n elif key2 and key3:\n label[\"text\"] = \"后左\"\n direction = 1\n elif key2 and key4:\n label[\"text\"] = \"后右\"\n direction = 3\n elif key1:\n label[\"text\"] = \"前\"\n direction = 8\n elif key2:\n label[\"text\"] = \"后\"\n direction = 2\n else:\n label[\"text\"] = \"fuck \"\n s.send(bytes(direction))\n time.sleep(0.1)\n\n\nroot = Tk()\n\nroot.title(\"Tk 画布捕获键盘\")\nlabel = Label(root, text=\"请输入...\", bg='white', width=10, height=3)\nlabel.pack(side=TOP, fill=X)\n\ntick_thread = threading.Thread(target=tick)\ntick_thread.start()\n\nroot.bind(\"\", key_release)\nroot.bind(\"\", key_press)\nroot.mainloop()\n","sub_path":"server/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615651249","text":"########################################################################################################################\r\n# #\r\n# MIT License #\r\n# #\r\n# Copyright (c) 2018 Telefonica R&D #\r\n# #\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated #\r\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the #\r\n# rights in the Software without restriction, including without limitation the rights o use, copy, modify, merge, #\r\n# publish, to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and #\r\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions: #\r\n# #\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO #\r\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.#\r\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN #\r\n# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\r\n# DEALINGS IN THE SOFTWARE. 
#\r\n# #\r\n########################################################################################################################\r\n\r\nfrom __future__ import print_function\r\n\r\nimport time\r\nimport socket\r\nimport json\r\nimport logging\r\nimport sys\r\n\r\n\r\n# Setting log\r\nimport traceback\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.DEBUG)\r\nhandler = logging.StreamHandler(sys.stdout)\r\nlog_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nhandler.setFormatter(log_format)\r\nlogger.addHandler(handler)\r\nlogger.info(\"Setting log\")\r\n\r\n\r\nconfig_ip='0.0.0.0'\r\nconfig_port= 4114\r\n\r\nif __name__ == '__main__':\r\n\r\n try:\r\n\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock.bind((config_ip,config_port))\r\n ack = {\"ack\": \"OK\"}\r\n\r\n while True:\r\n\r\n logger.info(\"#############################################################################################\")\r\n logger.info(\"###################################### LOOP listening ######################################\")\r\n logger.info(\"#############################################################################################\")\r\n\r\n udp_msg, udp_ip = sock.recvfrom(1024)\r\n logger.info(\"Received message[ %s ] from [ %s ]\" % (udp_msg, udp_ip[0]))\r\n\r\n\r\n ack_msg = json.dumps(ack)\r\n logger.info(\"Sending ACK [ %s ]\" % ack_msg)\r\n\r\n\r\n logger.info(\"Sending message[ %s ] to [ %s:%s ]\" % (ack_msg, udp_ip[0], udp_ip[1]))\r\n sock.sendto(ack_msg,udp_ip)\r\n\r\n logger.info(\"ACK sent.\")\r\n\r\n except Exception as e:\r\n logger.error(\"exception main()\")\r\n logger.error(\"message:{}\".format(e.message))\r\n traceback.print_exc(file=sys.stdout)\r\n\r\n\r\n","sub_path":"scripts/Python/Send_ACK/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"41893509","text":"# Troposphere to create CloudFormation template for the ECS roles\n# By Jason Umiker (jason.umiker@gmail.com)\n\nfrom troposphere import Template, Ref, Output, GetAtt, iam\n\nt = Template()\nt.add_version('2010-09-09')\n\n# Create the Instance Role\nInstanceRole = t.add_resource(iam.Role(\n \"ECSInstanceRole\",\n AssumeRolePolicyDocument={\n 'Statement': [{\n 'Effect': 'Allow',\n 'Principal': {'Service': ['ec2.amazonaws.com']},\n 'Action': [\"sts:AssumeRole\"]\n }]},\n))\n\nInstanceProfile = t.add_resource(iam.InstanceProfile(\n \"ECSInstanceProfile\",\n Roles=[Ref(InstanceRole)]\n))\n\n# Create the Spot Fleet Role\nSpotFleetRole = t.add_resource(iam.Role(\n \"ECSSpotFleetRole\",\n AssumeRolePolicyDocument={\n 'Statement': [{\n 'Effect': 'Allow',\n 'Principal': {'Service': ['spotfleet.amazonaws.com']},\n 'Action': [\"sts:AssumeRole\"]\n }]},\n))\n\n# Create the ECS Instance Policy\nInstancePolicy = t.add_resource(iam.PolicyType(\n \"ECSInstancePolicy\",\n PolicyName=\"ECSInstancePolicy\",\n PolicyDocument={'Version': '2012-10-17',\n 'Statement': [{'Action': [\"ecs:CreateCluster\",\n \"ecs:DeregisterContainerInstance\",\n \"ecs:DiscoverPollEndpoint\",\n \"ecs:Poll\",\n \"ecs:RegisterContainerInstance\",\n \"ecs:StartTelemetrySession\",\n \"ecs:UpdateContainerInstancesState\",\n \"ecs:Submit*\",\n \"ecr:GetAuthorizationToken\",\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:BatchGetImage\",\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\"],\n 'Resource': ['*'],\n 'Effect': 'Allow'},\n ]},\n 
Roles=[Ref(InstanceRole)],\n))\n\n# Create the ECS Instance Policy\nSpotFleetPolicy = t.add_resource(iam.PolicyType(\n \"ECSSpotFleetPolicy\",\n PolicyName=\"SpotFleetPolicy\",\n PolicyDocument={'Version': '2012-10-17',\n 'Statement': [{'Action': [\"ec2:DescribeImages\",\n \"ec2:DescribeSubnets\",\n \"ec2:RequestSpotInstances\",\n \"ec2:TerminateInstances\",\n \"ec2:DescribeInstanceStatus\",\n \"iam:PassRole\"],\n 'Resource': ['*'],\n 'Effect': 'Allow'},\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\"\n ],\n \"Resource\": [\n \"arn:aws:elasticloadbalancing:*:*:loadbalancer/*\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"elasticloadbalancing:RegisterTargets\"\n ],\n \"Resource\": [\n \"*\"\n ]\n }\n ]},\n Roles=[Ref(SpotFleetRole)],\n))\n\n# Output the Instance Role Arn\nt.add_output(Output(\n \"ECSInstanceRoleArn\",\n Value=GetAtt(InstanceRole, \"Arn\"),\n Description=\"Instance Role Arn\"\n))\n\n# Output the Spot Fleet Role Name\nt.add_output(Output(\n \"ECSSpotFleetRoleName\",\n Value=Ref(SpotFleetRole),\n Description=\"Spot Fleet Role Name\"\n))\n\n# Output the Instance Profile Name\nt.add_output(Output(\n \"ECSInstanceProfileName\",\n Value=Ref(InstanceProfile),\n Description=\"Instance Profile Name\"\n))\n\nprint(t.to_json())","sub_path":"ecs_roles.py","file_name":"ecs_roles.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623367069","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doors', '0006_threedtext'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='clothes',\n name='shopid',\n field=models.ForeignKey(to='doors.Door', verbose_name='drzwi', related_name='clothes', db_column='shopID'),\n ),\n migrations.AlterField(\n model_name='doorpickup',\n name='parentid',\n field=models.ForeignKey(to='doors.Door', related_query_name='doorpickup', verbose_name='drzwi', related_name='doorpickups', db_column='parentID'),\n ),\n migrations.AlterField(\n model_name='shop',\n name='shopid',\n field=models.ForeignKey(to='doors.Door', verbose_name='drzwi sklepu', related_name='shop', db_column='shopID'),\n ),\n ]\n","sub_path":"doors/migrations/0007_auto_20150728_2317.py","file_name":"0007_auto_20150728_2317.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"567442949","text":"# Copyright 2017 Naoya Inoue\n\"\"\"A decoder that splits a string into tokens and returns the\nindividual tokens and the length.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.python.slim.data import data_decoder\n\n\nclass TokensWithCopySVDecoder(data_decoder.DataDecoder):\n \"\"\"A DataProvider that splits a string tensor into individual tokens and\n returns the tokens and the length.\n Optionally prepends or appends special tokens.\n\n Args:\n delimiter: Delimiter to split on. 
Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n \"\"\"\n\n def __init__(self,\n delimiter=\" \",\n tokens_feature_name=\"tokens\",\n length_feature_name=\"length\",\n copysv_feature_name=\"copysv\",\n prepend_token=None,\n append_token=None):\n self.delimiter = delimiter\n self.tokens_feature_name = tokens_feature_name\n self.length_feature_name = length_feature_name\n self.copysv_feature_name = copysv_feature_name\n self.prepend_token = prepend_token\n self.append_token = append_token\n\n def decode(self, data, items):\n decoded_items = {}\n\n # Split tokens\n data_st = tf.string_split([data], delimiter=\"\\t\").values\n tokens, copysv = data_st[0], data_st[1]\n tokens = tf.string_split([tokens], delimiter=self.delimiter).values\n copysv = tf.string_to_number(tf.string_split([copysv], delimiter=self.delimiter).values, out_type=tf.int32)\n\n # Add one extra dummy dimension (for SOS, EOS)\n copysv = tf.concat([[-1], copysv], 0)\n copysv = tf.concat([copysv, [-1]], 0)\n\n # Optionally prepend a special token\n if self.prepend_token is not None:\n tokens = tf.concat([[self.prepend_token], tokens], 0)\n\n # Optionally append a special token\n if self.append_token is not None:\n tokens = tf.concat([tokens, [self.append_token]], 0)\n\n decoded_items[self.length_feature_name] = tf.size(tokens)\n decoded_items[self.tokens_feature_name] = tokens\n decoded_items[self.copysv_feature_name] = copysv\n\n return [decoded_items[_] for _ in items]\n\n def list_items(self):\n return [self.tokens_feature_name, self.length_feature_name, self.copysv_feature_name]\n","sub_path":"seq2seq/data/tokens_with_copysv_decoder.py","file_name":"tokens_with_copysv_decoder.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"216290539","text":"import heapq\n\n\nclass Solution:\n\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n ugly_numbers = [1] * n\n hq = [(prime, 1, prime) for prime in [2, 3, 5]]\n heapq.heapify(hq)\n for i in range(1, n):\n ugly_numbers[i] = hq[0][0]\n while ugly_numbers[i] == hq[0][0]:\n _, idx, prime = heapq.heappop(hq)\n heapq.heappush(hq, (ugly_numbers[idx] * prime, idx + 1, prime))\n return ugly_numbers[-1]\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.nthUglyNumber(10))\n","sub_path":"algorithm/leetcode/ugly_number_264/solution-1.py","file_name":"solution-1.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"440204460","text":"# usage: \n# python bulk_tester_snn.py --test-images kaggle_image_data/PrivateTest --model output/snn123.hdf5 --shape-predictor shape_predictor_68_face_landmarks.dat\n\nfrom imutils import face_utils\nimport dlib\nimport numpy as np\nfrom keras.models import load_model\nimport time\nimport sys\nimport argparse\nimport cv2\nimport imutils\nfrom keras.preprocessing.image import img_to_array\nfrom imutils import paths\nimport csv\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to input keras model file\")\nap.add_argument(\"-i\", \"--test-images\", required=True,\n help=\"path to input image \")\nap.add_argument(\"-s\", \"--shape-predictor\", required=True,\n help=\"path to shape predictors\")\nargs = 
vars(ap.parse_args())\n\ndef image_to_feature_vector(image, size=(48, 48)):\n # resize the image to a fixed size, then flatten the image into\n # a list of raw pixel intensities\n return cv2.resize(image, size).flatten()\n \n# let's go code an faces detector(HOG) and after detect the \n# landmarks on this detected face\n\n# p = our pre-treined model directory, on my case, it's on the same script's diretory.\n# p = \"shape_predictor_68_face_landmarks.dat\"\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args['shape_predictor'])\n\n#Dictionary for emotion recognitiomodel output and emotions\nemotions = {0:\"angry\", 1:\"disgust\", 2:\"fear\", 3:\"happy\", 4:\"sad\", 5:\"surprise\", 6:\"neutral\"}\n\n#initialize counters for Analysis\ntotal_images = 0\ntotal_correct = 0\ntotal_wrong = 0\n\n#csv headers\ncsvData = [['imageTitle', 'OriginalLabel','finalPrediction', 'full_predictions']]\ncsvData2 = ['Total_images_input','Total_images_calculated', 'total_correct', 'total_wrong', 'total_accuracy']\ncsvData3 = ['original_lable', 'no: of correct', 'no: of wrong', 'total_img_category', 'percentage_accuracy']\n#\n# Each array in below Dict has [total_correct, total_wrong, total_imgs_in_this_cat]\naccuracy_dict = {\"angry\":[0,0,0], \"disgust\":[0,0,0], \"fear\":[0,0,0], \"happy\":[0,0,0], \"sad\":[0,0,0], \"surprise\":[0,0,0], \"neutral\":[0,0,0]}\n# loading the trained NN model\nprint(\"[INFO] loading the pre-trained model for emotion prediction......\")\nemotion_classifier = load_model(args['model'], compile=False)\n\ntry:\n # loop over our testing images\n for imagePath in paths.list_images(args[\"test_images\"]):\n # load the image, resize it to a fixed 32 x 32 pixels (ignoring\n # aspect ratio), and then extract features from it\n print(\"[INFO] classifying {}\".format(\n imagePath[imagePath.rfind(\"/\") + 1:]))\n url_split = imagePath.split('/')\n img_name, img_label, csvName = url_split[-1],emotions[int(url_split[-2])], url_split[-3]\n total_images += 1\n accuracy_dict[img_label][2] += 1\n image = cv2.imread(imagePath)\n frame = cv2.imread(imagePath)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = imutils.resize(gray, width=500)\n\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = predictor(gray, rect)\n # print(\"shape directly from predictor is:{}\".format(shape))\n\n shape = face_utils.shape_to_np(shape)\n\n roi = image_to_feature_vector(gray)\n roi = roi.astype(\"float\") / 255.0\n #roi = img_to_array(roi)\n roi = np.expand_dims(roi, axis=0)\n preds = emotion_classifier.predict(roi)[0]\n emotion_probability = np.max(preds)\n label1 = emotions[preds.argmax()]\n\n label = \"{}: {:.2f}%\".format(emotions[preds.argmax()],\n emotion_probability * 100)\n if img_label == label1:\n total_correct += 1\n accuracy_dict[img_label][0] += 1\n else:\n total_wrong += 1 \n accuracy_dict[img_label][1] += 1 \n preds_label = {}\n for (i, emo) in enumerate(preds):\n preds_label[emotions[i]] = \"{:.2f}%\".format(preds[i] * 100)\n\n print(\"=========preds:{} and emotion_probability:{} and label:{}\".format(preds,emotion_probability,label))\n # print(\"shape directly from faceutils nptoshape is:{}\".format(type(shape)))\n # print(\"shape directly from faceutils nptoshape is length:{}\".format(len(shape)))\n # Insert CSV data\n 
csvData.append([img_name,img_label,label,preds_label])\n\n # convert dlib's rectangle to a OpenCV-style bounding box\n # [i.e., (x, y, w, h)], then draw the face bounding box\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # show the face number\n cv2.putText(image, \"Face #{}\".format(i + 1), (x - 10, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n # loop over the (x, y)-coordinates for the facial landmarks\n # and draw them on the image\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n\n cv2.putText(image, label, (10, 35), cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, (0, 255, 0), 3)\n\n # show the output image with the face detections + facial landmarks\n # cv2.imshow(\"Output\", image)\n # cv2.waitKey(0)\n # if key == ord(\"q\"):\n # break\n # cv2.destroyAllWindows()\nexcept Exception as e:\n raise e\nelse:\n pass\nfinally:\n \n csvData.append(csvData2)\n csvData.append([total_images, total_correct + total_wrong ,total_correct,total_wrong, 100 * float(total_correct)/float(total_correct + total_wrong)])\n csvData.append(csvData3)\n for key, value in accuracy_dict.items():\n csvData.append([key, value[0], value[1], value[2], 100 * float(value[0])/float(value[0] + value[1])])\n with open(csvName + '.csv', 'wb') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(csvData) \n\n","sub_path":"dev_code/snn/simple-neural-network/network_tester_1/bulk_tester_snn.py","file_name":"bulk_tester_snn.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"496972022","text":"import re\n#规定密钥\nls1 = [0,18,9,13,25,19,7,15,6,21,3,10,4,16,8,11,22,17,1,24,5,23,12,20,2,14]\nls2 = [0,5,3,14,2,13,11,22,4,15,1,21,18,10,17,24,23,12,6,9,7,20,8,25,16,19]\n\ndef crypt_rotor_one_word(word, ls1, ls2):\n left_ls1, left_ls2 = [i for i in range(26)], [i for i in range(26)]\n right_ls1, right_ls2 = ls1, ls2\n res = ''\n for letter in word.lower():\n in1 = left_ls1[ord(letter) - ord('a')]\n out1 = right_ls1.index(in1)\n in2 = left_ls2[out1]\n out2= right_ls2.index(in2)\n res += chr(out2 + ord('a'))\n left_ls1, right_ls1 = left_ls1[-1:] + left_ls1[:-1], right_ls1[-1:] + right_ls1[:-1]\n if left_ls1[0] == 0:\n left_ls2, right_ls2 = left_ls2[-1:] + left_ls2[:-1], right_ls2[-1:] + right_ls2[:-1]\n return res\n\ndef decrypt_rotor_one_word(word, ls1, ls2):\n left_ls1, left_ls2 = [i for i in range(26)], [i for i in range(26)]\n right_ls1, right_ls2 = ls1, ls2\n res = ''\n i, j = (len(word) - 1) % 26, (len(word) - 1) // 26\n left_ls1, right_ls1 = left_ls1[-i:] + left_ls1[:-i], right_ls1[-i:] + right_ls1[:-i]\n left_ls2, right_ls2 = left_ls2[-j:] + left_ls2[:-j], right_ls2[-j:] + right_ls2[:-j]\n for letter in word[::-1]:\n in1 = right_ls2[ord(letter) - ord('a')]\n out1 = left_ls2.index(in1)\n in2 = right_ls1[out1]\n out2= left_ls1.index(in2)\n res += chr(out2 + ord('a'))\n if left_ls1[0] == 0:\n left_ls2, right_ls2 = left_ls2[1:] + left_ls2[:1], right_ls2[1:] + right_ls2[:1]\n left_ls1, right_ls1 = left_ls1[1:] + left_ls1[:1], right_ls1[1:] + right_ls1[:1]\n return res[::-1]\n\ndef crypt_decrypt_rotor_sentence(sentence, ls1, ls2, cryp=1):\n res = sentence\n for word in re.findall(r'[a-zA-Z]+', sentence):\n sub_res = crypt_rotor_one_word(word, ls1, ls2) if cryp == 1 \\\n else decrypt_rotor_one_word(word, ls1, ls2)\n res = re.sub('\\\\b' + word + '\\\\b', sub_res, res)\n return 
res","sub_path":"algorithm/Rotor.py","file_name":"Rotor.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"72982947","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom bs4 import NavigableString\nimport requests\nimport time\n\ndef product_crawl1(num):\n print(num)\n headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36'}\n \n initial_link = \"https://www.coupang.com/np/categories/\"+str(num)\n response = requests.get(initial_link, headers = headers)\n soup = BeautifulSoup(response.text, 'lxml')\n\n big_category = soup.select(\"div.search-result > ul > li\")[1]\n temp_name = big_category.text.strip()\n temp_value = big_category.select(\"a\")[0]['href'][15:]\n temp_str = \"1,\" + temp_name+\",\" + temp_value\n txt_File.write(temp_str+'\\n')\n\n category_list=soup.select(\"div#searchCategoryComponent > ul > li\")\n for category in category_list:\n temp_name = category.select('label')[0].text.strip()\n temp_value = category['data-linkcode']\n temp_str = \"2,\" + temp_name+',' + temp_value\n txt_File.write(temp_str+'\\n')\n\n\ndef product_crawl2(num):\n print(num)\n\n options = webdriver.ChromeOptions()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--incognito')\n driver = webdriver.Chrome(\"./chromedriver\", options=options)\n\n initial_link = \"https://www.coupang.com/np/categories/\"+str(num)\n driver.get(initial_link)\n\n folded = driver.find_elements_by_xpath(\"//*[@id=\\\"searchCategoryComponent\\\"]/ul/li/a\")\n for fold in folded:\n driver.execute_script(\"arguments[0].click();\", fold)\n\n req = driver.page_source\n soup = BeautifulSoup(req, 'lxml')\n\n big_category = soup.select(\"div.search-result > ul > li\")[1]\n temp_name = big_category.text.strip()\n temp_value = big_category.select(\"a\")[0]['href'][15:]\n temp_str = \"0,\" + temp_name+\",\" + temp_value\n txt_File.write(temp_str+'\\n')\n\n category_list=soup.select(\"div#searchCategoryComponent > ul > li\")\n for category in category_list:\n temp_name = category.select(\"label\")[0].text.strip()\n temp_value = category['data-linkcode']\n temp_str = \"1,\" + temp_name+\",\" + temp_value\n txt_File.write(temp_str+'\\n')\n\n for final_category in category.select(\"li\"):\n temp_name = final_category.select(\"label\")[0].text.strip()\n temp_value = final_category['data-linkcode']\n temp_str = \"2,\" + temp_name+',' + temp_value\n txt_File.write(temp_str+'\\n')\n \n driver.close()\n\n# get hash list from txt file\ntxt_File = open('all_category3.txt', 'a', encoding='utf-8')\n\ntxt_File.write(\"0,패션의류/잡화,\\n\")\nproduct_crawl1(186764) #여성패션\nproduct_crawl1(187069) #남성패션\nproduct_crawl1(213201) #베이비패션 (0~3세)\nproduct_crawl1(213514) #여아패션 (3세 이상)\nproduct_crawl1(213741) #남아패션 (3세 이상)\nproduct_crawl1(187821) #스포츠패션\nproduct_crawl1(187365) #신발\nproduct_crawl1(187477) #가방/잡화\n# product_crawl1(337) #명품패션\n\nproduct_crawl2(176522) #뷰티 \nproduct_crawl2(221934) #출산/유아동\nproduct_crawl2(194276) #식품\nproduct_crawl2(185669) #주방용품\nproduct_crawl2(115673) #생활용품\nproduct_crawl2(184555) #홈인테리어\nproduct_crawl2(178255) #가전디지털\nproduct_crawl2(317778) #스포츠/레저\nproduct_crawl2(184060) #자동차용품\nproduct_crawl2(317777) #도서/음반/DVD\nproduct_crawl2(317779) #완구/취미\nproduct_crawl2(177295) #문구/오피스\nproduct_crawl2(115674) #반려동물용품\nproduct_crawl2(305798) 
#헬스/건강식품\n\ntxt_File.close()\nprint(\"finish!\")","sub_path":"category_get_all.py","file_name":"category_get_all.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26754127","text":"\nclass Serializer:\n\n def serialize(self, obj):\n self.obj = obj\n obj_name = type(obj).__name__\n if obj_name == \"Blog\":\n return self._blog_serializer()\n\n def _blog_serializer(self):\n blog_serialize = {}\n\n blog_serialize['id'] = self.obj.id\n blog_serialize['name'] = self.obj.name\n blog_serialize['description'] = self.obj.description\n\n return blog_serialize\n","sub_path":"serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"449792119","text":"from flask import Flask, request, jsonify, session\nfrom . import web\nimport requests\n\nfrom app.web.violet_songsheet_functions import SongSheet, Song\nfrom app.web.write_log import send_log\n\n\n'''\n查询歌单返回\n{\n \"code\":0,\n \"data\":[\n {\n \"follow_num\": 关注数量,\n \"info\": 歌单介绍信息,\n \"owner\": 所有者的名字,若是专辑,则为 专辑 两字\"\\u4e13\\u8f91\",\n \"play_times\": 播放量,\n \"sheet_id\": 专辑主键,\n \"sheet_img\": 专辑图片,\n \"sheet_name\": 专辑名字\"\\u4e13\\u8f911\",\n \"thumbs_up_num\": 点赞数\n \"is_followed\": 是否被关注 true/false\n \"is_thumbs_up\": 是否被点赞 true/false\n }\n ]\n}\n查询歌曲返回\n{\n \"code\":0,\n \"data\":[\n {\n \"play_times\": 播放量,\n \"singers\": 歌手(是一个list可能有多个歌手)[\"\\u8521\\u5f90\\u5764\",\"\\u7bee\\u7403\"],\n \"song_album\": 专辑名称,若不存在专辑则返回null\"\\u4e13\\u8f911\",\n \"song_id\": 歌曲主键,\n \"song_img\": 歌曲图片,\n \"song_name\": 歌曲名\"wait wait wait\",\n \"thumbs_up_num\": 点赞数\n \"is_thumbs_up\": 是否被点赞 true/false\n }\n ]\n}\n\n'''\n\n\n# 读取所有歌单信息\n# 无参数\n@web.route('/v1/sheet/all_sheets', methods=['GET', 'POST'])\ndef all_sheets():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n limit = request.form.get('limit')\n if limit is None:\n limit = 50\n send_log('/v1/sheet/all_sheets')\n return SongSheet.sheets_to_jsonify(user_id, SongSheet.query_all(limit))\n\n\n# 读取主页排前十歌单信息\n# 无参数\n@web.route('/v1/sheet/index_sheets', methods=['GET', 'POST'])\ndef index_sheets():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n send_log('/v1/sheet/index_sheets')\n return SongSheet.sheets_to_jsonify(user_id, SongSheet.query_top10())\n\n\n# 搜索属于某user的歌单信息\n@web.route('/v1/sheet/sheets_by_id', methods=['GET', 'POST'])\ndef sheets_by_id():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n sheet_id = None\n if request.method == 'POST':\n sheet_id = request.form.get('sheet_id')\n if request.method == 'GET':\n sheet_id = request.args.get('sheet_id')\n if sheet_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数sheet_id'\n })\n send_log('/v1/sheet/sheets_by_id')\n return SongSheet.sheets_details_to_jsonify(user_id, SongSheet.query_by_id(sheet_id),\n Song.query_by_sheet(sheet_id))\n\n\n# 搜索属于某user的歌单信息\n# 需要登录\n@web.route('/v1/sheet/sheets_by_owner', methods=['GET', 'POST'])\n# @login_required\ndef sheets_by_owner():\n user_id = session.get('user_id')\n if user_id is None:\n user_id = 0\n owner = request.form.get('user_id')\n if owner is None:\n owner = 0\n send_log('/v1/sheet/sheets_by_owner')\n return SongSheet.sheets_to_jsonify(user_id, SongSheet.query_by_owner(owner))\n\n\n# 通过歌单名词模糊搜索歌单\n# 参数'name':歌单模糊查询词,不需要%通配符\n@web.route('/v1/sheet/sheets_by_name', methods=['GET', 'POST'])\ndef 
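# Usage of the type-dispatching Serializer above (the Blog stand-in class is
# illustrative; the real model presumably comes from the ORM layer):
class Blog:
    def __init__(self, id, name, description):
        self.id, self.name, self.description = id, name, description

print(Serializer().serialize(Blog(1, "notes", "personal notes")))
# -> {'id': 1, 'name': 'notes', 'description': 'personal notes'}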
sheets_by_name():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n name = None\n if request.method == 'POST':\n name = request.form.get('name')\n if request.method == 'GET':\n name = request.args.get('name')\n if name is None:\n return all_sheets()\n send_log('/v1/sheet/sheets_by_name')\n return SongSheet.sheets_to_jsonify(user_id, SongSheet.query_by_name(name))\n\n\n# 搜索某user关注的所有歌单(非所有者)\n# 需要登录\n@web.route('/v1/sheet/sheets_by_user', methods=['GET', 'POST'])\n# @login_required\ndef sheets_by_user():\n user_id = session.get('user_id')\n if user_id is None:\n user_id = 0\n q_user_id = request.form.get('user_id')\n if q_user_id is None:\n q_user_id = 0\n send_log('/v1/sheet/sheets_by_user')\n return SongSheet.sheets_to_jsonify(user_id, SongSheet.query_by_user(q_user_id))\n\n\n# 创建歌单\n# 参数'sheet_name':歌单名称\n# 需要登录\n# 'file':专辑图片,(可选项)\n@web.route('/v1/sheet/create_sheet', methods=['GET', 'POST'])\n# @login_required\ndef create_sheet():\n sheet_name = None\n user_id = session.get(\"user_id\")\n sheet_img = None\n if request.method == 'POST':\n sheet_name = request.form.get('sheet_name')\n sheet_img = request.files.get('file')\n if request.method == 'GET':\n sheet_name = request.args.get('sheet_name')\n sheet_img = request.files.get('file')\n if sheet_name is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数sheet_name'\n })\n send_log('/v1/sheet/create_sheet')\n return SongSheet.create_sheet(sheet_name, user_id, sheet_img)\n\n\n# 删除歌单\n# 参数'sheet_id':歌单id\n# 需要登录\n# 注意会删除 歌曲-歌单 表中信息,会删除对应点赞、评论信息,会删除 用户-歌单 表中信息\n# 若owner不符合,不会删除\n@web.route('/v1/sheet/delete_sheet', methods=['GET', 'POST'])\n# @login_required\ndef delete_sheet():\n sheet_id = None\n user_id = session.get(\"user_id\")\n if request.method == 'POST':\n sheet_id = request.form.get('sheet_id')\n if request.method == 'GET':\n sheet_id = request.args.get('sheet_id')\n if sheet_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数sheet_id'\n })\n send_log('/v1/sheet/delete_sheet')\n return SongSheet.delete_sheet(sheet_id, user_id)\n\n\n# 歌单添加歌曲\n# 参数'sheet_id':歌单id\n# 'song_id':歌曲id\n# 需要登录\n@web.route('/v1/sheet/sheet_add_song', methods=['GET', 'POST'])\n# @login_required\ndef sheet_add_song():\n user_id = session.get(\"user_id\")\n sheet_id = None\n song_id = None\n if request.method == 'POST':\n sheet_id = request.form.get('sheet_id')\n song_id = request.form.get('song_id')\n if request.method == 'GET':\n sheet_id = request.args.get('sheet_id')\n song_id = request.args.get('song_id')\n if sheet_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数sheet_name'\n })\n if song_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数song_id'\n })\n send_log('/v1/sheet/sheet_add_song')\n return SongSheet.sheet_add_song(sheet_id, song_id, user_id)\n\n\n# 歌单删除歌曲\n# 参数'sheet_id':歌单id\n# 'song_id':歌曲id\n# 若个单中没有该歌曲,也不会报错\n# 需要登录\n@web.route('/v1/sheet/sheet_delete_song', methods=['GET', 'POST'])\n# @login_required\ndef sheet_delete_song():\n user_id = session.get(\"user_id\")\n sheet_id = None\n song_id = None\n if request.method == 'POST':\n sheet_id = request.form.get('sheet_id')\n song_id = request.form.get('song_id')\n if request.method == 'GET':\n sheet_id = request.args.get('sheet_id')\n song_id = request.args.get('song_id')\n if sheet_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数sheet_name'\n })\n if song_id is None:\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数song_id'\n })\n send_log('/v1/sheet/sheet_delete_song')\n return 
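# Every handler in this module repeats the same form-then-args lookup for each
# parameter. A small helper in the same Flask style would remove that
# duplication (a sketch, not part of the original module):
def get_param(name, default=None):
    # Look in POST form data first, then fall back to the GET query string.
    value = request.form.get(name)
    if value is None:
        value = request.args.get(name)
    return value if value is not None else default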
SongSheet.sheet_delete_song(sheet_id, song_id, user_id)\n\n\n# 读取所有歌曲信息\n# 无参数\n@web.route('/v1/song/all_songs', methods=['GET', 'POST'])\ndef all_songs():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n send_log('/v1/song/all_songs')\n return Song.songs_to_jsonify(user_id, Song.query_all())\n\n\n# 通过歌曲名查询歌曲\n# 参数'name':歌曲名称\n@web.route('/v1/song/songs_by_name', methods=['GET', 'POST'])\ndef songs_by_name():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n name = None\n if request.method == 'POST':\n name = request.form.get('name')\n if request.method == 'GET':\n name = request.args.get('name')\n if name is None:\n return all_songs()\n else:\n url = \"http://shysimon.cn:3000/v1/search\"\n params = {\"keywords\": name}\n res = requests.get(url, params)\n for i in res.json()['result']['songs']:\n Song.add_from_music163(i)\n send_log('/v1/song/songs_by_name')\n return Song.songs_to_jsonify(user_id, Song.query_by_name(name))\n\n\n# 通过歌手查询歌曲\n# 参数'singer_id':歌手主键\n@web.route('/v1/song/songs_by_singer', methods=['GET', 'POST'])\ndef songs_by_singer():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n singer_id = None\n if request.method == 'POST':\n singer_id = request.form.get('singer_id')\n if request.method == 'GET':\n singer_id = request.args.get('singer_id')\n if singer_id is None:\n return all_songs()\n send_log('/v1/song/songs_by_singer')\n return Song.songs_to_jsonify(user_id, Song.query_by_singer(singer_id))\n\n\n# 通过歌单查询歌曲\n# 参数'sheet_id':歌单主键\n@web.route('/v1/song/songs_by_sheet', methods=['GET', 'POST'])\ndef songs_by_sheet():\n user_id = session.get(\"user_id\")\n if user_id is None:\n user_id = 0\n sheet_id = None\n if request.method == 'POST':\n sheet_id = request.form.get('sheet_id')\n if request.method == 'GET':\n sheet_id = request.args.get('sheet_id')\n if sheet_id is None:\n return all_songs()\n send_log('/v1/song/songs_by_sheet')\n return Song.songs_to_jsonify(user_id, Song.query_by_sheet(sheet_id))\n\n\n@web.route('/v1/song/geturl', methods=['GET', 'POST'])\ndef get_music163_url():\n music163_id = None\n if request.method == 'POST':\n music163_id = request.form.get('music163_id')\n if request.method == 'GET':\n music163_id = request.args.get('music163_id')\n if music163_id is None:\n send_log('/v1/song/geturl')\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数music163_id'\n })\n song_id = request.form.get('song_id')\n if song_id is None:\n send_log('/v1/song/geturl')\n return jsonify({\n 'code': -1,\n 'errMsg': '缺少参数song_id'\n })\n Song.play(song_id)\n send_log('/v1/song/geturl')\n return jsonify({\n 'code': 0,\n 'url': get_url_from_music163(music163_id),\n 'lrc': get_lyric_from_music163(music163_id)\n })\n\n\ndef get_url_from_music163(music163_id):\n url = \"http://shysimon.cn:3000/v1/music/url\"\n params = {\"id\": music163_id, \"br\": 320000}\n r = requests.get(url, params)\n return r.json()['data'][0]['url']\n\n\ndef get_lyric_from_music163(music163_id):\n url = \"http://shysimon.cn:3000/v1/lyric\"\n params = {\"id\": music163_id}\n r = requests.get(url, params)\n return r.json()['lrc']['lyric']\n\n# if __name__ == '__main__':\n# app.run()\n","sub_path":"app/web/app_song_sheet.py","file_name":"app_song_sheet.py","file_ext":"py","file_size_in_byte":11261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"160043870","text":"import socket\nimport time\nimport serial\nimport struct\nimport threading\nimport argparse\n\nUSE_MULTICAST = 
False\nMULTICAST_ADDR = \"231.2.3.4\"\nMULTICAST_PORT = 5300\nMsgFormat = struct.Struct(\"!d\")\n\n\ndef TTLPulseServer(serialDevice, serialBaud, interval=1):\n while True:\n try:\n serialConn = None\n if serialDevice is not None:\n # Open the USB0 port and listen for TTL Pulses\n serialConn = serial.Serial(serialDevice, serialBaud)\n\n # Open UDP Socket for Multicast (or Broadcast)\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n # Option REUSEPORT is not always available\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n except Exception:\n pass\n\n if USE_MULTICAST:\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)\n else:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n pulseCnt = 0\n while True:\n # When receive a pulse, send out a UDP broadcast\n if serialConn is not None:\n pulse = serialConn.read()\n pulseStr = pulse.decode(\"utf-8\")\n pulseCnt = int(pulseStr)\n assert 0 <= pulseCnt <= 9\n else:\n time.sleep(interval)\n pulseCnt = (pulseCnt + 1) % 10\n\n timestamp = time.time()\n msg = MsgFormat.pack(timestamp)\n if USE_MULTICAST:\n sock.sendto(msg, (MULTICAST_ADDR, MULTICAST_PORT))\n else:\n sock.sendto(msg, ('', MULTICAST_PORT))\n # print(\"Send timestamp {}\".format(timestamp))\n\n sock.close()\n\n except serial.SerialException as err:\n # sleep and try again\n time.sleep(30)\n continue\n # except OSError as err:\n # # likely a socket error\n serialConn.close()\n\n\nclass TTLPulseClient():\n def __init__(self):\n # Class attributes\n self.sock = None\n self.timestamp = 0\n self.serverTimestamp = 0\n self.resetThread = None\n self.listenThread = None\n self.PulseNotify = None\n self.shouldExit = False\n self.maxTRTime = 2 # 2 seconds max TR time by default\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n # Option REUSEPORT is not always available\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n except Exception:\n pass\n\n if USE_MULTICAST:\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)\n sock.bind((MULTICAST_ADDR, MULTICAST_PORT))\n # Set more multicast options\n intf = socket.gethostbyname(socket.gethostname())\n sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(intf))\n sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,\n socket.inet_aton(MULTICAST_ADDR) + socket.inet_aton(intf))\n else:\n # Use Broadcast\n sock.bind((\"\", MULTICAST_PORT))\n self.sock = sock\n # start reset timestamp thread\n self.resetThread = threading.Thread(target=self.resetTimestampThread)\n self.resetThread.setDaemon(True)\n self.resetThread.start()\n # start listen thread\n self.PulseNotify = threading.Event()\n self.listenThread = threading.Thread(target=self.listenTTLThread)\n self.listenThread.setDaemon(True)\n self.listenThread.start()\n\n def __del__(self):\n self.close()\n\n def close(self):\n self.shouldExit = True\n if self.sock is not None:\n self.sock.close()\n self.sock = None\n if self.listenThread is not None:\n self.listenThread.join(timeout=1)\n self.listenThread = None\n if self.resetThread is not None:\n self.resetThread.join(timeout=1)\n self.resetThread = None\n\n def getTimestamp(self):\n return self.timestamp\n\n def getServerTimestamp(self):\n return self.serverTimestamp\n\n def getPulseEvent(self):\n return self.PulseNotify\n\n def setMaxTRTime(self, maxTR):\n self.maxTRTime = 
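# Typical use of TTLPulseClient from experiment code: construct it, then block
# on the threading.Event it exposes until the next TTL timestamp arrives
# (the timeout value here is illustrative):
client = TTLPulseClient()
client.setMaxTRTime(2)
pulse_event = client.getPulseEvent()
if pulse_event.wait(timeout=5):
    print("pulse received at", client.getTimestamp())
client.close()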
maxTR\n\n def resetTimestampThread(self):\n prevTimestamp = self.timestamp\n while not self.shouldExit:\n # sleep 1.5 times the max TR time\n time.sleep(self.maxTRTime * 1.5)\n if self.timestamp == prevTimestamp:\n # print(\"Client reset timestamp\")\n self.timestamp = 0\n self.serverTimestamp = 0\n prevTimestamp = self.timestamp\n\n def listenTTLThread(self):\n while not self.shouldExit:\n # Clear the PulseNotify Event so calling threads will wait for time signal\n self.PulseNotify.clear()\n try:\n data, addr = self.sock.recvfrom(256)\n except OSError as err:\n # print(\"ListenTTLThread drop connection\")\n return\n msg = MsgFormat.unpack(data)\n self.serverTimestamp = msg[0]\n self.timestamp = time.time()\n # Set the PulseNotify Event to wake up calling threads\n self.PulseNotify.set()\n # print(\"Pulse: {} {}\".format(self.timestamp, time.time() - self.timestamp))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', action=\"store\", dest=\"serialDev\")\n parser.add_argument('-b', action=\"store\", dest=\"serialBaud\")\n parser.add_argument('-i', action=\"store\", dest=\"interval\", type=float)\n args = parser.parse_args()\n if args.interval is None:\n args.interval = 1\n if args.serialBaud is None:\n args.serialBaud = 9600\n if args.serialDev is None:\n print(\"No serial device specified, sending pulse every {} sec\".format(args.interval))\n TTLPulseServer(None, 0, interval=args.interval)\n else:\n print(\"Listen on device {} {}\".format(args.serialDev, args.serialBaud))\n TTLPulseServer(args.serialDev, args.serialBaud)\n","sub_path":"rtfMRI/ttlPulse.py","file_name":"ttlPulse.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"157026484","text":"import matplotlib.pyplot as plt\n\nfrom config import alpr_config as config\nfrom license_plate_image_augmentor import LicensePlateImageAugmentor\nfrom pyimagesearch.io import Hdf5DatasetLoader\n\nbatch_size = 6\n\nloader = Hdf5DatasetLoader()\nimages, labels = loader.load(config.TRAIN_HDF5, shuffle=True, max_items=batch_size)\n\naugmentor = LicensePlateImageAugmentor(config.IMAGE_WIDTH, config.IMAGE_HEIGHT, config.SUN397_HDF5)\n\ncols = 2\nrows = len(images) // cols\n\nimage_index = 0\nfig, axarr = plt.subplots(rows, cols, figsize=(15, 50))\nfor r in range(rows):\n for c in range(cols):\n image = images[image_index]\n image = augmentor.generate_plate_image(image)\n axarr[r, c].axis(\"off\")\n axarr[r, c].title.set_text(labels[image_index])\n axarr[r, c].imshow(image, cmap='gray')\n image_index += 1\n\nplt.show()\n","sub_path":"test_image_augmentor.py","file_name":"test_image_augmentor.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"234710650","text":"\nclass ShapeException(Exception):\n\n def __init__(self, node):\n message = \"Cannot infer shape for %s because shape_in is undefined.\" % (\n node\n )\n super(ShapeException, self).__init__(message)\n\nclass DimensionException(Exception):\n\n def __init__(self, node_in, node_out):\n message = \"Shape mismatch between %s and %s. Expected in: %s. 
Inferred in: %s\" % (\n node_in,\n node_out,\n str(node_out.shape_in),\n str(node_in.shape_out),\n )\n super(DimensionException, self).__init__(message)\n","sub_path":"deepx/node/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243272339","text":"\"\"\"Django_blog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom blog import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login/', views.login, name='blog_login'),\n url(r'^reg/', views.reg, name='blog_reg'),\n url(r'^home/', views.home, name='blog_home'),\n url(r'^add/', views.add, name='blog_add'),\n url(r'^edit/(?P\\d+)/', views.edit, name='blog_edit'),\n url(r'^del/(?P\\d+)/', views.delete, name='blog_del'),\n url(r'^logout/', views.logout, name='blog_logout'),\n url(r'^myblogs/', views.myblogs, name='my_blogs'),\n url(r'^content/(?P\\d+)/', views.content, name='blog_content'),\n url(r'^sort_blog/(?P\\D+)/', views.sortblog, name='sort_blog'),\n url(r'^$', views.home)\n\n]\n","sub_path":"Django_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"503535253","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\n# --------------------------- custom Network class ------------------------\n\ndef get_act_fun(func):\n if (func == \"sin\"):\n return torch.sin\n if (func == \"sigmoid\"):\n return torch.sigmoid\n if (func == \"relu\"):\n return torch.relu\n if (func == \"tanh\"):\n return torch.tanh\n print(\"Wrong Function Given\")\n\n\ndef get_optim(model, learning_rate, optim):\n if optim == 'sgd':\n opt = torch.optim.SGD(model.myparameters, lr=learning_rate)\n elif optim == 'sgd+mom':\n opt = torch.optim.SGD(model.myparameters, lr=learning_rate, momentum=0.9, nesterov=True)\n elif optim == 'adam':\n opt = torch.optim.Adam(model.myparameters, lr=learning_rate)\n else:\n print(\"Error Wrong Optimizer name!\")\n\n return opt\n\n\ndef get_loss(model, loss):\n if loss == \"mse\":\n l = torch.nn.MSELoss() # use preimplemented mse loss\n elif loss == \"cross_entropy\":\n l = torch.nn.BCEWithLogitsLoss()\n return l\n\n\n# ---------------------- Casual Regression Network ---------------------\nclass Baseline_Net(nn.Module): # implements sin(x_T W x)\n\n def __init__(self, neurons, act_fun, input_dim, learning_rate, optim, loss):\n\n super(Baseline_Net, self).__init__()\n # Calling Super Class's constructor\n self.hidden = []\n self.myparameters = []\n inp = input_dim\n for n in neurons:\n w = Variable(torch.randn(n, inp), requires_grad=True)\n torch.nn.init.xavier_uniform_(w)\n self.myparameters.append(w)\n self.hidden.append(w)\n inp = n\n self.w = Variable(torch.randn(neurons[-1]), 
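# The einsum contraction used in Baseline_Net.forward below is an ordinary
# matrix product against the transposed weight; a minimal equivalence check
# (shapes are illustrative):
import torch

x = torch.randn(8, 3)   # batch of 8 samples, 3 features
w = torch.randn(5, 3)   # 5 units, 3 inputs each
print(torch.allclose(torch.einsum('bi,ni->bn', x, w), x @ w.t()))  # True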
requires_grad=True)\n self.myparameters.append(self.w)\n self.g = get_act_fun(act_fun)\n self.optim = get_optim(self, learning_rate, optim)\n self.loss = get_loss(self, loss)\n\n def forward(self, x, learn=True):\n # WIP\n res = x\n for w in self.hidden:\n res = torch.einsum('bi,ni->bn', [res, w])\n res = self.g(res)\n res = torch.einsum('bn,n->b', [res, self.w])\n return res.flatten()\n\n\nclass RDF_Net(nn.Module): # implements sin(x_T W x)\n\n def __init__(self, act_fun, input_dim, learning_rate, optim, loss):\n super(RDF_Net, self).__init__()\n # Calling Super Class's constructor\n\n self.h = Variable(torch.randn(input_dim, 4), requires_grad=True)\n torch.nn.init.xavier_uniform_(self.h)\n self.o = Variable(torch.randn(4, 1), requires_grad=True)\n torch.nn.init.xavier_uniform_(self.o)\n self.b = Variable(torch.zeros(1), requires_grad=True)\n self.myparameters = [self.h, self.b, self.o]\n self.g = get_act_fun(act_fun)\n self.optim = get_optim(self, learning_rate, optim)\n self.loss = get_loss(self, loss)\n self.e = 2.71828182846\n\n def forward(self, x, learn=True):\n # g(x) = exp(-b||x - m||)\n print(x.shape)\n res = x - self.h\n res = torch.abs(res)\n print(\"yae\")\n # res = -b * res\n x = torch.pow(self.e, x)\n print(x.shape)\n res = x\n\n return res.flatten()\n","sub_path":"KGA-Lab/Networks_Einsum.py","file_name":"Networks_Einsum.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"122845038","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport argparse\n\nFLAGS = None\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef alexnet_model_fn(features, labels, mode, params):\n\n inputs = tf.reshape(features['x'], [-1, 28, 28, 1])\n\n with tf.variable_scope('conv1'):\n conv1 = tf.layers.conv2d(inputs=inputs,\n filters=96,\n kernel_size=[11, 11],\n strides=(3, 3),\n padding='same',\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n with tf.variable_scope('lrn1'):\n lrn1 = tf.nn.local_response_normalization(conv1,\n alpha=1e-4,\n beta=0.75,\n depth_radius=5,\n bias=2.0)\n\n with tf.variable_scope('pool1'):\n pool1 = tf.layers.max_pooling2d(inputs=lrn1,\n pool_size=[3, 3],\n strides=2,\n padding='same')\n\n with tf.variable_scope('conv2'):\n conv2 = tf.layers.conv2d(inputs=pool1,\n filters=256,\n kernel_size=[5, 5],\n strides=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n with tf.variable_scope('lrn2'):\n lrn2 = tf.nn.local_response_normalization(conv2,\n alpha=1e-4,\n beta=0.75,\n depth_radius=5,\n bias=2.0)\n\n with tf.variable_scope('pool2'):\n pool2 = tf.layers.max_pooling2d(inputs=lrn2,\n pool_size=[3, 3],\n strides=2,\n padding='same')\n\n with tf.variable_scope('conv3'):\n conv3 = tf.layers.conv2d(inputs=pool2,\n filters=384,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n with tf.variable_scope('conv4'):\n conv4 = tf.layers.conv2d(inputs=conv3,\n filters=384,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n 
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n with tf.variable_scope('conv5'):\n conv5 = tf.layers.conv2d(inputs=conv4,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding='same',\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n with tf.variable_scope('pool5'):\n pool5 = tf.layers.max_pooling2d(inputs=conv5,\n pool_size=[3, 3],\n strides=2,\n padding='same')\n\n with tf.variable_scope('fc1'):\n pool5_flat = tf.reshape(pool5, [-1, 2 * 2 * 256])\n fc = tf.layers.dense(inputs=pool5_flat,\n units=4096,\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=5e-3),\n bias_initializer=tf.constant_initializer(1e-3))\n fc1 = tf.nn.dropout(fc, keep_prob=params['keep_prob'])\n\n with tf.variable_scope('fc2'):\n fc = tf.layers.dense(inputs=fc1,\n units=4096,\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=5e-3),\n bias_initializer=tf.constant_initializer(1e-3))\n fc2 = tf.nn.dropout(fc, keep_prob=params['keep_prob'])\n\n with tf.variable_scope('fc3'):\n logits = tf.layers.dense(inputs=fc2,\n units=10,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1e-2),\n bias_initializer=tf.zeros_initializer())\n\n predictions = {\n 'classes': tf.argmax(input=logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)\n loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)\n tf.summary.scalar('loss', loss)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_global_step()\n # global_step = tf.to_int32(global_step)\n # learning_rate = tf.train.piecewise_constant(global_step, params['lr_boundaries'], params['lr_values'])\n learning_rate = tf.train.exponential_decay(\n params['learning_rate'],\n global_step,\n params['decay_steps'],\n params['decay_rate'],\n staircase=True\n )\n tf.summary.scalar('learning_rate', learning_rate)\n train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(onehot_labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op\n )\n\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=eval_metric_ops\n )\n\n\nclass TrainInputHook(tf.train.SessionRunHook):\n def after_create_session(self, session, coord):\n session.run(self.iterator.initializer, feed_dict={\n self.features_placeholder: self.features,\n self.labels_placeholder: self.labels\n })\n\n def input_fn(self, features, labels):\n self.features = features\n self.labels = labels\n self.features_placeholder = tf.placeholder(self.features.dtype, self.features.shape)\n self.labels_placeholder = tf.placeholder(self.labels.dtype, self.labels.shape)\n dataset = tf.contrib.data.Dataset.from_tensor_slices((self.features_placeholder, self.labels_placeholder))\n dataset = 
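# For reference, tf.train.exponential_decay with staircase=True reduces to a
# closed form; these values mirror the script's defaults:
#   lr(step) = learning_rate * decay_rate ** (step // decay_steps)
lr0, decay_rate, decay_steps = 1e-3, 0.8, 1000
for step in (0, 1000, 5000):
    print(step, lr0 * decay_rate ** (step // decay_steps))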
dataset.map(self._parse, num_threads=8, output_buffer_size=FLAGS.batch_size * 10)\n dataset = dataset.batch(FLAGS.batch_size)\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=FLAGS.batch_size * 10, seed=1)\n self.iterator = dataset.make_initializable_iterator()\n features, labels = self.iterator.get_next()\n return features, labels\n\n def _parse(self, feature, label):\n rand = tf.random_uniform(shape=[], minval=-5., maxval=5., dtype=tf.float32)\n re_feature = tf.reshape(feature, shape=[28, 28, 1])\n image = tf.contrib.image.rotate(re_feature, rand)\n f = tf.reshape(image, shape=[784, ])\n return {'x': f}, label\n\n\ndef main(unused_argv):\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = mnist.train.images\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n valid_data = mnist.validation.images\n valid_labels = np.asarray(mnist.validation.labels, dtype=np.int32)\n test_data = mnist.test.images\n test_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n run_config = tf.estimator.RunConfig()\n # run_config = run_config.replace(save_checkpoints_steps=3000)\n\n alexnet = tf.estimator.Estimator(\n model_fn=alexnet_model_fn,\n model_dir=FLAGS.data_dir,\n params=vars(FLAGS),\n config=run_config\n )\n\n train_input_hook = TrainInputHook()\n\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=1000)\n\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': train_data},\n y=train_labels,\n batch_size=FLAGS.batch_size,\n num_epochs=None,\n shuffle=True,\n num_threads=8\n )\n\n valid_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': valid_data},\n y=valid_labels,\n num_epochs=1,\n shuffle=False\n )\n\n valid_n_steps = FLAGS.valid_n_steps\n iter_count = 1\n if valid_n_steps > 0:\n iter_count = FLAGS.max_steps / valid_n_steps\n else:\n valid_n_steps = FLAGS.max_steps\n\n for count in range(int(iter_count)):\n alexnet.train(\n input_fn=lambda: train_input_hook.input_fn(train_data, train_labels),\n max_steps=(count+1) * valid_n_steps,\n # steps=FLAGS.steps,\n hooks=[logging_hook, train_input_hook]\n )\n\n eval_valid_result = alexnet.evaluate(input_fn=valid_input_fn, name='validation')\n print('valid eval result: ', eval_valid_result)\n\n test_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': test_data},\n y=test_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_test_result = alexnet.evaluate(input_fn=test_input_fn, name='test')\n print('test eval result: ', eval_test_result)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=1e-3,\n help='Initial learning rate.'\n )\n parser.add_argument(\n '--decay_steps',\n type=int,\n default=1000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--decay_rate',\n type=int,\n default=0.8,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--lr_boundaries',\n type=list,\n default=[1000, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 18000],\n help='Learning rate boundaries'\n )\n parser.add_argument(\n '--lr_values',\n type=list,\n default=[1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11],\n help='learning rate values'\n )\n parser.add_argument(\n '--keep_prob',\n type=float,\n default=0.5,\n help='Dropout keep prob.'\n )\n parser.add_argument(\n '--max_steps',\n type=int,\n default=10000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n 
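# Note on the tf.data pipeline in TrainInputHook.input_fn above: batching
# before shuffling permutes whole batches rather than individual examples.
# The more common ordering shuffles first; a sketch using the same TF 1.x
# contrib API as the script (buffer size is illustrative):
import tensorflow as tf

ds = tf.contrib.data.Dataset.from_tensor_slices(tf.range(100))
ds = ds.shuffle(buffer_size=100, seed=1)  # shuffle individual examples...
ds = ds.batch(32)                         # ...then batch, then repeat
ds = ds.repeat()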
'--steps',\n type=int,\n default=None,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--valid_n_steps',\n type=int,\n default=1000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--batch_size',\n type=int,\n default=128,\n help='Batch size. Must divide evenly into the dataset sizes.'\n )\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/mnist_alextnet_model/dataset',\n help='Directory to put the input data.'\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)","sub_path":"dave/kakaobrain/alexnet/alexnet_estimator_with_dataset.py","file_name":"alexnet_estimator_with_dataset.py","file_ext":"py","file_size_in_byte":12709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"155068229","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nclass KPICSV(Dataset):\n def __init__(self, mode=\"train\"):\n self.mode = mode\n if ( mode is \"full\" ):\n file = '../data/selims/outLags_Latency_QC1_50.csv'\n \n if ( mode is \"train\" ):\n file = '../data/selims/outLags_Latency_QC1_50_train.csv'\n\n if ( mode is \"test\" ):\n file = '../data/selims/outLags_Latency_QC1_50_test.csv'\n\n self.csv_file = pd.read_csv(file).drop(\n [\"enodeB\",\"year\",\"month\",\"day\",\"time\",\"cell\",\"Unnamed: 0\",\n \"datetime\",\"prev_datetime\",\"year_1\",\"month_1\",\"day_1\",\"time_1\",\n \"datetime_1\",\"datetime_2\",\"prev_datetime_1\",\"year_2\",\"month_2\",\"day_2\",\"time_2\",\n \"datetime_3\",\"prev_datetime_2\",\"prev_datetime_3\",\"year_3\",\"month_3\",\"day_3\",\"time_3\",\n \"pred_datetime\",\"datetime_y\",\"DLLatecny_y\"], axis=1\n )\n\n #print self.csv_file.info()\n\n # data set size\n def __len__(self):\n return len(self.csv_file)\n\n def __getitem__(self,idx):\n row = self.csv_file.iloc[idx]\n #floats = row.drop([\"cell\",\"LatencyTarget\"]).values.astype(np.float32)\n floats = row.drop([\"LatencyTarget\"]).values.astype(np.float32)\n data = torch.tensor(floats)\n\n y_val = self.csv_file.iloc[idx][\"LatencyTarget\"]\n y = torch.tensor(y_val, dtype=torch.long)\n return data, y\n","sub_path":"FedAvg/kpi_datasets.py","file_name":"kpi_datasets.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7165686","text":"import tornado.ioloop\nimport tornado.web\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"ajax.html\")\n\nclass AjaxHandler(tornado.web.RequestHandler):\n def post(self):\n #self.write(\"hello world\")\n self.write(self.get_argument(\"message\"))\n\napplication = tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/test\", AjaxHandler),\n ])\n\nif __name__ == '__main__':\n application.listen(9000)\n 
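# The mode checks in KPICSV.__init__ above compare strings with 'is', which
# only passes because CPython happens to intern short literals; the reliable
# spelling is equality, e.g.:
if mode == "train":
    file = '../data/selims/outLags_Latency_QC1_50_train.csv'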
tornado.ioloop.IOLoop.instance().start()\n\n\"\"\"\n这是第一个实现的ajax——tornado——heatmap的小例子。\n通过这个例子也学会了一些知识:\n1.js按照块进行执行,每次产生的变量都是全局变量,不过可以通过特殊标识符使变量局限于块内;\n2.由于一个页面内是一个命名空间,因此小心变量的冲突问题,尽量选择独特的标识符;\n3.任何元素(包括DOM中的id等)必须先定义后使用,否则只能导致没有效果;\n4.JS是按照顺序进行执行,而且出现错误直接跳过,因此代码自我查看比较重要;\n5.ajax是个很好的东西,现在这个demo可以通过较小的修改就可以用于后台返回信息,以热力图的结构进行显示;\n6.tornado中渲染的html中的js文件等,必须注意以静态文件的使用方式进行调用,之前想当然啦;\n7.遇到的问题表面看来是粗心,实际���是因为对实质内容的不完全理解导致的问题。\n\"\"\"\n","sub_path":"ajax_tornado.py","file_name":"ajax_tornado.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"366683218","text":"import csv\nfrom flask import request\nfrom spellchecker import SpellChecker\n\n\ndef distance_to_metro(metro):\n metro = str(request.form['metro'])\n spell = SpellChecker()\n spell.word_frequency.load_text_file(\"metro_station.txt\")\n metro_correct = spell.correction(metro)\n metro_correct_up = metro_correct.capitalize()\n print(metro_correct_up)\n with open('Dist_to_center.csv',encoding='utf-8') as f:\n reader = csv.reader(f)\n for row in reader:\n if metro_correct_up in row:\n return row[2]\n\n\ndef age_of_house(year_of_constr):\n age_constr = request.form['year_of_constr']\n age = (2020 - int(age_constr))\n return age\n\n\ndef change_of_repair(type_of_repair):\n global cos\n type_of_rep = request.form['type_of_repair']\n if type_of_rep == \"cosmetic\":\n cos = 1\n euro = 0\n design = 0\n elif type_of_rep == \"eurorepair\":\n cos = 0\n euro = 1\n design = 0\n elif type_of_rep == \"designer\":\n cos = 0\n euro = 0\n design = 1\n return cos, euro, design\n\n\ndef change_of_typehome(type_of_house):\n global brick_ch\n type_of_hous = request.form['type_of_house']\n if type_of_hous == \"brick\":\n brick_ch = 1\n panel_ch = 0\n modular_ch = 0\n monolithic_ch = 0\n elif type_of_hous == \"panel\":\n brick_ch = 0\n panel_ch = 1\n modular_ch = 0\n monolithic_ch = 0\n elif type_of_hous == \"modular\":\n brick_ch = 0\n panel_ch = 0\n modular_ch = 1\n monolithic_ch = 0\n elif type_of_hous == \"monolithic\":\n brick_ch = 0\n panel_ch = 0\n modular_ch = 0\n monolithic_ch = 1\n return brick_ch, panel_ch, modular_ch, monolithic_ch\n\n\ndef place_value(number):\n return (\"{:,}\".format(number))\n\n","sub_path":"webapp/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"515282375","text":"# -*- coding: utf-8 -*-\n# %%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport mut.viz\nimport mut.thermo\nconstants = mut.thermo.load_constants()\ncolors = mut.viz.color_selector('pboc')\nmut.viz.plotting_style()\n\n# Load in the data sets\ndata = pd.read_csv('../../data/Chure2019_summarized_data.csv', comment='#')\ndata = data[data['class']=='DNA']\nstats = pd.read_csv('../../data/Chure2019_DNA_binding_energy_summary.csv')\n\nstats = stats[stats['repressors']==260]\nbohr = pd.read_csv('../../data/Chure2019_empirical_F_statistics.csv')\nempirical_bohr = bohr[bohr['class']=='DNA']\n\n# Define some plotting constants. 
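# The if/elif ladders in tools.py above encode a categorical field as
# indicator variables; the same one-hot encoding fits in one comprehension
# (a sketch using the module's three repair types):
REPAIR_TYPES = ("cosmetic", "eurorepair", "designer")

def one_hot_repair(kind):
    # Returns the (cosmetic, euro, designer) indicator triple the model expects.
    return tuple(int(kind == t) for t in REPAIR_TYPES)

print(one_hot_repair("eurorepair"))  # (0, 1, 0)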
\nc_range = np.logspace(-2, 4, 200)\nc_range[0] = 0\nbohr_range = np.linspace(-8, 8, 200)\nF = (1 + np.exp(-bohr_range))**-1\n\nrep_colors = {60:colors['purple'], 124:colors['blue'], \n 260:colors['red'], 1220:colors['dark_green']}\n\n\n# ##############################################################################\n# FIGURE INSTANTIATION\n# ##############################################################################\nfig, ax = plt.subplots(3, 3, figsize=(4.5, 4), dpi=150)\nfor a in ax.ravel():\n a.xaxis.set_tick_params(labelsize=6)\n a.yaxis.set_tick_params(labelsize=6)\n\nfor i in range(3):\n ax[0, i].set_xscale('symlog', linthreshx=0.006)\n ax[-1, i].set_xscale('symlog', linthreshx=0.006)\n ax[0, i].set_ylim([-0.2, 1.2])\n ax[1, i].set_ylim([-0.2, 1.2])\n ax[1, i].set_xlim([-8, 8])\n ax[0, i].set_xlim([-0.001, 1E4])\n ax[-1, i].set_xlim([-0.001, 1E4])\n ax[-1, i].set_ylim([-8, 8])\n ax[0, i].set_xticks([0, 1E-2, 1E0, 1E2, 1E4])\n ax[-1, i].set_xticks([0, 1E-2, 1E0, 1E2, 1E4])\n for j in range(2):\n ax[i, j+1].set_yticklabels([])\n\n # Add labels \n ax[0, i].set_xlabel(\"IPTG [µM]\", fontsize=6)\n ax[1, i].set_xlabel(\"free energy [$k_BT$]\", fontsize=6)\n ax[-1, i].set_xlabel(\"IPTG [µM]\", fontsize=6)\n\n# Add ylabels\nax[0, 0].set_ylabel('fold-change', fontsize=6)\nax[1, 0].set_ylabel('fold-change', fontsize=6)\nax[2, 0].set_ylabel('$\\Delta F$ [$k_BT$]', fontsize=6)\n\n# Define the axes\naxes = {'Q21M':0, 'Y20I':1, 'Q21A':2}\ntitles = {'Q18M':0, 'Y17I':1, 'Q18A':2}\n# Add titles\nfor m, a in titles.items():\n ax[0, a].set_title(m, fontsize=6, backgroundcolor=colors['pale_yellow'],\n y=1.08) \n\n# Add panel labels\nfig.text(0, 0.90, '(A)', fontsize=6)\nfig.text(0, 0.63, '(B)', fontsize=6)\nfig.text(0, 0.33, '(C)', fontsize=6)\n\n# ##############################################################################\n# GUIDE CURVES FOR ∆F\n# ##############################################################################\nfor i in range(3):\n ax[-1, i].hlines(0, -0.01, 1E4, 'k', linestyle=':', lw=0.75)\n\n# ##############################################################################\n# FOLD-CHANGE CURVES\n# ##############################################################################\nfor r, cor in rep_colors.items():\n for m, a in axes.items():\n _stats = stats[(stats['mutant']==m) & (stats['parameter']=='ep_RA')]\n _c, _ep= np.meshgrid(c_range, _stats[['hpd_min', 'hpd_max']].values)\n arch = mut.thermo.SimpleRepression(R=r, ep_r=_ep, ka=constants['Ka'],\n ki=constants['Ki'], \n ep_ai=constants['ep_AI'],\n effector_conc=_c).fold_change()\n ax[0, a].fill_between(c_range, arch[0, :], arch[1, :], color=cor,\n alpha=0.4) \n\n# ##############################################################################\n# COLLAPSE CURVES\n# ##############################################################################\nfor i in range(3):\n ax[1, i].plot(bohr_range, F, 'k-', lw=0.75)\n\n# ##############################################################################\n# FREE ENERGY PREDICTIONS\n# ##############################################################################\nfor m, a in axes.items():\n _stats = stats[(stats['mutant']==m) & \n (stats['parameter']=='ep_RA')][['hpd_min', 'hpd_max']].values[0]\n ax[-1, a].fill_between(c_range, _stats[0] - constants['O2'], \n _stats[1] - constants['O2'], color=rep_colors[260], \n alpha=0.4)\n\n# ##############################################################################\n# FOLD-CHANGE DATA\n# 
##############################################################################\nfor g, d in data.groupby(['mutant', 'repressors']):\n if g[1] == 260:\n face = 'w'\n else:\n face = rep_colors[g[1]]\n ax[0, axes[g[0]]].errorbar(d['IPTGuM'], d['mean'], d['sem'], fmt='.',\n color=rep_colors[int(g[1])], markerfacecolor=face,\n ms=5, markeredgewidth=0.5, capsize=1, lw=1, linestyle='none',\n label=int(g[1]))\n\n# ##############################################################################\n# COLLAPSE DATA\n# ##############################################################################\nfor g, d in data.groupby(['mutant', 'repressors']):\n ep_r = stats[(stats['mutant']==g[0]) & \n (stats['parameter']=='ep_RA')]['median'].values[0]\n bohr = mut.thermo.SimpleRepression(R=g[1], ep_r=ep_r, ka=constants['Ka'],\n ki=constants['Ki'], \n ep_ai=constants['ep_AI'], \n effector_conc=d['IPTGuM']).bohr_parameter()\n if g[1] == 260:\n face = 'w'\n else:\n face = rep_colors[g[1]]\n ax[1, axes[g[0]]].errorbar(bohr, d['mean'], d['sem'], fmt='.', \n linestyle='none', lw=1, capsize=1, ms=5, markeredgewidth=0.5,\n color=rep_colors[g[1]], markerfacecolor=face)\n\n# ##############################################################################\n# INFERRED F \n# ##############################################################################\nfor g, d in empirical_bohr.groupby(['mutant', 'repressors', 'IPTGuM']):\n _param = d[d['parameter']=='delta_bohr']\n mu = d[d['parameter']=='fc_mu']['median'].values[0]\n sig = d[d['parameter']=='fc_sigma']['median'].values[0]\n\n # If the mean fold change is closer to the boundary than the sigma, do not \n # show it (assign alpha=0)\n if (mu < sig) | (1 - mu < sig):\n color = 'slategray'\n alpha = 0\n lw = 0\n fmt = 'x'\n else:\n color = rep_colors[g[1]]\n alpha = 1 \n lw = 0.75\n fmt = '.'\n if g[1] == 260:\n face = 'w'\n zorder=1000\n elif fmt == 'x':\n zorder = 1\n face=color\n else:\n face = color\n zorder=100\n _ax = ax[-1, axes[g[0]]]\n _ax.plot(_param['IPTGuM'], _param['median'], marker=fmt, linestyle='none', \n color=color, markerfacecolor=face, alpha=alpha, ms=5, zorder=zorder, \n markeredgewidth=0.5)\n _ax.vlines(_param['IPTGuM'], _param['hpd_min'], _param['hpd_max'], \n lw=lw, color=color, alpha=alpha, zorder=zorder)\n\n# ##############################################################################\n# LEGEND INFORMATION\n# ##############################################################################\nleg = ax[0, 0].legend(title='$R$', fontsize=5,handletextpad=0.1)\nleg.get_title().set_fontsize(5)\nplt.subplots_adjust(hspace=0.5, wspace=0.1)\nplt.savefig('../../figures/Chure2019_Fig3_DNA_mutants.pdf', bbox_inches='tight')\n","sub_path":"code/figures/Chure2019_Fig3_DNA_mutants.py","file_name":"Chure2019_Fig3_DNA_mutants.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"398395866","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\n\ndriver = webdriver.Chrome()\ndriver.set_window_size(1280,720)\ntime1 = time.time()\n# driver.get('https://loveyueliang.coding.me')\ndriver.get('https://blog.52sifang.cn')\n# driver.get('https://www.haomwei.com/')\ndriver.implicitly_wait(20)\ntime2 = time.time()\n\nprint('总耗时: 
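# A single-point call of the thermodynamic model used throughout the figure
# script above (repressor copy number and IPTG concentration are illustrative;
# constants come from mut.thermo.load_constants() as in the script):
arch = mut.thermo.SimpleRepression(R=260, ep_r=constants['O2'],
                                   ka=constants['Ka'], ki=constants['Ki'],
                                   ep_ai=constants['ep_AI'],
                                   effector_conc=50)
print(arch.fold_change(), arch.bohr_parameter())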
',time2-time1)","sub_path":"test/test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"546218060","text":"from directory.core import STUDENTS_ALPHA\nfrom djzbar.utils.informix import do_sql\n\ndef get_student(firstname,lastname):\n sql = STUDENTS_ALPHA\n sql += \" AND profile_rec.priv_code != 'FERP'\"\n sql += (' AND ( lower(id_rec.firstname) like \"%%%s%%\" )' % firstname)\n sql += (' AND lower(id_rec.lastname) like \"%%%s%%\"' % lastname)\n sql += ' order by id_rec.lastname, id_rec.firstname'\n objects = do_sql(sql)\n","sub_path":"directory/bin/celebration.py","file_name":"celebration.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"494227509","text":"# Convert a non-negative integer to its english words representation. Given input is guaranteed to be less than 231 - 1.\n#\n# Example 1:\n#\n# Input: 123\n# Output: \"One Hundred Twenty Three\"\n# Example 2:\n#\n# Input: 12345\n# Output: \"Twelve Thousand Three Hundred Forty Five\"\n# Example 3:\n#\n# Input: 1234567\n# Output: \"One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven\"\n# Example 4:\n#\n# Input: 1234567891\n# Output: \"One Billion Two Hundred Thirty Four Million Five Hundred Sixty Seven Thousand Eight Hundred Ninety One\"\n\nclass Solution:\n def numberToWords(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n n2w = {1e9: \"Billion\", 1e6: \"Million\", 1e3: \"Thousand\", 1e2: \"Hundred\",\n 90: \"Ninety\", 80: \"Eighty\", 70: \"Seventy\",\n 60: \"Sixty\", 50: \"Fifty\", 40: \"Forty\",\n 30: \"Thirty\", 20: \"Twenty\", 19: \"Nineteen\",\n 18: \"Eighteen\", 17: \"Seventeen\", 16: \"Sixteen\",\n 15: \"Fifteen\", 14: \"Fourteen\", 13: \"Thirteen\",\n 12: \"Twelve\", 11: \"Eleven\", 10: \"Ten\",\n 9: \"Nine\", 8: \"Eight\", 7: \"Seven\",\n 6: \"Six\", 5: \"Five\", 4: \"Four\", 3: \"Three\", 2: \"Two\", 1: \"One\", 0: \"Zero\"}\n\n keys = [1000000000, 1000000, 1000, 100, 90, 80, 70, 60, 50, 40,\n 30, 20, 19, 18, 17, 16, 15, 14, 13, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n def dfs(n):\n if n <= 20: return n2w[n]\n for div in keys:\n d, r = divmod(n, div)\n if d == 0: continue\n header = dfs(d) + \" \" if div >= 100 else \"\"\n tailer = \" \" + dfs(r) if r else \"\"\n return header + n2w[div] + tailer\n\n return dfs(num)","sub_path":"src/273_Integer_to_English_Words.py","file_name":"273_Integer_to_English_Words.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"444688865","text":"\"\"\"Auxiliary functions for the evaluation of the likelihood.\"\"\"\nimport numpy as np\nfrom numba import guvectorize\nfrom numba import vectorize\n\nfrom respy.python.shared.shared_constants import HUGE_FLOAT\nfrom respy.python.shared.shared_constants import INADMISSIBILITY_PENALTY\n\n\n@vectorize(\"f8(f8, f8, f8)\", nopython=True, target=\"cpu\")\ndef clip(x, minimum=None, maximum=None):\n \"\"\"Clip (limit) input value.\n\n Parameters\n ----------\n x : float\n Value to be clipped.\n minimum : float\n Lower limit.\n maximum : float\n Upper limit.\n\n Returns\n -------\n float\n Clipped value.\n\n \"\"\"\n if minimum is not None and x < minimum:\n return minimum\n elif maximum is not None and x > maximum:\n return maximum\n else:\n return x\n\n\n@guvectorize(\n [\"f8[:], f8[:], f8[:], f8[:, 
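# The numba-vectorized clip above broadcasts over arrays much like np.clip
# (assuming the module and numba are importable):
import numpy as np
print(clip(np.array([-1.0, 0.5, 2.0]), 0.0, 1.0))  # [0.  0.5 1. ]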
:], f8, b1, i8, f8, f8[:]\"],\n \"(m), (n), (n), (p, n), (), (), (), () -> (p)\",\n nopython=True,\n target=\"parallel\",\n)\ndef simulate_probability_of_agents_observed_choice(\n wages, rewards_systematic, emaxs, draws, delta, max_education, idx, tau, prob_choice\n):\n \"\"\"Simulate the probability of observing the agent's choice.\n\n The probability is simulated by iterating over a distribution of unobservables.\n First, the utility of each choice is computed. Then, the probability of observing\n the choice of the agent given the maximum utility from all choices is computed.\n\n Parameters\n ----------\n wages : np.ndarray\n Array with shape (2,).\n rewards_systematic : np.ndarray\n Array with shape (4,).\n emaxs : np.ndarray\n Array with shape (4,)\n draws : np.ndarray\n Array with shape (num_draws, 4)\n delta : float\n Discount rate.\n max_education: bool\n Indicator for whether the state has reached maximum education.\n idx : int\n Choice of the agent minus one to get an index.\n tau : float\n Smoothing parameter for choice probabilities.\n\n Returns\n -------\n prob_choice : np.ndarray\n Array with shape (num_draws) containing smoothed probabilities for\n choice.\n\n \"\"\"\n num_draws, num_choices = draws.shape\n num_wages = wages.shape[0]\n\n total_values = np.zeros((num_choices, num_draws))\n\n for i in range(num_draws):\n\n max_total_values = 0.0\n\n for j in range(num_choices):\n if j < num_wages:\n rew_ex = wages[j] * draws[i, j] + rewards_systematic[j] - wages[j]\n else:\n rew_ex = rewards_systematic[j] + draws[i, j]\n\n cont_value = rew_ex + delta * emaxs[j]\n\n if j == 2 and max_education:\n cont_value += INADMISSIBILITY_PENALTY\n\n total_values[j, i] = cont_value\n\n if cont_value > max_total_values or j == 0:\n max_total_values = cont_value\n\n sum_smooth_values = 0.0\n\n for j in range(num_choices):\n val_exp = np.exp((total_values[j, i] - max_total_values) / tau)\n\n val_clipped = clip(val_exp, 0.0, HUGE_FLOAT)\n\n total_values[j, i] = val_clipped\n sum_smooth_values += val_clipped\n\n prob_choice[i] = total_values[idx, i] / sum_smooth_values\n\n\n@vectorize([\"f4(f4, f4, f4)\", \"f8(f8, f8, f8)\"], nopython=True, target=\"cpu\")\ndef get_pdf_of_normal_distribution(x, mu, sigma):\n \"\"\"Compute the probability of ``x`` under a normal distribution.\n\n This implementation is faster than calling ``scipy.stats.norm.pdf``.\n\n Parameters\n ----------\n x : float or np.ndarray\n The probability is calculated for this value.\n mu : float or np.ndarray\n Mean of the normal distribution.\n sigma : float or np.ndarray\n Standard deviation of the normal distribution.\n\n Returns\n -------\n probability : float\n Probability of ``x`` under a normal distribution with mean ``mu`` and standard\n deviation ``sigma``.\n\n Example\n -------\n >>> result = get_pdf_of_normal_distribution(0)\n >>> result\n 0.3989422804014327\n >>> from scipy.stats import norm\n >>> assert result == norm.pdf(0)\n\n \"\"\"\n a = np.sqrt(2 * np.pi) * sigma\n b = np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n\n probability = 1 / a * b\n\n return probability\n\n\n@guvectorize(\n [\"f8, f8[:], i8, f8[:, :, :], i8, f8[:, :], f8[:, :], f8[:]\"],\n \"(), (w), (), (i, p, n), (), (m, n) -> (p, n), (p)\",\n nopython=True,\n target=\"parallel\",\n)\ndef create_draws_and_prob_wages(\n wage_observed,\n wages_systematic,\n period,\n periods_draws_prob,\n choice,\n sc,\n draws,\n prob_wages,\n):\n \"\"\"Create draws to simulate maximum utilities and create probabilities of wages.\n\n Draws are taken from a general set of 
unique shocks for each period. The shocks are\n adjusted in case the wage of an agent is available as well as the probability of the\n wage.\n\n Parameters\n ----------\n wage_observed : float\n Agent's observed wage.\n wages_systematic : np.ndarray\n Array with shape (2,) containing systematic wages.\n period : int\n Number of period.\n periods_draws_prob : np.ndarray\n Array with shape (num_periods, num_draws, num_choices) containing sets of draws\n for all periods.\n choice : int\n Choice between one and four.\n sc : np.ndarray\n Array with shape (num_choices, num_choices).\n\n Returns\n -------\n draws : np.ndarray\n Array with shape (num_draws, num_choices) containing adjusted draws.\n prob_wages : np.ndarray\n Array with shape (num_draws,) containing probabilities for the observed wage.\n\n \"\"\"\n # Create auxiliary objects\n num_draws, num_choices = periods_draws_prob.shape[1:]\n temp_draws = np.zeros((num_draws, num_choices))\n\n # Extract relevant deviates from standard normal distribution. The same set of\n # baseline draws are used for each agent and period.\n draws_stan = periods_draws_prob[period]\n\n has_wage = ~np.isnan(wage_observed)\n\n # If an agent is observed working, then the the labor market shocks are observed and\n # the conditional distribution is used to determine the choice probabilities if the\n # wage information is available as well.\n if has_wage:\n log_wo = np.log(wage_observed)\n log_wage_observed = clip(log_wo, -HUGE_FLOAT, HUGE_FLOAT)\n\n log_ws = np.log(wages_systematic[choice - 1])\n log_wage_systematic = clip(log_ws, -HUGE_FLOAT, HUGE_FLOAT)\n\n dist = log_wage_observed - log_wage_systematic\n\n # Adjust draws and prob_wages in case of OCCUPATION A.\n if choice == 1:\n temp_draws[:, 0] = dist / sc[0, 0]\n temp_draws[:, 1] = draws_stan[:, 1]\n\n prob_wages[:] = get_pdf_of_normal_distribution(dist, 0.0, sc[0, 0])\n\n # Adjust draws and prob_wages in case of OCCUPATION B.\n elif choice == 2:\n temp_draws[:, 0] = draws_stan[:, 0]\n temp_draws[:, 1] = (dist - sc[1, 0] * draws_stan[:, 0]) / sc[1, 1]\n\n means = sc[1, 0] * draws_stan[:, 0]\n prob_wages[:] = get_pdf_of_normal_distribution(dist, means, sc[1, 1])\n\n temp_draws[:, 2:] = draws_stan[:, 2:]\n\n # If the wage is missing or an agent is pursuing SCHOOLING or HOME, the draws are\n # not adjusted and the probability for wages is one.\n else:\n temp_draws[:, :] = draws_stan\n prob_wages[:] = 1.0\n\n # What follows is a matrix multiplication written out of the form ``a.dot(b.T). Note\n # that the second argument corresponds to ``sc`` which is not transposed. This is\n # done by adjusting the loops. 
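# The doctest inside get_pdf_of_normal_distribution above calls the function
# with a single argument, but the vectorized signature requires x, mu and
# sigma; the corrected call is:
from scipy.stats import norm
print(get_pdf_of_normal_distribution(0.0, 0.0, 1.0))  # 0.3989... == norm.pdf(0)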
Additionally, it incorporates the process of taking\n # ``np.exp`` for the draws of the first two choices and clipping them.\n k_, l_ = draws.shape\n m_ = sc.shape[0]\n\n for k in range(k_):\n for m in range(m_):\n val = 0.0\n for l in range(l_):\n val += temp_draws[k, l] * sc[m, l]\n if m < 2:\n val_exp = np.exp(val)\n val_clipped = clip(val_exp, 0.0, HUGE_FLOAT)\n\n draws[k, m] = val_clipped\n else:\n draws[k, m] = val\n","sub_path":"respy/python/evaluate/evaluate_auxiliary.py","file_name":"evaluate_auxiliary.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"21478338","text":"# blanyal\n\ndef binarySearch(inputList, item):\n lower = 0\n upper = len(inputList) - 1\n isFound = False\n\n while (lower <= upper):\n mid = (lower + upper) // 2\n if inputList[mid] == item:\n isFound = True\n break\n elif item < inputList[mid]:\n upper = mid - 1\n else:\n lower = mid + 1\n\n return isFound\n \nif __name__ == \"__main__\":\n inputList = [int(x) for x in input(\"Enter the input list: \").split()]\n item = int(input(\"Enter the item to be found: \"))\n print (binarySearch(inputList, item))\n","sub_path":"Python/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"260850468","text":"import os \nimport sys\nimport glob\nimport argparse\nimport logging\nfrom wand.image import Image\nfrom PIL import Image as Img\n\nlogging.basicConfig(format=\"%(asctime)s - %(module)s - %(levelname)s: %(message)s\", level=logging.DEBUG)\n\n\nclass PdfConverter:\n \n def pdf2jpg(self, pdfpath, fileext, output):\n try:\n logging.info(pdfpath)\n base = os.path.basename(pdfpath)\n\n with Image(filename=pdfpath, resolution=200) as img:\n img.compression_quality = 80\n img.save(filename=output + \"/\" + os.path.splitext(base)[0] + \".\" + fileext)\n\n except Exception as err:\n logging.error(\"Error in converting to jpg\", err)\n return False\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(dest=\"path\", help=\"Folder or pdf file to be converted\")\n parser.add_argument('--ext', dest='ext', help=\"File exentension\", default=\"jpg\")\n parser.add_argument('--output', dest='output', help=\"Output directory\", default=\"output\")\n\n args = parser.parse_args()\n\n files = []\n if os.path.isfile(args.path):\n if args.path.endswith(\".pdf\"):\n logging.info(\"Processing PDF file\")\n files.append(args.path)\n else:\n logging.error(\"Only supports pdf files\")\n sys.exit(1)\n \n if os.path.isdir(args.path):\n logging.info(\"Listing PDF files in folder %s\" % args.path )\n files = sorted(glob.glob(args.path + '/*.pdf'))\n\n outdir = args.output\n if not os.path.isdir(args.output):\n os.mkdir(args.output)\n\n extensions = ['jpg', 'jpeg', 'png', 'gif']\n if args.ext in extensions:\n fileext = args.ext\n else:\n logging.error(\"Extension not supported\")\n sys.exit(1)\n\n \n cvt = PdfConverter()\n for f in files:\n cvt.pdf2jpg(f, fileext, outdir)\n\n","sub_path":"pdf2image.py","file_name":"pdf2image.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577051412","text":"import os\nimport re\nimport socket\nfrom datetime import datetime\nfrom json import load, dump\nfrom pprint import pprint\n\nimport requests\nfrom googleapiclient.discovery import 
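# The binarySearch routine above assumes an already-sorted input list;
# quick checks:
print(binarySearch([1, 3, 5, 7, 9], 7))  # True
print(binarySearch([1, 3, 5, 7, 9], 4))  # False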
build\n\nSHEET_ID = os.environ[\"PATRON_ALIASES_GOOGLE_SHEET_ID\"]\nGOOGLE_SHEETS_DEVELOPER_KEY = os.environ[\"GOOGLE_SHEETS_DEVELOPER_KEY\"]\nPATREON_AUTH_TOKEN = os.environ[\"PATREON_AUTH_TOKEN\"]\n\n\ndef load_nicknames_sheet(cache_reponse=False):\n filepath = \"src/scripts/nicknames.json\"\n if cache_reponse:\n if os.path.exists(filepath):\n with open(filepath) as f:\n return load(f)\n\n print(\"Loading Google Sheet of patron info...\")\n range = 'Current!B2:L'\n service = build('sheets', 'v4', developerKey=GOOGLE_SHEETS_DEVELOPER_KEY)\n # Call the Sheets API\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SHEET_ID, range=range).execute()\n\n nicknames = {}\n for row in result[\"values\"]:\n if row[2] != \"Active patron\":\n continue\n alias, email = row[1].strip(), row[10].strip()\n if alias and email:\n nicknames[email] = alias.strip()\n\n if cache_reponse:\n with open(filepath, \"w\") as f:\n dump(nicknames, f)\n\n return nicknames\n\n\ndef call_patreon(cache_reponse=False):\n filepath = \"src/scripts/patrons.json\"\n if cache_reponse:\n if os.path.exists(filepath):\n with open(filepath) as f:\n return load(f)\n\n print(\"Retrieving data from Patreon...\")\n url = \"https://www.patreon.com/api/oauth2/v2/campaigns/82133/members?include=currently_entitled_tiers&fields%5Bmember%5D=full_name,email,lifetime_support_cents,patron_status,pledge_relationship_start\"\n headers = {\"Authorization\": PATREON_AUTH_TOKEN}\n patrons = []\n\n while True:\n response = requests.request(\"GET\", url, headers=headers)\n json_response = response.json()\n patrons += json_response[\"data\"]\n print(len(patrons))\n if not \"links\" in json_response:\n break\n url = json_response[\"links\"][\"next\"]\n\n if cache_reponse:\n with open(filepath, \"w\") as f:\n dump(patrons, f)\n\n return patrons\n\n\ndef build_patreon_data():\n # If we're not running in GitHub, use cache\n cache_response = \"cloudapp\" not in socket.gethostname()\n nicknames = load_nicknames_sheet(cache_reponse=cache_response)\n patrons = call_patreon(cache_reponse=cache_response)\n\n years_dict = {6: [], 5: [], 4: [], 3: [], 2: [], 1: []}\n tier_list = {6000: \"👑\", 5000: \"🌟\", 4000: \"♠\", 3000: \"♥\", 2000: \"♣\", 1000: \"♦\"}\n\n print(f\"Current datetime: {datetime.now()}\")\n\n for p in patrons:\n attr = p[\"attributes\"]\n if attr[\"patron_status\"] != \"active_patron\":\n continue\n email = attr[\"email\"]\n full_name = attr[\"full_name\"]\n cents = attr[\"lifetime_support_cents\"]\n start = attr[\"pledge_relationship_start\"]\n print(\"{:<25} | {:<6} | {}\".format(full_name, cents, start))\n\n name = nicknames[email] if email in nicknames else full_name.strip()\n\n # Append tier icon\n for tier_cents in sorted(tier_list.keys(), reverse=True):\n if cents / 100 > tier_cents:\n name = f'{tier_list[tier_cents]} {name}'\n break\n\n # Add to year list\n dt = datetime.fromisoformat(start)\n now_dt = datetime.now(dt.tzinfo)\n current_year, current_month, current_day = now_dt.year, now_dt.month, now_dt.day\n for years in (6, 5, 4, 3, 2, 1):\n past_dt = datetime(year=current_year - years, month=current_month, day=current_day, tzinfo=dt.tzinfo)\n if dt < past_dt:\n print(f\"Has pledged for {years} years\")\n years_dict[years].append(name)\n break\n\n def sort_key(name):\n m = re.search(r'. 
(.*?)', name)\n if m:\n return m.group(1).lower()\n return name.lower()\n\n for sublist in years_dict.values():\n sublist.sort(key=sort_key)\n\n return years_dict\n\n\nif __name__ == \"__main__\":\n data = build_patreon_data()\n pprint(data)\n","sub_path":"src/scripts/build_patreon_data.py","file_name":"build_patreon_data.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"340053333","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport utils\r\n\r\ndef test_run():\r\n start_date = '2010-01-01'\r\n end_date = '2010-12-31'\r\n dates = pd.date_range(start_date, end_date)\r\n #print dates\r\n #print dates[0]\r\n\r\n # empty dataframe\r\n df = pd.DataFrame(index=dates)\r\n #print df\r\n\r\n symbols = ['SPY', 'GLD', 'MSFT', 'GOOG']\r\n for symbol in symbols:\r\n dfSymbol = utils.get_data(symbol)\r\n \r\n # the column that was joined was named 'Adj Close' for every symbol\r\n # we cannot have 2 column with the same name\r\n # since this table is only for adjusted close, we want to rename the data for the symbol using the symbol\r\n dfSymbol = dfSymbol.rename(columns={'Adj Close': symbol})\r\n\r\n # we want inner join to get the intersection of both tables\r\n df = df.join(dfSymbol, how='inner')\r\n \r\n # drop nan values from SPY, to ensure we only handle data when the market (SPY) is open\r\n df = df.dropna(subset=['SPY'])\r\n\r\n #print df\r\n\r\n # slice data to parts that we need\r\n \r\n # row slicing: slice by row range (dates)\r\n #print df.ix['2010-01-01': '2010-01-31'] # the month of january\r\n utils.plot_data(df, '2010-01-01', '2010-01-31', symbols, title='January 2010')\r\n utils.plot_data(normalize_data(df), '2010-01-01', '2010-01-31', symbols, title='normalized January 2010')\r\n\r\n # column slicing: slice by column name (symbol)\r\n #print df[['SPY', 'MSFT']]\r\n utils.plot_data(df, start_date, end_date, ['SPY', 'MSFT'], title='SPY vs MSFT')\r\n utils.plot_data(normalize_data(df), start_date, end_date, ['SPY', 'MSFT'], title='normalized SPY vs MSFT')\r\n\r\n # row x column slicing\r\n #print df.ix['2010-01-01': '2010-01-31', ['SPY', 'MSFT']]\r\n utils.plot_data(df, '2010-01-01', '2010-01-31', ['SPY', 'MSFT'], title='SPY vs MSFT on Jan 2010')\r\n utils.plot_data(normalize_data(df), '2010-01-01', '2010-01-31', ['SPY', 'MSFT'], title='normalized SPY vs MSFT on Jan 2010')\r\n\r\n #normalize stock prices\r\nif __name__ == \"__main__\":\r\n test_run()","sub_path":"sandbox/udacity/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"385910875","text":"import pylab\nclass MortgagePlots(object):\n\tdef plotPayments(self, style):\n\t\tpylab.plot(self.paid[1:],style,label=self.legend)\n\n\tdef plotTotPd(self, style):\n\t\ttotPd = [self.paid[0]]\n\t\tfor i in range(1, len(self.paid)):\n\t\t\ttotPd.append(totPd[-1] + self.paid[i])\n\t\tpylab.plot(totPd, style, label=self.legend)\n\ndef compareMortgages(amt, years, fixedRate, pts, ptsRate, varRate1, varRate2, varMonths):\n\ttotMonths = years * 12\n\tfixed1 = Fixed(amt, fixedRate, totMonths)\n\tfixed2 = FixedWithPts(amt, ptsRate, totMonths, pts)\n\ttwoRate = TwoRate(amt, varRate2, totMonths, varRate1, varMonths)\n\tmorts = [fixed1, fixed2, twoRate]\n\tfor m in range(totMonths):\n\t\tfor mort in morts:\n\t\t\tmort.makePayment()\n\tplotMortgages(morts, amt)\n\ndef plotMortgages(morts, amt):\n\tstyles = ['b-', 
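# NOTE (editor): `normalize_data` is called in the price-plotting script above
# but never defined in that file; a common implementation (an assumption, not
# the author's code) divides every row by the first row so that all symbols
# start at 1.0:
def normalize_data(df):
    return df / df.iloc[0]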
'r-.', 'g:']\n\tpayments = 0\n\tcost = 1\n\tpylab.figure(payments)\n\tpylab.title('Monthly Payments of Different $'\\\n\t\t\t\t+ str(amt) + ' Mortgages')\n\tpylab.xlabel('Months')\n\tpylab.ylabel('Monthly Payments')\n\n\tpylab.figure(cost)\n\tpylab.title('cost of Different $' + str(amt)\\\n\t\t\t\t+ ' Mortgages')\n\tpylab.xlabel('Months')\n\tpylab.ylabel('Total Payments')\n\n\tfor i in range(len(morts)):\n\t\tpylab.figure(payments)\n\t\tmorts[i].plotPayments(styles[i])\n\t\tpylab.figure(cost)\n\t\tmorts[i].plotTotPd(styles[i])\n\tpylab.figure(payments)\n\tpylab.legend(loc = 'upper center')\n\tpylab.figure(cost)\n\tpylab.legend(loc = 'best')\n","sub_path":"week1/lecture1/code4.py","file_name":"code4.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"398632059","text":"import pygame\n\n\nclass WeaponAnimation:\n def __init__(self, sprite_sheet, y_start):\n self.step_left = sprite_sheet.subsurface(pygame.Rect(0, y_start, 48, 64))\n self.stand = sprite_sheet.subsurface(pygame.Rect(48, y_start, 48, 64))\n self.step_right = sprite_sheet.subsurface(pygame.Rect(96, y_start, 48, 64))\n\n\nclass BaseWeapon:\n def __init__(self, sprite_sheet, y_sheet_start, explosion_sprite_sheet):\n self.anim_set = WeaponAnimation(sprite_sheet, y_sheet_start)\n\n self.fire_rate_acc = 0.0\n self.fire_rate = 1.0\n self.can_fire = True\n\n self.player_position = [0, 0]\n self.current_aim_vector = [0, 0]\n self.explosion_sprite_sheet = explosion_sprite_sheet\n\n self.barrel_forward_offset = 32\n self.barrel_side_offset = 6\n barrel_x_pos = self.player_position[0] + (self.current_aim_vector[0] * self.barrel_forward_offset) - (\n self.current_aim_vector[1] * self.barrel_side_offset)\n barrel_y_pos = self.player_position[1] + (self.current_aim_vector[1] * self.barrel_forward_offset) + (\n self.current_aim_vector[0] * self.barrel_side_offset)\n self.barrel_exit_pos = [barrel_x_pos, barrel_y_pos]\n\n self.ammo_count = -1\n\n def update(self, time_delta, time_multiplier, player_position, current_aim_vector):\n if self.fire_rate_acc < self.fire_rate:\n self.fire_rate_acc += time_delta * time_multiplier\n else:\n if self.ammo_count != 0:\n self.can_fire = True\n\n self.player_position = player_position\n self.current_aim_vector = current_aim_vector\n\n # calculate the position where the projectiles should leave the weapon\n\n barrel_x_pos = self.player_position[0] + (self.current_aim_vector[0] * self.barrel_forward_offset) - (\n self.current_aim_vector[1] * self.barrel_side_offset)\n barrel_y_pos = self.player_position[1] + (self.current_aim_vector[1] * self.barrel_forward_offset) + (\n self.current_aim_vector[0] * self.barrel_side_offset)\n self.barrel_exit_pos = [barrel_x_pos, barrel_y_pos]\n\n def fire(self, projectiles):\n pass\n","sub_path":"game/base_weapon.py","file_name":"base_weapon.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"360381305","text":"class Node:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n\nclass Stack:\n def __init__(self, bottom=None, top=None):\n self.bottom = bottom\n self.top = top\n self.__size = 0\n\n def peek(self):\n self.__isEmpty__()\n return self.top.value\n\n def __isEmpty__(self):\n if (self.bottom == None):\n raise Exception('The stack is Empty')\n\n def push(self, item):\n newNode = Node(item)\n\n if(self.bottom == None):\n self.bottom = self.top = 
newNode\n else:\n self.top.next = newNode\n self.top = newNode\n\n def pop(self):\n self.__isEmpty__()\n\n current = self.bottom\n previous = self.top\n\n while (current.next != self.top):\n current = current.next\n\n self.top = current\n self.top.next = None\n return previous.value\n\n\nll = Stack()\nll.push(1)\nll.push(2)\nll.push(3)\nll.push(4)\nll.push(5)\nprint(ll.peek())\n","sub_path":"Stacks/WorkingWithStacks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"376383859","text":"import os\nimport sys\nimport glob\nimport pwd\nimport time\nfrom Pegasus.DAX3 import *\nfrom JobMaker import JobMaker\n\ndef visit_list(repo):\n # This is a place-holder for now, to be replaced by DC2 visit list.\n return [1, 2, 3]\n\ndef raft_list(visit):\n # Down-selected list of rafts to analyze.\n return ['2,2']\n\ndef sensor_list(vist, raft):\n # Down-selected list of sensors.\n return ['1,1']\n\ndef tract_list(repo):\n return ['0']\n\ndef patch_list(repo, tract=0):\n # List of patches for the requested repo and tract.\n return ['0,0']\n\ndef filter_list():\n return [filt for filt in 'ugrizy']\n\nUSER = pwd.getpwuid(os.getuid())[0]\n\n# Create a abstract dag\ndax = ADAG(\"Level_2_Pipeline\")\n\n# Add some workflow-level metadata\ndax.metadata(\"creator\", \"%s@%s\" % (USER, os.uname()[1]))\ndax.metadata(\"created\", time.ctime())\n\ninput_repo = '/global/cscratch1/sd/descdm/DC1/DC1-imsim-dithered'\noutput_repo = '.'\nconfig_dir = './configs'\n\njob_maker = JobMaker(dax, output_repo, config_dir, bin_dir='./bin', tc='tc.txt')\n\n# Ingest the raw images.\ningestImages = job_maker.make('ingestImages', repo=input_repo,\n options={'--output': output_repo})\n\n# Ingest the reference catalog.\nref_cat = '/global/homes/d/descdm/dc1/DC1-imsim-dithered/dc1_reference_catalog.txt'\ningestReferenceCatalog = Job('ingestReferenceCatalog')\ningestReferenceCatalog.addArguments(ref_cat, output_repo)\ndax.addJob(ingestReferenceCatalog)\ndax.depends(ingestReferenceCatalog, ingestImages)\njob_maker.add_tc_entry(job_maker, 'ingestReferenceCatalog')\n\nmakeDiscreteSkyMap = job_maker.make('makeDiscreteSkyMap')\n# Loop over visits\nfor visit in visit_list(output_repo):\n # Loop over rafts\n for raft in raft_list(visit):\n dataId = dict(visit=visit, raft=raft)\n processCcd = job_maker.make('processCcd', dataId=dataId)\n dax.depends(processCcd, ingestReferenceCatalog)\n dax.depends(makeDiscreteSkyMap, processCcd)\n\n# Loop over tracts\nfor tract in tract_list(output_repo):\n # Loop over patches.\n for patch in patch_list(output_repo, tract=tract):\n dataId = dict(patch=patch, tract=tract, filter='^'.join(filter_list()))\n mergeDetections = job_maker.make('mergeDetections', dataId=dataId)\n for filt in filter_list():\n dataId = dict(patch=patch, tract=tract, filter=filt)\n options = {'--selectId': 'filter=%s' % filt}\n makeTempExpCoadd = job_maker.make('makeTempExpCoadd', dataId=dataId,\n options=options)\n dax.depends(makeTempExpCoadd, makeDiscreteSkyMap)\n\n assembleCoadd = job_maker.make('assembleCoadd', dataId=dataId,\n options=options)\n dax.depends(assembleCoadd, makeTempExpCoadd)\n\n detectCoaddSources = job_maker.make('detectCoaddSources',\n dataId=dataId)\n dax.depends(detectCoaddSources, assembleCoadd)\n dax.depends(mergeDetections, detectCoaddSources)\n\n # Make a separate loop over filters for measureCoadd job\n # since it will take place after mergeDetections has run on\n # all filters.\n for 
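# NOTE (editor): the linked-list Stack above raises AttributeError in pop()
# when exactly one node is stored (the traversal walks past the single node)
# and never resets `bottom` when the stack empties; a corrected sketch of the
# method, keeping the original class layout:
def pop(self):
    self.__isEmpty__()
    popped = self.top
    if self.bottom is self.top:
        # Single element: the stack becomes empty.
        self.bottom = self.top = None
    else:
        current = self.bottom
        while current.next is not self.top:
            current = current.next
        current.next = None
        self.top = current
    return popped.value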
filt in filter_list():\n dataId = dict(patch=patch, tract=tract, filter=filt)\n measureCoadd = job_maker.make('measureCoadd', dataId=dataId)\n dax.depends(measureCoadd, mergeDetections)\n\n# Forced photometry on data for each visit.\nfor visit in visit_list(output_repo):\n for raft in raft_list(visit):\n for sensor in sensor_list(visit, raft):\n dataId = dict(visit=visit, raft=raft, sensor=sensor)\n forcedPhotCcd = job_maker.make('forcedPhotCcd', dataId=dataId)\n\ndaxfile = 'L2.dax'\nwith open(daxfile, 'w') as f:\n dax.writeXML(f)\n","sub_path":"DM_Level_2/L2_daxgen.py","file_name":"L2_daxgen.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"38784284","text":"from app.engine.sound import SOUNDTHREAD\nfrom app.engine import action, engine\nfrom app.engine.game_state import game\n\nclass DeathManager():\n def __init__(self):\n self.dying_units = {}\n\n def should_die(self, unit):\n unit.is_dying = True\n self.dying_units[unit.nid] = 0\n\n def miracle(self, unit):\n unit.is_dying = False\n if unit.nid in self.dying_units:\n del self.dying_units[unit.nid]\n unit.sprite.flicker.clear()\n unit.sprite.change_state('normal')\n unit.sprite.set_transition('normal')\n\n def force_death(self, unit):\n unit.is_dying = False\n action.do(action.Die(unit))\n if unit.nid in self.dying_units:\n del self.dying_units[unit.nid]\n\n def update(self) -> bool:\n current_time = engine.get_time()\n for unit_nid in list(self.dying_units.keys()):\n death_counter = self.dying_units[unit_nid]\n unit = game.get_unit(unit_nid)\n if death_counter == 0:\n SOUNDTHREAD.play_sfx('Death')\n unit.sprite.start_flicker(0, unit.sprite.default_transition_time, (255, 255, 255), fade_out=False)\n unit.sprite.set_transition('fade_out')\n self.dying_units[unit_nid] = engine.get_time()\n\n elif current_time - death_counter >= unit.sprite.default_transition_time - 50:\n self.force_death(unit)\n\n return not self.dying_units # Done when no dying units left\n\n def is_dying(self, unit):\n return unit.nid in self.dying_units\n","sub_path":"app/engine/death.py","file_name":"death.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"408465711","text":"import random\n\nn = 5 # random.randint(1, 100)\n\nvseplus = True\nvseminus = True\nestnol = False\n\nspisok = list(random.uniform(-100, 100) for i in range(n+1))\n\nfor i in spisok:\n if i < 0:\n vseplus = False\n if i > 0:\n vseminus = False\n if i == 0:\n estnol = True\n\nprint (spisok)\nprint (\"Все числа положительные: \", vseplus)\nprint (\"Все числа отрицательные: \", vseminus)\nprint (\"В списке есть нулевой элемент: \", estnol)\nprint (\"----------------------------------------\")\n\n# True - число положительное, False - отрицательное\nspisok1 = []\nspisok2 = []\nfor i in spisok:\n if i < 0:\n spisok1.append(False)\n spisok2.append(True)\n elif i > 0:\n spisok1.append(True)\n spisok2.append(False)\n\nprint (\"True - число положительное, False - отрицательное\")\nprint (spisok1)\nprint (\"Все числа положительные: \", all(spisok1))\nprint (\"Все числа отрицательные: \", all(spisok2))\nprint (\"В списке есть нулевой элемент: \", estnol)\n","sub_path":"04-chapter/4_2_33_spiski.py","file_name":"4_2_33_spiski.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103811053","text":"import sys\ndic = 
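# NOTE (editor): collections.Counter expresses the tree-census tally that
# follows more directly; a self-contained sketch with the same 4-decimal
# percentage output.
import sys
from collections import Counter

names = [line.rstrip() for line in sys.stdin if line.rstrip()]
counts = Counter(names)
for name in sorted(counts):
    print(f'{name} {counts[name] / len(names) * 100:.4f}')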
{}\ncnt = 0\n\nwhile(True):\n treeName = sys.stdin.readline().rstrip()\n if not treeName:\n break\n if treeName in dic:\n dic[treeName] += 1\n else:\n dic[treeName] = 1\n cnt += 1\nanswer = sorted(dic.items(), key=(lambda x: x[0]))\nfor tree in answer:\n print(f'{tree[0]} {(tree[1]/cnt*100):.4f}')\n","sub_path":"0119/4358_T1061.py","file_name":"4358_T1061.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358902932","text":"# Copyright (c) 2018 Intel Corp Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport hashlib\nimport os\n\ndef _hash_value_for_file(f, hash_function, block_size=2**20):\n while True:\n data = f.read(block_size)\n if not data:\n break\n hash_function.update(data)\n\n return hash_function.hexdigest()\n\n\ndef cal_file_hash(root, path, algo):\n with open(os.path.join(root, path), 'rb') as fp:\n h = hashlib.new(algo)\n return _hash_value_for_file(fp, h)\n\n","sub_path":"vnfsdk_pkgtools/packager/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"382647158","text":"#!/usr/bin/env python3\nimport boto3\nimport time\nimport sys\n\n#scripts to -connect to ec2 basing on user level aws config in ~\n# - start ec2 instances\n# - block until all instances are RUNNING (for ec2) and then print to stdout\n# - integrable with terminal spawn script\n\nec2Client=boto3.client(\"ec2\")\nINSTANCE_CHECK_POLLING_TIME=3\nEC2_RUNNING_CODE=16\ndef startInstancePortRelay():\n #start port forward relay\n launchTemplate= LaunchTemplate={\n 'LaunchTemplateName': 'port_forward_relay_server',\n 'Version': '6'\n }\n response=ec2Client.run_instances(MaxCount=1,MinCount=1,LaunchTemplate=launchTemplate)\n for instance in response[\"Instances\"]:\t#single line\n return instance[\"InstanceId\"]\t\t\t\t#requested just 1 instance\n\ndef startMaster(userDataScriptPath):\n #start ec2 instance with user data loaded from argument path\n userDataFile=open(userDataScriptPath)\n userData=userDataFile.read()\n userDataFile.close()\n launchTemplate= LaunchTemplate={\n 'LaunchTemplateName': 'EC2-INIT-S3DOWN-SCRIPT',\n #'Version': '1'\n 'Version': '3'\n }\n #response=ec2Client.run_instances(MaxCount=1,MinCount=1,LaunchTemplate=launchTemplate,UserData=userDataScriptPath)\n response=ec2Client.run_instances(MaxCount=1,MinCount=1,LaunchTemplate=launchTemplate,UserData=userData)\n instancesIds=list()\n for instance in response[\"Instances\"]:\n instancesIds.append(instance[\"InstanceId\"])\n print(\"STARTED MASTER INSTANCE\")\n return instancesIds\n\n\n#INSTANCE_TYPE=\"t3.nano\"\nINSTANCE_TYPE=\"\"\ndef startInstances(num):\n #start num ec2 instances\n launchTemplate= LaunchTemplate={\n 'LaunchTemplateName': 'EC2-INIT-S3DOWN-SCRIPT',\n #'Version': '1'\n 'Version': '3'\n }\n if INSTANCE_TYPE!=\"\":\n 
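# NOTE (editor): a usage sketch for the cal_file_hash helper above, assuming
# it is importable; a throwaway file is created so the call has data to
# digest, and 'sha256' is one of the names hashlib.new() accepts.
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    with open(os.path.join(root, 'example.bin'), 'wb') as f:
        f.write(b'hello world')
    print(cal_file_hash(root, 'example.bin', 'sha256'))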
response=ec2Client.run_instances(MaxCount=num,MinCount=num,InstanceType=INSTANCE_TYPE,LaunchTemplate=launchTemplate)\n else:\n response=ec2Client.run_instances(MaxCount=num,MinCount=num,LaunchTemplate=launchTemplate)\n instancesIds=list()\n for instance in response[\"Instances\"]:\n instancesIds.append(instance[\"InstanceId\"])\n print(\"STARTED \",num,\" INSTANCES\")\n return instancesIds\n\ndef waitForReadyInstance(instanceId):\n #wait for instanceId ready among started ec2 istances and return his public dns name\n while 96>0:\n resp= ec2Client.describe_instances(InstanceIds=[instanceId]) \n instances= resp[\"Reservations\"][0][\"Instances\"]\n for instance in instances:\n if instance[\"InstanceId\"] == instanceId and instance[\"State\"][\"Code\"]==EC2_RUNNING_CODE:\n return instance[\"PublicDnsName\"]\n time.sleep(INSTANCE_CHECK_POLLING_TIME)\n\n\ndef waitForReadyInstances(instancesId):\n #wait for instanceId ready among started ec2 istances and return his public dns name\n nonReadyInstances=instancesId\n readyInstancesHostNames=list()\n while len(readyInstancesHostNames) 50:\r\n u.append(i)\r\n data_surp = data_expl[data_expl['user_id'].isin(u)]\r\n data_surp = data_surp.sort_values('user_id').reset_index(drop=True)\r\n\r\n # Books which have more than 5 number of ratings\r\n y = data_surp.groupby(['ISBN'])\r\n b = []\r\n for i, j in y:\r\n if j.shape[0] > 10:\r\n b.append(i)\r\n data_surp = data_surp[data_surp['ISBN'].isin(b)]\r\n data_surp = data_surp.sort_values('user_id').reset_index(drop=True)\r\n\r\n return data_surp\r\n\r\n\r\ndef explicit_implicit_transform(data):\r\n # returns two dataframes, one with explicit rating and one with implicit rating.\r\n data_expl = data[data.bookRating != 0]\r\n data_impl = data[data.bookRating == 0]\r\n return data_expl, data_impl\r\n\r\n\r\ndef pre_process_merge_pipeline(books, users, ratings):\r\n books = pre_process_books(books)\r\n users = pre_process_users(users)\r\n ratings = pre_process_rating(ratings)\r\n\r\n # Removing duplicates in users\r\n users = users[users.user_id.isin(ratings.user_id)]\r\n\r\n ratings_books = pd.merge(ratings, books, on='ISBN')\r\n\r\n # Replacing not defined publishers and authors.\r\n ratings_books.book_author.fillna('unknown', inplace=True)\r\n ratings_books.publisher.fillna('unknown', inplace=True)\r\n\r\n data = pd.merge(ratings_books, users, on='user_id')\r\n return data\r\n\r\n\r\ndef pre_process_users(users):\r\n # Not much handling for user_id\r\n users.user_id = users.user_id.astype(int)\r\n\r\n # Too many na values for age, so created a normal distribution for na values.\r\n users.age = users.age.astype(float)\r\n users.loc[(users.age > 99) | (users.age < 5), 'age'] = np.nan\r\n # create a normal disgtribution pd.Series to fill Nan values with because you cannot just replace it with mean\r\n # as there are a large no. of nan values.\r\n rand_dist = pd.Series(np.random.normal(loc=users.age.mean(),\r\n scale=users.age.std(),\r\n size=users.user_id[users.age.isna()].count()))\r\n # Eliminating the negative values in the random distribution\r\n age_series = np.abs(rand_dist)\r\n # sorting users Df so as NaN values in age to be first\r\n # reset index to match with index of age_series\r\n # Then use fillna()\r\n users = users.sort_values('age', na_position='first').reset_index(drop=True)\r\n users.age.fillna(age_series, inplace=True)\r\n # replace values < 5 with the mean(). 
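# NOTE (editor): a compact, self-contained sketch of the age-imputation
# strategy used in pre_process_users above - draw replacements for missing
# ages from a normal distribution fitted to the observed ages, then keep them
# inside a plausible range.
import numpy as np
import pandas as pd

ages = pd.Series([23, 35, np.nan, 41, np.nan, 29], dtype=float)
fill = np.random.normal(ages.mean(), ages.std(), size=ages.isna().sum())
ages.loc[ages.isna()] = np.clip(np.abs(fill), 5, 99)
ages = ages.round().astype(int)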
Round values and convert them to int.\r\n users.loc[users.age < 5, 'age'] = users.age.mean()\r\n users.age = users.age.round().astype(int)\r\n # Sort users based on user_id so as to be the same as before\r\n users = users.sort_values('user_id').reset_index(drop=True)\r\n\r\n # Dropping users location because\r\n users.drop('location', axis=1, inplace=True)\r\n\r\n return users\r\n\r\n\r\ndef pre_process_books(books):\r\n # Dropping unneccesary\r\n books.drop(['img_s', 'img_m', 'img_l'], axis=1, inplace=True)\r\n books.year_of_publication = books.year_of_publication.astype(int)\r\n\r\n # Replacing na values\r\n books.loc[187701, 'book_author'] = \"n/a\"\r\n books.loc[[128897, 129044], 'publisher'] = \"NovelBooks, Inc\"\r\n books.loc[(books.year_of_publication > 2010) | (books.year_of_publication < 1500), 'year_of_publication'] = np.nan\r\n books.year_of_publication.fillna(round(books.year_of_publication.mean()), inplace=True)\r\n\r\n # Changing dtype to save memory\r\n books.year_of_publication = books.year_of_publication.astype(int)\r\n\r\n ## REMOVING DUPLICATE VALUES\r\n books = books.drop_duplicates(['book_title', 'book_author'])\r\n\r\n return books\r\n\r\n\r\ndef pre_process_rating(ratings):\r\n ratings = ratings[ratings.ISBN.isin(books.ISBN)]\r\n\r\n return ratings\r\n\r\n\r\ndata_surp, data = get_data_surp_scratch(books, users, ratings)\r\n\r\n\r\n# Conversion and History Functions\r\ndef get_book_title(ISBN):\r\n return ((data_surp.loc[(data_surp.ISBN == str(ISBN)), 'book_title']).reset_index(drop=True).iloc[0])\r\n\r\n\r\ndef get_book_id(book_title):\r\n return data_surp.loc[(data_surp.book_title == str(book_title)), 'ISBN'].reset_index(drop=True).iloc[0]\r\n\r\n\r\ndef get_rated_books_list(user_id):\r\n book_list = []\r\n x = (data_surp.loc[(data_surp.user_id == int(user_id)), 'ISBN'].tolist())\r\n for i in range(0, len(x)):\r\n book_list.append(get_book_title(x[i]))\r\n return book_list\r\n\r\n\r\napp = Flask(__name__, template_folder='template')\r\nuser_cf = load('usercf.pkl')\r\nitem_cf = load('itemcf.pkl')\r\nusers_list = list(data_surp.user_id.unique())\r\nbooks_list = list(data_surp.ISBN.unique())\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n\r\n@app.route('/user_based')\r\ndef user_based():\r\n return render_template('usercf.html')\r\n\r\n\r\n@app.route('/item_based')\r\ndef item_based():\r\n return render_template('itemcf.html')\r\n\r\n\r\n@app.route('/usercf', methods=['POST'])\r\ndef usercf():\r\n values = [int(x) for x in request.form.values()]\r\n n = values[0]\r\n user_id = values[1]\r\n\r\n if user_id in users_list:\r\n # First map the predictions to each user.\r\n top_n = defaultdict(list)\r\n for uid, iid, true_r, est, _ in user_cf:\r\n top_n[uid].append((iid, est))\r\n\r\n # Then sort the predictions for each user\r\n for uid, user_ratings in top_n.items():\r\n user_ratings.sort(key=lambda x: x[1], reverse=True)\r\n\r\n all_pred = top_n\r\n\r\n for uid, user_ratings in all_pred.items():\r\n user_ratings.sort(key=lambda x: x[1], reverse=True)\r\n all_pred[uid] = user_ratings[:n]\r\n\r\n tmp = pd.DataFrame.from_dict(all_pred)\r\n tmp_transpose = tmp.transpose()\r\n\r\n results = tmp_transpose.loc[user_id]\r\n recommended_book_ids = []\r\n recommended_book_titles = []\r\n\r\n for x in range(0, n):\r\n recommended_book_ids.append(results[x][0])\r\n\r\n for i in range(0, len(recommended_book_ids)):\r\n recommended_book_titles.append(get_book_title(recommended_book_ids[i]))\r\n\r\n\r\n else:\r\n ratings_count = 
pd.DataFrame(data_surp.groupby(['ISBN'])['bookRating'].sum())\r\n topn = ratings_count.sort_values('bookRating', ascending=False).head(n)\r\n print(\"Following books are recommended:\")\r\n topn = topn.merge(data_surp, left_index=True, right_on='ISBN')\r\n recommended_book_titles = list(topn.book_title.unique())\r\n\r\n return (render_template('usercf.html', recommendations='Recommended books are: {}'.format(recommended_book_titles),\r\n history='History of user: {}'.format(get_rated_books_list(user_id))))\r\n\r\n\r\n@app.route('/itemcf', methods=['POST'])\r\ndef itemcf():\r\n values = [x for x in request.form.values()]\r\n n = int(values[0])\r\n book_title = values[1]\r\n\r\n # Retrieve inner id of the book\r\n book_raw_id = get_book_id(book_title)\r\n\r\n if book_raw_id in books_list:\r\n book_inner_id = item_cf.trainset.to_inner_iid(book_raw_id)\r\n # Retrieve inner ids of the nearest neighbors of book.\r\n book_neighbors = item_cf.get_neighbors(book_inner_id, k=n)\r\n\r\n # Convert inner ids of the neighbors into names.\r\n book_neighbors = (item_cf.trainset.to_raw_iid(inner_id)\r\n for inner_id in book_neighbors)\r\n book_neighbors = (get_book_title(rid)\r\n for rid in book_neighbors)\r\n\r\n books_rec = []\r\n for book_title in book_neighbors:\r\n books_rec.append(book_title)\r\n\r\n # else:\r\n # books_rec='None'\r\n\r\n return (render_template('itemcf.html', recommendations=books_rec))\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"431382632","text":"#_*_coding:utf-8_*_\n# arduinoからのデータを受け取り,処理する\nimport serial\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Variables\nser = serial.Serial('/dev/cu.usbmodem1421', 9600) # portとbaudを指定\nt = np.zeros(1024) # 全ての要素が0の配列を生成(ここでは1024個分), 横軸の値\ny = np.zeros(1024) # 縦軸の値\n\n# plotの準備\nplt.ion() # インタラクティブモードをオンにする\nplt.figure() # 何も描画されていないウィンドウを描画\nli, = plt.plot(t, y) # t,yを描画\nplt.ylim(0, 5) # y軸の範囲(limit)の設定\nplt.xlabel(\"time[s]\") # x軸の名称\nplt.ylabel(\"Voltage[V]\") # y軸の名称\nser.write(\"*\".encode()) #おまじない\ndata = ser.readline().strip().rsplit()\n# .readline() -> ファイルを全て読み込み1行毎に処理を行う\n# .strip -> 指定した文字の削除(空なら空白)\n# .rsplit() -> 指定した文字を右から分割(空なら空白)\ntInt = float(data[0]) # 実数に変換\n\nwhile True:\n #try:\n ser.write(\"*\".encode())\n data = ser.readline().strip().rsplit()\n # 配列をキューと見たてて要素を追加・削除\n t = np.append(t, (float(data[0])-tInt)/10**6) # 新たな配列を追加して多次元配列を作る\n t = np.delete(t, 0) # 配列の特定の要素を削除(配列名, インデックス, 行列)\n y = np.append(y, float(data[0])*5/1023)\n y = np.delete(y, 0)\n li.set_xdata(t)\n li.set_ydata(y) \n plt.xlim(min(t), max(t))\n plt.draw()\n\"\"\"\n except:\n None\n break\n ser.close()\n\"\"\"\n","sub_path":"python/matplotlib/arduino_plot.py","file_name":"arduino_plot.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"85106440","text":"import openpyxl\nimport tkinter as tk\nfrom tkinter import ttk\nfrom selenium import webdriver\n\n# -----------------------------\n# Excel Method\n# -----------------------------\n\n\ndef update_excel(number_followers, identifier):\n if identifier == 'Twitch':\n col = 2\n else:\n col = 3\n\n excel = openpyxl.load_workbook('Liquid_Twitch_Stats.xlsx')\n print('Opening Workbook')\n\n # Get the sheet to work with\n sheet = excel['Sheet1']\n\n # loop over the rows in the sheet\n for 
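# NOTE (editor): np.append/np.delete in the serial-plotting loop above
# re-allocate the whole array on every sample; collections.deque with maxlen
# keeps the same rolling window without copies - a sketch of the idea:
from collections import deque
import numpy as np

window = deque(np.zeros(1024), maxlen=1024)
window.append(3.3)         # the oldest sample drops out automatically
y = np.asarray(window)     # convert when handing the data to matplotlib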
rowNum in range(2, sheet.max_row + 2):\n print(rowNum)\n player_name = sheet.cell(rowNum, column=col).value\n print(player_name)\n if player_name == username:\n sheet.cell(rowNum, column=col).value = number_followers\n break\n elif player_name is None:\n sheet.cell(rowNum, column=1).value = username\n sheet.cell(rowNum, column=col).value = number_followers\n break\n\n excel.save('Liquid_Twitch_Stats.xlsx')\n\n# -----------------------------\n# Get Number of Followers\n# -----------------------------\n\n\ndef get_twitch():\n identifier = 'Twitch'\n browser = webdriver.Chrome()\n browser.get(\"https://www.twitch.tv/\" + username + '/followers')\n browser.maximize_window()\n browser.implicitly_wait(5)\n\n followers = browser.find_element_by_xpath(\n '//*[@id=\"root\"]/div/div[2]/div/main/div[1]/div/div[1]/div[1]/a[5]/div/div/span')\n number_followers = followers.text\n print(followers.text)\n browser.close()\n update_excel(number_followers, identifier)\n\n\ndef get_twitter():\n identifier = 'Twitter'\n browser = webdriver.Chrome()\n browser.get(\"https://www.twitter.com/\" + username)\n browser.maximize_window()\n browser.implicitly_wait(2)\n\n followers = browser.find_element_by_css_selector('#page-container > div.ProfileCanopy.ProfileCanopy--withNav.js-variableHeightTopBar '\n '> div > div.ProfileCanopy-navBar.u-boxShadow > div > div > '\n 'div.Grid-cell.u-size2of3.u-lg-size3of4 > div > div > ul > li.ProfileNav-item.ProfileNav-item--followers '\n '> a > span.ProfileNav-value')\n number_followers = followers.text\n print(followers.text)\n browser.close()\n update_excel(number_followers, identifier)\n# -----------------------------\n# GUI\n# -----------------------------\n\n# Method to get site/name\ndef callback():\n global chosen_site\n global username\n username = user_entry.get()\n chosen_site = site.get()\n print(chosen_site + \" \" + username)\n\n if chosen_site == 'Twitch':\n get_twitch()\n elif chosen_site == 'Twitter':\n get_twitter()\n\n\nroot = tk.Tk()\nroot.title(\"Follower Scraper\")\nroot.geometry('200x200')\n\n# Frame to work on\nframe = ttk.Frame(root)\nframe.pack()\n\n# Main Label\nheader_label = ttk.Label(frame, text=\"Follower Scraper\", font=('Arial', 14))\nheader_label.grid(row=0, column=0)\n\n# Site Label\nsite_label = ttk.Label(frame, text='Choose the site to search', font=('Arial', 10))\nsite_label.grid(row=1, column=0)\n\n# Drop Down Menu\nsites = ('Twitter', 'Twitch')\nsite = ttk.Combobox(frame, values=sites)\nsite.grid(row=2, column=0)\n\n# Username Label\nusername_label = ttk.Label(frame, text='Enter username')\nusername_label.grid(row=3, column=0)\n\n# Entry Widget\nuser_entry = ttk.Entry(frame)\nuser_entry.grid(row=4, column=0)\n\n# Enter Button\nenter_button = ttk.Button(frame, text='Search', command=callback)\nenter_button.grid(row=5, column=0)\n\nroot.mainloop()\n","sub_path":"FollowingScrapper.py","file_name":"FollowingScrapper.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"590047718","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
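# NOTE (editor): a minimal self-contained openpyxl sketch of the update idiom
# in the spreadsheet loop above; cell() reads clearest with both row and
# column passed by keyword, and the save path here is hypothetical.
import openpyxl

wb = openpyxl.Workbook()
ws = wb.active
ws.cell(row=2, column=1, value='username')
ws.cell(row=2, column=2, value=1234)   # follower count
wb.save('/tmp/follower_stats.xlsx')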
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n\nimport shutil\nimport datetime\nimport re\nimport inspect\nimport os\nimport sys\nimport polaroid\nimport semantic_version\n\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\ndocssrc_dir = os.path.abspath(os.path.join(__file__, \"..\"))\nproject_dir = os.path.dirname(docssrc_dir)\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.abspath('extensions'))\n# -- Project information -----------------------------------------------------\n\nproject = 'polaroid'\ncopyright = '2021, Daggy1234'\nauthor = 'Daggy1234'\nyear = datetime.date.today().year\n\n# The full version, including alpha/beta/rc tags\nwith open('../polaroid/__init__.py') as f:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', f.read(),\n re.MULTILINE).group(1)\nrelease = version\n\n# -- Setup -------------------------------------------------------------------\n\n\ndef setup(app):\n # Add custom signature inspector support *argument-clinic* signatures.\n def inspector(app, what, name, obj, options, signature, return_annotation):\n if signature is not None:\n return signature, return_annotation\n try:\n sig = inspect.signature(obj)\n return str(sig), return_annotation\n except (ValueError, TypeError):\n return None, return_annotation\n\n app.connect('autodoc-process-signature', inspector)\n\n# -- General configuration ---------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"nbsphinx\"\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_suffix = [\".rst\", \".md\"]\n\nmaster_doc = \"index\"\n\npygments_style = \"emacs\"\n\ndefault_role = \"py:obj\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_book_theme'\nhtml_title = \"polaroid\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for imgmath extension -------------------------------------------\n\nimgmath_image_format = \"svg\"\n\n# -- Options for napoleon extension ------------------------------------------\n\nnapoleon_include_init_with_doc = True\nnapoleon_include_special_with_doc = True\nnapoleon_include_private_with_doc = True\nnapoleon_use_admonition_for_examples = True\nnapoleon_use_admonition_for_notes = True\nnapoleon_use_admonition_for_references = True\nnapoleon_use_rtype = False\n\n# -- Options for autodoc extension -------------------------------------------\n\nautoclass_content = \"class\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\nhtml_theme_options = {\n \"toc_title\": f\"Polaroid {version}\",\n \"repository_url\": \"https://github.com/daggy1234/polaroid\",\n \"use_issues_button\": True,\n}\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"17752413","text":"import cv2\nimport numpy as np\nimport copy\n\ndef blend(x,y):\n\timg = copy.copy(x)\n\th = img.shape[0]\n\tw = img.shape[1]\n\tfor j in range(w):\n\t\tfor i in range(h):\n\t\t\tfor c in range(3):\n\t\t\t\timg[i,j][c] = (x[i,j][c]*0.1)+(y[i,j][c]*0.9)\n\treturn img\n\n\nx = cv2.imread('bob.jpg',1)\ny = cv2.imread('bob180.jpg',1)\n\nblend = blend(x,y)\n\ncv2.imshow('Blending', blend)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"old/blending.py","file_name":"blending.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"361709942","text":"import requests\r\n# 웹에서 url을 통해 html을 불러옴\r\nres=requests.get(\"http://google.com\")\r\nprint(\"응답코드: \",res.status_code) #그 서버의 응압상태? 200은 정상 403 접근 권한 무\r\n\r\n# 응답코드에 따라 정상이 아니면 에러를 띄우도록 if문\r\n# if res.status_code==requests.codes.ok\r\n# 그대신 특정함수 사용 정상이 아니면 에러가 나옴\r\nres.raise_for_status\r\n\r\n# 구글 html을 불러와 파일로 만들기\r\n\r\nwith open(\"mygoogle.html\",\"w\",encoding=\"utf8\") as f:\r\n f.write(res.text)","sub_path":"webscraping_basic/3_requests.py","file_name":"3_requests.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"193163433","text":"# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# https://github.com/brkt/brkt-cli/blob/master/LICENSE\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
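# NOTE (editor): in the requests snippet above, `res.raise_for_status` is
# referenced without parentheses, so it never executes; the call form raises
# requests.HTTPError on 4xx/5xx responses:
import requests

res = requests.get("http://google.com")
res.raise_for_status()   # note the parentheses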
See the\n# License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\n\nfrom brkt_cli import crypto\nfrom brkt_cli.validation import ValidationError\n\n\ndef add_out(parser):\n \"\"\" Add the --out argument, for writing command output to a file instead\n of stdout.\n \"\"\"\n parser.add_argument(\n '--out',\n metavar='PATH',\n help=(\n 'Write to a file instead of stdout. This can be used to avoid '\n 'character encoding issues when redirecting output on Windows.'\n )\n )\n\n\ndef add_brkt_tag(parser):\n parser.add_argument(\n '--brkt-tag',\n metavar='NAME=VALUE',\n dest='brkt_tags',\n help=(\n 'Bracket tag which will be embedded in the JWT as a claim. All '\n 'characters must be alphanumeric or [-_.]. The tag name cannot '\n 'be a JWT registered claim name (see RFC 7519).'),\n action='append'\n )\n\n\ndef add_exp(parser):\n parser.add_argument(\n '--exp',\n metavar='DURATION',\n help='Token expiry time duration in the format N[dhms] (e.g. 12h)'\n )\n\n\ndef add_root_url(parser, cli_config):\n \"\"\" Add the --root-url argument, for specifying the Yeti public API\n endpoint. \"\"\"\n _, env = cli_config.get_current_env()\n default_url = 'https://%s:%d' % (\n env.public_api_host, env.public_api_port)\n parser.add_argument(\n '--root-url',\n metavar='URL',\n default=default_url,\n help='Bracket service root URL'\n )\n\n\ndef _validate_cert_path(path):\n try:\n crypto.validate_cert_path(path)\n except ValidationError as e:\n raise argparse.ArgumentTypeError(e.message)\n return path\n\n\ndef add_public_api_ca_cert(parser, cli_config=None):\n default_path = None\n if cli_config:\n _, env = cli_config.get_current_env()\n default_path = env.public_api_ca_cert_path\n\n parser.add_argument(\n '--public-api-ca-cert',\n metavar='PATH',\n default=default_path,\n type=_validate_cert_path,\n help=(\n 'Root X.509 CA certificate for a Customer Managed MCP in PEM '\n 'format.'\n )\n )\n","sub_path":"brkt_cli/argutil.py","file_name":"argutil.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"90081260","text":"#!/usr/bin/env python3\nimport neovim\nimport time\nimport logging\n\nimport inotify.adapters\n\n# mode => truncate\nch = logging.FileHandler(\"/home/teto/autoread.log\", mode=\"w+\")\n\n# \nlog = logging.getLogger('PyInotify')\nlog.setLevel(logging.DEBUG)\nlog.addHandler(ch)\n\nlog = logging.getLogger('autoread')\nlog.setLevel(logging.DEBUG)\n\n\n# formatter = logging.Formatter()\n# ch.setFormatter(formatter)\n\nlog.addHandler(ch)\n\n@neovim.plugin\nclass Limit(object):\n def __init__(self, nvim):\n self.nvim = nvim\n self.calls = 0\n self.i = inotify.adapters.Inotify()\n\n# BufDelete BufUnload ?\n @neovim.autocmd('BufAdd', pattern='*', eval='expand(\":p\")', sync=False)\n def add_buf(self, filename, ):\n log.debug(\"Adding to list %s\" % filename)\n # print(eval)\n try:\n # here we pass on a mask inotify.constants.\n # see https://github.com/dsoprea/PyInotify/blob/master/inotify/constants.py#L6\n # or man inotify\n mask = inotify.constants.IN_DELETE | inotify.constants.IN_DELETE_SELF\n self.i.add_watch(filename.encode(), mask=mask)\n except Exception as e:\n log.error(\"Error during add for buffer %s : %s\" % (filename, e))\n\n @neovim.autocmd('BufDelete', pattern='*', eval='expand(\":p\")', sync=False)\n def remove_buf(self, filename):\n log.debug(\"removing from watching list %s\" % filename)\n # print(eval)\n try:\n 
self.i.remove_watch(filename.encode())\n except Exception as e:\n log.error(\"Error for buffer %s : %s\" % (filename, e))\n\n # @neovim.autocmd('BufAdd', pattern='*.py', eval='expand(\"\")', sync=False)\n # def update_list(self, args):\n # log.debug(\"Adding to list\")\n\n # @neovim.function('AutoreadLoop', sync=True)\n @neovim.command('Toto', sync=False)\n #, range='', nargs='*', \n def command_handler(self): #, args, range):\n log.debug(\"Started loop\")\n # self.vim.command(\"checktime\")\n\n # time.sleep(2);\n log.debug(\"One loop\")\n\n try:\n for event in self.i.event_gen():\n if event is not None:\n (header, type_names, watch_path, filename) = event\n log.info(\"WD=(%d) MASK=(%d) COOKIE=(%d) LEN=(%d) MASK->NAMES=%s \"\n \"WATCH-PATH=[%s] FILENAME=[%s]\",\n header.wd, header.mask, header.cookie, header.len, type_names,\n watch_path.decode('utf-8'), filename.decode('utf-8'))\n self.nvim.command(\"checktime\")\n self.nvim.command(\"echom hello\")\n finally:\n i.remove_watch(b'/tmp')\n","sub_path":"rplugin/python3/autoreload.py","file_name":"autoreload.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"197632386","text":"dealer_won_last = False\nplayer_won_last = False\nweird = 0\nplayer_money = 500\nbet = 25\nlast_bet = 25\nplayer_won = 0\ndealer_won = 0\nplayer_bankrupt = 0\nfv = open(\"wins.txt\", \"r\")\n\nline = fv.readline()\nwhile line != \"\":\n\tif bet > last_bet:\n\t\tlast_bet = bet\n\tif dealer_won_last:\n\t\tbet = bet*2\n\t\tif bet > player_money:\n\t\t\tbet = player_money\n\t\t\tlast_bet = bet\n\t\tdealer_won_last = False\n\tif player_won_last:\n\t\tbet = 25\n\t\tlast_bet = bet\n\t\tplayer_won_last = False\n\tif player_money < 0:\n\t\tplayer_bankrupt += 1\n\t\tplayer_money = 500\n\tline = line.split(\"-\")\n\tplayer_sum = line[1].split(\":\")\n\tplayer_sum = int(player_sum[1])\n\tdealer_sum = line[3].split(\":\")\n\tdealer_sum = int(dealer_sum[1])\n\tif player_sum == dealer_sum and dealer_sum <= 21 or player_sum > 21 and dealer_sum <= 21:\n\t\tdealer_won += 1\n\t\tplayer_money -= bet\n\t\tdealer_won_last = True\n\telif player_sum <= 21:\n\t\tplayer_won += 1\n\t\tplayer_money += bet\n\t\tplayer_won_last = True\n\telse:\n\t\tweird += 1\n\t\tprint(line)\n\tline = fv.readline()\nfv.close()\n\nplayer_money = player_money - (player_bankrupt*500)\n\nprint(\"Player Ended with {}\".format(player_money))\nprint(\"Player Bankrupt {} times\".format(player_bankrupt))\nif weird > 0:\n\t(\"Everyone Lost? 
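# NOTE (editor): in the autoreload plugin above, the cleanup clause
#     finally:
#         i.remove_watch(b'/tmp')
# references a bare name `i` that does not exist in that method's scope; the
# inotify watcher lives on the instance, so this presumably wants
# `self.i.remove_watch(b'/tmp')` (and the watched path rather than a fixed b'/tmp').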
{}\".format(weird))\nprint(\"Player Won {}\".format(player_won))\nprint(\"Dealer Won {}\".format(dealer_won))","sub_path":"Blackjack/AI_Battle_Analyzer.py","file_name":"AI_Battle_Analyzer.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"366606253","text":"import tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist.input_data import read_data_sets\r\nimport p74_framework as myf\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nclass MyConfig(myf.Config):\r\n def __init__(self):\r\n super(MyConfig, self).__init__()\r\n self.base_filter = 16\r\n self.sample_path = '../samples/MNIST_data'\r\n self.imgs_path = './imgs/{name}/test.jpg'.format(name=self.get_name())\r\n self.lr = 1e-6\r\n self.batch_size = 200\r\n self._ds = None\r\n self.vec_size = 4\r\n self.keep_prob = 0.65\r\n\r\n @property\r\n def ds(self):\r\n if self._ds is None:\r\n self._ds = MyDS(read_data_sets(self.sample_path).train, self.vec_size)\r\n return self._ds\r\n\r\n def get_sub_tensors(self, gpu_index):\r\n return MySubTensor(self)\r\n\r\n def get_ds_train(self):\r\n return self.ds\r\n\r\n def get_ds_test(self):\r\n return self.ds\r\n\r\n def get_name(self):\r\n return \"p78\"\r\n\r\n def get_tensors(self):\r\n return MyTensors(self)\r\n\r\n def get_app(self):\r\n return MyApp(self)\r\n\r\n def random_seed(self):\r\n np.random.seed(2389123)\r\n\r\n\r\nclass MySubTensor:\r\n def __init__(self, cfg: MyConfig):\r\n self.cfg = cfg\r\n x = tf.placeholder(tf.float64, [None, 784], \"x\")\r\n v = tf.placeholder(tf.float64, [None, self.cfg.vec_size], \"y\")\r\n self.inputs = [x, v]\r\n\r\n with tf.variable_scope(\"gene\"):\r\n x2 = self.gene(v) # [-1, 28, 28, 1]\r\n self.v = v\r\n self.x2 = tf.reshape(x2, [-1, 28, 28])\r\n\r\n with tf.variable_scope(\"disc\") as scope:\r\n x2_y = self.disc(x2) # 假样本为真的概率\r\n scope.reuse_variables()\r\n x = tf.reshape(x, [-1, 28, 28, 1])\r\n x_y = self.disc(x) # 真样本为真的概率\r\n\r\n loss1 = -tf.reduce_mean(tf.log(x_y)) # 训练disc 标签 1\r\n loss2 = -tf.reduce_mean(tf.log(1 - x2_y)) # 训练disc 假样本为假的概率 标签 0\r\n loss3 = -tf.reduce_mean(tf.log(x2_y)) # 训练gene 标签 1\r\n self.losses = [loss1, loss2, loss3]\r\n\r\n def disc(self, x):\r\n # [-1, 28, 28, 1]\r\n filter = self.cfg.base_filter # 16\r\n\r\n x = tf.layers.conv2d(x, filter, 3, 1, \"same\", activation=tf.nn.relu, name=\"conv1\") # [-1, 28, 28, 16]\r\n\r\n for i in range(2):\r\n filter *= 2\r\n x = tf.layers.conv2d(x, filter, 3, 2, \"same\", activation=tf.nn.relu, name=\"conv2%d\" % i) # [-1, 14, 14,\r\n # 32] [-1, 7, 7, 64]\r\n\r\n x = tf.layers.flatten(x) # [-1, 7*7*64]\r\n x = tf.nn.dropout(x, self.cfg.keep_prob)\r\n x = tf.layers.dense(x, 1, name=\"dense\") # [-1, 1]\r\n return tf.nn.sigmoid(x)\r\n\r\n def gene(self, v):\r\n # v shape [-1, 4]\r\n filters = self.cfg.base_filter * 4 # 64\r\n v = tf.layers.dense(v, 7 * 7 * self.cfg.base_filter, name=\"dense\", activation=tf.nn.relu)\r\n v = tf.reshape(v, [-1, 7, 7, filters])\r\n for i in range(2):\r\n filters //= 2\r\n v = tf.layers.conv2d_transpose(v, filters, 3, 2, \"same\", activation=tf.nn.relu, name=\"deconv_%d\" % i)\r\n # v [-1, 28, 28, filters]\r\n v = tf.layers.conv2d_transpose(v, 1, 3, 1, \"same\", name='deconv2')\r\n return v\r\n\r\n\r\nclass MyTensors(myf.Tensors):\r\n def compute_grads(self, opt):\r\n vars = tf.trainable_variables()\r\n var_disc = [var for var in vars if \"disc\" in var.name]\r\n var_gene = [var for var in vars if \"gene\" in var.name]\r\n vars = [var_disc, var_disc, 
var_gene]\r\n grads = [[opt.compute_gradients(loss, vs) for vs, loss in zip(vars, ts.losses)] for ts in\r\n self.sub_ts] # [gpus, losses]\r\n return [self.get_grads_mean(grads, i) for i in range(len(grads[0]))]\r\n\r\n\r\nclass MyDS:\r\n def __init__(self, ds, vec_size):\r\n self.vec_size = vec_size\r\n self.ds = ds\r\n self.num_examples = ds.num_examples\r\n\r\n\r\n def next_batch(self, batch_size):\r\n xs, _ = self.ds.next_batch(batch_size)\r\n vs = np.random.normal(size=[batch_size, self.vec_size])\r\n return xs, vs\r\n\r\n\r\nclass MyApp(myf.App):\r\n def before_epoch(self, epoch):\r\n self.config.random_seed()\r\n\r\n def test(self, ds):\r\n vs = np.random.normal(size=[200, self.config.vec_size])\r\n ts = self.ts.sub_ts[-1]\r\n imgs = self.session.run(ts.x2, {ts.v: vs}) # [-1, 28, 28]\r\n imgs = np.reshape(imgs, [-1, 10, 28, 28])\r\n imgs = np.transpose(imgs, [0, 2, 1, 3])\r\n imgs = np.reshape(imgs, [-1, 10 * 28])\r\n c = cv2.imwrite(self.config.imgs_path, imgs)\r\n\r\n print(\"The photo has been saved....\", c)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n MyConfig().from_cmd()\r\n","sub_path":"p78_GAN_mnist.py","file_name":"p78_GAN_mnist.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"369437267","text":"\"\"\" --- Helper Functions (Elicit intent, elicit slot etc.) --- \"\"\"\n\ndef elicit_intent(session_attributes, message):\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ElicitIntent\",\n \"message\": {\n \"contentType\": \"PlainText\",\n \"content\": message\n }\n },\n }\n\ndef elicit_slot(\n session_attributes, intent_name, slots, slot_to_elicit, message, response_card=None\n):\n if response_card:\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ElicitSlot\",\n \"intentName\": intent_name,\n \"slots\": slots,\n \"slotToElicit\": slot_to_elicit,\n \"message\": message,\n \"responseCard\": response_card,\n },\n }\n else:\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ElicitSlot\",\n \"intentName\": intent_name,\n \"slots\": slots,\n \"slotToElicit\": slot_to_elicit,\n \"message\": message,\n },\n }\n \n\ndef delegate_slot(slots, sessionAttributes, completed):\n output_session_attributes = sessionAttributes\n output_session_attributes[\"Completed\"] = completed\n \n response = {\n \"sessionAttributes\": output_session_attributes,\n \"dialogAction\": {\n \"type\": \"Delegate\",\n \"slots\": slots\n }\n }\n return response\n\n\ndef confirm_intent(session_attributes, intent_name, slots, message, confirmation):\n if(confirmation == \"None\"):\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ConfirmIntent\",\n \"intentName\": intent_name,\n \"slots\": slots,\n \"message\": message\n }\n \n }\n \n elif((confirmation == \"Denied\")):\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"Close\",\n \"fulfillmentState\": \"Failed\",\n \"message\": {\n \"contentType\": \"PlainText\",\n \"content\": \"Okay, your request will not proceed.\"\n }\n }\n \n }\n \n else:\n return delegate_slot(slots, session_attributes, \"confirmed\")\n \n \ndef close(session_attributes, fulfillment_state, message, response_card=None):\n if response_card:\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"Close\",\n \"fulfillmentState\": fulfillment_state,\n 
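# NOTE (editor): a minimal TF1-style sketch (matching the source's API) of
# the variable partition used in MyTensors above - the two discriminator
# losses update only 'disc' variables and the generator loss only 'gene'
# variables; d_loss/g_loss stand in for the losses built in MySubTensor.
import tensorflow as tf

d_vars = [v for v in tf.trainable_variables() if 'disc' in v.name]
g_vars = [v for v in tf.trainable_variables() if 'gene' in v.name]
opt = tf.train.AdamOptimizer(1e-4)
# train_d = opt.minimize(d_loss, var_list=d_vars)
# train_g = opt.minimize(g_loss, var_list=g_vars)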
\"message\": message,\n \"responseCard\": response_card,\n },\n }\n else:\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"Close\",\n \"fulfillmentState\": fulfillment_state,\n \"message\": message,\n },\n }\n\n\ndef delegate(session_attributes, slots):\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\"type\": \"Delegate\", \"slots\": slots},\n }\n\ndef build_validation_result(is_valid, outputDialogMode, violated_slot, message_content):\n if message_content is None:\n return {\"isValid\": is_valid, \"violatedSlot\": violated_slot}\n \n else:\n if outputDialogMode==\"VOICE\":\n return {\n \"isValid\": is_valid,\n \"violatedSlot\": violated_slot,\n \"message\": {\"contentType\": \"SSML\", \"content\": message_content},\n }\n else:\n return {\n \"isValid\": is_valid,\n \"violatedSlot\": violated_slot,\n \"message\": {\"contentType\": \"PlainText\", \"content\": message_content},\n }\n\ndef build_response_card(title, subtitle, options):\n buttons = None\n\n if options is not None:\n buttons = []\n for i in range(min(5, len(options))):\n buttons.append(options[i])\n\n return {\n \"contentType\": \"application/vnd.amazonaws.card.generic\",\n \"version\": 1,\n \"genericAttachments\": [\n {\"title\": title, \"subTitle\": subtitle, \"buttons\": buttons}\n ],\n }\n \n\ndef try_ex(func):\n try:\n return func()\n except KeyError:\n return None\n","sub_path":"src/triggerSurveyFunction/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"197396836","text":"\n# coding: utf-8\n\n\n\n\nfrom pandas import Series\nfrom matplotlib import pyplot\nfrom statsmodels.tsa.ar_model import AR\nfrom sklearn.metrics import mean_squared_error\nimport pandas\n\n\n\n\n#Loading dataset\ndataframe = pandas.read_csv('Gold.csv',usecols=['price'], header=0)\ndataframe=dataframe.dropna(axis=0, how='all')\ndataframe=dataframe.fillna(method='bfill')\nX = dataframe.values\n\n\n#split dataset\ntrain, test = X[1:len(X)-100], X[len(X)-100:]\n\n\n# train autoregression\nmodel = AR(train)\nmodel_fit = model.fit()\nwindow = model_fit.k_ar\ncoef = model_fit.params\n\n# walk forward over time steps in test\nhistory = train[len(train)-window:]\nhistory = [history[i] for i in range(len(history))]\n\n\n#making Predictions\nimport math\npredictions = list()\nfor t in range(len(test)):\n\tlength = len(history)\n\tlag = [history[i] for i in range(length-window,length)]\n\tyhat = coef[0]\n\tfor d in range(window):\n\t\tyhat += coef[d+1] * lag[window-d-1]\n\tobs = test[t]\n\tpredictions.append(yhat)\n\thistory.append(obs)\n\tprint('predicted=%f, expected=%f' % (yhat, obs))\nerror = math.sqrt(mean_squared_error(test, predictions))\nprint('Test MSE: %.3f' % error)\nfrom sklearn.metrics import r2_score\nprint(r2_score(test, predictions))\n\n\n# plotting the results\npyplot.plot(test)\npyplot.plot(predictions, color='green',label='Silver')\npyplot.show()\n\n\n\n\n\n\n\n\n","sub_path":"vector_auto_regression.py","file_name":"vector_auto_regression.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"556875278","text":"from __future__ import unicode_literals, print_function, absolute_import\nfrom builtins import str\nimport requests\n\nbare_url = \"http://api.crossref.org/\"\n\n\ndef get_bib(doi):\n url = \"{}works/{}/transform/application/x-bibtex\"\n url = 
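# NOTE (editor): in the autoregression script above, `error` already takes the
# square root of the mean squared error, so the printed label should read
# RMSE rather than MSE; equivalently:
import numpy as np
from sklearn.metrics import mean_squared_error

y_true, y_pred = [1.0, 2.0, 3.0], [1.1, 1.9, 3.2]
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
print('Test RMSE: %.3f' % rmse)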
url.format(bare_url, doi)\n r = requests.get(url)\n found = False if r.status_code != 200 else True\n bib = r.content\n return found, str(bib, \"utf-8\")\n\n\ndef get_json(doi):\n url = \"{}works/{}\"\n url = url.format(bare_url, doi)\n r = requests.get(url)\n found = False if r.status_code != 200 else True\n item = r.json()\n return found, item\n\n\ndef get_bib_from_doi(doi, abbrev_journal=False):\n\n found, bib = get_bib(doi)\n if found and abbrev_journal:\n\n found, item = get_json(doi)#json vindo errado\n if found:\n abbreviated_journal = item[\"message\"][\"short-container-title\"][0]\n #pegar journal contraido e contrair autores\n # depois fazer um replace no bib com o nome do journal e o\n #contraido, ou usar o bibtexparser(ultima melhor)\n\n return found, bib\n","sub_path":"scripts/doi2bib-0.3.0/doi2bib/crossref.py","file_name":"crossref.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"90721367","text":"from os.path import dirname, join\nfrom kivy.core.image import Image\nfrom kivy.graphics import Color, Rectangle, Line\n\ndef load_texture( fle ):\n return Image(join(dirname(__file__), fle), mipmap=False).texture\n\ndef texture_toggle( fl1, fl2 ):\n texture1 = load_texture( fl1 )\n texture2 = load_texture( fl2 )\n\n def toggle( button, state ):\n if state == 'normal':\n texture = texture1\n else: # state == 'down'\n texture = texture2\n with button.canvas:\n Rectangle(\n texture=texture,\n pos=(button.x+button.width/2.0-16, button.y+button.height/2.0-16),\n size=(32,32)\n )\n\n return toggle\n\n\ndef distance( a, b ):\n return sum ( [ (x1-x2)**2 for x1,x2 in zip( a, b ) ] ) ** .5\n","sub_path":"libs/level_selector/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"390025672","text":"import numpy as np\n\n\n\ndef smooth(input_signal, window_len = 50, window = 'hanning'):\n if input_signal.ndim != 1:\n raise ValueError (\"smooth only accepts 1 dimension arrays.\")\n if window_len < 3:\n return input_signal\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError (\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n sig = np.r_[2*input_signal[0]-input_signal[window_len:1:-1], input_signal, 2*input_signal[-1]-input_signal[-1:-window_len:-1]]\n if window == 'flat': # moving average\n win = np.ones(window_len, 'd')\n else:\n win = eval('np.' 
+ window + '(window_len)')\n # convolution:\n sig_conv = np.convolve(win / win.sum(), sig, mode='same')\n return sig_conv[window_len - 1:-window_len + 1]\n\n\n\ndef RMS_moving_window(signal, window = 200):\n\n #RMS Moving Window\n border_signal = np.zeros(window) #For the beginning of the signal\n sample_number = 0\n n_samples = len(signal)\n RMS_Total = np.zeros(n_samples)\n while sample_number < n_samples:\n process_signal = []\n #for the first 250 samples\n if sample_number <= 250:\n process_signal += list(border_signal[sample_number:window-1])\n process_signal += list(signal[0:sample_number+window-1])\n #for the last 250 samples\n elif sample_number > ((n_samples-1)-window):\n s = (sample_number + window) - n_samples\n process_signal += list(signal[sample_number-window:n_samples-1])\n process_signal += list(border_signal[0:s])\n #for the samples between 250 and length - 250\n else:\n process_signal += list(signal[(sample_number-window):(sample_number+(window-1))])\n RMS_Total[sample_number] = compute_RMS(np.array(process_signal))\n sample_number+=1\n\n return RMS_Total\n\ndef compute_RMS(signal):\n return np.sqrt(sum(signal**2)/len(signal))\n ","sub_path":"process_signals/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156570382","text":"__author__ = 'korablevop'\n\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('app_accounts',\n\n\n\turl(r'^login/$', 'views.login'),\n\turl(r'^logout/$', 'views.logout'),\n\turl(r'^register/$', 'views.register_user'),\n\turl(r'^user_profile/$', 'views.user_profile'),\n\n\n\n\n\n\t)","sub_path":"app_accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"36518429","text":"# encoding:utf-8\n\nimport json\nimport numpy as np\nimport tensorflow as tf\nimport pretreatment as pre\n\n# 配置文件路径\n\nevent_txt_path = '../event_index/'\nlabels_path = '../label/'\nvector_path = '../word2vec/'\n\n\n# 数据预处理,返回处理后的输入数据\ndef get_embeddings(early_time, time_steps, embedding_size):\n files = pre.get_file_name(event_txt_path) # 获取当前目录下的所有文件名和文件数\n num_event = len(files)\n inputs_x = np.zeros((num_event, time_steps, embedding_size), dtype=np.float32) # 初始化输入矩阵\n inputs_y = np.zeros((num_event, 2), dtype=np.int32)\n seq_length = np.zeros((num_event, ), dtype=np.int32)\n\n with open(labels_path + 'labels.json', 'r') as fl:\n labels = json.load(fl)\n\n for i in range(num_event):\n eid = files[i].split('.')[0]\n inputs_y[i] = labels[eid]\n\n with open(event_txt_path + files[i], 'r', encoding='utf8') as event:\n lines = event.readlines()\n length = len(lines) # 计算每个事件的评论数\n n = 0\n vec = np.zeros((length, embedding_size+1), dtype=np.float32) # 初始化事件矩\n for j in range(length):\n string = lines[j].split()\n vec[j, ] = [float(k) for k in string]\n if vec[j, 0] <= early_time:\n n += 1\n\n if n < time_steps: # 评论数少于time_steps,补0\n inputs_x[i, 0:n, ] = vec[0:n, 1:]\n seq_length[i] = n\n elif n % time_steps == 0: # 其他情况求均值\n seq_length[i] = time_steps\n m = n // time_steps\n for k in range(time_steps):\n inputs_x[i, k, ] = np.mean(vec[k*m: k*m+m, 1:], axis=0)\n else:\n seq_length[i] = time_steps\n m = n // time_steps\n n = n % time_steps\n for k in range(n):\n inputs_x[i, k, ] = np.mean(vec[k*(m+1): (k+1)*(m+1), 1:], axis=0)\n for k in 
range(time_steps-n):\n inputs_x[i, n+k, ] = np.mean(vec[((m+1)*n+k*m): ((m+1)*n+k*m+m), 1:], axis=0)\n\n return inputs_x, inputs_y, seq_length\n\n\n# 根据训练比例来划分训练集和测试集\ndef get_train_test(early_time, time_steps, embedding_size, rate=0.8):\n inputs_x, inputs_y, seq_length = get_embeddings(early_time, time_steps, embedding_size)\n train_index = int(len(inputs_x) * rate)\n\n train_x = np.asarray(inputs_x[: train_index], dtype=np.float32)\n train_y = np.asarray(inputs_y[: train_index], dtype=np.int32)\n train_seq_length = np.asarray(seq_length[: train_index], dtype=np.int32)\n\n test_x = np.asarray(inputs_x[train_index:], dtype=np.float32)\n test_y = np.asarray(inputs_y[train_index:], dtype=np.int32)\n test_seq_length = np.asarray(seq_length[train_index:], dtype=np.int32)\n\n return train_x, train_y, train_seq_length, test_x, test_y, test_seq_length\n\n\ndef BiLSTM(inputs, seq_length, hidden_num, weights, bias):\n # 搭建双向LSTM模型\n outputs, state = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=tf.contrib.rnn.LSTMCell(hidden_num), # 前向LSTM单元\n cell_bw=tf.contrib.rnn.LSTMCell(hidden_num), # 反向LSTM单元\n inputs=inputs, # 数据输入\n sequence_length=seq_length, # 输入数据的有效序列长度\n dtype=tf.float32\n )\n # outputs = (output_fw, output_bw)为一个turple\n outputs = tf.concat(outputs, 2) # 将outputs在hidden_num维度上进行拼接\n output = outputs[:, -1, :] # outputs.shape = (batch_size,time_steps,hidden_num*2)\n\n logits = tf.matmul(output, weights) + bias # 取出最后时刻的输出结果进行softmax操作\n logits = tf.nn.softmax(logits) # logits.shape = (batch_size, classs_num)\n\n return logits\n\n\n# 获取一个 batch_size 大小的数据\ndef get_batches(inputs_x, inputs_y, seq_length, batch_size):\n for i in range(0, len(inputs_x), batch_size):\n begin_i = i\n end_i = i + batch_size if (i + batch_size) < len(inputs_x) else len(inputs_x)\n # yield 来连续获取大小为 batch_size 的数据\n yield inputs_x[begin_i: end_i], inputs_y[begin_i: end_i], seq_length[begin_i: end_i]\n\n\n# 计算预测结果的正确率\ndef compute_accuracy(pred, true):\n correct_num = 0\n for n in range(len(pred)):\n if np.argmax(pred, 1)[n] == np.argmax(true, 1)[n]:\n correct_num += 1\n return correct_num / len(pred)\n\n\ndef main(_):\n time_steps = 50 # 序列的长度\n embedding_size = 300 # 词向量的维度\n hidden_num = 256 # lstm层的神经元数\n class_num = 2 # 类别数\n batch_size = 30 # batch大小\n epochs = 30 # 迭代次数\n train_rate = 0.7 # 训练样本占比\n deadline = 3600*12\n \n train_x, train_y, train_seq, test_x, test_y, test_seq = get_train_test(deadline*1000, time_steps, embedding_size, train_rate)\n\n # 定义输入数据\n X = tf.placeholder(tf.float32, [None, time_steps, embedding_size], name='input_x')\n Y = tf.placeholder(tf.int32, [None, 2], name='input_y')\n seq_length = tf.placeholder(tf.int32, [None], name='seq_length')\n # 定义并初始化全连接层的权重和偏置项\n weights = tf.Variable(tf.random_uniform([hidden_num * 2, class_num], -0.01, 0.01))\n bias = tf.Variable(tf.random_uniform([class_num], -0.01, 0.01))\n\n logits = BiLSTM(X, seq_length, hidden_num, weights, bias)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( # 损失函数通过交叉熵来计算\n labels=Y,\n logits=logits,\n name='cross_entropy')\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n # 优化方法采用梯度下降算法\n optimizer = tf.train.AdamOptimizer(learning_rate=0.003).minimize(cross_entropy_mean)\n\n init = tf.global_variables_initializer() # 初始化全部变量\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(init)\n print('init variables complete...')\n\n for i in range(epochs):\n train = []\n test = []\n for train_x_batch, train_y_batch, train_seq_length in 
get_batches(train_x, train_y, train_seq, batch_size):\n feed_dict = {X: train_x_batch, Y: train_y_batch, seq_length: train_seq_length}\n _, train_loss, train_pred, train_true = sess.run([optimizer, cross_entropy_mean, logits, Y], feed_dict=feed_dict)\n train_accuracy = compute_accuracy(train_pred, train_true)\n train.append(train_accuracy)\n \n for test_x_batch, test_y_batch, test_seq_length in get_batches(test_x, test_y, test_seq, batch_size):\n feed_dict = {X: test_x_batch, Y: test_y_batch, seq_length: test_seq_length}\n _, test_loss, test_pred, test_true = sess.run([optimizer, cross_entropy_mean, logits, Y], feed_dict=feed_dict)\n test_accuracy = compute_accuracy(test_pred, test_true)\n test.append(test_accuracy)\n if i == 29:\n print(max(train))\n #print('step: %d train_loss %f train_accuracy: %f test_loss: %f test_accuracy: %f' % (i, train_loss, max(train), test_loss, max(test)))\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"04.模型构建/early_detection/code/BiLSTM.py","file_name":"BiLSTM.py","file_ext":"py","file_size_in_byte":7871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"481201480","text":"import numpy as np\r\nimport math\r\nclass Polinomial_Classifier:\r\n def __init__(self,V,C,initial_A = None,initial_B = None,initial_bias = 0):\r\n self.D = V.shape[1]\r\n if initial_A is None:\r\n self.A = np.zeros(self.D)\r\n else:\r\n self.A = initial_A.copy()\r\n if initial_B is None:\r\n self.B = np.zeros(self.D)\r\n else:\r\n self.B = initial_B.copy()\r\n self.bias = initial_bias\r\n self.C = C\r\n self.V = np.complex64(V)\r\n self.length = V.shape[0]\r\n self.flag = 1\r\n def predict(self,v):\r\n summation = 0\r\n d = 0\r\n while(d 0:\r\n k = np.log(v[d])\r\n else :\r\n return 0\r\n if math.isnan(k) and self.flag :\r\n print(v,c,pred,d)\r\n self.flag -= 1\r\n return self.A[d]*k*self.d_loss_Ad(v,c,pred,d)\r\n def d_loss_bias(self,c,pred):\r\n return -2*(c-pred)\r\n\r\n def total_loss(self,P):\r\n summation = 0\r\n i = 0\r\n while (i= threshold):\r\n d = 0\r\n while (d= 2:\n port = ss[1]\n self._servers.append({\"host\": ss[0], \"port\": port})\n\n def getRandom(self):\n if len(self._servers):\n return self._servers[random.randint(0, len(self._servers)-1)]\n return None\n\n def getAll(self):\n return self._servers\n\n def __repr__(self):\n y = []\n for x in self._servers:\n y.append(\"hkp://%s:%s\" % (x[\"host\"], x[\"port\"]))\n return \"\\n\".join(y)","sub_path":"krypton/hkpserver/libs/gossip/gossipservers.py","file_name":"gossipservers.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"307638106","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: CodeJZY\n@license: (C) Copyright 2019, Node Supply Chain Manager Corporation Limited.\n@contact: 634799086@qq.com\n@file: keras_layer.py\n@time: 2019/10/30 11:43\n@desc:\n主要在tensorflow keras中运行,每次根据实际情况对某些地方进行修改,适用于单个输入情况\n1.init部分,需要提供自己的层需要的参数\n2.build部分,只需要实例化自己需要的复用层,并且让本对象的列表进行管理\n3.compute_output_shape部分,计算每次实际的输出节点大小\n4.call部分不用修改,当然可以自己进行完善\n\n总流程:\n1.初始化,初始化父类,初始化自己需要的参数\n2.build:创建复用层,让自己built设置为True\n3.call:主要思想就是让复用层call起来,所以用了装饰器,在装饰器中执行额外的操作\n 1.先让复用层build\n 2.让复用层run\n 3.将复用层的参数交给自己管理\n4.compute_output_shape:根据每次实际情况,设置输出节点的形状\n'''\nimport tensorflow as tf\nfrom functools import wraps\n\ntf.enable_eager_execution()\ndef solve_param(func):\n @wraps(func)\n def wrap(self, layer, *args, **kwargs):\n # 复用层对自己进行build部分\n if not layer.built:\n if 
len(args) > 0:\n inputs = args[0]\n else:\n inputs = kwargs['inputs']\n if isinstance(inputs, list):\n input_shape = [tf.keras.backend.int_shape(x) for x in inputs]\n else:\n input_shape = tf.keras.backend.int_shape(inputs)\n layer.build(input_shape)\n\n # 复用层自己进行调用\n outputs = func(self,layer,*args)\n\n # 将复用层的各种参数让自己进行管理\n for w in layer.trainable_weights:\n if w not in self._trainable_weights:\n self._trainable_weights.append(w)\n for w in layer.non_trainable_weights:\n if w not in self._non_trainable_weights:\n self._non_trainable_weights.append(w)\n for u in layer.updates:\n if not hasattr(self, '_updates'):\n self._updates = []\n if u not in self._updates:\n self._updates.append(u)\n return outputs\n return wrap\n\n\nclass MyLayer(tf.keras.layers.Layer):\n def __init__(self,layer1_units,layer2_units,layer1_activation='linear',layer2_activation='linear',**kwargs):\n '''\n 用来定义这一层需要的参数\n 在现在复用层这个场景中,这些参数主要是给复用层使用\n :param layer1_units: 复用层dense需要的参数\n :param layer2_units: 复用层dense需要的参数\n :param layer1_activation:激活函数\n :param layer2_activation:激活函数\n '''\n # 千万注意,需要为模型初始化,否则程序无法正常运行\n super(MyLayer, self).__init__(**kwargs)\n # 接下来定义此层需要的参数\n self.layer1_units = layer1_units\n self.layer2_units = layer2_units\n self.layer1_activation = layer1_activation\n self.layer2_activation = layer2_activation\n self.layers = []\n\n # build函数在将该层实例化对象添加到模型时自动调用\n def build(self,input_shape):\n print('there1')\n # 复用已经定义好的层\n # 注意,此处只是将复用层实例化,没有做别的操作\n self.layer1 = tf.keras.layers.Dense(units=self.layer1_units,activation=self.layer1_activation)\n self.layers.append(self.layer1)\n self.layer2 = tf.keras.layers.Dense(units=self.layer2_units,activation=self.layer2_activation)\n self.layers.append(self.layer2)\n # 每次必须调用,相当于将此层 built参数设置为True\n super(MyLayer,self).build(input_shape)\n\n # 复用层运行前,需要先对自己进行build\n @solve_param\n def run(self,layer,inputs):\n outputs = layer.call(inputs)\n return outputs\n\n # 自动调用build之后,就要自动调用call\n def call(self,inputs):\n print('there2')\n for layer in self.layers:\n # 让每个复用层自己运行\n inputs = self.run(layer, inputs)\n return inputs\n\n def compute_output_shape(self, input_shape):\n '''\n 让其自动推断输出形状,\n 每次根据实际情况进行计算\n :param input_shape:\n :return:\n '''\n return input_shape[:-1]+(self.layer2_units,)\n\n\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.Input(shape=[10]))\nmodel.add(MyLayer(5,1))\nprint(model.summary())\n","sub_path":"keras_/keras自定义层复用模型框架.py","file_name":"keras自定义层复用模型框架.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"126417766","text":"#!/usr/bin/env python\n\n# Load required modules\nimport sys, os, argparse, numpy as np\nfrom sklearn.externals import joblib\nfrom collections import defaultdict\n\n# Parse command-line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input_file', type=str, required=True)\nparser.add_argument('-o', '--output_file', type=str, required=True)\nargs = parser.parse_args(sys.argv[1:])\n\n# Load the input bitscores\nwith open(args.input_file, 'r') as IN:\n A_nodes = set()\n B_nodes = set()\n bitscores = defaultdict(int)\n nonzero, skipped = 0, 0\n for i, l in enumerate(IN):\n u, v, score = l.rstrip('\\n').split('\\t')\n if u == '' or v == '':\n skipped += 1\n continue\n A_nodes.add( u )\n B_nodes.add( v )\n bitscores[(u, v)] = float(score)\n nonzero += float(score) != 0\n print('Loaded scores: %s nodes x %s nodes' % (len(A_nodes), len(B_nodes)))\n print('Skipped:', skipped)\n 
print('Nonzero:', nonzero)\n\n# Convert bitscores to numpy array\nA_nodes = sorted(A_nodes)\nB_nodes = sorted(B_nodes)\nX = np.array([ [ bitscores[(u, v)] for v in B_nodes ] for u in A_nodes ])\n\n# Dump to file\noutput = dict(params=vars(args), A_nodes=A_nodes, B_nodes=B_nodes, X=X)\njoblib.dump( output, args.output_file )\n","sub_path":"wrap_bitscores.py","file_name":"wrap_bitscores.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"18621715","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 10 13:16:26 2018\n\n@author: Sylvain\n\"\"\"\nfrom reportlab.platypus import Image\nfrom reportlab.lib.units import cm\nfrom reportlab.graphics.shapes import Drawing,Rect,colors\nfrom reportlab.graphics.charts.textlabels import Label\nimport numpy as np\nimport logging\nlogger = logging.getLogger()\nclass HyperlinkedImage(Image, object):\n \"\"\"Image with a hyperlink, adopted from http://stackoverflow.com/a/26294527/304209.\"\"\"\n\n def __init__(self, filename, hyperlink=None, width=None, height=None, kind='direct',\n mask='auto', lazy=1, hAlign='CENTER'):\n \"\"\"The only variable added to __init__() is hyperlink.\n\n It defaults to None for the if statement used later.\n \"\"\"\n super(HyperlinkedImage, self).__init__(filename, width, height, kind, mask, lazy,\n hAlign=hAlign)\n self.hyperlink = hyperlink\n\n def drawOn(self, canvas, x, y, _sW=0):\n if self.hyperlink: # If a hyperlink is given, create a canvas.linkURL()\n # This is basically adjusting the x coordinate according to the alignment\n # given to the flowable (RIGHT, LEFT, CENTER)\n x1 = self._hAlignAdjust(x, _sW)\n y1 = y\n x2 = x1 + self._width\n y2 = y1 + self._height\n canvas.linkURL(url=self.hyperlink, rect=(x1, y1, x2, y2), thickness=0, relative=1)\n super(HyperlinkedImage, self).drawOn(canvas, x, y, _sW)\n \n \ndef genLayers(l0=[\"substrate\",30],\n l1=[\"p++\",20],\n l2=[\"p--\",50]):\n c = ['#ffeea0','#ff88ff','#bbd5e8']\n length=4*cm\n d = Drawing(4*cm,100)\n xpos=0#*(width-length-4*cm)/2\n# layer0 = 40\n# l1[1] = 40\n# l2[1]= 100-layer0-l1[1]\n l0[1],l1[1],l2[1] = layersize(l0[1],l1[1],l2[1])\n \n d.add(Rect(xpos,0,length,l0[1] ,fillColor=colors.HexColor(c[0])))\n lab = Label()\n lab.textAnchor = 'middle'\n lab.setText(l0[0])\n lab.setOrigin(xpos+2*cm,l0[1]/2)\n d.add(lab)\n \n d.add(Rect(xpos,l0[1],length,l1[1] ,fillColor=colors.HexColor(c[1])))\n lab = Label()\n lab.textAnchor = 'middle'\n lab.setText(l1[0])\n lab.setOrigin(xpos+2*cm,l0[1]+l1[1]/2)\n d.add(lab)\n \n d.add(Rect(xpos,l0[1]+l1[1],length,l2[1] ,fillColor=colors.HexColor(c[2])))\n lab = Label()\n lab.textAnchor = 'middle'\n lab.setText(l2[0])\n lab.setOrigin(xpos+2*cm,l0[1]+l1[1]+l2[1]/2)\n d.add(lab)\n \n return d\n \n \ndef genLayers2(ls):\n #ls =(titre, taille)\n c = ['#ffeea0','#ff88ff','#bbd5e8']\n length=4*cm\n# ls = np.asarray(ls)\n l = []\n for i in range(len(ls)):\n l.append((ls[i][0],ls[i][1]))\n ls = np.asarray(l,dtype=np.str)\n d = Drawing(4*cm,100)\n xpos=0\n ls[:,1] = layersize2(ls[:,1])\n tickness = ls[:,1].astype(np.float32)\n for i in range(len(ls)):\n lab = Label()\n lab.textAnchor = 'middle'\n color = c[0]\n labelTxt = str(ls[:,0][i])\n if '+' in labelTxt:\n color = c[1]\n elif '-' in labelTxt:\n color = c[2]\n lab.setText(ls[:,0][i])\n if i>0:\n d.add(Rect(xpos,np.cumsum(tickness)[i-1],length,tickness[i] ,fillColor=colors.HexColor(color)))\n lab.setOrigin(xpos+2*cm,np.cumsum(tickness)[i-1]+tickness[i]/2)\n else:\n 
d.add(Rect(xpos,0,length,tickness[i] ,fillColor=colors.HexColor(color)))\n lab.setOrigin(xpos+2*cm,tickness[i]/2)\n d.add(lab)\n \n return d\n \ndef layersize2(ls):\n for i in range(len(ls)):\n ls[i]=ls[i].replace('*','')\n if str(ls[i]).endswith(\"um\"):ls[i] = int(ls[i][:-2])*1000\n if str(ls[i]).endswith(\"nm\"):ls[i] = int(ls[i][:-2])\n ls = np.asarray(ls, dtype=np.float32)\n ls = np.log10(ls)\n s = np.sum(ls)\n ls = ls/s*100\n return ls\n\ndef resize(w,h,maxwidth):\n return maxwidth,h*maxwidth/w\n\ndef layersize(l0,l1,l2):\n if str(l0).endswith(\"um\"):l0 = int(l0[:-2])*1000\n if str(l1).endswith(\"um\"):l1 = int(l1[:-2])*1000\n if str(l2).endswith(\"um\"):l2 = int(l2[:-2])*1000\n \n if str(l0).endswith(\"nm\"):l0 = int(l0[:-2])\n if str(l1).endswith(\"nm\"):l1 = int(l1[:-2])\n if str(l2).endswith(\"nm\"):l2 = int(l2[:-2])\n l0=np.log(l0)\n l1=np.log(l1)\n l2=np.log(l2)\n s = l0+l1+l2\n l0 = l0/s*100\n l1 = l1/s*100\n l2 = l2/s*100\n return l0,l1,l2\n ","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"71554809","text":"\r\n\r\n# -----------------simulated annealing\r\nimport random\r\nimport math\r\nimport time\r\n\r\nrandom.seed(time.time())\r\n\r\nclass Player():\r\n def __init__(self, name, rating):\r\n self.name = name\r\n self.rating = rating\r\n\r\nclass Group():\r\n def __init__(self, name, players):\r\n self.name = name\r\n self.players = players\r\n self.ratingMean = 0.0\r\n\r\n #calc rating mean\r\n for player in self.players:\r\n self.ratingMean += player.rating / len(self.players)\r\n\r\n self.score = 0\r\n\r\n def printInfo(self):\r\n a = '%s(%s, %s):'%(self.name,str(round(self.score,3)),str(round(self.ratingMean,3)))\r\n b = ['%s(%s)'%(player.name, str(round(player.rating,3))) for player in self.players]\r\n\r\n print(a,b)\r\n\r\n def swap(self, playerIndex, other_group, other_playerIndex):\r\n player = self.players[playerIndex]\r\n other_player = other_group.players[other_playerIndex]\r\n\r\n #detract from group.ratingMean\r\n self.ratingMean -= player.rating / len(self.players)\r\n other_group.ratingMean -= other_player.rating / len(other_group.players)\r\n\r\n #add..\r\n self.ratingMean += other_player.rating / len(self.players)\r\n other_group.ratingMean += player.rating / len(other_group.players)\r\n\r\n #swap\r\n self.players[playerIndex] = other_player\r\n other_group.players[other_playerIndex] = player\r\n\r\n def calcScore(self, against_ratingMean):\r\n error_margin = 2 #amount off global_ratingMean allowed\r\n self.score = 1 if abs(self.ratingMean - against_ratingMean) > error_margin else 0 #\r\n #add more here, eg other than rating\r\n\r\n\r\n#set players\r\nplayers = [Player('a',1100), Player('b',1080), Player('c',410),\r\n Player('d',1030), Player('e',1000), Player('f',1000),\r\n Player('g',900), Player('h',1000), Player('i',700),\r\n Player('j',1000)]\r\n\r\n#\r\nrandom.shuffle(players)\r\n\r\n#set groups\r\ngroups = [Group('G1', players[0:5]), Group('G2', players[5:10])]\r\n\r\n\r\n\r\n#\r\nglobal_ratingMean = 0.0\r\n\r\nfor player in players:\r\n global_ratingMean += player.rating / len(players)\r\n\r\n\r\nprint('global rating mean is %f\\n'%(global_ratingMean))\r\n\r\n\r\nfor group in groups:\r\n group.calcScore(global_ratingMean)\r\n\r\n\r\nprint(\"Initial groups are:\")\r\nfor group in groups:\r\n group.printInfo()\r\n\r\n\r\n\r\n#begin\r\n\r\n\r\nstartTemp = 10.0\r\ncoolingRate = 0.9999\r\ntemp = 
startTemp\r\n\r\nwhile temp > 0.1:\r\n # cycles = 43 * (startTemp-temp)\r\n\r\n #\r\n group1_index = 0\r\n group2_index = 0\r\n\r\n while group1_index == group2_index:\r\n group1_index = random.randrange(0,len(groups))\r\n group2_index = random.randrange(0,len(groups))\r\n\r\n group1 = groups[group1_index]\r\n group2 = groups[group2_index]\r\n\r\n #pick random players from each group\r\n group1_playerIndex = random.randrange(0,len(group1.players))\r\n group2_playerIndex = random.randrange(0,len(group2.players))\r\n\r\n #\r\n group1_old_score = group1.score\r\n group2_old_score = group2.score\r\n\r\n group1.swap(group1_playerIndex, group2, group2_playerIndex)\r\n group1.calcScore(global_ratingMean)\r\n group2.calcScore(global_ratingMean)\r\n\r\n #\r\n scoreDifAvg = ((group1.score - group1_old_score)+(group2.score - group2_old_score))/2.0\r\n\r\n if scoreDifAvg > 0.0:\r\n p = math.exp(-scoreDifAvg/temp)\r\n\r\n if random.random() >= p:\r\n #swap back\r\n group1.swap(group1_playerIndex, group2, group2_playerIndex)\r\n group1.score = group1_old_score\r\n group2.score = group2_old_score\r\n\r\n\r\n temp*=coolingRate\r\n\r\n#\r\n\r\nprint(\"\\nOutput groups are:\")\r\nfor group in groups:\r\n group.printInfo()\r\n\r\n\r\n\r\n\r\n\r\n\r\n# ----------------genetic algorithm\r\n# #include \r\n# #include \"individual.h\"\r\n\r\n# //means, totals\r\n# float develop, document, gpa, model, social;\r\n# int genders[2], whens[3];\r\n\r\n# //initial\r\n# std::vector people;\r\n# std::vector baseGroups;\r\n\r\n# //\r\n# std::vector population;\r\n\r\n# void init() {\r\n# \t//set people num, group num\r\n\r\n# \t//calc min, max group sizes and nums\r\n\r\n# \t//generate people\r\n\r\n# \t//calc totals, means\r\n\r\n# \t//fill base groups\r\n\r\n# \t//create population\r\n# \t\t//generate genome\r\n# \t\t//copy base groups\r\n# \t\t//calculate groups\r\n# \t\t//score groups\r\n# \t\t//score individual\r\n\r\n\r\n# }\r\n\r\n# void uninit() {\r\n# \t//delete base groups\r\n\r\n# \t//delete population\r\n# \t\t//delete their groups\r\n\r\n# \t//delete people\r\n# }\r\n\r\n# void run() {\r\n# \t//until number of loops\r\n# \t\t//select individuals for reproduction\r\n# \t\t//breed selected individuals, with some random mutations in the offspring\r\n\r\n# \t\t//for population\r\n# \t\t\t//calculate groups\r\n# \t\t\t//score groups\r\n# \t\t\t//score individual\r\n\r\n# }\r\n\r\n# int main(int argc, char *argv[]) {\r\n\r\n# \tinit();\r\n# \trun();\r\n# \tuninit();\r\n\r\n# \tsystem(\"pause\");\r\n\r\n# \treturn 0;\r\n# }\r\n","sub_path":"hillclimb.py","file_name":"hillclimb.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279093767","text":"\n\nclass Prog_Word_List:\n wordList=[]\n\n def __init__(self):\n self.wordList = [\n\n 'jira', 'post',\n\n 'path', 'gcov', 'add', 'null', 'flag', 'bug',\n\n 'newline', 'memcpy', 'boolean', 'int', 'integer',\n\n 'float', 'double', 'char', 'void', 'class',\n\n 'struct', 'function', 'method', '->', 'pointer',\n\n 'return', 'stack', 'queue', 'vector', 'array',\n\n 'variable', '#include', 'enum', '#define',\n\n 'std', 'macro', 'header', 'api', 'risk', 'cerr',\n\n 'namespace', 'parameter', 'append', 'prepend',\n\n 'for example', 'e.g', 'static', 'i.e',\n\n 'loop', 'size_t', '%d', '%s', '%f', '%lf',\n\n '%zu', '\\\\n', 'print', 'cout', 'stdlib', 'apk',\n\n 'malloc', 'merged', 'master', 'master', 'master',\n\n 'server', 'client' , '#if',\n\n '==', '>=', '<=', '>', '<', '|', '&', 
'||',\n\n '&&', '--', '++', '**', '^', '~']\n\n def getList(self):\n return self.wordList","sub_path":"GerritAPI2/Classification/ReviewModifier/Prog_Word_List.py","file_name":"Prog_Word_List.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"320557593","text":"from django.shortcuts import render\nfrom django.shortcuts import render, get_object_or_404, Http404, redirect\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\nfrom django.views import View, generic\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\n\nfrom .forms import (CommentForm, CategoryForm, TagForm, PostForm)\nfrom .models import Post, Category, Tag, Comment\n\nclass PostDetailView(View):\n def get(self, request, title, *args, **kwargs):\n post = get_object_or_404(Post, title=title)\n comment = post.comment_set.all()\n context = {'post':post,'comment': comment,}\n\n return render(request, \"post_detail.html\", context)\n\ndef comment(request,pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.author = request.user\n comment.save()\n return redirect('/posts')\n else:\n form = CommentForm()\n context = {\n 'form': form,\n }\n return render(request, 'comment.html', context)\n\n def get_object(self):\n title = self.kwargs.get(\"title\")\n if title is None:\n raise Http404\n return get_object_or_404(Post, title__iexact=title)\n\nclass PostView(LoginRequiredMixin,View):\n def get(self, request, *args, **kwargs):\n post = Post.objects.filter(status='published').order_by('-date_created')\n context = {\n 'object_list':post,\n }\n return render(request, \"post_list.html\", context)\n\ndef Draft(request):\n post = Post.objects.filter(status__contains='draft')\n context = {'object_list': post,}\n return render(request, 'post_list.html', context)\n\ndef Hidden(request):\n post = Post.objects.filter(status__contains='Hidden')\n context = {'object_list': post,}\n return render(request, 'post_list.html', context)\n\nclass PostCreateView(LoginRequiredMixin, View):\n form_class = PostForm\n initial = {'key': 'value'}\n template_name = 'post_edit.html'\n\n def get(self, request, *args, **kwargs):\n form = self.form_class(initial=self.initial)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n return redirect('posts:post_detail', title = post.title)\n else:\n form = PostForm()\n context = {\n 'form': form,\n\n }\n return render(request, self.template_name, context)\ndef post_edit(request, title):\n post_user = Post.objects.filter(user=request.user)\n post = get_object_or_404(Post, title=title)\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n return redirect('posts:post-lists', title=title)\n else:\n form = PostForm(instance=post)\n context = {\n 'form': form,\n }\n return render(request, 'post_edit.html', context)\n\n# class ProfileDetailView(View):\n# def get(self, request, username, *args, **kwargs):\n# profile_detail = get_object_or_404(User, username__iexact=username)\n# post_list = 
profile_detail.post_set.filter(user=self.request.user, status='published')\n# paginator = Paginator(post_list, 10)\n# page = request.GET.get('page')\n# try:\n# post = paginator.page(page)\n# except PageNotAnInteger:\n# post = paginator.page(1)\n# except EmptyPage:\n# post = paginator.page(paginator.num_pages)\n# context = {\n# 'profile_detail': profile_detail,\n# 'post': post,\n# }\n# return render(request, 'profile_detail.html', context)\n","sub_path":"joy/myblog/myblogspot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"407321282","text":"import numpy as np\r\ndataDir = 'D:/627/Project/Python/YahooMusic_Attributes&ML/'\r\nfile_track = dataDir + 'trackData2.txt'\r\nfile_train = dataDir + 'trainIdx2_matrix.txt'\r\nfile_test = dataDir + 'testTrack_hierarchy_win.txt'\r\nfile_out = dataDir + 'test_attributs.txt'\r\nfile_hashMap = 'D:/627/Project/Python/DataProcess/Files/train_hashmap.pkl'\r\n \r\n\r\n############################################# Pre-Processing ################################################\r\n#Creating a array, whose index is itemId.\r\n#Array cell store similar items.\r\n#For example: track 1 and track 2 have the same Album 3. Consequently, it store 1 and 2 in items[3].\r\nitems=[[] for i in range(300000)]\r\nfRead = open(file_track,'r')\r\nfor line in fRead:\r\n genreId=[]\r\n arr=line.strip().split('|')\r\n if arr[1]=='None':\r\n arr[1]='299998'\r\n if arr[2]=='None':\r\n arr[2]='299998'\r\n albumId=int(arr[1])\r\n artistId=int(arr[2])\r\n TrackId=int(arr[0])\r\n for i in range(3,len(arr)):\r\n genreId.append(int(arr[i])) \r\n items[albumId].append(TrackId)\r\n items[artistId].append(albumId)\r\n for i in range(len(genreId)):\r\n items[genreId[i]].append(artistId)\r\nfRead.close()\r\n\r\n#This function creates a key for Hashmap object\r\ndef createKey(arr1,arr2):\r\n return str(arr1+','+arr2)\r\n\r\n#This function searchs similar artist\r\n#It selects two set of similar artist set by set size\r\ndef SimArtist(userId,simArtist,hashmap,length):\r\n if length==0:\r\n str0=['0 0 0 0','0 0 0 0']\r\n return str0\r\n elif length==1:\r\n vec_artist=[]\r\n count_artist=0\r\n for i in range(len(simArtist)):\r\n temp=createKey(userId,str(simArtist[i]))\r\n if hashmap.get(temp)!= None:\r\n vec_artist.append(float(hashmap.get(temp)))\r\n count_artist+=1\r\n if len(vec_artist)==0:\r\n mean_artist = 0 \r\n max_artist = 0 \r\n min_artist = 0 \r\n else:\r\n mean_artist=np.mean(vec_artist) \r\n max_artist=max(vec_artist) \r\n min_artist=min(vec_artist)\r\n str1=[str(min_artist)+' '+str(max_artist)+' '+str(mean_artist)+' '+str(count_artist),'0 0 0 0']\r\n return str1\r\n \r\n else:\r\n vec_artist=[[] for i in range(length)]\r\n count_artist=[0]*length\r\n for m in range(length):\r\n for i in range(len(simArtist[m])):\r\n temp=createKey(userId,str(simArtist[m][i])) \r\n if hashmap.get(temp)!=None:\r\n vec_artist.append(float(hashmap.get(temp)))\r\n count_artist[m]+=1\r\n mark=[0,0]\r\n for i in range(length):\r\n if count_artist[i]==max(count_artist):\r\n mark[0]=i\r\n temp=count_artist[i]\r\n count_artist[i]=-1\r\n break\r\n for i in range(length):\r\n if count_artist[i]==max(count_artist):\r\n mark[1]=i\r\n count_artist[mark[0]]=temp\r\n break\r\n \r\n output=[]\r\n if len(vec_artist[mark[0]])==0:\r\n mean_artist1 = 0 \r\n max_artist1 = 0 \r\n min_artist1 = 0 \r\n else:\r\n mean_artist1=np.mean(vec_artist[mark[0]]) \r\n max_artist1=max(vec_artist[mark[0]]) 
\r\n min_artist1=min(vec_artist[mark[0]])\r\n \r\n output.append(str(min_artist1)+' '+str(max_artist1)+' '+str(mean_artist1)+' '+str(count_artist[mark[0]]))\r\n if len(vec_artist[mark[1]])==0:\r\n mean_artist2 = 0 \r\n max_artist2 = 0 \r\n min_artist2 = 0 \r\n else:\r\n mean_artist2=np.mean(vec_artist[mark[1]]) \r\n max_artist2=max(vec_artist[mark[1]]) \r\n min_artist2=min(vec_artist[mark[1]])\r\n output.append(str(min_artist1)+' '+str(max_artist1)+' '+str(mean_artist1)+' '+str(count_artist[mark[1]]))\r\n return output\r\n\r\n\r\n \r\n#############################################creating attributes############################################\r\nfTest = open(file_test,'r')\r\nfOut = open(file_out,'w')\r\nii=0\r\n\r\nfor line in fTest:\r\n vec_track=[]\r\n vec_album=[]\r\n count_track=0\r\n count_album=0\r\n temp=''\r\n arr = line.strip().split('|')\r\n \r\n if arr[2]=='None':\r\n albumId=299999\r\n else:\r\n albumId=int(arr[2])\r\n \r\n if arr[3]=='None':\r\n artistId=299999\r\n else:\r\n artistId=int(arr[3])\r\n \r\n genreId=[]\r\n genre_num=len(arr)-4 \r\n for i in range(4,len(arr)):\r\n genreId.append(int(arr[i]))\r\n \r\n simArtist=[[] for i in range(genre_num)]\r\n for i in range(genre_num):\r\n simArtist[i]=items[genreId[i]] \r\n userId=int(arr[0])\r\n trackId=int(arr[1])\r\n #Search similar tracks\r\n simTrack=items[albumId]\r\n #Search similar albums\r\n simAlbum=items[artistId]\r\n\r\n \r\n for i in range(len(simTrack)):\r\n temp=createKey(arr[0],str(simTrack[i]))\r\n if hashmap.get(temp)!=None:\r\n vec_track.append(float(hashmap.get(temp)))\r\n count_track+=1\r\n \r\n for i in range(len(simAlbum)):\r\n temp=createKey(arr[0],str(simAlbum[i])) \r\n if hashmap.get(temp)!=None:\r\n vec_track.append(float(hashmap.get(temp)))\r\n count_album+=1\r\n \r\n \r\n artistAtt = SimArtist(arr[0],simArtist,hashmap,genre_num)\r\n \r\n #Computing the attributes scores\r\n if len(vec_track)==0:\r\n mean_track = 0\r\n max_track = 0\r\n min_track = 0\r\n else:\r\n mean_track=np.mean(vec_track)\r\n max_track=max(vec_track)\r\n min_track=min(vec_track)\r\n if len(vec_album)==0:\r\n mean_album = 0 \r\n max_album = 0 \r\n min_album = 0 \r\n else:\r\n mean_album=np.mean(vec_album) \r\n max_album=max(vec_album) \r\n min_album=min(vec_album)\r\n \r\n \r\n outStr=arr[0]+' '+arr[1]+' '+str(min_track)+' '+str(max_track)+' '+str(mean_track)+' '+str(count_track)+' '+str(min_album)+' '+str(max_album)+' '+str(mean_album)+' '+str(count_album)+' '\r\n fOut.write(outStr+' '+artistAtt[0]+' '+artistAtt[1]+'\\n')\r\n ii+=1\r\n\r\n for i in range(len(simTrack)):\r\n temp=createKey(arr[0],str(simTrack[i]))\r\n \r\nprint(ii)\r\nfOut.close()\r\nfTest.close()\r\nfOut.close()\r\n","sub_path":"YahooMusic_AttributesCreation_ML/YahooMusic_AttributesCreation_ML/AttributesCreation_test.py","file_name":"AttributesCreation_test.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"436460804","text":"import torch\nfrom .linear import LinearGaussianObservations\nfrom ....timeseries import AffineProcess\n\n\nclass LocalLinearization(LinearGaussianObservations):\n \"\"\"\n A proposal distribution useful for when the mean of the observable distribution is a non-linear function of the\n underlying state. 
The proposal linearizes the non-linear function around E[\\mu(x_{t-1})] and then uses\n `LinearGaussianObservations` as a proposal.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self._hidden_is1d = None\n self._observable_is1d = None\n\n def set_model(self, model):\n if model.observable.n_dim > 0:\n raise Exception(\"This proposal distribution does not work for models having more observable dimension > 0!\")\n\n if not (isinstance(model.observable, AffineProcess) and isinstance(model.hidden, AffineProcess)):\n raise ValueError(f\"Both observable and hidden must be of type {AffineProcess.__class__.__name__}!\")\n\n self._model = model\n\n self._hidden_is1d = self._model.hidden.n_dim == 0\n self._observable_is1d = self._model.observable.n_dim == 0\n\n return self\n\n def get_constant_and_offset(self, params, x):\n x.values.requires_grad_(True)\n\n loc, _ = self._model.observable.mean_scale(x)\n loc.backward(torch.ones_like(loc))\n grad_eval = x.values.grad\n\n x.values.detach_()\n loc = loc.detach()\n\n product = grad_eval * x.values\n if not self._hidden_is1d:\n product = product.sum(-1)\n\n return grad_eval, loc - product\n","sub_path":"pyfilter/filters/particle/proposals/local_linearization.py","file_name":"local_linearization.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154629425","text":"from threading import Thread\nimport logging\nfrom collections import namedtuple, deque\nfrom queue import Queue, Empty\nimport time\n\ndef Checksum(ints):\n first_sum = 0\n second_sum = 0\n for b in ints:\n first_sum = (first_sum + b) & 0xff\n second_sum = (second_sum + first_sum) & 0xff\n return [first_sum, second_sum]\n\n\ndef AppendChecksum(ints):\n return ints + Checksum(ints)\n\n\nclass Packet(object):\n def __init__(self):\n self.error = 0\n self.index_ack = 0\n self.index_sending = 0\n self.data_length = 0\n self.data = []\n # Only used when parsing.\n self.parsed = False\n self.data_ok = False\n\n def WithData(self, index, data):\n self.index_sending = index\n self.data = data\n self.data_length = len(data)\n return self\n\n def WithAck(self, index, error):\n self.index_ack = index\n self.error = error\n return self\n\n def SerializeToInts(self):\n error_add = 0\n if self.error:\n error_add = 128\n return AppendChecksum([10, 60, error_add + self.index_ack,\n self.index_sending, self.data_length]) + AppendChecksum(self.data)\n\n def IncomingAck(self):\n return Ack(self.index_ack, not self.error)\n\n def ParseFromIntStream(self, stream):\n \"\"\"Consumes what it reads. 
Returns stream, minus what was consumed.\"\"\"\n stream_length = len(stream)\n for i, b in enumerate(stream):\n if (b == 60 and i > 0 and stream[i - 1] == 10 and\n stream_length - i > 5):\n # Start found.\n data_start = i + 6\n checksums = stream[i + 4:data_start]\n if checksums != Checksum(stream[i - 1:i + 4]):\n logging.info(\"Bad header.\")\n return stream[i + 4:]\n # Got valid header\n self.error = stream[i + 1] > 127\n self.index_ack = stream[i + 1] % 128\n self.index_sending = stream[i + 2]\n self.data_length = stream[i + 3]\n if data_start + self.data_length + 2 <= stream_length:\n self.parsed = True\n checksum_start = data_start + self.data_length\n self.data = stream[data_start:checksum_start]\n self.data_ok = (Checksum(self.data) ==\n stream[checksum_start:checksum_start + 2])\n return stream[checksum_start + 2:]\n return stream\n\n def Parsed(self):\n \"\"\"Implies we got a valid header and the requested number of bytes.\n\n May have DataOk() set to false.\n \"\"\"\n return self.parsed\n\n def Index(self):\n return self.index_sending\n\n def DataOk(self):\n return self.data_ok\n\n\nclass Loggable(object):\n def __init__(self, name):\n self.name = name\n\n def log(self, *args):\n args_with_name = []\n args_with_name = (\"%s: \" + args[0], self.name) + args[1:]\n logging.info(*args_with_name)\n\n\nclass PacketRingBuffer(object):\n # TODO: I think this is cleaner, but haven't tested yet.\n class BufferItem(object):\n def __init__(self, empty, packet=None):\n self.empty = empty\n self.packet = packet\n\n def __init__(self):\n self.last_packet_index = 0\n self.buffer = deque()\n\n def PopPacket(self):\n if len(self.buffer) == 0:\n return None\n if self.buffer[0].empty:\n return None\n packet = self.buffer.popleft().packet\n self.last_packet_index = packet.index_sending\n return packet\n\n\n def InsertPacket(self, packet):\n if packet.index_sending == 0:\n return None\n offset = packet.index_sending - self.last_packet_index\n if offset > 0:\n self._AppendPacket(packet, offset)\n elif offset < -100 and offset > -127:\n self._AppendPacket(packet, offset + 127)\n else:\n # Bad, duplicate packet.\n pass\n\n def _AppendPacket(self, packet, offset):\n insert_index = offset - 1\n if insert_index < len(self.buffer):\n item = self.buffer[insert_index]\n if item.empty:\n item.empty = False\n item.packet = packet\n return\n for i in range(len(self.buffer), insert_index):\n self.buffer.append(PacketRingBuffer.BufferItem(empty=True, packet=None))\n self.buffer.append(PacketRingBuffer.BufferItem(empty=False, packet=packet))\n\n\nclass Reader(Loggable):\n def __init__(self, ser, name=\"\"):\n Loggable.__init__(self, name)\n self.ser = ser\n self.bytes = []\n self.incoming_ok = Queue(10)\n self.incoming_error = Queue(10)\n self.outgoing_ok = Queue(10)\n self.outgoing_error = Queue(10)\n self.incoming_buffer = PacketRingBuffer()\n\n def Read(self, rx_queue):\n self.ReadIntoBuffer()\n if self.bytes:\n #self.log(\"Got some bytes\")\n packet = Packet()\n self.bytes = packet.ParseFromIntStream(self.bytes)\n if packet.Parsed():\n self.log(\"Parsed\")\n if packet.DataOk():\n self.log(\"Data OK: %d, %s\", packet.index_sending, packet.data)\n self.incoming_ok.put(packet.index_sending)\n popped = self.AppendPacket(packet)\n if popped:\n self.log(\"popped\")\n for p in popped:\n self.log(\"Returning packet %d\", p.index_sending)\n rx_queue.put(p)\n else:\n self.incoming_error.put(packet.index_sending)\n if packet.error:\n self.outgoing_error.put(packet.index_ack)\n else:\n 
self.outgoing_ok.put(packet.index_ack)\n\n def done(self):\n return len(self.bytes) == 0 and self.packet_lists == [[], []]\n\n def AppendPacket(self, packet):\n self.incoming_buffer.InsertPacket(packet)\n returns = []\n while True:\n popped = self.incoming_buffer.PopPacket()\n if not popped:\n return returns\n returns.append(popped)\n\n def PopIncomingError(self):\n \"\"\"Errors in messages incoming to this device.\"\"\"\n try:\n return self.incoming_error.get(block=False)\n except Empty:\n return None\n\n def PopIncomingAck(self):\n \"\"\"Acks for messages incoming to this device.\"\"\"\n try:\n return self.incoming_ok.get(block=False)\n except Empty:\n return None\n\n def PopOutgoingError(self):\n \"\"\"Errors for messages sent from this device.\"\"\"\n try:\n return self.outgoing_error.get(block=False)\n except Empty:\n return None\n\n def PopOutgoingAck(self):\n \"\"\"Acks for messages sent from this device.\"\"\"\n try:\n return self.outgoing_ok.get(block=False)\n except Empty:\n return None\n\n\n def ReadIntoBuffer(self):\n while True:\n byte = self.ser.read()\n if byte is None:\n return\n self.bytes.append(byte)\n\n\nclass Ack(object):\n def __init__(self, index, ok):\n \"\"\"Data index and whether successful. index of 0 -> no ack\"\"\"\n if index is None:\n self.index = 0\n else:\n self.index = index\n self.ok = ok\n self.error = not ok\n\n def __str__(self):\n if self.ok:\n message = \"ok\"\n else:\n message = \"error\"\n return \"%d %s\" % (self.index, message)\n\nSentMessage = namedtuple(\"SentMessage\", [\"index\", \"data\"])\nRETRY_PACKET = 42\nNEW_PACKET = 43\nNO_PACKET = 0\n\nclass Writer(Loggable):\n def __init__(self, ser, name=\"\"):\n Loggable.__init__(self, name)\n self.ser = ser\n self.next_index = 1\n self.sent_messages = deque()\n self.retry_queue = deque()\n self.max_outgoing_length = 4\n self.last_send_time = time.time()\n self.all_quiet = True\n\n def done(self):\n if self.all_quiet:\n self.log(\"Done.\")\n return self.all_quiet\n\n def Write(self, incoming_ack, outgoing_ack, tx_queue):\n \"\"\"Writes the incoming_ack, and either new data or a retry if any.\"\"\"\n if outgoing_ack.index != 0:\n self.all_quiet = False\n self.PopSentForAck(outgoing_ack)\n else:\n self.MaybeRetryAllSent()\n packet = Packet()\n packet.WithAck(incoming_ack.index, error=incoming_ack.error)\n packet_type = self.AddPacketData(tx_queue, packet)\n if packet.index_sending != 0 or packet.index_ack != 0:\n self.TransmitPacket(packet)\n self.all_quiet = len(self.retry_queue) == 0 and len(self.sent_messages) == 0\n return packet_type == NEW_PACKET\n\n def PopSentForAck(self, ack):\n \"\"\"Updates the sent_messages list given an ack.\"\"\"\n logging.info(\"Ack: %s\", ack)\n found = False\n for ack_position, message in enumerate(self.sent_messages):\n if message.index == ack.index:\n found = True\n break\n if not found:\n # Likely an old or duplicate ack. 
Ignore it.\n self.log(\"Got an ack for an unknown message %d\", ack.index)\n return\n if ack.error:\n ack_position += 1\n for i in range(ack_position):\n self.retry_queue.append(self.sent_messages.popleft())\n if ack.ok:\n self.sent_messages.popleft()\n\n def AddPacketData(self, tx_queue, packet):\n \"\"\"Adds the data part of the packet.\"\"\"\n packet_type = NO_PACKET\n if len(self.sent_messages) >= self.max_outgoing_length:\n #self.log(\"Too many sent; no write.\")\n return packet_type\n if len(self.retry_queue) > 0:\n self.log(\"Retry rather than use queue.\")\n sending = self.retry_queue.popleft()\n packet_type = RETRY_PACKET\n elif not tx_queue.empty():\n sending = SentMessage(self.next_index, tx_queue.get())\n self.next_index += 1\n self.next_index = self.next_index % 128\n # TODO: make sure tests catch this if missing\n if self.next_index == 0:\n self.next_index = 1\n packet_type = NEW_PACKET\n else:\n return NO_PACKET\n self.log(\"Sending packet: %d, %s\", sending.index, sending.data)\n packet.WithData(sending.index, sending.data)\n self.sent_messages.append(sending)\n return packet_type\n\n def TransmitPacket(self, packet):\n \"\"\"Sends the packet over the wire.\"\"\"\n ints = packet.SerializeToInts()\n for b in ints:\n self.ser.write(b)\n self.last_send_time = time.time()\n\n def MaybeRetryAllSent(self):\n \"\"\"Triggers retries if we get multiple no-index acks and have a\n non-empty sent_messages list.\n\n The risk here is that if we do it too soon, we'll spam the receiver.\n \"\"\"\n if (len(self.sent_messages) > 0 and\n time.time() - self.last_send_time > 0.2):\n self.all_quiet = False\n self.retry_queue.append(self.sent_messages.popleft())\n self.log(\"Maybe retry all sent.\")\n\n\nclass RXModule(object):\n def __init__(self, serial_connection):\n self.thread = RXThread(serial_connection, incoming_queue, outgoing_queue)\n self.thread.start()\n\n def Write(self, command):\n \"\"\"Command is a string. 
Blocks until this command is sent.\"\"\"\n self.thread.WriteMessage(command)\n\n def Read(self, timeout=0):\n \"\"\"Returns a complete message, if incoming transmission is complete.\"\"\"\n return self.thread.ReadMessage(timeout)\n\n def Clear(self):\n self.message = Message()\n\nclass RXThread(Thread, Loggable):\n def __init__(self, serial_connection, name=''):\n Thread.__init__(self)\n Loggable.__init__(self, name)\n self.ser = serial_connection\n self.incoming_message = None\n self.last_communication_time = time.time()\n self.rx_queue = Queue(5)\n self.tx_queue = Queue(5)\n self.daemon = True\n self.name = name\n self.reader = Reader(self.ser, name)\n self.writer = Writer(self.ser, name)\n self.messages_requested = 0\n self.messages_writer_accepted = 0\n\n def WriteMessage(self, command):\n self.tx_queue.put(command)\n self.messages_requested += 1\n\n def ReadMessage(self, timeout):\n try:\n self.log(\"Reading from queue of length: %d\", self.rx_queue.qsize())\n return self.rx_queue.get(timeout=timeout).data\n except Empty:\n return None\n\n def run(self):\n while True:\n self.RxTxLoop()\n\n def done(self):\n return (self.messages_requested == self.messages_writer_accepted\n and self.writer.done()) or not self.is_alive()\n\n def RxTxLoop(self):\n \"\"\"Called in a loop; handles a single rx/tx pair.\"\"\"\n # Receive\n self.reader.Read(self.rx_queue)\n incoming_index_to_resend = self.reader.PopIncomingError()\n if incoming_index_to_resend:\n incoming_ack = Ack(index=incoming_index_to_resend, ok=False)\n else:\n incoming_index_to_ack = self.reader.PopIncomingAck()\n incoming_ack = Ack(index=incoming_index_to_ack, ok=True)\n send_error_index = self.reader.PopOutgoingError()\n if send_error_index:\n outgoing_ack = Ack(send_error_index, ok=False)\n else:\n send_ack_index = self.reader.PopOutgoingAck()\n outgoing_ack = Ack(send_ack_index, ok=True)\n if incoming_ack.index:\n self.log(\"Incoming ack %s\", incoming_ack)\n if outgoing_ack.index:\n self.log(\"Outgoing ack %s\", outgoing_ack)\n #self.log(\"Write from tx_queue of size %d\", self.tx_queue.qsize())\n if self.writer.Write(\n incoming_ack=incoming_ack,\n outgoing_ack=outgoing_ack,\n tx_queue=self.tx_queue):\n self.messages_writer_accepted += 1\n","sub_path":"python/commlink.py","file_name":"commlink.py","file_ext":"py","file_size_in_byte":14255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"139765987","text":"import bson\nfrom mongokit import Document\n\n\nclass Time(Document):\n __database__ = 'ivewasted'\n __collection__ = 'time'\n\n use_dot_notation = True\n\n structure = {\n 'user_id': bson.objectid.ObjectId,\n 'url': basestring,\n 'start_ts': int,\n 'duration': int\n }\n\n required_fields = ['user_id', 'url', 'start_ts', 'duration']\n\n # attension: will not create index automatically\n indexes = [\n {\n 'fields': ['user_id', 'url'],\n }\n ]\n","sub_path":"models/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"188242856","text":"import socket\n\nIP = \"25.139.26.184\"\nPORT = 9876\nsocket1 = socket.socket()\nlocal = socket.socket()\n\nsocket1.bind((\"0.0.0.0\",PORT))\nsocket1.listen(0)\nremote,RHOST = socket1.accept()\nprint(str(RHOST[0])+\":\"+str(RHOST[1])+\" connected\")\nremote.send(\"Connected\\n\")\nremote.settimeout(0.01)\nlocal.settimeout(0.01)\nprint(\"connecting to local\")\nlocal.connect((\"localhost\",6666))\nprint(\"connected\")\nwhile 
True:\n\ttry:\n\t\tremotemsg = remote.recv(2048)\n\t\tprint(remotemsg)\n\t\tlocal.send(remotemsg)\n\texcept KeyboardInterrupt:\n\t\texit()\n\texcept socket.timeout:\n\t\ttry:\n\t\t\tlocalmsg = local.recv(2048)\n\t\t\tprint(localmsg)\n\t\t\tremote.send(localmsg)\n\t\texcept socket.timeout:\n\t\t\tpass\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"322670362","text":"# from django.shortcuts import render_to_response, redirect\n# from django.template import RequestContext\nfrom .models import Registration\nfrom django.http import HttpResponse\n\ndef regform(request):\n\n if request.method == 'POST':\n form = request.POST\n # if form.is_valid():\n if form['name'] != \"\" and form['email'] != \"\":\n f = Registration(name=form['name'], email=form['email'], source=form['source'], ip=request.META['REMOTE_ADDR'])\n # Registration.name = form.cleaned_data['name']\n # Registration.email = form.cleaned_data['email']\n # Registration.passw = form.cleaned_data['passw']\n # Registration.ip = request.META.get('HTTP_X_FORWARDED_FOR')\n # ip = '4.4.4.4'\n # x = form.save(commit=False)\n # x.ip = request.META['REMOTE_ADDR']\n f.save()\n\n # return redirect('thanks')\n # else:\n # form = RegForm()\n #\n # return render_to_response('customers/form.html', {'form': form}, context_instance=RequestContext(request))","sub_path":"customers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"148707173","text":"from src.env.chain_mdp import ChainMDP\nfrom src.utils.training_utils import *\nfrom src.models.models import *\nfrom src.models.learn import DeepQNetwork\nfrom src.models.gan import GAN\nfrom src.constants.config import *\nfrom src.utils.scalar import *\n\nimport queue\nimport tensorflow as tf\nimport threading as td\n\ndef play(self_ai, parent_ai, gan, data_queue, score_queue):\n\n env = ChainMDP(QUANTITY_FEATURES)\n history = History(HISTORY_LENGTH)\n episode = 0\n score_sum = 0\n scalar = Scalar()\n scalar.add_variable(\"Average_score\")\n scalar.set()\n\n while True:\n # ========================================== start a new episode ==========================================\n frame_counter = 0 # record the num of frames which are stored, not include the skipped ones\n synchronize_version(self_ai, parent_ai)\n episode += 1\n s0 = env.reset()\n\n score = 0 # accumulated reward along the whole episode added from two players\n max_state = 0\n\n while True:\n # if episode % 100 == 1:\n # env.render()\n action = self_ai.act(s0)\n reward, s1 = env.step(action)\n max_state = max(env.loc, max_state)\n\n if episode > 100 and USING_GAN:\n prob = gan.single_state_prob(s1)\n #bonus_reward = (1-2*prob)**2 if prob < 0.5 else 0\n # if bonus_reward > 0.3:\n # print(env.loc, bonus_reward)\n history.put((s0, action, reward, s1, 0))\n else:\n history.put((s0, action, reward, s1, 0))\n if history.full():\n data_queue.put(history.get())\n s0 = s1\n frame_counter += 1\n score += reward\n\n # ======================================= when the game ends ==========================================\n if frame_counter >= MAX_FRAMES_PER_EPISODE:\n score_queue.put(score)\n score_sum += score\n if episode % 10 == 1:\n print()\n print(\"Max state:\", max_state)\n print(\"Score: {:2f}\".format(score))\n print(\"Epsilon : %f\" % self_ai.epsilon)\n print(\"Average score: 
%f\" % (score_sum / 10.0))\n print()\n scalar.read([score_sum / 10.0], episode)\n score_sum = 0\n break\n\n\nif __name__ == '__main__':\n\n Sess = tf.Session()\n\n global_ai = DeepQNetwork(\n n_features=QUANTITY_FEATURES,\n n_actions=2,\n scope='global_ai',\n model=mlp,\n parent_ai=None,\n sess=Sess,\n learning_rate=5e-3,\n n_replace_target=50,\n hiddens=HIDDENS,\n decay=0.99,\n memory_size=10000,\n batch_size=300,\n epsilon_decrement=5e-4,\n epsilon_lower=0.02,\n learn_start=LEARN_START,\n )\n\n if USING_GAN:\n gan = GAN(\n sess=Sess,\n n_features=QUANTITY_FEATURES,\n memory_size=100000,\n batch_size=300,\n generator=generator,\n discriminator=discriminator,\n learning_rate=0.001\n )\n else:\n gan = None\n\n dataQ = queue.Queue()\n\n score_plotter = ScorePlotter()\n scoreQ = queue.Queue()\n\n ais = []\n for i in range(N_GAMES):\n ais.append(\n DeepQNetwork(\n n_features=QUANTITY_FEATURES,\n n_actions=QUANTITY_ACTIONS,\n scope='local_ai_' + str(i),\n model=mlp,\n parent_ai=global_ai,\n sess=Sess,\n hiddens=HIDDENS\n )\n )\n\n Saver = tf.train.Saver()\n\n if RESTORE:\n Saver.restore(Sess, RESTORE_PATH)\n if RESET_EPSILON:\n Sess.run(global_ai.reset_epsilon)\n print('restored successfully from ' + RESTORE_PATH)\n else:\n Sess.run(tf.global_variables_initializer())\n\n for i in range(N_GAMES):\n new_thread = td.Thread(target=play, args=(ais[i], global_ai, gan, dataQ, scoreQ))\n new_thread.start()\n\n while True:\n plot_score(score_plotter, scoreQ)\n fetch_data(global_ai, gan, dataQ)\n global_ai.learn()\n if USING_GAN:\n D_loss = gan.train_D()\n G_loss = gan.train_G()\n if global_ai.learn_step % 100 == 1:\n print(\"D loss:\", gan.train_D())\n print(\"G loss:\", gan.train_G())\n if not ONLY_PLAY and global_ai.learn_step % SAVE_EVERY == 1:\n save_path = Saver.save(Sess, SAVE_PATH + str(global_ai.learn_step) + '.ckpt')\n print('saved in' + save_path)\n","sub_path":"for_comparing/chainmdp_without_gan/train_chainmdp.py","file_name":"train_chainmdp.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"62981751","text":"import os\nimport random\n\nimport cv2\nimport numpy as np\n# from os.path import join\nimport torch.utils.data as data\n# import matplotlib.pyplot as plt\nfrom PIL import Image, ImageFile\nfrom torchvision import transforms\nfrom torchvision.transforms import ToTensor\n\n\ndef classToRGB(label):\n l, w = label.shape[0], label.shape[1]\n colmap = np.zeros(shape=(l, w, 3)).astype(np.float32)\n indices = np.where(label == 0)\n colmap[indices[0].tolist(), indices[1].tolist(), :] = [0, 0, 0]\n indices = np.where(label == 1)\n colmap[indices[0].tolist(), indices[1].tolist(), :] = [255, 255, 255]\n transform = ToTensor()\n # plt.imshow(colmap)\n # plt.show()\n return transform(colmap)\n\n\nclass Cinnamon(data.Dataset):\n \"\"\" Custom Cinnamon datasets class \"\"\"\n\n def __init__(self, root, ids, label=False, transform=False):\n super(Cinnamon, self).__init__()\n \"\"\"\n Args:\n\n fileDir(string): directory with all the input images.\n transform(callable, optional): Optional transform to be applied on a sample\n \"\"\"\n self.root = root\n self.label = label\n self.transform = transform\n self.ids = ids\n self.classdict = {0: \"background\", 1: \"text\"}\n self.color_jitter = transforms.ColorJitter(\n brightness=0.3, contrast=0.3, saturation=0.3, hue=0.04)\n self.resizer = transforms.Resize((2448, 2448))\n\n def __getitem__(self, index):\n sample = {}\n sample['id'] = self.ids[index][:-8]\n 
image = Image.open(os.path.join(\n self.root, \"images/\" + self.ids[index])).convert('RGB')\n sample['image'] = image\n # TODO: Maybe need to create mask from json label or something\n if self.label:\n label = Image.open(os.path.join(\n self.root, 'mask/' + self.ids[index].replace('.png', '_mask.png'))).convert('L')\n sample['label'] = label\n if self.transform and self.label:\n image, label = self._transform(image, label)\n sample['image'] = image\n sample['label'] = label\n return sample\n\n\n def _transform(self, image, label):\n # if np.random.random() > 0.5:\n # image = self.color_jitter(image)\n\n # if np.random.random() > 0.5:\n # image = transforms.functional.vflip(image)\n # label = transforms.functional.vflip(label)\n\n if np.random.random() > 0.5:\n image = transforms.functional.hflip(image)\n label = transforms.functional.hflip(label)\n\n if np.random.random() > 0.5:\n degree = random.choice([90, 180, 270])\n image = transforms.functional.rotate(image, degree)\n label = transforms.functional.rotate(label, degree)\n\n # if np.random.random() > 0.5:\n # degree = 60 * np.random.random() - 30\n # image = transforms.functional.rotate(image, degree)\n # label = transforms.functional.rotate(label, degree)\n\n # if np.random.random() > 0.5:\n # ratio = np.random.random()\n # h = int(2448 * (ratio + 2) / 3.)\n # w = int(2448 * (ratio + 2) / 3.)\n # i = int(np.floor(np.random.random() * (2448 - h)))\n # j = int(np.floor(np.random.random() * (2448 - w)))\n # image = self.resizer(transforms.functional.crop(image, i, j, h, w))\n # label = self.resizer(transforms.functional.crop(label, i, j, h, w))\n \n return image, label\n\n\n def __len__(self):\n return len(self.ids)\n","sub_path":"dataset/cinnamon.py","file_name":"cinnamon.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"515820359","text":"import os\nimport sys\nimport json\nimport time\n\nfrom uuoskit import wasmcompiler\n\ntest_dir = os.path.dirname(__file__)\nsys.path.append(os.path.join(test_dir, '..'))\n\nfrom uuosio import log\nfrom uuosio.chaintester import ChainTester\n\nlogger = log.get_logger(__name__)\n\ndef print_console(tx):\n if 'processed' in tx:\n tx = tx['processed']\n for trace in tx['action_traces']:\n logger.info(trace['console'])\n if not 'inline_traces' in trace:\n continue\n for inline_trace in trace['inline_traces']:\n logger.info('++inline console:', inline_trace['console'])\n\ndef print_except(tx):\n if 'processed' in tx:\n tx = tx['processed']\n for trace in tx['action_traces']:\n logger.info(trace['console'])\n logger.info(json.dumps(trace['except'], indent=4))\n\nclass Test(object):\n\n @classmethod\n def setup_class(cls):\n cls.main_token = 'UUOS'\n cls.chain = ChainTester()\n\n test_account1 = 'hello'\n a = {\n \"account\": test_account1,\n \"permission\": \"active\",\n \"parent\": \"owner\",\n \"auth\": {\n \"threshold\": 1,\n \"keys\": [\n {\n \"key\": 'EOS6AjF6hvF7GSuSd4sCgfPKq5uWaXvGM2aQtEUCwmEHygQaqxBSV',\n \"weight\": 1\n }\n ],\n \"accounts\": [{\"permission\":{\"actor\":test_account1,\"permission\": 'eosio.code'}, \"weight\":1}],\n \"waits\": []\n }\n }\n cls.chain.push_action('eosio', 'updateauth', a, {test_account1:'active'})\n cls.chain.push_action('eosio', 'setpriv', {'account':'hello', 'is_priv': True}, {'eosio':'active'})\n\n @classmethod\n def teardown_class(cls):\n cls.chain.free()\n\n def setup_method(self, method):\n pass\n\n def teardown_method(self, method):\n pass\n\n def 
test_hello(self):\n code = '''\npackage main\nimport \"chain/logger\"\nfunc main() {\n logger.Println(\"Hello,world!\")\n}\n'''\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_pack_size(self):\n with open('testpacksize.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_action(self):\n with open('testaction.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n try:\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n except Exception as e:\n print_except(e.args[0])\n # logger.info(json.dumps(e.args[0], indent=4))\n # error = e.args[0]['except']\n # logger.info('error:', error)\n\n def test_crypto(self):\n with open('testcrypto.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_mi(self):\n with open('testmi.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('testmi', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n self.chain.produce_block()\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_print(self):\n with open('testprint.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_privileged(self):\n with open('testprivileged.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_deffered_tx(self):\n with open('testtransaction.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('testtransaction', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello1', b'hello,world')\n print_console(r)\n\n self.chain.produce_block()\n self.chain.produce_block()\n\n# time.sleep(1)\n r = self.chain.push_action('hello', 'sayhello3', b'hello,world')\n print_console(r)\n\n def test_db(self):\n with open('testdb.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('testdb', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_token(self):\n with open('testtoken.go', 'r') as f:\n code = f.read()\n code, abi = wasmcompiler.compile_go_src('hello', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n\n def test_singleton(self):\n with open('testsingleton.go', 'r') as f:\n code = f.read()\n code, abi = 
wasmcompiler.compile_go_src('testsingleton', code)\n assert code\n self.chain.deploy_contract('hello', code, b'', 0)\n\n for i in range(4):\n r = self.chain.push_action('hello', 'sayhello', b'hello,world')\n print_console(r)\n self.chain.produce_block()\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"506986273","text":"from django.views.generic import DetailView\nfrom models import Media\nfrom ..blog.utils import import_more_context, import_blog_models\nmore_context = import_more_context()\nBLOG_MODELS = import_blog_models()\nfrom forms import MediaForm, MediaTaggingForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.template.defaultfilters import slugify\nfrom ..blog.decorators import is_superuser_or_submitter\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.core.cache import cache\nfrom django.conf import settings\nfrom django.http import HttpResponse\nimport micawber\nimport json\nfrom taggit.models import Tag\nfrom endless_pagination.views import AjaxListView\nfrom django.core.urlresolvers import reverse\n\n\ndef get_youtube_trailer(slug):\n promo_vid = Media.objects.filter(tags__slug__in=[\"%s\" % (slug)],\n media_genre__title='Trailer',\n media_type__in=['video']).distinct().order_by('created').reverse()[:3]\n promo_vid_only_youtube = [p.url for p in promo_vid if p.url.find(\"yout\") > 0]\n return promo_vid_only_youtube[0] if len(promo_vid_only_youtube) > 0 else None\n\n\ndef get_youtube_trailer_and_object(slug_list):\n promo_vid = Media.objects.filter(tags__slug__in=slug_list,\n media_genre__title='Trailer',\n media_type__in=['video']).distinct().order_by('created').reverse()[:3]\n return [p for p in promo_vid if p.url.find(\"yout\") > 0]\n\n\n@login_required\ndef media_create(request, template_name='media_form.html'):\n return media_create_tags(request, None)\n\n\n@login_required\ndef media_create_tags(request, match, instance=None, template_name='media_form.html'):\n\n if request.method == 'POST':\n form = MediaForm(request.POST, instance=instance if instance is not None else Media())\n\n if form.is_valid():\n providers = micawber.bootstrap_basic()\n try:\n provider_obj = providers.request(form.cleaned_data['url'])\n except micawber.exceptions.ProviderNotFoundException as msg:\n return render(request, template_name, {'object_form': form, 'error_message': msg})\n except micawber.exceptions.ProviderException as msg:\n return render(request, template_name, {'object_form': form, 'error_message': \"Embedding disabled by request\"})\n media = form.save(commit=False)\n media.title = provider_obj.get('title') if len(provider_obj.get('title')) < 200 else provider_obj.get('title')[:200]\n media.media_type = provider_obj['type']\n media.thumbnail_url = provider_obj.get('thumbnail_url', provider_obj.get('url'))\n media.submitted_by = request.user if instance is None else instance.submitted_by\n media.blog_type = 1 # media\n media.save()\n media.tags.clear()\n for tag in form.cleaned_data['tags']:\n media.tags.add(tag.lower())\n\n return redirect(media)\n else:\n if match:\n tag_list = ['\"%s\"' % tag.replace('-', ' ') for tag in match.split('|')]\n form = MediaForm(initial={'tags': ','.join(tag_list)})\n else:\n form = MediaForm(instance=instance)\n return render(request, template_name, {'object_form': 
form})\n\n\n@is_superuser_or_submitter\ndef media_edit(request, pk, template_name='media_form.html', **kwargs):\n media = get_object_or_404(Media, pk=pk, **kwargs)\n return media_create_tags(request, None, media)\n\n\ndef media_tagging(request, pk, template_name='media_form.html'):\n media = get_object_or_404(Media, pk=pk)\n if request.method == 'POST':\n form = MediaTaggingForm(request.POST, request.FILES, instance=media)\n if form.is_valid():\n media.save()\n\n media.tags.clear()\n for tag in form.cleaned_data['tags']:\n media.tags.add(tag.lower())\n return redirect(media)\n else:\n form = MediaTaggingForm(instance=media)\n return render(request, template_name, {\n 'object_form': form\n })\n\n\n@is_superuser_or_submitter\ndef media_remove(request, pk, template_name='base_remove_confirm.html', **kwargs):\n media = get_object_or_404(Media, pk=pk, **kwargs)\n if request.method == 'POST':\n media.tags.clear()\n media.delete()\n return redirect(\"/media/\")\n return render(request, template_name, {'object': media})\n\n\ndef get_media_by_type(tag_list, media_type=None):\n media_param = {\"tags__slug__in\": tag_list}\n if media_type:\n media_param['media_type__in'] = [media_type]\n return Media.objects.filter(**media_param).distinct().order_by('created').reverse()\n\n\ndef suggest_tag(request):\n if request.is_ajax():\n q = request.GET.get('q', '')\n tags = Tag.objects.filter(name__icontains=q).distinct()\n results = []\n for t in tags:\n tjson = {}\n tjson['id'] = t.slug\n tjson['value'] = t.name\n tjson['tokens'] = [t.name]\n tjson['link'] = reverse('media_tag_list', kwargs={'match': t.slug})\n results.append(tjson)\n data = json.dumps(results)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\nclass MediaDetailView(DetailView):\n template_name = \"media_detail.html\"\n model = Media\n providers = micawber.bootstrap_basic(cache)\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(MediaDetailView, self).get_context_data(**kwargs)\n try:\n provider_obj = self.providers.request(self.object.url)\n except micawber.exceptions.ProviderNotFoundException as msg:\n provider_obj = msg\n except micawber.exceptions.ProviderException as msg:\n provider_obj = msg\n tag_list = [slugify(tag) for tag in self.object.tags.all()]\n context['list_title'] = 'More media'\n context['object_list'] = get_media_by_type(tag_list)[:4]\n obj_tag = get_tags_or_die(tag_list)\n context['tag_list'] = obj_tag\n context['slug_list'] = \"|\".join(tag_list)\n context['players'], context['object_members'] = more_context(obj_tag, self.object)\n context['metadata'] = provider_obj\n\n return context\n\n\nclass MediaTestView(MediaDetailView):\n key = getattr(settings, 'MICAWBER_EMBEDLY_KEY', None)\n params = {}\n if key:\n params['key'] = key\n providers = micawber.bootstrap_embedly(**params)\n\n\nclass MediaListView(AjaxListView):\n template_name = \"media_list.html\"\n page_template = \"base_thumbnail_page.html\"\n model = Media\n\n def get_queryset(self, **kwargs):\n kwargs = {'media_type': self.kwargs['type']} if self.kwargs.get('type') is not None else {}\n object_list = Media.objects.filter(**kwargs).order_by('created').reverse()\n return object_list\n\n def get_context_data(self, **kwargs):\n context = super(MediaListView, self).get_context_data(**kwargs)\n context['list_title'] = 'All media' if self.kwargs.get('type') is None else 'Media filter by %s' % self.kwargs['type']\n tag_list = [slugify(tu) for t in 
self.object_list[:10] if t.tags is not None for tu in t.tags.all()]\n context['tags_list'] = get_tags_or_die(set(tag_list))[:9]\n context['type'] = self.kwargs.get('type', \"\")\n return context\n\n\nclass MediaByTagListView(MediaListView):\n tag_list = []\n\n def get_queryset(self, **kwargs):\n self.tag_list = [tag for tag in self.kwargs['match'].split('|')]\n object_list = Media.objects.filter(tags__slug__in=self.tag_list).distinct().order_by('created').reverse()\n return object_list\n\n def get_context_data(self, **kwargs):\n context = super(MediaByTagListView, self).get_context_data(**kwargs)\n context['tag_list'] = get_tags_or_die(self.tag_list)\n return context\n\n\nclass MediaSearchByTagListView(MediaListView):\n tag_list = []\n\n def get_queryset(self, **kwargs):\n q = self.request.GET.get('match')\n if len(q) > 2:\n tags = Tag.objects.filter(name__icontains=q).distinct()\n self.tag_list = [t.slug for t in tags]\n object_list = Media.objects.filter(tags__slug__in=self.tag_list).distinct().order_by('created').reverse()\n else:\n object_list = Media.objects.all().order_by('created').reverse()\n return object_list\n\n def get_context_data(self, **kwargs):\n context = super(MediaSearchByTagListView, self).get_context_data(**kwargs)\n context['tag_list'] = get_tags_or_die(self.tag_list)[:9]\n return context\n\n\ndef get_tags_or_die(tag_list):\n def try_to_get_model(model):\n try:\n o = model.objects.get(slug=tag)\n return o\n except ObjectDoesNotExist:\n pass\n except MultipleObjectsReturned:\n pass\n\n tags = [try_to_get_model(model) for model in BLOG_MODELS for tag in tag_list]\n return [t for t in tags if t is not None]\n","sub_path":"base/media/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"555989048","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.offline as ply\nimport plotly.graph_objs as go\n\n# Read from file\nprint('Reading data...')\n# data = pd.read_pickle(r'modified\\Balance_Demand.pkl')\ndata = pd.read_pickle(r'modified\\Balance_Demand_Mod.pkl')\n\nother_cols = ['Demand (MW) (Adjusted)',\n 'Net Generation (MW) (Adjusted)',\n ]\n\nbal_areas = sorted(list(set(data.loc[:, 'Balancing Authority'])))\n\nif not os.path.exists(r'out\\BA Charts by Type\\Demand'):\n os.makedirs(r'out\\BA Charts by Type\\Demand')\nif not os.path.exists(r'out\\BA Charts by Type\\Demand Ramp'):\n os.makedirs(r'out\\BA Charts by Type\\Demand Ramp')\n\nfor ba in bal_areas:\n# for ba in ['CISO']:\n print(f'\\nBeginning processing for {ba}...')\n data_sel = data.loc[data['Balancing Authority'] == ba, other_cols].sort_index()\n\n gt_total = data_sel.sum(axis=0)\n disp_sel = gt_total[gt_total != 0].index\n\n print('Shaping data for chart...')\n chart_sel = [dict(name=col,\n x=data_sel.index,\n y=data_sel[col],\n hoverinfo='text+x+y',\n text=col)\n for col in disp_sel]\n\n layout = go.Layout(hovermode='closest')\n fig_sel = go.Figure(data=chart_sel, layout=layout)\n\n print('Creating plot...')\n if not os.path.exists(f'out\\\\Balancing Areas\\\\{ba}'):\n os.makedirs(f'out\\\\Balancing Areas\\\\{ba}')\n ply.plot(fig_sel, filename=f'out\\\\Balancing Areas\\\\{ba}\\\\{ba} EIA 930 Demand.html', auto_open=False)\n ply.plot(fig_sel, filename=f'out\\\\BA Charts by Type\\\\Demand\\\\{ba} EIA 930 Demand.html', auto_open=False)\n print('Plot created!')\n\n # Ramp data\n print(f'Processing ramp data...')\n data_sel['Ramp'] = 
data_sel['Demand (MW) (Adjusted)'].diff()\n\n    print('Final data shaping...')\n    chart_sel = dict(name='Ramp (MW)',\n                     x=data_sel.index,\n                     y=data_sel['Ramp'],\n                     hoverinfo='text+x+y',\n                     text='Ramp (MW)')\n\n    layout = go.Layout(hovermode='closest')\n    fig_sel = go.Figure(data=chart_sel, layout=layout)\n\n    print('Creating plot...')\n    ply.plot(fig_sel, filename=f'out\\\\Balancing Areas\\\\{ba}\\\\{ba} EIA 930 Demand Ramp.html', auto_open=False)\n    ply.plot(fig_sel, filename=f'out\\\\BA Charts by Type\\\\Demand Ramp\\\\{ba} EIA 930 Demand Ramp.html', auto_open=False)\n    print('Plot created!')\n","sub_path":"plot_demand.py","file_name":"plot_demand.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"55697205","text":"__author__ = 'shiqifeng'\n\ndef delspace(line):\n    return line.lstrip()\n\nfilename = raw_input('Enter a filename: ')\nf = open(filename,'r')\nflist = []\n\nfor eachline in f:\n    flist.append(eachline) # convert the file contents to a list with one line per element\nf.close()\n\nchoose = raw_input(\"choose a way (C/N) to cover the old file or write a new file\")\nif choose == 'N' :\n    wfilename = raw_input('Enter a filename to be written: ')\n    wf = open(wfilename,'w')\n    wf.writelines(map(delspace,flist))\n    wf.close()\nelif choose == 'C' :\n    newf = open(filename,'w')\n    newf.writelines(map(delspace,flist))\n    newf.close()","sub_path":"task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"560979131","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Part of Odoo.\n# Copyright (C) 2016 ITS-1 ()\n# E-mail: \n# Address: \n# Phone: +371 67289467\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom odoo import api, fields, models, _\n\nclass AccountJournal(models.Model):\n _inherit = \"account.journal\"\n\n @api.model\n def _prepare_liquidity_account(self, name, company, currency_id, type):\n res = super(AccountJournal, self)._prepare_liquidity_account(name, company, currency_id, type)\n if type in ('bank', 'cash'):\n user_type = self.env.ref('l10n_lv.lv_account_type_2_5')\n if user_type:\n res['user_type_id'] = user_type.id\n tag_0 = self.env.ref('l10n_lv.lv_account_tag_0')\n tag_2 = self.env.ref('l10n_lv.lv_account_tag_2')\n tag_26 = self.env.ref('l10n_lv.lv_account_tag_26')\n tags = []\n if tag_0:\n tags.append(tag_0.id)\n if tag_2:\n tags.append(tag_2.id)\n if tag_26:\n tags.append(tag_26.id)\n if type == 'bank':\n tag_262 = self.env.ref('l10n_lv.lv_account_tag_262')\n if tag_262:\n tags.append(tag_262.id)\n if type == 'cash':\n tag_261 = self.env.ref('l10n_lv.lv_account_tag_261')\n if tag_261:\n tags.append(tag_261.id)\n if tags:\n res.update({'tag_ids': [(6, 0, tags)]})\n return res\n\nclass WizardMultiChartsAccounts(models.TransientModel):\n _inherit = 'wizard.multi.charts.accounts'\n\n @api.onchange('chart_template_id')\n def onchange_chart_template_id(self):\n res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id()\n lv_chart_template = self.env.ref('l10n_lv.l10n_lv_chart_template')\n if lv_chart_template and self.chart_template_id.id == lv_chart_template.id:\n# lv_sale_tax = self.env.ref('l10n_lv.lv_tax_template_PVN-SR')\n# lv_purchase_tax = self.env.ref('l10n_lv.lv_tax_template_Pr-SR')\n# if lv_sale_tax:\n# self.sale_tax_id = lv_sale_tax.id\n# if lv_purchase_tax:\n# self.purchase_tax_id = lv_purchase_tax.id\n self.sale_tax_rate = 21.0\n self.purchase_tax_rate = 21.0\n return res\n\nclass AccountConfigSettings(models.TransientModel):\n _inherit = 'account.config.settings'\n\n @api.onchange('chart_template_id')\n def onchange_chart_template_id(self):\n res = super(AccountConfigSettings, self).onchange_chart_template_id()\n lv_chart_template = self.env.ref('l10n_lv.l10n_lv_chart_template')\n if self.chart_template_id and lv_chart_template and self.chart_template_id.id == lv_chart_template.id:\n lv_sale_tax = self.env.ref('l10n_lv.lv_tax_template_PVN-SR')\n lv_purchase_tax = self.env.ref('l10n_lv.lv_tax_template_Pr-SR')\n if lv_sale_tax:\n self.sale_tax_id = lv_sale_tax.id\n self.default_sale_tax_id = lv_sale_tax.id\n if lv_purchase_tax:\n self.purchase_tax_id = lv_purchase_tax.id\n self.default_purchase_tax_id = lv_purchase_tax.id\n self.sale_tax_rate = 21.0\n self.purchase_tax_rate = 21.0\n return res\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"l10n_lv/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"560979131","text":"from django.conf.urls import patterns, url\nfrom qoe import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^init/', views.initQoE, name='init'),\n\turl(r'^query/', views.query, name='query'),\n\turl(r'^update', views.update, name='update'),\n\turl(r'^dump/', views.dump, name='dump'),\n]\n","sub_path":"qoe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454638533","text":"#!/usr/bin/python3\n# -*- coding=utf-8 
-*-\nimport numpy as np\nimport copy\nfrom scipy.special import expit, softmax\n\nfrom common.wbf_postprocess import weighted_boxes_fusion\n\ndef yolo_decode(prediction, anchors, num_classes, input_dims, scale_x_y=None, use_softmax=False):\n '''Decode final layer features to bounding box parameters.'''\n batch_size = np.shape(prediction)[0]\n num_anchors = len(anchors)\n\n grid_size = np.shape(prediction)[1:3]\n #check if stride on height & width are same\n assert input_dims[0]//grid_size[0] == input_dims[1]//grid_size[1], 'model stride mismatch.'\n stride = input_dims[0] // grid_size[0]\n\n prediction = np.reshape(prediction,\n (batch_size, grid_size[0] * grid_size[1] * num_anchors, num_classes + 5))\n\n ################################\n # generate x_y_offset grid map\n grid_y = np.arange(grid_size[0])\n grid_x = np.arange(grid_size[1])\n x_offset, y_offset = np.meshgrid(grid_x, grid_y)\n\n x_offset = np.reshape(x_offset, (-1, 1))\n y_offset = np.reshape(y_offset, (-1, 1))\n\n x_y_offset = np.concatenate((x_offset, y_offset), axis=1)\n x_y_offset = np.tile(x_y_offset, (1, num_anchors))\n x_y_offset = np.reshape(x_y_offset, (-1, 2))\n x_y_offset = np.expand_dims(x_y_offset, 0)\n\n ################################\n\n # Log space transform of the height and width\n anchors = np.tile(anchors, (grid_size[0] * grid_size[1], 1))\n anchors = np.expand_dims(anchors, 0)\n\n if scale_x_y:\n # Eliminate grid sensitivity trick involved in YOLOv4\n #\n # Reference Paper & code:\n # \"YOLOv4: Optimal Speed and Accuracy of Object Detection\"\n # https://arxiv.org/abs/2004.10934\n # https://github.com/opencv/opencv/issues/17148\n #\n box_xy_tmp = expit(prediction[..., :2]) * scale_x_y - (scale_x_y - 1) / 2\n box_xy = (box_xy_tmp + x_y_offset) / np.array(grid_size)[::-1]\n else:\n box_xy = (expit(prediction[..., :2]) + x_y_offset) / np.array(grid_size)[::-1]\n box_wh = (np.exp(prediction[..., 2:4]) * anchors) / np.array(input_dims)[::-1]\n\n # Sigmoid objectness scores\n objectness = expit(prediction[..., 4]) # p_o (objectness score)\n objectness = np.expand_dims(objectness, -1) # To make the same number of values for axis 0 and 1\n\n if use_softmax:\n # Softmax class scores\n class_scores = softmax(prediction[..., 5:], axis=-1)\n else:\n # Sigmoid class scores\n class_scores = expit(prediction[..., 5:])\n\n return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)\n\n\ndef yolo_correct_boxes(predictions, img_shape, model_image_size):\n '''rescale predicition boxes back to original image shape'''\n box_xy = predictions[..., :2]\n box_wh = predictions[..., 2:4]\n objectness = np.expand_dims(predictions[..., 4], -1)\n class_scores = predictions[..., 5:]\n\n # model_image_size & image_shape should be (height, width) format\n model_image_size = np.array(model_image_size, dtype='float32')\n image_shape = np.array(img_shape, dtype='float32')\n height, width = image_shape\n\n new_shape = np.round(image_shape * np.min(model_image_size/image_shape))\n offset = (model_image_size-new_shape)/2./model_image_size\n scale = model_image_size/new_shape\n # reverse offset/scale to match (w,h) order\n offset = offset[..., ::-1]\n scale = scale[..., ::-1]\n\n box_xy = (box_xy - offset) * scale\n box_wh *= scale\n\n # Convert centoids to top left coordinates\n box_xy -= box_wh / 2\n\n # Scale boxes back to original image shape.\n image_wh = image_shape[..., ::-1]\n box_xy *= image_wh\n box_wh *= image_wh\n\n return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)\n\n\n\ndef 
yolo_handle_predictions(predictions, image_shape, max_boxes=100, confidence=0.1, iou_threshold=0.4, use_wbf=False):\n boxes = predictions[:, :, :4]\n box_confidences = np.expand_dims(predictions[:, :, 4], -1)\n box_class_probs = predictions[:, :, 5:]\n\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= confidence)\n\n boxes = boxes[pos]\n classes = box_classes[pos]\n scores = box_class_scores[pos]\n\n if use_wbf:\n # use Weighted-Boxes-Fusion for boxes postprocess\n n_boxes, n_classes, n_scores = weighted_boxes_fusion([boxes], [classes], [scores], image_shape, weights=None, iou_thr=iou_threshold)\n else:\n # Boxes, Classes and Scores returned from NMS\n n_boxes, n_classes, n_scores = nms_boxes(boxes, classes, scores, iou_threshold, confidence=confidence, use_diou=True, is_soft=False)\n\n if n_boxes:\n boxes = np.concatenate(n_boxes)\n classes = np.concatenate(n_classes).astype('int32')\n scores = np.concatenate(n_scores)\n boxes, classes, scores = filter_boxes(boxes, classes, scores, max_boxes)\n\n return boxes, classes, scores\n\n else:\n return [], [], []\n\n\ndef filter_boxes(boxes, classes, scores, max_boxes):\n '''\n Sort the prediction boxes according to score\n and only pick top \"max_boxes\" ones\n '''\n # sort result according to scores\n sorted_indices = np.argsort(scores)\n sorted_indices = sorted_indices[::-1]\n nboxes = boxes[sorted_indices]\n nclasses = classes[sorted_indices]\n nscores = scores[sorted_indices]\n\n # only pick max_boxes\n nboxes = nboxes[:max_boxes]\n nclasses = nclasses[:max_boxes]\n nscores = nscores[:max_boxes]\n\n return nboxes, nclasses, nscores\n\n\ndef box_iou(boxes):\n \"\"\"\n Calculate iou on box array\n\n Parameters\n ----------\n boxes: bbox numpy array, shape=(N, 4), xywh\n x,y are top left coordinates\n\n Returns\n -------\n iou: numpy array, shape=(N-1,)\n IoU value of boxes[:-1] with boxes[-1]\n \"\"\"\n # get box coordinate and area\n x = boxes[:, 0]\n y = boxes[:, 1]\n w = boxes[:, 2]\n h = boxes[:, 3]\n areas = w * h\n\n # check IoU\n inter_xmin = np.maximum(x[:-1], x[-1])\n inter_ymin = np.maximum(y[:-1], y[-1])\n inter_xmax = np.minimum(x[:-1] + w[:-1], x[-1] + w[-1])\n inter_ymax = np.minimum(y[:-1] + h[:-1], y[-1] + h[-1])\n\n inter_w = np.maximum(0.0, inter_xmax - inter_xmin + 1)\n inter_h = np.maximum(0.0, inter_ymax - inter_ymin + 1)\n\n inter = inter_w * inter_h\n iou = inter / (areas[:-1] + areas[-1] - inter)\n return iou\n\n\ndef box_diou(boxes):\n \"\"\"\n Calculate diou on box array\n Reference Paper:\n \"Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression\"\n https://arxiv.org/abs/1911.08287\n\n Parameters\n ----------\n boxes: bbox numpy array, shape=(N, 4), xywh\n x,y are top left coordinates\n\n Returns\n -------\n diou: numpy array, shape=(N-1,)\n IoU value of boxes[:-1] with boxes[-1]\n \"\"\"\n # get box coordinate and area\n x = boxes[:, 0]\n y = boxes[:, 1]\n w = boxes[:, 2]\n h = boxes[:, 3]\n areas = w * h\n\n # check IoU\n inter_xmin = np.maximum(x[:-1], x[-1])\n inter_ymin = np.maximum(y[:-1], y[-1])\n inter_xmax = np.minimum(x[:-1] + w[:-1], x[-1] + w[-1])\n inter_ymax = np.minimum(y[:-1] + h[:-1], y[-1] + h[-1])\n\n inter_w = np.maximum(0.0, inter_xmax - inter_xmin + 1)\n inter_h = np.maximum(0.0, inter_ymax - inter_ymin + 1)\n\n inter = inter_w * inter_h\n iou = inter / (areas[:-1] + areas[-1] - inter)\n\n # box center distance\n x_center = x + w/2\n y_center = y 
+ h/2\n    center_distance = np.power(x_center[:-1] - x_center[-1], 2) + np.power(y_center[:-1] - y_center[-1], 2)\n\n    # get enclosed area\n    enclose_xmin = np.minimum(x[:-1], x[-1])\n    enclose_ymin = np.minimum(y[:-1], y[-1])\n    enclose_xmax = np.maximum(x[:-1] + w[:-1], x[-1] + w[-1])\n    enclose_ymax = np.maximum(y[:-1] + h[:-1], y[-1] + h[-1])\n    enclose_w = np.maximum(0.0, enclose_xmax - enclose_xmin + 1)\n    enclose_h = np.maximum(0.0, enclose_ymax - enclose_ymin + 1)\n    # get enclosed diagonal distance\n    enclose_diagonal = np.power(enclose_w, 2) + np.power(enclose_h, 2)\n    # calculate DIoU, add epsilon in denominator to avoid dividing by 0\n    diou = iou - 1.0 * (center_distance) / (enclose_diagonal + np.finfo(float).eps)\n\n    return diou\n\n\ndef nms_boxes(boxes, classes, scores, iou_threshold, confidence=0.1, use_diou=True, is_soft=False, use_exp=False, sigma=0.5):\n    nboxes, nclasses, nscores = [], [], []\n    for c in set(classes):\n        # handle data for one class\n        inds = np.where(classes == c)\n        b = boxes[inds]\n        c = classes[inds]\n        s = scores[inds]\n\n        # make a data copy to avoid breaking\n        # during nms operation\n        b_nms = copy.deepcopy(b)\n        c_nms = copy.deepcopy(c)\n        s_nms = copy.deepcopy(s)\n\n        while len(s_nms) > 0:\n            # pick the max box and store, here\n            # we also use copy to persist result\n            i = np.argmax(s_nms, axis=-1)\n            nboxes.append(copy.deepcopy(b_nms[i]))\n            nclasses.append(copy.deepcopy(c_nms[i]))\n            nscores.append(copy.deepcopy(s_nms[i]))\n\n            # swap the max line and last line\n            b_nms[[i,-1],:] = b_nms[[-1,i],:]\n            c_nms[[i,-1]] = c_nms[[-1,i]]\n            s_nms[[i,-1]] = s_nms[[-1,i]]\n\n            if use_diou:\n                iou = box_diou(b_nms)\n            else:\n                iou = box_iou(b_nms)\n\n            # drop the last line since it has been recorded\n            b_nms = b_nms[:-1]\n            c_nms = c_nms[:-1]\n            s_nms = s_nms[:-1]\n\n            if is_soft:\n                # Soft-NMS\n                if use_exp:\n                    # score refresh formula:\n                    # score = score * exp(-(iou^2)/sigma)\n                    s_nms = s_nms * np.exp(-(iou * iou) / sigma)\n                else:\n                    # score refresh formula:\n                    # score = score * (1 - iou) if iou > threshold\n                    depress_mask = np.where(iou > iou_threshold)[0]\n                    s_nms[depress_mask] = s_nms[depress_mask]*(1-iou[depress_mask])\n                keep_mask = np.where(s_nms >= confidence)[0]\n            else:\n                # normal Hard-NMS\n                keep_mask = np.where(iou <= iou_threshold)[0]\n\n            # keep needed box for next loop\n            b_nms = b_nms[keep_mask]\n            c_nms = c_nms[keep_mask]\n            s_nms = s_nms[keep_mask]\n\n    # reformat result for output\n    nboxes = [np.array(nboxes)]\n    nclasses = [np.array(nclasses)]\n    nscores = [np.array(nscores)]\n    return nboxes, nclasses, nscores\n\n\ndef yolo_adjust_boxes(boxes, img_shape):\n    '''\n    change box format from (x,y,w,h) top left coordinate to\n    (xmin,ymin,xmax,ymax) format\n    '''\n    if boxes is None or len(boxes) == 0:\n        return []\n\n    image_shape = np.array(img_shape, dtype='float32')\n    height, width = image_shape\n\n    adjusted_boxes = []\n    for box in boxes:\n        x, y, w, h = box\n\n        xmin = x\n        ymin = y\n        xmax = x + w\n        ymax = y + h\n\n        ymin = max(0, np.floor(ymin + 0.5).astype('int32'))\n        xmin = max(0, np.floor(xmin + 0.5).astype('int32'))\n        ymax = min(height, np.floor(ymax + 0.5).astype('int32'))\n        xmax = min(width, np.floor(xmax + 0.5).astype('int32'))\n        adjusted_boxes.append([xmin,ymin,xmax,ymax])\n\n    return np.array(adjusted_boxes,dtype=np.int32)\n\n","sub_path":"kerasYOLO/common/yolo_postprocess_np.py","file_name":"yolo_postprocess_np.py","file_ext":"py","file_size_in_byte":11459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"97886598","text":"#coding=utf-8\nfrom datetime import datetime, timedelta\nfrom typing import Dict, Optional, Any\nfrom uuid import UUID, uuid4\nimport time\nimport math\nfrom decimal import Decimal\nfrom pytz import timezone\nimport os\nimport csv\nimport numpy as np\nimport config as cfg\n\n''' ----------- ensure singleton object, 2 methods, 1 through overwriting magic method, 1 by decorator ------'''\nclass SingletonMeta(type):\n\tdef __call__(cls, *args, **kwargs):\n\t\tif not hasattr(cls, '_instance'):\n\t\t\t# do not use 'cls._instance = cls(*args, **kwargs)' here, to avoid infinite recursion:\n\t\t\t# calls to cls are already intercepted by this '__call__' protocol,\n\t\t\t# so use super(SingletonMeta, cls).__call__ to create the cls instance\n\t\t\tcls._instance = super(SingletonMeta, cls).__call__(*args, **kwargs)\n\t\treturn cls._instance\n\ndef chunklist(chunksize: int,length: int,lst: list=None) -> list:\n\tif lst is not None: # will return chunked list with original list elements rather than chunk indices\n\t\tbucketlist = list(map(lambda x: lst[x*chunksize: x*chunksize+chunksize],list(range(0, math.ceil(len(lst)/chunksize)))))\n\telse:\n\t\tlst = range(0,length)\n\t\tbucketlist = list(map(lambda x: [x*chunksize, min(x*chunksize+chunksize,len(lst))],list(range(0, math.ceil(len(lst)/chunksize)))))\n\treturn bucketlist\n\ndef singleton(cls):\n    instances = {}\n    def wrapper(*args, **kwargs):\n        if cls not in instances:\n            instances[cls] = cls(*args, **kwargs)\n        return instances[cls]\n    return wrapper\n''' --------------------------------------------------------------------------------------------------------'''\n\ndef logtag(cat: str, ops: str, uuid: Optional[UUID]=None, status: Optional[str]=None, progress: Optional[str]=None):\n    return f'{cat}~{ops}~{str(uuid)}~{status}~{progress}'\n\ndef query_to_sql(sqlalchemyqueryfunc):\n\treturn str(sqlalchemyqueryfunc.statement.compile(compile_kwargs={'literal_binds': True}))\n\ndef normByRange(lst_input,delta=None,int_start: int=0,int_end: int=1000,):\n\tlength = abs(int_end - int_start)\n\tdelta = np.nanmax(lst_input) - np.nanmin(lst_input) if delta is None else delta\n\treturn map(lambda x: round(((x- np.nanmin(lst_input))*length/delta),0), lst_input)\n\ndef printElement (iterable):\n\tfor index, item in enumerate(iterable,1):\n\t\tprint(index,':- ',item)\n\ndef to_dbtime (int_unixtime:int) -> str:\n\treturn datetime.utcfromtimestamp(int_unixtime / 1000).strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef datediff_insec (dtm_start: datetime, dtm_end: datetime) -> int:\n\tdate_diff = dtm_end - dtm_start\n\treturn (date_diff.days * 24 * 60 * 60) + date_diff.seconds\n\ndef toPrec(num, n: int=2, method: str='r') -> Decimal:\n\t# try:\n\tif method=='r':\n\t\treturn Decimal(str(round(num,n)))\n\telif method=='u':\n\t\tresult = math.ceil(float(num) * 10**n) / 10**n\n\t\treturn Decimal(str(result))\n\telif method=='d':\n\t\tresult = math.floor(float(num) * 10**n) / 10**n\n\t\treturn Decimal(str(result))\n\t# except:\n\t# \treturn None\n\ndef chkEncoding (file,checkLen: int=10) -> str:\n    defaultEncoding='ISO-8859-1'\n    from bs4 import UnicodeDammit\n    with open(file, 'rb') as f:\n        fhead = f.read(checkLen)\n        detected = UnicodeDammit(fhead).original_encoding\n        print('detected encoding by reading the first {} chars is {}'.format(checkLen,detected))\n    if detected in ['utf-8','ISO-8859-1']:\n        return detected\n    else:\n        return defaultEncoding\n    # if (b[0:3] == b'\\xef\\xbb\\xbf'):\n    #     print('UTF8')\n    # else:\n    #     print('NOT UTF')\n\ndef log_csv(str_cat: str='INFO', str_op: str='', str_desc: str=''):\n\tlogtime = datetime.now().astimezone(timezone(cfg.STR_TIMEZONE))\n\tstr_logdate = 
datetime.strftime(logtime,'%Y%m%d')\n\tstr_logtime = datetime.strftime(logtime,'%H:%M:%S')\n\t'''fname = '{}log_{}.csv'.format(cfg.PATH_GENFILE,str_logdate)\n\twriteMode = 'a' if os.path.isfile(fname) else 'w'\t\n\twith open(fname, writeMode) as logfile:\n\t\toutput = csv.writer(logfile)\n\t\toutput.writerow([str_cat,str_logdate,str_logtime,str_op,str_desc])'''\n\tprint('{}:{} - {}'.format(str_cat,str_logtime,str_desc))\n\ndef progressbar(index: int,scale: int,str_remark: str=''): #scale = len of iterables\n\tbarsize = 30\n\troundbar = (index*barsize)//scale\n\tprog = ('▮' * roundbar)\n\tremain = ('▯' * (barsize-roundbar))\n\tperc = int((index*100)//scale)\n\t# print(\"\\r{:^3.0f}%[{}->{}] {:.2f}s\".format(perc,prog,remain,flt_dura),end = '')\n\tprint(\"\\r[{}{} {}%] {}\".format(prog,remain,perc,str_remark),end = '')\n\ndef df_csv (str_fanme: str, tpl_dflist: tuple, str_mode: str='w',lst_standardCol: list=[]):\n\t# file_name = config.PATH_FILE + str_fanme + '.csv'\n\t''' check if input df(s) has consistent columns against the given lst_standardCol '''\n\tif len(lst_standardCol)>0 and len(set(lst_standardCol)-set(tpl_dflist[0].columns)) !=0:\n\t\tprint('Critical! - {} column(s) in {} not found in new data - {}'.format(set(lst_standardCol)-set(tpl_dflist[0].columns),\n\t\t\t\tstr_fanme,tpl_dflist[0].columns))\n\t\treturn False\n\tif len(lst_standardCol)>0 and len(set(tpl_dflist[0].columns)-set(lst_standardCol)) !=0:\n\t\tprint('Critical! - {} column(s) in new data not found in {} csv file'.format(set(tpl_dflist[0].columns)-set(lst_standardCol),\n\t\t\t\tstr_fanme))\n\t\treturn False\n\t'''consistency check ends'''\n\t\n\theader = True if str_mode == 'w' else False\n\tboo_index = True if str_fanme == cfg.PATH_BUGFILE else False\n\twith open(str_fanme, str_mode,newline='\\n') as f: # a to append, w to overwrite if file already exists\n\t\tfor dfitem in tpl_dflist:\n\t\t\ttry:\n\t\t\t\tdfitem.to_csv(f, header=header, index=boo_index, encoding=cfg.FILE_ENCODE)\n\t\t\texcept Exception as e:\n\t\t\t\tdfitem.to_csv(f, header=header, index=boo_index, encoding='GB18030')\n\t\t\theader = False\n\nimport socket \ndef get_hostname_ip(): \n try: \n host_name = socket.gethostname() \n host_ip = socket.gethostbyname(host_name) \n return host_name,host_ip \n except: \n print(\"Unable to get Hostname and IP\")\n return ''","sub_path":"tsaisendo/libs.py","file_name":"libs.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"115232827","text":"import pygame\r\nimport sys\r\nimport random\r\n\r\ndef randomColor():\r\n color = (random.choice(range(0,256)), random.choice(range(0,256)), random.choice(range(0,256)))\r\n return color\r\n\r\nclass Land:\r\n def __init__(self, x, y, width, height, color, spawn = False):\r\n self.rect = pygame.Rect((x, y), (width, height))\r\n self.color = color\r\n if self.color == (0,0,0):\r\n self.collides = True\r\n else:\r\n self.collides = False\r\n self.spawn = spawn\r\n def flip(self):\r\n if self.color == (0,0,0):\r\n self.color = (255,255,255)\r\n self.collides = False\r\n else:\r\n self.color = (0,0,0)\r\n self.collides = True\r\n def draw(self, screen):\r\n pygame.draw.rect(screen, self.color, self.rect)\r\n\r\n def loadLand(width, height, locations):\r\n lands = []\r\n landheight = height/len(locations)\r\n for y, each in enumerate(locations):\r\n landwidth = width/len(each)\r\n for x, pos in enumerate(each):\r\n if pos == 'S':\r\n lands.append(Land(x*landwidth, y*landheight, 
landwidth, landheight, (255,255,255), True))\r\n if pos == 'O':\r\n lands.append(Land(x*landwidth, y*landheight, landwidth, landheight, (255,255,255)))\r\n if pos == '%':\r\n lands.append(Land(x*landwidth, y*landheight, landwidth, landheight, (0,0,0)))\r\n if pos == 'X':\r\n lands.append(BlackLand(x*landwidth, y*landheight, landwidth, landheight, (0,0,0)))\r\n return lands\r\n\r\nclass BlackLand(Land):\r\n def flip(self):\r\n pass\r\n\r\nclass Character:\r\n def __init__(self, x, y, SCREENSIZE):\r\n self.SCREENSIZE = SCREENSIZE\r\n self.frame = 0\r\n self.direction = 0\r\n self.image = {}\r\n self.elapsed = 0\r\n self.falling = True\r\n self.jumping = False\r\n self.canjump = True\r\n self.fallspeed = 0\r\n self.xvel = 0\r\n self.yvel = 0\r\n for i in range(4):\r\n for j in range(2):\r\n self.image[str(i) + str(j)] = pygame.image.load('data/images/character' + str(i) + str(j) + '.png').convert_alpha()\r\n for i in range(4):\r\n self.image['jump' + str(i)] = pygame.image.load('data/images/characterjump' + str(i) + '.png')\r\n self.rect = pygame.Rect((x,y),self.image['00'].get_size())\r\n\r\n def draw(self, screen):\r\n if self.falling:\r\n screen.blit(self.image['jump' + str(self.direction)], self.rect)\r\n else:\r\n screen.blit(self.image[str(self.direction) + str(self.frame)], self.rect)\r\n\r\n def update(self, delta, terrain):\r\n dx = .4*self.xvel*delta\r\n dy = .4*self.yvel*delta\r\n\r\n # WALKING ANIMATION\r\n self.elapsed += delta\r\n if self.elapsed > 200 and self.walking:\r\n self.elapsed = 0\r\n if self.frame == 0:\r\n self.frame = 1\r\n else:\r\n self.frame = 0\r\n if not self.walking:\r\n self.frame = 0\r\n # END WALKING ANIMATION\r\n\r\n # CHECKING SPRITE IS WITHIN THE SCREEN\r\n if self.rect.left + dx < 0 and dx < 0:\r\n dx = 0\r\n if self.rect.right + dx > self.SCREENSIZE[0] and dx > 0:\r\n return True\r\n if self.rect.top + dy < 0 and dy < 0:\r\n dy = 0\r\n if self.rect.bottom + dy > self.SCREENSIZE[1] and dy > 0:\r\n dy = 0\r\n # END CHECKING\r\n\r\n # CHECKING FOR BLOCK COLLISION\r\n collisionlist = self.rect.collidelistall(terrain)\r\n print(collisionlist)\r\n if collisionlist:\r\n for each in collisionlist:\r\n if self.rect.bottom > terrain[each].top:\r\n self.falling = False\r\n else:\r\n self.falling = True\r\n # END BLOCK COLLISION\r\n\r\n\r\n if self.falling:\r\n dy = self.fallspeed*delta*.004\r\n self.fallspeed += .5*delta\r\n else:\r\n self.fallspeed = 4\r\n self.rect.x += dx\r\n self.rect.y += dy\r\n\r\ndef loadlvlfromfile(filename):\r\n level = []\r\n with open(filename) as f:\r\n for line in f:\r\n line = line.strip()\r\n level.append(list(line))\r\n return level\r\n\r\ndef getspawnblock(terrain):\r\n i = 0\r\n for each in terrain:\r\n if each.spawn:\r\n return each\r\n raise ValueError('No spawn block')\r\n\r\ndef getsolidterrain(terrain):\r\n rectlist = [block.rect for block in terrain if block.collides]\r\n return rectlist\r\n\r\ndef main():\r\n\r\n pygame.mixer.init()\r\n \r\n # WIDTH = 512\r\n # HEIGHT = 512\r\n # SIZE = (WIDTH,HEIGHT)\r\n FPS = 60\r\n SCREEN = pygame.display.set_mode((0,0), pygame.FULLSCREEN)\r\n SIZE = SCREEN.get_size()\r\n WIDTH, HEIGHT = SIZE[0], SIZE[1]\r\n pygame.display.set_caption(\"Awesome Game!\")\r\n CLOCK = pygame.time.Clock()\r\n keys = {'left': False, 'right': False, 'up': False, 'down': False}\r\n left, right, up, down = 0, 0, 0, 0\r\n\r\n levelnum = 0\r\n terrain = Land.loadLand(WIDTH, HEIGHT, loadlvlfromfile('data/levels/level' + str(levelnum)))\r\n spawnblock = getspawnblock(terrain)\r\n character = 
Character(spawnblock.rect.x, spawnblock.rect.y, SIZE)\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.key == pygame.K_RIGHT:\r\n keys['right'] = True\r\n character.direction = 0\r\n right = 1\r\n if event.key == pygame.K_LEFT:\r\n keys['left'] = True\r\n character.direction = 2\r\n left = -1\r\n if event.key == pygame.K_UP:\r\n keys['up'] = True\r\n character.direction = 1\r\n up = -1\r\n if event.key == pygame.K_DOWN:\r\n keys['down'] = True\r\n character.direction = 3\r\n down = 1\r\n if event.key == pygame.K_SPACE and character.canjump:\r\n character.fallspeed = -100\r\n character.jumping = True\r\n if event.key == pygame.K_f:\r\n for each in terrain:\r\n each.flip()\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_RIGHT:\r\n keys['right'] = False\r\n right = 0\r\n if event.key == pygame.K_LEFT:\r\n keys['left'] = False\r\n left = 0\r\n if event.key == pygame.K_UP:\r\n keys['up'] = False\r\n up = 0\r\n if event.key == pygame.K_DOWN:\r\n keys['down'] = False\r\n down = 0\r\n if event.key == pygame.K_SPACE:\r\n character.jumping = False\r\n\r\n if keys['left'] or keys['right'] or keys['up'] or keys['down']:\r\n if keys['left'] and keys['right'] or keys['up'] and keys['down']:\r\n character.walking = False\r\n else:\r\n character.walking = True\r\n else:\r\n character.walking = False\r\n\r\n SCREEN.fill((100,100,100))\r\n delta = CLOCK.get_time()\r\n \r\n for block in terrain:\r\n block.draw(SCREEN)\r\n\r\n character.xvel = left + right\r\n character.yvel = up + down\r\n\r\n nextlevel = character.update(delta, getsolidterrain(terrain))\r\n if nextlevel:\r\n levelnum += 1\r\n del terrain\r\n del spawnblock\r\n del character\r\n terrain = Land.loadLand(WIDTH, HEIGHT, loadlvlfromfile('data/levels/level' + str(levelnum)))\r\n spawnblock = getspawnblock(terrain)\r\n character = Character(spawnblock.rect.x, spawnblock.rect.y, SIZE)\r\n\r\n\r\n character.draw(SCREEN)\r\n\r\n pygame.display.update()\r\n CLOCK.tick(FPS)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"The glitch/the glitchold.py","file_name":"the glitchold.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"297976633","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\nfrom util import utils\nimport db\n\n\nclass LottoSpider(scrapy.Spider):\n #\n name = \"lotto\"\n\n def __init__(self, begin=\"today\", end=\"today\", *args, **kwargs):\n pass\n\n def start_requests(self):\n urls = [\n 'http://trend.caipiao.163.com/dlt/?periodNumber=100',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n sel = Selector(response)\n trs = sel.xpath(\"//table[@id='chartsTable']/tbody/tr\")\n for tr in trs:\n balls = []\n items = dict()\n try:\n num = tr.xpath('./@data-period').extract()[0].encode(\"UTF-8\").strip()\n except:\n self.log(\"error in %s\" % tr)\n continue\n # lotto num\n items[\"num\"] = num\n tds = tr.xpath('./td[@class !=\"f_red\" and @class!=\"f_blue\"]/text()').extract()\n # tds = tr.xpath('./td[contains(@class,\"ball_red\") or contains(@class,\"ball_blue\")]/text()').extract()\n if len(tds) == 7:\n # utils.convertStr2Int(str(tds[2]))\n balls.append(tds[0].encode(\"UTF-8\").strip())\n balls.append(tds[1].encode(\"UTF-8\").strip())\n balls.append(tds[2].encode(\"UTF-8\").strip())\n 
balls.append(tds[3].encode(\"UTF-8\").strip())\n                balls.append(tds[4].encode(\"UTF-8\").strip())\n                balls.append(tds[5].encode(\"UTF-8\").strip())\n                balls.append(tds[6].encode(\"UTF-8\").strip())\n                split = \",\"\n                # get front zone and back zone\n                # balls[0] is num\n                items[\"pro_zone\"] = split.join(str(ss) for ss in balls[0:5])\n                items[\"back_zone\"] = split.join(str(ss) for ss in balls[5:7])\n                items[\"update_time\"] = \"\"\n                items[\"all_with_seq\"] = \"\"\n                db.insert_lotto(items)\n                self.log(\"%s inserted.\" % num)\n","sub_path":"crawler/spiders/lotto_spider.py","file_name":"lotto_spider.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"515468441","text":"from pluginFramework import pluginShell\nfrom attributes import userCommand\nfrom attributes import backgroundLoop\nimport asyncio\nimport discord\nimport re\n\nimport CONFIG\n\nclass Verification(pluginShell):\n    @userCommand\n    async def flashsales(self, message):\n        if (\n            (\"I understand that this channel\"\n            \" will have everyone pings\"\n            \" and that abusing them can result in a ban.\"\n            \" I also understand that this channel is a privilege.\"\n            \" I am entitled to everything.\"\n            \" Sony is a piece of shit.\"\n            \" I deserve everything.\") in message.content # match raw content; the mixed-case phrase can never appear in lowered text\n            or message.content.lower().startswith(\"!flashsale\")):\n            await self.addTempRole(message, message.author, CONFIG.TEMPROLEIDS[\"flashsales\"])\n\n\n    # helper function to add or remove temp roles\n    async def addTempRole(self, message, member, role):\n        for memberroles in member.roles:\n            if memberroles.id == role:\n                roleAdd = discord.utils.get(message.server.roles,\n                    id = role)\n                await self.clientInstance.remove_roles(member, roleAdd)\n                await self.clientInstance.delete_message(message)\n                return\n            else:\n                continue\n        roleAdd = discord.utils.get(message.server.roles,\n            id = role)\n        await self.clientInstance.add_roles(member, roleAdd)\n        await self.clientInstance.delete_message(message)\n","sub_path":"plugins/temproles.py","file_name":"temproles.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604561010","text":"#!/usr/bin/python3\n\"\"\"\nTODO: Module-level description\n\"\"\"\n\nimport argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom asaplib.compressor import exponential_split, LCSplit, ShuffleSplit\nfrom asaplib.compressor import fps, kernel_random_split\nfrom asaplib.fit import KRRSparse\nfrom asaplib.fit import get_score\nfrom asaplib.plot import plot_styles\n\n\ndef main(fmat, fy, prefix, test_ratio, jitter, n_sparse, sigma):\n    \"\"\"\n\n    Parameters\n    ----------\n    fmat: Location of kernel matrix file.\n    fy: Location of property list (1D-array of floats)\n    prefix: filename prefix for learning curve figure\n    test_ratio: train/test ratio\n    jitter: jitter level, default is 1e-10\n    n_sparse: number of representative samples\n    sigma: noise level in kernel ridge regression\n\n    Returns\n    -------\n\n    Fitting outcome & Learning curve.\n\n    \"\"\"\n\n    # if it has been computed before we can simply load it\n    try:\n        K_all = np.genfromtxt(fmat, dtype=float)\n    except OSError:\n        raise Exception('fmat file could not be loaded. Please check the filename')\n    print(\"loaded\", fmat)\n    try:\n        y_all = np.genfromtxt(fy, dtype=float)\n    except OSError:\n        raise Exception('property vector file could not be loaded. 
Please check the filename')\n if len(y_all) != len(K_all):\n raise ValueError('Length of the vector of properties is not the same as number of samples')\n else:\n n_sample = len(K_all)\n\n # train test split\n if test_ratio > 0:\n K_train, K_test, y_train, y_test, _, _ = kernel_random_split(K_all, y_all, test_ratio)\n else:\n K_train = K_test = K_all\n y_train = y_test = y_all\n n_train = len(K_train)\n n_test = len(K_test)\n\n # sparsification\n if n_sparse >= n_train:\n print(\"the number of representative structure is too large, please select n < \", n_train)\n elif n_sparse > 0:\n ifps, dfps = fps(K_train, n_sparse, 0)\n K_MM = K_train[:, ifps][ifps]\n K_NM = K_train[:, ifps]\n K_TM = K_test[:, ifps]\n else:\n print(\"it's usually better to use some sparsification\")\n K_MM = K_train\n K_NM = K_train\n K_TM = K_test\n\n delta = np.std(y_train) / (np.trace(K_MM) / len(K_MM))\n krr = KRRSparse(jitter, delta, sigma)\n # fit the model\n krr.fit(K_MM, K_NM, y_train)\n\n # get the predictions for train set\n y_pred = krr.predict(K_NM)\n # compute the CV score for the dataset\n print(\"train score: \", get_score(y_pred, y_train))\n # get the predictions for test set\n y_pred_test = krr.predict(K_TM)\n # compute the CV score for the dataset\n print(\"test score: \", get_score(y_pred_test, y_test))\n\n plot_styles.set_nice_font()\n fig = plt.figure(figsize=(8 * 2.1, 8))\n ax = fig.add_subplot(121)\n ax.plot(y_train, y_pred, 'b.', label='train')\n ax.plot(y_test, y_pred_test, 'r.', label='test')\n ax.legend()\n ax.set_title('KRR for: ' + fy)\n ax.set_xlabel('actual y')\n ax.set_ylabel('predicted y')\n\n # learning curve\n # decide train sizes\n lc_points = 10\n train_sizes = exponential_split(n_sparse, n_train - n_test, lc_points)\n print(\"Learning curves using train sizes: \", train_sizes)\n lc_stats = 12 * np.ones(lc_points, dtype=int)\n lc = LCSplit(ShuffleSplit, n_repeats=lc_stats, train_sizes=train_sizes, test_size=n_test, random_state=10)\n\n scores = {size: [] for size in train_sizes}\n for lctrain, lctest in lc.split(y_train):\n Ntrain = len(lctrain)\n lc_K_NM = K_NM[lctrain, :]\n lc_y_train = y_train[lctrain]\n # lc_K_test = K_NM[lctest,:]\n lc_K_test = K_TM\n # lc_y_test = y_train[lctest]\n lc_y_test = y_test\n krr.fit(K_MM, lc_K_NM, lc_y_train)\n lc_y_pred = krr.predict(lc_K_test)\n scores[Ntrain].append(get_score(lc_y_pred, lc_y_test))\n\n sc_name = 'RMSE'\n Ntrains = []\n avg_scores = []\n avg_scores_error = []\n for Ntrain, score in scores.items():\n avg = 0.\n var = 0.\n for sc in score:\n avg += sc[sc_name]\n var += sc[sc_name] ** 2.\n avg /= len(score)\n var /= len(score);\n var -= avg ** 2.\n avg_scores.append(avg)\n avg_scores_error.append(np.sqrt(var))\n Ntrains.append(Ntrain)\n\n ax2 = fig.add_subplot(122)\n ax2.errorbar(Ntrains, avg_scores, yerr=avg_scores_error)\n ax2.set_title('Learning curve')\n ax2.set_xlabel('Number of training samples')\n ax2.set_ylabel('Test {}'.format(sc_name))\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n\n plt.show()\n fig.savefig('KRR_4_' + prefix + '.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-fmat', type=str, required=True,\n help='Location of kernel matrix file. 
You can use gen_kmat.py to compute it.')\n parser.add_argument('-fy', type=str, default='none', help='Location of the list of properties (N floats)')\n parser.add_argument('--prefix', type=str, default='ASAP', help='Filename prefix')\n parser.add_argument('--test', type=float, default=0.05, help='the test ratio')\n parser.add_argument('--jitter', type=float, default=1e-10,\n help='regularizer that improves the stablity of matrix inversion')\n parser.add_argument('--n', type=int, default=-1, help='number of the representative samples')\n parser.add_argument('--sigma', type=float, default=1e-2, help='the noise level of the signal')\n args = parser.parse_args()\n\n main(args.fmat, args.fy, args.prefix, args.test, args.jitter, args.n, args.sigma)\n","sub_path":"scripts/krr.py","file_name":"krr.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"167100713","text":"#!/usr/bin/env python2\n\nimport logging\n\nfrom datetime import datetime\nfrom dateutil import parser\nfrom HTMLParser import HTMLParser\n\nif __name__ == \"__main__\":\n if __package__ is None:\n import sys\n from os import path\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n from scraper import Scraper\n else:\n from ..scraper import Scraper\n\nallowed_keys = [\n \"original_id\",\n \"id_obfuscated\",\n \"url\",\n \"name\",\n \"start_time\",\n \"timezone\",\n \"description\",\n \"venue\",\n \"lat\",\n \"lon\",\n \"is_official\",\n \"attendee_count\",\n \"capacity\",\n \"source\"\n]\n\nlogging.basicConfig(format=\"%(asctime)s - %(levelname)s : %(message)s\",\n level=logging.INFO)\n\n\nclass EventScraper(Scraper):\n\n def __init__(self):\n Scraper.__init__(self)\n self.url = \"https://go.berniesanders.com/page/event/search_results\"\n self.html = HTMLParser()\n self.params = {\n 'orderby': 'zip_radius',\n 'zip_radius[1]': '6000',\n 'zip_radius[0]': '78218',\n 'radius_unit': 'mi',\n 'country': 'US',\n 'format': 'json'\n }\n self.map = {\n \"id\": \"original_id\",\n \"start_dt\": \"start_time\"\n }\n\n def translate(self, result):\n # Translate normal key names based on map\n result = dict((self.map.get(k, k), v) for (k, v) in result.items())\n\n # Compile Venue\n address_map = {\n \"venue_addr1\": \"address1\",\n \"venue_addr2\": \"address2\",\n \"venue_addr3\": \"address3\"\n }\n result[\"venue\"] = {\n \"name\": result[\"venue_name\"],\n \"city\": result[\"venue_city\"],\n \"state\": result[\"venue_state_cd\"],\n \"zip\": result[\"venue_zip\"],\n \"location\": {\n \"lon\": float(result[\"longitude\"]),\n \"lat\": float(result[\"latitude\"])\n }\n }\n result[\"source\"] = \"berniesanders.com\"\n for k, v in address_map.iteritems():\n try:\n result[\"venue\"][v] = result[k]\n except KeyError:\n pass\n\n # parse datetime\n result[\"start_time\"] = parser.parse(result[\"start_time\"])\n keys = result.keys()\n for k in keys:\n if k not in allowed_keys:\n result.pop(k)\n return result\n\n def go(self):\n r = self.get(\n self.url,\n params=self.params,\n result_format=\"json\"\n )\n for result in r[\"results\"]:\n rec = self.translate(result)\n query = {\n \"original_id\": rec[\"original_id\"],\n \"source\": \"berniesanders.com\"\n }\n if self.db.events.find(query).count() > 0:\n msg = \"Updating record for '{0}'.\"\n logging.info(msg.format(rec[\"name\"]))\n self.db.events.update_one(query, {\"$set\": rec})\n else:\n msg = \"Inserting record for {0}.\"\n logging.info(msg.format(rec[\"name\"]))\n rec[\"inserted_at\"] = 
datetime.now()\n self.db.events.insert_one(rec)\n\nif __name__ == \"__main__\":\n bernie = EventScraper()\n bernie.go()\n","sub_path":"scrapers/berniesanders.com/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588413395","text":"#!/usr/bin/env python3\n\n# CFU 8.2.1 : Surface Area of Cylinder\n# File : CFU_8_2_1_sjubaer.py\n# By : Shadman Jubaer\n# Login : sjubaer\n# Section : 3Whalen\n# Team : Team 51\n#\n# ELECTRONIC SIGNATURE\n# Shadman Jubaer\n#\n# The electronic signature above indicates\n# the program submitted for evaluation is\n# my individual work, and I have a general\n# understanding of all aspects of its\n# development and execution.\n# \n# PROGRAM DESCRIPTION\n# My program does the tasks specified in CFU 8.2.1\n\nimport math\ndef surAreaCyl():\n print('Input Diameter in cm: ')\n D = float(input())\n print('Input Height in cm: ')\n H = float(input()) \n R = D / 2\n A = (2 * math.pi * R ** 2) + (2 * math.pi * R * H)\n print('The surface area of a cylinder is ', A, '[cm**2] for a given diameter of ', D, '[cm] and height of ', H, '[cm].')\n return A \n\n\nsurAreaCyl() \n\n","sub_path":"ENGR195H-Honors Creativity-and-Innovation/PYTHON/cylinder_SA.py","file_name":"cylinder_SA.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"299983211","text":"import datetime\n\nfrom dateutil.parser import parse\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.management.base import BaseCommand\nfrom django.db.models.aggregates import Sum\nfrom django.db.models.expressions import Case, When, F\nfrom django.db.models.fields import IntegerField\nfrom django.db.models.query_utils import Q\n\nfrom tunga.settings import TUNGA_URL\nfrom tunga_activity import verbs\nfrom tunga_activity.models import ActivityReadLog\nfrom tunga_settings.slugs import TASK_ACTIVITY_UPDATE_EMAIL\nfrom tunga_tasks.models import Participation, Task\nfrom tunga_utils.emails import send_mail\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n \"\"\"\n Send new task activity notifications\n \"\"\"\n # command to run: python manage.py tunga_send_task_activity_emails\n\n # Initialize missing logs\n client_tasks = Task.objects.filter(closed=False).annotate(user_logs=Sum(\n Case(\n When(\n read_logs__user=F('user'),\n then=1\n ),\n default=0,\n output_field=IntegerField()\n )\n )).filter(user_logs=0)\n\n for task in client_tasks:\n ActivityReadLog.objects.update_or_create(\n user=task.user,\n content_type=ContentType.objects.get_for_model(task),\n object_id=task.id\n )\n\n participants = Participation.objects.filter(task__closed=False).annotate(user_logs=Sum(\n Case(\n When(\n task__read_logs__user=F('user'),\n then=1\n ),\n default=0,\n output_field=IntegerField()\n )\n )).filter(user_logs=0)\n\n for participant in participants:\n ActivityReadLog.objects.update_or_create(\n user=participant.user,\n content_type=ContentType.objects.get_for_model(participant.task),\n object_id=participant.task.id\n )\n\n # Send notifications\n utc_now = datetime.datetime.utcnow()\n min_date = utc_now - relativedelta(minutes=30) # 30 minute window to read new messages\n min_last_email_date = utc_now - relativedelta(hours=3) # Limit to 1 email every 3 hours per channel\n commission_date = parse('2016-08-28 00:00:00') # Don't notify about 
events before the commissioning date\n user_tasks = ActivityReadLog.objects.filter(\n (\n Q(last_email_at__isnull=True) |\n Q(last_email_at__lt=min_last_email_date)\n ) &\n (\n Q(tasks__user=F('user')) | Q(tasks__participants=F('user'))\n )\n ).exclude(\n user__userswitchsetting__setting__slug=TASK_ACTIVITY_UPDATE_EMAIL,\n user__userswitchsetting__value=False\n ).annotate(new_activity=Sum(\n Case(\n When(\n ~Q(tasks__activity_objects__actor_object_id=F('user_id')) &\n Q(tasks__activity_objects__gt=F('last_read')) &\n Q(tasks__activity_objects__timestamp__lte=min_date) &\n Q(tasks__activity_objects__timestamp__gte=commission_date) &\n (Q(last_email_at__isnull=True) | Q(tasks__activity_objects__timestamp__gt=F('last_email_at'))) &\n Q(tasks__activity_objects__verb__in=[verbs.COMMENT, verbs.UPLOAD]),\n then=1\n ),\n default=0,\n output_field=IntegerField()\n )\n )).filter(new_activity__gt=0)\n\n for user_task in user_tasks:\n task = user_task.content_object\n\n to = [user_task.user.email]\n subject = \"New activity for task: {}\".format(task.summary)\n ctx = {\n 'receiver': user_task.user,\n 'new_activity': user_task.new_activity,\n 'task': user_task.content_object,\n 'task_url': '%s/task/%s/' % (TUNGA_URL, user_task.object_id)\n }\n\n if send_mail(\n subject, 'tunga/email/unread_task_activity', to, ctx, **dict(deal_ids=[task.hubspot_deal_id])\n ):\n user_task.last_email_at = datetime.datetime.utcnow()\n user_task.save()\n","sub_path":"tunga_tasks/management/commands/tunga_send_task_activity_emails.py","file_name":"tunga_send_task_activity_emails.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"19382435","text":"import setup_path\nimport airsim\nimport cv2\nimport numpy as np\nimport os\nimport time\nimport tempfile\n\n'''\nimport keras\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom PIL import Image\n'''\n\n# connect to the AirSim simulator\nclient = airsim.CarClient()\nclient.confirmConnection()\nclient.enableApiControl(False)\nprint(\"API Control enabled: %s\" % client.isApiControlEnabled())\ncar_controls = airsim.CarControls()\n\ntmp_dir = os.path.join(tempfile.gettempdir(), \"airsim_car\")\nprint (\"Saving images to %s\" % tmp_dir)\n\ntry:\n os.makedirs(tmp_dir)\nexcept OSError:\n if not os.path.isdir(tmp_dir):\n raise\n\ntry:\n# while 1:# get state of the car\n for hi in range(1):\n print(client.simListSceneObjects())\n responses = client.simGetImages([\n airsim.ImageRequest(\"0\", airsim.ImageType.Scene), #0\n airsim.ImageRequest(\"0\", airsim.ImageType.DepthPlanner), #1 \n airsim.ImageRequest(\"0\", airsim.ImageType.DepthPerspective), #2\n airsim.ImageRequest(\"0\", airsim.ImageType.DepthVis), #3\n airsim.ImageRequest(\"0\", airsim.ImageType.DisparityNormalized), #4\n airsim.ImageRequest(\"0\", airsim.ImageType.Segmentation), #5\n airsim.ImageRequest(\"0\", airsim.ImageType.SurfaceNormals), #6\n airsim.ImageRequest(\"0\", airsim.ImageType.Infrared)]) #7\n print('Retrieved images: %d' % len(responses))\n \n \n for response_idx, response in enumerate(responses):\n filename = os.path.join(tmp_dir, f\"{0}_{response.image_type}_{response_idx}\")\n\n if response.pixels_as_float:\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_float)))\n airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))\n elif response.compress: #png format\n print(\"Type %d, size %d\" % (response.image_type, 
len(response.image_data_uint8)))\n airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)\n else: #uncompressed array\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # get numpy array\n img_rgb = img1d.reshape(response.height, response.width, 3) # reshape array to 3 channel image array H X W X 3\n cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png\n \n \n '''\n \n responses = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene,False,False)])\n response = responses[0]\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n img_rgb = img1d.reshape(response.height, response.width, 3)\n \n\n\n for response_idx, response in enumerate(responses):\n filename = os.path.join(tmp_dir, f\"{0}_{response.image_type}_{response_idx}\")\n\n if response.pixels_as_float:\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_float)))\n airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))\n elif response.compress: #png format\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\n airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)\n else: #uncompressed array\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # get numpy array\n img_rgb = img1d.reshape(response.height, response.width, 3) # reshape array to 3 channel image array H X W X 3\n cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png\n '''\nexcept:\n print('client broken')\n \n\nclient.enableApiControl(False)\n","sub_path":"airsim_python/record_all.py","file_name":"record_all.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"443635319","text":"# encoding: utf-8\nimport os\nimport math\nimport random\nimport pickle\n\nfrom PIL import Image\nimport numpy as np\nimport cv2\n\n\ndef build_random_replace_background(cfg):\n masks_path = cfg.DATASETS.MASKS_PATH\n if masks_path is None or masks_path == \"\":\n return None\n masks = load_masks(cfg.DATASETS.MASKS_PATH)\n return RandomReplaceBackground(masks, probability=cfg.DATASETS.BACKGROUND_REPLACE_PROBABILITY)\n\n\ndef load_masks(masks_path):\n \"\"\"\n masks file format: dict\n - key: image_name\n - value: mask\n - example\n {\n \"4627_c8_f0118398.jpg\": numpy.ndarray([[True, True, ...], ..., [True, True, ...]]),\n \"0580_c1_f0154127.jpg\": numpy.ndarray([[True, True, ...], ..., [True, True, ...]]),\n ...\n }\n \"\"\"\n with open(masks_path, \"rb\") as f:\n masks = pickle.load(f)\n return masks\n\n\nclass RandomReplaceBackground(object):\n \"\"\"\n Args:\n masks: dataset image person masks\n probability: The probability that the Random Erasing operation will be performed.\n \"\"\"\n\n def __init__(self, masks, probability=0.5):\n self.masks = masks\n self.probability = probability\n\n def __call__(self, image, image_name):\n if random.uniform(0, 1) > self.probability:\n return image\n if image_name not in self.masks:\n return image\n\n mask = self.masks[image_name]\n image = np.asarray(image)\n height, width = image.shape[:2]\n\n background = self.create_random_background(width, height)\n image = self.replace_background(mask, image, background)\n image = Image.fromarray(image)\n return 
image\n\n def create_random_background(self, width, height):\n background = np.zeros((height, width, 3), dtype=np.uint8)\n r = int(random.uniform(0, 256))\n g = int(random.uniform(0, 256))\n b = int(random.uniform(0, 256))\n background[:, :] = (r, g, b)\n return background\n\n def replace_background(self, mask, image, background):\n assert mask.shape == image.shape[:2] == background.shape[:2]\n\n mask = np.stack((mask,) * 3, axis=-1) # convert to 3-channel\n mask = mask.astype(float)\n mask = cv2.GaussianBlur(mask, (5, 5), 0)\n\n image = image / 255\n background = background / 255\n fg = image * mask\n bg = background * (1 - mask)\n output = fg + bg\n output *= 255\n output = output.astype(np.uint8)\n\n return output\n\n","sub_path":"data/ext.py","file_name":"ext.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615059190","text":"import argparse\nimport logging\nimport re\n\nimport pandas as pd\nimport boto3\nimport botocore\n\nlogging.basicConfig(format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', level=logging.DEBUG)\nlogging.getLogger(\"botocore\").setLevel(logging.ERROR)\nlogging.getLogger(\"s3transfer\").setLevel(logging.ERROR)\nlogging.getLogger(\"urllib3\").setLevel(logging.ERROR)\nlogging.getLogger(\"boto3\").setLevel(logging.ERROR)\nlogging.getLogger(\"asyncio\").setLevel(logging.ERROR)\nlogging.getLogger(\"aiobotocore\").setLevel(logging.ERROR)\nlogging.getLogger(\"s3fs\").setLevel(logging.ERROR)\n\n\nlogger = logging.getLogger('s3')\n\n\ndef parse_s3(s3path):\n \"\"\"\n Parse s3 path to get the bucket name and the path name\n Args:\n s3path (str): s3 path\n Returns:\n s3bucket (str): s3 bucket name\n s3path (str): s3 path\n \"\"\"\n\n regex = r\"s3://([\\w._-]+)/([\\w./_-]+)\"\n\n m = re.match(regex, s3path)\n s3bucket = m.group(1)\n s3path = m.group(2)\n\n return s3bucket, s3path\n\n\ndef upload_file_to_s3(local_path, s3path):\n \"\"\"\n upload file to s3 using local path and s3path\n Args:\n s3path (str): s3 path\n local_path (str): local path\n Returns:\n None\n \"\"\"\n s3bucket, s3_just_path = parse_s3(s3path)\n\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(s3bucket)\n\n try:\n bucket.upload_file(local_path, s3_just_path)\n except botocore.exceptions.NoCredentialsError:\n logger.error('Please provide AWS credentials via AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables.')\n else:\n logger.info('Data uploaded from %s to %s', local_path, s3path)\n\n\ndef upload_to_s3_pandas(local_path, s3path, sep=';'):\n \"\"\"\n upload file to s3 using local path and s3path through to_csv function from pandas\n Args:\n s3path (str): s3 path\n local_path (str): local path\n Returns:\n None\n \"\"\"\n df = pd.read_csv(local_path, sep=sep)\n\n try:\n df.to_csv(s3path, sep=sep)\n except botocore.exceptions.NoCredentialsError:\n logger.error('Please provide AWS credentials via AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables.')\n else:\n logger.info('Data uploaded from %s to %s', local_path, s3path)\n\n\ndef download_file_from_s3(local_path, s3path):\n \"\"\"\n download file from s3 using local path and s3path\n Args:\n s3path (str): s3 path\n local_path (str): local path\n Returns:\n None\n \"\"\"\n s3bucket, s3_just_path = parse_s3(s3path)\n\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(s3bucket)\n\n try:\n bucket.download_file(s3_just_path, local_path)\n except botocore.exceptions.NoCredentialsError:\n logger.error('Please provide AWS credentials via 
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables.')\n else:\n logger.info('Data downloaded from %s to %s', s3path, local_path)\n\n\ndef download_from_s3_pandas(local_path, s3path, sep=';'):\n \"\"\"\n download file from s3 using local path and s3path through to_csv function from pandas\n Args:\n s3path (str): s3 path\n local_path (str): local path\n Returns:\n None\n \"\"\"\n try:\n df = pd.read_csv(s3path, sep=sep)\n except botocore.exceptions.NoCredentialsError:\n logger.error('Please provide AWS credentials via AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables.')\n else:\n df.to_csv(local_path, sep=sep)\n logger.info('Data uploaded from %s to %s', local_path, s3path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--sep',\n default=';',\n help=\"CSV separator if using pandas\")\n parser.add_argument('--pandas', default=False, action='store_true',\n help=\"If used, will load data via pandas\")\n parser.add_argument('--download', default=False, action='store_true',\n help=\"If used, will load data via pandas\")\n parser.add_argument('--s3path', default='s3://2021-msia423-wu-qiaozhen/kpop_recommender_s3.csv',\n help=\"If used, will load data via pandas\")\n parser.add_argument('--local_path', default='data/sample/final_train_s3.csv',\n help=\"Where to load data to in S3\")\n args = parser.parse_args()\n\n if args.download:\n if args.pandas:\n download_from_s3_pandas(args.local_path, args.s3path, args.sep)\n else:\n download_file_from_s3(args.local_path, args.s3path)\n else:\n if args.pandas:\n upload_to_s3_pandas(args.local_path, args.s3path, args.sep)\n else:\n upload_file_to_s3(args.local_path, args.s3path)","sub_path":"run_s3.py","file_name":"run_s3.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"578390246","text":"import time\nimport smtplib\nimport pandas as pd\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom email.mime.text import MIMEText\n\n# define travel info\ndeparture = 'TPE'\narrival = 'OKA'\ndepart_date = '2020-05-01'\nreturn_date = '2020-05-06'\ntarget_airline = 'EVA Air'\n\n# define agoda web info\nsearch_url = 'https://www.agoda.com/zh-tw/flights/results?cid=1804069&departureFrom={}&departureFromType=1&arrivalTo={}&arrivalToType=1&departDate={}&returnDate={}&adults=2&searchType=2&cabinType=Economy'.format(\n departure,\n arrival,\n depart_date,\n return_date\n)\n\nuser = 'xxxxxxx@gmail.com'\nprice_threshold = 8000\n\ndef main():\n airline, lowest_price = get_price_info(search_url)\n isSend = check_price(airline, lowest_price, target_airline, price_threshold)\n\n if isSend is True:\n msg_text = edit_msg(airline, lowest_price)\n send_mail(msg_text, user)\n\ndef get_price_info(url):\n driver = webdriver.Chrome()\n driver.get(url)\n time.sleep(10)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n driver.close()\n\n airline = []\n for i, agency in enumerate(soup.select('.FlightIcon__Name')):\n if i % 2 == 1:\n airline.append(agency.text)\n\n lowest_price = []\n for price in soup.select('.FlightPrice__Amount--OuterDisplay'):\n lowest_price.append(price.text)\n\n return airline, lowest_price\n\ndef check_price(airline, lowest_price, target_airline, threshold):\n dic = {\n \"airline\": airline,\n \"price\":lowest_price\n }\n df = pd.DataFrame(dic)\n\n for i in df[df['airline'] == target_airline].price:\n if int(i.replace(',', '')) < threshold:\n return True\n\ndef edit_msg(airline, 
lowest_price):\n msg_text = \"\"\"\n Airline price\n --------------------------\n \"\"\"\n\n for al, p in zip(airline, lowest_price):\n t = al + \" \" * (20-len(al)) + p + '\\n'\n msg_text += t\n \n return msg_text\n\ndef send_mail(msg_text, user):\n gmail_user = 'xxxxxx@gmail.com'\n gmail_password = 'xxxxxxxx'\n\n msg = MIMEText(msg_text)\n msg['Subject'] = '機票價格'\n msg['From'] = gmail_user\n msg['To'] = user\n\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n server.login(gmail_user, gmail_password)\n server.send_message(msg)\n server.quit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"477718394","text":"\nimport re\nimport json\n\nimport azure.functions as func\n\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\ndef init():\n\n global model\n global vectorizer\n\n model = joblib.load('sentimentpredictor/model.pkl')\n vectorizer = joblib.load('sentimentpredictor/tfidf.pkl')\n\n\ndef preprocess_string(inputstring):\n\n cleanedstring = [re.sub(r'[^a-zA-Z0-9]',' ', D).lower().strip() for D in inputstring]\n cleanedstring = [re.split(r'\\s+', D) for D in cleanedstring if D != '']\n cleanedstring = [[x for x in s if x.isalpha()] for s in cleanedstring]\n cleanedstring = [' '.join(x) for x in cleanedstring]\n\n return cleanedstring\n\n\ndef main(req: func.HttpRequest):\n \n init()\n\n parsed_data = req.get_json()\n comment = parsed_data['comment']\n\n comment = preprocess_string(comment)\n\n comment_vec = vectorizer.transform(comment)\n\n sentiment = model.predict(comment_vec)\n\n return json.dumps(sentiment.tolist())","sub_path":"sentimentpredictor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"127570319","text":"# -*- coding: utf-8 -*-\n\nfrom service import ResourceHandler\nfrom ontology import Ontology, Document\n\nclass HomeHandler(ResourceHandler):\n def __init__(self, resolver, node):\n ResourceHandler.__init__(self, resolver, node)\n\n def fetch(self, query):\n resolved = False\n self.assign_key(query.genealogy, 'uuid', 'local', query.branch)\n if 'depend' in query.match:\n query.genealogy['language']\n try:\n dependee = self.resolver.resolve(query.match['depend'].format(**dict(query.genealogy)), query.genealogy, query.context)\n except KeyError as e:\n self.log.debug('failed to assemble remote URL for %s because parameter %s was missing.', query.uri, e)\n else:\n if dependee is not None:\n query.discover(dependee.genealogy)\n resolved = True\n\n if resolved or query.result is not None:\n query.sources.append(Ontology.clone(query.genealogy))\n\n def parse(self, query):\n for source in query.sources:\n entry = {\n 'branch':query.branch,\n 'record': Document(self.env, query.branch['namespace'], {\n 'head': {\n 'genealogy': query.genealogy.project('ns/service/genealogy')\n }\n })\n }\n query.add_entry(entry)\n","sub_path":"module/core/service/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"91867174","text":"#! 
python2\n\nfrom os import walk, path\nfrom operator import itemgetter\nimport sys, getopt, re, argparse\n\n\n\nparser = argparse.ArgumentParser(description='Do stuff with files.', prog='Filecrawler.py', usage='%(prog)s [-h, -r, -v, -p, -c, -t, -z, -e , -o ] -s|-l [] -d|-f ', \\\n\tformatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=65, width =150))\ngroup = parser.add_mutually_exclusive_group(required=True)\nparser.add_argument(\"-r\", \"--recursive\", action='store_false', help=\"Do not recursively search all files in the given directory\")\nparser.add_argument(\"-v\", \"--verbose\", action='store_true', help=\"Turn on (extremely) verbose mode\")\nparser.add_argument(\"-e\", \"--extension\", nargs='?', default=None, help=\"filetype(s) to restrict search to. seperate lists via commas with no spaces\")\nparser.add_argument(\"-l\", \"--linecount\", action='store_true', help=\"only perform a linecount. restrict filetypes via the -e flag. may override the -s flag.\")\nparser.add_argument(\"-p\", \"--printout\", action='store_true', help=\"print the lines found containing search term\")\nparser.add_argument(\"-c\", \"--casesensitive\", action=\"store_true\", help=\"make search case sensitive\")\nparser.add_argument(\"-o\", \"--output\", nargs='?', default=None, help=\"specify output file. NOTE: will overwrite file if it currently exists\")\ngroup.add_argument(\"-d\", \"--directory\", default=None, help=\"directory to search\")\ngroup.add_argument(\"-f\", \"--file\", default=None, help=\"file to search\")\nparser.add_argument(\"-s\", \"--search\", default=None, help=\"term to search for; regex is accepted\")\nparser.add_argument(\"-t\", \"--filetypecount\", action='store_true', help=\"print all file types found with the number of occurrences\")\nparser.add_argument(\"-z\", \"--disableerrorhandling\", action='store_true', help=\"disable error handling to see full stack traces on errors\")\nargs = parser.parse_args()\n\nrec = verbose = pr = case = fcount = rcount = typecount = linecount = errorhandling = 0\nterm = tosearch = type = extfilter = outfile = None\n# extlist = {extension: [fcount, lcount]}\nextlist={}\nlockedfiles=[]\n\ndef main():\n\tglobal term, tosearch, type, rec, verbose, extfilter, pr, case, outfile, linecount, typecount, errorhandling\n\t\n\tif args.directory != None:\n\t\ttosearch = args.directory\n\t\ttype = 'd'\n\n\telif args.file != None:\n\t\ttosearch = args.file\n\t\ttype = 'f'\n\t\t\t\n\tif args.extension != None:\n\t\textfilter = args.extension.split(',')\n\t\tfor i,e in enumerate(extfilter):\n\t\t\tif e[0] != '.':\n\t\t\t\textfilter[i] = '.'+e\n\n\trec = args.recursive\n\tverbose = args.verbose\n\tcase = args.casesensitive\n\n\tif args.printout:\n\t\tpr = 1\n\n\tlinecount = args.linecount\n\ttypecount = args.filetypecount\n\terrorhandling = args.disableerrorhandling\n\tterm = args.search\n\toutfile = args.output\n\n\tif (((type != None) and (tosearch != None)) or linecount or typecount):\n\t\tif errorhandling:\n\t\t\tstart()\n\t\telse:\n\t\t\ttry:\n\t\t\t\tstart()\n\t\t\texcept:\n\t\t\t\tprintline('[!] An error ocurred:\\n')\n\t\t\t\tfor e in sys.exc_info():\n\t\t\t\t\tprintline(e)\n\t\t\t\tprintline('[*] Note that this script may break on some filetypes when run with 3.4. 
Please use 2.7')\n\telif help != 1:\n\t\t\tprint('USAGE:\\tfilecrawler.py [-h, -r, -v, -p, -c, -t, -z, -l, -e , -o , -s ] -d|-f ')\n\t\t\t\ndef start():\n\tglobal term, term, tosearch, rec, linecount, typecount, types, extfilter, outfile, lockedfiles\n\tloc = 0\n\tif outfile != None:\n\t\twith open(outfile, 'w') as f:\n\t\t\t\tf.write(\"\") \n\t\n\t#Print intro messages\n\tprintline('\\n\\t\\t --TODO--\\n')\n\tif type == 'd' and rec:\n\t\tprintline('[*] Recursively running against directory:\\n\\t%s' % tosearch)\n\telif type == 'd' and not rec:\n\t\tprintline('[*] Non-recursively running against directory:\\n\\t%s' % tosearch)\n\telif type == 'f':\n\t\tprintline('[*] Running against file:\\n\\t%s' % tosearch)\n\tif term != None:\n\t\tprintline('[*] Searching for:\\n\\t%s' % term)\n\tif linecount:\n\t\tprintline('[*] Performing line count')\n\tif typecount and extfilter == None:\n\t\tprintline('[*] Enumerating all found file types')\n\tif extfilter != None:\n\t\tprintline('[*] Filtering against the following file extensions:')\n\t\tfor e in extfilter:\n\t\t\tprintline('\\t%s' % e)\n\tif outfile != None:\n\t\tprintline('[*] Output written to file:\\n\\t%s' % outfile) \n\t\t\n\t#Determine appropriate search\n\tprintline('\\n\\t\\t--RESULTS--\\n')\n\tif type == 'd':\n\t\tparsedirectory(tosearch)\n\telif type == 'f':\n\t\tsearchfile(tosearch)\n\t\n\tfor i in extlist.keys():\n\t\t\tloc += extlist.get(i)[1]\n\n\t#Print appropriate results\n\tif term != None:\n\t\tprintline('\\n[*] Search complete. %s lines searched across %s files with %s occurrences found.' % (prettynumbers(loc), prettynumbers(fcount), prettynumbers(rcount)))\n\tif linecount:\n\t\tprintline('[*] %s lines parsed across %s files' % (prettynumbers(loc), prettynumbers(fcount)))\n\tif len(lockedfiles) > 0:\n\t\tprintline('\\n[!] Unable to open the following files:')\n\t\tfor f in lockedfiles:\n\t\t\tprintline('\\t%s'%f)\n\t\tprintline('\\n[*] Note: Hidden files are unable to be opened via Python on Windows; please unhide all files you wish to scan.')\n\tif typecount:\n\t\tif extfilter:\n\t\t\tprintline('[*] Number of occurrences of filtered file extensions:')\n\t\telse:\n\t\t\tprintline('[*] %s file types were discovered:' % prettynumbers(len(extlist)))\n\t\t\n\t\tsorted_extlist = extlist.keys()\n\t\tsorted_extlist.sort()\n\n\t\tif linecount:\n\t\t\tprintline('\\t%s %s %s' % (\"Type\".ljust(18), \"Count\".ljust(8), \"LoC\".ljust(8)))\n\t\t\tfor e in sorted_extlist:\n\t\t\t\tprintline('\\t%s %s %s' % (e.ljust(18), prettynumbers(extlist.get(e)[0]).ljust(8), prettynumbers(extlist.get(e)[1]).ljust(8)))\n\t\telse:\n\t\t\tprintline('\\t%s %s' % (\"Type\".ljust(18), \"Count\".ljust(8)))\n\t\t\tfor e in sorted_extlist:\n\t\t\t\tprintline('\\t%s %s' % (e.ljust(18), prettynumbers(extlist.get(e)[0]).ljust(8)))\n\n\t\t\ndef searchfile(file, fext):\n\tglobal term, pr, tosearch, rcount, fcount, lockedfiles, extlist\n\tcount = 1\n\tfcount+=1\n\tmObj=None\n\tlcount = extlist.get(fext, [1,0])\n\tvprint('[?] 
Searching %s for %s' % (file, term))\n\t\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tlcount[1]+=1\n\t\t\t\tif case and term:\n\t\t\t\t\tmObj = re.search(term, line, flags=0)\n\t\t\t\telif term:\n\t\t\t\t\tmObj = re.search(term, line, flags=re.IGNORECASE)\n\n\t\t\t\tif mObj:\n\t\t\t\t\tprintline('[*] Line %d in file %s' % (count, file[len(tosearch):]))\n\t\t\t\t\trcount+=1\n\t\t\t\t\tif pr:\n\t\t\t\t\t\tif len(line)>200:\n\t\t\t\t\t\t\tprintline(line.strip(' \\t\\r\\n')[:200] + \"...\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprintline(line.strip(' \\t\\r\\n') + \"\\n\")\n\t\t\t\tcount+=1\n\texcept IOError:\n\t\tlockedfiles.append(file)\n\t\tvprint('[?] IOError thrown opening: %s'%file)\n\t\n\textlist[fext] = lcount\n\tvprint('[?] Number of lines: %d' % count)\n\t\ndef searchfiles(files, dir):\n\tglobal extfilter, typelist, term\n\tfound = False\n\tvprint('[?] Searching file list')\n\t\n\tfor file in files:\n\t\tvprint('[?] Parsing file:\\t%s'%file)\n\t\tfext = path.splitext(file)[1] \n\t\tif len(fext) < 1:\n\t\t\tfext = 'no ext'\n\t\t\t\n\t\tif typecount and (extfilter == None or fext in extfilter):\n\t\t\tif fext in extlist.keys():\n\t\t\t\tinc = extlist.get(fext)\n\t\t\t\textlist[fext] = [inc[0]+1, inc[1]]\n\t\t\telse:\n\t\t\t\textlist[fext] = [1,0]\n\n\t\tif (term != None or linecount) and (extfilter == None or (extfilter != None and fext in extfilter)):\n\t\t\tsearchfile(dir+'/'+file, fext)\n\ndef parsedirectory(dir):\n\tglobal rec\n\tflist = []\n\tdlist = []\n\t\n\tvprint('[?] Parsing %s' % dir)\n\t\n\tfor (dirpath, dirname, filenames) in walk(dir):\n\t\tflist.extend(filenames)\n\t\tdlist.extend(dirname)\n\t\tbreak\n\t\t\n\tvprint('[?] Files found:')\n\tvprint(flist)\n\tvprint('[?] Directories found:')\n\tvprint(dlist)\n\n\tsearchfiles(flist, dir)\n\t\n\tif (rec) & (dlist != []):\n\t\tfor d in dlist:\n\t\t\tparsedirectory(dir+'/'+d)\n\t\t\t\ndef vprint(str):\n\tglobal verbose\n\tif verbose:\n\t\tprintline(str)\n\ndef printline(s):\n\tprint(s)\n\tif outfile != None:\n\t\twith open(outfile, 'a') as f:\n\t\t\tf.write(str(s)+\"\\n\")\n\t\t\t\ndef prettynumbers(str):\n\treturn \"{:,}\".format(str)\n\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"filecrawler.py","file_name":"filecrawler.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"141740895","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pecan\nfrom pecan import rest\nimport wsmeext.pecan as wsme_pecan\n\nfrom surveil.api.datamodel import live_query\nfrom surveil.api.datamodel.status.metrics import metric as m\nfrom surveil.api.handlers.status.metrics import metric_handler\nfrom surveil.api.handlers.status.metrics import metric_name_handler\nfrom surveil.common import util\n\n\nclass MetricsController(rest.RestController):\n\n @util.policy_enforce(['authenticated'])\n @wsme_pecan.wsexpose([m.Metric])\n def get(self):\n \"\"\"Returns all metrics name for a host with a service.\"\"\"\n handler = metric_name_handler.MetricNameHandler(pecan.request)\n metrics_name = handler.get(\n pecan.request.context['host_name'],\n pecan.request.context['service_name']\n )\n return metrics_name\n\n @pecan.expose()\n def _lookup(self, metric_name, *remainder):\n return MetricController(metric_name), remainder\n\n\nclass MetricController(rest.RestController):\n\n def __init__(self, metric_name):\n pecan.request.context['metric_name'] = metric_name\n self.metric_name = metric_name\n\n @util.policy_enforce(['authenticated'])\n @wsme_pecan.wsexpose(m.Metric)\n def get(self):\n \"\"\"Return the last measure of the metric of the service of the host.\"\"\"\n handler = metric_handler.MetricHandler(pecan.request)\n metric = handler.get(\n pecan.request.context['host_name'],\n self.metric_name,\n pecan.request.context['service_name']\n )\n return metric\n\n @util.policy_enforce(['authenticated'])\n @wsme_pecan.wsexpose([m.Metric], body=live_query.LiveQuery)\n def post(self, query):\n \"\"\"Returns all matching metrics.\n\n :param live query: a live query\n \"\"\"\n handler = metric_handler.MetricHandler(pecan.request)\n metrics = handler.get_all(query=query,\n metric_name=self.metric_name,\n host_name=pecan.request.context['host_name'],\n service_description=pecan.request.\n context['service_name'])\n return metrics\n","sub_path":"surveil/api/controllers/v2/status/hosts/services/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"180911756","text":"import numpy as np\nimport scipy.io as sio\nimport tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\nimport itertools\nimport csv\nimport random\nfrom random import randint\nfrom keras.layers import Dense, Dropout, Conv2D, Flatten, Input, BatchNormalization, Lambda\nfrom keras.layers import Conv1D, MaxPooling1D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers.merge import concatenate\nfrom keras.layers.core import Reshape\nfrom keras.models import Model, load_model, Sequential\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.utils import to_categorical, plot_model\nfrom keras import backend as K\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef load_data(dataset):\n # Groups all the transitions into one catagory\n if dataset=='time_and_frequency': # Zero padded time data and frequency data (magnitude+phase) stacked in a matrix\n data = sio.loadmat('data/data_time_and_fft.mat')\n X_train = np.expand_dims(data['X_train'], axis=-1)\n X_dev = np.expand_dims(data['X_dev'], axis=-1)\n X_test = np.expand_dims(data['X_test'], axis=-1)\n Y_train = data['Y_train']\n Y_dev = data['Y_dev']\n Y_test = 
data['Y_test']\n train_ind = Y_train > 7\n Y_train[train_ind] = 7\n dev_ind = Y_dev > 7\n Y_dev[dev_ind] = 7\n test_ind = Y_test > 7\n Y_test[test_ind] = 7\n Y_train = to_categorical(Y_train)\n Y_dev = to_categorical(Y_dev)\n Y_test = to_categorical(Y_test)\n print('X_train shape:', X_train.shape)\n print('X_dev shape:', X_dev.shape)\n print('X_test shape:', X_test.shape)\n print('X_train Mean value:', np.mean(X_train))\n print('X_train STD value:', np.mean(np.std(X_train)))\n return X_train, X_dev, X_test, Y_train, Y_dev, Y_test\n\n\ndef filter_num(base_filter_num, c, block_num):\n f_pow = min(c, block_num - c)\n if c > block_num/2:\n f_pow += 1\n return base_filter_num*(2**(f_pow-1))\n\n\ndef percept_leg(input, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, drop):\n x = input\n for c1 in range(1, conv1_block+1):\n x = Conv2D(filters=filter_num(base_filter_num, c1, conv1_block), kernel_size=conv1_kernel, activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = MaxPooling2D(pool_size=(2,1))(x)\n x = Dropout(drop)(x)\n x = Conv2D(filters=base_filter_num, kernel_size=conv2_kernel, strides=(1,3), activation = 'relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Dropout(drop)(x)\n x = GlobalAveragePooling2D()(x)\n x = Dropout(drop)(x)\n output = Dense(units=8, activation='softmax')(x)\n return output\n\n\ndef model_architecture(X_train, architecture, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, dense_size, drop):\n if architecture=='perceptnet':\n input = Input((X_train.shape[1], X_train.shape[2], X_train.shape[3]))\n t = Lambda(lambda x: x[:, :, 0:6])(input)\n f = Lambda(lambda x: x[:, :, 6:18])(input)\n t = percept_leg(t, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, drop)\n f = percept_leg(f, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, drop)\n x = concatenate([t, f])\n x = Dense(dense_size, activation='relu')(x)\n output = Dense(units=8, activation='softmax')(x)\n model = Model(inputs=[input], outputs=[output])\n #plot_model(model, to_file='Network_Figures/'+str(architecture)+'.png', show_shapes=True)\n model.summary()\n return model\n\n\ndef train_model(X_train, Y_train, X_dev, Y_dev, architecture, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, dense_size, drop, batch_size, learning_rate):\n model = model_architecture(X_train, architecture, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, dense_size, drop)\n opt = Adam(lr=learning_rate)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n early_stopper = EarlyStopping(patience=15, verbose=1)\n check_pointer = ModelCheckpoint(filepath='Trained_Networks/network.hdf5', verbose=1, save_best_only=True)\n model.fit(X_train, Y_train, batch_size=batch_size, epochs=100, shuffle='true', callbacks=[early_stopper, check_pointer], validation_data=(X_dev, Y_dev))\n\n\ndef run_experiment(dataset='time_and_frequency', architecture='perceptnet', conv1_block=4, base_filter_num=32, conv1_kernel=(20,1), conv2_kernel=(20,3), dense_size=20, drop=0.4, batch_size=32, learning_rate=0.001, min_loss=0):\n X_train, X_dev, X_test, Y_train, Y_dev, Y_test = load_data(dataset)\n train_model(X_train, Y_train, X_dev, Y_dev, architecture, conv1_block, base_filter_num, conv1_kernel, conv2_kernel, dense_size, drop, batch_size, learning_rate)\n evaluation, predictions = evaluate_experiment(X_test, Y_test, architecture, min_loss) # evaluate on test set\n return evaluation, predictions\n\n\ndef evaluate_experiment(X_test, Y_test, architecture, 
min_loss):\n loaded_model = load_model('Trained_Networks/network.hdf5') # Loads best loss epoch model\n evaluation = loaded_model.evaluate(X_test, Y_test, verbose=0) # Evaluates the loaded model\n if min_loss > evaluation[0]:\n loaded_model.save('Trained_Networks/grouped_network.hdf5')\n predictions = loaded_model.predict(X_test) # Makes the predictions from the loaded model\n print('Evaluation Metrics:', loaded_model.metrics_names[0], evaluation[0], loaded_model.metrics_names[1], evaluation[1]) # test loss and accuracy\n #os.rename('Trained_Networks/network.hdf5', 'Trained_Networks/'+str(architecture)+'_'+str('%.4f' % evaluation[1])+'.hdf5')\n os.remove('Trained_Networks/network.hdf5') # in hyperparam search: delete model (hyperparams are saved)\n return evaluation, predictions\n\n\ndef plot_confusion_matrix(Y_true, Y_pred, architecture):\n matrix = confusion_matrix(Y_true.argmax(axis=1), Y_pred.argmax(axis=1))\n plt.figure()\n plt.imshow(matrix, interpolation='nearest')\n plt.colorbar()\n fmt = 'd'\n thresh = matrix.max() / 2.\n for i, j in itertools.product(range(matrix.shape[0]), range(matrix.shape[1])):\n plt.text(j, i, format(matrix[i, j], fmt), horizontalalignment=\"center\", color=\"white\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()\n plt.savefig('Results/confmat_%s.png' % architecture)\n\n\nconv1_block=2\nbase_filter_num=60\nconv1_kernel=14\nconv2_kernel=42\ndense_size=71\ndrop=0.365952189776869\nbatch_size=16 # (powers of 2)\nlearning_rate=0.002602647881969\nmin_loss = 1000\nmax_accuracy = 0\navg_loss = 0\navg_accuracy = 0\nfor j in range(20):\n evaluation, predictions = run_experiment(dataset='time_and_frequency', \n architecture='perceptnet', \n conv1_block=conv1_block,\n base_filter_num=base_filter_num, \n conv1_kernel=(conv1_kernel,1), conv2_kernel=(conv2_kernel,3), \n dense_size=dense_size,\n drop=drop,\n batch_size=batch_size,\n learning_rate=learning_rate,\n min_loss=min_loss)\n print(evaluation)\n if min_loss > evaluation[0]:\n min_loss = evaluation[0]\n max_accuracy = evaluation[1]\n avg_loss += evaluation[0]\n avg_accuracy += evaluation[1]\n K.clear_session() # clears session to prevent slowdown\navg_loss = avg_loss/(j+1)\navg_accuracy = avg_accuracy/(j+1)\n\nprint('Average Loss (test set):', avg_loss, 'Average Accuracy (test set):', avg_accuracy)\nprint('Minimum Loss (test set):', min_loss, 'Max Accuracy (test set):', max_accuracy)\n\n_, _, X_test, _, _, Y_test = load_data('time_and_frequency')\nloaded_model = load_model('Trained_Networks/grouped_network.hdf5') # Loads best model\npredictions = loaded_model.predict(X_test) # Makes the predictions from the loaded model\nplot_confusion_matrix(Y_test, predictions, 'perceptnet') # Plots confusion matrix\n","sub_path":"grouped_net.py","file_name":"grouped_net.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"521337100","text":"\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport os\nimport json\n\nfrom hyper import BATCH_SIZE\n\n#derived from https://github.com/python-engineer/snake-ai-pytorch\n\nclass Linear_QNet(nn.Module):\n def __init__(self,input_size, hidden_size, output_size):\n super().__init__()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.ReLU(),\n #nn.Linear(hidden_size, hidden_size),\n #nn.ReLU(),\n nn.Linear(hidden_size, output_size)\n )\n\n def forward(self, x):\n 
return self.linear_relu_stack(x)\n\n def save(self, file_name='model.pth'):\n torch.save(self, file_name)\n\nclass QTrainer:\n def __init__(self, model, lr, gamma,decay_iterations=100, iter_growth_val = 1.1,ogamma=0.7):\n self.lr = lr\n self.gamma = gamma\n self.decay_iterations = decay_iterations\n self.iter_growth_val = iter_growth_val\n self.decay_steps = 0\n self.model = model\n self.optimizer = optim.Adam(model.parameters(), lr=self.lr)\n #self.optimizer = optim.SGD(model.parameters(), lr=self.lr, momentum=0.9)\n self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=ogamma, verbose=True)\n self.criterion = nn.MSELoss()\n self.iterations = 0\n\n def train_step2(self, state, action, reward, next_state, done):\n BATCH_SIZE = 1 # from agent.py\n self.iterations += 1\n if self.iterations*BATCH_SIZE > self.decay_iterations:\n self.iterations = 0 \n self.decay_iterations = self.decay_iterations + (BATCH_SIZE * self.decay_steps) * self.iter_growth_val\n self.decay_steps += 1\n if self.decay_steps < 5:\n if self.decay_iterations >40000: #was 500K\n self.decay_iterations = 40000\n else:\n scheduled_lr = self.scheduler.state_dict()['_last_lr']\n self.decay_iterations = 40_000 + 0.5/float(scheduled_lr[0])\n print(\"adding to exponential decay iteraions \",scheduled_lr,self.decay_iterations)\n print(\" ************************************ Scheduler.step() *****************************\", )\n self.scheduler.step()\n print(\" ************************************\")\n print(self.scheduler.state_dict())\n print(\" ************************************ Scheduler.step() COMPLETE *********************\", )\n else:\n if self.iterations % 100000 == 0:\n print(\"Iteration \",self.iterations, \" Batch_iteration \", self.iterations*BATCH_SIZE, \" of Decay Iterations \", self.decay_iterations)\n \n \n state = torch.tensor(state, dtype=torch.float)\n next_state = torch.tensor(next_state, dtype=torch.float)\n action = torch.tensor(action, dtype=torch.int32)\n reward = torch.tensor(reward, dtype=torch.float)\n\n if len(state.shape) == 1:\n state = torch.unsqueeze(state, 0)\n next_state = torch.unsqueeze(next_state, 0)\n action = torch.unsqueeze(action, 0)\n reward = torch.unsqueeze(reward, 0)\n done = (done, )\n\n pred = self.model(state)\n target = pred.clone()\n for idx in range(len(done)):\n Q_new = reward[idx]\n if not done[idx]:\n ns = next_state[idx]\n a = self.model(ns)\n Q_new = reward[idx] + self.gamma * torch.max(a)\n\n target[idx][torch.argmax(action[idx]).item()] = Q_new\n \n # 2: Q_new = r + y * max(next_predicted Q value) -> only do this if not done\n self.optimizer.zero_grad()\n loss = self.criterion(target, pred)\n loss.backward()\n self.optimizer.step()\n\n def train_step(self, state, action, reward, next_state, done):\n \n batch_step = True\n\n state = torch.tensor(state, dtype=torch.float)\n next_state = torch.tensor(next_state, dtype=torch.float)\n action = torch.tensor(action, dtype=torch.int32)\n reward = torch.tensor(reward, dtype=torch.float)\n\n if len(state.shape) == 1:\n batch_step = False\n state = torch.unsqueeze(state, 0)\n next_state = torch.unsqueeze(next_state, 0)\n action = torch.unsqueeze(action, 0)\n reward = torch.unsqueeze(reward, 0)\n done = (done, )\n \n\n if batch_step == True:\n self.iterations += 1\n if self.iterations > self.decay_iterations:\n self.iterations = 0 \n self.decay_iterations = self.decay_iterations * self.iter_growth_val\n self.decay_steps += 1\n if self.decay_steps < 5:\n if self.decay_iterations > 200: #was 500K\n 
self.decay_iterations = 200\n else:\n scheduled_lr = self.scheduler.state_dict()['_last_lr']\n self.decay_iterations = self.decay_iterations + 0.001/float(scheduled_lr[0])\n print(\"adding to exponential decay iteraions \",scheduled_lr,self.decay_iterations)\n print(\" ************************************ Scheduler.step() *****************************\", )\n self.scheduler.step()\n print(\" ************************************\")\n print(self.scheduler.state_dict())\n print(\" ************************************ Scheduler.step() COMPLETE *********************\", )\n else:\n if self.iterations % 100000 == 0:\n print(\"Iteration \",self.iterations, \" Batch_iteration \", self.iterations*BATCH_SIZE, \" of Decay Iterations \", self.decay_iterations) \n \n pred = self.model(state)\n target = pred.clone()\n for idx in range(len(done)):\n Q_new = reward[idx]\n if not done[idx]:\n ns = next_state[idx]\n a = self.model(ns)\n Q_new = reward[idx] + self.gamma * torch.max(a)\n\n target[idx][torch.argmax(action[idx]).item()] = Q_new\n \n # 2: Q_new = r + y * max(next_predicted Q value) -> only do this if not done\n self.optimizer.zero_grad()\n loss = self.criterion(target, pred)\n loss.backward()\n self.optimizer.step()\n\n\n def save(self, filename='model', describe=None):\n # save the model\n # save the data\n model_folder_path = './model'\n if not os.path.exists(model_folder_path):\n os.makedirs(model_folder_path)\n\n full_filename = os.path.join(model_folder_path, filename+'.train_data')\n data = {}\n data['gamma'] = self.gamma\n data['lr'] = self.lr\n data['decay_iterations'] = self.decay_iterations\n data['iter_growth_val'] = self.iter_growth_val\n data['model'] = str(self.model.state_dict())\n data['scheduler'] = str(self.scheduler.state_dict())\n with open(full_filename, 'w') as outfile: \n json.dump(data, outfile)\n full_filename = os.path.join(model_folder_path, filename+'.pth')\n self.model.save(full_filename)\n full_filename = os.path.join(model_folder_path, filename+'.describe')\n if describe is not None:\n with open(full_filename, 'w') as outfile:\n for i,col in describe:\n o = f'{i}\\t {col}\\n'\n outfile.write(o)\n","sub_path":"aiexplore/rls500/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"274712039","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\n\nfrom scrapy import signals\n\nfrom .exporters import CsvScrapperExporter\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings.development')\nfrom ext.scraping.models import JobModel\nfrom django.core.files.storage import default_storage\n\n\nclass CsvExportPipeline(object):\n\n def __init__(self):\n self.result_file = default_storage.open('verde output {0}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')), 'w+b')\n self.exporter = CsvScrapperExporter(self.result_file)\n\n @classmethod\n def from_crawler(cls, crawler):\n pipeline = cls()\n crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)\n crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)\n crawler.signals.connect(pipeline.spider_error, signals.spider_error)\n return pipeline\n\n def spider_opened(self, spider):\n self.exporter.start_exporting()\n\n job = JobModel.objects.get(pk=spider.job_id)\n job.status = JobModel.STATUSES[1][0]\n job.save()\n\n def spider_closed(self, spider):\n self.exporter.finish_exporting()\n\n job = JobModel.objects.get(pk=spider.job_id)\n 
job.status = JobModel.STATUSES[2][0]\n job.result = self.result_file.key.generate_url(expires_in=0, query_auth=False)\n job.finished_at = datetime.datetime.now()\n job.save()\n\n self.result_file.close()\n\n def spider_error(self, spider):\n self.exporter.finish_exporting()\n\n job = JobModel.objects.get(pk=spider.job_id)\n job.status = JobModel.STATUSES[3][0]\n job.result = self.result_file.key.generate_url(expires_in=0, query_auth=False)\n job.finished_at = datetime.datetime.now()\n job.save()\n\n self.result_file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n","sub_path":"src/ext/scraping/abebooks/abebooks/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604250414","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/reviewboard/reviews/tests/test_download_raw_diff_view.py\n# Compiled at: 2020-02-11 04:03:56\n\"\"\"Unit tests for reviewboard.reviews.views.DownloadRawDiffView.\"\"\"\nfrom __future__ import unicode_literals\nfrom reviewboard.testing import TestCase\n\nclass DownloadRawDiffViewTests(TestCase):\n \"\"\"Unit tests for reviewboard.reviews.views.DownloadRawDiffView.\"\"\"\n fixtures = [\n b'test_users', b'test_scmtools']\n\n def test_sends_correct_content_disposition(self):\n \"\"\"Testing DownloadRawDiffView sends correct Content-Disposition\"\"\"\n review_request = self.create_review_request(create_repository=True, publish=True)\n self.create_diffset(review_request=review_request)\n response = self.client.get(b'/r/%d/diff/raw/' % review_request.pk)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[b'Content-Disposition'], b'attachment; filename=diffset')\n\n def test_normalize_commas_in_filename(self):\n \"\"\"Testing DownloadRawDiffView removes commas in filename\"\"\"\n review_request = self.create_review_request(create_repository=True, publish=True)\n self.create_diffset(review_request=review_request, name=b'test, comma')\n response = self.client.get(b'/r/%d/diff/raw/' % review_request.pk)\n content_disposition = response[b'Content-Disposition']\n filename = content_disposition[len(b'attachment; filename='):]\n self.assertFalse(b',' in filename)","sub_path":"pycfiles/ReviewBoard-3.0.17-py2.7/test_download_raw_diff_view.py","file_name":"test_download_raw_diff_view.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"391001452","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n# from mpl_finance import candlestick_ohlc\nfrom mpl_finance import candlestick_ohlc, candlestick2_ohlc, candlestick2_ochl\nimport datetime\nfrom oandapyV20 import API\nimport oandapyV20.endpoints.instruments as instruments\nimport talib\nimport configparser\n\n# 設定ファイルの読み込み\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\naccess_token = config.get(\"oanda\", \"access_token\")\n\napi = API(access_token=access_token, environment=\"practice\")\n \nparams = {\n \"granularity\": \"M1\", # 取得する足\n \"count\": 200, # 取得する足数\n \"price\": \"B\", # Bid\n}\n \ninstrument = \"USD_JPY\" # 通貨ペア\n \ninstruments_candles = instruments.InstrumentsCandles(instrument=instrument, 
params=params)\n \napi.request(instruments_candles)\nresponse = instruments_candles.response\n \ndf = pd.DataFrame(response[\"candles\"])\n \nohlc = []\nfor r in response[\"candles\"]:\n    time = r[\"time\"].replace(\".000000000Z\", \"\")\n    time = datetime.datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\")\n    time = mdates.date2num(time)\n    r[\"bid\"][\"time\"] = time\n    ohlc.append(r[\"bid\"])\n \ndf = pd.DataFrame(ohlc)\ndf = df.astype(np.float64)\n \n# Display the candlestick chart\nfig, ax = plt.subplots(figsize=(10, 5))\n# df = df[[\"time\", \"o\", \"h\", \"l\", \"c\"]]\ndf = df[[\"time\", \"o\", \"h\", \"l\", \"c\"]]\n# opens = df[\"o\"].values\n# highs = df[\"h\"].values\n# lows = df[\"l\"].values\n# closes = df[\"c\"].values\n\ndf[\"time\"] = np.arange(len(df[\"time\"].values))\ncandlestick_ohlc(ax, df.values, colorup=\"green\", colordown=\"red\")\n\n# candlestick2_ochl(ax, opens, closes, highs, lows, colorup=\"green\", colordown=\"red\")\n# candlestick2_ochl(ax, opens, highs, lows, closes, colorup=\"green\", colordown=\"red\")\n \n# candlestick2_ochl(ax, opens, closes, highs, lows, colorup=\"red\", colordown=\"blue\")\n\ntimes = df[\"time\"].values\n\n# Calculate the moving averages\nsma_25 = talib.SMA(df[\"c\"].values, 25) # 25-period moving average\nax.plot(times, sma_25, label=\"25\")\n\nsma_75 = talib.SMA(df[\"c\"].values, 75) # 75-period moving average\nax.plot(times, sma_75, label=\"75\")\n\nsma_200 = talib.SMA(df[\"c\"].values, 200) # 200-period moving average\nax.plot(times, sma_200, label=\"200\")\n\n# ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n\nplt.show()\n","sub_path":"fx/plot_chart.py","file_name":"plot_chart.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"150548803","text":"import urllib, urllib2, json, jwt\nfrom django.http import HttpResponseRedirect, JsonResponse,HttpResponse\nfrom django.conf import settings\nfrom application.common import registry\n\nclass AuthenticationMiddleware(object):\n    def process_request(self, request):\n        authorize = False\n        global _USER\n        if settings.OAUTH_USER_JWT_DEBUG:\n            registry.USER = settings.OAUTH_USER_JWT_DEBUG\n        else:\n            jwt_token = request.COOKIES.get(settings.OAUTH_COOKIE_NAME)\n            if jwt_token:\n                user = jwt.decode(jwt_token, settings.OAUTH_CLIENT_EK, algorithms=['HS256'])\n                if user:\n                    authorize = True\n                    registry.USER = user\n        if not authorize:\n            redirect_url = request.GET.get('redirect', settings.APPLICATION_URL)\n            sso_server_url = settings.SECURITY_SERVER_URL + '/user/login?redirect=%s' % (redirect_url)\n            return HttpResponseRedirect(sso_server_url)\n","sub_path":"admin-web/src/www/application/common/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"515221236","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Codec:\n\n    def serialize(self, root: TreeNode) -> str:\n        \"\"\"Encodes a tree to a single string.\n        \"\"\"\n        self.list = []\n\n        # recursive function declaration\n        def dfs(node):\n            if not node:\n                return\n            self.list.append(node.val)\n            dfs(node.left)\n            dfs(node.right)\n\n        dfs(root)\n        return \",\".join(map(str, self.list))\n\n    def deserialize(self, data: str) -> TreeNode:\n        \"\"\"Decodes your encoded data to tree.\n        \"\"\"\n        if not data: return None\n        list = [int(d) for d in data.split(\",\")]\n\n        def recurse(list, lower, upper):\n            if not list: return None\n            if not lower <= list[0] <= upper: return None\n\n            node = list.pop(0)\n            root = TreeNode(node)\n\n            root.left = recurse(list, lower, root.val)\n            root.right = recurse(list, root.val, upper)\n\n            return root\n\n        return recurse(list, -float(\"inf\"), float(\"inf\"))\n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# tree = ser.serialize(root)\n# ans = deser.deserialize(tree)\n# return ans","sub_path":"Interview/Amazon/serializeDeserializeBST.py","file_name":"serializeDeserializeBST.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"497180258","text":"#!/usr/bin/env python\nimport sys\nimport serial\nimport signal\nimport logging\nimport coloredlogs\nfrom multiprocessing.connection import Listener\n\nlog = logging.getLogger('INO Command listener')\ncoloredlogs.install(level='DEBUG')\n\nSERIAL_PORT = '/dev/cu.wchusbserial1410'\n\nino = serial.Serial(SERIAL_PORT, 9600)\naddress = ('localhost', 6000)\nlistener = Listener(address, authkey=b'qweqwe')\nlog.info('Started. Ready to accept commands')\nconnection = None\n\ndef exit_process():\n    global connection\n    ino.close()\n    if connection:\n        connection.close()\n    listener.close()\n    log.info('Exiting..')\n    sys.exit(0)\n\n\ndef signal_handler(signal, frame):\n    exit_process()\n\nsignal.signal(signal.SIGINT, signal_handler)\n\ndef listen_for_msg():\n    global connection\n    connection = listener.accept()\n    msg = connection.recv_bytes().decode()\n    log.info('Command received: %s' % msg)\n    if msg == 'close':\n        exit_process()\n    ino.write('{}\\n'.format(msg).encode())\n    connection.close()\n\nwhile True:\n    listen_for_msg()\n\nlistener.close()\n","sub_path":"pyserial/py/command_listener.py","file_name":"command_listener.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"232344158","text":"from django.urls import path\n\nfrom posts import views\n\nurlpatterns = [\n    #path('',HomeView.as_view(), name='home'),\n    path('', views.Home, name='Home'),\n    path('contact.html', views.contact, name='contact'),\n    path('about.html', views.about, name='about'),\n\n\n\n]","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"118293971","text":"from .models import DailyCsv\nfrom cov19diff.jhudata import readDaily\nfrom cov19diff.restype import DaylyStatus\n\nfrom datetime import date, datetime, timedelta\nimport random\n\nclass DailyStatusData:\n\n    def __init__(self):\n        self.start = datetime.today()\n        self.end = self.start - timedelta(days=30)\n        self.now = self.end\n\n    def reset(self):\n        self.now = self.end\n\n    def getData(self):\n        lines = []\n        while self.now <= self.start:\n            datas = readDaily(self.now.strftime('%m-%d-%Y'))\n            self.now = self.now + timedelta(days=1)\n            if len(datas) > 0:\n                lines.append('name\\tconfirmed\\tdeaths\\tdeathRatio\\trecover\\trecoverRatio\\tactive\\tactiveRatio')\n                dss = []\n                for name, data in datas.items():\n                    ds = DaylyStatus(name,data['Confirmed'],data['Deaths'],data['Recovered'])\n                    dss.append(ds)\n                dss = sorted(dss, key=lambda d: random.random())\n                for ds in dss:\n                    lines.append('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s' % (\n                        ds.cname,\n                        ds.confirmed,\n                        ds.deaths,\n                        ds.deathRatio(),\n                        ds.recover,\n                        ds.recoverRatio(),\n                        ds.active(),\n                        ds.activeRatio()\n                    ))\n                break\n        return '\\n'.join(lines)\n","sub_path":"cov19diff/dailystatusData.py","file_name":"dailystatusData.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"227069111","text":"# Author: Christian Brodbeck \nimport os\n\nfrom .._utils import ui\n\n__all__ = ['txt']\n\n\ndef txt(iterator, fmt='%s', delim=os.linesep, dest=None):\n    \"\"\"\n    Writes any object that supports iteration to a text file.\n\n    Parameters\n    ----------\n    iterator : iterator\n        Object that iterates over values to be saved\n    fmt : fmt-str\n        format-string which is used to format the iterator's values\n    delim : str\n        the delimiter which is inserted between values\n    dest : str(path) | None\n        The destination; if None, a system save-as dialog is displayed\n    \"\"\"\n    if dest is None:\n        name = repr(iterator)[:20]\n        msg = \"Save %s...\" % name\n        dest = ui.ask_saveas(msg, msg, [(\"Plain Text File\", '*.txt')])\n\n    if dest:\n        with open(dest, 'w') as FILE:\n            FILE.write(delim.join(fmt % v for v in iterator))\n","sub_path":"eelbrain/save/_txt.py","file_name":"_txt.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"551757564","text":"# Copyright (C) 2013 Google Inc., authors, and contributors \n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n# Created By: david@reciprocitylabs.com\n# Maintained By: david@reciprocitylabs.com\n\nimport ggrc.models\nfrom ggrc import db\nfrom .mixins import deferred, Base, Described\nfrom sqlalchemy.ext.declarative import declared_attr\n\nclass Relationship(Base, db.Model):\n  __tablename__ = 'relationships'\n  source_id = db.Column(db.Integer, nullable=False)\n  source_type = db.Column(db.String, nullable=False)\n  destination_id = db.Column(db.Integer, nullable=False)\n  destination_type = db.Column(db.String, nullable=False)\n  relationship_type_id = db.Column(db.String)\n  # FIXME: Should this be a strict constraint? If so, a migration is needed.\n  #relationship_type_id = db.Column(\n  #    db.Integer, db.ForeignKey('relationship_types.id'))\n  relationship_type = db.relationship(\n      'RelationshipType',\n      primaryjoin='foreign(RelationshipType.relationship_type) == Relationship.relationship_type_id',\n      uselist=False)\n\n  def get_relationship_node(self, attr, node_type, node_id):\n    if hasattr(self, attr):\n      return getattr(self, attr)\n    if node_type is None:\n      return None\n    cls = getattr(ggrc.models, node_type)\n    value = db.session.query(cls).get(node_id)\n    setattr(self, attr, value)\n    return value\n\n  #FIXME This provides access to source and destination, but likely breaks some\n  #notification semantics in sqlalchemy. Is it necessary to go beyond this,\n  #though? Are there motivating use cases??\n\n  @property\n  def source(self):\n    return self.get_relationship_node(\n        '_source', self.source_type, self.source_id)\n\n  @source.setter\n  def source(self, value):\n    setattr(self, '_source', value)\n    self.source_id = value.id if value is not None else None\n    self.source_type = value.__class__.__name__ if value is not None else None\n\n  @property\n  def destination(self):\n    return self.get_relationship_node(\n        '_destination', self.destination_type, self.destination_id)\n\n  @destination.setter\n  def destination(self, value):\n    setattr(self, '_destination', value)\n    self.destination_id = value.id if value is not None else None\n    self.destination_type = value.__class__.__name__ if value is not None \\\n        else None\n\n  __table_args__ = (\n    db.UniqueConstraint('source_id', 'source_type', 'destination_id', 'destination_type'),\n  )\n\n  _publish_attrs = [\n      'source',\n      'destination',\n      'relationship_type_id',\n      ]\n\n  def _display_name(self):\n    return self.source.display_name + '<->' + self.destination.display_name\n\nclass RelationshipType(Base, Described, db.Model):\n  __tablename__ = 'relationship_types'\n  relationship_type = deferred(db.Column(db.String), 'RelationshipType')\n  forward_phrase = deferred(db.Column(db.String), 'RelationshipType')\n  backward_phrase = deferred(db.Column(db.String), 'RelationshipType')\n  symmetric = deferred(\n      db.Column(db.Boolean, nullable=False), 'RelationshipType')\n\n  _publish_attrs = [\n      'forward_phrase',\n      'backward_phrase',\n      'symmetric',\n      ]\n\nclass Relatable(object):\n  @declared_attr\n  def related_sources(cls):\n    joinstr = 'and_(remote(Relationship.destination_id) == {type}.id, '\\\n                  'remote(Relationship.destination_type) == \"{type}\")'\n    joinstr = joinstr.format(type=cls.__name__)\n    return db.relationship(\n        'Relationship',\n        primaryjoin=joinstr,\n        foreign_keys = 'Relationship.destination_id',\n        cascade = 'all, delete-orphan')\n\n  @declared_attr\n  def related_destinations(cls):\n    joinstr = 'and_(remote(Relationship.source_id) == {type}.id, '\\\n                  'remote(Relationship.source_type) == \"{type}\")'\n    joinstr = joinstr.format(type=cls.__name__)\n    return db.relationship(\n        'Relationship',\n        primaryjoin=joinstr,\n        foreign_keys = 'Relationship.source_id',\n        cascade = 'all, delete-orphan')\n\n  _publish_attrs = [\n      'related_sources',\n      'related_destinations'\n      ]\n\n  @classmethod\n  def eager_query(cls):\n    from sqlalchemy import orm\n\n    query = super(Relatable, cls).eager_query()\n    return query.options(\n        orm.subqueryload('related_sources'),\n        orm.subqueryload('related_destinations'))\n","sub_path":"src/ggrc/models/relationship.py","file_name":"relationship.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"481860183","text":"import json\nimport matplotlib.pyplot as plt\n\n\"\"\"\nSimple plotting for visualizing and debugging.\n\"\"\"\n\ndef plot_geojson(json_data, color='k'):\n    \"\"\"\n    Plot the features in a GeoJSON dict.\n    \"\"\"\n    fig = plt.figure()\n    ax = fig.gca()\n    \n    for feature in json_data['features']:\n\n        coords = feature['geometry']['coordinates']\n        xy = zip(*coords)\n        \n        ax.plot(xy[0], xy[1], color)\n        \n    ax.axis('equal')\n    plt.show()\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"603961958","text":"import textwrap\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\n\ndef create_image(message, title):\n    xH = 1\n    xW = len(message) + 3\n    if xW > 50:\n        text = textwrap.wrap(message, width=50, break_long_words=False)\n        xH = len(text) + xH\n        xW = 58\n        message = '\\n'.join(text)\n    W, H = 15 * xW, 40 * xH\n    background = Image.new('RGB', (W, H), (255, 255, 255))\n    font = ImageFont.truetype('fonts/Cambria.ttf', 30)\n    draw = ImageDraw.Draw(background)\n    w, h = draw.textsize(message, font)\n    x = (W - w) / 2\n    y = (H - h) / 2\n    draw.text((x, y), message, font=font, align=\"center\", fill=0)\n    background.save(f'images_with_text/{title}.png')\n\n","sub_path":"marco_utils/text_to_image.py","file_name":"text_to_image.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"149317649","text":"# - * - coding:utf8 - * - -\n###########################################\n# Author: Tinkle\n# E-mail: shutingnjupt@gmail.com\n# Name: Replace Words.py\n# Creation Time: 2018/1/5\n###########################################\n'''\nIn English, we have a concept called root, which can be followed by some other words to form another longer word - let's call this word successor. For example, the root an, followed by other, which can form another word another.\n\nNow, given a dictionary consisting of many roots and a sentence. You need to replace all the successor in the sentence with the root forming it. If a successor has many roots can form it, replace it with the root with the shortest length.\n\nYou need to output the sentence after the replacement.\n\nExample 1:\nInput: dict = [\"cat\", \"bat\", \"rat\"]\nsentence = \"the cattle was rattled by the battery\"\nOutput: \"the cat was rat by the bat\"\nNote:\nThe input will only have lower-case letters.\n1 <= dict words number <= 1000\n1 <= sentence words number <= 1000\n1 <= root length <= 100\n1 <= sentence words length <= 1000\n\n'''\nclass Solution(object):\n    def replaceWords(self, dict, sentence):\n        \"\"\"\n        :type dict: List[str]\n        :type sentence: str\n        :rtype: str\n        \"\"\"\n        ret = []\n        dict.sort(key=lambda x:len(x))\n        for tokens in sentence.split():\n            minroot = None\n            for tok in dict:\n                if tok == tokens[0:len(tok)]:\n                    if minroot == None:\n                        minroot = tok\n                        break\n            if minroot == None:\n                ret+=[tokens]\n            else:\n                ret+=[minroot]\n        return ' '.join(ret)\n\n\nclass TrieNode(object):\n    def __init__(self):\n        self.Child = dict()\n        self.isWord = False\n\n\nclass Trie(object):\n    def __init__(self):\n        self.root = TrieNode()\n\n    def insert(self, word):\n        node = self.root\n        for w in word:\n            child = node.Child.get(w)\n            if child is None:\n                child = TrieNode()\n                node.Child[w] = child\n            node = child\n        node.isWord = True\n\n    def search(self, word):\n        node = self.root\n        ret = ''\n        for w in word:\n            if node.isWord == True:\n                return ret\n            child = node.Child.get(w)\n            if child:\n                node = child\n                ret += w\n            else:\n                return ''\n        return ret if node.isWord == True else ''\n\n\nclass Solution1(object):\n    def replaceWords(self, dict, sentence):\n        \"\"\"\n        :type dict: List[str]\n        :type sentence: str\n        :rtype: str\n        \"\"\"\n        ret = []\n        root = Trie()\n        for tok in dict:\n            root.insert(tok)\n        for tokens in sentence.split():\n            minroot = root.search(tokens)\n            if minroot == '':\n                ret += [tokens]\n            else:\n                ret += [minroot]\n        return ' '.join(ret)","sub_path":"HashTable/648. Replace Words.py","file_name":"648. Replace Words.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"369849272","text":"# -*- coding: utf-8 -*-\nimport inspect\nfrom .utils import import_class\nfrom django.db import models\n\n\ntry:\n    from celery import task\nexcept ImportError:\n    task = None\n\n\nclass BaseSignalHandler(object):\n\n    def __init__(self, model=None, index=None):\n        self.model_class = model\n        self.index_class = index\n        self.index = index()\n        self.dispatch_uid_base = 'stretch_{0}'.format(self.index_class.__name__)\n\n    def setup(self):\n        models.signals.post_save.connect(\n            self.on_save,\n            sender=self.model_class,\n            dispatch_uid='{0}_save'.format(self.dispatch_uid_base),\n            weak=False\n        )\n\n        models.signals.post_delete.connect(\n            self.on_delete,\n            sender=self.model_class,\n            dispatch_uid='{0}_delete'.format(self.dispatch_uid_base),\n            weak=False\n        )\n\n    def teardown(self):\n        models.signals.post_save.disconnect(\n            dispatch_uid='{0}_delete'.format(self.dispatch_uid_base)\n        )\n        models.signals.post_delete.disconnect(\n            dispatch_uid='{0}_delete'.format(self.dispatch_uid_base)\n        )\n\n\nclass RealtimeSignalHandler(BaseSignalHandler):\n\n    def on_save(self, sender, **kwargs):\n        instance = kwargs.get('instance')\n        self.index.update_doc(instance)\n\n    def on_delete(self, sender, **kwargs):\n        instance = kwargs.get('instance')\n        self.index.remove_doc(instance)\n\n\nif task:\n    @task\n    def on_save_task(index_path, model_pk):\n        index_class = import_class(index_path)\n        index = index_class()\n        index.update_doc(model_pk)\n\n\n    @task\n    def on_delete_task(index_path, model_pk):\n        index_class = import_class(index_path)\n        index = index_class()\n        index.remove_doc(model_pk)\n\n\n    class CelerySignalHandler(BaseSignalHandler):\n        \"\"\"\n        Updates documents asynchronously using celery\n        \"\"\"\n        def __init__(self, *args, **kwargs):\n            super().__init__(*args, **kwargs)\n            self.index_class_path = '{0}.{1}'.format(self.index_class.__module__, self.index_class.__name__)\n\n        def on_save(self, sender, **kwargs):\n            instance = kwargs.get('instance')\n            on_save_task.delay(\n                self.index_class_path,\n                instance.pk\n            )\n\n        def on_delete(self, sender, **kwargs):\n            instance = kwargs.get('instance')\n            on_delete_task.delay(\n                self.index_class_path,\n                instance.pk\n            )\n","sub_path":"stretch/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"586939672","text":"from abc import ABC\nfrom collections import defaultdict\nfrom typing import List, Iterable, Union, Set, Optional, Tuple, Dict, Callable\n\nfrom langtools.parser.exceptions import ParsingException\nfrom langtools.lexer.token import Token\nfrom langtools.ast.ast import ASTNode\n\n\nclass Symbol(ABC):\n    def __init__(\n        self,\n        name: str,\n        first: Union[Set[str], None],\n        follow: Optional[Set[str]],\n        nullable: Optional[bool],\n    ):\n        self.name = name\n        self.first = first\n        self.follow = follow\n        self.nullable = nullable\n\n    def __repr__(self) -> str:\n        return self.name\n\n\nclass NonTerminal(Symbol):\n    def __init__(self, name):\n        super(NonTerminal, self).__init__(\n            name=name, first=None, follow=None, nullable=None\n        )\n\n\nclass Terminal(Symbol):\n    def __init__(self, name: str, first: Optional[Set[str]] = None):\n        first = first or {name}\n        super(Terminal, self).__init__(\n            name=name, first=first, follow=set(), nullable=False\n        )\n        self.is_epsilon = False\n\n\nclass Epsilon(Terminal):\n    def __init__(self):\n        super(Epsilon, self).__init__(name=\"Epsilon\", first=None)\n        self.is_epsilon = True\n        self.follow = set()\n        self.nullable = True\n\n\nclass EOF(Terminal):\n    def __init__(self):\n        super(EOF, self).__init__(name=\"EOF\")\n\n\nclass BOF(Terminal):\n    def __init__(self):\n        super(BOF, self).__init__(name=\"BOF\")\n\n\nclass ProductionRule:\n    def __init__(self, lhs: NonTerminal, rhs: List[Symbol], name: Optional[str] = None):\n        self.lhs = lhs\n        self.rhs = rhs\n        self.name = name\n\n    def __repr__(self) -> str:\n        return f\"{self.lhs} -> {self.rhs}\"\n\n\nclass CFG:\n    def __init__(\n        self,\n        production_rules: List[ProductionRule],\n        alphabet: Iterable[str],\n        start_symbol: NonTerminal,\n        match_hook=None,\n        rule_hook=None,\n    ):\n\n        self.start_prime = NonTerminal(name=\"S-Prime\")\n        self.start_symbol = start_symbol\n        self.production_rules = production_rules\n        self.production_rules.append(\n            ProductionRule(self.start_prime, [BOF(), self.start_symbol, EOF()])\n        )\n        self.alphabet = alphabet\n        self._generate_parse_table()\n        self.match_hook: Optional[Callable[[Terminal], None]] = match_hook\n        self.rule_hook: Optional[Callable[[ProductionRule], None]] = rule_hook\n\n    def _is_nullable(\n        self,\n        sequence: Union[Symbol, List[Symbol]],\n        visited: Optional[Set[Symbol]] = None,\n    ) -> bool:\n\n        visited = visited or set()\n\n        if not hasattr(sequence, \"__iter__\"):\n            sequence = [sequence]\n\n        result = True\n        for symbol in sequence:\n            if symbol in visited:\n                continue\n            else:\n                visited.add(symbol)\n            if symbol.nullable is False:\n                result = False\n                break\n            elif isinstance(symbol, Terminal) and symbol.is_epsilon is False:\n                result = False\n                break\n            elif isinstance(symbol, NonTerminal):\n                symbol_is_nullable = False\n                for rule in self.production_rules:\n                    if rule.lhs is symbol:\n                        rule_is_nullable = self._is_nullable(\n                            visited=visited, sequence=rule.rhs\n                        )\n\n                        # if any rule is \"nullable\", the symbol is nullable\n                        if rule_is_nullable is True:\n                            symbol_is_nullable = True\n                            break\n\n                symbol.nullable = symbol_is_nullable\n                if symbol_is_nullable is False:\n                    result = False\n                    break\n\n        return result\n\n    def _find_first(\n        self, symbol: Symbol, visited: Optional[Set[Symbol]] = None\n    ) -> Set[str]:\n\n        visited = visited or set()\n\n        result: Set[str] = set()\n        if symbol in visited:\n            return result\n\n        # note terminals should always have first already defined\n        if symbol.first is not None:\n            return symbol.first\n\n        visited.add(symbol)\n        for rule in self.production_rules:\n            if rule.lhs is symbol:\n                if (\n                    isinstance(rule.rhs[0], Terminal)\n                    and rule.rhs[0].is_epsilon is False\n                    and rule.rhs[0].first is not None\n                ):\n                    result = result.union(rule.rhs[0].first)\n                elif (\n                    isinstance(rule.rhs[0], Terminal) and rule.rhs[0].is_epsilon is True\n                ):\n                    # intentionally do nothing\n                    pass\n                elif isinstance(rule.rhs[0], NonTerminal):\n                    result = result.union(\n                        self._find_first(visited=visited, symbol=rule.rhs[0])\n                    )\n\n        symbol.first = result\n        return result\n\n    def _find_follow(\n        self, symbol: Symbol, visited: Optional[Set[Symbol]] = None\n    ) -> Set[str]:\n\n        visited = visited or set()\n\n        if symbol in visited:\n            return set()\n\n        if symbol.follow is not None:\n            return symbol.follow\n\n        visited.add(symbol)\n        result: Set[str] = set()\n\n        symbol_has_left_recursive_rule = False\n        for rule in self.production_rules:\n            if rule.lhs is symbol and rule.rhs[0] is symbol:\n                symbol_has_left_recursive_rule = True\n                break\n\n        for rule in self.production_rules:\n            if symbol_has_left_recursive_rule and rule.lhs is symbol:\n                result = result.union(self._find_first(rule.rhs[0]))\n                if self._is_nullable(rule.rhs[0]):\n                    result = result.union(self._find_follow(rule.rhs[0], visited))\n\n            for i in range(len(rule.rhs)):\n                if rule.rhs[i] is symbol:\n                    if i < len(rule.rhs) - 1:\n                        result = result.union(self._find_first(rule.rhs[i + 1]))\n                        if self._is_nullable(rule.rhs[i + 1]):\n                            result = result.union(\n                                self._find_follow(rule.rhs[i + 1], visited)\n                            )\n                    else:\n                        result = result.union(self._find_follow(rule.lhs, visited))\n\n        return result\n\n    def _generate_parse_table(self) -> None:\n        parse_table: Dict[Tuple[NonTerminal, str], Set[ProductionRule]] = defaultdict(\n            set\n        )\n        for rule in self.production_rules:\n            transition_chars: Set[str] = set()\n            transition_chars = transition_chars.union(self._find_first(rule.rhs[0]))\n\n            if self._is_nullable(rule.rhs[0]):\n                transition_chars = transition_chars.union(\n                    self._find_follow(rule.rhs[0])\n                )\n\n            if self._is_nullable(rule.rhs):\n                transition_chars = transition_chars.union(self._find_follow(rule.lhs))\n\n            for char in transition_chars:\n                parse_table[(rule.lhs, char)].add(rule)\n\n        self.parse_table = parse_table\n\n    def is_grammar_LL1(self) -> bool:\n        for index, cell in self.parse_table.items():\n            if len(cell) > 1:\n                print(f\"index: {index}\")\n                print(f\"cell: {cell}\")\n                return False\n        return True\n\n    def print_parse_table(self) -> None:\n        for key, value in self.parse_table.items():\n            print(f\"{key} : {value}\")\n\n    def LL1_parse(self, tokens: List[Token]) -> ASTNode:\n\n        root = ASTNode(name=\"Container\")\n        ast_stack: List[Tuple[ASTNode, int]] = [(root, 1)]\n\n        if self.is_grammar_LL1():\n            stack: List[Symbol] = [self.start_prime]\n            tokens = [Token(name=\"BOF\")] + tokens + [Token(name=\"EOF\")]\n            token_iterator = iter(tokens)\n            curr_token = next(token_iterator)\n            tokens_exhausted = False\n            while stack:\n                top = stack.pop()\n                while isinstance(top, Epsilon):\n                    top = stack.pop()\n                    ast_stack[-1] = (ast_stack[-1][0], ast_stack[-1][1] - 1)\n                    while len(ast_stack) > 1 and ast_stack[-1][1] == 0:\n                        ast_stack.pop()\n                        ast_stack[-1] = (ast_stack[-1][0], ast_stack[-1][1] - 1)\n                if isinstance(top, NonTerminal):\n                    correct_rule = self.parse_table[(top, curr_token.name)]\n                    if len(correct_rule) < 1:\n                        raise ParsingException(\n                            f\"ParsingException: No matching rule starting at {top}, reading {curr_token.name} found\"\n                        )\n                    for rule in correct_rule:\n                        # this will only run once\n                        rule_node = ASTNode(name=rule.lhs.name)\n                        ast_stack[-1][0].children.append(rule_node)\n                        ast_stack.append((rule_node, len(rule.rhs)))\n                        stack += list(reversed(rule.rhs))\n                elif isinstance(top, Terminal):\n                    if top.name == curr_token.name:\n                        ast_stack[-1][0].children.append(\n                            ASTNode(name=curr_token.name, lexme=curr_token.lexme)\n                        )\n                        ast_stack[-1] = (ast_stack[-1][0], ast_stack[-1][1] - 1)\n                        while len(ast_stack) > 1 and ast_stack[-1][1] == 0:\n                            ast_stack.pop()\n                            ast_stack[-1] = (ast_stack[-1][0], ast_stack[-1][1] - 1)\n                        try:\n                            curr_token = next(token_iterator)\n                        except StopIteration:\n                            tokens_exhausted = True\n                            break\n                    else:\n                        # TODO: Improve this error message\n                        raise ParsingException(\n                            f\"Failed to match token: {curr_token}, top: {top}\"\n                        )\n\n            if tokens_exhausted is False:\n                raise ParsingException(\n                    f\"ParsingException: Unexpected token: {curr_token}\"\n                )\n            elif len(stack) > 0:\n                raise ParsingException(f\"ParsingException: Expected more tokens\")\n            return root.children[0]\n        else:\n            raise ParsingException(\"Grammar must be LL1 in order to LL1 parse\")\n","sub_path":"langtools/parser/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":10743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"608462503","text":"#! /usr/bin/python\n\ndef load_snapshot(fname):\n\n    import h5py\n    import numpy\n\n    with h5py.File(fname,'r') as f:\n        return {field:numpy.array(f[field])\n                for field in f}\n\ndef consolidate(pattern):\n\n    from glob import glob\n    import re\n    import numpy\n\n    file_list = sorted(glob(pattern),\n                       key=lambda fname:int(re.search('(\\d+)',fname)[0]))\n    partitions = [load_snapshot(fname) for fname in file_list]\n    return {field:numpy.concatenate([part[field] for part in partitions])\n            for field in partitions[0]}\n\ndef main():\n\n    import numpy\n    from glob import glob\n    import logging\n\n    LOGLEVEL = os.environ.get('LOGLEVEL', 'WARNING').upper()\n    logging.basicConfig(level=LOGLEVEL)\n\n    ai = 4./3.\n    v = 0.99999\n    lf = (1.0-v**2)**-0.5\n    d = 1.0\n    p = (lf-1.0)*(lf*ai-1.0)*d\n    df = d*((ai*lf+1)/(ai-1))**3/lf**2\n    p = (ai-1.0)*(lf-1)*df\n    ans = p\n    \n    if len(glob('final_*.h5'))>1:\n        final = consolidate('final_*.h5')\n    else:\n        final = load_snapshot('final.h5')\n    index = int(len(final['position'])/20)\n    res = final['pressure'][index]\n\n    return abs(res-ans)/ans<0.1\n\nif __name__=='__main__':\n\n    import os\n\n    os.system('rm -rf test_passed.res test_failed.res')\n    if main():\n        os.system('touch test_passed.res')\n    else:\n        os.system('touch test_failed.res')\n","sub_path":"tests/srhd_sim/shock_heating_spherical/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"67912959","text":"#!/usr/bin/env python3\nimport argparse, os, shlex, subprocess, sys \n\n# scope\n# role\n# datadir > target-dir\n# connstring\n\n# pghoard_restore get-basebackup --config /var/lib/pghoard/pghoard.json \\\n#    --target-dir /var/lib/pgsql/9.5/data\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--scope\")\n    parser.add_argument(\"--role\")\n    parser.add_argument(\"--datadir\")\n    parser.add_argument(\"--connstring\")\n    parser.add_argument(\"--config\", help=\"Full path to pghoard json config\")\n    parser.add_argument('--no_master')\n    args = parser.parse_args()\n    print(args)\n    cmd = \"pghoard_restore get-basebackup --config {args.config} --target-dir {args.datadir}\".format(args=args)\n    print(cmd)\n    try:\n        ret = subprocess.check_output(shlex.split(cmd), env=os.environ.copy())\n    except subprocess.CalledProcessError:\n        return 1\n    for line in ret.decode('ASCII').splitlines():\n        print(line)\n    if 'RestoreError' in ret.decode('ASCII'):\n        return 1\n\nif __name__ == \"__main__\":\n    sys.exit(main() or 0)","sub_path":"patroni_petset/patroni-docker/scripts/pghoard_restore.py","file_name":"pghoard_restore.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"439966621","text":"import pandas as pd\nimport numpy as np\n# import time\n# import math\nimport re\n# import base64\n# from io import BytesIO\n\n# from scipy.spatial.distance import pdist, squareform\n# from scipy.cluster.hierarchy import dendrogram, linkage\n# from scipy import stats\n\nimport streamlit as st\nfrom streamlit_tags import st_tags, st_tags_sidebar\nfrom st_aggrid import AgGrid\nfrom st_aggrid.grid_options_builder import GridOptionsBuilder\nfrom st_aggrid.shared import GridUpdateMode\n\nimport plotly.graph_objects as go\n# from plotly.subplots import make_subplots\nimport plotly.figure_factory as ff\nimport plotly.express as px\n# import matplotlib.pyplot as plt\n# import seaborn as sns\n\n\nst.title(\"Severe COVID-19 blood transcriptomics database\")\nst.markdown('''\n\nThis database is a curation of 7 transcriptomics datasets which compare gene expression differences between severe and mild COVID-19 patients.\n\n## Getting Started\n\n1. Select a dataset to query:\n\n\n\n''', unsafe_allow_html=True)\n\ndf_paths = {\"Chan et al., 2021\": [\"counts/GSE155454_counts.csv\", \"anova/GSE155454_anova.csv\"],\n            \"Bibert et al., 2021\":[\"counts/Bibert2021_counts.csv\",\"anova/Bibert2021_anova.csv\"],\n            \"McClain et al., 2021\":[\"counts/GSE161731_COVID_counts.csv\",\"anova/GSE161731_anova.csv\"],\n            \"Overmyer et al., 2021\":[\"counts/GSE157103_counts.csv\",\"anova/GSE157103_anova.csv\"],\n            \"Arunachalam et al., 2020\":[\"counts/GSE152418_counts.csv\",\"anova/GSE152418_anova.csv\"],\n            \"Carapito et al., 2022\":[\"counts/GSE172114_counts.csv\",\"anova/GSE172114_anova.csv\"],\n            \"Zhang et al., 2021\":['counts/GSE164805_counts.csv', 'anova/GSE164805_anova.csv']}\n\ndf_desc = {\"Chan et al., 2021\": (\"14 severe COVID-19, 18 mild COVID-19, and 6 healthy\", \"GSE155454\", \"https://doi.org/10.15252/emmm.202114045\"),\n           \"Bibert et al., 2021\":(\"15 severe COVID-19, 63 mild COVID-19, and 27 healthy\", \"Bibert2021\", \"https://dx.doi.org/10.3389%2Ffimmu.2021.666163\"),\n           \"McClain et al., 2021\":(\"6 severe COVID-19, 10 mild COVID-19, and 19 healthy\", \"GSE161731\", \"https://www.nature.com/articles/s41467-021-21289-y\"),\n           \"Overmyer et al., 2021\":(\"50 severe COVID-19 and 50 mild COVID-19\", \"GSE157103\", \"https://doi.org/10.1016/j.cels.2020.10.003\"),\n           \"Arunachalam et al., 2020\":(\"4 severe COVID-19, 12 mild COVID-19, and 17 healthy\", \"GSE152418\", \"https://doi.org/110.1126/science.abc6261\"),\n           \"Carapito et al., 2022\":(\"46 severe COVID-19 and 23 mild COVID-19\", \"GSE172114\", \"https://doi.org/10.1126/scitranslmed.abj7521\"),\n           \"Zhang et al., 2021\":(\"5 severe COVID-19, 5 mild COVID-19, and 5 healthy\", \"GSE164805\", \"https://www.frontiersin.org/articles/10.3389/fimmu.2021.631226/full\")\n           }\n\ndfchoice = st.selectbox(label='Select a dataset', options=df_paths.keys())\n\ndf, df_anova = pd.read_csv(df_paths[dfchoice][0], index_col=0), pd.read_csv(df_paths[dfchoice][1]) # probably have to insert a for loop from here if we want a comparative box plot between datasets\n\n\n\nst.markdown(' 2. Search for your gene of interest using the filters indicated the dataframe headers') \nst.markdown(' 3. Tick the checkbox indicated beside the gene name. You may scroll down or click on the Probeset ID header to query any specific gene of interest. Note that only one gene can be selected at a time for graph plotting. For multiple gene queries, please use processed data available at https://github.com/kuanrongchan/COVID19-severity')\n\n\n######## Ag-Grid Stuff ###########\ngb = GridOptionsBuilder.from_dataframe(df_anova)\ngb.configure_selection('single', use_checkbox=True, pre_selected_rows=[0]) # allows for checkbox selection of the dataframe. Shows the anova data but will plot the raw expression counts\ngridOptions = gb.build()\n\nwith st.expander(\"Expand for dataset details\", expanded=False):\n    st.markdown(f'''\n\n    **Description of {dfchoice} dataset**\n\n    {dfchoice} dataset compares the gene expression differences between {df_desc[dfchoice][0]} subjects. \n    Raw count data can be found in [{df_desc[dfchoice][1]}]({df_desc[dfchoice][2]}) and the full processed data is available at https://github.com/kuanrongchan/COVID19-severity. \n    In the processed data, the fold-change, p-value (t-test) and adjusted p-value (BH step-up procedure) between severe vs mild and severe vs healthy subjects are presented.\n\n    ''', unsafe_allow_html=True)\n\ndata = AgGrid(df_anova, gridOptions=gridOptions, theme='streamlit', update_mode=GridUpdateMode.SELECTION_CHANGED) # assigning a variable as it returns a dict of data and selected columns\n##################################\n\nst.markdown(' 4. Box plots and strip plots comparing severe COVID-19, mild COVID-19 or healthy subjects. Users can mouse over the plots to gather data statistics') \n\n\nidx_name = df_anova.columns[0] # Prevents the need to rename the anova columns\nfig = go.Figure()\n\nif len(data['selected_rows']) != 0:\n    severe = df.filter(regex=('[Ss]evere'), axis=1)\n    mild = df.filter(regex=('[Mm]ild'), axis=1)\n    healthy = df.filter(regex=('[Hh]ealthy'), axis=1)\n    genechoice = data['selected_rows'][0][idx_name]\n    fig.add_trace(go.Box(y=severe.loc[genechoice], name='Severe', boxpoints='all', marker_color = 'indianred'))\n    fig.add_trace(go.Box(y=mild.loc[genechoice], name=\"Mild\", boxpoints='all', marker_color='royalblue'))\n    fig.add_trace(go.Box(y=healthy.loc[genechoice], name=\"Healthy\", boxpoints='all', marker_color='lightseagreen'))\n    \n    fig.update_layout(title=f\"Box Plot of {genechoice} in {dfchoice} Dataset\", title_x=0.5,\n                      xaxis_title=\"Severity of Disease\", yaxis_title=\"Expression\")\n    st.plotly_chart(fig)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"191903697","text":"\"\"\"\nPattern would look like this if, N=6\n    ABCDEF\n    G$$$$H\n    I$$$$J\n    K$$$$L\n    M$$$$N\n    OPQRST\n\nInput:\nFirst line consists of T test cases. Only line of every test case consists of an integer N.\n\nOutput:\nPrint the following pattern.\n\nConstraints:\n1<=T<=6\n1<=N<=6\n\nExample:\nInput:\n1\n6\nOutput:\nABCDEF\nG$$$$H\nI$$$$J\nK$$$$L\nM$$$$N\nOPQRST\n\"\"\"\n\n\ndef print_pattern(n):\n    ccode = 65\n    for j in range(n):\n        for k in range(n):\n            if j == 0 or j == n - 1:\n                print(chr(ccode), end=\"\")\n                ccode += 1\n            else:\n                if k == 0 or k == n - 1:\n                    print(chr(ccode), end=\"\")\n                    ccode += 1\n                else:\n                    print(\"$\", end=\"\")\n        print()\n\n\nif __name__ == '__main__':\n    t = int(input())\n    for i in range(t):\n        n = int(input())\n        print_pattern(n)\n","sub_path":"practice/school/pattern_1.py","file_name":"pattern_1.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"547576352","text":"# coding=utf-8\n# author=yphacker\n\n\nimport torch.nn as nn\nfrom conf import config\n\n\nclass Model(nn.Module):\n    def __init__(self):\n        super(Model, self).__init__()\n        self.conv1 = nn.Sequential(\n            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=2, stride=(1, 1)),\n            # nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),\n            nn.ReLU())\n        self.conv2 = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=64, kernel_size=2),\n                                   # nn.MaxPool2d(kernel_size=(4, 2), stride=(4, 2)),\n                                   nn.ReLU())\n        # self.conv3 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2),\n        #                            nn.ReLU())\n        # self.linear1 = nn.Linear(2400, 64)\n        self.maxpool = nn.MaxPool2d(2, 2)\n        self.fc1 = nn.Linear(24480, 2048)\n        self.fc2 = nn.Linear(2048, 512)\n        self.fc3 = nn.Linear(512, config.num_classes)\n\n        self.dropout = nn.Dropout(p=0.5)\n        self.relu = nn.ReLU()\n        self.softmax = nn.Softmax(dim=1)\n\n    def forward(self, input):\n        x = self.conv1(input)\n        # x = self.maxpool(F.relu(self.conv1(x)))\n        x = self.conv2(x)\n        # x = self.conv3(x)\n        x = x.view(x.size(0), -1)\n        x = self.dropout(x)\n        x = self.relu(self.fc1(x))\n        x = self.dropout(x)\n        x = self.softmax(self.fc2(x))\n        return x\n\n# print(Model())\n","sub_path":"50种环境声音分类/src1/model/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"560010903","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.lang import Builder\nfrom server_api import get_product_info, get_product_reviews\nfrom custom_widgets import ProductReview\n\nBuilder.load_file('reviews_of.kv')\n\n\nclass ReviewsOf(Screen):\n\n    def _clear_reviews(self):\n        reviews = self.ids.product_reviews\n        header = self.ids.header\n        for child in reviews.children[:]:\n            if id(child.proxy_ref) != id(header.proxy_ref):\n                reviews.remove_widget(child)\n\n    def _reload_reviews(self):\n        self._clear_reviews()\n        product_reviews_views = [ProductReview(**review) for review in\n                                 get_product_reviews(self._product_id)]\n        for product_review_view in product_reviews_views:\n            self.ids.product_reviews.add_widget(product_review_view)\n\n    def _on_enter(self, instance_toolbar, instance_program, instance_screenmanager):\n        instance_toolbar.title = '{}: {}'.format('Reviews', instance_program.selected_product_name)\n        # self.title = instance_toolbar.title\n        instance_toolbar.left_action_items = [\n            ['chevron-left', lambda x: instance_program.back_screen(\n                self.name)]\n        ]\n\n        self._product_id = instance_program.selected_product_id\n        self.ids.product_name.text = instance_program.selected_product_name\n        self.ids.picture.source = instance_program.selected_product_source\n        self.ids.rate_bar.value = instance_program.selected_product_rate\n        self.ids.av_rate.text = 'Rate: {}'.format(self.ids.rate_bar.value / 20)\n        rev_count = instance_program.selected_product_revs\n        self.ids.rev_count.text = '{} reviews'.format(rev_count) if rev_count != 1 else '1 review'\n        self._reload_reviews()\n\n    def create_new_review(self, app):\n        if not app._logged_in:\n            app.show_error_dialog('You must login before leaving the review!')\n        else:\n            app.screen_root_manager.current = 'Review'\n","sub_path":"reviews_of.py","file_name":"reviews_of.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"446796073","text":"import pandas as pd\n\nfrom get2017 import get2017\nfrom get2018 import get2018\nfrom get2019 import get2019\nfrom processer import processer\nfrom to_sql import to_sql\n\ndef get861M():\n\tdf_2017 = get2017()\n\tdf_2018 = get2018()\n\tdf_2019 = get2019()\n\t#df_2011 = processer(df_2011)\n\t#df_2012 = processer(df_2012)\n\t#df_2013 = processer(df_2013)\n\t#df_2014 = processer(df_2014)\n\t#df_2015 = processer(df_2015)\n\t#df_2016 = processer(df_2016)\n\t#df_2017 = processer(df_2017)\n\t#df_2018 = processer(df_2018)\n\n\tdf_combined = pd.concat([df_2017, df_2018, df_2019])\n\tprint(df_combined.head())\n\tdf = processer(df_combined)\n\tdf_combined.loc[:, 'value'] = [pd.to_numeric(x, errors='coerce') if isinstance(x, str) else float(x) for x in df_combined['value']]\n\tif(df.empty):\n\t\tprint(\"Error with dataframe. Listed as empty.\")\n\telse:\n\t\t#df.to_excel('/mnt/c/Users/AHolm/SEIA/OneDrive - SEIA/codebin/datasources/SEIA_DB/Outputs/eia826_2019Q1_NonNEM_v1.xlsx')\n\t\tto_sql(df)\n\t\tprint(df.head(), '\\n', df.tail())\n\t\tprint(\"Successfully updated table markets.eia_826_nonnem\")\n\treturn\n\nif __name__ == '__main__':\n\tget861M()\n","sub_path":"scripts/nonnem/get861M_NonNEM.py","file_name":"get861M_NonNEM.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"239785330","text":"# -*- coding: utf-8 -*-\n#\nimport os\n\nimport meshio\nimport numpy\n\n# In general:\n# Use values with an infinite decimal representation to test precision.\n\ntri_mesh = {\n    'points': numpy.array([\n        [0.0, 0.0, 0.0],\n        [1.0, 0.0, 0.0],\n        [1.0, 1.0, 0.0],\n        [0.0, 1.0, 0.0]\n        ]) / 3.0,\n    'cells': {\n        'triangle': numpy.array([\n            [0, 1, 2],\n            [0, 2, 3]\n            ])\n        },\n    }\n\nquad_mesh = {\n    'points': numpy.array([\n        [0.0, 0.0, 0.0],\n        [1.0, 0.0, 0.0],\n        [2.0, 0.0, 0.0],\n        [2.0, 1.0, 0.0],\n        [1.0, 1.0, 0.0],\n        [0.0, 1.0, 0.0]\n        ]) / 3.0,\n    'cells': {\n        'quad': numpy.array([\n            [0, 1, 4, 5],\n            [1, 2, 3, 4]\n            ])\n        },\n    }\n\ntri_quad_mesh = {\n    'points': numpy.array([\n        [0.0, 0.0, 0.0],\n        [1.0, 0.0, 0.0],\n        [2.0, 0.0, 0.0],\n        [2.0, 1.0, 0.0],\n        [1.0, 1.0, 0.0],\n        [0.0, 1.0, 0.0]\n        ]) / 3.0,\n    'cells': {\n        'triangle': numpy.array([\n            [0, 1, 4],\n            [0, 4, 5]\n            ]),\n        'quad': numpy.array([\n            [1, 2, 3, 4]\n            ])\n        }\n    }\n\ntet_mesh = {\n    'points': numpy.array([\n        [0.0, 0.0, 0.0],\n        [1.0, 0.0, 0.0],\n        [1.0, 1.0, 0.0],\n        [0.0, 1.0, 0.0],\n        [0.5, 0.5, 0.5],\n        ]) / 3.0,\n    'cells': {\n        'tetra': numpy.array([\n            [0, 1, 2, 4],\n            [0, 2, 3, 4]\n            ])\n        },\n    }\n\n\ndef _clone(mesh):\n    mesh2 = {\n        'points': numpy.copy(mesh['points'])\n        }\n    mesh2['cells'] = {}\n    for key, data in mesh['cells'].items():\n        mesh2['cells'][key] = numpy.copy(data)\n    return mesh2\n\n\ndef add_point_data(mesh, dim):\n    numpy.random.seed(0)\n    mesh2 = _clone(mesh)\n\n    if dim == 1:\n        data = numpy.random.rand(len(mesh['points']))\n    else:\n        data = numpy.random.rand(len(mesh['points']), dim)\n\n    mesh2['point_data'] = {'a': data}\n    return mesh2\n\n\ndef add_cell_data(mesh, dim):\n    mesh2 = _clone(mesh)\n    numpy.random.seed(0)\n    cell_data = {}\n    for cell_type in mesh['cells']:\n        num_cells = len(mesh['cells'][cell_type])\n        if dim == 1:\n            cell_data[cell_type] = {\n                'b': numpy.random.rand(num_cells)\n                }\n        else:\n            cell_data[cell_type] = {\n                'b': numpy.random.rand(num_cells, dim)\n                }\n\n    mesh2['cell_data'] = cell_data\n    return mesh2\n\n\ndef write_read(filename, file_format, mesh, atol):\n    '''Write and read a file, and make sure the data is the same as before.\n    '''\n    try:\n        input_point_data = mesh['point_data']\n    except KeyError:\n        input_point_data = {}\n\n    try:\n        input_cell_data = mesh['cell_data']\n    except KeyError:\n        input_cell_data = {}\n\n    meshio.write(\n        filename,\n        mesh['points'], mesh['cells'],\n        file_format=file_format,\n        point_data=input_point_data,\n        cell_data=input_cell_data\n        )\n    points, cells, point_data, cell_data, _ = \\\n        meshio.read(filename, file_format)\n\n    # Numpy's array_equal is too strict here, cf.\n    # .\n    # Use allclose.\n\n    # We cannot compare the exact rows here since the order of the points might\n    # have changed. Just compare the sums\n    assert numpy.allclose(mesh['points'], points, atol=atol, rtol=0.0)\n\n    for cell_type, data in mesh['cells'].items():\n        assert numpy.allclose(data, cells[cell_type])\n    for key in input_point_data.keys():\n        assert numpy.allclose(\n            input_point_data[key], point_data[key],\n            atol=atol, rtol=0.0\n            )\n    for cell_type, cell_type_data in input_cell_data.items():\n        for key, data in cell_type_data.items():\n            assert numpy.allclose(\n                data, cell_data[cell_type][key],\n                atol=atol, rtol=0.0\n                )\n\n    os.remove(filename)\n    return\n","sub_path":"test/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"418604986","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport csv\nimport json\nimport logging\nimport os\n\nfrom django.contrib.auth.models import User, UserManager\nfrom django.db import migrations, models\nimport pytz\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('db', '0003_db_migration_prep'),\n        ('auth', '0001_initial')\n    ]\n\n    operations = [\n        # NOTE: placing these schema migrations here for convenience (so that\n        # they do not conflict with data migrations in 0003, 0004, 0007)\n        migrations.AddField(\n            model_name='screensaveruser',\n            name='lab_head',\n            field=models.ForeignKey(\n                related_name='lab_members', to='db.ScreensaverUser', null=True,\n                on_delete=models.deletion.SET_NULL),\n        ),\n        migrations.AddField(\n            model_name='screensaveruser',\n            name='lab_affiliation',\n            field=models.ForeignKey(related_name='lab_heads', \n                to='db.LabAffiliation', null=True,\n                on_delete=models.deletion.SET_NULL),\n        ),\n#         migrations.AddField(\n#             model_name='screensaveruser',\n#             name='rnai_data_sharing_level',\n#             field=models.IntegerField(null=True),\n#         ),\n#         migrations.AddField(\n#             model_name='screensaveruser',\n#             name='sm_data_sharing_level',\n#             field=models.IntegerField(null=True),\n#         ),\n        # 20170918; stashing here, to avoid sql pending trigger error in 0003\n        migrations.RemoveField(\n            model_name='screen',\n            name='transfection_agent',\n        ),\n        migrations.RenameField(\n            model_name='screen', \n            old_name='transfection_agent_text', \n            new_name='transfection_agent'\n        ),\n        \n        \n    ]\n","sub_path":"db/migrations/0004_postprep.py","file_name":"0004_postprep.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"211984308","text":"import unittest\nimport base\nimport utils\n\nsymbols = ['GOOGL', 'IWO', 'VFINX', '^GSPC', 'BTC-USD', 'SFL']\n\nclass TestGenericPatterns(unittest.TestCase):\n    # random testing to make sure info is working \n    def test_info(self):\n        stocks = [base.TickerBase(symbol) for symbol in symbols]\n        for stock in stocks:\n            info = stock.get_info()\n            self.assertIsNotNone(info)\n\n    # testing if MSFT contains maxAge\n    def test_msft_info(self):\n        msft = base.TickerBase(ticker=\"MSFT\")\n        info = msft.get_info()\n        self.assertTrue(info['maxAge'])\n\n    # testing if HTOO contains maxAge\n    def test_htoo_info(self):\n        htoo = base.TickerBase(ticker=\"HTOO\")\n        info = htoo.get_info()\n        self.assertTrue(info['maxAge'])\n    \n    # testing if BNKO contains maxAge\n    def test_bnko_info(self):\n        bnko = base.TickerBase(ticker=\"BNKO\")\n        info = bnko.get_info()\n        self.assertTrue(info['maxAge'])\n\n    # testing if ACGL contains cashflowStatements\n    def test_ACGL_info(self):\n        acgl = base.TickerBase(ticker=\"ACGL\")\n        info = acgl.get_info()\n        # if info doesn't fail then it worked\n        self.assertIsNotNone(info)\n    \n    # testing if SFL contains cashflowStatements\n    def test_SFL_info(self):\n        SFL = base.TickerBase(ticker=\"SFL\")\n        info = SFL.get_info()\n        # if info doesn't fail then it worked\n        self.assertIsNotNone(info)\n\nif __name__ == \"__main__\":\n    unittest.main() ","sub_path":"yfinance/test_base_walker.py","file_name":"test_base_walker.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"207383464","text":"#\n# @lc app=leetcode.cn id=121 lang=python3\n#\n# [121] Best Time to Buy and Sell Stock\n#\n# https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/description/\n#\n# algorithms\n# Easy (50.41%)\n# Likes:    515\n# Dislikes: 0\n# Total Accepted:    69.7K\n# Total Submissions: 137.9K\n# Testcase Example:  '[7,1,5,3,6,4]'\n#\n# Given an array where the i-th element is the price of a given stock on day i.\n#\n# If you are only permitted to complete at most one transaction (i.e., buy and sell one share of the stock), design an algorithm to find the maximum profit.\n#\n# Note that you cannot sell a stock before you buy one.\n#\n# Example 1:\n#\n# Input: [7,1,5,3,6,4]\n# Output: 5\n# Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), max profit = 6-1 = 5.\n# Note that the profit cannot be 7-1 = 6, because the selling price must be greater than the buying price.\n#\n#\n# Example 2:\n#\n# Input: [7,6,4,3,1]\n# Output: 0\n# Explanation: In this case, no transaction is done, so the max profit is 0.\n#\n# We can maintain two variables, minprice and maxprofit,\n# which track the lowest valley so far and the maximum profit so far\n# (the largest difference between the selling price and the minimum price)\n#\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        if (len(prices)<=1):\n            return 0\n        min_p=prices[0]\n        max_p=0\n        for p in prices:\n            min_p= min(min_p, p)\n            max_p= max(max_p, p - min_p)\n        return max_p\n\n","sub_path":"121.买卖股票的最佳时机.py","file_name":"121.买卖股票的最佳时机.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"169206574","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2016, wradlib Development Team. All Rights Reserved.\n# Distributed under the MIT License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n#!/usr/bin/env python\n\nfrom PyQt4 import QtGui, QtCore\n\n# other wradvis imports\nfrom wradvis.glcanvas import RadolanCanvas, ColorbarCanvas\nfrom wradvis.properties import PropertiesWidget\nfrom wradvis import utils\n\n\nclass MainWindow(QtGui.QMainWindow):\n\n    def __init__(self, parent=None):\n        super(MainWindow, self).__init__(parent)\n\n        self.resize(825, 500)\n        self.setWindowTitle('RADOLAN Viewer')\n        self._need_canvas_refresh = False\n\n        self.timer = QtCore.QTimer()\n        self.timer.timeout.connect(self.reload)\n\n        # initialize RadolanCanvas\n        self.canvas = RadolanCanvas()\n        self.canvas.create_native()\n        self.canvas.native.setParent(self)\n\n        # need some tracer for the mouse position\n        self.canvas.mouse_moved.connect(self.mouse_moved)\n\n        # add ColorbarCanvas\n        self.canvas_cb = ColorbarCanvas()\n        self.canvas_cb.create_native()\n        self.canvas_cb.native.setParent(self)\n\n        # add PropertiesWidget\n        self.props = PropertiesWidget()\n        self.props.signal_slider_changed.connect(self.slider_changed)\n        self.props.signal_playpause_changed.connect(self.start_stop)\n        self.props.signal_speed_changed.connect(self.speed)\n\n        # add Horizontal Splitter andd the three widgets\n        splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n        splitter.addWidget(self.props)\n        splitter.addWidget(self.canvas.native)\n        splitter.addWidget(self.canvas_cb.native)\n        self.setCentralWidget(splitter)\n\n        # finish init\n        self.slider_changed()\n\n    def reload(self):\n        if self.props.slider.value() == self.props.slider.maximum():\n            self.props.slider.setValue(1)\n        else:\n            self.props.slider.setValue(self.props.slider.value() + 1)\n\n    def start_stop(self):\n        if self.timer.isActive():\n            self.timer.stop()\n        else:\n            self.timer.start()\n\n    def speed(self):\n        self.timer.setInterval(self.props.speed.value())\n\n    def slider_changed(self):\n        self.data, self.meta = utils.read_radolan(self.props.filelist[self.props.actualFrame])\n        scantime = self.meta['datetime']\n        self.props.sliderLabel.setText(scantime.strftime(\"%H:%M\"))\n        self.props.date.setText(scantime.strftime(\"%Y-%m-%d\"))\n        self.canvas.image.set_data(self.data)\n        self.canvas.update()\n\n    def mouse_moved(self, event):\n        self.props.show_mouse(self.canvas._mouse_position)\n\n\ndef start(arg):\n    appQt = QtGui.QApplication(arg.argv)\n    win = MainWindow()\n    win.show()\n    appQt.exec_()\n\nif __name__ == '__main__':\n    print('wradview: Calling module as main...')\n","sub_path":"wradvis/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"352590315","text":"from BlackJack.table import Table\r\nfrom BlackJack.betstrategy import BettingStrategy\r\nfrom BlackJack.gamestrategy import GameStrategy\r\nclass ValidPlayer:\r\n    def __init__(self, table, bet_strategy, game_strategy):\r\n        assert isinstance(table, Table)\r\n        assert isinstance(bet_strategy, BettingStrategy)\r\n        assert isinstance(game_strategy, GameStrategy)\r\n        self.bet_strategy = bet_strategy\r\n        self.game_strategy = game_strategy\r\n        self.table = table\r\n        ","sub_path":"BlackJack/validplayer.py","file_name":"validplayer.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"139809426","text":"import numpy as np\nfrom time import time\nfrom math import sqrt, pow\n#Get list of active taxels per bounding box on the image, create an array of the taxel center\ndef bb_active_taxel (bb_number, T, bb_predictions_reshaped, TIB, skin_faces):\n    taxel_predictions, pixel_positions,taxel_predictions_info = np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object)\n    for n in range(bb_number):\n        faces_predictions, pixel_position, info = [], [], []\n        cols = range(bb_predictions_reshaped[n].coordinates_reshaped[0], bb_predictions_reshaped[n].coordinates_reshaped[2])\n        rows = range(bb_predictions_reshaped[n].coordinates_reshaped[1], bb_predictions_reshaped[n].coordinates_reshaped[3])\n\n        for i in cols:\n            for j in rows:\n                face_index = TIB.get_pixel_face_index( i, j)\n                if face_index == (-1) or face_index >= 1218: #checking that taxels are within bounds\n                    break\n                #Pixel_Position\n                pos_on_map = TIB.get_pixel_position_on_map(i, j)\n                pixel_pos = T.back_project_point(pos_on_map, face_index)\n                pixel_position.append(pixel_pos) \n\n                #Taxel_IDs_from_faces\n                faces_predictions.append(skin_faces[face_index][0])\n                faces_predictions.append(skin_faces[face_index][1])\n                faces_predictions.append(skin_faces[face_index][2])\n\n        taxel_predictions[n] = set(faces_predictions) #set removes duplicates\n        pixel_positions[n] = pixel_position\n        #Prediction info\n        info.append(bb_predictions_reshaped[n].label)\n        info.append(bb_predictions_reshaped[n].confidence)\n        info.append(len(set(faces_predictions)))\n        taxel_predictions_info[n] = info #this is the name, conf and # active taxels per prediction\n    return taxel_predictions, pixel_positions, taxel_predictions_info\n\n#Get total data for the all the taxels and bounding boxes\ndef get_total_data(bb_number, S, T, taxel_predictions):\n\n    total_taxel_responses = [[S.taxels[i].get_taxel_response() for i in taxel_predictions[n]] for n in range(bb_number)] \n    total_taxels_3D_position = [[S.taxels[i].get_taxel_position()for i in taxel_predictions[n]] for n in range(bb_number)] \n    total_taxel_normals = [[S.taxels[i].get_taxel_normal() for i in taxel_predictions[n]] for n in range(bb_number)] \n    total_taxels_2D_position = [[T.taxels[i].get_taxel_position() for i in taxel_predictions[n]] for n in range(bb_number)] \n\n    return total_taxel_responses, total_taxels_3D_position, total_taxel_normals , total_taxels_2D_position\n\n#AVERAGE RESPONSES including taxels with 0 response\ndef get_average_response_per_BB(bb_number, total_taxel_responses, taxel_predictions_info):\n    average_responses = [(sum(total_taxel_responses[n])/taxel_predictions_info[n][2]) for n in range(bb_number) if (len(total_taxel_responses[n]) != 0)]\n    return average_responses\n\n#2D AND 3D CENTROID OF BB\ndef get_bb_centroids(bb_number,S,T, total_taxels_2D_position, taxel_coords):\n    bb_centroid2d, bb_centroid3d = np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object)\n    for n in range(bb_number):\n        average_position = [0.0,0.0,0.0]\n        if len(total_taxels_2D_position[n]) != 0:\n            for i,val in enumerate(total_taxels_2D_position[n]):\n                average_position[0] = average_position[0] + val[0]\n                average_position[1] = average_position[1] + val[1]\n                average_position[2] = average_position[2] + val[2] #z should be 0 anyway\n            average_position[0] = average_position[0] / len(total_taxels_2D_position[n])\n            average_position[1] = average_position[1] / len(total_taxels_2D_position[n])\n            average_position[2] = average_position[2] / len(total_taxels_2D_position[n])\n            bb_centroid2d[n]=average_position\n            #used for projecting a 2D centroid on the tactile map to a 3D point\n            bb_centroid3d[n] = back_project_centroid(S, T, bb_centroid2d[n], taxel_coords) \n        else:\n            bb_centroid2d[n] = []\n            bb_centroid3d[n] = [] \n    return bb_centroid2d, bb_centroid3d\n\n#BB NORMALS, i put the minus here\ndef get_bb_average_normals(bb_number,total_taxel_normals):\n    bb_normal = np.empty((bb_number,), dtype = object)\n    #AVERAGE NORMAL\n    for n in range(bb_number):\n        average_normal = [0.0,0.0,0.0]\n        if len(total_taxel_normals[n]) != 0:\n            for i, val in enumerate(total_taxel_normals[n]):\n                average_normal[0] = average_normal[0] - val[0] #on the x, it is going to be 0 of course\n                average_normal[1] = average_normal[1] - val[1]\n                average_normal[2] = average_normal[2] - val[2]\n            average_normal[0] = average_normal[0] / len(total_taxel_normals[n])\n            average_normal[1] = average_normal[1] / len(total_taxel_normals[n])\n            average_normal[2] = average_normal[2] / len(total_taxel_normals[n])\n\n            bb_normal[n] = average_normal\n            #print(\"Position of Centroid\", taxel_predictions_info[n][0], \"is\", bb_centroid[n])\n        else:\n            bb_normal[n] = [] \n    return bb_normal\n\n#BACK PROJECT A POINT FROM 2D MAP TO 3D\ndef back_project_centroid(S, T, bb_centroid2d, taxel_coords):\n    #initializing\n    centroid_3d, P, B, C = [0.0,0.0,0.0], [0.0,0.0], [0.0,0.0], [0.0,0.0]\n\n    #finding the indexes of the 3 closest points, with numpy is very fast\n    difference = np.subtract(taxel_coords, bb_centroid2d)\n    diff_pow2 = np.square(difference)\n    diff_sum = np.sum(diff_pow2, axis=1)\n    diff_squared = np.square(diff_sum)\n    minimum_indexes = diff_squared.argsort()[:3]\n\n    a, b, c = T.taxels[minimum_indexes[0]].get_taxel_position(), T.taxels[minimum_indexes[1]].get_taxel_position(), T.taxels[minimum_indexes[2]].get_taxel_position()\n    \n    #Compute the cofficents of the convex combination\n    P[0], P[1], B[0], B[1], C[0], C[1] = bb_centroid2d[0]-a[0], bb_centroid2d[1]-a[1], b[0]-a[0], b[1]-a[1], c[0]-a[0], c[1]-a[1]\n    \n    d = B[0]*C[1] - C[0]*B[1]\n    wa, wb, wc = (P[0]*(B[1]-C[1]) + P[1]*(C[0]-B[0]) + B[0]*C[1] - C[0]*B[1]) / d, (P[0]*C[1] - P[1]*C[0]) / d, (P[1]*B[0] - P[0]*B[1]) / d\n\n    v1, v2, v3 = S.taxels[minimum_indexes[0]].get_taxel_position(), S.taxels[minimum_indexes[1]].get_taxel_position(), S.taxels[minimum_indexes[2]].get_taxel_position()\n\n    centroid_3d[0], centroid_3d[1], centroid_3d[2] = wa*v1[0] + wb*v2[0] + wc*v3[0], wa*v1[1] + wb*v2[1] + wc*v3[1], wa*v1[2] + wb*v2[2] + wc*v3[2]\n    \n    return centroid_3d\n","sub_path":"src/yolo_viz/src/functions/functions_taxel_data.py","file_name":"functions_taxel_data.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"90700131","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Yuki Furuta \n\nimport rospy\nimport message_filters\nimport tf2_ros\nimport tf2_geometry_msgs\nfrom geometry_msgs.msg import TransformStamped, Quaternion\nfrom jsk_topic_tools import ConnectionBasedTransport\nfrom jsk_recognition_msgs.msg import BoundingBoxArray\nfrom jsk_recognition_msgs.msg import ClassificationResult\nfrom posedetection_msgs.msg import ObjectDetection, Object6DPose\nimport numpy as np\nimport tf.transformations as T\n\n\nclass ColorHistogramDetector(ConnectionBasedTransport):\n    def __init__(self):\n        super(ColorHistogramDetector, self).__init__()\n\n        self.queue_size = rospy.get_param(\"~queue_size\", 100)\n\n        self.publish_tf = rospy.get_param(\"~publish_tf\", True)\n        if self.publish_tf:\n            self.tfb = tf2_ros.TransformBroadcaster()\n\n        self.align_box_pose = rospy.get_param(\"~align_box_pose\", True)\n        self.fixed_frame_id = rospy.get_param(\"~fixed_frame_id\", None)\n        if self.fixed_frame_id is not None:\n            self.tfl = tf2_ros.BufferClient(\"/tf2_buffer_server\")\n            if not self.tfl.wait_for_server(rospy.Duration(10)):\n                rospy.logerr(\"Failed to wait /tf2_buffer_server\")\n                self.fixed_frame_id = None\n\n        self.pub_detect = self.advertise(\"~output\", ObjectDetection, queue_size=1)\n        self.pub_detect_nearest = self.advertise(\"~output/nearest\", ObjectDetection, queue_size=1)\n\n    def subscribe(self):\n        self.subscribers = [\n            message_filters.Subscriber(\"~input/boxes\", BoundingBoxArray),\n            message_filters.Subscriber(\"~input/classes\", ClassificationResult),\n        ]\n        sync = message_filters.TimeSynchronizer(self.subscribers, self.queue_size)\n        sync.registerCallback(self.on_result)\n\n    def unsubscribe(self):\n        for s in self.subscribers:\n            s.unregister()\n\n    def align_box(self, box):\n        ori = box.pose.orientation\n        if ori.x < ori.y:\n            q = T.quaternion_multiply(\n                (ori.x, ori.y, ori.z, ori.w),\n                T.quaternion_from_euler(0, 0, np.pi / 2.0))\n            box.pose.orientation = Quaternion(*q)\n            box.dimensions.x, box.dimensions.y = box.dimensions.y, box.dimensions.x\n        return box\n\n    def get_nearest(self, boxes, classes):\n        nearest = {}\n        for box, label, proba in zip(boxes.boxes, classes.label_names, classes.label_proba):\n            if not label:\n                continue\n            if self.fixed_frame_id is not None:\n                try:\n                    ps = PoseStamped(header=box.header,\n                                     pose=box.pose)\n                    ps = self.tfl.transform(\n                        ps, self.fixed_frame_id)\n                    dist = np.sqrt(\n                        ps.pose.position.x ** 2 +\n                        ps.pose.position.y ** 2 +\n                        ps.pose.position.z ** 2)\n                except Exception as e:\n                    rospy.logerr(str(e))\n                    continue\n            else:\n                dist = np.sqrt(\n                    box.pose.position.x ** 2 +\n                    box.pose.position.y ** 2 +\n                    box.pose.position.z ** 2)\n            if label not in nearest:\n                nearest[label] = (box, proba, dist)\n            elif dist < nearest[label][2]:\n                nearest[label] = (box, proba, dist)\n        return nearest\n\n    def publish_box_tf(self, box, label):\n        pos, ori = (box.pose.position,\n                    box.pose.orientation)\n        t = TransformStamped()\n        t.header = box.header\n        t.child_frame_id = label\n        t.transform.translation.x = pos.x\n        t.transform.translation.y = pos.y\n        t.transform.translation.z = pos.z\n        t.transform.rotation.x = ori.x\n        t.transform.rotation.y = ori.y\n        t.transform.rotation.z = ori.z\n        t.transform.rotation.w = ori.w\n        self.tfb.sendTransform(t)\n\n    def on_result(self, boxes, classes):\n        msg = ObjectDetection()\n        msg.header = boxes.header\n        for box, label, proba in zip(boxes.boxes, classes.label_names, classes.label_proba):\n            if not label:\n                continue\n            if self.align_box_pose:\n                box = self.align_box(box)\n            pose = Object6DPose()\n            pose.pose = box.pose\n            pose.reliability = proba\n            pose.type = label\n            msg.objects.append(pose)\n        self.pub_detect.publish(msg)\n\n        msg = ObjectDetection()\n        msg.header = boxes.header\n        nearest = self.get_nearest(boxes, classes)\n        for label, (box, proba, dist) in nearest.items():\n            if self.align_box_pose:\n                box = self.align_box(box)\n            pose = Object6DPose()\n            pose.pose = box.pose\n            pose.reliability = proba\n            pose.type = label\n            msg.objects.append(pose)\n            if self.publish_tf:\n                self.publish_box_tf(box, label)\n        self.pub_detect_nearest.publish(msg)\n\nif __name__ == '__main__':\n    rospy.init_node(\"color_histogram_detector\")\n    detector = ColorHistogramDetector()\n    rospy.spin()\n","sub_path":"detect_cans_in_fridge_201202/scripts/color_histogram_detector.py","file_name":"color_histogram_detector.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"404261930","text":"import csv\nimport json\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom accounts.models import *\nfrom helpers.currency import arrange_decimal\nfrom .forms import *\nfrom .models import *\nfrom config import common\n\n\ndef index(request):\n    news_list = News.objects.all().order_by('-id')[:12]\n    show_coin_scroll = True\n    return render(request, 'bitcoin/index.html', {\n        'coin_info': common.COIN_INFO,\n        'coin_info_json': dict(common.COIN_INFO),\n        'exchange_info': common.EXCHANGE_INFO,\n        'exchange_info_json': dict(common.EXCHANGE_INFO),\n        'currency_info': common.CURRENCY_INFO,\n        'exchange_list': common.EXCHANGE_LIST,\n        'refresh_time': common.REFRESH_TIME,\n        'show_coin_scroll': show_coin_scroll,\n        'news_list': news_list,\n    })\n\n\ndef ping(request):\n    return HttpResponse('Ping!', content_type='text/plain')\n\n\ndef maintenance(request):\n    return render(request, 'bitcoin/maintenance.html', {\n    })\n\n\ndef altcoin(request):\n    return redirect('bitcoin:index')\n\n\ndef exchange(request):\n    news_list = News.objects.all().order_by('-id')[:12]\n    show_coin_scroll = False\n    return render(request, 'bitcoin/exchange.html', {\n        'coin_info': common.COIN_INFO,\n        'coin_info_json': dict(common.COIN_INFO),\n        'exchange_info': common.EXCHANGE_INFO,\n        'exchange_info_json': dict(common.EXCHANGE_INFO),\n        'currency_info': common.CURRENCY_INFO,\n        'exchange_list': common.EXCHANGE_LIST,\n        'refresh_time': common.REFRESH_TIME,\n        'show_coin_scroll': show_coin_scroll,\n        'news_list': news_list,\n    })\n\n\ndef marketcap(request):\n    news_list = News.objects.all().order_by('-id')[:12]\n    return render(request, 'bitcoin/marketcap.html', {\n        'refresh_time': common.REFRESH_TIME,\n        'news_list': news_list,\n        'table_count': range(200),\n    })\n\n\ndef news(request):\n    news_all = News.objects.all().order_by('-id')\n    paginator = Paginator(news_all, 18)\n    page = request.GET.get('page')\n    try:\n        news_list = paginator.page(page)\n    except PageNotAnInteger:\n        news_list = paginator.page(1)\n    except EmptyPage:\n        news_list = paginator.page(paginator.num_pages)\n\n    return render(request, 'bitcoin/news.html', {\n        'news_list': news_list,\n    })\n\n\n# def chart(request):\n#     chart_exchange = common.EXCHANGE_CODE\n#     chart_exchange_list = {}\n#     for k in chart_exchange:\n#         chart_exchange_list[common.EXCHANGE_CODE.index(k)+1] = k\n#     cform = ChartCurrencyForm\n#     pform = ChartPeriodForm\n#     eform = ChartExchangesForm\n#     return render(request, 'bitcoin/chart.html', {\n#         'cform': cform,\n#         'pform': pform,\n#         'eform': eform,\n#         'chart_exchange_list': chart_exchange_list,\n#     })\n#\n#\n# def chart_premium(request):\n#     chart_exchange = common.EXCHANGE_CODE\n#     chart_exchange_list = {}\n#     for k in chart_exchange:\n#         chart_exchange_list[common.EXCHANGE_CODE.index(k)+1] = k\n#     pform = ChartPeriodForm\n#     ceform = CriterionExchangesForm\n#     teform = TargetExchangesForm\n#     return render(request, 'bitcoin/chart_premium.html', {\n#         'pform': pform,\n#         'ceform': ceform,\n#         'teform': teform,\n#         'chart_exchange_list': chart_exchange_list,\n#     })\n#\n#\n# def chart_askbid(request):\n#     chart_exchange = common.EXCHANGE_CODE\n#     chart_exchange_list = {}\n#     for k in chart_exchange:\n#         chart_exchange_list[common.EXCHANGE_CODE.index(k)+1] = k\n#     cform = ChartCurrencyForm\n#     pform = ChartPeriodForm\n#     aform = AskExchangesForm\n#     bform = BidExchangesForm\n#     return render(request, 'bitcoin/chart_askbid.html', {\n#         'cform': cform,\n#         'pform': pform,\n#         'aform': aform,\n#         'bform': bform,\n#         'chart_exchange_list': chart_exchange_list,\n#     })\n\n\ndef donation(request):\n    return render(request, 'bitcoin/donation.html', {\n    })\n\n\ndef exchanges(request):\n    return render(request, 'bitcoin/exchanges.html', {\n    })\n\n\ndef faq(request):\n    return render(request, 'bitcoin/faq.html', {\n    })\n\n\ndef log_askbid(request, ask_exchange, bid_exchange, currency, period):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename=\"{0}_{1}_{2}_{3}.csv\"'.format(ask_exchange, bid_exchange, currency, period)\n    writer = csv.writer(response)\n\n    title = ['Datetime (UTC)', common.EXCHANGE_INFO[ask_exchange]['exchange_name'] + ' (Best Ask)', common.EXCHANGE_INFO[bid_exchange]['exchange_name'] + ' (Best Bid)']\n    writer.writerow(title)\n\n    if period == '1w':\n        log_result = LogOneHour.objects.all().order_by('-id')[:24*7]\n    elif period == '1m':\n        log_result = LogOneHour.objects.filter(Q(datetime__hour=0) | Q(datetime__hour=6) | Q(datetime__hour=12) | Q(datetime__hour=18)).order_by('-id')[:4*30]\n    elif period == 'all':\n        log_result = LogOneDay.objects.all().order_by('-id')\n\n    for r in log_result:\n        row = [r.datetime.strftime('%Y-%m-%d %H:%M')]\n        try:\n            ask_currency = common.EXCHANGE_INFO[ask_exchange]['currency']\n            rate = json.loads(r.rates)[currency + ask_currency][0]\n            json_loads = json.loads(r.best_ask)\n            best_ask = float(json_loads[ask_exchange]) / float(rate)\n            best_ask = arrange_decimal(best_ask, currency)\n            row.append(best_ask)\n        except:\n            row.append(None)\n        try:\n            bid_currency = common.EXCHANGE_INFO[bid_exchange]['currency']\n            rate = json.loads(r.rates)[currency + bid_currency][0]\n            json_loads = json.loads(r.best_bid)\n            best_bid = float(json_loads[bid_exchange]) / float(rate)\n            best_bid = arrange_decimal(best_bid, currency)\n            row.append(best_bid)\n        except:\n            row.append(None)\n        writer.writerow(row)\n\n    return response\n\n\ndef log_premium(request, criterion_exchange, target_exchange, period):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename=\"{0}_{1}_{2}.csv\"'.format(criterion_exchange, target_exchange, period)\n    writer = csv.writer(response)\n\n    title = ['Datetime (UTC)', 'Premium Percentage (' + common.EXCHANGE_INFO[criterion_exchange]['exchange_name'] + '/' + common.EXCHANGE_INFO[target_exchange]['exchange_name'] + ')']\n    writer.writerow(title)\n\n    if period == '1w':\n        log_result = LogOneHour.objects.all().order_by('-id')[:24*7]\n    elif period == '1m':\n        log_result = LogOneHour.objects.filter(Q(datetime__hour=0) | Q(datetime__hour=6) | Q(datetime__hour=12) | Q(datetime__hour=18)).order_by('-id')[:4*30]\n    elif period == 'all':\n        log_result = LogOneDay.objects.all().order_by('-id')\n\n    for r in log_result:\n        row = [r.datetime.strftime('%Y-%m-%d %H:%M')]\n        try:\n            json_loads = json.loads(r.converted_prices)\n            criterion_exchange_converted_price = float(json_loads[criterion_exchange])\n            target_exchange_converted_price = float(json_loads[target_exchange])\n            premium_percentage = ( target_exchange_converted_price - criterion_exchange_converted_price ) / 
criterion_exchange_converted_price * 100\n            premium_percentage = '%.2f' % premium_percentage\n            row.append(premium_percentage)\n        except:\n            row.append(None)\n        writer.writerow(row)\n\n    return response\n\n\ndef log_default(request, currency, period):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename=\"{0}_{1}.csv\"'.format(currency, period)\n    writer = csv.writer(response)\n\n    title = ['Datetime (UTC)']\n    for k, v in common.EXCHANGE_INFO.items():\n        title.append(v['exchange_name'])\n    writer.writerow(title)\n\n    if period == '1w':\n        log_result = LogOneHour.objects.all().order_by('-id')[:24*7]\n    elif period == '1m':\n        log_result = LogOneHour.objects.filter(Q(datetime__hour=0) | Q(datetime__hour=6) | Q(datetime__hour=12) | Q(datetime__hour=18)).order_by('-id')[:4*30]\n    elif period == 'all':\n        log_result = LogOneDay.objects.all().order_by('-id')\n\n    for r in log_result:\n        row = [r.datetime.strftime('%Y-%m-%d %H:%M')]\n        rate = json.loads(r.rates)['USD'+currency][0]\n        for k in common.EXCHANGE_INFO:\n            try:\n                converted_prices = json.loads(r.converted_prices)\n                converted_price = float(converted_prices[k]) * float(rate)\n                converted_price = arrange_decimal(converted_price, currency)\n                row.append(converted_price)\n            except:\n                row.append(None)\n        writer.writerow(row)\n\n    return response\n","sub_path":"bitcoin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"436154794","text":"#!/bin/python3\nimport pygame\nimport pygame.camera\nimport argparse\nimport sys\npygame.init()\npygame.camera.init()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file', help='The file to be retroized. Use \"webcam\" for a live stream.')\nparser.add_argument('-x', type=int, default=0, help='Starting x for rendering.')\nparser.add_argument('-y', type=int, default=0, help='Starting y for rendering.')\nparser.add_argument('-w', '--width', type=int, default=-1, help='Screen width. Use -1 for automatic sizing.')\nparser.add_argument('--height', type=int, default=-1, help='Screen height. 
Use -1 for automatic sizing.')\nparser.add_argument('-s', '--size', type=int, default=10, help='Set the size of the pixels.')\nparser.add_argument('-v', '--verbose', action='store_true', help='Toggle verbose output.')\nparser.add_argument('--flip', action='store_true', help='Toggle horizontal flipping.')\nparser.add_argument('-o', '--output', default='', help='Output file.')\nargs = parser.parse_args()\n\nwebcam = pygame.camera.Camera(pygame.camera.list_cameras()[0])\nwebcam.start()\n\nif args.file == 'webcam':\n    if args.width == -1:\n        width = webcam.get_image().get_size()[0]\n    else:\n        width = args.width\n    if args.height == -1:\n        height = webcam.get_image().get_size()[1]\n    else:\n        height = args.height\nelse:\n    image = pygame.image.load(args.file)\n    if args.width == -1:\n        width = image.get_size()[0]\n    else:\n        width = args.width\n    if args.height == -1:\n        height = image.get_size()[1]\n    else:\n        height = args.height\npixel_width, pixel_height = args.size, args.size\nscreen = pygame.display.set_mode((width, height), 0, 32)\n\nwhile True:\n    screen.fill((0, 0, 0))\n    if args.file == 'webcam':\n        image = webcam.get_image()\n    if args.flip:\n        image = pygame.transform.flip(image, True, False)\n    pixels = pygame.PixelArray(image)\n    for y in range(args.y, height, pixel_height):\n        for x in range(args.x, width, pixel_width):\n            pixel = pixels[x, y]\n            pygame.draw.rect(screen, pixel, (x, y, pixel_width, pixel_height))\n    if args.output != '':\n        try:\n            pygame.image.save(screen, args.output)\n        except:\n            f = open(args.output, 'wb')\n            f.close()\n            pygame.image.save(screen, args.output)\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit(0)\n    pygame.display.flip()\n","sub_path":"retroizer.py","file_name":"retroizer.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"44273156","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n### import libraries\nimport numpy as np\nimport pandas as pd\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport seaborn as sns\n\n\n# In[2]:\n\n\nservice311 = pd.read_csv (r'C:\\Users\\Milind\\Desktop\\Business Analyst\\LVC\\Python\\Python for Data Science\\311_Service_Requests_from_2010_to_Present.csv')\n\n\n# In[3]:\n\n\nservice311.head()\n\n\n# In[4]:\n\n\nservice311.shape\n\n\n# In[5]:\n\n\nservice311.columns\n\n\n# In[7]:\n\n\nservice311['Complaint Type'].unique()\n\n\n# In[8]:\n\n\nservice311['Descriptor'].unique()\n\n\n# In[9]:\n\n\n\ncomplaintTypecity = pd.DataFrame({'count':\n                    service311.groupby(['Complaint Type','City']).size()}).reset_index()\ncomplaintTypecity\n\n\n# In[10]:\n\n\nservice311.groupby(['Borough','Complaint Type','Descriptor']).size()\n\n\n# In[11]:\n\n\nimport datetime\n\n\n# In[13]:\n\n\ndf = pd.read_csv(r'C:\\Users\\Milind\\Desktop\\Business Analyst\\LVC\\Python\\Python for Data Science\\311_Service_Requests_from_2010_to_Present.csv', parse_dates=[\"Created Date\", \"Closed Date\"])\n\n\n# In[14]:\n\n\ndf[\"Request_Closing_Time\"] = df[\"Closed Date\"] - df[\"Created Date\"]\n\n\n# In[15]:\n\n\n#Have a look at the status of tickets\ndf['Status'].value_counts().plot(kind='bar',alpha=0.6,figsize=(7,7))\nplt.show()\n\n\n# In[16]:\n\n\n#Complaint type Breakdown with bar plot to figure out majority of complaint types and top 10 complaints\nservice311['Complaint Type'].value_counts().head(10).plot(kind='barh',figsize=(5,5));\n\n\n# 
In[17]:\n\n\nservice311.groupby([\"Borough\",\"Complaint Type\",\"Descriptor\"]).size()\n\n\n# In[18]:\n\n\nmajorcomplints=service311.dropna(subset=[\"Complaint Type\"])\nmajorcomplints=service311.groupby(\"Complaint Type\")\n\nsortedComplaintType = majorcomplints.size().sort_values(ascending = False)\nsortedComplaintType = sortedComplaintType.to_frame('count').reset_index()\n\nsortedComplaintType\nsortedComplaintType.head(10)\n\n\n# In[19]:\n\n\nsortedComplaintType = sortedComplaintType.head()\nplt.figure(figsize=(5,5))\nplt.pie(sortedComplaintType['count'],labels=sortedComplaintType[\"Complaint Type\"], autopct=\"%1.1f%%\")\nplt.show()\n\n\n# In[20]:\n\n\n#Group dataset by complaint type to display plot against city\ngroupedby_complainttype = df.groupby('Complaint Type')\n\n\n# In[21]:\n\n\ngrp_data = groupedby_complainttype.get_group('Blocked Driveway')\ngrp_data.shape\n\n\n# In[22]:\n\n\n#To get nan values in the entire dataset\ndf.isnull().sum()\n\n\n# In[23]:\n\n\n#fix blank values in City column\ndf['City'].dropna(inplace=True)\n\n\n# In[24]:\n\n\n#Shape after dropping nan values\ndf['City'].shape\n\n\n# In[25]:\n\n\n#count of null values in grouped city column data\ngrp_data['City'].isnull().sum()\n\n\n# In[26]:\n\n\n#fix those NAN with \"unknown city\" value instead\ngrp_data['City'].fillna('Unknown City', inplace =True)\n\n\n# In[27]:\n\n\n#Scatter plot displaying all the cities that raised complaint of type 'Blocked Driveway'\nplt.figure(figsize=(20, 15))\nplt.scatter(grp_data['Complaint Type'],grp_data['City'])\nplt.title('Plot showing list of cities that raised complaint of type Blocked Driveway')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Project NYC.py","file_name":"Project NYC.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"488835778","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport time\n\nprint('Vjezba 29-02-2020 -> 01-03-2020')\ntime.sleep(3)\nos.system('clear')\n\n#AV1DSIO-0\n#----------------------------------\nime = str(input('Unesi ime: '))\nprezime = str(input('Unesi prezime: '))\n\nprint('Zdravo,', ime, prezime, sep=' ')\n#----------------------------------\ntime.sleep(3)\nos.system('clear')\n\n#AV1DSIO-1\n#----------------------------------\nuCV1 = int(input('Unesi prvu vrijednost: '))\nuCV2 = int(input('Unesi drugu vrijednost: '))\n\n# unosCjelobrojneVrijednosti1 = uCV1\n# unosCjelobrojneVrijednosti2 = uCV2\n# P = povrsina\n# O = obim\n\nP = uCV1*uCV2\nO = 2*(uCV1+uCV2)\n\nprint('Povrsina pravougaonika je:', P)\nprint('Obim pravougaonika je:', O)\n#----------------------------------\ntime.sleep(3)\nos.system('clear')\n\n#AV1DSIO-2\n#----------------------------------\nimport math as m\n\nr = float(input('Unesi poluprecnik kruga: '))\n\n#Povrsina = P\nP = m.pow(r, 2)*m.pi\n#Obim = O\nO = 2*r*m.pi\n\n#fun fact: pi=3.1416...\n\n#ovdje formatiraj P rez\n#print \"{:10.4f}\".format(x)\nprint('Povrsina kruga je: {:6.2f}'.format(P))\n#ovdje formatiraj O rez\nprint('Obim kruga je: {:6.2f}'.format(O))\n#----------------------------------\ntime.sleep(3)\nos.system('clear')\n\n#AV1DSIO-3\n#----------------------------------\nimport math as m\n\na = int(input('Unesi stranicu a trokuta: '))\nb = int(input('Unesi stranicu b trokuta: '))\nc = int(input('Unesi stranicu c trokuta: '))\n\nS = (a+b+c)/2\n\nR = m.sqrt(S*(S-a)*(S-b)*(S-c))\n\nprint('Povrsina trokuta je: 
{:6.2f}'.format(R))\n#----------------------------------\ntime.sleep(3)\nos.system('clear')\n\n#AV1DSIO-4\n#----------------------------------\na = int(input('Prvi unos: '))\nb = int(input('Prvi unos: '))\n\n#print prije swapa za test\nprint(a)\nprint(b)\n\na,b=b,a\n\n#print poslije swapa za test\nprint(a)\nprint(b)\n#----------------------------------\ntime.sleep(3)\nos.system('clear')\n","sub_path":"sumaSumarum.py","file_name":"sumaSumarum.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43739442","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.conf.urls import (\n include,\n url)\n\nfrom availableworks.art_item import views\n\nurlpatterns = [\n url(r'^(?P[0-9]+)/$',\n views.art_item,\n name = 'art-item')\n]","sub_path":"availableworks/art_item/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"115477304","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport requests\nfrom xml.dom import minidom\nfrom urllib.parse import urlparse\nimport copy\nimport urllib\nimport time\nimport re\nimport os\nimport random\nimport json\nfrom sqlconf.config import BOUNDED_INJECTION_MARKER\nfrom sqlconf.config import DEFAULT_RADIO\nfrom sqlihelper import getPagesRatio, getFlagString\nfrom sqlconf.config import SKIP_PARAMETERS\nfrom sqlconf.config import RANDOM_INT_STR1, RANDOM_INT_STR2\nfrom utils.function import trimResponseTag\n\nsqliboolfile = os.path.join(\"sqlconf\", 'sqlibool.xml')\nsqlierrorfile = os.path.join(\"sqlconf\", 'sqlierror.xml')\nsqlitimefile = os.path.join(\"sqlconf\", 'sqlitime.xml')\n\n'''\npost json格式 upload格式未做判断\n\n'''\n\nclass SqliCheck(object):\n def __init__(self, url, method=\"\", postdata={}, headers=[], skip=[], scanlevel=1, callbacks=''):\n self.url = url\n self.postdata = postdata # 表单的数据\n self.method = method\n self.skip = skip # 忽略的参数\n self.headers = headers\n self.isSQLI = False\n self.scanlevel = scanlevel\n self.post_pattern = 'normal' # json upload post格式\n self.sqliPayload = '' # 注入使用的payload\n self.sqliParam = '' # 存在注入参数\n self.randomint1 = str(random.randint(10, 30))\n self.randomint2 = str(random.randint(40, 60))\n \n def get_url_params(self): # get 参数解析器\n self.get_url,self.params = self.url.split('?')\n self.params = self.params.split('&')\n \n\n def post_params(self): # post 参数解析器\n try:\n self.params = json.loads(self.postdata)\n self.post_pattern = 'json'\n except ValueError:\n self.params = self.postdata.split('&')\n\n def sqlibool(self):\n root = minidom.parse(sqliboolfile).documentElement\n # 扫描等级判断\n for node in root.getElementsByTagName('couple'):\n if self.scanlevel >= int(node.getAttribute('id')):\n for compare in node.getElementsByTagName(\"compare\"):\n compare1 = compare.getElementsByTagName(\n \"compare1\")[0].childNodes[0].nodeValue\n compare2 = compare.getElementsByTagName(\n \"compare2\")[0].childNodes[0].nodeValue\n if self.method == \"POST\":\n for parm in self.params:\n if self.post_pattern == 'normal' or self.post_pattern == 'json':\n sql_data1 = self.postdata.replace(parm,parm+compare1).replace(\n RANDOM_INT_STR2, self.randomint2).replace(RANDOM_INT_STR1, self.randomint1)\n sql_data2 = self.postdata.replace(parm,parm+compare2).replace(\n RANDOM_INT_STR2, self.randomint2).replace(RANDOM_INT_STR1, self.randomint1)\n html1 = requests.post(url=self.url,data=sql_data1,headers=self.headers).text\n html2 = 
requests.post(url=self.url,data=sql_data2,headers=self.headers).text\n                                flagString = getFlagString(html1, html2)\n                                if flagString:\n                                    sql_data3 = self.postdata.replace(parm,parm+compare1).replace(RANDOM_INT_STR2, str(\n                                        random.randint(81, 99))).replace(RANDOM_INT_STR1, str(random.randint(61, 80)))\n                                    sql_data4 = self.postdata.replace(parm,parm+compare2).replace(RANDOM_INT_STR2, str(\n                                        random.randint(81, 99))).replace(RANDOM_INT_STR1, str(random.randint(61, 80)))\n                                    html3 = requests.post(url=self.url,data=sql_data3,headers=self.headers).text\n                                    html4 = requests.post(url=self.url,data=sql_data4,headers=self.headers).text\n                                    if (flagString in html3 and flagString not in html4) or (flagString not in html3 and flagString in html4):\n                                        self.isSQLI = True\n                                        self.sqliPayload = sql_data3\n                                        self.sqliParam = parm\n                                        return self.isSQLI\n                                if getPagesRatio(html1, html2) < DEFAULT_RADIO:\n                                    self.isSQLI = True\n                                    self.sqliPayload = \"%s\" % (sql_data1)\n                                    self.sqliParam = parm\n                                    return self.isSQLI\n\n                if self.method == \"GET\":\n                    for parm in self.params:\n                        sql_test1 = self.url.replace(parm,parm+compare1).replace(\n                            RANDOM_INT_STR2, self.randomint2).replace(RANDOM_INT_STR1, self.randomint1)\n                        sql_test2 = self.url.replace(parm,parm+compare2).replace(\n                            RANDOM_INT_STR2, self.randomint2).replace(RANDOM_INT_STR1, self.randomint1)\n                        \n                        html1 = requests.get(\n                            sql_test1, headers=self.headers).text\n                        html2 = requests.get(\n                            sql_test2, headers=self.headers).text\n                        \n                        flagString = getFlagString(html1, html2)\n                        if flagString:\n                            # print('--string type')\n                            sql_test3 = self.url.replace(parm,parm+compare1).replace(RANDOM_INT_STR2, str(\n                                random.randint(81, 99))).replace(RANDOM_INT_STR1, str(random.randint(61, 80)))\n                            sql_test4 = self.url.replace(parm,parm+compare2).replace(RANDOM_INT_STR2, str(\n                                random.randint(81, 99))).replace(RANDOM_INT_STR1, str(random.randint(61, 80)))\n                            html3 = trimResponseTag(requests.get(\n                                sql_test3, headers=self.headers).text)\n                            html4 = trimResponseTag(requests.get(\n                                sql_test4, headers=self.headers).text)\n                            if (flagString in html3 and flagString not in html4) or (flagString not in html3 and flagString in html4):\n                                # print('--string SQLI type')\n                                self.isSQLI = True\n                                self.sqliPayload = sql_test3\n                                self.sqliParam = parm\n                                return self.isSQLI\n                        if getPagesRatio(html1, html2) < DEFAULT_RADIO:\n                            self.isSQLI = True\n                            self.sqliPayload = \"%s\" % (sql_test1)\n                            self.sqliParam = parm\n                            return self.isSQLI\n            \n            else:\n                return self.isSQLI\n\n    def sqlitime(self):\n        root = minidom.parse(sqlitimefile).documentElement\n        for node in root.getElementsByTagName('couple'):\n            if self.scanlevel >= int(node.getAttribute('id')):\n                payloads = node.getElementsByTagName(\n                    'requests')[0].childNodes[0].nodeValue.strip()\n                for payitem in payloads.splitlines():\n                    if self.method == \"POST\":\n                        for parm in self.params:\n                            if self.post_pattern == 'normal' or self.post_pattern == 'json':\n                                sql_data = self.postdata.replace(parm,parm+payitem.strip())\n                                time_start = time.time()\n                                html = requests.post(url=self.url,data=sql_data, headers=self.headers).text\n                                time_end = time.time()\n                                cost_time = time_end - time_start\n                                if cost_time >= 5:\n                                    self.isSQLI = True\n                                    self.sqliPayload = \"%s\" % (payitem)\n                                    self.sqliParam = parm\n                                    return self.isSQLI\n                    if self.method == \"GET\":\n                        for parm in self.params:\n                            sql_test = self.url.replace(parm,parm+payitem.strip())\n                            time_start = time.time()\n                            html = requests.get(\n                                sql_test, headers=self.headers).text\n                            time_end = time.time()\n                            cost_time = time_end - time_start\n                            if cost_time >= 5:\n                                self.isSQLI = True\n                                
self.sqliPayload = \"%s\" % (payitem)\n self.sqliParam = parm\n return self.isSQLI\n else:\n return self.isSQLI\n\n\n def sqlierror(self):\n root = minidom.parse(sqlierrorfile).documentElement\n for node in root.getElementsByTagName('couple'):\n if self.scanlevel >= int(node.getAttribute('id')):\n payloads = node.getElementsByTagName(\n 'requests')[0].childNodes[0].nodeValue.strip()\n for payitem in payloads.splitlines():\n if self.method == \"POST\":\n for parm in self.params:\n if self.post_pattern == 'normal' or self.post_pattern == 'json':\n sql_data = self.postdata.replace(parm,parm+payitem.strip())\n html = requests.post(url=self.url,data=sql_data, headers=self.headers).text\n # print(html)\n for response_rule in node.getElementsByTagName('responses')[0].childNodes[0].nodeValue.strip().splitlines():\n if re.search(response_rule.strip(), html):\n self.isSQLI = True\n self.sqliPayload = \"%s\" % (payitem)\n self.sqliParam = parm\n return self.isSQLI\n if self.method == \"GET\":\n for parm in self.params:\n sql_test = self.url.replace(parm,parm+payitem.strip())\n html = requests.get(\n sql_test, headers=self.headers).text\n for response_rule in node.getElementsByTagName('responses')[0].childNodes[0].nodeValue.strip().splitlines():\n if re.search(response_rule.strip(), html):\n self.isSQLI = True\n self.sqliPayload = \"%s\" % (payitem)\n self.sqliParam = parm\n return self.isSQLI\n else:\n return self.isSQLI\n \n\n\n def run(self):\n if self.method == 'GET':\n self.get_url_params()\n elif self.method == 'POST':\n self.post_params()\n issqli = self.sqlierror() or self.sqlibool() or self.sqlitime()#self.sqlibool()#self.sqlierror() or self.sqlitime()#or self.sqlibool() or self.sqlitime()\n # print(issqli)\n return self\n\n\ndef main():\n url1 = \"http://10.246.190.63/DVWA-master/vulnerabilities/sqli/session-input.php\"\n postdata = \"id=1&Submit=Submit\"\n\n headers = {\n \"cookie\": \"security=medium; PHPSESSID=moreos061jjhfrnirbdblbat83\",\n 'Content-Type':'application/x-www-form-urlencoded'\n }\n\n sqli = SqliCheck(url1, method=\"POST\", postdata=postdata, headers=headers, skip=[\n 'allecIDs', 'beginDate', '_t', 'billBeginDate', 'billEndDate', 'note', 'level', 'goodsIDs', 'pageNo', 'pageSize'], scanlevel=3).run()\n if sqli.isSQLI:\n print('[+] URL : ', sqli.url)\n print('[+] isSQLI : ', sqli.isSQLI)\n print('[+] Scan level : ', sqli.scanlevel)\n print('[+] Sqli param : ', sqli.sqliParam)\n print('[+] Sqli payload : ', sqli.sqliPayload)\n \n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sqli/sqlicheck.py","file_name":"sqlicheck.py","file_ext":"py","file_size_in_byte":12500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580317438","text":"import time\nu = input(\"Enter word: \").upper()\nstart_time = time.perf_counter()\nc = list()\nfor i in range(len(u)):\n c.append(\"_\")\nfor i in range(65,91):\n for x in range(len(u)):\n if chr(i) == u[x]:\n c[x] = chr(i)\n print(c)\nprint(\"--- %s seconds ---\" % (time.perf_counter() - start_time))","sub_path":"Python/hangman/Fast Hangman.py","file_name":"Fast Hangman.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"56410167","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__all__ = [\n 'plot_aggregated_klds',\n 'plot_grid_search_results',\n 'plot_kld_violins',\n 'plot_nneg_dims_over_time',\n 'plot_multiple_performances',\n 'plot_pruning_results',\n 'plot_single_performance',\n 
'plot_val_accs_across_seeds',\n 'plot_complexities_and_loglikelihoods',\n 'plot_dim_correlations',\n 'plot_dim_evolution',\n 'plot_kld_violins',\n ]\n\nimport json\nimport os\nimport re\nimport torch\n\nfrom os.path import join as pjoin\nfrom typing import List, Tuple, Dict\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef plot_nneg_dims_over_time(plots_dir:str, nneg_d_over_time:list) -> None:\n \"\"\"plot number of non-negative dimensions as a function of time (i.e., epochs)\"\"\"\n fig = plt.figure(figsize=(10, 6), dpi=100)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n epochs, nneg_dims = zip(*nneg_d_over_time)\n ax.plot(epochs, nneg_dims, '-o')\n ax.set_xticks(epochs)\n ax.set_xticklabels(epochs)\n ax.set_xlabel('Epochs')\n ax.set_ylabel('Number of non-negative dimensions')\n\n PATH = pjoin(plots_dir, 'nneg_dimensions')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, 'nneg_dimensions_over_time.png'))\n plt.close()\n\ndef plot_single_performance(\n plots_dir:str,\n val_accs:list,\n train_accs:list,\n ) -> None:\n fig = plt.figure(figsize=(10, 6), dpi=100)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.plot(val_accs,'-+', alpha=.5, label='Test')\n ax.plot(train_accs, '-+', alpha=.5, label='Train')\n ax.annotate('Val acc: {:.3f}'.format(np.max(val_accs)), (len(val_accs) - len(val_accs) * 0.1, np.max(val_accs) / 2))\n ax.set_xlim([0, len(val_accs)])\n ax.set_xlabel(r'Epochs')\n ax.set_ylabel(r'Accuracy')\n ax.legend(fancybox=True, shadow=True, loc='lower left')\n\n PATH = pjoin(plots_dir, 'grid_search')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, 'single_model_performance_over_time.png'))\n plt.close()\n\ndef plot_multiple_performances(\n plots_dir:str,\n val_accs:list,\n train_accs:list,\n lambdas:np.ndarray,\n) -> None:\n n_rows = len(lambdas) // 2\n n_cols = n_rows\n fig, axes = plt.subplots(n_rows, n_cols, figsize=(20, 20), dpi=100)\n max_conv = max(list(map(lambda accs: len(accs), val_accs)))\n\n #keep track of k\n k = 0\n for i in range(n_rows):\n for j in range(n_cols):\n #hide the right and top spines\n axes[i, j].spines['right'].set_visible(False)\n axes[i, j].spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n axes[i, j].yaxis.set_ticks_position('left')\n axes[i, j].xaxis.set_ticks_position('bottom')\n\n axes[i, j].plot(val_accs[k],'-+', alpha=.5, label='Test')\n axes[i, j].plot(train_accs[k], '-+', alpha=.5, label='Train')\n axes[i, j].annotate('Val acc: {:.3f}'.format(np.max(val_accs)), (max_conv - max_conv * 0.1, np.max(val_accs) / 2))\n axes[i, j].set_xlim([0, max_conv])\n axes[i, j].set_xlabel(r'Epochs')\n axes[i, j].set_ylabel(r'Accuracy')\n axes[i, j].set_title(f'Lambda-L1: {lambdas[k]}')\n axes[i, j].legend(fancybox=True, shadow=True, loc='lower left')\n k += 1\n\n for ax in axes.flat:\n ax.label_outer()\n\n PATH = pjoin(plots_dir, 'grid_search')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n 
plt.savefig(pjoin(PATH, 'model_performances_over_time.png'))\n plt.close()\n\ndef plot_val_accs_across_seeds(plots_dir:str, lmbdas:np.ndarray, val_accs:np.ndarray) -> None:\n fig = plt.figure(figsize=(14, 8), dpi=100)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.plot(lmbdas, val_accs*100)\n ax.set_xticks(lmbdas)\n ax.set_xlabel(f'$\\lambda')\n ax.set_ylabel(r'Val acc (%)')\n\n plt.savefig(pjoin(plots_dir, 'lambda_search_results.png'))\n plt.close()\n\ndef plot_grid_search_results(\n results:dict,\n plot_dir:str,\n rnd_seed:int,\n modality:str,\n version:str,\n subfolder:str,\n vision_model=None,\n layer=None,\n) -> None:\n fig = plt.figure(figsize=(16, 8), dpi=100)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n lambdas = list(map(lambda l: round(float(l), 4), results.keys()))\n train_accs, val_accs = zip(*[(val['train_acc'], val['val_acc']) for lam, val in results.items()])\n\n ax.plot(train_accs, alpha=.8, label='Train')\n ax.plot(val_accs, alpha=.8, label='Val')\n ax.set_xticks(range(len(results)))\n ax.set_xticklabels(lambdas)\n ax.set_ylabel('Accuracy')\n ax.set_xlabel(r'$\\lambda$')\n ax.legend(fancybox=True, shadow=True, loc='upper right')\n plt.tight_layout()\n\n if modality == 'visual':\n assert isinstance(vision_model, str) and isinstance(layer, str), 'name of vision model and corresponding layer are required'\n PATH = pjoin(plot_dir, f'seed{rnd_seed}', modality, vision_model, layer, version, subfolder)\n else:\n PATH = pjoin(plot_dir, f'seed{rnd_seed}', modality, version, subfolder)\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, 'lambda_search_results.png'))\n plt.close()\n\ndef plot_dim_correlations(\n W_mu_vspose:torch.Tensor,\n W_mu_dspose:torch.Tensor,\n plots_dir:str,\n epoch:int,\n ) -> None:\n \"\"\"Pearson correlations between top k VSPoSE and dSPoSE dimensions\"\"\"\n fig = plt.figure(figsize=(16, 8), dpi=200)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n top_k = 50\n rhos = np.array([pearsonr(dspose_d, vspose_d)[0] for dspose_d, vspose_d in zip(W_mu_dspose[:, :top_k].T, W_mu_vspose[:, :top_k].T)])\n ax.bar(np.arange(len(rhos)), rhos, alpha=.5)\n ax.set_ylabel(r'$\\rho$', fontsize=13)\n ax.set_xlabel('Dimension', fontsize=13)\n ax.set_title(f'Epoch: {epoch}', fontsize=13)\n\n PATH = pjoin(plots_dir, 'dim_correlations')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, f'dim_correlations_{epoch:03d}.png'))\n plt.close()\n\ndef plot_dim_evolution(\n W_mu_sorted:torch.Tensor,\n W_l_sorted:torch.Tensor,\n plots_dir:str,\n epoch:int,\n ) -> None:\n \"\"\"barplot of |W_mu|_1 and mean W_l values\"\"\"\n fig = plt.figure(figsize=(16, 8), dpi=200)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on 
the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n #bring modes (mu) and scales (lambdas) onto the same scale (0, 1]\n W_mu_l1_norms = W_mu_sorted.norm(p=1, dim=0)\n W_mu_l1_norms /= W_mu_l1_norms.max().item()\n\n W_l_means = W_l_sorted.mean(dim=0)\n W_l_means /= W_l_means.max().item()\n\n ax.bar(np.arange(W_mu_sorted.shape[1]), W_mu_l1_norms, alpha=.5, label=r'$||W_{\\mu}||_{1}$')\n ax.bar(np.arange(W_l_sorted.shape[1]) + .25, W_l_means, alpha=.5, label=r'$\\overline{W}_{\\lambda}$')\n ax.set_xlabel('Dimension', fontsize=13)\n ax.set_title(f'Epoch: {epoch}', fontsize=13)\n ax.legend(fancybox=True, shadow=True, loc='upper right')\n\n PATH = pjoin(plots_dir, 'dim_evolutions')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, f'dim_evolution_{epoch:03d}.png'))\n plt.close()\n\ndef plot_complexities_and_loglikelihoods(\n plots_dir:str,\n loglikelihoods:list,\n complexity_losses:list,\n ) -> None:\n losses = [loglikelihoods, complexity_losses]\n labels = [r'$L^{E}$', r'$L^{C}$']\n ylabels = [r'Cross-entropy loss', r'Complexity cost']\n n_cols = len(losses)\n fig, axes = plt.subplots(1, n_cols, figsize=(16, 10), dpi=100)\n\n for i, ax in enumerate(axes):\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.plot(losses[i],'-o', alpha=.5, label=labels[i])\n ax.set_xlim([0, len(losses[i])])\n ax.set_xlabel(r'Epochs')\n ax.set_ylabel(ylabels[i])\n ax.legend(fancybox=True, shadow=True, loc='upper right')\n\n PATH = pjoin(plots_dir, 'losses')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(pjoin(PATH, 'llikelihood_and_complexity_over_time.png'))\n plt.close()\n\ndef plot_aggregated_klds(\n klds:np.ndarray,\n plot_dir:str,\n rnd_seed:int,\n modality:str,\n version:str,\n dim:int,\n lmbda:float,\n reduction:str,\n show_plot:bool=False,\n ) -> None:\n \"\"\"elbow plot of KL divergences aggregated over n_items\"\"\"\n fig = plt.figure(figsize=(16, 8), dpi=200)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.plot(klds)\n ax.set_xticks(np.arange(0, len(klds)+1, 10))\n ax.set_xlabel('Dimension', fontsize=10)\n ax.set_ylabel('KLD', fontsize=10)\n\n PATH = os.path.join(plot_dir, modality, version, f'{dim}d', f'{lmbda}', f'seed{rnd_seed}')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(os.path.join(PATH, f'kld_elbowplot_{reduction}.png'))\n\n if show_plot:\n plt.show()\n plt.clf()\n\n plt.close()\n\ndef plot_kld_violins(\n klds:np.ndarray,\n plot_dir:str,\n rnd_seed:int,\n modality:str,\n version:str,\n dim:int,\n lmbda:float,\n reduction:str,\n show_plot:bool=False,\n ) -> None:\n \"\"\"violinplot of KL divergences across all items and latent dimensions\"\"\"\n fig = plt.figure(figsize=(16, 8), dpi=200)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.violinplot(klds, widths=0.8)\n 
ax.set_xticks(np.arange(0, klds.shape[1]+1, 10))\n ax.set_xlabel('Dimension', fontsize=10)\n ax.set_ylabel('KLD', fontsize=10)\n plt.subplots_adjust(bottom=0.15, wspace=0.05)\n\n PATH = os.path.join(plot_dir, modality, version, f'{dim}d', f'{lmbda}', f'seed{rnd_seed}')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(os.path.join(PATH, f'kld_violinplot_{reduction}.png'))\n\n if show_plot:\n plt.show()\n plt.clf()\n\n plt.close()\n\ndef plot_pruning_results(\n results:list,\n plot_dir:str,\n rnd_seed:int,\n modality:str,\n version:str,\n dim:int,\n lmbda:float,\n reduction:str,\n ) -> None:\n \"\"\"plot validation accuracy as a function of pruned weights percentage\"\"\"\n fig = plt.figure(figsize=(16, 8), dpi=100)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n pruning_fracs, val_accs = zip(*results)\n ax.bar(pruning_fracs, val_accs, alpha=.5, width=4.0)\n ax.set_xticks(pruning_fracs)\n ax.set_xticklabels(pruning_fracs)\n ax.set_ylim([np.floor(np.min(val_accs)), np.ceil(np.max(val_accs))])\n ax.set_ylabel('Val acc (%)')\n ax.set_xlabel(r'% of weights pruned')\n ax.set_title(f'$\\lambda$ = {lmbda}')\n\n PATH = os.path.join(plot_dir, modality, version, f'{dim}d', f'{lmbda}', f'seed{rnd_seed}')\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n plt.savefig(os.path.join(PATH, f'val_acc_against_pruned_weights_{reduction}.png'))\n plt.close()\n\n\ndef plot_r2_scores(out_path:str, r2_scores:np.ndarray, nmf_components:list) -> None:\n fig = plt.figure(figsize=(14, 8), dpi=150)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n ax.plot(r2_scores)\n ax.set_xticks(range(len(r2_scores)))\n ax.set_xticklabels(nmf_components, fontsize=12)\n ax.set_xlabel('Latent dimensionality', fontsize=13)\n ax.set_ylabel(r'$r2$ score', fontsize=13)\n plt.tight_layout()\n plt.savefig(os.path.join(out_path, 'nmf_components_vs_r2_scores.png'))\n plt.close()\n\n\ndef plot_nmf_correlations(out_path:str, correlations:List[Tuple[float]], thresholds:np.ndarray, n_components:list) -> None:\n fig = plt.figure(figsize=(14, 8), dpi=150)\n ax = plt.subplot(111)\n\n #hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n #only show ticks on the left (y-axis) and bottom (x-axis) spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n for i, r in enumerate(correlations):\n ax.plot(np.array(r)*100, '-x', alpha=.7, label=f'$>{thresholds[i]:.2f}$')\n\n ax.set_xticks(range(len(correlations[0])))\n ax.set_xticklabels(n_components, fontsize=11)\n ax.set_xlabel('Latent dimensionality', fontsize=12)\n ax.set_ylabel(r'$\\%$ of dimensions', fontsize=12)\n ax.legend(fancybox=True, title=r\"Pearson's $r$\", shadow=True)\n plt.tight_layout()\n plt.savefig(os.path.join(out_path, 'correlations_across_random_sets_of_nmfs.png'))\n plt.close()\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":16413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"513107718","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# unit tests\n\n\n# In[2]:\n\n\ndef make_rgb_vector():\n R = .692\n G = .582\n B = .140\n\n # why fourth power?\n vector = [R, G, B]\n vector = vector / vector.sum()\n vector = np.pow(vector, 4)\n return vector\n \ndef save_rgb_as_uint8_file(imgIn, fileout):\n\n vector = make_rgb_vector()\n imgOut = imgIn * vector\n io.imsave(\n fileout, \n imgOut.astype(np.uint8))\n \ndef more_uint8_stuff(img):\n # convert to gray\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # make color channels\n red = gray.copy()\n green = gray.copy()\n blue = gray.copy()\n\n # set weights\n R = .642\n G = .532\n B = .44\n\n # get sum of weights and normalize them by the sum\n \n sum = R**4 + G**4 + B**4\n R = R/sum\n G = G/sum\n B = B/sum\n print(R,G,B)\n\n # combine channels with weights\n red = (R*red)\n green = (G*green)\n blue = (B*blue)\n result = cv2.merge([red,green,blue])\n\n # scale by ratio of 255/max to increase to fully dynamic range\n max=np.amax(result)\n result = ((255/max)*result).clip(0,255).astype(np.uint8)\n\n # write result to disk\n cv2.imwrite(\"car_colored.png\", result)\n\n # display it\n cv2.imshow(\"RESULT\", result)\n cv2.waitKey(0) \n\n\n# In[3]:\n\n\ndef create_grid(rows, cols, drow, dcol, intensity=255): \n a = np.zeros((rows, cols), dtype=np.int)\n for i in range(rows):\n if i%drow == 0:\n a[::, i:i+1] = intensity\n for j in range(cols):\n if j%dcol == 0:\n a[j:j + 1, ::] = intensity\n \n return a\n\ndef box(a, x, y, w, h, intensity):\n a[x : x+w, y : y + 1] = intensity\n a[x : x+w, y + h - 1 : y + h] = intensity\n a[x : x + 1, y : y + h] = intensity\n a[x + w -1: x + w, y : y + h] = intensity\n \ndef create_boxes(rows, cols, boxdx, boxdy, boxw, boxh, intensity=255):\n a = np.zeros((rows, cols), dtype=np.int)\n for i in range(rows):\n x0 = boxdx * i\n for j in range(cols):\n y0 = boxdy * j\n box(a, boxdx, boxdy, boxw, boxh, intensity)\n return a\n \ndef create_skeleton(image_file):\n import labels\n binary = labels.thin_skeleton_file(image_file)\n return binary\n\n\n# In[4]:\n\n\ndef convolve2d(a, kernel):\n \"\"\"Maybe not used\"\"\"\n import scipy.signal.convolve2d as convolve2d\n# convolved = np.asmatrix(convolve2d(a,kernel,'same'))\n convolved = convolve2d(a,kernel,'same')\n \n return convolved\n\n\n# In[5]:\n\n\ndef convolve_normalize_transpose(grid, kernel, title=\"title\"):\n from scipy import ndimage\n from PIL import Image\n\n# scipy.ndimage.convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0)\n convolve_cut = 0.9\n\n if np.any(grid)==None:\n print(\"grid missing\")\n return\n if np.any(kernel)==None:\n print(\"kernel missing\")\n return\n convolved = ndimage.convolve(grid, kernel)\n max = np.max(convolved)\n # normalize to grayscale\n convolved = np.where(convolved < max * convolve_cut , 0, convolved)\n convolved = convolved.astype(np.uint8)\n img = Image.fromarray(convolved)\n img.save(title+\".png\")\n convolved_t = np.transpose(convolved)\n return convolved, convolved_t\n\ndef convolve_and_plot(grid, kernel, title=\"title\"):\n from PIL import Image\n convolved, convolved_t = convolve_normalize_transpose(grid, kernel, title)\n plot(convolved)\n# img = Image.fromarray(convolved)\n# img.save(title+\".png\")\n\n# plot(convolved_t)\n\n\n# In[6]:\n\n\ndef plot(myarray):\n import matplotlib.pyplot as plt\n plt.imshow(myarray)\n plt.show()\n \ndef plot_file(file):\n array = Image.open(file, \"r\")\n plot(array)\n\n\n# In[7]:\n\n\nimport numpy as np\n\ndef 
normalize_box(box):\n \"\"\" adds offset to normalize sum = zero \"\"\"\n area = box.shape[0] * box.shape[1]\n newbox = box - np.sum(box) / area\n return newbox\n \ndef create_array_kernel(name, data_array):\n array = normalize_box(data_array)\n return array\n\ndef create_kernel(name, width=None, height=None, linewidth=1, intens=1):\n if width==None or height==None:\n raise Exception(\"must give width and height\") \n\n def half_points(w, h, lw):\n hw = (w + 1) // 2\n hh = (h + 1) // 2\n hlw = (lw + 1) // 2 \n return hw, hh, hlw\n\n def rect(w, h, lw, intens):\n box = np.zeros((width, height))\n box[0:lw, ::] = intens\n box[w - lw, ::] = intens\n box[::, 0:lw] = intens\n box[::, h-lw:h] = intens\n return box\n \n def vert(w, h, lw, intens):\n hw, hh, hlw = half_points(w, h, lw)\n box = np.zeros((w, h))\n box[hw - hlw : hw + hlw - 1, ::] = intens\n return box\n \n def horiz(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[::, hw - hlw : hw + hlw - 1] = intens\n return box\n \n def cross(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[hw - hlw : hw + hlw - 1, ::] = intens\n box[::, hh - hlw : hh + hlw - 1] = intens\n \n return box\n \n # top left angle\n def nw(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[hw - 1 : hw, hh:] = intens\n box[0:hw, hh - 1: hh] = intens\n# print(\"nw\", w, h, \"\\n\", box)\n return box\n \n # top right angle\n def sw(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[hw - 1 :, hh - 1: hh] = intens\n box[hw - 1 : hw, 0: hh] = intens\n# print(\"sw\", w, h, \"\\n\", box)\n return box\n \n # bottom left\n def ne(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[hw - 1 : hw , hh - 1:] = intens\n box[hw : , hh - 1 : hh] = intens\n# print(\"ne\", h, w, \"\\n\", box)\n return box\n \n # bottom right\n def se(w, h, lw, intens):\n box = np.zeros((w, h))\n hw, hh, hlw = half_points(w, h, lw)\n box[0 : hw - 1, hh - 1: hh] = intens\n box[hw - 1 : hw, 0 : hh] = intens\n# print(\"se\", w, h, \"\\n\", box)\n return box\n \n kernels = {\n \"rect\" : rect (width, height, linewidth, intens),\n \"horiz\": horiz(width, height, linewidth, intens),\n \"vert\" : vert (width, height, linewidth, intens),\n \"cross\": cross(width, height, linewidth, intens),\n \"se\" : se (width, height, linewidth, intens),\n \"ne\" : ne (width, height, linewidth, intens),\n \"sw\" : sw (width, height, linewidth, intens),\n \"nw\" : nw (width, height, linewidth, intens),\n }\n \n func = kernels[name]\n box = func\n new_box = normalize_box(box)\n return new_box\n\ndef create_small_grid():\n grid = np.ones((10,15))*100\n grid[1:4,1:3] = 0\n grid[5:8,1:3] = 0\n grid[1:4,4:6] = 0\n grid[5:8,4:6] = 0\n grid[1:4,7:9] = 0\n grid[5:8,7:9] = 0\n grid[1:4,10:12] = 0\n grid[5:8,10:12] = 0\n grid[1:4,13:15] = 0\n grid[5:8,13:15] = 0\n return\n \n\n\n# In[8]:\n\n\ndef smalltest():\n halfw = 2\n import os\n import numpy as np\n from scipy import ndimage\n from PIL import Image\n\n float_formatter = \"{:.2f}\".format\n np.set_printoptions(formatter={'float_kind':float_formatter})\n\n gridw = 129\n gridh = 137\n grid = create_grid(gridw, gridh, 7, 3)\n\n print(\"rect73\")\n rect_kernel = create_kernel(\"rect\", 7,3)\n convolve_and_plot(grid, rect_kernel)\n print(\"rect37\")\n rect_kernel = create_kernel(\"rect\", 3,7)\n convolve_and_plot(grid, rect_kernel)\n print(\"cross\")\n cross_kernel = create_kernel(\"cross\", 5, 7, 
intens=3)\n convolve_and_plot(grid, cross_kernel)\n print(\"horiz\")\n horiz_kernel = create_kernel(\"horiz\", 5, 5)\n convolve_and_plot(grid, horiz_kernel)\n print(\"vert\")\n vert_kernel = create_kernel(\"vert\", 5, 5)\n convolve_and_plot(grid, vert_kernel)\n\n mydata = np.array([\n [1, 1, 0, 1, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 1, 0, 1, 1],\n ]) \n\n print(\"[]\")\n kernel = create_array_kernel(\"[]\", mydata)\n\n convolve_and_plot(grid, kernel)\n\n print(\"===FINISHED smalltest===\")\n\nsmalltest()\n\n\n# In[9]:\n\n\ndef old_plot():\n \"\"\"probably abandon\"\"\"\n from matplotlib import pyplot as plt\n grid = create_boxes(4, 5, 5, 6, 3, 4)\n print(\"grid\", grid.shape, \"\\n\", grid)\n plt.imshow(grid)\n plt.show()\n\n\n convolved = plot_convolve(grid, kernel)\n\n from matplotlib import pyplot as plt\n # plt.imshow(grid_points, interpolation='nearest')\n plt.imshow(grid)\n plt.show()\n plt.imshow(convolved,cmap='gray', vmin=0, vmax=255)\n plt.show()\n\n\n# In[10]:\n\n\ndef plot_conv(title, w, h, file):\n import os\n print(title, \"-\", w, \"-\", h, \"\\n\", \"NOTE x and y may be interchanged\")\n kernel = create_kernel(title, w, h)\n plot_pdfimages(kernel, title + str(w)+\"-\"+str(h)+\".png\", file)\n return\n\ndef plot_pdfimages(kernel, title, file):\n import os\n from PIL import Image\n from matplotlib import pyplot as plt\n \n bits = os.path.split(file)\n print(\"bits\", bits)\n skeleton = create_skeleton(file)\n convolve_and_plot(skeleton, kernel, title);\n\ndef explore_hw(file):\n for i in range(3, 9, 2):\n for j in range(3, 9, 2):\n plot_conv(\"vert\", i, j, file)\n\n for i in range(3, 9, 2):\n for j in range(3, 9, 2):\n plot_conv(\"horiz\", i, j, file)\n\ndef plot_pdf_files():\n import os\n HOME = os.path.expanduser(\"~\")\n files = [os.path.join(HOME,\n 'workspace/jupyter/physchem/liion/PMC7077619/pdfimages/image.8.3.81_523.164_342/raw.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/images/capacitycycle.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/liion/PMC7077619/pdfimages/image.8.3.81_523.164_342/raw.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/liion/PMC7075112/pdfimages/image.5.2.98_499.292_449/raw.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/liion/PMC7075112/pdfimages/image.4.3.117_479.722_864/raw.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/liion/PMC7074852/pdfimages/image.7.3.86_507.385_495/raw.png'),\n os.path.join(HOME, 'workspace/jupyter/physchem/liion/PMC7067258/pdfimages/image.5.1.52_283.71_339/raw.png'),\n ]\n \n for file in files:\n print(file)\n if not os.path.isfile(file):\n print(\"nonexistent \", file)\n return\n\n test = False\n if test: \n # explore best hw values\n explore_hw(file)\n else:\n plot_conv(\"vert\", 3, 3, file)\n plot_conv(\"horiz\", 3, 3, file)\n plot_conv(\"se\", 5, 5, file)\n plot_conv(\"sw\", 9, 9, file)\n plot_conv(\"nw\", 9, 9, file)\n plot_conv(\"ne\", 13, 13, file)\n return\n \nplot_pdf_files()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"physchem/ipynb/convolve1.py","file_name":"convolve1.py","file_ext":"py","file_size_in_byte":11057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"373178092","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 15 12:47:37 2020\r\n\r\n@author: saz2n\r\n\"\"\"\r\nimport yaml\r\nimport numpy as np\r\nfrom preprocessor import PreprocessText\r\n\r\nclass FindSummary:\r\n \r\n def __init__(self,configPath):\r\n with open(configPath,'r') as fl:\r\n self.config = yaml.load(fl)\r\n\r\n 
def loadData(self):\r\n with open(self.config['data']['articles_path'],'r',encoding='utf-8') as fl:\r\n articleText = fl.read()\r\n return articleText\r\n \r\n def splitSentence(self,text):\r\n \"\"\"\r\n Split sentences using full stop\r\n \r\n Inputs:\r\n text: string\r\n Outputs:\r\n sentences: List\r\n \"\"\"\r\n sentences = text.split('.')\r\n return sentences\r\n \r\n def groupSentence(self,sentences):\r\n \"\"\"\r\n Group sentences into first and rest\r\n \r\n Input:\r\n sentences: List or strings\r\n Output:\r\n text1: string\r\n remainingText: List of string\r\n \"\"\"\r\n text1 = sentences[0]\r\n remainingText = sentences[1:]\r\n return text1,remainingText\r\n \r\n def findSentLen(self,sentences):\r\n sentLenghts = [len(sent) for sent in sentences]\r\n return sentLenghts\r\n \r\n def findTopFive(self,sentences,sentLengths):\r\n sortedIdx = np.argsort(sentLengths)\r\n top5idx = sortedIdx[-5:]\r\n top5Sents = [sentences[i] for i in top5idx]\r\n return top5Sents\r\n \r\n def summarise(self,article):\r\n \r\n articleText = article# self.loadData()\r\n preprocessObj = PreprocessText()\r\n loweredText = preprocessObj.convertToLower(articleText)\r\n filteredText = preprocessObj.removeSpecialChar(loweredText)\r\n sentences = self.splitSentence(filteredText)\r\n text1,remainingText = self.groupSentence(sentences)\r\n sentLengths = self.findSentLen(remainingText)\r\n top5Sents = self.findTopFive(remainingText,sentLengths)\r\n summaryTextList = [text1]\r\n summaryTextList.extend(top5Sents)\r\n summaryText = ' '.join(summaryTextList)\r\n return summaryText\r\n \r\n#if __name__==\"__main__\":\r\n# summaryObj = FindSummary('../config/config')\r\n# summaryText = summaryObj.summarise()\r\n \r\n \r\n ","sub_path":"bin/summariserga.py","file_name":"summariserga.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"23433426","text":"import os\n\nweights = '2022-12-22 20_17_24.876640_train'\nmodel = 'SFDet-ResNet'\nmode = 'test'\nbatch = 32\nscore_threshold = 0.01\nuse_gpu = 'True'\n\nstart = 140\nsave_step = 5\nnum_epochs = 300\n\nfor i in range(start + save_step, num_epochs + save_step, save_step):\n pretrained_model = '\"{}/{}\"'.format(weights, i)\n args = ('--mode {} --pretrained_model {} --model {} --use_gpu {} '\n '--batch_size {} --score_threshold {}')\n args = args.format(mode,\n pretrained_model,\n model,\n use_gpu,\n batch,\n score_threshold)\n command = 'python main.py {}'.format(args)\n os.system(command)\n","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"337389457","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport math\nimport os\nfrom time import gmtime, strftime\nimport tensorflow as tf\nfrom sklearn.metrics import average_precision_score, recall_score, precision_score, f1_score, accuracy_score\n\nfrom tensorflow.keras.models import Sequential,Model, model_from_json\nfrom tensorflow.keras.layers import Activation, Dense, Conv1D, MaxPooling1D, Flatten,Permute,RepeatVector\nfrom tensorflow.keras.layers import Dropout,Input\nfrom tensorflow.keras.layers import Dense, Lambda, Dot, Activation, Concatenate\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import backend as K\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport seaborn as sns\nimport 
matplotlib.pyplot as plt\n\nclass Attention(Layer):\n\n def __init__(self, units=128, **kwargs):\n self.units = units\n super().__init__(**kwargs)\n\n def __call__(self, inputs):\n \"\"\"\n Many-to-one attention mechanism for Keras.\n @param inputs: 3D tensor with shape (batch_size, time_steps, input_dim).\n @return: 2D tensor with shape (batch_size, 128)\n @author: felixhao28, philipperemy.\n \"\"\"\n hidden_states = inputs\n hidden_size = int(hidden_states.shape[2])\n # Inside dense layer\n # hidden_states dot W => score_first_part\n # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size) => (batch_size, time_steps, hidden_size)\n # W is the trainable weight matrix of attention Luong's multiplicative style score\n score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)\n # score_first_part dot last_hidden_state => attention_weights\n # (batch_size, time_steps, hidden_size) dot (batch_size, hidden_size) => (batch_size, time_steps)\n h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(hidden_states)\n score = Dot(axes=[1, 2], name='attention_score')([h_t, score_first_part])\n attention_weights = Activation('softmax', name='attention_weight')(score)\n # (batch_size, time_steps, hidden_size) dot (batch_size, time_steps) => (batch_size, hidden_size)\n context_vector = Dot(axes=[1, 1], name='context_vector')([hidden_states, attention_weights])\n pre_activation = Concatenate(name='attention_output')([context_vector, h_t])\n attention_vector = Dense(self.units, use_bias=False, activation='tanh', name='attention_vector')(pre_activation)\n return attention_vector\n\n def get_config(self):\n return {'units': self.units}\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\n\ndef report_evaluation_metrics(y_true, y_pred):\n average_precision = average_precision_score(y_true, y_pred)\n precision = precision_score(y_true, y_pred, labels=[0, 1], pos_label=1)\n recall = recall_score(y_true, y_pred, labels=[0, 1], pos_label=1)\n f1 = f1_score(y_true, y_pred, labels=[0, 1], pos_label=1)\n acc = accuracy_score(y_true, y_pred)\n print('Average precision-recall score: {0:0.2f}'.format(average_precision))\n print('Precision: {0:0.4f}'.format(precision))\n print('Recall: {0:0.4f}'.format(recall))\n print('F1: {0:0.4f}'.format(f1))\n print('acc: {0:0.4f}'.format(acc))\n\n\ndef get_data(dataset):\n data = []\n with open(dataset, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n data.append(row)\n data = np.array(data)\n return data\n\n# Attention Mechanism\nclass BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)\n\n def call(self, query, values):\n # query hidden state shape == (batch_size, hidden size)\n # values shape == (batch_size, max_len, hidden size)\n\n # we are doing this to broadcast addition along the time axis to calculate the score\n # query_with_time_axis shape == (batch_size, 1, hidden size)\n query_with_time_axis = tf.expand_dims(query, 1)\n\n # score shape == (batch_size, max_length, 1)\n # we get 1 at the last axis because we are applying score to self.V\n # the shape of the tensor before applying self.V is (batch_size, max_length, units)\n score = self.V(tf.nn.tanh(\n self.W1(query_with_time_axis) + self.W2(values)))\n\n # attention_weights shape == (batch_size, max_length, 1)\n 
attention_weights = tf.nn.softmax(score, axis=1)\n\n # context_vector shape after sum == (batch_size, hidden_size)\n context_vector = attention_weights * values\n context_vector = tf.reduce_sum(context_vector, axis=1)\n\n return context_vector, attention_weights\n\ndef Build_seq_cnn_regression_model(input_shape, output_size, neurons, activ_func=\"linear\",\n dropout=0.25, loss=\"mae\", optimizer=\"adam\"):\n model = Sequential()\n model.add(Conv1D(64, 5, activation='relu', input_shape=input_shape))\n\n model.add(MaxPooling1D(pool_size=2))\n\n # model.add(Attention(128))\n\n\n # NOTE: Sequential.add() returns None, so attentionModel is always None here;\n # the functional Build_cnn_regression_model below is the variant used in main()\n attentionModel = model.add(Attention(128))\n\n\n # model.add(Flatten())\n model.add(Dropout(0.1))\n\n\n model.add(Dense(512, activation='relu'))\n\n model.add(Dense(256, activation='relu'))\n\n\n model.add(Dense(128, activation='relu'))\n\n\n model.add(Dense(13, activation='linear'))\n\n model.compile(loss=loss, # one may use 'mean_absolute_error' instead of 'mean_squared_error'\n optimizer=optimizer,\n metrics=[tf.keras.metrics.RootMeanSquaredError()] # you can add several if needed\n )\n model.summary()\n return model,attentionModel\n\ndef Build_cnn_regression_model(input_shape, output_size, neurons, activ_func=\"linear\",\n dropout=0.25, loss=\"mae\", optimizer=\"adam\"):\n input_layer = Input(input_shape)\n\n conv1D_layer = Conv1D(64, 5, activation='relu')(input_layer)\n\n maxPooling_layer = MaxPooling1D(pool_size=2)(conv1D_layer)\n\n attention_layer = Attention(128)(maxPooling_layer)\n\n encoder = Dense(512, activation='relu')(attention_layer)\n encoder = Dense(256, activation='relu')(encoder)\n encoder = Dense(128, activation='relu')(encoder)\n encoder = Dense(13, activation='linear')(encoder)\n\n model = Model(inputs=input_layer, outputs=encoder)\n\n model.compile(loss=loss, # one may use 'mean_absolute_error' instead of 'mean_squared_error'\n optimizer=optimizer,\n metrics=[tf.keras.metrics.RootMeanSquaredError()] # you can add several if needed\n )\n model.summary()\n return model,attention_layer\n\ndef split_data(data: list, ratio=0.9):\n size = len(data)\n train_sample = int(size * ratio)\n train_dataset, test_dataset = data[: train_sample], data[train_sample:]\n return np.array(train_dataset), np.array(test_dataset)\n\n\ndef mse(label: np.ndarray, predict: np.ndarray):\n sum_square = (label - predict) ** 2\n return np.mean(sum_square)\n\n\ndef mpe(label: np.ndarray, predict: np.ndarray):\n sum_square = (label - predict) ** 2 / np.average(label)*100\n return np.mean(sum_square)\n\ndef _pprint(field, output_test, res):\n total_loss = 0\n for i in range(len(field)):\n loss = mse(output_test[:, i], res[:, i])\n total_loss += loss\n print(f\"Field: {field[i]} Mse: {loss}\")\n print(f\"Total Loss MSE: {total_loss}\")\n\n print(\"_____________________________\")\n\n total_loss = 0\n for i in range(len(field)):\n loss = mpe(output_test[:, i], res[:, i])\n total_loss += loss\n print(f\"Field: {field[i]} Mpe: {loss} %\")\n\ndef main():\n # read data from the EEM image files\n folder_name = \"data/EEM\"\n col1 = 0\n col2 = 0\n files = [file for file in os.listdir(folder_name)]\n input_data = []\n for file in files:\n excel = pd.ExcelFile(os.path.join(folder_name, file))\n sheets = excel.sheet_names\n\n for sheet in sheets:\n if sheet != \"18b\":\n row = excel.parse(sheet_name=sheet).values\n # print(len(row))\n input_data.append(row)\n\n # the main fields to consider\n full_col =['pH', 'DO', 'BOD5', 'CODMn', 'TN', 'TP', 'TOC', 'DOC',\n 'TN,', 'NH3-N', 'NO3-N', 'DTP', 'PO4-P']\n \n field = full_col\n\n\n # read the results file (substance concentrations)\n 
output_df = pd.read_excel(\"data/river_data.xlsx\", sheet_name=\"Data(2018-2020)\")\n print(output_df.columns)\n output = output_df[field].values\n\n # chia tap du lieu\n train_dataset, test_dataset = split_data(input_data)\n input_data = np.array(input_data)\n output_train = np.array(output[:len(train_dataset)])\n output_test = np.array(output[len(train_dataset):len(input_data)])\n\n\n # huan luyen mo hinh\n my_model,my_att = Build_cnn_regression_model((len(train_dataset[0]), len(train_dataset[0][0])), output_size=5, neurons=100)\n my_model.fit(train_dataset, output_train, epochs=5, batch_size=1)\n res = my_model.predict(test_dataset)\n\n #trich rut vector dac trung attention\n att_output = K.function([my_model.layers[0].input],\n [my_model.layers[8].output])\n layer_output = att_output(input_data)[0]\n\n\n # tinh do ruong quan\n res = np.concatenate((layer_output,output), axis=1)\n corr = np.corrcoef(np.transpose(res))\n final = []\n i = 0\n for line in corr:\n final.append(line[-13:])\n i = i + 1;\n if(i >128):\n break\n final = np.nan_to_num(np.array(final))\n\n # tong cac he so tuong quan\n total1 = np.sum(final,axis=0) \n sort1 = (-total1).argsort()[:3]\n\n print(\"Các chỉ số ứng với vector attention\")\n for i in sort1:\n print(full_col[i])\n\n total2 = np.sum(np.absolute(final),axis=0) \n sort2 = (-total2).argsort()[:3]\n\n # tong cac he so tuong quan (tinh theo tri tuye doi)\n print(\"Các chỉ số ứng với vector attention (abs)\")\n for i in sort2:\n print(full_col[i])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"attention-corr.py","file_name":"attention-corr.py","file_ext":"py","file_size_in_byte":10191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"545267221","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport mnist_inference\n\nBATCH_SIZE = 100\nLEARNING_RATE_BASE = 0.8\nLEARNING_RATE_DECAY = 0.99\nREGULARAZTION_RATE = 0.0001\nTRAINING_STEPS = 30000\nMOVING_AVERAGE_DECAY = 0.99\n\nMODEL_SAVE_PATH = \"mnistModel/\"\nMODEL_NAME = \"model.ckpt\"\n\ndef train(mnist):\n x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')\n\n # regularizer\n","sub_path":"mnist_better/mnist_train.py","file_name":"mnist_train.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"483077861","text":"#!/usr/bin/env python\n\nimport StringIO\nimport string\nimport re\nimport sys\nimport cPickle\nimport os\nimport unittest\nfrom bag import Bag\n\nhas_a_vowel_re = re.compile(r'[aeiouy]')\nlong_enough_re = re.compile(r'^i$|^a$|^..')\nnon_letter_re = re.compile(r'[^a-z]')\n\n\ndef word_acceptable(w):\n if non_letter_re.search(w):\n return False\n if(not long_enough_re.match(w)):\n return False\n if(not has_a_vowel_re.search(w)):\n return False\n\n return True\n\ndefault_dict_name = os.path.join(os.path.dirname(__file__), \"../words.utf8\")\n\n\ndef snarf_dictionary_from_IO(I):\n print >> sys.stderr, \"Snarfing\", I\n hash_table = {}\n for w in re.findall(r'.+', I.read()):\n w = string.lower(w)\n\n if not word_acceptable(w):\n continue\n\n key = Bag(w)\n if key in hash_table:\n if(0 == hash_table[key].count(w)): # avoid duplicates\n hash_table[key].append(w)\n else:\n hash_table[key] = [w]\n\n print >> sys.stderr, \"done\"\n return 
hash_table\n\nhash_cache = os.path.join(os.path.dirname(__file__), \"hash.cache\")\n\n\ndef snarf_dictionary(fn):\n try:\n fh = open(hash_cache, \"rb\")\n rv = cPickle.load(fh)\n print >> sys.stderr, \"Reading cache\", hash_cache, \"instead of dictionary\", fn\n except:\n fh = open(fn, \"r\")\n rv = snarf_dictionary_from_IO(fh)\n fh.close()\n fh = open(hash_cache, \"wb\")\n cPickle.dump(rv, fh, 2)\n\n fh.close()\n return rv\n\n\f\nif __name__ == \"__main__\":\n class TestStuff(unittest.TestCase):\n def setUp(self):\n self.fake_input = \"cat\\ntac\\nfred\\n\"\n self.fake_dict = snarf_dictionary_from_IO(StringIO.StringIO(self.fake_input))\n\n def test_word_acceptable(self):\n self.assert_(word_acceptable(\"dog\"))\n self.assertFalse(word_acceptable(\"C3PO\"))\n d = snarf_dictionary(os.path.join(default_dict_name))\n self.assertEqual(66965, len(d))\n self.assertEqual(72794, sum(len(words) for words in d.values()))\n\n def test_this_and_that(self):\n self.assert_(2 == len(self.fake_dict.keys()))\n cat_hits = self.fake_dict[Bag(\"cat\")]\n self.assert_(2 == len(cat_hits))\n self.assert_(cat_hits[0] == \"cat\")\n self.assert_(cat_hits[1] == \"tac\")\n self.assert_(1 == len(self.fake_dict[Bag(\"fred\")]))\n self.assert_(self.fake_dict[Bag(\"fred\")][0] == \"fred\")\n\n unittest.main()\n","sub_path":"python/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"643389411","text":"import os\nimport re\nimport logging\nimport subprocess\nfrom collections import namedtuple, defaultdict\n\nimport serial\nimport serial.tools.list_ports as listports\n\nTITLE = \"CS 21 17.2 - Project 2 Server v1.1\"\nMARS_PATH = \"mars.jar\"\nDEFAULT_ASM_FILENAME = \"test.asm\"\n\nDEFAULT_PORT_CHOICE = 1\nBAUDRATE = 9600\nLOGGING_LEVEL = logging.DEBUG\nMAX_ITERATIONS = 1e9\n\nclass State(object):\n IFUOutput = namedtuple('IFUnitOutput', ('inst'))\n RFOutput = namedtuple('RegFileOutput', ('readdata1', 'readdata2'))\n DMOutput = namedtuple('DataMemOutput', ('readdata'))\n StateRepr = namedtuple('StateString', ('pc', 'regfile', 'mem'))\n\n TEXTSEG_FILENAME = \"textsegment.dump\"\n DATASEG_FILENAME = \"datasegment.dump\"\n DEFAULT_PC = 0x00400000\n DEFAULT_DATASEG = 0x10010000\n\n def __init__(self, zeropc=False):\n self.pc = 0 if zeropc else State.DEFAULT_PC\n self.regfile = defaultdict(int)\n self.mem = defaultdict(int)\n\n def load_asm(self, filename, marspath):\n self._populate_text_segment(filename, marspath)\n self._populate_data_segment(filename, marspath)\n\n def _populate_text_segment(self, filename, marspath):\n self._mars_dump(marspath, filename, State.TEXTSEG_FILENAME, \".text\")\n data = self._get_bytes_from_dump(State.TEXTSEG_FILENAME)\n self._populate_memory(self.pc, data)\n\n def _populate_data_segment(self, filename, marspath):\n self._mars_dump(marspath, filename, State.DATASEG_FILENAME, \".data\")\n data = self._get_bytes_from_dump(State.DATASEG_FILENAME)\n self._populate_memory(State.DEFAULT_DATASEG, data)\n\n def _get_bytes_from_dump(self, filepath):\n ret = []\n\n with open(filepath, \"r\") as f:\n for line in f:\n line = line.strip()\n\n for b in [int(line[i:i+2].zfill(2), 16) for i in range(0, len(line), 2)]:\n ret.append(b)\n\n return ret\n\n def _populate_memory(self, startaddr, data):\n logger.info(\"Populating memory starting address %s (%s)\" % (startaddr, hex(startaddr)))\n for i in range(len(data)):\n # Optimization: defaultdict writes zero anyway\n if data[i] != 0:\n 
self.mem[startaddr + i] = data[i]\n\n def _mars_dump(self, marspath, inputfile, outputfile, segment):\n command = \"java -Dapple.awt.UIElement=true -jar {marspath} nc np sm {zero} dump {segment} HexText {outputfile} {inputfile}\".format(\n marspath=marspath,\n inputfile=inputfile,\n outputfile=outputfile,\n segment=segment,\n zero=\"mc CompactTextAtZero\" if self.pc == 0 else \"\",\n )\n\n logging.info(\"Running %s\" % command)\n\n subprocess.run(command.split())\n\n def input_if_unit(self, pc):\n pc = self._hex_to_int(pc)\n self.pc = pc\n\n inst = 0\n for i in range(4):\n inst <<= 8\n inst |= self._read_mem(pc + i)\n\n return State.IFUOutput(inst=inst)\n\n def input_register_file(self, readreg1, readreg2, writereg, writedata, write):\n logger.debug(\"Register file inputs: RR1({rr1}), RR2({rr2}), WR({wr}), WD({wd}), W({w})\".format(\n rr1=readreg1,\n rr2=readreg2,\n wr=writereg,\n wd=writedata,\n w=write,\n ))\n\n if self._bin_to_int(write):\n self._write_regfile(writereg, writedata)\n\n rd1 = self._read_regfile(readreg1)\n rd2 = self._read_regfile(readreg2)\n\n return State.RFOutput(readdata1=rd1, readdata2=rd2)\n\n def input_data_memory(self, addr, writedata, memread, memwrite):\n logger.debug(\"Data memory inputs: Addr({addr}), WD({wd}), MR({mr}), MW({mw})\".format(\n addr=addr,\n wd=writedata,\n mr=memread,\n mw=memwrite,\n ))\n\n if self._bin_to_int(memwrite):\n self._write_mem(addr, writedata)\n\n # MemRead is ignored\n data = self._read_mem(addr)\n\n return State.DMOutput(readdata=data)\n \n def _hex_to_int(self, s):\n if type(s) == int:\n return s\n\n if s.startswith(\"0x\"):\n s = s.replace(\"0x\", \"\")\n\n return int(s, 16)\n\n def _bin_to_int(self, s):\n if type(s) == int:\n return s\n\n if s.startswith(\"0b\"):\n s = s.replace(\"0b\", \"\")\n\n return int(s, 2)\n\n def _read_regfile(self, reg):\n nreg = self._bin_to_int(reg)\n logger.debug(\"Reading register: %s (%s)\" % (reg, nreg))\n\n return self.regfile[nreg]\n\n def _write_regfile(self, reg, data):\n nreg = self._bin_to_int(reg)\n ndata = self._hex_to_int(data)\n logger.debug(\"Writing to register %s: %s (%s: %s)\" % (reg, data, nreg, ndata))\n\n if nreg == 0:\n logger.warning(\"Attempting to write to register zero: %s (%s)\" % (data, ndata))\n\n self.regfile[nreg] = ndata\n\n def _read_mem(self, addr):\n naddr = self._hex_to_int(addr)\n logger.debug(\"Reading memory address: %s (%s)\" % (addr, naddr))\n\n return self.mem[naddr]\n\n def _write_mem(self, addr, data):\n naddr = self._hex_to_int(addr)\n ndata = self._hex_to_int(data)\n logger.debug(\"Writing to memory address %s: %s (%s: %s)\" % (addr, data, naddr, ndata))\n\n self.mem[naddr] = ndata\n \n def __str__(self):\n ss = State.StateRepr(pc=self.pc, regfile=self.regfile, mem=self.mem)\n\n return str(ss)\n\n# Note: bad practice to instantiate module-level logging\nlogging.basicConfig(level=LOGGING_LEVEL, format='[%(levelname)s] %(message)s')\nlogger = logging.getLogger(__name__)\n\ndef get_com_ports():\n # Not all COM ports have \"Arduino\" in description\n # port.device, port.description\n return [port for port in listports.comports()]\n\ndef select_com_port():\n com_ports = get_com_ports()\n\n print(\"COM ports:\")\n for i, port in enumerate(com_ports, start=1):\n print(\"{num} - {path} ({desc})\".format(\n num=i,\n path=port.device,\n desc=port.description,\n ))\n\n # No need for validation\n choice = input(\"Select COM port [%s]: \" % DEFAULT_PORT_CHOICE)\n if not choice:\n choice = DEFAULT_PORT_CHOICE\n choice = int(choice) - 1\n\n return 
com_ports[choice]\n\ndef recv_cstring(ser):\n buf = []\n\n while True:\n ch = ser.read()\n logger.debug(\"Received character: [%s]\" % ch)\n\n if ch != b\"\\0\":\n buf.append(ch)\n else:\n # Returns a string (not a bytes object)\n try:\n s = b''.join(buf).decode(\"ascii\")\n except UnicodeDecodeError as e:\n logger.error(\"Arduino did not restart cleanly! Exception: %s\" % e)\n exit(-1)\n\n logger.debug(\"Constructed C-style string: %s\" % s)\n\n return s\n\ndef send_asciizstr(ser, msg):\n logger.info(\"Sending ASCII string: %s\" % msg)\n bytemsg = msg.encode(\"ascii\")\n\n ser.write(bytemsg)\n\ndef is_start_message(msg):\n return msg == 'START'\n\ndef is_input_message(msg):\n # pppppppp aaaaa bbbbb ccccc dddddddd e ffffffff gggggggg h i\n # pc rr1 rr2 wr wdrf w addr wddm mr mw\n\n # Lowercase only\n HEX_REGEX = \"[0-9a-f]\"\n BIN_REGEX = \"[01]\"\n HEX_8 = (8, HEX_REGEX)\n BIN_1 = (1, BIN_REGEX)\n BIN_5 = (5, BIN_REGEX)\n\n # Order is significant\n tokens = (\n HEX_8, # pc\n BIN_5, # rr1\n BIN_5, # rr2\n BIN_5, # wr\n HEX_8, # wdrf\n BIN_1, # w\n HEX_8, # addr\n HEX_8, # wddm\n BIN_1, # mr\n BIN_1, # mw\n )\n\n # Tokens are delimited by single space\n pattern = ' '.join((\"%s{%s}\" % (pattern, length) for length, pattern in tokens))\n logger.debug(\"Full-match pattern: %s\" % pattern)\n\n # Can be optimized by compiling once for entire run; pattern is constant\n return re.fullmatch(pattern, msg)\n\ndef handle_start_message(state, ser):\n send_asciizstr(ser, \"{:08x}\".format(state.pc))\n\ndef handle_input_message(ser, state, msg):\n ifout, rfout, dmout = simulate_inputs(state, msg)\n send_output_response(ser, ifout, rfout, dmout)\n\ndef simulate_inputs(state, msg):\n logger.debug(\"Simulating input: %s\" % msg)\n\n # Message is already formatted properly\n pc, rr1, rr2, wr, wdrf, w, addr, wddm, mr, mw = msg.split()\n\n ifuout = state.input_if_unit(pc)\n rfout = state.input_register_file(rr1, rr2, wr, wdrf, w)\n dmout = state.input_data_memory(addr, wddm, mr, mw)\n\n logger.debug(\"Simulation outputs: %s, %s, %s\" % (ifuout, rfout, dmout))\n\n return ifuout, rfout, dmout\n\ndef send_output_response(ser, ifuout, rfout, dmout):\n msg = make_output_message(ifuout, rfout, dmout)\n send_asciizstr(ser, msg)\n\ndef make_output_message(ifuout, rfout, dmout):\n # iiiiiiii aaaaaaaa bbbbbbbb cccccccc\n # inst rd1 rd2 rd\n \n msg = \"{inst:08x} {rd1:08x} {rd2:08x} {rd:08x}\".format(\n inst=ifuout.inst,\n rd1=rfout.readdata1,\n rd2=rfout.readdata2,\n rd=dmout.readdata,\n )\n logger.debug(\"Created output message: %s\" % msg)\n\n return msg\n\ndef get_asm_filename():\n filename = input(\"Enter source ASM filename [%s]: \" % DEFAULT_ASM_FILENAME)\n if not filename:\n filename = DEFAULT_ASM_FILENAME\n\n return filename\n\ndef clear_temp_files():\n for path in (State.TEXTSEG_FILENAME, State.DATASEG_FILENAME):\n if os.path.exists(path):\n logger.debug(\"Deleting [%s]...\" % path)\n os.remove(path)\n\ndef check_filename(filename):\n if not os.path.exists(filename):\n logger.error(\"The file [%s] does not exist\" % filename)\n exit(-1)\n\ndef check_temp_files():\n if not os.path.exists(State.DATASEG_FILENAME) or not os.path.exists(State.TEXTSEG_FILENAME):\n logger.error(\"Failed to generate temporary files\")\n exit(-1)\n\ndef main():\n print(TITLE)\n print()\n\n clear_temp_files()\n check_filename(MARS_PATH)\n print(\"Please ensure that your Arduino is connected.\")\n\n filename = get_asm_filename()\n check_filename(filename)\n\n state = State()\n state.load_asm(filename, MARS_PATH)\n\n 
check_temp_files()\n\n port = select_com_port()\n print(\"Port selected: %s\" % port.device)\n\n try:\n ser = serial.Serial(port.device, BAUDRATE)\n except (OSError, serial.SerialException):\n logging.error(\"Port is being used (ensure that Serial Monitor is not running)\")\n exit(-1)\n\n print(\"Serial port opened successfully.\")\n\n # Arduino program is automatically restarted\n print(\"Waiting for START message...\")\n\n mainloop(state, ser)\n\ndef mainloop(state, ser):\n iteration = 0\n cycle = 0\n started = False\n\n while iteration < MAX_ITERATIONS:\n iteration += 1\n logger.debug(\"Iteration: %s\" % iteration)\n\n logger.debug(\"Old state: %s\" % state)\n\n msg = recv_cstring(ser)\n logger.info(\"Received message: [%s]\" % msg)\n\n if is_start_message(msg):\n logger.info(\"Recognized start init message\")\n handle_start_message(state, ser)\n started = True\n cycle = 0\n\n elif not started:\n logger.warn(\"Not started; ignoring message: [%s]\" % msg)\n\n else:\n # Has already started\n cycle += 1\n logger.info(\"Cycle: %s\" % cycle)\n\n if is_input_message(msg):\n logger.info(\"Recognized input message\")\n handle_input_message(ser, state, msg)\n\n else:\n logger.warn(\"Unrecognized message: [%s]\" % msg)\n\n print(state)\n\n logger.debug(\"New state: %s\" % state)\n print()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"server_files/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175674281","text":"import pytest\n\nimport procrastinate\n\npytestmark = pytest.mark.asyncio\n\n\n@pytest.fixture\ndef pg_app(psycopg2_connector):\n return procrastinate.App(connector=psycopg2_connector)\n\n\n# Even if we test the purely sync parts, we'll still need an async worker to execute\n# the tasks\nasync def test_defer(pg_app, aiopg_connector):\n\n sum_results = []\n product_results = []\n\n @pg_app.task(queue=\"default\", name=\"sum_task\")\n def sum_task(a, b):\n sum_results.append(a + b)\n\n @pg_app.task(queue=\"default\", name=\"product_task\")\n async def product_task(a, b):\n product_results.append(a * b)\n\n sum_task.defer(a=1, b=2)\n sum_task.configure().defer(a=3, b=4)\n pg_app.configure_task(name=\"sum_task\").defer(a=5, b=6)\n product_task.defer(a=3, b=4)\n\n async_app = procrastinate.App(connector=aiopg_connector)\n async_app.tasks = pg_app.tasks\n\n await async_app.run_worker_async(queues=[\"default\"], wait=False)\n\n assert sum_results == [3, 7, 11]\n assert product_results == [12]\n","sub_path":"tests/acceptance/test_sync.py","file_name":"test_sync.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"31533774","text":"import numpy as np\nimport pandas as pd\n\nimport batman_plot\nimport evaluator\n\n\ndef prepare_canvas():\n plt = batman_plot.get_arranged_canvas()\n return plt\n\n\ndef prepare_data():\n n = 50000\n mid = 0.5\n x_range = 7\n y_range = 6\n x_data = (np.random.rand(n) - mid) * x_range * 2\n y_data = (np.random.rand(n) - mid) * y_range * 2\n return x_data, y_data\n\n\ndef create_data_frame(x_data, y_data):\n df = pd.DataFrame({'x': x_data, 'y': y_data})\n near_boundary = np.empty(len(df), dtype=bool)\n for i in range(len(df)):\n x, y = df.x[i], df.y[i]\n if evaluator.is_near_boundary(x, y):\n near_boundary[i] = True\n else:\n near_boundary[i] = False\n df = df.assign(near_boundary=near_boundary)\n return df[near_boundary]\n\n\ndef 
write_csv(df):\n filename = 'batman_approx.csv'\n df.to_csv(filename, columns=['x', 'y'], index_label=False, index=False)\n\n\nif __name__ == '__main__':\n # plt = prepare_canvas()\n x_data, y_data = prepare_data()\n df = create_data_frame(x_data, y_data)\n write_csv(df)\n","sub_path":"python/scipy/misc/tim_burton/batman_approx_create_data_tim_burton.py","file_name":"batman_approx_create_data_tim_burton.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"549021774","text":"import os\nimport sys\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nmodule_path = os.path.abspath(os.path.join('./'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nimport model_functions_PhaseI as functions\nimport model_morphology as momo\n\n\ndef Phase_I_eval_model(model, testing_data_set, optimizer):\n\n model.eval()\n \n dists = []\n syn_scs = []\n ant_scs = []\n labs = []\n \n syn_criterion = functions.Loss_Synonymy()\n ant_criterion = functions.Loss_Antonymy()\n Lm_criterion = functions.Loss_Labels()\n\n with torch.no_grad():\n test_losses = []\n syn_test_losses = []\n ant_test_losses = []\n Lm_test_losses = []\n \n syn_test_acc_list = []\n ant_test_acc_list = []\n irrel_test_acc_list = []\n \n test_total = 0\n ant_el_count = 0\n ant_correct = 0\n syn_el_count = 0\n syn_correct = 0\n \n #for confusion matrix\n syn_predictions = []\n ant_predictions = []\n syn_true = []\n ant_true = []\n\n for i, (inputs, labels) in enumerate(testing_data_set):\n \n inputs = torch.from_numpy(np.asarray(inputs)).long()\n labels = torch.from_numpy(np.asarray(labels)).long()\n \n em1, em2, S1_out, S2_out, A1_out, A2_out, synonymy_score, antonymy_score = model(inputs)\n \n #gather cosine distances, scores, and labels for phase II\n cos_tens = F.cosine_similarity(em1, em2, dim = 1)\n\n dists.extend(cos_tens.tolist())\n syn_scs.extend(synonymy_score.squeeze().tolist())\n ant_scs.extend(antonymy_score.squeeze().tolist())\n labs.extend(labels.tolist())\n \n #calculate loss per batch of testing data\n syn_test_loss = syn_criterion(S1_out, S2_out, synonymy_score)\n ant_test_loss = ant_criterion(S2_out, A1_out, antonymy_score)\n Lm_test_loss = Lm_criterion(synonymy_score, antonymy_score, labels)\n \n test_loss = syn_test_loss + ant_test_loss + Lm_test_loss\n \n test_losses.append(test_loss.item())\n syn_test_losses.append(syn_test_loss.item())\n ant_test_losses.append(ant_test_loss.item())\n Lm_test_losses.append(Lm_test_loss.item())\n\n test_total += 1 \n \n #accuracy function\n acc = functions.Phase1Accuracy()\n accuracies = acc(synonymy_score, antonymy_score, labels)\n \n #TODO: add accuracy list for irrelevant pairs (accuracies[2])\n syn_test_acc_list.append(accuracies[0])\n ant_test_acc_list.append(accuracies[1])\n irrel_test_acc_list.append(accuracies[2])\n \n #get predictions and labels for confusion matrix\n preds, truths = acc.confusion(synonymy_score, antonymy_score, labels)\n \n syn_predictions.extend(preds[:,0].tolist())\n ant_predictions.extend(preds[:,1].tolist())\n syn_true.extend(truths[:,0].tolist())\n ant_true.extend(truths[:,1].tolist())\n\n test_epoch_loss = sum(test_losses)/test_total\n syn_test_epoch_loss = sum(syn_test_losses)/test_total\n ant_test_epoch_loss = sum(ant_test_losses)/test_total\n Lm_test_epoch_loss = sum(Lm_test_losses)/test_total\n\n syn_epoch_acc = sum(syn_test_acc_list)/test_total\n ant_epoch_acc = sum(ant_test_acc_list)/test_total\n 
irrel_epoch_acc = sum(irrel_test_acc_list)/test_total\n \n # test Phase II\n p2_path = '/Users/wesleytatum/Desktop/post_doc/data/phase2_xgb_model.model'\n\n p2 = momo.Phase2XGBoost(p2_path)\n preds = p2.test_pred(dists, syn_scs, ant_scs, labs)\n\n p2_accs = p2.accuracy(preds, labs)\n\n\n# print(f\"Total Epoch Testing Loss is: {test_epoch_loss}\")\n# print(f\"Total Epoch Antonym Testing Accuracy is: {ant_epoch_acc}\")\n# print(f\"Total Epoch Synonym Testing Accuracy is: {syn_epoch_acc}\") \n \n \n return test_epoch_loss, syn_test_epoch_loss, ant_test_epoch_loss, Lm_test_epoch_loss, syn_epoch_acc, ant_epoch_acc, irrel_epoch_acc, syn_true, syn_predictions, ant_true, ant_predictions, p2_accs\n\n","sub_path":"Ant_Syn_Scraping/syn_ant_modules/model_testing_PhaseI.py","file_name":"model_testing_PhaseI.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"276264734","text":"from sovrin.common.identity import Identity\n\n\nclass Caching:\n \"\"\"\n Mixin for agents to manage caching.\n\n Dev notes: Feels strange to inherit from WalletedAgent, but self-typing\n doesn't appear to be implemented in Python yet.\n \"\"\"\n def getIdentity(self, identifier):\n identity = Identity(identifier=identifier)\n req = self.wallet.requestIdentity(identity,\n sender=self.wallet.defaultId)\n self.client.submitReqs(req)\n return req\n","sub_path":"sovrin/agent/caching.py","file_name":"caching.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"10408671","text":"## Load and parse an XML file\n\n# how to parse XML\nimport xml.etree.ElementTree as ET\n\n# parse the XML file\ntree = ET.parse('_2_000001.xml')\n\n# get the parsed root element\nannotation = tree.getroot()\n\n# get the text values\nfilename = annotation[1].text\nsize = [annotation[4][0].text, annotation[4][1].text]\n\nbndboxs = []\nfor i in range(6,len(annotation)):\n obj_names = annotation[i][0]\n bndbox_tags = annotation[i][4]\n\n if obj_names.text == \"person\":\n bndbox = [bndbox_tags[n].text for n in range(4)]\n bndboxs.append(bndbox)\n\nprint(filename)\nprint(size)\nprint(bndboxs)\n","sub_path":"Python_Make_xml/_2_analyze_xml_Only_person.py","file_name":"_2_analyze_xml_Only_person.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"54939219","text":"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data loader and evaluation functions for sintel.\"\"\"\n\nimport os\n\nimport matplotlib\n# pylint:disable=g-import-not-at-top\nmatplotlib.use('Agg') # Non-interactive plots do not need tk\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage\nimport tensorflow as tf\n\nfrom uflow import uflow_plotting\nfrom uflow.data import data_utils\n# pylint:disable=unused-import\nfrom uflow.data.data_utils import evaluate\nfrom uflow.data.data_utils 
import list_eval_keys\nfrom uflow.data_conversion_scripts import conversion_utils\n\n\n# pylint:disable=unused-argument\ndef make_dataset(path,\n mode,\n seq_len=2,\n shuffle_buffer_size=0,\n height=None,\n width=None,\n resize_gt_flow=True,\n seed=41):\n \"\"\"Make a dataset for training or evaluating Uflow in uflow_main.\n\n Args:\n path: string, in the format of 'some/path/dir1,dir2,dir3' to load all files\n in some/path/dir1, some/path/dir2, and some/path/dir3.\n mode: string, one of ['train-clean', 'eval-clean', 'train-final',\n 'eval-final] to switch between loading train / eval data from the clean or\n final renderings.\n seq_len: length of sequence to return. Currently only 2 is supported.\n shuffle_buffer_size: int, size of the shuffle buffer; no shuffling if 0.\n height: int, height for reshaping the images (only if mode==train)\n width: int, width for reshaping the images (only if mode==train)\n resize_gt_flow: bool, indicates if ground truth flow should be resized\n during traing or not (only relevant for supervised training)\n seed: int, controls the shuffling of the data shards.\n\n Returns:\n A tf.dataset of image sequences and ground truth flow for training\n (see parse functions above). The dataset still requires batching\n and prefetching before using it to make an iterator.\n \"\"\"\n\n if ',' in path:\n paths = []\n l = path.split(',')\n paths.append(l[0])\n for subpath in l[1:]:\n subpath_length = len(subpath.split('/'))\n basedir = '/'.join(l[0].split('/')[:-subpath_length])\n paths.append(os.path.join(basedir, subpath))\n else:\n paths = [path]\n\n # Generate list of filenames.\n # pylint:disable=g-complex-comprehension\n files = [os.path.join(d, f) for d in paths for f in tf.io.gfile.listdir(d)]\n if 'train' in mode:\n rgen = np.random.RandomState(seed=seed)\n rgen.shuffle(files)\n num_files = len(files)\n\n ds = tf.data.Dataset.from_tensor_slices(files)\n # Create a nested dataset.\n ds = ds.map(tf.data.TFRecordDataset)\n # Parse each element of the subsequences and unbatch the result\n # Do interleave rather than flat_map because it is much faster.\n include_flow = 'eval' in mode or 'sup' in mode\n include_occlusion = 'occlusion' in mode\n include_invalid = 'invalid' in mode\n # pylint:disable=g-long-lambda\n ds = ds.interleave(\n lambda x: x.map(\n lambda y: data_utils.parse_data(\n y,\n include_flow=include_flow,\n height=height,\n width=width,\n include_occlusion=include_occlusion,\n include_invalid=include_invalid,\n resize_gt_flow=resize_gt_flow,\n gt_flow_shape=[436, 1024, 2]),\n num_parallel_calls=tf.data.experimental.AUTOTUNE),\n cycle_length=1 if 'video' in mode else min(10, num_files),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if shuffle_buffer_size:\n # Shuffle image pairs.\n ds = ds.shuffle(buffer_size=shuffle_buffer_size)\n # Put repeat after shuffle.\n if 'train' in mode:\n ds = ds.repeat()\n # Prefetch a number of batches because reading new ones can take much longer\n # when they are from new files.\n ds = ds.prefetch(10)\n\n return ds\n\n\ndef benchmark_iterator(data_dir, output_dir):\n \"\"\"Iterates through Sintel test data filepaths for benchmarking.\"\"\"\n for data_type in ['clean', 'final']:\n output_folder = os.path.join(output_dir, data_type)\n if not tf.io.gfile.exists(output_folder):\n tf.io.gfile.mkdir(output_folder)\n\n input_folder = os.path.join(data_dir, 'test', data_type)\n\n # Directory with images.\n image_folders = sorted(tf.io.gfile.glob(input_folder + '/*'))\n\n if not image_folders:\n raise ValueError('Must pass 
path to raw MPI-Sintel-complete dataset. '\n 'Got instead: {}'.format(data_dir))\n\n def sort_by_frame_index(x):\n return int(os.path.basename(x).split('_')[1].split('.')[0])\n\n for image_folder in image_folders:\n images = tf.io.gfile.glob(image_folder + '/*png')\n\n images = sorted(images, key=sort_by_frame_index)\n image_pairs = zip(images[:-1], images[1:])\n\n for images in image_pairs:\n img1_path, img2_path = images\n tf.compat.v1.logging.info('im1 path: %s, im2 path: %s', img1_path,\n img2_path)\n image1_data = scipy.ndimage.imread(img1_path)\n image2_data = scipy.ndimage.imread(img2_path)\n folder_name = os.path.basename(os.path.dirname(img1_path))\n flow_output_path = os.path.join(output_folder, folder_name)\n if not tf.io.gfile.exists(flow_output_path):\n tf.io.gfile.mkdir(flow_output_path)\n frame_num = os.path.basename(img1_path).replace('_', '').replace(\n 'png', 'flo')\n flow_output_path = os.path.join(flow_output_path, frame_num)\n yield image1_data, image2_data, flow_output_path\n\n\ndef benchmark(inference_fn,\n height,\n width,\n sintel_path,\n plot_dir='',\n num_plots=100):\n \"\"\"Produce benchmark data.\"\"\"\n\n assert plot_dir\n output_path = os.path.join(plot_dir, 'sintel-upload-ready')\n if not tf.io.gfile.exists(output_path):\n tf.io.gfile.mkdir(output_path)\n\n it = benchmark_iterator(sintel_path, output_path)\n\n plot_count = 0\n\n for index, test_batch in enumerate(it):\n\n tf.compat.v1.logging.info('Writing results for image number %d...', index)\n\n (image1, image2, output_path) = test_batch\n image1 = image1.astype(np.float32) / 255\n image2 = image2.astype(np.float32) / 255\n\n flow = inference_fn(image1, image2, input_height=height, input_width=width)\n flow = flow.numpy()\n\n # Sintel expects horizontal and then vertical flow\n flow = flow[Ellipsis, ::-1]\n conversion_utils.write_flow(output_path, flow)\n\n if plot_dir and plot_count < num_plots:\n plot_count += 1\n num_rows = 2\n num_columns = 2\n\n # pylint:disable=cell-var-from-loop\n def subplot_at(column, row):\n plt.subplot(num_rows, num_columns, 1 + column + row * num_columns)\n\n def post_imshow(label):\n plt.xlabel(label)\n plt.xticks([])\n plt.yticks([])\n\n plt.figure('eval', [10, 10])\n plt.clf()\n\n subplot_at(0, 0)\n plt.imshow(image1)\n post_imshow(label='Image1')\n\n subplot_at(1, 0)\n plt.imshow(image2)\n post_imshow(label='Image2')\n\n subplot_at(0, 1)\n plt.imshow(uflow_plotting.flow_to_rgb(flow))\n post_imshow(label='Prediction')\n\n plt.subplots_adjust(\n left=0.02,\n bottom=0.02,\n right=1 - 0.02,\n top=1,\n wspace=0.01,\n hspace=0.01)\n\n filename = 'benchmark_{}.png'.format(plot_count)\n uflow_plotting.save_and_close(os.path.join(plot_dir, filename))\n","sub_path":"uflow/data/sintel.py","file_name":"sintel.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"143398104","text":"class Node:\n def __init__(self, value, left=None, right=None):\n self.value = value \n self.left = left\n self.right = right\n\nclass BinaryTree:\n def __init__(self):\n self.root = None\n\n def pre_order(self):\n # root > left > right\n output = []\n\n def walk(node):\n if not node:\n return\n output.append(node.value)\n walk(node.left)\n walk(node.right)\n walk(self.root)\n return output\n\n def in_order(self):\n # left>root>right\n output = []\n\n def walk(node):\n if not node:\n return\n walk(node.left)\n output.append(node.value)\n walk(node.right)\n walk(self.root)\n return output\n\n def 
post_order(self):\n # left > right > root\n output = []\n\n def walk(node):\n if not node:\n return\n walk(node.left)\n walk(node.right)\n output.append(node.value)\n walk(self.root)\n return output\n\n def find_maximum_value(self, root):\n # https://www.geeksforgeeks.org/find-maximum-or-minimum-in-binary-tree/\n # if the tree does not have a root, return None\n if not root:\n return None\n\n # assigns current root.value as the current max_value\n max_value = root.value\n\n # recursively find the maximum value in the left subtree\n left_value = self.find_maximum_value(root.left)\n\n # recursively find the maximum value in the right subtree\n right_value = self.find_maximum_value(root.right)\n\n # if we're checking the left root value, if the left root value is greater than the maximum value, then the new maximum value is equal to the left root value\n if root.left:\n if left_value > max_value:\n max_value = left_value\n\n # if we're checking the right root value, if the right root value is greater than the max value, then the max value is equal to the right root value\n if root.right:\n if right_value > max_value:\n max_value = right_value\n\n return max_value\n \n\n\n\n\n\n\n\n\n\n\nclass BST(BinaryTree):\n def add(self, value):\n def walk(node, node_to_add):\n if not node:\n return\n if node_to_add.value < node.value:\n if not node.left:\n node.left = node_to_add\n else:\n walk(node.left, node_to_add)\n else:\n if not node.right:\n node.right = node_to_add\n else:\n walk(node.right, node_to_add)\n \n n = Node(value)\n\n if not self.root:\n self.root = n\n return\n\n walk(self.root, n)\n\n def contains(self, value):\n # walk down the tree, branching by comparison, until the value is found or we run out of nodes\n node = self.root\n while node and node.value != value:\n node = node.left if value < node.value else node.right\n return node is not None\n \n \n\n\n\n\nbst = BST()\nbst.add(4)\nbst.add(7)\nbst.add(5)\nbst.add(9)\nbst.add(2)\nbst.add(30)\nbst.add(-1)\n# bst.find_maximum_value(root)\nprint(bst.pre_order())\nprint(bst.post_order())\nprint(bst.in_order())\n# print(bst.contains(675))\n\n","sub_path":"data_structures/tree/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"27977861","text":"'''Builds a 2-layer fully-connected neural network'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\n\ndef inference(images, image_pixels, hidden1_units, hidden2_units, y_data, classes, reg_constant=0):\n '''Build the model up to where it may be used for inference.\n\n Args:\n images: Images placeholder (input data).\n image_pixels: Number of pixels per image.\n hidden1_units: Size of the first hidden layer.\n hidden2_units: Size of the second hidden layer.\n y_data: Labels placeholder (one-hot), fed back into the second layer.\n classes: Number of possible image classes/labels.\n reg_constant: Regularization constant (default 0).\n\n Returns:\n logits: Output tensor containing the computed logits.\n '''\n w0 = tf.get_variable(\n name='w0',\n shape=[image_pixels, hidden1_units],\n initializer=tf.truncated_normal_initializer(\n stddev=1.0 / np.sqrt(float(image_pixels))),\n regularizer=tf.contrib.layers.l2_regularizer(reg_constant))\n b0 = tf.Variable(tf.zeros([hidden1_units]), name='b0')\n w1 = tf.get_variable('w1', [hidden1_units, hidden2_units],\n initializer=tf.truncated_normal_initializer(\n stddev=1.0 / np.sqrt(float(hidden2_units))),\n regularizer=tf.contrib.layers.l2_regularizer(reg_constant))\n\n b1 = tf.Variable(tf.zeros([hidden2_units]), name='b1')\n w2 = tf.get_variable('w2', [hidden2_units, classes],\n initializer=tf.truncated_normal_initializer(\n stddev=1.0 / np.sqrt(float(classes))),\n regularizer=tf.contrib.layers.l2_regularizer(reg_constant))\n b2 = 
tf.Variable(tf.zeros([classes]), name='b2')\n\n\n # Define the layer's output\n hidden1 = tf.nn.relu(tf.matmul(images, w0) + b0)\n hidden2 = tf.nn.relu(tf.matmul(hidden1, w1) + b1 + tf.matmul(y_data, tf.transpose(w2)))\n hidden1 = tf.nn.relu(tf.matmul(images, w0) + b0 + tf.matmul(hidden2, tf.transpose(w1)))\n logits = tf.matmul(hidden2, w2) + b2\n return logits, hidden1, w0\n\n\ndef loss(y_pre, y_data):\n '''Calculates the loss from logits and labels.\n\n Args:\n logits: Logits tensor, float - [batch size, number of classes].\n labels: Labels tensor, int64 - [batch size].\n\n Returns:\n loss: Loss tensor of type float.\n '''\n\n with tf.name_scope('Loss'):\n # Operation to determine the cross entropy between y_pre and y_data\n\n # Operation for the loss function\n loss = tf.losses.softmax_cross_entropy(onehot_labels=y_data, logits=y_pre) + tf.add_n(tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES))\n # +tf.losses.mean_squared_error(x_data, x_pre)\n\n # Add a scalar summary for the loss\n tf.summary.scalar('loss', loss)\n\n return loss\n\n\ndef training(loss, learning_rate):\n '''Sets up the training operation.\n\n Creates an optimizer and applies the gradients to all trainable variables.\n\n Args:\n loss: Loss tensor, from loss().\n learning_rate: The learning rate to use for gradient descent.\n\n Returns:\n train_step: The op for training.\n '''\n\n # Create a variable to track the global step\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Create a gradient descent optimizer\n # (which also increments the global step counter)\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n loss, global_step=global_step)\n\n return train_step\n\n\ndef evaluation(y_pre, y_data):\n '''Evaluates the quality of the logits at predicting the label.\n\n Args:\n logits: Logits tensor, float - [batch size, number of classes].\n labels: Labels tensor, int64 - [batch size].\n\n Returns:\n accuracy: the percentage of images where the class was correctly predicted.\n '''\n\n correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y_data, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n # Summary operation for the accuracy\n tf.summary.scalar('train_accuracy', accuracy)\n\n return accuracy\n","sub_path":"lmser_cifar/lmser_cifar10/lmser2_inference.py","file_name":"lmser2_inference.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"439834833","text":"\"\"\"This module contains the (experimental) Classifier class.\"\"\"\n\nimport operator\nimport typing\n\nimport numpy\n\nfrom .. import core\nfrom ..anomaly_detection import CHAODA\nfrom ..utils import helpers\n\nlogger = helpers.make_logger(__name__)\n\n\nclass Classifier:\n \"\"\"A CLAM Classifier.\n\n This is very experimental.\n \"\"\"\n\n def __init__(\n self,\n labels: numpy.ndarray,\n metric_spaces: typing.Sequence[core.Space],\n **kwargs, # noqa: ANN003\n ) -> None:\n \"\"\"Creates and initializes a CLAM Classifier.\n\n Lower scores are better.\n\n Args:\n metric_spaces: See `CHAODA`.\n labels: 1d array of labels. dtype must be numpy.uint.\n kwargs: These are the same as the kwargs for `CHAODA`.\n \"\"\"\n if labels.dtype != numpy.uint:\n msg = f\"labels must have dtype {numpy.uint}. 
Got {labels.dtype} instead.\"\n raise ValueError(\n msg,\n )\n\n self.__metric_spaces = metric_spaces\n self.__labels = list(map(int, labels))\n self.__unique_labels = set(self.__labels)\n self.__kwargs = kwargs\n self.__bowls: dict[int, CHAODA] = {}\n\n @property\n def labels(self) -> list[int]:\n \"\"\"Returns the labels in the dataset.\"\"\"\n return self.__labels\n\n @property\n def unique_labels(self) -> set[int]:\n \"\"\"Returns the unique labels in the dataset.\"\"\"\n return self.__unique_labels\n\n def build(self) -> \"Classifier\":\n \"\"\"Fits the Classifier to the data and returns the fitted object.\"\"\"\n for label in self.__unique_labels:\n logger.info(f\"Fitting CHAODA object for label {label} ...\")\n\n indices = [i for i, _l in enumerate(self.__labels) if _l == label]\n metric_spaces = [\n s.subspace(indices, f\"{s.data.name}__{label}\")\n for s in self.__metric_spaces\n ]\n\n self.__bowls[label] = CHAODA(metric_spaces, **self.__kwargs).build()\n\n return self\n\n def rank_single(self, query: typing.Any) -> list[tuple[int, float]]: # noqa: ANN401\n \"\"\"Predicts the class rankings for a single query.\"\"\"\n label_scores = []\n for label, bowl in self.__bowls.items():\n score = bowl.predict_single(query)\n label_scores.append((label, score))\n\n return label_scores\n\n def rank(self, queries: core.Dataset) -> list[list[tuple[int, float]]]:\n \"\"\"Predicts the class rankings for a set of queries.\"\"\"\n label_scores = []\n for i in range(queries.cardinality):\n logger.info(f\"Predicting class for query {i} ...\")\n label_scores.append(self.rank_single(queries[i]))\n return label_scores\n\n def predict_single(self, query: typing.Any) -> tuple[int, float]: # noqa: ANN401\n \"\"\"Predicts the label for a single query.\"\"\"\n label_scores = self.rank_single(query)\n best_label, best_score = min(label_scores, key=operator.itemgetter(1))\n return best_label, best_score\n\n def predict(self, queries: core.Dataset) -> tuple[list[int], list[float]]:\n \"\"\"Predicts the label for a set of queries.\"\"\"\n label_scores = []\n for i in range(queries.cardinality):\n logger.info(f\"Predicting class for query {i + 1}/{queries.cardinality} ...\")\n label_scores.append(self.predict_single(queries[i]))\n\n [labels, scores] = list(zip(*label_scores))\n return labels, scores # type: ignore[return-value]\n\n\n__all__ = [\n \"Classifier\",\n]\n","sub_path":"py-clam/abd_clam/classification/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"276294471","text":"from genericpath import exists\nimport json\nimport os, sys\nfrom pathlib import Path\n\nimport click\nimport git\nfrom datapackage import Package\nimport inquirer\nfrom loguru import logger\nfrom mkdocs.commands.serve import serve as _serve\n\nfrom dataherb.cmd.configs import load_dataherb_config\nfrom dataherb.cmd.create import describe_dataset\nfrom dataherb.cmd.sync_git import upload_dataset_to_git\nfrom dataherb.cmd.sync_s3 import upload_dataset_to_s3\nfrom dataherb.core.base import Herb\nfrom dataherb.flora import Flora\nfrom dataherb.parse.model_json import STATUS_CODE, MetaData\nfrom dataherb.serve.save_mkdocs import SaveMkDocs\n\n__CWD__ = os.getcwd()\n\n\n@click.group(invoke_without_command=True)\n@click.pass_context\ndef dataherb(ctx):\n if ctx.invoked_subcommand is None:\n click.echo(\"Hello {}\".format(os.environ.get(\"USER\", \"\")))\n click.echo(\"Welcome to DataHerb.\")\n else:\n 
click.echo(\"Loading Service: %s\" % ctx.invoked_subcommand)\n\n\n@dataherb.command()\ndef configure():\n \"\"\"\n configure dataherb\n \"\"\"\n\n home = Path.home()\n config_path = home / \".dataherb/config.json\"\n\n if config_path.exists():\n is_overwite = click.confirm(f\"Config file ({config_path}) already exists. Overwrite?\", default=False)\n if is_overwite:\n click.echo(\"Overwriting config file...\")\n else:\n click.echo(\"Skipping...\")\n sys.exit(0)\n\n if not config_path.parent.exists():\n config_path.parent.mkdir(parents=True)\n\n ###############\n # Ask questions\n ###############\n questions = [\n inquirer.Path(\n \"workdir\",\n message=\"Where should I put all the datasets and flora database? An empty folder is recommended.\",\n # path_type=inquirer.Path.DIRECTORY,\n normalize_to_absolute_path=True\n ),\n inquirer.Text(\n \"default_flora\",\n message=\"How would you name the default flora? Please keep the default value if this is not clear to you.\",\n default=\"flora\"\n )\n ]\n\n answers = inquirer.prompt(questions)\n\n config = {\n \"workdir\": answers.get(\"workdir\"),\n \"default\": {\n \"flora\": answers.get(\"default_flora\")\n }\n }\n\n logger.debug(f\"config: {config}\")\n\n with open(config_path, \"w\") as f:\n json.dump(config, f, indent=4)\n\n click.echo(f\"The dataherb config has been saved to {config_path}!\")\n\n\n\nCONFIG = load_dataherb_config()\nlogger.debug(CONFIG)\nWD = CONFIG.get(\"workdir\", \".\")\nwhich_flora = CONFIG.get(\"default\", {}).get(\"flora\")\nif which_flora:\n which_flora = str(Path(WD) / \"flora\" / Path(which_flora + \".json\"))\n logger.debug(f\"Using flora path: {which_flora}\")\n if not os.path.exists(which_flora):\n raise Exception(f\"flora config {which_flora} does not exist\")\n\n\n\n\n@dataherb.command()\n@click.argument(\"keywords\", required=False)\n@click.option(\"--id\", \"-i\", required=False)\n@click.option(\"--flora\", \"-f\", default=which_flora)\ndef search(flora, id=None, keywords=None):\n \"\"\"\n search datasets on DataHerb by keywords or id\n \"\"\"\n SHOW_KEYS = [\"name\", \"description\", \"contributors\"]\n fl = Flora(flora=flora)\n if not id:\n click.echo(\"Searching Herbs in DataHerb Flora ...\")\n results = fl.search(keywords)\n click.echo(f\"Found {len(results)} results\")\n if not results:\n click.echo(f\"Could not find dataset related to {keywords}\")\n else:\n for result in results:\n result_metadata = result.get(\"herb\").metadata\n click.echo(f'DataHerb ID: {result_metadata.get(\"id\")}')\n click.echo(result_metadata)\n else:\n click.echo(f\"Fetching Herbs {id} in DataHerb Flora ...\")\n result = fl.herb(id)\n if not result:\n click.echo(f\"Could not find dataset with id {id}\")\n else:\n result_metadata = result.metadata\n click.echo(f'DataHerb ID: {result_metadata.get(\"id\")}')\n click.echo(result_metadata)\n\n\n@dataherb.command()\n@click.option(\"--flora\", \"-f\", default=which_flora)\n@click.option(\"--workdir\", \"-w\", default=WD, required=True)\n@click.option(\"--dev_addr\", \"-a\", metavar=\"\")\ndef serve(flora, workdir, dev_addr):\n fl = Flora(flora=flora)\n mk = SaveMkDocs(flora=fl, workdir=workdir)\n mk.save_all()\n\n mkdocs_config = str(Path(WD) / \"serve\" / \"mkdocs.yml\")\n\n click.echo(\"Open http://localhost:8000\")\n _serve(config_file=mkdocs_config, dev_addr=dev_addr)\n\n\n@dataherb.command()\n@click.argument(\"id\", required=True)\n@click.option(\"--flora\", \"-f\", default=which_flora)\ndef download(id, flora):\n \"\"\"\n download dataset using id\n \"\"\"\n\n fl = 
Flora(flora=flora)\n click.echo(f\"Fetching Herbs {id} in DataHerb Flora ...\")\n result = fl.herb(id)\n if not result:\n click.echo(f\"Could not find dataset with id {id}\")\n else:\n result_metadata = result.metadata\n click.echo(f'Downloading DataHerb ID: {result_metadata.get(\"id\")}')\n result_uri = result_metadata.get(\"uri\")\n result_id = result_metadata.get(\"id\")\n dest_folder = str(Path(WD) / result_id)\n if os.path.exists(dest_folder):\n click.echo(f\"Can not download dataset to {dest_folder}: folder exists.\\n\")\n\n is_pull = click.confirm(f\"Would you like to pull from remote?\")\n if is_pull:\n repo = git.Repo(dest_folder)\n repo.git.pull()\n else:\n click.echo(\n f\"Please go to the folder {dest_folder} and sync your repo manually.\"\n )\n else:\n # dest_folder_parent = f\"./{result_repository.split('/')[0]}\"\n os.makedirs(dest_folder)\n repo = git.repo.base.Repo.clone_from(result_uri, to_path=dest_folder)\n # git.Git(dest_folder).clone(result_uri)\n\n\n@dataherb.command()\n@click.confirmation_option(\n prompt=f\"Your current working directory is {__CWD__}\\n\"\n \"A dataherb.json file will be created right here.\\n\"\n \"Are you sure this is the correct path?\"\n)\n@click.option(\"--flora\", \"-f\", default=which_flora)\ndef create(flora):\n \"\"\"\n creates metadata for current dataset\n \"\"\"\n\n use_existing_dpkg = False\n\n if (Path(__CWD__) / \"dataherb.json\").exists():\n use_existing_dpkg = click.confirm(\n f\"A dataherb.json file already exists in {__CWD__}. \"\n f\"Shall we use the existing dataherb.json?\",\n default=True,\n show_default=True,\n )\n\n fl = Flora(flora=flora)\n md = MetaData(folder=__CWD__)\n\n if use_existing_dpkg:\n logger.debug(\"Using existing dataherb.json ...\")\n md.load()\n else:\n dataset_basics = describe_dataset()\n print(dataset_basics)\n md.metadata.update(dataset_basics)\n\n pkg = Package()\n pkg.infer(\"**/*.csv\")\n pkg_descriptor = {\"datapackage\": pkg.descriptor}\n\n md.metadata.update(pkg_descriptor)\n\n md.create()\n\n click.echo(\n \"The dataherb.json file has been created inside \\n\"\n f\"{__CWD__}\\n\"\n \"Please review the dataherb.json file and update other necessary fields.\"\n )\n\n hb = Herb(md.metadata)\n fl.add(hb)\n\n click.echo(f\"Added {hb.metadata['id']} into the flora.\")\n\n\n@dataherb.command()\n@click.confirmation_option(\n prompt=f\"Your current working directory is {__CWD__}\\n\"\n \"All contents in this folder will be uploaded.\\n\"\n \"Are you sure this is the correct path?\"\n)\ndef upload():\n \"\"\"\n upload dataset in the current folder to the remote destination\n \"\"\"\n\n md = MetaData(folder=__CWD__)\n md.load()\n\n md_uri = md.metadata[\"uri\"]\n\n is_upload = click.confirm(\n f\"The dataset in the current folder\\n\"\n f\"{__CWD__}\\n\"\n f\"will be uploaded to {md_uri}\",\n default=True,\n show_default=True,\n )\n\n if not is_upload:\n click.echo(\"Upload aborted.\")\n else:\n click.echo(f\"Uploading dataset to {md_uri} ...\")\n if md.metadata.get(\"source\") == \"s3\":\n upload_dataset_to_s3(__CWD__, md_uri)\n elif md.metadata.get(\"source\") == \"git\":\n upload_dataset_to_git(__CWD__, md_uri)\n\n\n@dataherb.command()\n@click.option(\"-v\", \"--verbose\", type=str, default=\"warning\")\ndef validate(verbose):\n \"\"\"\n validates the existing metadata for current dataset\n \"\"\"\n\n click.secho(\n f\"Your current working directory is {__CWD__}\\n\"\n \"I will look for the .dataherb folder right here.\\n\",\n bold=True,\n )\n\n ALL_VERBOSE = [\"warning\", \"error\", \"all\"]\n if verbose 
not in ALL_VERBOSE:\n logger.error(f\"-v or --verbose can only take one of {ALL_VERBOSE}\")\n\n md = MetaData()\n\n validate = md.validate()\n\n def echo_summary(key, value_dict, bg=None, fg=None):\n if bg is None:\n bg = \"black\"\n if fg is None:\n fg = \"white\"\n return click.secho(\n f' {key}: {value_dict.get(\"value\")}\\n'\n f' STATUS: {value_dict.get(\"status\")};\\n'\n f' MESSAGE: {value_dict.get(\"message\")}',\n bg=bg,\n fg=fg,\n )\n\n click.secho(\"Summary: validating metadata:\\n- data:\", bold=True)\n for val in validate.get(\"data\"):\n for val_key, val_val in val.items():\n if (val_val.get(\"status\") == STATUS_CODE[\"SUCCESS\"]) and (verbose == \"all\"):\n echo_summary(val_key, val_val, bg=\"green\")\n elif (val_val.get(\"status\") == STATUS_CODE[\"WARNING\"]) and (\n verbose == \"warning\"\n ):\n echo_summary(val_key, val_val, bg=\"magenta\")\n elif (val_val.get(\"status\") == STATUS_CODE[\"ERROR\"]) and (\n verbose in [\"warning\", \"error\"]\n ):\n echo_summary(val_key, val_val, bg=\"red\")\n\n click.secho(\n \"The .dataherb folder and metadata.yml file \\n\"\n f\"{__CWD__}\\n\"\n \" has been validated. Please read the summary and fix the errors.\",\n bold=True,\n )\n\n\n\n\n\nif __name__ == \"__main__\":\n fl = Flora()\n pass\n","sub_path":"dataherb/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":10094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"224300527","text":"import socket\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nhost = socket.gethostname()\nport=8086\ns.connect(('127.0.0.1',port))\nwhile True :\n\tmessage = input(\"message: \")\n\ts.send(message.encode())\n\n\tprint(\"Waiting for reply\")\n\treply = s.recv(1024)\n\tprint(\"Received\",repr(reply))\n\tif message == \"exit\":\n\t\ts.close()\n\t\tbreak\n","sub_path":"cliente_5.py","file_name":"cliente_5.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"398751602","text":"import sys\r\n\r\n\r\ndef distTo(_from, _to):\r\n return _from - _to if _from >= _to else _to - _from - 1\r\n\r\nn = sys.stdin.readline().strip()\r\nif int(n) % 25 == 0:\r\n print(0)\r\n exit(0)\r\nl = list(n)\r\nzero, secondZero, two, five, seven = False, False, False, False, False\r\nd = dict(zip([0, -1, 2, 5, 7], [0]*5))\r\nvals = []\r\nfor i in range(1, len(l)+1):\r\n if (zero and secondZero) or (zero and five) or (two and five) or (five and seven):\r\n vals = [(x, d[x]) for x in d if d[x] > 0]\r\n break\r\n if zero and l[-i] == '0':\r\n d[-1] = i - 1\r\n secondZero = True\r\n if not zero and l[-i] == '0':\r\n d[0] = i - 1\r\n zero = True\r\n if not five and l[-i] == '5':\r\n d[5] = i - 1\r\n five = True\r\n if not two and l[-i] == '2':\r\n d[2] = i - 1\r\n two = True\r\n if not seven and l[-i] == '7':\r\n d[7] = i - 1\r\n seven = True\r\n\r\nif zero and secondZero:\r\n vals = [(x, d[x]) for x in d if x in [0, -1]]\r\nelif zero and five:\r\n vals = [(x, d[x]) for x in d if x in [0, 5]]\r\nelif two and five:\r\n vals = [(x, d[x]) for x in d if x in [2, 5]]\r\nelif five and seven:\r\n vals = [(x, d[x]) for x in d if x in [5, 7]]\r\n\r\nsum = 0\r\nprint(\"vals:\",vals)\r\nif len(vals) < 2:\r\n print(-1)\r\n# Case 1\r\nelif zero and (five or secondZero):\r\n daMin = vals[0] if vals[0][1] < vals[1][1] else vals[1]\r\n daMax = vals[1] if vals[0][1] < vals[1][1] else vals[0]\r\n if daMin[0] != 0 and (vals[0][1] != 0 and vals[1][1] != 1) and (vals[0][1] != 
1 and vals[1][1] != 0):\r\n sum += 1\r\n sum += distTo(vals[0][1], 0) + distTo(vals[1][1], 1) if vals[0][0] == 0 else distTo(vals[1][1], 0) + distTo(vals[0][1], 1)\r\n print(sum)\r\nelif five and (two or seven):\r\n daMin = vals[0] if vals[0][1] < vals[1][1] else vals[1]\r\n daMax = vals[1] if vals[0][1] < vals[1][1] else vals[0]\r\n if daMin[0] != five and (vals[0][1] != 0 and vals[1][1] != 1 and vals[0][1] != 1 and vals[1][1] != 0):\r\n sum += 1\r\n sum += distTo(vals[0][1], 0) + distTo(vals[1][1], 1) if vals[0][0] == 5 else distTo(vals[1][1], 0) + distTo(vals[0][1], 1)\r\n print(sum)\r\n# elif vals[0][0] == -1:\r\n# if vals[0][1] == 0 or vals[1][1] == 0:\r\n# sum = vals[1][1] - 1 if vals[0][1] == 0 else vals[0][1] - 1\r\n# else:\r\n \r\n# elif vals[0][0] == 5 and vals[0][1] == 0 or vals[0][0] == 0 and vals[0][1] == 0:\r\n# sum = vals[1][1]\r\n# print(sum)\r\n# elif vals[1][0] == 5 and vals[1][1] == 0 or vals[1][0] == 0 and vals[1][1] == 0:\r\n# sum = vals[0][1]\r\n# print(sum)\r\n# else:\r\n# sum = vals[0][1] + vals[1][1] - 1\r\n# print(sum)\r\n# if vals[0][0] == vals[1][0]:\r\n# sum = vals[0][1] + vals[1][1] - 1\r\n# elif vals[0][0] == 0 or vals[1][0] == 0:\r\n# sum = vals[0][1] + vals[1][1] - 1\r\n# else:\r\n#\r\n","sub_path":"CodeForces/988E.py","file_name":"988E.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"486693958","text":"class Solution:\n def maxProfit(self, prices: List[int]) -> int:\n N = len(prices)\n if N < 2:\n return 0\n \n # buy = [-float('inf')] * N \n # sell = [0] * N\n # for i in range(N):\n # buy[i] = max((buy[i - 1] if i > 0 else -float('inf')), (sell[i - 2] if i > 1 else 0) - prices[i])\n # sell[i] = max((sell[i - 1] if i > 0 else 0), (buy[i - 1] if i > 0 else -float('inf')) + prices[i])\n # return sell[-1]\n \n buy = float('-inf')\n sell = sell1 = sell2 = 0\n for i in range(N):\n buy = max(buy, sell2 - prices[i])\n sell = max(sell1, buy + prices[i])\n \n sell2, sell1 = sell1, sell\n # buy1 = buy\n return sell\n ","sub_path":"Python/309_Best Time to Buy and Sell Stock with Cooldown.py","file_name":"309_Best Time to Buy and Sell Stock with Cooldown.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"327516879","text":"#Instances - A herd of turtles\r\n#-----------------------------\r\n\r\nimport turtle\r\n\r\nwn = turtle.Screen() #set up the window and its attributes\r\nwn.bgcolor(\"green\")\r\n\r\ntess = turtle.Turtle() #create tess and some attributes\r\ntess.color(\"hotpink\")\r\ntess.pensize(5)\r\n\r\nalex = turtle.Turtle() #create alex\r\n\r\n\r\ntess.forward(80) #Let tess draw an equilateral triangle\r\ntess.left(120)\r\ntess.forward(80)\r\ntess.left(120)\r\ntess.forward(80)\r\ntess.left(120) #complete the triangle\r\n\r\ntess.right(180) #turn tess around\r\ntess.forward(80) #move her away from the origin\r\n\r\nalex.forward(50) #make alex draw a square\r\nalex.left(90)\r\nalex.forward(50)\r\nalex.left(90)\r\nalex.forward(50)\r\nalex.left(90)\r\nalex.forward(50)\r\nalex.left(90)\r\n\r\nwn.exitonclick()\r\n\r\n###Draw a Circle\r\n###-------------\r\n##\r\n##import turtle\r\n##\r\n##wn = turtle.Screen()\r\n##mosta = turtle.Turtle()\r\n##\r\n##mosta.circle(60)\r\n","sub_path":"Lec Turtle/instance_turtle.py","file_name":"instance_turtle.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"343410811","text":"# ---------------------------------------------------------------------\n# DLink.DGS3100.get_arp\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2014 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetarp import IGetARP\nimport re\n\n\nclass Script(BaseScript):\n name = \"DLink.DGS3100.get_arp\"\n interface = IGetARP\n rx_line = re.compile(\n r\"^(?P<interface>\\S+)\\s+(?P<ip>[0-9]+\\.[0-9]+\"\n r\"\\.[0-9]+\\.[0-9]+)\\s+(?P<mac>\\S+)\\s+\\S+\\s*$\",\n re.MULTILINE,\n )\n\n def execute(self):\n r = []\n for match in self.rx_line.finditer(self.cli(\"show arpentry\")):\n r += [\n {\n \"ip\": match.group(\"ip\"),\n \"mac\": match.group(\"mac\"),\n \"interface\": match.group(\"interface\"),\n }\n ]\n return r\n","sub_path":"sa/profiles/DLink/DGS3100/get_arp.py","file_name":"get_arp.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"394837466","text":"import re\nfrom typing import Dict, List\n\nRE_TEXT = re.compile('([^а-яёa-z0-9\\n\\r])+', re.IGNORECASE)\nRE_MULTI_SPACE = re.compile('[ ]+')\nRE_MULTI_EOL = re.compile('([\\n\\r])+')\nRE_PAGE_NUM = re.compile('(\\d)+')\n\n\ndef clean_txt(s: str, multi_eol: bool = True) -> str:\n \"\"\"\n Clean page text from all but letters, numbers, hyphens and EOLs\n\n :param s: text\n :param multi_eol: remove multi EOL characters\n :return: preprocessed text\n \"\"\"\n txt = s.lower()\n txt = RE_TEXT.sub(' ', txt)\n txt = RE_MULTI_SPACE.sub(' ', txt)\n if multi_eol:\n txt = RE_MULTI_EOL.sub('\\n', txt)\n return txt\n \n\ndef find_occurrences(s: str, ch: str, shift: int = 0) -> List[int]:\n \"\"\"\n Find all character occurrences in string\n\n :param s: string\n :param ch: character\n :param shift: shift for addition to all indexes\n :return: indexes in str\n \"\"\"\n return [i+shift for i, letter in enumerate(s) if letter == ch]\n\n\ndef extract_real_page_num(p: int, line: str, verbose: bool = False) -> int:\n \"\"\"\n Extract real page number using OCR and previous page numbers\n NB! 
this function modifies `real_page_nums` dict\n\n :param p: page file index\n :param line: line with number\n :param verbose: print verbose messages\n :return: real page number (None if not extracted)\n \"\"\"\n if len(line) < 5:\n m = RE_PAGE_NUM.search(line)\n real_page_num = None if m is None else int(m.group(0))\n if verbose:\n print(f'[{p}|{real_page_num}] ', end='')\n else:\n real_page_num = None\n if verbose:\n print(f'[{p}|{line[:10]}] ', end='')\n return real_page_num\n\n\ndef fix_real_page_nums_old(real_page_nums: Dict[int, int]) -> Dict[int, int]:\n \"\"\"\n Fix real page numbers: remove noise and OCR misses - Deprecated\n\n :param real_page_nums: parsed real page numbers\n :return: fixed real page numbers\n \"\"\"\n page_indices = sorted(real_page_nums.keys())\n idx_page_nums, fixed_real_page_nums = {}, {}\n for i, p in enumerate(page_indices):\n real_page_num = real_page_nums[p]\n if real_page_num is not None and real_page_num > 0:\n if i > 0 and real_page_num in fixed_real_page_nums:\n # fix incorrect\n real_page_num = idx_page_nums[i-1] + 1\n else:\n if i > 0:\n # fix incorrect\n real_page_num = idx_page_nums[i-1] + 1\n else:\n real_page_num = 1\n\n fixed_real_page_nums[p] = real_page_num\n idx_page_nums[i] = real_page_num\n \n # fix single noise numbers\n if i > 1 and idx_page_nums[i-2] == real_page_num - 2 and idx_page_nums[i-1] != real_page_num - 1:\n fixed_real_page_nums[page_indices[i-1]] = real_page_num - 1\n idx_page_nums[i-1] = real_page_num - 1\n \n return fixed_real_page_nums\n\n\ndef fix_real_page_nums(real_page_nums: Dict[int, int], verbose: bool = False) -> Dict[int, int]:\n \"\"\"\n Fix real page numbers: remove noise and OCR misses - new chains algorithm\n\n :param real_page_nums: parsed real page numbers\n :param verbose: print verbose messages\n :return: fixed real page numbers\n \"\"\"\n if verbose:\n print('REAL_PAGE_NUM_FIX: total pages', len(real_page_nums))\n # 1.searching chains\n chains = []\n page_indices = range(len(real_page_nums))\n for page_idx in page_indices:\n real_page_num = real_page_nums[page_idx]\n if real_page_num is not None:\n chain_found = False\n for chain in chains:\n last_page_idx, last_real_page_num = chain[-1]\n if page_idx - last_page_idx == real_page_num - last_real_page_num:\n # exist chain\n chain_found = True\n chain.append((page_idx, real_page_num))\n if not chain_found:\n # new chain\n chain = [(page_idx, real_page_num)]\n chains.append(chain)\n if verbose:\n print('REAL_PAGE_NUM_FIX: chains found', len(chains))\n # 2.selecting biggest chain for each page as reper point\n page_repers = []\n for page_idx in page_indices:\n chain_selected = None\n for chain in chains:\n start_page_idx = chain[0][0]\n finish_page_idx = chain[-1][0]\n if start_page_idx <= page_idx <= finish_page_idx and \\\n (chain_selected is None or len(chain) > len(chain_selected)):\n chain_selected = chain\n page_repers.append(chain_selected[0] if chain_selected is not None else None)\n if verbose:\n pages_found_count = len([x for x in page_repers if x is not None])\n print('REAL_PAGE_NUM_FIX:', len(set(page_repers)), 'chains selected for', pages_found_count, 'pages')\n # 3.fixing page nums using repers (current and last)\n fixed_real_page_nums = {}\n page_repers_non_empty = [x for x in page_repers if x is not None]\n last_reper = page_repers_non_empty[0] if page_repers_non_empty else None\n if last_reper is not None:\n for page_idx, page_reper in zip(page_indices, page_repers):\n reper = page_reper if page_reper is not None else last_reper\n last_page_idx, 
last_real_page_num = reper\n delta = page_idx - last_page_idx\n fixed_real_page_nums[page_idx] = last_real_page_num + delta\n last_reper = reper\n if verbose:\n print('REAL_PAGE_NUM_FIX: fixed real page nums', fixed_real_page_nums)\n \n return fixed_real_page_nums\n","sub_path":"prototype/page_reader_utils.py","file_name":"page_reader_utils.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245039979","text":"from django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom stdimage.validators import MinSizeValidator, MaxSizeValidator\nfrom stdimage.models import StdImageField\n\nclass Price(models.Model):\n title = models.CharField(verbose_name = u'Заголовок', max_length=200, default='')\n detail_url = models.CharField(verbose_name = u'URL', max_length=100, default='')\n meta_title = models.CharField(verbose_name = u'meta title', max_length=70, default='')\n meta_description = models.CharField(verbose_name = u'meta description', max_length=160, default='')\n\n img = StdImageField(\n upload_to = '',\n blank = True,\n validators=[\n MinSizeValidator(100, 100),\n MaxSizeValidator(4096, 4096)],\n variations = {\n 'l': (600, 400),\n 'm': (300, 200)\n }\n )\n\t\n img_alt = models.CharField(verbose_name = u'alt', max_length=160, default='')\n img_title = models.CharField(verbose_name = u'title', max_length=160, default='')\n\n price_one = models.DecimalField(verbose_name = u'Цена', max_digits=7, decimal_places=2, default='0')\n price_two = models.DecimalField(verbose_name = u'Цена переклейки', max_digits=7, decimal_places=2, default='0')\n price_three = models.DecimalField(verbose_name = u'Цена скупки', max_digits=7, decimal_places=2, default='0')\n\t\n description = models.TextField(verbose_name = u'Описание')\n\t\n created_date = models.DateTimeField(\n\t\t\tverbose_name = u'Создано в', \n default=timezone.now)\n published_date = models.DateTimeField(\n\t\t\tverbose_name = u'Опубликовать в', \n blank=True, null=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n","sub_path":"sm_index/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"531282660","text":"from app.dbclass import db, Node1, Node2, Node3, Converge, ConvergeSum, StandardQuestions, SimilarQuestions, ThresholdValue\nimport pymysql\n\n\n\"\"\"\n增加节点表中的节点\n\"\"\"\n\n\ndef add_node_data(node1data=None, node2data=None, node3data=None):\n if (len(node1data) > 0) and 0 == len(db.session.query(Node1).filter_by(nodename=node1data).all()):\n maxnode1id = db.session.query(Node1.id).all()\n db.session.add(Node1(id=(max(maxnode1id)[0]+1), nodename=node1data))\n db.session.commit()\n if (len(node1data) > 0) and (len(node2data) > 0) and 0 == len(db.session.query(Node2).filter_by(nodename=node2data).all())\\\n and 1 == len(db.session.query(Node1).filter_by(nodename=node1data).all()):\n maxnode2id = db.session.query(Node2.id).all()\n node1id = db.session.query(Node1).filter_by(nodename=node1data).first()\n db.session.add(Node2(id=(max(maxnode2id)[0] + 1), nodename=node2data, prev_node=node1id.id))\n db.session.commit()\n if (len(node2data) > 0) and (len(node3data) > 0) and 1 == len(db.session.query(Node2).filter_by(nodename=node2data).all()):\n maxnode3id = db.session.query(Node3.id).all()\n node2id = 
db.session.query(Node2).filter_by(nodename=node2data).first()\n db.session.add(Node3(id=(max(maxnode3id)[0]+1), nodename=node3data, prev_node=node2id.id))\n db.session.commit()\n return\n\n\n\"\"\"\n删除节点表中的节点\n\"\"\"\n\n\ndef delete_node_data(node1data=None, node2data=None, node3data=None):\n if (len(node3data) > 0) and 1 == len(db.session.query(Node3).filter_by(nodename=node3data).all()):\n db.session.query(Node3).filter_by(nodename=node3data).delete()\n db.session.commit()\n if (len(node3data) > 0) and (len(node2data) > 0) and len(db.session.query(Node3).filter_by(nodename=node3data).all()) > 0:\n node2id = db.session.query(Node2).filter_by(nodename=node2data).first()\n db.session.query(Node3).filter_by(nodename=node3data, prev_node=node2id.id).delete()\n db.session.commit()\n if (len(node2data) > 0) and 1 == len(db.session.query(Node2).filter_by(nodename=node2data).all()):\n node2id = db.session.query(Node2).filter_by(nodename=node2data).first()\n db.session.query(Node3).filter_by(prev_node=node2id.id).delete()\n db.session.query(Node2).filter_by(nodename=node2data).delete()\n db.session.commit()\n if (len(node1data) > 0) and 1 == len(db.session.query(Node1).filter_by(nodename=node1data).all()):\n node1id = db.session.query(Node1).filter_by(nodename=node1data).first()\n node2idlist = db.session.query(Node2).filter_by(prev_node=node1id.id).all()\n for node2id in node2idlist:\n db.session.query(Node3).filter_by(prev_node=node2id.id).delete()\n db.session.query(Node2).filter_by(id=node2id.id).delete()\n db.session.query(Node1).filter_by(nodename=node1data).delete()\n db.session.commit()\n return\n\n\n\"\"\"\n获取节点表\n\"\"\"\n\n\ndef get_nodedata():\n nodetablename = Node1\n children = get_next_nodedata(nodetablename)\n data = {\"name\": '甜橙', \"children\": children}\n return data\n\n\ndef get_next_nodedata(nodetablename, nodename_id=None):\n data = []\n nodetablename_temp = [Node1, Node2, Node3]\n nodetableindex = nodetablename_temp.index(nodetablename)\n if nodetablename != nodetablename_temp[-1]:\n if nodename_id:\n nodedata = db.session.query(nodetablename.nodename).filter_by(prev_node=nodename_id).all()\n else:\n nodedata = db.session.query(nodetablename.nodename)\n\n for name in nodedata:\n nodeid = db.session.query(nodetablename.id).filter_by(nodename=name[0]).all()\n children = get_next_nodedata(nodetablename=nodetablename_temp[nodetableindex+1], nodename_id=nodeid[0][0])\n data_temp = {\"name\": name, \"children\": children}\n data.append(data_temp)\n else:\n nodedata = db.session.query(nodetablename.nodename).filter_by(prev_node=nodename_id).all()\n for name in nodedata:\n data_temp = {\"name\": name}\n data.append(data_temp)\n return data\n\n\n\"\"\"\n从聚类数据库中删除聚类信息,并提交标准句,相似句入库\n\"\"\"\n\n\ndef maintainingsentencedatabase(operateid=None, sentenceid=None, duiid=None, productnodeid=None, businessnodeid=None):\n convergesentence = Converge.query.filter_by(id=sentenceid).first()\n sentence = convergesentence.sentence\n if operateid == 1 and sentence is not None:\n Converge.query.filter_by(sentence=sentence).delete()\n db.session.commit()\n ConvergeSum.query.filter_by(duiid=duiid).update({ConvergeSum.sumvalue: ConvergeSum.sumvalue-1})\n db.session.commit()\n print('delete ok')\n if operateid == 2 and sentence is not None and duiid is not None and \\\n productnodeid not in (None, 0) and businessnodeid not in (None, 0):\n db.session.flush()\n productnode = Node2.query.filter_by(id=productnodeid).first()\n businessnode = Node3.query.filter_by(id=businessnodeid).first()\n if 0 < 
len(StandardQuestions.query.filter_by(duiid=duiid).all()):\n standardsentence = StandardQuestions.query.filter_by(duiid=duiid).first()\n if sentence == standardsentence.sentence:\n sentence = None\n else:\n pass\n\n if 16 > len(SimilarQuestions.query.filter_by(standardquestionid=standardsentence.id).all()) and \\\n 0 == len(SimilarQuestions.query.filter_by(sentence=sentence).all()) and sentence is not None:\n db.session.add(SimilarQuestions(sentence=sentence, standardquestionid=standardsentence.id))\n db.session.commit()\n print('similarquestion add ok')\n else:\n print('same sentence or space full')\n pass\n else:\n db.session.add(StandardQuestions(sentence=sentence, duiid=duiid, productnode=productnode.nodename, businessnode=businessnode.nodename))\n db.session.commit()\n print('standardquestion add OK')\n return\n\n\n\"\"\"\n维护问句,进行标准问句的所属产品节点/业务节点/标准句/相似句的修改\n\"\"\"\n\n\ndef maintainingquestions(did=None, productnodeid=None, businessnodeid=None, question=None, questionselecte=None, similarsentence=None):\n if productnodeid not in (None, 0):\n productnode = Node2.query.filter_by(id=productnodeid).first()\n StandardQuestions.query.filter_by(duiid=did).update({StandardQuestions.productnode: productnode.nodename})\n db.session.commit()\n if businessnodeid not in (None, 0):\n businessnode = Node3.query.filter_by(id=businessnodeid).first()\n StandardQuestions.query.filter_by(duiid=did).update({StandardQuestions.businessnode: businessnode.nodename})\n db.session.commit()\n if question not in (None, '') and questionselecte == 1:\n StandardQuestions.query.filter_by(duiid=did).update({StandardQuestions.sentence: question})\n db.session.commit()\n standardsentence = StandardQuestions.query.filter_by(duiid=did).first()\n if question not in (None, '') and questionselecte == 2 and \\\n 16 > len(SimilarQuestions.query.filter_by(standardquestionid=standardsentence.id).all()) and \\\n 0 == len(SimilarQuestions.query.filter_by(sentence=question).all()):\n try:\n db.session.add(SimilarQuestions(sentence=question, standardquestionid=standardsentence.id))\n db.session.commit()\n except:\n print(\"Error: can't add similarquestion\")\n if similarsentence is not None:\n standardsentence = db.session.query(StandardQuestions).filter_by(duiid=did).first()\n db.session.query(SimilarQuestions).filter_by(sentence=similarsentence, standardquestionid=standardsentence.id).delete()\n db.session.commit()\n return\n\n\n\"\"\"\n阈值表更新\n\"\"\"\n\n\ndef thresholdvalueupdata(productname=None, thresholdselecte=None, thresholdvalue=None):\n if productname is not None and thresholdselecte == 1 and thresholdvalue not in (None, ''):\n ThresholdValue.query.filter_by(productname=productname).update({ThresholdValue.threshold7value: thresholdvalue})\n if productname is not None and thresholdselecte == 2 and thresholdvalue not in (None, ''):\n ThresholdValue.query.filter_by(productname=productname).update({ThresholdValue.threshold30value: thresholdvalue})\n if productname is not None and thresholdselecte == 3 and thresholdvalue not in (None, ''):\n ThresholdValue.query.filter_by(productname=productname).update({ThresholdValue.threshold30periodvalue: thresholdvalue})\n db.session.commit()\n return\n\n\n\"\"\"\n阈值表增删\n\"\"\"\n\n\ndef thresholdvalueaddanddelete(productname=None, thresholdname=None, number1=None, number2=None, number3=None):\n if thresholdname not in [None, 0]:\n ThresholdValue.query.filter_by(id=thresholdname).delete()\n db.session.commit()\n if 1 > len(ThresholdValue.query.filter_by(productname=productname).all()) and 
number1 is not None \\\n and number2 is not None and number3 is not None:\n db.session.add(ThresholdValue(productname=productname, threshold7value=number1, threshold30value=number2, threshold30periodvalue=number3))\n db.session.commit()\n","sub_path":"app/nodetool.py","file_name":"nodetool.py","file_ext":"py","file_size_in_byte":9324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"322752987","text":"import json #Работаем с json\n\nimport codecs #Читаем с учетом кодировки\n\nimport files\n\nfrom tree import *\n\nimport os\n\naccesses = {}\ndefAccesses = {}\n\ndef load(accessesFilePath):\n global accesses\n\n accesses = files.loadFile(accessesFilePath)\n\ndef create():\n global accesses\n\n accesses = defAccesses\n \ndef save(accessesFilePath):\n files.saveFile(accesses, accessesFilePath)\n\ndef check(access):\n if accesses.get(access) == None:\n return False\n return True\n\ndef add(access, command):\n if check(access) and not accesses[access].count(command):\n accesses[access].append(command)\n return True\n return False\n\ndef remove(access, command):\n if check(access) and accesses[access].count(command) > 0:\n accesses[access].remove(command)\n return True\n else:\n return False\n\ndef getCommands(access):\n\n if check(access):\n result = f'У вас рівень {access}. Ви маєте такі команди:'\n\n for command in accesses[access]:\n result += f'\\n\\t{command}'\n\n else:\n result = f'Немає команд для рівня {access}\\n'\n\n return result\n\ndef checkCommand(access, command):\n if check(access) and accesses[access].count(command) > 0:\n return True\n else:\n return False\n\nload(accessesFilePath)","sub_path":"sources/py/accesses.py","file_name":"accesses.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"583966755","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated_at: 2016-3-18\n@author: Gaga.Yan\n\"\"\"\nfrom naas.api.dboperation.tunnel_dao import NaasTunnelDao\nfrom naas.db.object.models import Tunnel\nfrom naas.api.util.handlerUtil import *\nfrom oslo_log import log as logging\nfrom naas.api.util.pagePackage import *\nfrom naas.api.mq.producer import TunnelMQ\n\nLOG = logging.getLogger(__name__)\n\n\ndef add(tunnelJson, headers=None, **kwargs):\n \"\"\"创建专线通道\"\"\"\n try:\n session = sa.get_session()\n if isinstance(tunnelJson, str):\n tunnelJson = json.loads(tunnelJson)\n tunnelDict = tunnelJson['tunnel']\n # 获取tunnel距离\n distance = get_tunnel_distance(session, tunnelDict['endpointA'], tunnelDict['endpointB'])\n tunnelDict['distance'] = distance\n tunnel = Tunnel()\n getObjFromJson(tunnel, tunnelDict)\n tunnelOP = NaasTunnelDao(tunnel)\n tunnelOP.add(session)\n # 向scheduler发布创建专用通道MQ\n tunnel_id = tunnelOP.Tunnel.tunnel_id\n TunnelMQ('create_tunnel', tunnel_id).send_message()\n return outSuccess(\"tunnel\", getJsonFromObj(tunnelOP.Tunnel))\n except Exception as e:\n LOG.error(str(e))\n return outError(\"创建专线通道失败!\")\n\n\ndef detail(tunnel_id, show_detail_dict=None):\n \"\"\"专用通道详情\"\"\"\n try:\n tunnel = Tunnel()\n tunnel.tunnel_id = tunnel_id\n tunnelOP = NaasTunnelDao(tunnel)\n tunnelOP.detail()\n return outSuccess(\"tunnel\", getJsonFromObj(tunnelOP.Tunnel), show_detail_dict=show_detail_dict)\n except Exception as e:\n LOG.error(str(e))\n return outError(\"取得专线通道详情失败!\")\n\n\ndef update(tunnelJson, headers=None, **kwargs):\n \"\"\"更新专线通道\"\"\"\n try:\n session = sa.get_session()\n if isinstance(tunnelJson, str):\n tunnelJson = 
json.loads(tunnelJson)\n tunnelDict = tunnelJson['tunnel']\n tunnel = Tunnel()\n tunnel.tunnel_id = tunnelDict['tunnel_id']\n tunnelOP = NaasTunnelDao(tunnel)\n if kwargs.get('action') == 'create_tunnel':\n tunnelOP.update(tunnelDict, session)\n if kwargs.get('action') == 'start_billing':\n allow_billing = session.execute(\"SELECT switch_port.plugged FROM switch_port JOIN tunnel_point_switch_port \"\n \"ON switch_port.switch_port_id = tunnel_point_switch_port.switch_port_id JOIN \"\n \"tunnel_point ON tunnel_point_switch_port.tunnel_point_id = tunnel_point.tunnel_point_id \"\n \"WHERE tunnel_point.tunnel_id=%d AND switch_port.plugged='true'\" % int(str(tunnelDict['tunnel_id']))).first()\n if allow_billing:\n tunnelOP.update(tunnelDict, session)\n outSuccess(\"tunnel\", getJsonFromObj(tunnelOP.Tunnel))\n else:\n outError(\"专线通道所在交换机端口未插线!\")\n # 向scheduler发布带宽调整、终止、恢复message\n if kwargs.get('action') and kwargs.get('action') != 'start_billing':\n if kwargs.get('action') == 'change_bandwidth':\n TunnelMQ(kwargs.get('action'), tunnelDict['tunnel_id'], bandwidth=tunnelDict['bandwidth']).send_message()\n else:\n TunnelMQ(kwargs.get('action'), tunnelDict['tunnel_id']).send_message()\n return outSuccess(\"tunnel\", getJsonFromObj(tunnelOP.Tunnel))\n except Exception as e:\n LOG.error(str(e))\n return outError(\"更新专线通道失败!\")\n\n\ndef listByPage(conditionJson, likeConditionjson=None, page_no=1, page_size=15, headers=None, **kwargs):\n \"\"\"\n 分页查询策略\n \"\"\"\n try:\n session = sa.get_session()\n tunnel = NaasTunnelDao()\n result = tunnel.getTunnelByPage(tunnel.getQueryByCondition(conditionJson, likeConditionjson), page_no,\n page_size, session)\n data = []\n if result:\n for i in result:\n middle_data = getJsonFromObj(i)\n row1 = session.execute(\"SELECT name FROM endpoint WHERE endpoint_id=%d\" % i.endpointA).first()\n row2 = session.execute(\"SELECT name FROM endpoint WHERE endpoint_id=%d\" % i.endpointB).first()\n row3 = session.execute(\n \"SELECT name FROM network_type WHERE network_type_id=%d\" % i.network_type_id).first()\n row4 = session.execute(\"SELECT name,company FROM keystone.user WHERE \"\n \"keystone.user.default_project_id=:project_id\",\n {'project_id': i.project_id}).first()\n middle_data.update(\n {'endpointA_name': row1.name if row1 else None, 'endpointB_name': row2.name if row2 else None,\n 'network_type': row3.name if row3 else None, 'user_name': row4.name if row4 else None,\n 'company': row4.company if row4 else None})\n data.append(middle_data)\n return outSuccess(\"tunnelList\", pagePackage(\"tunnels\", data, page_no=result.no,\n page_size=result.page_size, total=result.total))\n except Exception as e:\n LOG.error(str(e))\n return outError(\"取得专线通道列表失败!\")\n\n\ndef delete(tunnel_id):\n try:\n tunnel = Tunnel()\n tunnel.tunnel_id = tunnel_id\n tunnelOP = NaasTunnelDao(tunnel)\n tunnelOP.delete()\n # 向scheduler发布删除专用通道MQ\n TunnelMQ('del_tunnel', tunnel_id).send_message()\n return outSuccess(\"msg\", \"删除专线通道成功!\")\n except Exception as e:\n LOG.error(str(e))\n return outError(\"删除专线通道失败!\")\n","sub_path":"naas/api/handler/tunnel_handler.py","file_name":"tunnel_handler.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"540850624","text":"\"\"\"\nCopyright 2020 The Magma Authors.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree.\n\nUnless required by applicable law or agreed to in writing, 
software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nimport time\n\nimport s1ap_types\nimport s1ap_wrapper\n\n\nclass TestSecondaryPdnRejectMultipleSessionsNotAllowedPerApn(\n unittest.TestCase\n):\n def setUp(self):\n self._s1ap_wrapper = s1ap_wrapper.TestWrapper()\n\n def tearDown(self):\n self._s1ap_wrapper.cleanup()\n\n def test_secondary_pdn_reject_multiple_sessions_not_allowed_per_apn(self):\n \"\"\" Attach a single UE + send standalone PDN connectivity\n request with the same apn as the default apn + attach reject\n + detach\"\"\"\n num_ue = 1\n\n self._s1ap_wrapper.configUEDevice(num_ue)\n req = self._s1ap_wrapper.ue_req\n ue_id = req.ue_id\n\n print(\n \"************************* Running End to End attach for UE id \",\n ue_id,\n )\n # Attach\n self._s1ap_wrapper.s1_util.attach(\n ue_id,\n s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,\n s1ap_types.ueAttachAccept_t,\n )\n\n # Wait on EMM Information from MME\n self._s1ap_wrapper._s1_util.receive_emm_info()\n\n # Send PDN Connectivity Request\n apn = \"magma.ipv4\"\n self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn)\n # Receive PDN Connectivity reject\n response = self._s1ap_wrapper.s1_util.get_response()\n self.assertEqual(\n response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value\n )\n\n print(\"Sleeping for 5 seconds\")\n time.sleep(5)\n print(\n \"************************* Running UE detach (switch-off) for \",\n \"UE id \",\n ue_id,\n )\n # Now detach the UE\n self._s1ap_wrapper.s1_util.detach(\n ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"lte/gateway/python/integ_tests/s1aptests/test_secondary_pdn_reject_multiple_sessions_not_allowed_per_apn.py","file_name":"test_secondary_pdn_reject_multiple_sessions_not_allowed_per_apn.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"299143355","text":"#!/usr/bin/env python\nimport rospy\nfrom nav_msgs.msg import Odometry\nimport math , time\nfrom sensor_msgs.msg import Imu\nrospy.init_node('wheel_listener', anonymous=True) \n\n\nodom1Fix_pub = rospy.Publisher('/imu1_gps_fix', Odometry, queue_size=10)\nodom1Float_pub = rospy.Publisher('/imu1_gps_float', Odometry, queue_size=10)\nodom2Fix_pub = rospy.Publisher('/imu2_gps_fix', Odometry, queue_size=10)\nodom2Float_pub = rospy.Publisher('/imu2_gps_float', Odometry, queue_size=10)\n\n\nodomFix1Msg = Odometry()\nodomFloat1Msg = Odometry()\nodomFix2Msg = Odometry()\nodomFloat2Msg = Odometry()\n\norientation_list =[0.0,0.0,0.0,1.0]\norientation_list2=[0.0,0.0,0.0,1.0]\n\nfix=Odometry()\nflooat=Odometry()\n\ndef callback_odom(data): \n global fix\n fix=data\n\ndef callback_odom2(data): \n global flooat\n flooat=data\n\n\n\ndef callback_IMU(data): \n global orientation_list\n orientation_list = [data.orientation.x, data.orientation.y, data.orientation.z, data.orientation.w] \n\ndef callback_IMU2(data2): \n global orientation_list2\n orientation_list2 = [data2.orientation.x, data2.orientation.y, data2.orientation.z, data2.orientation.w] \n\n\ndef publish(event):\n global orientation_list ,orientation_list2 , fix , flooat\n data1=fix\n odomFix1Msg.header.frame_id=\"map\"\n 
odomFix1Msg.pose.pose.position.x= data1.pose.pose.position.x\n odomFix1Msg.pose.pose.position.y= data1.pose.pose.position.y\n odomFix1Msg.pose.pose.orientation.x= 0.0 #orientation_list[0]\n odomFix1Msg.pose.pose.orientation.y= 0.0 #orientation_list[1]\n odomFix1Msg.pose.pose.orientation.z= orientation_list[2]\n odomFix1Msg.pose.pose.orientation.w= orientation_list[3]\n odomFix1Msg.header.stamp= rospy.Time.now()\n odom1Fix_pub.publish(odomFix1Msg)\n\n odomFix2Msg.header.frame_id=\"map\"\n odomFix2Msg.pose.pose.position.x= data1.pose.pose.position.x\n odomFix2Msg.pose.pose.position.y= data1.pose.pose.position.y\n odomFix2Msg.pose.pose.orientation.x= 0.0 #orientation_list2[0]\n odomFix2Msg.pose.pose.orientation.y= 0.0 #orientation_list2[1]\n odomFix2Msg.pose.pose.orientation.z= orientation_list2[2]\n odomFix2Msg.pose.pose.orientation.w= orientation_list2[3]\n odomFix2Msg.header.stamp= rospy.Time.now()\n odom2Fix_pub.publish(odomFix2Msg)\n\n data2=flooat\n odomFloat1Msg.header.frame_id=\"map\"\n odomFloat1Msg.pose.pose.position.x= data2.pose.pose.position.x\n odomFloat1Msg.pose.pose.position.y= data2.pose.pose.position.y\n odomFloat1Msg.pose.pose.orientation.x= 0.0 #orientation_list[0]\n odomFloat1Msg.pose.pose.orientation.y= 0.0 #orientation_list[1]\n odomFloat1Msg.pose.pose.orientation.z= orientation_list[2]\n odomFloat1Msg.pose.pose.orientation.w= orientation_list[3]\n odomFloat1Msg.header.stamp= rospy.Time.now()\n\n odomFloat2Msg.header.frame_id=\"map\"\n odomFloat2Msg.pose.pose.position.x= data2.pose.pose.position.x\n odomFloat2Msg.pose.pose.position.y= data2.pose.pose.position.y\n odomFloat2Msg.pose.pose.orientation.x= 0.0 #orientation_list2[0]\n odomFloat2Msg.pose.pose.orientation.y= 0.0 #orientation_list2[1]\n odomFloat2Msg.pose.pose.orientation.z= orientation_list2[2]\n odomFloat2Msg.pose.pose.orientation.w= orientation_list2[3]\n odomFloat2Msg.header.stamp= rospy.Time.now()\n\n odom1Float_pub.publish(odomFloat1Msg)\n odom2Float_pub.publish(odomFloat2Msg)\n\n\ndef listener():\n rospy.Subscriber(\"/odometry/gps\", Odometry, callback_odom)\n rospy.Subscriber(\"/odometry2/gps\", Odometry, callback_odom2)\n rospy.Subscriber(\"/imu_enu_new\", Imu, callback_IMU)\n rospy.Subscriber(\"/imu2_enu_new\", Imu, callback_IMU2)\n rospy.Timer(rospy.Duration(0.5), publish)\n rospy.spin()\n\nif __name__ == '__main__':\n listener()\n","sub_path":"src/others/rebublish/gps-2Imu.py","file_name":"gps-2Imu.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"15402430","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 31 10:30:41 2020\r\n\r\n@author: Izak de Heer\r\n\"\"\"\r\nimport sys\r\nimport json\r\nimport os\r\n\r\nimport numpy as np\r\nimport time\r\nimport datetime\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\nimport matplotlib.pyplot as plt\r\nimport skimage.external.tifffile as skimtiff\r\nimport scipy.optimize\r\nimport skimage.draw\r\nfrom CoordinatesManager.backend import readRegistrationImages\r\nfrom CoordinatesManager.backend.polynomialTransformation import polynomialRegression\r\nfrom CoordinatesManager import CoordinateTransformations\r\nfrom CoordinatesManager import DMDActuator\r\nfrom NIDAQ.DAQoperator import DAQmission\r\n\r\nfrom HamamatsuCam.HamamatsuActuator import CamActuator\r\nfrom SampleStageControl.Stagemovement_Thread import StagemovementRelativeThread, StagemovementAbsoluteThread\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nclass GalvoRegistrator:\r\n def __init__(self, *args, **kwargs):\r\n self.cam = CamActuator()\r\n self.cam.initializeCamera()\r\n \r\n def registration(self, grid_points_x = 3, grid_points_y = 3):\r\n galvothread = DAQmission()\r\n readinchan = []\r\n \r\n x_coords = np.linspace(-10, 10, grid_points_x+2)[1:-1]\r\n y_coords = np.linspace(-10, 10, grid_points_y+2)[1:-1]\r\n \r\n xy_mesh = np.reshape(np.meshgrid(x_coords, y_coords), (2, -1), order='F').transpose()\r\n \r\n galvo_coordinates = xy_mesh\r\n camera_coordinates = np.zeros((galvo_coordinates.shape))\r\n \r\n for i in range(galvo_coordinates.shape[0]):\r\n \r\n galvothread.sendSingleAnalog('galvosx', galvo_coordinates[i,0])\r\n galvothread.sendSingleAnalog('galvosy', galvo_coordinates[i,1])\r\n time.sleep(1)\r\n \r\n image = self.cam.SnapImage(0.06)\r\n plt.imsave(os.getcwd()+'/CoordinatesManager/Registration_Images/2P/image_'+str(i)+'.png', image)\r\n \r\n camera_coordinates[i,:] = readRegistrationImages.gaussian_fitting(image)\r\n \r\n print('Galvo Coordinate')\r\n print(galvo_coordinates)\r\n print('Camera coordinates')\r\n print(camera_coordinates)\r\n del galvothread\r\n self.cam.Exit()\r\n \r\n transformation = CoordinateTransformations.polynomial2DFit(camera_coordinates, galvo_coordinates, order=1)\r\n \r\n print('Transformation found for x:')\r\n print(transformation[:,:,0])\r\n print('Transformation found for y:')\r\n print(transformation[:,:,1])\r\n return transformation\r\n\r\nclass DMDRegistator:\r\n def __init__(self, DMD, *args, **kwargs):\r\n self.DMD = DMD\r\n self.cam = CamActuator()\r\n self.cam.initializeCamera()\r\n \r\n def registration(self, laser = '640', grid_points_x = 2, grid_points_y = 3, registration_pattern = 'circles'):\r\n x_coords = np.linspace(0, 768, grid_points_x+2)[1:-1]\r\n y_coords = np.linspace(0, 1024, grid_points_y+2)[1:-1]\r\n \r\n x_mesh, y_mesh = np.meshgrid(x_coords, y_coords)\r\n \r\n x_coords = np.ravel(x_mesh)\r\n y_coords = np.ravel(y_mesh)\r\n \r\n dmd_coordinates = np.stack((x_coords, y_coords), axis=1)\r\n \r\n camera_coordinates = np.zeros(dmd_coordinates.shape)\r\n \r\n for i in range(dmd_coordinates.shape[0]):\r\n x = int(dmd_coordinates[i,0])\r\n y = int(dmd_coordinates[i,1])\r\n \r\n if registration_pattern == 'squares': \r\n mask = create_registration_image_touching_squares(x,y)\r\n else:\r\n mask = create_registration_image_circle(x,y)\r\n \r\n self.DMD.send_data_to_DMD(mask)\r\n self.DMD.start_projection()\r\n \r\n image = self.cam.SnapImage(0.04)\r\n plt.imsave(os.getcwd()+'/CoordinatesManager/Registration_Images/TouchingSquares/image_'+str(i)+'.png', image)\r\n camera_coordinates[i, :] = readRegistrationImages.touchingCoordinateFinder(image, method = 'curvefit')\r\n \r\n self.DMD.stop_projection()\r\n \r\n print('DMD coordinates:')\r\n print(dmd_coordinates)\r\n print('Found camera coordinates:')\r\n print(camera_coordinates)\r\n \r\n self.DMD.free_memory()\r\n self.cam.Exit()\r\n \r\n transformation = CoordinateTransformations.polynomial2DFit(camera_coordinates, dmd_coordinates, order=1)\r\n print('Transformation found for x:')\r\n print(transformation[:,:,0])\r\n print('Transformation found for y:')\r\n print(transformation[:,:,1])\r\n return transformation\r\n \r\n def create_registration_image_touching_squares(x, y, sigma = 75):\r\n array = np.zeros((768, 1024))\r\n array[skimage.draw.rectangle((x-sigma, y-sigma), (x,y))] = 255\r\n array[skimage.draw.rectangle((x+sigma, y+sigma), (x,y))] = 255\r\n return array\r\n \r\n def create_registration_image_circle(x, y, sigma = 75):\r\n array = 
np.zeros((768, 1024))\r\n array[skimage.draw.circle(x, y, sigma)] = 255\r\n return array\r\n \r\n# class RegistrationThread(QThread):\r\n \r\n# sig_finished_registration = pyqtSignal(dict)\r\n \r\n# def __init__(self, parent, laser = None):\r\n# QThread.__init__(self)\r\n# self.flag_finished = [0, 0, 0]\r\n# self.backend = parent\r\n# self.dmd = self.backend.DMD\r\n \r\n# if not isinstance(laser, list): \r\n# self.laser_list = [laser]\r\n# else:\r\n# self.laser_list = laser\r\n \r\n# self.dict_transformators = {}\r\n \r\n# self.dict_transformations = {}\r\n# self.dtype_ref_co = np.dtype([('camera', int, (3,2)), ('dmd', int, (3,2)), ('galvos', int, (3,2)), ('stage', int, (3,2))])\r\n# self.reference_coordinates = {}\r\n \r\n# def set_device_to_register(self, device_1, device_2 = 'camera'):\r\n# self.device_1 = device_1\r\n# self.device_2 = device_2\r\n \r\n# def run(self):\r\n# #Make sure registration can only start when camera is connected\r\n# try:\r\n# self.cam = CamActuator()\r\n# self.cam.initializeCamera()\r\n# except:\r\n# print(sys.exc_info())\r\n# self.backend.ui_widget.normalOutputWritten('Unable to connect Hamamatsu camera')\r\n# return \r\n \r\n# self.cam.setROI(0, 0, 2048, 2048)\r\n \r\n# if self.device_1 == 'galvos':\r\n# reference_coordinates = self.gather_reference_coordinates_galvos()\r\n# self.dict_transformations['camera-galvos'] = findTransform(reference_coordinates[0], \\\r\n# reference_coordinates[1])\r\n# elif self.device_1 == 'dmd':\r\n# reference_coordinates = self.gather_reference_coordinates_dmd()\r\n# for laser in self.laser_list:\r\n# self.dict_transformations['camera-dmd-'+laser] = findTransform(reference_coordinates[0], \\\r\n# reference_coordinates[1])\r\n \r\n# elif self.device_1 == 'stage':\r\n# reference_coordinates = self.gather_reference_coordinates_stage()\r\n# self.dict_transformations['camera-stage'] = findTransform(reference_coordinates[0], \\\r\n# reference_coordinates[1])\r\n \r\n# self.cam.Exit()\r\n \r\n# ## Save transformation to file\r\n# with open('CoordinatesManager/Registration/transformation.txt', 'w') as json_file:\r\n \r\n# dict_transformations_list_format = {}\r\n# for key, value in self.dict_transformations.items():\r\n# dict_transformations_list_format[key] = value.tolist()\r\n \r\n# json.dump(dict_transformations_list_format, json_file)\r\n \r\n# self.sig_finished_registration.emit(self.dict_transformations)\r\n \r\n# def gather_reference_coordinates_stage(self):\r\n# image = np.zeros((2048, 2048, 3)) \r\n# stage_coordinates = np.array([[-2800, 100], [-2500, 400], [-1900, -200]])\r\n \r\n# self.backend.loadMask(mask = np.ones((768,1024)))\r\n# self.backend.startProjection()\r\n \r\n# for idx, pos in enumerate(stage_coordinates):\r\n \r\n# stage_movement_thread = StagemovementAbsoluteThread(pos[0], pos[1])\r\n# stage_movement_thread.start()\r\n# time.sleep(0.5)\r\n# stage_movement_thread.quit()\r\n# stage_movement_thread.wait()\r\n \r\n# image[:,:,idx] = self.cam.SnapImage(0.04)\r\n \r\n# camera_coordinates = find_subimage_location(image, save=True)\r\n \r\n# self.backend.stopProjection()\r\n# self.backend.freeMemory()\r\n \r\n# return np.array([camera_coordinates, stage_coordinates])\r\n \r\n# def gather_reference_coordinates_galvos(self):\r\n# galvothread = DAQmission()\r\n# readinchan = []\r\n \r\n# camera_coordinates = np.zeros((3,2))\r\n# galvo_coordinates = np.array([ [0, 3], [3, -3], [-3, -3] ])\r\n \r\n# for i in range(3):\r\n# pos_x = galvo_coordinates[i,0]\r\n# pos_y = galvo_coordinates[i,1]\r\n \r\n# 
galvothread.sendSingleAnalog('galvosx', pos_x)\r\n# galvothread.sendSingleAnalog('galvosy', pos_y)\r\n \r\n# image = self.cam.SnapImage(0.04)\r\n \r\n# camera_coordinates[i,:] = gaussian_fitting(image)\r\n \r\n# del galvothread\r\n# return np.array([camera_coordinates, galvo_coordinates])\r\n \r\n# def gather_reference_coordinates_dmd(self):\r\n# galvo_coordinates = np.zeros((3,2))\r\n \r\n# for laser in self.laser_list:\r\n# self.flag_finished = [0, 0, 0]\r\n \r\n# self.backend.ui_widget.sig_control_laser.emit(laser, 5)\r\n \r\n# self.registration_single_laser(laser)\r\n \r\n# self.backend.ui_widget.sig_control_laser.emit(laser, 0)\r\n \r\n# return np.array([self.camera_coordinates, self.dmd_coordinates, galvo_coordinates])\r\n \r\n# def registration_single_laser(self,laser): \r\n# date_time = datetime.datetime.now().timetuple()\r\n# image_id = ''\r\n# for i in range(5): \r\n# image_id += str(date_time[i])+'_'\r\n# image_id += str(date_time[5]) + '_l'+laser\r\n\r\n# self.camera_coordinates = np.zeros((3,2))\r\n# self.touchingCoordinateFinder = []\r\n \r\n# for i in range(3):\r\n# self.touchingCoordinateFinder.append(touchingCoordinateFinder_Thread(i, method='curvefit')) \r\n# self.touchingCoordinateFinder[i].sig_finished_coordinatefinder.connect(self.touchingCoordinateFinder_finished)\r\n\r\n# for i in range(3):\r\n# self.loadFileName = './CoordinatesManager/Registration_Images/TouchingSquares/registration_mask_'+str(i)+'.png'\r\n \r\n# # Transpose because mask in file is rotated by 90 degrees.\r\n# mask = np.transpose(plt.imread(self.loadFileName))\r\n \r\n# self.backend.loadMask(mask)\r\n# self.backend.startProjection()\r\n \r\n# time.sleep(0.5)\r\n# self.image = self.cam.SnapImage(0.0015)\r\n# time.sleep(0.5)\r\n \r\n# self.backend.stopProjection()\r\n# self.backend.freeMemory()\r\n \r\n# # Start touchingCoordinateFinder thread\r\n# self.touchingCoordinateFinder[i].put_image(self.image)\r\n# self.touchingCoordinateFinder[i].start()\r\n \r\n# self.dmd_coordinates = self.read_dmd_coordinates_from_file()\r\n \r\n# # Block till all touchingCoordinateFinder_Thread threads are finished\r\n# while np.prod(self.flag_finished) == 0:\r\n# time.sleep(0.1)\r\n \r\n \r\n# def read_dmd_coordinates_from_file(self):\r\n# file = open('./CoordinatesManager/Registration_Images/TouchingSquares/positions.txt', 'r')\r\n \r\n# self.dmd_coordinates = []\r\n# for ln in file.readlines():\r\n# self.dmd_coordinates.append(ln.strip().split(','))\r\n# file.close()\r\n \r\n# return np.asarray(self.dmd_coordinates).astype(int)\r\n \r\n# def touchingCoordinateFinder_finished(self, sig):\r\n# self.camera_coordinates[sig,:] = np.flip(self.touchingCoordinateFinder[sig].coordinates)\r\n# self.flag_finished[sig] = 1\r\n \r\nif __name__ == \"__main__\":\r\n pass \r\n\r\n \r\n \r\n \r\n ","sub_path":"CoordinatesManager/Registrator.py","file_name":"Registrator.py","file_ext":"py","file_size_in_byte":12835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252112274","text":"#! /usr/bin/env python\n# import packages\nimport serial\nimport time\nimport datetime\n##import deltatime\nimport struct\nimport os\nimport sys\n\ndef main():\n \n ev3PortBase = '/dev/rfcomm'\n# a number will be appended to try to open it. 
This is supposed to be the bluetooth port\n timeStart = datetime.datetime.now()\n \n for n in range(0,100):\n ev3Port = ev3PortBase + str(n)\n## print('Trying port {}'.format(ev3Port))\n try:\n EV3 = serial.Serial(ev3Port)\n except serial.SerialException:\n continue\n else:\n print('Opened EV3 Brick on {}'.format(ev3Port))\n break\n else: # If no port are found\n print('EV3 does not appear to be open on any /dev/rfcomm port')\n sys.exit()\n\n\n # Only if port is opened\n print('Time is {}; inWaiting() is {}'.format(datetime.datetime.now().time(), EV3.inWaiting()))\n \n #Try resetting te port if it doesnt look like it's working\n## if EV3.inWaiting() == 0:\n## print('Trying flush')\n## EV3.flush()\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting()))\n## print('Trying input reset')\n## EV3.reset_input_buffer()\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting()))\n## print('Trying output reset')\n## EV3.reset_output_buffer()\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting()))\n## print('Trying close/open')\n## EV3.close()\n## EV3.open()\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting()))\n## print('Trying pipe_abort_read')\n## EV3.pipe_abort_read_r\n## EV3.pipe_abort_read_w\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting()))\n## print('Trying cancel_read()')\n## EV3.cancel_read()\n## print('EV3.inWaiting() = {}'.format(EV3.inWaiting())) \n \n \n\n while 1:\n \n timeFromStart = datetime.datetime.now() - timeStart\n## print('Time {} inWaiting {} cts {} dsr {} dsrdtr {} fd {} CD {} CTS {} DSR {} RI {}'.format(timeFromStart,EV3.inWaiting(),\n## EV3.cts, EV3.dsr, EV3.dsrdtr, EV3.fd, EV3.getCD(), EV3.getCTS(), EV3.getDSR(), EV3.getRI()))\n\n \n try:\n if EV3.inWaiting() >= 2: # check for ev3 message\n # Get the number of bytes in this message\n s = EV3.read(2)\n # struct.unpack returns a tuple unpack using []\n [numberOfBytes] = struct.unpack(\" data/custom\ndef mkdirs(Dataset_folder, csv_folder, domain_dict):\n\n directory_list = ['train', 'validation']\n\n for directory in directory_list:\n for group_name in domain_dict:\n if not Dataset_folder.endswith('_nl'):\n folder = os.path.join(Dataset_folder, directory, group_name, 'Label')\n else:\n folder = os.path.join(Dataset_folder, directory, group_name, 'Label')\n if not os.path.exists(folder):\n os.makedirs(folder)\n filelist = [f for f in os.listdir(folder) if f.endswith(\".txt\")]\n for f in filelist:\n os.remove(os.path.join(folder, f))\n\n if not os.path.exists(csv_folder):\n os.makedirs(csv_folder)\n\n# 다운로드 진행상항 출력\ndef progression_bar(total_images, index):\n # 윈도우에서\n if os.name == 'nt':\n from ctypes import windll, create_string_buffer\n\n h = windll.kernel32.GetStdHandle(-12)\n csbi = create_string_buffer(22)\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\n\n if res:\n import struct\n (bufx, bufy, curx, cury, wattr,\n left, top, right, bottom, maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n columns = right - left + 1\n rows = bottom - top + 1\n else:\n columns, rows = 80, 25 # can't determine actual size - return default values\n # 리눅스에서\n else:\n rows, columns = os.popen('stty size', 'r').read().split()\n toolbar_width = int(columns) - 10\n image_index = index\n index = int(index / total_images * toolbar_width)\n\n print(' ' * (toolbar_width), end='\\r')\n bar = \"[{}{}] {}/{}\".format('-' * index, ' ' * (toolbar_width - index), image_index, total_images)\n print(bar.rjust(int(columns)), end='\\r')\n\ndef show_classes(classes):\n for n in classes:\n print(\"- {}\".format(n))\n 
print(\"\\n\")\n\ndef logo(command):\n\n bc = bcolors\n\n print(bc.OKGREEN + \"\"\"\n\t\t ___ ___ _____ __ \n\t\t .' `. .' `.|_ _| \\ \\ [ ]\n\t\t/ .-.__\\/ .-.__\\ | | \\ \\ / /\n\t\t| | _ _| | _ _ | | \\ \\ / /\n\t\t\\ `-' /\\ `-' /_| |_ \\ \\/ / \n\t\t `.___.' `.___.'|_____| \\__/ \n\t\"\"\" + bc.ENDC)\n\n if command == 'downloader':\n print(bc.OKGREEN + '''\n _____ _ _ \n (____ \\ | | | | \n | | \\ \\ ___ _ _ _ ____ | | ___ ____ _ | | ____ ____ \n | | | / _ \\| | | | _ \\| |/ _ \\ / _ |/ || |/ _ )/ ___)\n | |__/ / |_| | | | | | | | | |_| ( ( | ( (_| ( (/ /| | \n |_____/ \\___/ \\____|_| |_|_|\\___/ \\_||_|\\____|\\____)_| \n \n ''' + bc.ENDC)\n\nclass bcolors:\n HEADER = '\\033[95m'\n \n INFO = ' [INFO] | '\n OKBLUE = '\\033[94m[DOWNLOAD] | '\n WARNING = '\\033[93m [WARN] | '\n FAIL = '\\033[91m [ERROR] | '\n\n OKGREEN = '\\033[92m'\n ENDC = '\\033[0m'","sub_path":"CCAI/data_modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103442233","text":"# 함수\r\n# 코드를 미리 작성해놓고 필요할 때 마다\r\n# 불러다 사용하는 개념\r\n# 함수에 만든 코드를 동작시키기 위해서 요청하는 것을\r\n# 함수를 호출한다고 부른다.\r\n\r\n# 기본 함수\r\ndef test1() :\r\n print('test1 함수 호출')\r\n\r\n# 함수 호출\r\ntest1()\r\n\r\n# 함수의 매개변수. 함수를 호출할 때 값을 넘겨주면\r\n# 이를 받아주는 변수\r\n# 함수 내부에서 사용해야할 값이 있을 경우.\r\ndef test2(a1, a2) :\r\n r1 = a1 + a2\r\n print(f'r1 : {r1}')\r\n\r\ntest2(10, 20)\r\ntest2(100, 200)\r\ntest2(1000, 2000)\r\n\r\n# 반환 값\r\n# 함수 수행이 완료되면 함수를 호출 한 쪽으로 돌아가게 되는데\r\n# 이 때 값을 하나 가지고 갈 수 있다.\r\ndef test3(a1, a2) :\r\n return a1 + a2\r\n\r\nr10 = test3(10, 20)\r\nr20 = test3(100, 200)\r\nr30 = test3(1000, 2000)\r\nprint(f'r10 : {r10}')\r\nprint(f'r20 : {r20}')\r\nprint(f'r30 : {r30}')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"20_함수/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154352736","text":"#-*- coding: utf-8 -*-\nfrom typing import List\n\n#Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def print(self, depth=0):\n print(\"%s%s\" % (' '*depth, self.val))\n if self.left: self.left.print(depth+1)\n if self.right: self.right.print(depth+1)\n\nclass Solution:\n def rec(self, node: TreeNode):\n if node is None: return None\n tmp = node.left\n node.left = node.right\n node.right = tmp\n if node.left is not None: self.invertTree(node.left)\n if node.right is not None: self.invertTree(node.right)\n\n def invertTree(self, root: TreeNode) -> TreeNode:\n self.rec(root)\n return root\n\n\n\n\nnode = TreeNode(4)\nsub = TreeNode(2)\nsub.left = TreeNode(1)\nsub.right = TreeNode(3)\nnode.left= sub\nr1 = TreeNode(7)\nr1.left = TreeNode(6)\nr1.right = TreeNode(9)\nnode.right= r1\n\nnode.print()\nSolution().invertTree(node).print()\n\n\nnode.print()\nSolution().invertTree(None)\n","sub_path":"lc/esy/20190705_esy_226_invert_binary_tree.py","file_name":"20190705_esy_226_invert_binary_tree.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243487406","text":"# --------------------------------------------------------------------------\n# ***** BEGIN GPL LICENSE BLOCK *****\n#\n# Copyright (C) 2010 Dennis Ippel\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public 
License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# ***** END GPL LICENCE BLOCK *****\n# --------------------------------------------------------------------------\n\nbl_info = {\n \"name\": \"WebGL Native formats (Javascript or JSON)\",\n \"author\": \"Dennis Ippel, John Villar\",\n \"blender\": (2, 5, 7),\n \"api\": 35622,\n \"location\": \"File > Import-Export\",\n \"description\": \"Import-Export WebGL data with materials\",\n \"warning\": \"\",\n \"wiki_url\": \"http://code.google.com/p/blender-webgl-exporter/\",\n \"tracker_url\": \"\",\n \"support\": 'COMMUNITY',\n \"category\": \"Import-Export\"}\n\n# To support reload properly, try to access a package var, if it's there, reload everything\nif \"bpy\" in locals():\n import imp\n if \"io_export_webgl\" in locals():\n imp.reload(io_export_ply)\n \nimport bpy\nimport os\n\nfrom bpy.props import CollectionProperty, StringProperty, BoolProperty\n#from io_utils import ExportHelper\nfrom bpy_extras.io_utils import ExportHelper\nimport struct\nimport base64\nimport binascii\nimport json\nfrom functools import reduce\n\ndef export_scenejson(class_name, mesh):\n \"\"\"Exports the current mesh as a JSON model.\n\n Developed by johnvillarzavatti [at] gmail [dot] com\n\n returns an escaped string valid for any JSON parser\n \"\"\"\n mats = \"\\\"m\\\":[\"\n \n mtemp = \"\"\n for m in mesh.materials:\n mat = \"{\"\n #flags = m.get_mode()\n shaders = \"\"\n \n if m.use_shadeless: #flags & Material.Modes['SHADELESS']:\n shaders += ',\"fb\"' # Fullbright\n \n if m.type == \"HALO\": #flags & Material.Modes['HALO']:\n shaders += ',\"ha\"' # Halo\n \n mtexs = m.texture_slots\n ts = \"\"\n for t_slot in mtexs:\n if (t_slot) and (t_slot.texture) and (t_slot.texture.type == \"IMAGE\"):\n t = t_slot.texture\n toks = t.image.filepath.split(\"\\\\\")\n pipe = \"\" # Stages of the pipeline this texture is involved in\n if t_slot.use_map_color_diffuse: # Color modulation\n pipe += \",tx_m\"\n \n if t_slot.use_map_displacement: # Bump mapping\n pipe += \",bump\"\n if shaders.find('\"bm\"')<0:\n shaders += ',\"bm\"'\n \n if t_slot.use_map_normal: # Bump mapping\n pipe += \",norm\"\n if shaders.find('\"nm\"')<0:\n shaders += ',\"nm\"'\n \n ts += ',{\"fn\":\"'+toks[-1]\n if len(pipe)>0:\n ts +='\",\"pipe\":\"'+pipe[1:]\n ts += '\"}'\n\n mat += \"\\\"shaders\\\":[\" + shaders[1:] + \"],\"\n mat += \"\\\"texs\\\":[\"+ts[1:]+\"]\"\n mat += \"}\"\n mtemp += \",\"+mat\n mats += mtemp[1:]\n mats += \"]\"\n \n # Init arrays for each material group\n a_verts = list()\n a_idxs = list()\n for x in range(len(mesh.materials)):\n a_verts.append(\"\")\n a_idxs.append(0)\n \n a_norms = list()\n for x in range(len(mesh.materials)):\n a_norms.append(\"\")\n \n a_indices = list()\n for x in range(len(mesh.materials)):\n a_indices.append(\"\")\n\n a_uvs = list()\n for x in range(len(mesh.materials)):\n a_uvs.append(\"\")\n \n a_vcs = list()\n for x in range(len(mesh.materials)):\n a_vcs.append(\"\")\n \n # Now dump the faces\n for i, f in 
enumerate(mesh.faces):\n #t_indices = \"\"\n \n # Quads not supported\n for v_idx in f.vertices:\n v = mesh.vertices[v_idx]\n a_verts[f.material_index] += \",%.2f,%.2f,%.2f\" % (-v.co[0], v.co[2], v.co[1])\n a_norms[f.material_index] += \",%.2f,%.2f,%.2f\" % (-v.normal[0], v.normal[2], v.normal[1])\n #a_indices[f.mat] += \",%i\" % (a_idxs[f.mat])\n #t_indices = \",%i%s\" % (a_idxs[f.material_index], t_indices)\n a_indices[f.material_index] += \",%i\" % a_idxs[f.material_index]\n a_idxs[f.material_index] += 1\n \n #a_indices[f.material_index] += t_indices\n \n if (mesh.uv_textures):\n uv = mesh.uv_textures[0]\n #for uv in f.uv_textures:\n tcs = uv.data[i].uv_raw\n \n #Quads not supported here\n a_uvs[f.material_index] += \",%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\" % (tcs[0], tcs[1], tcs[2], tcs[3], tcs[4], tcs[5])\n \n # Currently not working because i don't use it -- john\n #if (mesh.vertexColors):\n # for color in f.col:\n # a_vcs[f.mat] += \",%.2f,%.2f,%.2f,%.2f\" % ( color.r / 255.0, color.g / 255.0, color.b / 255.0, color.a / 255.0)\n \n # Now compact all face arrays into each material group\n indices = \"\\\"f\\\":[\"\n p_indices = \"\"\n for a in a_indices:\n if len(a)>0:\n p_indices += \",[\" + a[1:] + \"]\"\n indices += p_indices[1:] + \"]\"\n \n texcoords = \"\\\"uvs\\\":[\"\n p_texcoords = \"\"\n for a in a_uvs:\n p_texcoords += \",[\" + a[1:] + \"]\"\n texcoords += p_texcoords[1:]+\"]\"\n \n vertices = \"\\\"v\\\":[\"\n p_vertices = \"\"\n for a in a_verts:\n p_vertices += \",[\" + a[1:] + \"]\"\n vertices += p_vertices[1:]+\"]\"\n \n normals = \"\\\"n\\\":[\"\n p_normals = \"\"\n for a in a_norms:\n p_normals += \",[\" + a[1:] + \"]\"\n normals += p_normals[1:]+\"]\"\n \n p_vcols = \"\"\n for a in a_vcs:\n if len(a)>0:\n p_vcols += \",[\" + a[1:] + \"]\"\n if len(p_vcols)>0:\n vcols = \"\\\"vcs\\\":[\" + p_vcols[1:] + \"]\"\n else:\n vcols = \"\"\n \n # Now build our output\n s = \"{\"+vertices + \",\"\n s += normals + \",\"\n s += indices + \",\"\n s += texcoords + \",\"\n if (len(vcols) > 0):\n s += vcols + \",\"\n s += mats\n \n s += \"}\"\n \n return s\n\ndef to_fixed16(flt):\n i_part = abs(int(flt))\n d_part = int(abs(flt - i_part)*256) & 255\n \n result = (i_part << 8) | d_part\n \n if flt < 0:\n result = -result\n \n return result\n \ndef export_animdata(ob, scene):\n frame_start = scene.frame_start\n frame_end = scene.frame_end\n \n frames = []\n \n for frame in range(frame_start, frame_end + 1):\n scene.frame_set(frame)\n me = ob.to_mesh(scene, True, 'PREVIEW')\n \n numverts = len(me.vertices)\n \n frames.append(struct.pack(\">%dh\" % (numverts * 3), *[to_fixed16(ax) for v in me.vertices for ax in v.co]))\n \n return base64.encodebytes(bytes().join(frames)).decode('ascii')[:-1]\n \ndef export_objectJson(ob, me, scene):\n obj = \"{\\\"name\\\":\\\"\"+ob.name+\"\\\",\"\n \n #ipo = ob.getIpo()\n ipo = None\n \n #print ob.matrix_local\n #obj += \"\\\"mtx\\\":[[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f]],\" % (ob.matrix_local[0][0], ob.matrix_local[0][1], ob.matrix_local[0][2], ob.matrix_local[0][3], ob.matrix_local[1][0], ob.matrix_local[1][1], ob.matrix_local[1][2], ob.matrix_local[1][3], ob.matrix_local[2][0], ob.matrix_local[2][1], ob.matrix_local[2][2], ob.matrix_local[2][3], ob.matrix_local[3][0], ob.matrix_local[3][1], ob.matrix_local[3][2], ob.matrix_local[3][3])\n obj += \"\\\"mtx\\\":[[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f],[%.4f,%.4f,%.4f,%.4f]],\" % (ob.matrix_local[0][0], ob.matrix_local[1][0], 
ob.matrix_local[2][0], ob.matrix_local[3][0], ob.matrix_local[0][1], ob.matrix_local[1][1], ob.matrix_local[2][1], ob.matrix_local[3][1], ob.matrix_local[0][2], ob.matrix_local[1][2], ob.matrix_local[2][2], ob.matrix_local[3][2], ob.matrix_local[0][3], ob.matrix_local[1][3], ob.matrix_local[2][3], ob.matrix_local[3][3])\n #obj += \"\\\"x\\\":%.4f,\\\"y\\\":%.4f,\\\"z\\\":%.4f,\\\"rx\\\":%.4f,\\\"ry\\\":%.4f,\\\"rz\\\":%.4f,\" % (ob.LocX, ob.LocZ, -ob.LocY, ob.RotX, ob.RotZ, ob.RotY)\n \n if ipo is not None:\n ipos = \"\"\n \n for crv in ipo.curves:\n ipos += \",{\\\"curve\\\":\\\"%s\\\",\\\"emode\\\":%i,\\\"imode\\\":%i,\\\"bezs\\\":[\" % (crv.name, crv.extend, crv.interpolation)\n bzs = \"\"\n for bp in crv.bezierPoints:\n h1, p, h2 = bp.vec\n bzs += \",%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\" % (h1[0], h1[1], p[0], p[1], h2[0], h2[1])\n #bzs += \",%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\" % (h1[0], h1[1], h1[2], p[0], p[1], p[2], h2[0], h2[1], h2[2])\n ipos += bzs[1:]+\"]}\"\n \n obj += \"\\\"ipos\\\":[\" + ipos[1:] + \"],\"\n \n obj += \"\\\"mesh\\\":\" + export_scenejson(ob.name.replace(\".\", \"\"), me)\n \n obj = \"\".join([obj, \",\\\"anim_data\\\": \\\"\", export_animdata(ob, scene), \"\\\"\"])\n \n obj += \"}\"\n \n return obj\n \ndef object_to_dict(scene, object, binary=False):\n outp = {'name': object.name}\n \n armature = object.find_armature()\n if armature is not None:\n # Put the armature in REST position\n armature_proper = bpy.data.armatures[armature.name]\n #armature.pose_position = 'REST'\n \n # Convert all the mesh's faces to triangles\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.quads_convert_to_tris()\n bpy.context.scene.update()\n bpy.ops.object.mode_set(mode='OBJECT') # set it in object\n \n me = object.to_mesh(scene, True, \"PREVIEW\")\n reduce(lambda x,y: max(x,y), [grp.group for v in me.vertices for grp in v.groups])\n ome = {}\n \n numverts = len(me.vertices)\n numfaces = len(me.faces)\n \n # Binary mode packs everything as 16-bit fixed point big-endian arrays, \n # taking the absolute value of the floating point original value, the first\n # 8 bits are the decimal part, last 8 bits are the integral part and then\n # the sign is swaped if necessary. UVs are packed differently, to obtain\n # your original approximated UV take the 16-bit int of each coord and divide\n # it by 8192.0f (keeps sign). Vertex groups are packed with the 10 less\n # significative bits taking the weight in the range (0 <= weight <= 1)*1023\n # and the 6 most significative bits as the vertex group. 
Each group is\n # preceded by a byte specifying how much groups follow it.\n # Everything gets Base64 encoded in binary mode.\n # Think of JSON as a binary-safe transport in this mode.\n #\n # Normal mode is the least efficient space-wise but is far more readable.\n #\n # Arrays are 'flattened' in both cases.\n if binary:\n fixed_proc = lambda x: to_fixed16(x)\n fixed_pack = lambda arr: base64.encodebytes(struct.pack(\">%dh\" % len(arr), *arr)).decode('ascii')[:-1]\n v_co_proc, v_co_pack = fixed_proc, fixed_pack\n v_normal_proc, v_normal_pack = fixed_proc, fixed_pack\n v_face_pack = fixed_pack\n v_uv_proc = lambda x: int(8192.0 * x)\n v_uv_pack = fixed_pack\n v_bw_proc = lambda x: ((x[0] & 63) << 10) | (int(x[1] * 1023.0) & 1023)\n v_bw_pack = lambda x: base64.encodebytes(bytes().join([struct.pack(\">B%iH\" % len(y), len(y), *y) for y in x])).decode('ascii')[:-1]\n else:\n identity = lambda i: i\n v_co_proc, v_co_pack = identity, identity\n v_normal_proc, v_normal_pack = identity, identity\n v_face_pack = identity\n v_uv_proc, v_uv_pack = identity, identity\n v_bw_proc, v_bw_pack = identity, identity\n \n ome['v'] = v_co_pack([v_co_proc(ax) for v in me.vertices for ax in v.co])\n ome['n'] = v_normal_pack([v_normal_proc(ax) for v in me.vertices for ax in v.normal])\n ome['f'] = v_face_pack([idx for f in me.faces for idx in f.vertices])\n \n ome['uv'] = []\n for layer in me.uv_textures:\n ome['uv'].append(v_uv_pack([v_uv_proc(st) for tex_face in layer.data for uv in tex_face.uv for st in uv]))\n \n if armature is not None:\n ome['bw'] = v_bw_pack([[v_bw_proc((grp.group, grp.weight)) for grp in v.groups] for v in me.vertices])\n\n # Put the armature in POSE position\n #armature.pose_position = 'POSE'\n \n #ome['b'] =\n else:\n # Export vertex animations\n pass\n \n outp['mesh'] = ome\n \n return outp\n \ndef export_scene_json(scene, binary=False):\n outp = {'scene': scene.name, 'fps': scene.render.fps}\n \n outp['objs'] = [object_to_dict(scene, obj, binary) for obj in scene.objects if (obj.type == 'MESH') and (obj.select)]\n \n return json.dumps(outp)\n\ndef savejson(operator, context,\n filepath=\"\",\n use_modifiers=True,\n use_normals=True,\n use_uv_coords=True,\n use_colors=True,\n in_place_anim=True, \n vertex_anim_as_deltas=True, \n anim_as_image=True,\n export_binary=False):\n \n sce = context.scene #bpy.data.scenes[0]\n\n with open(filepath, 'wb') as file:\n file.write(export_scene_json(sce, export_binary).encode('utf-8'))\n \n \"\"\"obs = [ob for ob in sce.objects if (ob.type == 'MESH') and (ob.select)]\n\n # if nothing is selected, export everything\n if len(obs) == 0:\n obs = [ob for ob in sce.objects if ob.type == 'MESH']\n \n data_string = \"{\\\"scene\\\":1,\\\"fps\\\":%i,\\\"objs\\\":[\" % (25) # Fixed for now\n \n ob_string = \"\"\n for ob in obs:\n me = ob.to_mesh(sce, True, \"PREVIEW\")\n ob_string = \"%s,%s\" % (ob_string, export_objectJson(ob, me, sce))\n \n data_string = \"%s%s]}\" % (data_string, ob_string[1:])\n \n with open(filepath, 'wb') as file:\n file.write(data_string.encode('utf-8'))\"\"\"\n\n return \"FINISHED\"\n\nclass ExportJSON(bpy.types.Operator, ExportHelper):\n '''Export objects as a JSON object with normals and texture coordinates.'''\n bl_idname = \"export_scene.webgl_json\"\n bl_label = \"Export JSON\"\n\n filename_ext = \".json\"\n filter_glob = StringProperty(default=\"*.json\", options={'HIDDEN'})\n\n use_modifiers = BoolProperty(name=\"Apply Modifiers\", description=\"Apply Modifiers to the exported mesh\", default=True)\n use_normals = 
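The fixed-point format described in the comment block above is easiest to pin down with a round trip. Note that `to_fixed16` puts the integer part in the high byte (`i_part << 8`) and the fraction in the low byte, so with big-endian `struct.pack(">h")` the integer byte comes first on the wire; the comment appears to have the two halves swapped relative to the code. Below, `from_fixed16` is a hypothetical decoder added purely for illustration; values round-trip exactly only when the fraction is a multiple of 1/256, and magnitudes must stay below 128 to fit a signed 16-bit short.

import base64
import struct

def to_fixed16(flt):
    # same logic as the exporter's to_fixed16 above
    i_part = abs(int(flt))
    d_part = int(abs(flt - i_part) * 256) & 255
    result = (i_part << 8) | d_part
    return -result if flt < 0 else result

def from_fixed16(value):
    # hypothetical inverse: high byte -> integer part, low byte -> fraction/256
    sign = -1 if value < 0 else 1
    value = abs(value)
    return sign * ((value >> 8) + (value & 255) / 256.0)

packed = struct.pack(">3h", *[to_fixed16(v) for v in (1.5, -2.25, 0.125)])
encoded = base64.b64encode(packed).decode("ascii")
decoded = [from_fixed16(v) for v in struct.unpack(">3h", base64.b64decode(encoded))]
print(decoded)  # [1.5, -2.25, 0.125]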
BoolProperty(name=\"Normals\", description=\"Export Normals for smooth and hard shaded faces\", default=True)\n use_uv_coords = BoolProperty(name=\"UVs\", description=\"Export the active UV layer\", default=True)\n use_colors = BoolProperty(name=\"Vertex Colors\", description=\"Export the active vertex color layer\", default=True)\n in_place_anim = BoolProperty(name=\"InPlace Anim\", description=\"Normalize animation for in-place animation\", default=True)\n vertex_anim_as_deltas = BoolProperty(name=\"Vertex Deltas\", description=\"Export vertex position changes as deltas\", default=True)\n anim_as_image = BoolProperty(name=\"Anim on Image\", description=\"Export animation data as an embedded image for shader animations\", default=True)\n export_binary = BoolProperty(name=\"Export mostly binary\", description=\"Export most arrays as Base64 encoded arrays\", default=False)\n\n def execute(self, context):\n filepath = self.filepath\n filepath = bpy.path.ensure_ext(filepath, self.filename_ext)\n return savejson(self, context, **self.as_keywords(ignore=(\"check_existing\", \"filter_glob\")))\n\n def draw(self, context):\n layout = self.layout\n\n row = layout.row()\n row.prop(self, \"use_modifiers\")\n row.prop(self, \"use_normals\")\n row = layout.row()\n row.prop(self, \"use_uv_coords\")\n row.prop(self, \"use_colors\")\n row = layout.row()\n row.prop(self, \"in_place_anim\")\n row.prop(self, \"vertex_anim_as_deltas\")\n row = layout.row()\n row.prop(self, \"anim_as_image\")\n row.prop(self, \"export_binary\")\n \n\ndef menu_func_export(self, context):\n #self.layout.operator(ExportWebgl.bl_idname, text=\"WebGL (.js)\") # unmaintained\n self.layout.operator(ExportJSON.bl_idname, text=\"WebGL JSON (.json)\")\n\n\ndef register():\n bpy.utils.register_module(__name__)\n\n bpy.types.INFO_MT_file_export.append(menu_func_export)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\n bpy.types.INFO_MT_file_export.remove(menu_func_export)\n\nif __name__ == \"__main__\":\n register()","sub_path":"js/io_export_webgl.py","file_name":"io_export_webgl.py","file_ext":"py","file_size_in_byte":16735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580402582","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport uuid\n\nimport jsonfield.fields\nfrom django.conf import settings\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('custom_user', '0001_initial'),\n ('routing', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Country',\n fields=[\n ('country_id', models.AutoField(serialize=False, primary_key=True)),\n ('key', models.CharField(max_length=140, verbose_name=b'key for country')),\n ],\n ),\n migrations.CreateModel(\n name='Location',\n fields=[\n ('location_id', models.AutoField(serialize=False, primary_key=True)),\n ('key', models.CharField(max_length=140, verbose_name=b'key id for location')),\n ('country', models.ForeignKey(verbose_name=b'country of location', to='resorts.Country')),\n ],\n ),\n migrations.CreateModel(\n name='Resort',\n fields=[\n ('resort_pk', models.AutoField(serialize=False, primary_key=True)),\n ('resort_id', models.UUIDField(default=uuid.uuid4, verbose_name=b'resort unique id', editable=False)),\n ('resort_name', models.CharField(max_length=255, verbose_name=b'resort name')),\n ('website', models.URLField(verbose_name=b'resort website', 
blank=True)),\n ('network_key', models.UUIDField(default=uuid.uuid4, verbose_name=b'resort unique id')),\n ('license_expiry_date',\n models.DateTimeField(null=True, verbose_name=b'resort expiry expiration date', blank=True)),\n ('licenses', models.IntegerField(null=True, verbose_name=b'licenses owned by resort', blank=True)),\n ('map_kml', models.CharField(max_length=255, blank=True)),\n ('map_type', models.IntegerField(default=1, blank=True)),\n ('report_form', models.TextField(verbose_name=b'report form for resort', blank=True)),\n ('print_on_device', models.IntegerField(default=0, blank=True)),\n ('map_lat', models.FloatField(null=True, blank=True)),\n ('map_long', models.FloatField(null=True, blank=True)),\n ('default_unit_temp', models.IntegerField(default=0, blank=True)),\n ('default_unit_length', models.IntegerField(default=0, blank=True)),\n ('default_unit_distance', models.IntegerField(default=0, blank=True)),\n ('timezone', models.CharField(max_length=25, blank=True)),\n ('image', models.TextField(verbose_name=b'resort logo', blank=True)),\n ('dt_modified', models.DateTimeField(auto_now=True)),\n ('incident_template',\n jsonfield.fields.JSONField(verbose_name=b'Incident template for resort', blank=True)),\n ('domain_id', models.ForeignKey(to='routing.Domains', null=True)),\n ('location',\n models.ForeignKey(verbose_name=b'resort location', blank=True, to='resorts.Location', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='UserResortMap',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('resort', models.ForeignKey(to='resorts.Resort')),\n ('role', models.ForeignKey(to='custom_user.UserRoles')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='userresortmap',\n unique_together=set([('user', 'resort')]),\n ),\n ]\n","sub_path":"project/apps/resorts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"552132186","text":"import re\nimport numpy as np\n\nfrom bs4 import BeautifulSoup\n\n\ndef removeSpacing(string):\n return re.sub(\"\\n|\\t\", \"\", string)\n\ndef readHTMLtable(tag):\n\n data = []\n for line in tag.find_all( \"tr\" ):\n info_line = []\n for value in line.find_all( \"td\" ):\n if value.string and '-' not in value.string:\n info_line.append(removeSpacing(value.string))\n else:\n for v in value.find_all( \"span\", { \"class\" : \"wx-value\" }):\n info_line.append(v.string)\n data.append(info_line)\n\n return np.array(data[1:])\n\n\n\nclass weatherParser:\n def __init__(self, html):\n\n self.soup = BeautifulSoup(html, \"lxml\")\n self.date = ''\n self.city = ''\n self.daily_data = ''\n self.hourly_data = ''\n self.daily_dict = dict()\n\n def generateInfo(self):\n\n for tag in self.soup.find_all( \"h2\", { \"class\" : \"history-date\" } ):\n self.date = tag.string\n \n for tag in self.soup.find_all( \"h2\", { \"class\" : \"city-nav-header is-parent\" } ):\n self.city = removeSpacing(tag.contents[0])\n\n for tag in self.soup.find_all( \"table\", { \"id\" : \"historyTable\" } ):\n self.daily_data = readHTMLtable(tag)\n\n for tag in self.soup.find_all( \"table\", { \"id\" : \"obsTable\" } ):\n self.hourly_data = readHTMLtable(tag)\n\n for info in self.daily_data:\n if len(info)>2:\n self.daily_dict[info[0]] = info[1]\n\n self.soup = 
''\n\n","sub_path":"weatherlib/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413119170","text":"# TO-DO: Complete the selection_sort() function below \ndef selection_sort( arr ):\n # loop through n-1 elements\n A= arr\n for i in range(len(A)): \n \n # Find the minimum element in remaining \n # unsorted array \n min_idx = i \n for j in range(i+1, len(A)): \n if A[min_idx] > A[j]: \n min_idx = j \n \n # Swap the found minimum element with \n # the first element \n A[i], A[min_idx] = A[min_idx], A[i]\n # TO-DO: find next smallest element\n # (hint, can do in 3 loc) \n \n\n\n\n # TO-DO: swap\n\n\n return arr\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort( arr ):\n n = len(arr) \n \n # Traverse through all array elements \n for i in range(n): \n \n # Last i elements are already in place \n for j in range(0, n-i-1): \n \n # traverse the array from 0 to n-i-1 \n # Swap if the element found is greater \n # than the next element \n if arr[j] > arr[j+1] : \n arr[j], arr[j+1] = arr[j+1], arr[j] \n\n\n return arr\n\n\n# STRETCH: implement the Count Sort function below\ndef count_sort( arr, maximum=-1 ):\n\n\n\n return arr","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"645845130","text":"class TreeNode:\n def __init__(self, data=None):\n self.left = None\n self.right = None\n self.data = data\n\nclass LinkedListNode:\n def __init__(self, data=None):\n self.data = data\n self.next = None\n\n\ndef binary_search(root_node, target):\n while(root_node.left or root_node.right):\n if root_node.data == target:\n return True\n elif root_node.data < target:\n root_node = root_node.right\n else:\n root_node = root_node.left\n return False\n\ndef binary_search(search_list, target):\n left_index = 0\n right_index = len(list) - 1\n while left_index < right_index:\n mid = (left_index + right_index) / 2\n if target == search_list[mid]:\n return True\n elif target < search_list[mid]:\n right_index = mid - 1\n else:\n left_index = mid + 1\n\n return False\n\ndef bfs(tree_node, target):\n q = []\n q.append(tree_node)\n while q:\n current = q.popleft()\n if not current:\n continue\n if current.data == target:\n return True\n q.append(current.left)\n q.append(current.right)\n return False\n\ndef permute(self, n):\n self.result = []\n self.permute_helper([], n)\n return self.result\n\ndef permute_helper(self, current_result, n):\n if len(current_result) == n:\n self.result.append(current_result)\n return\n\n cannot_permute_R = len(current_result) >= 2 and current_result[-1] == current_result[-2] == 'R'\n cannot_permute_B = len(current_result) >= 2 and current_result[-1] == current_result[-2] == 'B'\n\n if not cannot_permute_R:\n self.permute_helper(current_result + ['R'], n)\n\n if not cannot_permute_B:\n self.permute_helper(current_result + ['B'], n)\n","sub_path":"Ninja/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"297752335","text":"import tkinter\nimport os\n\nmain_window = tkinter.Tk()\nmain_window.title('игра 15')\n\n# files_list = os.listdir('nums')\nfiles_list = sorted(os.listdir('nums'))\n\nimages_list = []\nfor file_name in 
files_list:\n # print('nums\\\\' + file_name)\n # print(f'nums\\\\{file_name}')\n rel_path = os.path.join('nums', file_name)\n # print(rel_path)\n\n image = tkinter.PhotoImage(file=rel_path)\n images_list.append(image)\n\nprint(images_list)\n\n\n\nmain_window.mainloop()\n","sub_path":"game_1.py","file_name":"game_1.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"275812439","text":"import matplotlib\nfrom matplotlib import pyplot\n\nimport inspect\nfrom .omas_utils import *\n\n__all__=[]\n\ndef add_to__all__(f):\n __all__.append(f.__name__)\n return f\n\n# ================================\n# plotting helper functions\n# ================================\ndef contourPaths(x, y, Z, levels, remove_boundary_points=False, smooth_factor=1):\n '''\n returns contour paths\n\n :param x: x grid\n\n :param y: y grid\n\n :param Z: z values\n\n :param levels: levels of the contours\n\n :param remove_boundary_points: how to treat the last point of contour surfaces that are close\n\n :param smooth_factor: smoothing before contouring\n\n :return: list of matplotlib contour paths objects\n '''\n import matplotlib\n from matplotlib import _cntr\n\n sf = int(round(smooth_factor))\n if sf > 1:\n x = scipy.ndimage.zoom(x, sf)\n y = scipy.ndimage.zoom(y, sf)\n Z = scipy.ndimage.zoom(Z, sf)\n\n [X,Y]=numpy.meshgrid(x,y)\n Cntr = matplotlib._cntr.Cntr(X,Y,Z)\n\n allsegs = []\n for level in levels:\n nlist = Cntr.trace(level)\n nseg = len(nlist)//2\n segs = nlist[:nseg]\n if not remove_boundary_points:\n segs_ = segs\n else:\n segs_ = []\n for segarray in segs:\n x_ = segarray[:,0]\n y_ = segarray[:,1]\n valid = []\n for i in range(len(x_)-1):\n if numpy.isclose(x_[i],x_[i+1]) and (numpy.isclose(x_[i],max(x)) or numpy.isclose(x_[i],min(x))):\n continue\n if numpy.isclose(y_[i],y_[i+1]) and (numpy.isclose(y_[i],max(y)) or numpy.isclose(y_[i],min(y))):\n continue\n valid.append((x_[i],y_[i]))\n if i==len(x_):\n valid.append(x_[i+1],y_[i+1])\n if len(valid):\n segs_.append(numpy.array(valid))\n\n segs=map(matplotlib.path.Path,segs_)\n allsegs.append(segs)\n return allsegs\n\nclass Uband(object):\n \"\"\"\n This class wraps the line and PollyCollection(s) associated with a banded\n errorbar plot for use in the uband function.\n\n It's methods are Line2D methods distributed to both the line and bands if\n applicable, or just to the line alone otherwise.\n\n \"\"\"\n\n def __init__(self, line, bands):\n \"\"\"\n :param line: Line2D\n A line of the x,y nominal values\n :param bands: list of PolyCollections\n The fill_between and/or fill_betweenx PollyCollections spanning the\n std_devs of the x,y data.\n\n \"\"\"\n self.line = line # matplotlib.lines.Line2D\n self.bands = list(matplotlib.cbook.flatten([bands])) # matplotlib.collections.PolyCollection(s)\n\ndef _method_factory(self, key, bands=True):\n \"\"\"Add a method that calls the same method for line and band\n or just for the line\"\"\"\n if bands:\n def method(self, *args, **kw):\n \"\"\"\n Call the same method for line and band.\n Returns Line2D method call result.\n \"\"\"\n for band in self.bands:\n getattr(band, key)(*args, **kw)\n return getattr(self.line, key)(*args, **kw)\n else:\n def method(self, *args, **kw):\n \"\"\"Call the line method\"\"\"\n return getattr(self.line, key)(*args, **kw)\n return method\n\nfor _name, _method in inspect.getmembers(matplotlib.lines.Line2D, predicate=inspect.ismethod):\n if _name.startswith('_'):\n continue\n setattr(Uband, 
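The `contourPaths` helper above depends on `matplotlib._cntr`, a private module that was removed in matplotlib 2.2, so it only runs on old matplotlib versions. A sketch of the same idea — extracting contour vertices per level — using the public API (`allsegs` is a documented attribute of the set returned by `contour`):

import numpy as np
import matplotlib.pyplot as plt

x = y = np.linspace(-2, 2, 101)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X ** 2 + Y ** 2))

cs = plt.contour(X, Y, Z, levels=[0.2, 0.5, 0.8])
# cs.allsegs: one list per level, each holding (N, 2) arrays of path vertices
for level, segs in zip(cs.levels, cs.allsegs):
    print(level, [seg.shape for seg in segs])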
_name, _method_factory(Uband, _name,\n bands=_name in ['set_color',\n 'set_lw',\n 'set_linewidth',\n 'set_dashes',\n 'set_linestyle']))\n\ndef uband(x, y, ax=None, fill_kw={'alpha': 0.25}, **kw):\n '''\n Given arguments x,y where either or both have uncertainties, plot x,y using pyplt.plot\n of the nominal values and surround it with with a shaded error band using matplotlib's\n fill_between and/or fill_betweenx.\n\n If y or x is more than 1D, it is flattened along every dimension but the last.\n\n :param x: array of independent axis values\n\n :param y: array of values with uncertainties, for which shaded error band is plotted\n\n :param ax: The axes instance into which to plot (default: gca())\n\n :param fill_kw: dict. Passed to pyplot.fill_between\n\n :param \\**kw: Passed to pyplot.plot\n\n :return: list. A list of Uband objects containing the line and bands of each (x,y) along\n the last dimension.\n\n '''\n\n result = []\n if ax is None:\n ax = gca()\n\n # enable combinations of 1D and 2D x's and y's\n y = numpy.array(y)\n y = y.reshape(-1, y.shape[-1])\n x = numpy.array(x)\n x = x.reshape(-1, x.shape[-1])\n if x.shape[0] == 1 and y.shape[0] > 1: # one x for all y's\n x = numpy.tile(x[0, :], y.shape[0]).reshape(-1, x.shape[-1])\n\n # plot each (x,y) and collect the lines/bands into a single object\n for xi, yi in zip(x, y):\n xnom = numpy.atleast_1d(numpy.squeeze(uncertainties.unumpy.nominal_values(xi)))\n xerr = numpy.atleast_1d(numpy.squeeze(uncertainties.unumpy.std_devs(xi)))\n ynom = numpy.atleast_1d(numpy.squeeze(uncertainties.unumpy.nominal_values(yi)))\n yerr = numpy.atleast_1d(numpy.squeeze(uncertainties.unumpy.std_devs(yi)))\n\n l, = ax.plot(xnom, ynom, **kw)\n\n fkw = copy.copy(fill_kw) # changes to fill_kw propagate to the next call of uband!\n fkw.setdefault('color', l.get_color())\n bands = []\n if numpy.any(yerr != 0):\n bandy = ax.fill_between(xnom, ynom - yerr, ynom + yerr, **fkw)\n bands.append(bandy)\n if numpy.any(xerr != 0):\n bandx = ax.fill_betweenx(ynom, xnom - xerr, xnom + xerr, **fkw)\n bands.append(bandx)\n\n tmp = Uband(l, bands)\n result.append(tmp)\n\n return result\n\n# ================================\n# ODSs' plotting methods\n# ================================\n@add_to__all__\ndef equilibrium_CX(ods, time_index=0, ax=None, **kw):\n '''\n Plot equilibrium cross-section\n as per `ods['equilibrium']['time_slice'][time_index]`\n\n :param ods: input ods\n\n :param time_index: time slice to plot\n\n :param ax: axes to plot in (active axes is generated if `ax is None`)\n\n :param kw: arguments passed to matplotlib plot statements\n\n :return: axes\n '''\n if ax is None:\n ax=pyplot.gca()\n\n wall=None\n eq=ods['equilibrium']['time_slice'][time_index]\n if 'wall' in ods:\n if time_index in ods['wall']['description_2d']:\n wall=ods['wall']['description_2d'][time_index]['limiter']['unit']\n elif 0 in ods['wall']['description_2d']:\n wall=ods['wall']['description_2d'][0]['limiter']['unit']\n\n # first try to plot as function of `rho` and fallback on `psi`\n if 'phi' in eq['profiles_2d'][0] and 'phi' in eq['profiles_1d']:\n value2D=numpy.sqrt(abs(eq['profiles_2d'][0]['phi']))\n value1D=numpy.sqrt(abs(eq['profiles_1d']['phi']))\n else:\n value2D=eq['profiles_2d'][0]['psi']\n value1D=eq['profiles_1d']['psi']\n value2D=(value2D-min(value1D))/(max(value1D)-min(value1D))\n levels=numpy.r_[0.1:10:0.1]\n\n # contours\n line=numpy.array([numpy.nan,numpy.nan])\n for item1 in contourPaths(eq['profiles_2d'][0]['grid']['dim1'],\n eq['profiles_2d'][0]['grid']['dim2'],\n 
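The `uband` function above is easiest to understand from a call site. A hypothetical usage sketch — the `omas.omas_plot` import path is inferred from the record's `sub_path`, and the `uncertainties` package (which `uband` already relies on for `nominal_values`/`std_devs`) is assumed to be installed:

import numpy as np
import matplotlib.pyplot as plt
from uncertainties import unumpy
from omas.omas_plot import uband  # import path assumed from the record's sub_path

x = np.linspace(0, 1, 20)
# nominal sine values, each carrying a 0.1 standard deviation
y = unumpy.uarray(np.sin(2 * np.pi * x), 0.1 * np.ones(20))

fig, ax = plt.subplots()
uband(x, y, ax=ax, fill_kw={'alpha': 0.3}, label='measurement')  # line + shaded band
ax.legend()
plt.show()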
value2D, levels, smooth_factor=1):\n for item in item1:\n line=numpy.vstack(( line, item.vertices,numpy.array([numpy.nan,numpy.nan]) ))\n\n # internal flux surfaces w/ or w/o masking\n if wall is not None:\n path = matplotlib.path.Path(numpy.transpose(numpy.array([wall[0]['outline']['r'],wall[0]['outline']['z']])))\n patch = matplotlib.patches.PathPatch(path, facecolor='none')\n ax.add_patch(patch)\n pyplot.plot(line[:,0],line[:,1],**kw)\n ax.lines[-1].set_clip_path(patch)\n else:\n pyplot.plot(line[:,0],line[:,1],**kw)\n\n # plotting style\n kw1=copy.deepcopy(kw)\n kw1['linewidth']=kw.setdefault('linewidth',1)+1\n kw1.setdefault('color',ax.lines[-1].get_color())\n\n # boundary\n ax.plot(eq['boundary']['outline']['r'],eq['boundary']['outline']['z'],**kw1)\n\n # axis\n ax.plot(eq['global_quantities']['magnetic_axis']['r'],eq['global_quantities']['magnetic_axis']['z'],'+',**kw1)\n\n # wall\n if wall is not None:\n ax.plot(wall[0]['outline']['r'],wall[0]['outline']['z'],'k',linewidth=2)\n\n ax.axis([min(wall[0]['outline']['r']), max(wall[0]['outline']['r']),\n min(wall[0]['outline']['z']), max(wall[0]['outline']['z'])])\n\n # axes\n ax.set_aspect('equal')\n ax.set_frame_on(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n return ax\n\n@add_to__all__\ndef equilibrium_summary(ods, time_index=0, fig=None, **kw):\n '''\n Plot equilibrium cross-section and P, q, P', FF' profiles\n as per `ods['equilibrium']['time_slice'][time_index]`\n\n :param ods: input ods\n\n :param time_index: time slice to plot\n\n :param fig: figure to plot in (a new figure is generated if `fig is None`)\n\n :param kw: arguments passed to matplotlib plot statements\n\n :return: list of axes\n '''\n if fig is None:\n fig=pyplot.figure()\n\n ax=pyplot.subplot(1,3,1)\n ax=equilibrium_CX(ods, time_index=time_index, ax=ax, **kw)\n eq=ods['equilibrium']['time_slice'][time_index]\n\n # x\n if 'phi' in eq['profiles_2d'][0] and 'phi' in eq['profiles_1d']:\n x=numpy.sqrt(abs(eq['profiles_1d']['phi']))\n xName='$\\\\rho$'\n else:\n x=eq['profiles_1d']['psi']\n xName='$\\\\psi$'\n x=(x-min(x))/(max(x)-min(x))\n\n # pressure\n ax=pyplot.subplot(2,3,2)\n ax.plot(x,eq['profiles_1d']['pressure'], **kw)\n kw.setdefault('color',ax.lines[-1].get_color())\n ax.set_title('$\\,$ Pressure')\n ax.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')\n pyplot.setp(ax.get_xticklabels(), visible=False)\n\n # q\n ax=fig.add_subplot(2,3,3,sharex=ax)\n ax.plot(x,eq['profiles_1d']['q'], **kw )\n ax.set_title('$q$ Safety factor')\n ax.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')\n if 'label' in kw:\n ax.legend(loc=0).draggable(True)\n pyplot.setp(ax.get_xticklabels(), visible=False)\n\n # dP_dpsi\n ax=fig.add_subplot(2,3,5,sharex=ax)\n ax.plot(x,eq['profiles_1d']['dpressure_dpsi'], **kw )\n ax.set_title(\"$P\\,^\\\\prime$ source function\")\n ax.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')\n pyplot.xlabel(xName)\n\n # FdF_dpsi\n ax=fig.add_subplot(236,sharex=ax)\n ax.plot(x,eq['profiles_1d']['f_df_dpsi'], **kw)\n ax.set_title(\"$FF\\,^\\\\prime$ source function\")\n ax.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')\n pyplot.xlabel(xName)\n\n ax.set_xlim([0,1])\n\n return fig\n\n@add_to__all__\ndef core_profiles_summary(ods, time_index=0, fig=None, combine_dens_temps=True, **kw):\n '''\n Plot densities and temperature profiles for electrons and all ion species\n as per `ods['core_profiles']['profiles_1d'][time_index]`\n\n :param ods: input ods\n\n :param time_index: time slice 
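The wall masking in `equilibrium_CX` above hinges on one matplotlib trick: add the wall outline as an invisible `PathPatch`, then call `set_clip_path` on the flux-surface line so everything outside the polygon is hidden. A standalone sketch of just that technique, with a made-up outline and curve:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
from matplotlib.path import Path

fig, ax = plt.subplots()
outline = np.array([[0.2, 0.2], [0.8, 0.2], [0.8, 0.8], [0.2, 0.8], [0.2, 0.2]])
patch = PathPatch(Path(outline), facecolor='none')  # invisible clipping polygon
ax.add_patch(patch)

t = np.linspace(0, 6 * np.pi, 500)
line, = ax.plot(0.5 + 0.5 * np.cos(t) * t / t.max(),
                0.5 + 0.5 * np.sin(t) * t / t.max())
line.set_clip_path(patch)  # hide the spiral wherever it leaves the polygon
plt.show()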
to plot\n\n :param fig: figure to plot in (a new figure is generated if `fig is None`)\n\n :param combine_dens_temps: combine species plot of density and temperatures\n\n :param kw: arguments passed to matplotlib plot statements\n\n :return: list of axes\n '''\n if fig is None:\n fig=pyplot.figure()\n\n prof1d=ods['core_profiles']['profiles_1d'][time_index]\n x=prof1d['grid.rho_tor_norm']\n\n what=['electrons']+['ion[%d]'%k for k in range(len(prof1d['ion']))]\n names=['Electrons']+[prof1d['ion[%d].label'%k]+' ion' for k in range(len(prof1d['ion']))]\n\n r=len(prof1d['ion'])+1\n\n ax=None\n for k,item in enumerate(what):\n\n #densities (thermal and fast)\n for therm_fast in ['','_fast']:\n therm_fast_name=['',' (fast)'][therm_fast=='_fast']\n density=item+'.density'+therm_fast\n if item+'.density'+therm_fast in prof1d:\n if combine_dens_temps:\n if k==0:\n ax=ax0=pyplot.subplot(1,2,1)\n else:\n ax=ax0=pyplot.subplot(r,2,(2*k)+1,sharex=ax)\n if any(is_uncertain(prof1d[density])):\n uband(x,prof1d[density],label=names[k]+therm_fast_name,ax=ax0,**kw)\n else:\n ax0.plot(x,prof1d[density],label=names[k]+therm_fast_name,**kw)\n if k==len(prof1d['ion']):\n ax0.set_xlabel('$\\\\rho$')\n if combine_dens_temps:\n ax0.legend(loc=0).draggable(True)\n if k==0:\n ax0.set_title('Density [m$^{-3}$]')\n if not combine_dens_temps:\n ax0.set_ylabel(names[k])\n\n #temperatures\n if item+'.temperature' in prof1d:\n if combine_dens_temps:\n if k==0:\n ax=ax1=pyplot.subplot(1,2,2,sharex=ax0)\n else:\n ax=ax1=pyplot.subplot(r,2,(2*k)+2,sharex=ax)\n if any(is_uncertain(prof1d[item+'.temperature'])):\n uband(x,prof1d[item+'.temperature'],label=names[k],ax=ax1,**kw)\n else:\n ax1.plot(x,prof1d[item+'.temperature'],label=names[k],**kw)\n if k==len(prof1d['ion']):\n ax1.set_xlabel('$\\\\rho$')\n if k==0:\n ax1.set_title('Temperature [eV]')\n\n ax.set_xlim([0,1])\n return fig\n","sub_path":"omas/omas_plot.py","file_name":"omas_plot.py","file_ext":"py","file_size_in_byte":13857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504961438","text":"import numpy as np\nimport os\nimport re\nimport email\nimport base64\nfrom nltk.corpus import stopwords\nfrom bs4 import BeautifulSoup\nfrom nltk import word_tokenize, WordNetLemmatizer\nfrom stemming.porter2 import stem\nfrom scipy.special import factorial\nimport nltk\n\nstoplist = stopwords.words('english')\nWfo_Lambda = 0.5\nWfo_threshold = 0.01\n\n\ndef getFeature(emailText):\n \"\"\"\n preprocess and get feature\n word_tokenize: Splitting the text by white spaces and punctuation marks\n lemmatizers: identify different forms of the same word only once\n (e.g. 
price and prices -> price)\n Word Stemming (removes suffix’s using Porters algorithm)\n stopwords: remove those words which do not contain important significance\n :param text: original email content\n :return: filtered email content\n \"\"\"\n\n lemmatizer = WordNetLemmatizer()\n regex = re.compile(r'[^a-zA-Z]|\\d')\n words = regex.split(_getSubjectBodyText(emailText))\n filteredwords = [stem(w) for w in words if w and not w.lower() in stoplist]\n #filteredwords = [w for w in words if w and not w.lower() in stoplist]\n #print(filteredwords)\n return filteredwords\n\n# def loadMailDataTest(folderAddress):\n# classCategory = [] # 1 for spam,0 for ham\n# emailWords = []\n# file_list = os.listdir(folderAddress)\n# for rFile in file_list:\n# if rFile.find('ham') != -1:\n# classCategory.append(0)\n# elif rFile.find('spam') != -1:\n# classCategory.append(1)\n#\n# filename = folderAddress + rFile\n# inFile = open(filename, 'r', errors='ignore')\n# content = inFile.read()\n# regex = re.compile(r'[^a-zA-Z]|\\d')\n# lemmatizer = WordNetLemmatizer()\n# features = regex.split(_getSubjectBodyTextTest(content))\n# filteredwords = [stem(w) for w in features if w and not w.lower() in stoplist]\n# if filteredwords:\n# emailWords.append(filteredwords)\n# else:\n# print(rFile, \"feature is empty\")\n#\n# return emailWords, classCategory\n\ndef loadMailData(folderAddress):\n \"\"\"\n read Mails in specific folder\n :param folderAddress: the path of mail folder\n :return:\n \"\"\"\n classCategory = [] # 1 for spam,0 for ham\n emailWords = []\n file_list = os.listdir(folderAddress)\n for rFile in file_list:\n if rFile.find('ham') != -1:\n classCategory.append(0)\n elif rFile.find('spam') != -1:\n classCategory.append(1)\n\n filename = folderAddress + rFile\n inFile = open(filename,'r', errors='ignore')\n content = inFile.read()\n # for line in inFile.readlines():\n # lines += line\n features = getFeature(content)\n if features:\n emailWords.append(features)\n else:\n print(rFile,\"feature is empty\")\n\n\n return emailWords, classCategory\n\ndef createVocabularyList(smsWords):\n # :set[] erase repetition\n # \"\"\"\n vocabularySet = set([])\n for words in smsWords:\n vocabularySet = vocabularySet | set(words)\n vocabularyList = list(vocabularySet)\n return vocabularyList\n\n\ndef getVocabularyList(fileName):\n \"\"\"\n get vovabularyList\n :param fileName:\n :return:\n \"\"\"\n print(\"getVocabularyList\")\n fr = open(fileName, 'r')\n vocabularyList=[]\n line = fr.readline()\n while line:\n vocabularyList.append(line.strip())\n line = fr.readline()\n fr.close()\n print(\"length of vocabularyList\", len(vocabularyList))\n return vocabularyList\n\n\ndef setOfWordsToVecTor(vocabularyList, emailWords):\n\n vocabMarked = [0] * len(vocabularyList)\n for word in emailWords:\n if word in vocabularyList:\n vocabMarked[vocabularyList.index(word)] += 1\n return vocabMarked\n\n\ndef setOfWordsListToVecTor(vocabularyList, smsWordsList):\n vocabMarkedList = []\n for i in range(len(smsWordsList)):\n vocabMarked = setOfWordsToVecTor(vocabularyList, smsWordsList[i])\n vocabMarkedList.append(vocabMarked)\n return vocabMarkedList\n\ndef wfoFilter(preVocabularyList, emailWordsList, classLables):\n filteredVocabularyList = []\n vocabMarkedList = setOfWordsListToVecTor(preVocabularyList, emailWordsList)\n pVocabularyinSpam, pVocabularyinHam, pSpam = _calPvocabulary(vocabMarkedList, classLables)\n\n conditionTemp = pVocabularyinSpam / pVocabularyinHam\n wfo = np.zeros(len(vocabMarkedList[0]))\n\n for i in range(len(conditionTemp)):\n 
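The preprocessing chain in `getFeature` above (regex split on non-letters, stopword removal, Porter2 stemming) is easier to see on a concrete string. A small sketch, assuming NLTK's stopword corpus has already been downloaded:

import re
from nltk.corpus import stopwords
from stemming.porter2 import stem

stoplist = stopwords.words('english')
text = "Prices were reduced! Click here for the cheapest prices."
words = re.compile(r'[^a-zA-Z]|\d').split(text)
filtered = [stem(w) for w in words if w and w.lower() not in stoplist]
print(filtered)  # stemmed tokens with stopwords ('were', 'for', 'the', ...) removed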
if conditionTemp[i] > 1:\n wfo[i] = pow(np.log(pVocabularyinSpam[i]) - np.log(pVocabularyinHam[i]), 1 - Wfo_Lambda) \\\n * pow(pVocabularyinSpam[i], Wfo_Lambda)\n else:\n wfo[i] = pow(np.log(pVocabularyinHam[i]) - np.log(pVocabularyinSpam[i]), 1 - Wfo_Lambda) \\\n * pow(pVocabularyinHam[i], Wfo_Lambda)\n\n for j in range(len(wfo)):\n if wfo[j] > Wfo_threshold:\n filteredVocabularyList.append(preVocabularyList[j])\n return filteredVocabularyList\n\n\ndef trainingNaiveBayes(trainMarkedWords, trainCategory):\n pVocabularyinSpam, pVocabularyinHam, pSpam = _calPvocabulary(trainMarkedWords, trainCategory)\n pWordsSpamicity = np.log(pVocabularyinSpam)\n pWordsHealthy = np.log(pVocabularyinHam)\n #pWordsSpamicity = pVocabularyinSpam\n #pWordsHealthy = pVocabularyinHam\n\n return pWordsSpamicity, pWordsHealthy, pSpam\n\n\ndef getTrainedModelInfo():\n \"\"\"\n get trained model info including\n vocabularyList\n pWordsHealthy\n pWordsSpamicity\n pSpam\n \"\"\"\n vocabularyList = getVocabularyList('vocabularyList.txt')\n pWordsHealthy = np.loadtxt('pWordsHealthy.txt', delimiter='\\t')\n pWordsSpamicity = np.loadtxt('pWordsSpamicity.txt', delimiter='\\t')\n DS = np.loadtxt('trainDS.txt', delimiter='\\t')\n #print(\"length of pWordsHealthy: \", len(pWordsHealthy))\n # print(\"length of pWordsSpamicity: \", len(pWordsSpamicity))\n fr = open('pSpam.txt')\n pSpam = float(fr.readline().strip())\n fr.close()\n\n return vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, DS\n\n\ndef classify(vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, testWords):\n\n testWordsCount = setOfWordsToVecTor(vocabularyList, testWords)\n print(testWordsCount)\n testWordsMarkedArray = np.array(testWordsCount)\n\n # ci = argmax (log pi + sum (log P(wj|ci)))\n p1 = sum(testWordsMarkedArray * pWordsSpamicity) + np.log(pSpam)\n p0 = sum(testWordsMarkedArray * pWordsHealthy) + np.log(1 - pSpam)\n\n # np.power(pWordsSpamicity, testWordsMarkedArray) / factorial(testWordsMarkedArray, exact=False)\n\n if p1 > p0:\n return 1\n else:\n return 0\n\ndef classifyMSE(vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, testWords):\n\n testWordsCount = setOfWordsToVecTor(vocabularyList, testWords)\n testWordsMarkedArray = np.array(testWordsCount)\n\n # ci = argmax (log pi + sum (log P(wj|ci)))\n p1 = sum(testWordsMarkedArray * pWordsSpamicity) + np.log(pSpam)\n p0 = sum(testWordsMarkedArray * pWordsHealthy) + np.log(1 - pSpam)\n\n # np.power(pWordsSpamicity, testWordsMarkedArray) / factorial(testWordsMarkedArray, exact=False)\n print(\"p1->\", p1, \" p0->\", p0)\n return p1, p0\n # if p1 > p0:\n # return 1\n # else:\n # return 0\n\ndef adaboostClassify(vocabularyList, pWordsSpamicity, pWordsHealthy, DS, pSpam, testWordsMarkedArray):\n\n # ci = argmax (log pi + sum (log P(wj|ci)))* DS\n p1 = sum(testWordsMarkedArray * pWordsSpamicity ) + np.log(pSpam)\n p0 = sum(testWordsMarkedArray * pWordsHealthy) + np.log(1 - pSpam)\n\n if p1 > p0:\n return p1, p0, 1\n else:\n return p1, p0, 0\n\ndef adaboostClassifyForPredict(vocabularyList, pWordsSpamicity, pWordsHealthy, DS, pSpam, testWords):\n testWordsCount = setOfWordsToVecTor(vocabularyList, testWords)\n testWordsMarkedArray = np.array(testWordsCount)\n ps, ph, mailType = adaboostClassify(vocabularyList, pWordsSpamicity, pWordsHealthy, DS, pSpam, testWordsMarkedArray)\n\n return mailType\n\ndef adaboostPredict(testEmailWords):\n predicted = []\n vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, DS = getTrainedModelInfo()\n\n for i in range(len(testEmailWords)):\n emailType = 
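The `classify` function above implements the multinomial Naive Bayes decision rule, c_i = argmax(log P(c_i) + sum_j count_j * log P(w_j | c_i)). A toy example with a three-word vocabulary and made-up probabilities shows the arithmetic:

import numpy as np

p_words_spam = np.log([0.5, 0.3, 0.2])  # P(word | spam) over a 3-word vocabulary
p_words_ham = np.log([0.2, 0.3, 0.5])   # P(word | ham)
p_spam = 0.4                            # prior P(spam)
counts = np.array([3, 1, 0])            # word counts observed in a test mail

p1 = counts @ p_words_spam + np.log(p_spam)     # log-score for spam
p0 = counts @ p_words_ham + np.log(1 - p_spam)  # log-score for ham
print('spam' if p1 > p0 else 'ham')             # -> spam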
adaboostClassifyForPredict(vocabularyList, pWordsSpamicity,\n pWordsHealthy, DS, pSpam, testEmailWords[i])\n predicted.append(emailType)\n\n return predicted\n\ndef baselinePredict(testEmailWords):\n predicted = []\n for i in range(len(testEmailWords)):\n if len(testEmailWords[i]) > 200:\n predicted.append(0)\n else:\n predicted.append(1)\n return predicted\n\ndef predict(testEmailWords):\n \"\"\"\n predict spam or ham depend on pretrained info\n :param testEmailWords:array including many email words need to predict\n :return: predicted array {1:spam (positive),0:ham (negative)}\n \"\"\"\n predicted = []\n vocabularyList, pWordsSpamicity, pWordsHealthy, pSpam, DS = getTrainedModelInfo()\n\n for i in range(len(testEmailWords)):\n emailType = classify(vocabularyList, pWordsSpamicity,\n pWordsHealthy, pSpam, testEmailWords[i])\n predicted.append(emailType)\n\n return predicted\n\ndef _getTextFromDifferentContentType(contentType,contentEncoding, text):\n if contentType == 'text/plain':\n if contentEncoding == 'base64':\n text = base64.standard_b64decode(text)\n return str(text)\n elif contentType == 'text/html':\n if contentEncoding == 'base64':\n text = base64.standard_b64decode(text)\n\n soup = BeautifulSoup(text, 'lxml')\n # for script in soup([\"script\", \"style\"]):\n # script.extract()\n clean_html = ' '.join(soup.findAll(text=True))\n return clean_html\n else:\n return ''\n\ndef _getSubjectBodyText(emailText):\n email_message = email.message_from_string(emailText)\n subject = email_message.get('Subject')\n mailSubjectBody = []\n if subject != None:\n mailSubjectBody.append(subject)\n if email_message.is_multipart():\n for part in email_message.get_payload():\n if part.is_multipart():\n for subPart in part.get_payload():\n mailSubjectBody.append(_getTextFromDifferentContentType\n (subPart.get_content_type(),\n subPart['Content-Transfer-Encoding'],subPart.get_payload()))\n else:\n mailSubjectBody.append(_getTextFromDifferentContentType\n (part.get_content_type(),\n part['Content-Transfer-Encoding'],part.get_payload()))\n else:\n mailSubjectBody.append(_getTextFromDifferentContentType\n (email_message.get_content_type(),\n email_message['Content-Transfer-Encoding'],email_message.get_payload()))\n\n\n return ' '.join(mailSubjectBody)\n\ndef _calPvocabulary(trainMarkedWords, trainCategory):\n numTrainDoc = len(trainMarkedWords)\n numWords = len(trainMarkedWords[0])\n # P(Spam) - prior probability\n pSpam = sum(trainCategory) / float(numTrainDoc)\n\n # count the occurrence of each vocabularyList(corpus) word in spam and ham\n # Laplace correction\n wordsInSpamNum = np.ones(numWords)\n wordsInHealthNum = np.ones(numWords)\n spamWordsNum = 2.0\n healthWordsNum = 2.0\n for i in range(0, numTrainDoc):\n if trainCategory[i] == 1: # spam\n wordsInSpamNum += trainMarkedWords[i]\n spamWordsNum += sum(trainMarkedWords[i]) # count total number of words in spam\n else:\n wordsInHealthNum += trainMarkedWords[i] # ham\n healthWordsNum += sum(trainMarkedWords[i]) # count total number of words in ham\n\n pVocabularyinSpam = wordsInSpamNum / spamWordsNum\n pVocabularyinHam = wordsInHealthNum / healthWordsNum\n\n return pVocabularyinSpam, pVocabularyinHam, pSpam\n\n# def _getSubjectBodyTextTest(emailText):\n# email_message = email.message_from_string(emailText)\n# subject = email_message.get('Subject')\n# mailSubjectBody = []\n# if subject != None:\n# mailSubjectBody.append(subject)\n# if email_message.is_multipart():\n# for part in email_message.get_payload():\n# if part.is_multipart():\n# for subPart in 
part.get_payload():\n# mailSubjectBody.append(_getTextFromDifferentContentTypeTest\n# (subPart.get_content_type(),\n# subPart['Content-Transfer-Encoding'],subPart.get_payload()))\n# else:\n# mailSubjectBody.append(_getTextFromDifferentContentTypeTest\n# (part.get_content_type(),\n# part['Content-Transfer-Encoding'],part.get_payload()))\n# else:\n# mailSubjectBody.append(_getTextFromDifferentContentTypeTest\n# (email_message.get_content_type(),\n# email_message['Content-Transfer-Encoding'],email_message.get_payload()))\n#\n#\n# return ' '.join(mailSubjectBody)\n\n# def _getTextFromDifferentContentTypeTest(contentType,contentEncoding, text):\n# if contentType == 'text/plain':\n# if contentEncoding == 'base64':\n# text = base64.standard_b64decode(text)\n# return str(text)\n# elif contentType == 'text/html':\n# if contentEncoding == 'base64':\n# text = base64.standard_b64decode(text)\n# # soup = BeautifulSoup(text, 'lxml')\n# # clean_html = ' '.join(soup.findAll(text=True))\n# return cleanhtml(str(text))\n# else:\n# return ''\n\ndef cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext","sub_path":"naive_spam_filter/simpleNavie.py","file_name":"simpleNavie.py","file_ext":"py","file_size_in_byte":13898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75915361","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 6 14:25:48 2020\n\n@author: vladg, phillipe\n@version: 2.2\n\"\"\"\n\nimport numpy as np\nfrom control.matlab import *\nimport matplotlib.pyplot as plt\nimport numpy.linalg\n\n\n# +++++++++++++++++++++++++++++++++ Helper Functions ++++++++++++++++++++++++++++++++++++++++++++++\n\ndef plotting(x,y,name,variable,unit):\n \"\"\"Use this for plotting.\"\"\"\n\n ax = plt.figure(str(name))\n # ax.legend(\"best\")\n\n plt.plot(x,y,label=name)\n plt.xlabel(\"t [s]\")\n\n lab = str(str(variable)+\" \"+\"[\"+unit+\"]\")\n plt.ylabel(lab)\n plt.grid(True)\n plt.show()\n\n\n#+++++++++++++++++++++++++++++++++++ Global variables+++++++++++++++++++++++++++++++++++++++++++++++\n\n# Citation 550 - Linear simulation\n # Aircraft geometry\n\nS = 30.00\t # wing area [m^2]\nSh = 0.2 * S # stabiliser area [m^2]\nSh_S = Sh / S\t # [ ]\nlh = 0.71 * 5.968 # tail length [m]\nc = 2.0569\t # mean aerodynamic cord [m]\nlh_c = lh / c\t # [ ]\nb = 15.911\t # wing span [m]\nbh = 5.791\t # stabilser span [m]\nA = b ** 2 / S # wing aspect ratio [ ]\nAh = bh ** 2 / Sh # stabilser aspect ratio [ ]\nVh_V = 1\t # [ ]\nih = -2 * np.pi / 180 # stabiliser angle of incidence [rad]\n\noew = 4157.174 #Operational Empty Weight [kg]\nm_payload = 765 # Payload mass [kg]\n # Aerodynamic properties\ne = 0.7448 # Oswald factor [ ]\nCD0 = 0.02115 # Zero lift drag coefficient [ ]\nCLa = 4.811 # Slope of CL-alpha curve [ ]\n\n # Constant values concerning atmosphere and gravity\nrho0 = 1.2250 # air density at sea level [kg/m^3]\nlam = -0.0065 # temperature gradient in ISA [K/m]\nTemp0 = 288.15 # temperature at sea level in ISA [K]\nR = 287.05 # specific gas constant [m^2/sec^2K]\ng = 9.81 # [m/sec^2] (gravity constant)\n\n\n #Simulation parameters:\nnsteps = 10**3\n\n#+++++++++++++++++++++++++++++++++ MAIN ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\ndef main(t0,deltat,input_type):\n \"\"\"Input type: elevator\n rudder\n airleron\"\"\"\n\n #C.G location\n xcg = 0.25 * c\n\n # Stationary flight condition\n m_fuel = 1197.484 # CHANGE M_fule(t0) Total fuel mass [kg]\n gamma = 0 # !!!!!! 
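`_getSubjectBodyText` in the spam-filter record above recurses one level into multipart payloads by hand; the standard library's `Message.walk()` iterates every nested part for you. A small sketch that builds a multipart mail and collects its text parts:

import email
from email.message import EmailMessage

msg = EmailMessage()
msg['Subject'] = 'example'
msg.set_content('plain-text body')                       # text/plain part
msg.add_alternative('<p>html body</p>', subtype='html')  # adds a text/html alternative

parsed = email.message_from_string(msg.as_string())
texts = [part.get_payload() for part in parsed.walk()
         if part.get_content_type() in ('text/plain', 'text/html')]
print(parsed['Subject'], len(texts))  # example 2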
flight path angle -\n hp0 = 1527.048 \t # CHANGE pressure altitude in the stationary flight condition [m]\n V0 = 127.067 # CHANGE true airspeed in the stationary flight condition [m/sec]\n alpha0 = np.radians(1.4) # CHANGE angle of attack in the stationary flight condition [rad]\n th0 = alpha0 + gamma # CHANGE pitch angle in the stationary flight condition [rad]\n\n # Aircraft mass\n m = 4989.516 + m_payload # CHANGE mass [kg]\n\n # Longitudinal stability\n Cma = -0.4934 # ref data # CHANGE longitudinal stabilty [ ]\n Cmde = 1.031 # ref data # CHANGE elevator effectiveness [ ]\n\n # air density [kg/m^3]\n rho = rho0 * pow( ((1+(lam * hp0 / Temp0))), (-((g / (lam*R)) + 1)))\n W = m * g # [N] (aircraft weight)\n\n # Aircraft inertia (depend on t0):\n muc = m / (rho * S * c) #CHANGE\n mub = m / (rho * S * b) #CHANGE\n KX2 = 0.019\n KZ2 = 0.042\n KXZ = 0.002\n KY2 = 1.25 * 1.114\n\n # Aerodynamic constants:\n\n Cmac = 0 # Moment coefficient about the aerodynamic centre [ ]\n CNwa = CLa # Wing normal force slope [1/rad]\n CNha = 2 * np.pi * Ah / (Ah + 2) # Stabiliser normal force slope [ ]\n depsda = 4 / (A + 2) # Downwash gradient [ ]\n\n # Lift and drag coefficient (depend on t0):\n\n CL = 2 * W / (rho * V0 ** 2 * S) # CHANGES Lift coefficient [ ]\n CD = CD0 + (CLa * alpha0) ** 2 / (np.pi * A * e) # Drag coefficient [ ]\n\n # Stabiblity derivatives\n CX0 = W * np.sin(th0) / (0.5 * rho * V0 ** 2 * S)\n CXu = -0.095 #corrected\n CXa = +0.47966\t\t# Positive! (has been erroneously negative since 1993)\n CXadot = +0.08330\n CXq = -0.28170\n CXde = -0.03728\n\n CZ0 = -W * np.cos(th0) / (0.5 * rho * V0 ** 2 * S)\n CZu = -0.37616\n CZa = -5.74340\n CZadot = -0.00350\n CZq = -5.66290\n CZde = -0.69612\n\n Cmu = +0.06990 #positive!\n Cmadot = +0.17800 #positive!\n Cmq = -8.79415\n\n CYb = -0.7500\n CYbdot = 0\n CYp = -0.0304\n CYr = +0.8495\n CYda = -0.0400\n CYdr = +0.2300\n\n Clb = -0.10260\n Clp = -0.71085\n Clr = +0.23760\n Clda = -0.23088\n Cldr = +0.03440\n\n Cnb = +0.1348\n Cnbdot = 0\n Cnp = -0.0602\n Cnr = -0.2061\n Cnda = -0.0120\n Cndr = -0.0939\n\n #c-matrix dimensions\n s1 = (4,4)\n s2 = (4,1)\n s3 = (4,2)\n\n #Creating the different c-matrices (c1, c2 &c3) for symmetrical flight\n #c1 matrix\n c1 = np.zeros(s1)\n c1[0,0] = -2*muc*(c/V0)\n c1[1,1] = (CZadot - 2*muc)*(c/V0)\n c1[2,2] = -(c/V0)\n c1[3,1] = Cmadot*(c/V0)\n c1[3,3] = -2*muc*KY2*((c/V0)**2)\n\n #c2 matrix\n c2 = np.zeros(s1)\n c2[0,0] = -CXu\n c2[0,1] = -CXa\n c2[0,2] = -CZ0\n c2[0,3] = -CXq\n c2[1,0] = -CZu\n c2[1,1] = -CZa\n c2[1,2] = -CX0\n c2[1,3] = -(CZq + 2*muc)*(c/V0)\n c2[2,3] = -(c/V0)\n c2[3,0] = -Cmu\n c2[3,1] = -Cma\n c2[3,3] = -Cmq*(c/V0)\n\n #c3 matrix\n c3 = np.zeros(s2)\n c3[0,0] = -CXde\n c3[1,0] = -CZde\n c3[3,0] = -Cmde\n\n\n #Creating the different c-matrices (c4, c5 &c6) for asymmetrical flight\n\n #c4 matrix\n c4 = np.zeros(s1)\n c4[0,0] = (CYbdot - 2*mub)*(b/V0)\n c4[1,1] = (-0.5)*(b/V0)\n c4[2,2] = -4*mub*KX2*(b/V0)*(b/(2*V0))\n c4[2,3] = 4*mub*KXZ*(b/V0)*(b/(2*V0))\n c4[3,0] = Cnb*(b/V0)\n c4[3,2] = 4*mub*KXZ*(b/V0)*(b/(2*V0))\n c4[3,3] = -4*mub*KZ2*(b/V0)*(b/(2*V0))\n\n #c5 matrix\n c5 = np.zeros(s1)\n c5[0,0] = CYb\n c5[0,1] = CL\n c5[0,2] = CYp*(b/(2*V0))\n c5[0,3] = (CYr - 4*mub)*(b/(2*V0))\n c5[1,2] = (b/(2*V0))\n c5[2,0] = Clb\n c5[2,2] = Clp*(b/(2*V0))\n c5[2,3] = Clr*(b/(2*V0))\n c5[3,0] = Cnb\n c5[3,2] = Cnp*(b/(2*V0))\n c5[3,3] = Cnr*(b/(2*V0))\n\n #c6 matrix\n c6 = np.zeros(s3)\n c6[0,0] = -CYda\n c6[0,1] = -CYdr\n c6[2,0] = -Clda\n c6[2,1] = -Cldr\n c6[3,0] = -Cnda\n c6[3,1] = -Cndr\n\n # 
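A quick way to sanity-check the state matrix assembled from these c-matrices is to look at its eigenvalues: for the symmetric aircraft system they should form two complex pairs (short period and phugoid). A standalone sketch with illustrative numbers — not the Citation's actual derivatives:

import numpy as np

# illustrative 4x4 state matrix; the real A_s comes from inv(c1) @ c2 below
A_s = np.array([[-0.01, 0.2, -9.7, 0.0],
                [-0.1, -2.0, 0.0, 0.95],
                [0.0, 0.0, 0.0, 1.0],
                [0.02, -4.0, 0.0, -3.0]])

for eig in np.linalg.eigvals(A_s):
    omega_n = abs(eig)            # undamped natural frequency [rad/s]
    zeta = -eig.real / abs(eig)   # damping ratio (for complex pairs)
    print(f"lambda = {complex(eig):.4f}, omega_n = {omega_n:.3f}, zeta = {zeta:.3f}")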
Time responses for unit steps:\n t = np.linspace(t0, deltat, nsteps)\n\n #Now, we distinct between inputs:\n\n if input_type==\"elevator\":\n #Symmetric system is triggered:\n\n #Creating the state matrix(A) and the input matrix(B) for symmetrical flight - xdot = c1^-1*c2*x c1^-1*c3*u = Ax + Bu\n A_s = np.dot(np.linalg.inv(c1), c2)\n B_s = np.dot(np.linalg.inv(c1), c3)\n C_s = np.identity(4)\n D_s = np.zeros((4, 1))\n\n #System in state-space\n sys_s = StateSpace(A_s, B_s, C_s, D_s)\n poles_s = pole(sys_s)\n damp(sys_s)\n #print(\"Eigen values of the symmetric system: \", poles_s) #verified\n\n # Time responses for unit steps:\n\n # u = input_array\n u = np.ones(t.shape)\n yout,t,u = lsim(sys_s,u,t) #general time response\n\n u_out_s = yout[:,0]\n alpha_out_s = yout[:,1]\n theta_out_s = yout[:,2]\n q_out_s = yout[:,3]\n\n #Plotting....\n plotting(t,u_out_s,str(\"u Response for \" +input_type+ \" input\"),r\"$u$\",\"m/s\")\n plotting(t,alpha_out_s,str(\"Alpha Response for \" +input_type+ \" input\"),r\"$\\alpha$\",\"-\")\n plotting(t,theta_out_s,str(\"Theta Response for \" +input_type+ \" input\"),r\"$\\theta$\",\"-\")\n plotting(t,q_out_s,str(\"q Response for \" +input_type+ \" input\"),\"$q$\",r\"1/s\")\n\n\n else:\n #Creating the state matrix(A) and the input matrix(B) for asymmetrical flight - y = c4^-1*c5*x c4^-1*c5*u = Ax + Bu\n A_a = -np.dot(np.linalg.inv(c4), c5)\n B_a = np.dot(np.linalg.inv(c4), c6)\n C_a = np.identity(4)\n #D_a depends on the input\n\n if input_type ==\"rudder\":\n D_a = np.zeros((4, 2))\n D_a[:,0] = 1 #we should check this...\n u = np.ones((len(t),2)) * -0.804 #step input\n u[:,0]=1\n print(u.shape)\n\n elif input_type==\"aileron\":\n D_a = np.zeros((4, 2))\n D_a[:,1] = 1\n u = np.ones((len(t),2)) #step input\n u[:,1]=1\n\n #System in state-space\n sys_a = StateSpace(A_a, B_a, C_a, D_a)\n poles_a = pole(sys_a)\n #print(\"Eigen values of the asymmetric system: \", poles_a) #verified\n\n\n\n yout,t,u = lsim(sys_a,u,t) #general time response for the input u\n\n u_out_a = yout[:,0]\n alpha_out_a = yout[:,1]\n theta_out_a = yout[:,2]\n q_out_a = yout[:,3]\n\n #Plotting...\n plotting(t,u_out_a,str(\"Beta Response for \" + input_type +\" input\"), r\"$beta$\",\"-\")\n plotting(t,alpha_out_a,str(\"Phi Response for \" +input_type + \" input\"), r\"$\\phi$\",\"-\")\n plotting(t,theta_out_a,str(\"p Response for \" +input_type + \" input\") , r\"$p$\" ,\"1/s\")\n plotting(t,q_out_a,str(\"r Response for \" +input_type + \" input\"), \"$r$\" ,r\"1/s\")\n\n return 1\n\nif __name__==\"__main__\":\n\n main(0,140,\"elevator\")\n","sub_path":"main_sim_ref.py","file_name":"main_sim_ref.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"514006960","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 22 12:10:21 2017\n\n@author: owen\n\"\"\"\nimport collections\nclass Solution(object):\n def numberOfBoomerangs(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n \"\"\"\n # time O(n^2), space O(n)\n# cnt=0\n# # i=(x1,y1), j=k=(x2,y2)\n# for x1,y1 in points:\n# dmap=collections.defaultdict(int) \n# # For every i, we capture the number of points equidistant from i.\n# for x2,y2 in points:\n# dmap[(x1-x2)**2+(y1-y2)**2]+=1\n# # For this i, we calculate all possible permutations of (j,k) from these equidistant points. 
\n# for dist in dmap:\n# cnt+=dmap[dist]*(dmap[dist]-1)\n# \n# return cnt\n\n# cnt=0\n# dmap=collections.defaultdict(int)\n# # i=(x1,y1), j=k=(x2,y2)\n# for x1,y1 in points:\n# # For every i, we capture the number of points equidistant from i.\n# for x2,y2 in points:\n# dmap[(x1-x2)**2+(y1-y2)**2]+=1\n# # For this i, we calculate all possible permutations of (j,k) from these equidistant points. \n# for dist in dmap:\n# cnt+=dmap[dist]*(dmap[dist]-1)\n# dmap.clear()\n# \n# return cnt\n\n# cnt=0\n# for x1,y1 in points:\n# dict={}\n# for x2,y2 in points:\n# dict[(x1-x2)**2+(y1-y2)**2]=dict.get((x1-x2)**2+(y1-y2)**2,0)+1 # dictionary.get() provides a default value if the key is missing\n# for dist in dict:\n# cnt+=dict[dist]*(dict[dist]-1)\n# \n# return cnt\n \n cnt=0\n for x1,y1 in points:\n dict={}\n for x2,y2 in points:\n dist=(x1-x2)**2+(y1-y2)**2\n if dist in dict:\n dict[dist]+=1\n else:\n dict[dist]=1\n for dist in dict:\n cnt+=dict[dist]*(dict[dist]-1)\n \n return cnt\n \n \nif __name__==\"__main__\":\n points=[[0,0],[1,0],[2,0]]\n print(Solution().numberOfBoomerangs(points))","sub_path":"447. Number of Boomerangs.py","file_name":"447. Number of Boomerangs.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"293486793","text":"import pygame\r\nimport random\r\n\r\npygame.init()\r\n\r\nclock = pygame.time.Clock()\r\neasy = 1000\r\nmedium = 800\r\nhard = 600\r\nmode_time = 0\r\nsw = 800\r\nsh = 600\r\nscreen = pygame.display.set_mode((sw, sh))\r\npygame.display.set_caption(\"Hui-Hui\")\r\nfont_32_bold = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 32)\r\nfont_64_bold = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 64)\r\nfont_32 = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 32)\r\nfont_64 = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 64)\r\nxy = []\r\nscore = 0\r\n\r\n\r\n# TIME OUT\r\ndef time_out_mode(st, md):\r\n global xy\r\n start_time = st\r\n font = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 32)\r\n sCoord = (10, 10)\r\n\r\n def score_print(scr):\r\n screen.blit(font.render(\"Score: \" + str(scr), True, (255, 255, 255)), sCoord)\r\n\r\n def generate_box(x, y):\r\n return (pygame.Rect(x, y, 100, 100))\r\n\r\n def isClicked(xy, mx, my):\r\n global score\r\n if xy[0] < mx < xy[0] + 100 and xy[1] < my < xy[1] + 100:\r\n score += 1\r\n return True\r\n return False\r\n\r\n clicked = False\r\n start = pygame.time.get_ticks()\r\n TimeOutRun = True\r\n while TimeOutRun:\r\n screen.fill((0, 0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n TimeOutRun = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n clicked = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 1:\r\n clicked = False\r\n\r\n box = generate_box(xy[0], xy[1])\r\n pygame.draw.rect(screen, (0, 255, 0), box)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n current_time = pygame.time.get_ticks()\r\n if (current_time - start > md):\r\n start = pygame.time.get_ticks()\r\n xy = [random.randint(0, 700), random.randint(0, 500)]\r\n\r\n if clicked:\r\n if (current_time - start < md) and isClicked(xy, mx, my):\r\n pygame.draw.rect(screen, (0, 255, 0), box)\r\n start = pygame.time.get_ticks()\r\n xy = [random.randint(0, 700), random.randint(0, 500)]\r\n\r\n game_time = pygame.time.get_ticks()\r\n timer = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 32)\r\n screen.blit(timer.render(\"Time: \" + str((game_time - start_time) // 1000) + \" 
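The accepted approach in the boomerang record above, stripped of its commented-out variants: for each anchor point, bucket every point by squared distance to it; a bucket holding m points contributes m*(m-1) ordered (j, k) pairs. A compact version using collections.Counter:

import collections

def number_of_boomerangs(points):
    count = 0
    for x1, y1 in points:
        buckets = collections.Counter((x1 - x2) ** 2 + (y1 - y2) ** 2
                                      for x2, y2 in points)
        count += sum(m * (m - 1) for m in buckets.values())
    return count

print(number_of_boomerangs([[0, 0], [1, 0], [2, 0]]))  # 2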
secs\", True, (255, 255, 255)), (10, 50))\r\n\r\n if game_time - start_time >= 10000:\r\n screen.fill((255, 0, 0))\r\n msg = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 64)\r\n mCoord = (180, 200)\r\n screen.blit(msg.render(\"GAME OVER!!!\", True, (255, 255, 255)), mCoord)\r\n\r\n fsc = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 32)\r\n fsCoord = (280, 300)\r\n screen.blit(fsc.render(\"FINAL SCORE: \" + str(score), True, (255, 255, 255)), fsCoord)\r\n\r\n score_print(score)\r\n clock.tick(60)\r\n pygame.display.update()\r\n\r\n# ARCADE MODE\r\ndef arcade_mode(st, md):\r\n global xy\r\n life = 10\r\n clicked = False\r\n font = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 32)\r\n start_time = st\r\n sCoord = (10, 10)\r\n\r\n def score_print(scr):\r\n screen.blit(font.render(\"Score: \" + str(scr), True, (255, 255, 255)), sCoord)\r\n\r\n def generate_box(x, y):\r\n return (pygame.Rect(x, y, 100, 100))\r\n\r\n def isClicked(xy, mx, my):\r\n global score\r\n if xy[0] < mx < xy[0] + 100 and xy[1] < my < xy[1] + 100:\r\n score += 1\r\n return True\r\n return False\r\n\r\n def draw_lives(lives):\r\n for i in range(lives):\r\n pygame.draw.circle(screen, (255, 0, 0), (760 - 30 * i, 20), 15)\r\n\r\n start = pygame.time.get_ticks()\r\n ArcadeRun = True\r\n while ArcadeRun:\r\n screen.fill((0, 0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n ArcadeRun = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n clicked = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 1:\r\n clicked = False\r\n\r\n box = generate_box(xy[0], xy[1])\r\n pygame.draw.rect(screen, (0, 255, 0), box)\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n current_time = pygame.time.get_ticks()\r\n\r\n if (current_time - start > md) and not (clicked):\r\n life -= 1\r\n print(life)\r\n start = pygame.time.get_ticks()\r\n xy = [random.randint(0, 700), random.randint(0, 500)]\r\n\r\n if clicked:\r\n if (current_time - start < md) and isClicked(xy, mx, my):\r\n clicked = False\r\n pygame.draw.rect(screen, (0, 255, 0), box)\r\n xy = [random.randint(0, 700), random.randint(0, 500)]\r\n start = pygame.time.get_ticks()\r\n\r\n elif (current_time - start < md) and not (isClicked(xy, mx, my)):\r\n clicked = False\r\n life -= 1\r\n pygame.draw.rect(screen, (0, 0, 255), box)\r\n xy = [random.randint(0, 700), random.randint(0, 500)]\r\n start = pygame.time.get_ticks()\r\n print(life)\r\n\r\n draw_lives(life)\r\n score_print(score)\r\n game_time = pygame.time.get_ticks()\r\n timer = pygame.font.Font('fonts/SF-Pro-Text-Regular.otf', 32)\r\n screen.blit(timer.render(\"Time: \" + str((game_time - start_time) // 1000) + \" secs\", True, (255, 255, 255)), (10, 50))\r\n\r\n if life <= 0:\r\n screen.fill((255, 0, 0))\r\n msg = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 64)\r\n screen.blit(msg.render(\"GAME OVER!!!\", True, (255, 255, 255)), (170, 200))\r\n\r\n fsc = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 32)\r\n screen.blit(fsc.render(\"FINAL SCORE: \" + str(score), True, (255, 255, 255)), (265, 275))\r\n\r\n clock.tick(60)\r\n pygame.display.update()\r\n\r\n# MENUS\r\ndef selectMode(mode_time):\r\n clicked = False\r\n mode = 0\r\n finalstate = 0\r\n menu2Run = True\r\n while menu2Run:\r\n screen.fill((0, 0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n menu2Run = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n clicked = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 
1:\r\n clicked = False\r\n\r\n Welcome_Message = font_64_bold.render(\"Hui-Hui\", True, (255, 255, 255))\r\n screen.blit(Welcome_Message, (sw // 2 - 130, 20))\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n\r\n Select_Mode = font_32_bold.render(\"Select Mode:\", True, (255, 255, 255))\r\n screen.blit(Select_Mode, (10, sh - 400))\r\n\r\n time_out = font_32.render(\"Time-Out\", True, (255, 255, 255))\r\n screen.blit(time_out, (290, sh - 360))\r\n\r\n arcade = font_32.render(\"Arcade\", True, (255, 255, 255))\r\n screen.blit(arcade, (290, sh - 320))\r\n\r\n Start = font_64_bold.render(\"Start\", True, (255, 255, 255))\r\n screen.blit(Start, (320, sh - 100))\r\n\r\n if clicked:\r\n if 285 < mx < 420 and sh - 360 < my < sh - 323:\r\n mode = 1 # Time-Out\r\n elif 285 < mx < 420 and sh - 320 < my < sh - 283:\r\n mode = 2 # Arcade\r\n elif sw // 2 - 95 < mx < sw // 2 + 95 and sh - 105 < my < sh - 30:\r\n finalstate = 1\r\n\r\n if mode == 1:\r\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(275, sh - 360, 190, 37), 2)\r\n elif mode == 2:\r\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(275, sh - 320, 190, 37), 2)\r\n\r\n if finalstate == 1:\r\n pygame.draw.rect(screen, (0, 255, 0), pygame.Rect(sw // 2 - 90, sh - 97, 180, 70), 2)\r\n if mode == 1: # Time-Out\r\n menu2Run = False\r\n time_out_mode(pygame.time.get_ticks(), mode_time)\r\n\r\n elif mode == 2: # Arcade\r\n menu2Run = False\r\n arcade_mode(pygame.time.get_ticks(), mode_time)\r\n\r\n pygame.display.update()\r\n\r\n\r\nMainRun = True\r\nnextMenu = 0\r\nstate = 0\r\nIsClicked = False\r\nwhile MainRun:\r\n screen.fill((0, 0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n MainRun = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n IsClicked = True\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 1:\r\n IsClicked = False\r\n\r\n Welcome_Message = font_64_bold.render(\"Hui-Hui\", True, (255, 255, 255))\r\n screen.blit(Welcome_Message, (sw // 2 - 130, 20))\r\n\r\n Select_Level = font_32_bold.render(\"Select Level:\", True, (255, 255, 255))\r\n screen.blit(Select_Level, (10, sh - 400))\r\n\r\n level1 = font_32.render(\"Level 1 (Easy)\", True, (0, 255, 0))\r\n screen.blit(level1, (290, sh - 360))\r\n\r\n level2 = font_32.render(\"Level 2 (Medium)\", True, (242, 255, 0))\r\n screen.blit(level2, (290, sh - 320))\r\n\r\n level3 = font_32.render(\"Level 3 (Hard)\", True, (255, 0, 0))\r\n screen.blit(level3, (290, sh - 280))\r\n\r\n select_text = font_64_bold.render(\"Select a level\", True, (255, 255, 255))\r\n screen.blit(select_text, (210, sh - 100))\r\n\r\n mx, my = pygame.mouse.get_pos()\r\n if IsClicked == True and nextMenu == 0:\r\n if 285 < mx < 420 and sh - 360 < my < sh - 323:\r\n mode_time = easy\r\n xy = [100, 100]\r\n selectMode(mode_time)\r\n elif 285 < mx < 420 and sh - 320 < my < sh - 283:\r\n mode_time = medium\r\n xy = [80, 80]\r\n selectMode(mode_time)\r\n elif 285 < mx < 420 and sh - 280 < my < sh - 150:\r\n mode_time = hard\r\n xy = [60, 60]\r\n selectMode(mode_time)\r\n\r\n pygame.display.update()\r\n","sub_path":"VMC Pygame/VMC Pygame - Homework/VMC Pygame - Homework 5/VMC Pygame - Homework 5 Solution.py","file_name":"VMC Pygame - Homework 5 Solution.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"620026095","text":"#!/usr/bin/python \n# -*- coding:utf-8 -*-\n\nimport Tkinter \nimport tkMessageBox\nimport serial\nimport sqlite3\nimport os 
\nimport dbNew\nimport re\nimport datetime\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import update\nfrom enum import Enum\nimport time\n\n\n\n#####################################################################################################################\n\n\"Classes\"\n\nclass stateRegistration(Enum): #Class used to lock in a state\n    accepted = 1\n    pending = 2\n    rejeted = 3\n\n#############################################################################################################################################\n\n\"Functions\"\n\ndef testIfEntryExists(testPresence): #Check whether the bustard is present in the database and whether it has already been weighed; scans roughly 30000 bustards\n    if testPresence is None:\n        situation.configure(text=\"Warning, this individual is not present in the database\\n\")\n        return {\"isNew\" : False} \n    elif testPresence.Weight is not None:\n        situation.configure(text=\"This bustard has already been weighed\\n\")\n        return {\"isNew\" : False}\n    else:\n        situation.configure(text=\"This bustard has not been weighed\\n\")\n        return {\"isNew\" : True}\n    \n\ndef testWeight(data,ref,WeightMes): #Check the weighing to see whether the animal has a pathological or impossible weight, to avoid recording inconsistent values and to flag possible diseases independently\n    textWeightPathMin.configure(text =\"Pathological minimum weight : \" + str(ref.WeightMinPath) + \" grams.\")\n    textWeightPathMax.configure(text =\"Pathological maximum weight : \" + str(ref.WeightMaxPath) + \" grams.\\n\") #Display the weight bounds\n    textWeightImpMin.configure(text =\"Impossible minimum weight : \" + str(ref.WeightMinImp) + \" grams.\")\n    textWeightImpMax.configure(text =\"Impossible maximum weight : \" + str(ref.WeightMaxImp) + \" grams.\\n\")\n    if WeightMes <= ref.WeightMinImp:\n        textValidationWeight.configure(text=\"This weight is impossible, too light !\\n\")\n        return stateRegistration.rejeted \n    elif WeightMes >= ref.WeightMaxImp:\n        textValidationWeight.configure(text=\"This weight is impossible, too heavy !\\n\")\n        return stateRegistration.rejeted\n    elif WeightMes <= ref.WeightMinPath:\n        textValidationWeight.configure(text=\"Warning, this animal is too thin compared to its last weights.\\n\")\n        return stateRegistration.pending\n    elif WeightMes >= ref.WeightMaxPath:\n        textValidationWeight.configure(text=\"Warning, this animal is too big compared to its last weights.\\n\")\n        return stateRegistration.pending \n    else:\n        textValidationWeight.configure(text=\"Weighing accepted\\n\")\n        return stateRegistration.accepted\n\n\n\ndef identification_weighing_recording(): #Main program, started with the start-up button\n    textWeightPathMin.configure(text=' ')\n    textWeightPathMax.configure(text=' ')\n    textWeightImpMin.configure(text=' ')\n    textWeightImpMax.configure(text=' ')\n    textValidationWeight.configure(text=' ')\n    textConfirmation.configure(text=' ') #Reset the log\n    textValidationRegister.configure(text=' ')\n    textPersoData.configure(text='No scanned individual')\n    textWeight.configure(text='No measured weight')\n    situation.configure(text = \"Waiting for a scan\")\n    FlagLec = True\n    root.update()\n    while FlagLec: #Scanner read loop\n        serLec.open() \n        uid = serLec.readline()\n        serLec.close()\n        root.update()\n        if uid: # If a scan is detected\n            FlagLec = False\n            serLec.close()\n            strUid = str(uid) #Convert bytes to string\n            persoData = 
s.query(dbNew.Session).filter(dbNew.Session.RFID == str(strUid).strip()).first() #Look the individual up in the database and fetch its info\n            Age = str(persoData.BirthDate - datetime.datetime.now())\n            Age = int(re.findall(\"\\d+\\ \",Age)[0])\n            speciesData = s.query(dbNew.Reference).filter(dbNew.Reference.Age == Age).filter(dbNew.Reference.Species == persoData.Species).first() #Fetch the reference data for this species and age\n            del Age\n            textPersoData[\"text\"] = (\"SQL request is : \\n\\n\" + str(persoData) + \"\\n\") #Display the info\n            result = testIfEntryExists(persoData) #Check with the function above\n            root.update()\n            if(result[\"isNew\"]):\n                FlagBal = True\n                root.update()\n                while FlagBal: #Scale read loop\n                    serBal.open()\n                    Weight = serBal.readline()\n                    serBal.close()\n                    root.update()\n                    if Weight: #If a weight is detected\n                        serBal.close()\n                        FlagBal = False\n                        strWeight = str(Weight) #Convert bytes to string\n                        WeightDB = float(re.findall(\"\\d+\\.\\d+\", strWeight)[0]) #Keep only the number, as a float\n                        strWeight = str(WeightDB)\n                        textWeight.configure(text = \"Measured weight : \" + strWeight + \" grams.\\n\") #Display the measured weight\n                        valid = testWeight(persoData, speciesData, WeightDB)\n                        root.update()\n                        if (valid == stateRegistration.accepted):\n                            s.query(dbNew.Session).filter(dbNew.Session.RFID == persoData.RFID).update({\"Weight\" : WeightDB, \"WeightMinPath\" : speciesData.WeightMinPath, \"WeightMaxPath\" : speciesData.WeightMaxPath, \"WeightMinImp\" : speciesData.WeightMinImp, \"WeightMaxImp\" : speciesData.WeightMaxImp, \"Date\" : datetime.datetime.now()})\n                            s.commit()\n                            textValidationRegister.configure(text=\"Weighing recorded\")\n                        elif (valid == stateRegistration.pending):\n                            textConfirmation.configure(text =\"Confirm or cancel, please.\\n\")\n                            question = tkMessageBox.askokcancel(\"Confirmation of pathological weighing\",\"This weighing is pathological, do you want to confirm it?\")\n                            if question == True:\n                                s.query(dbNew.Session).filter(dbNew.Session.RFID == persoData.RFID).update({\"Weight\" : WeightDB, \"WeightMinPath\" : speciesData.WeightMinPath, \"WeightMaxPath\" : speciesData.WeightMaxPath, \"WeightMinImp\" : speciesData.WeightMinImp, \"WeightMaxImp\" : speciesData.WeightMaxImp, \"Date\" : datetime.datetime.now()})\n                                s.commit()\n                                textValidationRegister.configure(text=\"Weighing recorded\")\n                            else:\n                                textValidationRegister.configure(text=\"Weighing canceled\")\n                        elif (valid == stateRegistration.rejeted):\n                            textValidationRegister.configure(text=\"Weighing canceled\")\n                        root.update()\n            else:\n                textValidationRegister.configure(text=\"Weighing canceled\")\n                root.update()\n    \n    \n################################################################################################################################\n\n\"Configurations\"\n\nserLec = serial.Serial('/dev/ttyUSB0', 9600, timeout = 1) #Connect to the reader\nserBal = serial.Serial('/dev/ttyUSB1', 9600, timeout = 1) #Connect to the scale\n\nserLec.close() #Close the ports so they are only opened when needed, to avoid data piling up\nserBal.close()\n\n\"\"\"f = open('releveText.txt','w+') #Create a backup test file\nf.write(\"Acquisition des donnees \\n\\n\") #Initialize the text file\"\"\"\n\nif(not os.path.isfile('releve.db')): #Create the database\n    dbNew.setDb()\n\nconn = create_engine('sqlite:///releve.db') #Connect to the database\n\nsession = sessionmaker()\nsession.configure(bind=conn) #SQLAlchemy tools to manage the SQL database with 
queries\ns = session()\n\n\n###########################################################################################################################################################\n\n\n\"Window definition\"\n\nroot = Tkinter.Tk() \nroot.title(\"RFID weighing program\") #Create the window\nroot.geometry(\"1000x2000\")\nroot.aspect(3, 2, 5, 3)\n\n\"Frame definitions\"\n\n#TO DO\n\n\"Button definitions\"\n\nstart = Tkinter.Button(root, text = \"Start-up\", command = identification_weighing_recording) #Start button\nstart.pack(side=Tkinter.BOTTOM)\n\n\"Label definitions\"\n\nsituation = Tkinter.Label(root, text = \"Waiting for startup\")\nsituation.pack()\n\ntextPersoData = Tkinter.Label(root, text = \"No scanned individual\",wraplength=720)\ntextPersoData.pack()\n\ntextWeight = Tkinter.Label(root, text = \"No measured weight\")\ntextWeight.pack() #Various labels for the weighing log\n\ntextWeightPathMin = Tkinter.Label(root, text = \" \" )\ntextWeightPathMax = Tkinter.Label(root, text = \" \" )\ntextWeightImpMin = Tkinter.Label(root, text = \" \" )\ntextWeightImpMax = Tkinter.Label(root, text = \" \" )\n\ntextWeightPathMin.pack()\ntextWeightPathMax.pack()\ntextWeightImpMin.pack()\ntextWeightImpMax.pack()\n\ntextValidationWeight = Tkinter.Label(root, text =\" \")\ntextValidationWeight.pack()\n\ntextConfirmation = Tkinter.Label(root, text = \" \")\ntextConfirmation.pack()\n\ntextValidationRegister = Tkinter.Label(root, text =\" \")\ntextValidationRegister.pack()\n\n############################################################################################################\n\nroot.mainloop() #Infinite loop of the window\n\n\n","sub_path":"test_interface_test.py","file_name":"test_interface_test.py","file_ext":"py","file_size_in_byte":10503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"518247723","text":"from .models import DbChanges\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\n\n\n@receiver(post_save)\ndef signal_post_save(sender, created, **kwargs):\n    # Migration and ContentType are for testing\n    if sender.__name__ != 'DbChanges' and sender.__name__ != 'Migration' and sender.__name__ != 'ContentType':\n        if created:\n            DbChanges.objects.create(model=sender.__name__, operation='create')\n        else:\n            DbChanges.objects.create(model=sender.__name__, operation='update')\n\n\n@receiver(post_delete)\ndef signal_post_delete(sender, **kwargs):\n    print('signal_post_delete')\n    if sender.__name__ != 'DbChanges' and sender.__name__ != 'Migration' and sender.__name__ != 'ContentType':\n        DbChanges.objects.create(model=sender.__name__, operation='delete')\n\n\n","sub_path":"main/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"388235995","text":"class Noeud:\n    arbre_vide = None\n\n    def __init__(self, etiquette, gauche, droit):\n        self._etiquette = etiquette\n        self._gauche = gauche\n        self._droit = droit\n\n    def etiquette(self):\n        return self._etiquette\n\n    def gauche(self):\n        return self._gauche\n\n    def droit(self):\n        return self._droit\n\n    def est_vide(arbre):\n        return arbre is Noeud.arbre_vide\n\n    def est_feuille(self):\n        if Noeud.est_vide(self):\n            return False\n        return Noeud.est_vide(self.gauche()) and Noeud.est_vide(self.droit())\n\n    def compte_feuille (self):\n        if self.est_feuille():\n            return 1\n        n = 0\n        if not 
Noeud.est_vide(self.gauche()):\n n += self.gauche().compte_feuille()\n if not Noeud.est_vide(self.droit()):\n n += self.droit().compte_feuille()\n return n\n\n def taille (self):\n if self.est_feuille():\n return 1\n n = 1\n if not Noeud.est_vide(self.gauche()):\n n += self.gauche().taille()\n if not Noeud.est_vide(self.droit()):\n n += self.droit().taille()\n return n\n\n def hauteur (self):\n if self.est_feuille():\n return 0\n h1 = 0\n h2 = 0\n if not Noeud.est_vide(self.gauche()):\n h1 = 1 + self.gauche().hauteur()\n if not Noeud.est_vide(self.droit()):\n h2 = 1 + self.droit().hauteur()\n return max(h1,h2)\n\n def represente (arbre, p=0):\n if Noeud.est_vide(arbre):\n print('*')\n else :\n print(arbre.etiquette())\n p += 1\n print('-' * p, end ='')\n Noeud.represente(arbre.gauche(),p)\n print('-' * p, end ='')\n Noeud.represente(arbre.droit(),p)\n\n\na = Noeud('a', Noeud.arbre_vide, Noeud.arbre_vide)\nc = Noeud('c', Noeud.arbre_vide, Noeud.arbre_vide)\nb = Noeud('b', Noeud.arbre_vide, c)\nd = Noeud('d', a, b)\n\nprint('== Test est_feuille ==')\n\nprint('est_feuille:', a.est_feuille())\nprint('est_feuille:', b.est_feuille())\nprint('est_feuille:', d.est_feuille())\n\nprint('==============')\n\nprint('== Test compte_feuille ==')\n\nprint('compte_feuille:', a.compte_feuille())\nprint('compte_feuille:', b.compte_feuille())\nprint('compte_feuille:', d.compte_feuille())\n\nprint('==============')\n\nprint('== Test taille ==')\n\nprint('taille:', a.taille())\nprint('taille:', b.taille())\nprint('taille:', d.taille())\n\nprint('==============')\n\nprint('== Test hauteur ==')\n\nprint('hauteur:', a.hauteur())\nprint('hauteur:', b.hauteur())\nprint('hauteur:', d.hauteur())\n\nprint('==============')\n\nprint('== Test represente ==')\n\na.represente()\nprint()\nb.represente()\nprint()\nd.represente()\n\nprint('==============')","sub_path":"td/arbres/solution_arbres_classe.py","file_name":"solution_arbres_classe.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"269263011","text":"# this class get the radius of the blur and blur the wincdow\n\n# take screen shot of the windows\n\nfrom pyautogui import screenshot\nfrom PIL import Image, ImageFilter\nfrom kivy.clock import Clock\nimport os\n\n\ndef WindowBlur(radius=5):\n try:\n get_image = screenshot()\n\n # save the acquired screenshot\n get_image.save('./image.png')\n\n # open the image\n image = Image.open('./image.png')\n\n # blur the image\n blurred_image = image.filter(ImageFilter.GaussianBlur(radius=radius))\n\n blurred_image_saved = blurred_image.save('./assets/blur_window.png')\n\n # return blurred png\n return blurred_image_saved\n\n except ValueError:\n return 'Radius cannot be string.'\n\n\ndef delete(file='./blur_window.png'):\n try:\n os.remove('./assets/blur_window.png')\n os.remove('./image.png')\n except FileNotFoundError:\n pass\n\n\nClock.schedule_interval(delete, 3)\n","sub_path":"fluentapp/windoweffect.py","file_name":"windoweffect.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"304741429","text":"import pygame\nfrom settings import Settings\nfrom car import Car\nimport game_functions\n\n\ndef run():\n # Game initialization and creation of screen object\n pygame.init()\n all_settings = Settings()\n screen = pygame.display.set_mode((all_settings.screen_width, all_settings.screen_height))\n car = Car(screen)\n 
pygame.display.set_caption(\"Race\")\n\n # Game main loop\n while True:\n game_functions.check_events(car)\n car.update()\n game_functions.update_screen(all_settings, screen, car)\n\nrun()","sub_path":"race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"120023841","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Book, Comment\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom .forms import BookForm, CommentForm\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom .forms import RegisterForm\n\n\ndef index(request):\n if not request.user.is_authenticated:\n return render(request, 'library/index.html', {'page_title': 'Library app'})\n else:\n return redirect('library:books')\n\n@login_required()\ndef books(request):\n tmp = Book.objects.all()\n return render(request, 'library/books.html', {'books': tmp})\n\ndef comment_view(request):\n return render(request, 'library/book.html', {'books': Book.object.all()})\n\n@login_required()\ndef book(request, id):\n tmp = get_object_or_404(Book, id=id)\n tmp1 = tmp.comment_set.all()\n print(tmp1)\n return render(request, 'library/book.html', {'book': tmp, 'page_title': tmp.title, 'comments': tmp1})\n\n@permission_required('library.change_book')\ndef edit(request, id):\n if request.method == 'POST':\n form = BookForm(request.POST)\n\n if form.is_valid():\n a = Book.objects.get(id=id)\n a.title = form.cleaned_data['title']\n a.content = form.cleaned_data['content']\n a.save()\n return redirect('library:books')\n else:\n return render(request, 'library/edit.html', {'form': form, 'id': id})\n else:\n a = Book.objects.get(id=id)\n form = BookForm(instance=a)\n return render(request, 'library/edit.html', {'form': form, 'id': id})\n\n@permission_required('library.add_book')\ndef new(request):\n if request.method == 'POST':\n form = BookForm(request.POST)\n\n if form.is_valid():\n a = Book(title=form.cleaned_data['title'], content=form.cleaned_data['content'], owner=request.user)\n a.save()\n return redirect('library:books')\n else:\n return render(request, 'library/new.html', {'form': form})\n else:\n form = BookForm()\n return render(request, 'library/new.html', {'form': form})\n\n\n@permission_required('library.delete_book')\ndef delete(request, id):\n Book.objects.filter(id=id).delete()\n tmp = Book.objects.all()\n return render(request, 'library/books.html', {'books': tmp})\n\n\ndef user_register(request):\n # if this is a POST request we need to process the form data\n template = 'library/register.html'\n\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = RegisterForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n if User.objects.filter(username=form.cleaned_data['username']).exists():\n return render(request, template, {\n 'form': form,\n 'error_message': 'Username already exists.'\n })\n elif User.objects.filter(email=form.cleaned_data['email']).exists():\n return render(request, template, {\n 'form': form,\n 'error_message': 'Email already exists.'\n })\n elif form.cleaned_data['password'] != form.cleaned_data['password_repeat']:\n return render(request, template, {\n 'form': form,\n 'error_message': 'Passwords do not match.'\n })\n else:\n # Create the 
user:\n user = User.objects.create_user(\n form.cleaned_data['username'],\n form.cleaned_data['email'],\n form.cleaned_data['password']\n )\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.phone_number = form.cleaned_data['phone_number']\n user.save()\n\n # Login the user\n login(request, user)\n\n # redirect to accounts page:\n return redirect('library:books')\n\n # No post data availabe, let's just show the page.\n else:\n form = RegisterForm()\n\n return render(request, template, {'form': form})\n\n\n","sub_path":"library/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"642729502","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n## plant_params.py\n##================================================\n# ~ capacity ## unit:KWe\n# ~ running_cost ## unit: cent/kWh\n# ~ investment_cost ## unit: cent = cent/KW * KW\n# ~ lifetime ## unit:year\n# ~ emission_intensity ## unit: ton CO2eq/kWh\n##================================================\n\nimport pandas as pd\nfrom p_sys import CarbonTax_lst ,hurdle_r as h_r\n\npp_list =['coal','gas','nuclear','solar','wind']\n##================================================\ncoal_pp = {'plant_type': 'coal', 'capacity':1000*10**3,'running_cost': 2,'investment_cost':145000*500*10**3,'lifetime':40,'emission_intensity':0.001}\ngas_pp = {'plant_type': 'gas', 'capacity':1000*10**3,'running_cost': 4.5,'investment_cost':90000*500*10**3,'lifetime':30,'emission_intensity':0.00045}\nnuclear_pp = {'plant_type': 'nuclear', 'capacity':1000*10**3,'running_cost': 1,'investment_cost':600000*500*10**3,'lifetime':40,'emission_intensity':0}\nsolar_pp = {'plant_type': 'solar', 'capacity':1000*10**3,'running_cost': 0,'investment_cost':80000*500*10**3,'lifetime':25,'emission_intensity':0}\nwind_pp = {'plant_type': 'wind', 'capacity':1000*10**3,'running_cost': 0,'investment_cost':150000*500*10**3,'lifetime':25,'emission_intensity':0}\n##================================================\ncapacity = {'nuclear':500*10**3,'coal':500*10**3,'gas':500*10**3,'solar':500*10**3,'wind':500*10**3}\ninvestment_cost = {'nuclear':600000*500*10**3,'coal':145000*500*10**3,'gas':90000*500*10**3,'solar':80000*500*10**3,'wind':150000*500*10**3}\nlifetime = {'nuclear':40,'coal':40,'gas':30,'solar':25,'wind':25}\nfuel_cost = {'biogas': 8,'nuclear':1,'coal':2,'natural_gas':4.5,'wind':0,'solar':0}\nrunning_cost = {'nuclear':1,'coal':2,'gas':4.5,'wind':0,'solar':0}\nemission_intensity = {'nuclear':0,'coal':0.001,'gas':0.00045,'solar':0,'wind':0}\n##================================================\n\nCRF_pp = {str(pp) : h_r*(1+h_r)**lifetime[pp]/((1+h_r)**lifetime[pp]-1) for pp in pp_list}\n# ~ print(CRF_pp)\ni_r=0.04\nannuitized_cost={pp : investment_cost[pp]* (i_r/(1-(1+i_r)**-lifetime[pp])) for pp in pp_list}\n\n\nmarginal_cost = {}##marginal cost of each type of plant over years.\nfor pp in pp_list:\n marginal_cost[pp] = [running_cost[pp]+ CarbonTax * emission_intensity[pp] for CarbonTax in CarbonTax_lst]\nmarginal_cost['gas'] = [fuel_cost['biogas'] if cost >fuel_cost['biogas'] else cost for cost in marginal_cost['gas']]\n\ndf_MarginalCost=pd.DataFrame.from_dict(marginal_cost,orient='index')\nmask = df_MarginalCost[df_MarginalCost.index == 'gas'] < fuel_cost['biogas'] ##if biogas is cheaper than Nat_gas.\n\ndf_MarginalCost.where(mask, fuel_cost['biogas'],inplace=True)##Entries where cond is False are 
replaced with corresponding value \n# ~ print(marginal_cost['gas'][52])\n# ~ print(df_MarginalCost.iloc[:,30:55])\n##end##\n","sub_path":"p_plant Kan Xiaoming.py","file_name":"p_plant Kan Xiaoming.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"193539037","text":"\ndef solution(money):\n \n dp1 = []\n dp1.append(money[0])\n dp1.append(max(money[0], money[1]))\n for i in range(2, len(money) - 1):\n if dp1[i - 2] + money[i] > dp1[i - 1]:\n dp1.append(dp1[i - 2] + money[i])\n else:\n dp1.append(dp1[i - 1])\n \n dp2 = [0]\n dp2.append(money[1])\n dp2.append(max(money[1], money[2]))\n for i in range(3, len(money)):\n if dp2[i - 2] + money[i] > dp2[i - 1]:\n dp2.append(dp2[i - 2] + money[i])\n else:\n dp2.append(dp2[i - 1])\n \n return max(dp1[-1], dp2[-1])\n","sub_path":"programmers/42897.py","file_name":"42897.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"282860011","text":"import math\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\n\nfrom ..initializer import initialize_from_cfg\nfrom ...extensions import DeformableConvInOne\nfrom ..normalize import build_norm_layer, parse_deprecated_bn_style\nfrom ...utils.checkpoint import fully_checkpoint_sequential\n\n__all__ = [\n 'SENet',\n 'senet154',\n 'se_resnet50',\n 'se_resnet101',\n 'se_resnet152',\n 'se_resnext50_32x4d',\n 'se_resnext101_32x4d',\n 'se_resnext101_64x4d']\n\n\nclass AdaptiveAvgPool2d(nn.Module):\n def __init__(self, output_size):\n super(AdaptiveAvgPool2d, self).__init__()\n self.output_size = output_size\n\n def extra_repr(self):\n return 'output_size={}'.format(self.output_size)\n\n def forward(self, x):\n data_type = x.dtype\n out = F.adaptive_avg_pool2d(x.float(), self.output_size)\n out = out.to(data_type)\n return out\n\ndef Sigmoid_Activate(input):\n return input * F.sigmoid(input)\n\nclass SEModule(nn.Module):\n\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\n\nclass Bottleneck(nn.Module):\n \"\"\"Base class for bottlenecks that implements `forward()` method\"\"\"\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n return getattr(self, self.norm3_name)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n #out = self.relu(out)\n out = Sigmoid_Activate(out)\n \n out = self.conv2(out)\n out = self.norm2(out)\n out = Sigmoid_Activate(out)\n #out = self.relu(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = self.se_module(out) + residual\n #out = self.relu(out)\n out = Sigmoid_Activate(out)\n\n return out\n\n\nclass SEBottleneck(Bottleneck):\n \"\"\"Bottleneck for SENet154\"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, 
planes, groups, reduction, stride=1,\n downsample=None,\n deformable=False,\n normalize={'type': 'solo_bn'}):\n super(SEBottleneck, self).__init__()\n\n self.norm1_name, norm1 = build_norm_layer(planes * 2, normalize, 1)\n self.norm2_name, norm2 = build_norm_layer(planes * 4, normalize, 2)\n self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)\n\n key_conv = nn.Conv2d\n if deformable:\n key_conv = DeformableConvInOne\n self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = key_conv(planes * 2, planes * 4, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=False)\n self.add_module(self.norm2_name, norm2)\n self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False)\n self.add_module(self.norm3_name, norm3)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNetBottleneck(Bottleneck):\n \"\"\"\n ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe\n implementation and uses `stride=stride` in `conv1` and not in `conv2`\n (the latter is used in the torchvision implementation of ResNet).\n \"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None,\n deformable=False,\n normalize={'type': 'solo_bn'}):\n super(SEResNetBottleneck, self).__init__()\n\n self.norm1_name, norm1 = build_norm_layer(planes, normalize, 1)\n self.norm2_name, norm2 = build_norm_layer(planes, normalize, 2)\n self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)\n\n key_conv = nn.Conv2d\n if deformable:\n key_conv = DeformableConvInOne\n\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = key_conv(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)\n self.add_module(self.norm2_name, norm2)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.add_module(self.norm3_name, norm3)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNeXtBottleneck(Bottleneck):\n \"\"\"ResNeXt bottleneck type C with a Squeeze-and-Excitation module\"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None, base_width=4, deformable=False,\n normalize={'type': 'solo_bn'}):\n super(SEResNeXtBottleneck, self).__init__()\n\n key_conv = nn.Conv2d\n if deformable:\n key_conv = DeformableConvInOne\n\n width = math.floor(planes * (base_width / 64)) * groups\n\n self.norm1_name, norm1 = build_norm_layer(width, normalize, 1)\n self.norm2_name, norm2 = build_norm_layer(width, normalize, 2)\n self.norm3_name, norm3 = build_norm_layer(planes * self.expansion, normalize, 3)\n\n self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = key_conv(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)\n self.add_module(self.norm2_name, norm2)\n self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)\n self.add_module(self.norm3_name, norm3)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SENet(nn.Module):\n 
\"\"\"\n \"\"\"\n def __init__(self, block, layers, groups, reduction,\n out_layers, out_strides,\n frozen_layers=None,\n deformable=None,\n initializer=None,\n inplanes=128,\n bn=None,\n normalize={'type': 'freeze_bn'},\n checkpoint=False,\n input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1):\n \"\"\"\n Arguments:\n - block (``nn.Module``): ``Bottleneck`` class::\n\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n\n - layers (:obj:`list` of :obj:`int`): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n - groups (:obj:`int`): Number of groups for the 3x3 convolution in each::\n\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n\n - reduction (:obj:`int`): Reduction ratio for Squeeze-and-Excitation modules::\n\n - For all models: 16\n\n - dropout_p (:obj:`float` or None): Drop probability for the Dropout layer::\n\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n\n - inplanes (:obj:`int`): Number of input channels for layer1::\n\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n\n - input_3x3 (:obj:`bool`): If :obj:`True`, use three 3x3\n convolutions instead of::\n\n a single 7x7 convolution in layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n\n - downsample_kernel_size (:obj:`int`): Kernel size\n for downsampling convolutions in layer2, layer3 and layer4::\n\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n\n - downsample_padding (:obj:`int`): Padding for downsampling\n convolutions in layer2, layer3 and layer4::\n\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n\n - bn (:obj:`dict`): Deprecated (see normalize). 
Config of BatchNorm (see Configuration#Normalization).\n - normalize (:obj:`dict`): Config of Normalization Layer (see Configuration#Normalization).\n \"\"\"\n super(SENet, self).__init__()\n\n if bn is not None:\n normalize = parse_deprecated_bn_style(bn)\n\n self.segments = self.get_segments(checkpoint)\n self.inplanes = inplanes\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)),\n build_norm_layer(64, normalize, 1),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),\n build_norm_layer(64, normalize, 2),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),\n build_norm_layer(inplanes, normalize, 3),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),\n build_norm_layer(inplanes, normalize, 1),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0,\n deformable=deformable,\n normalize=normalize\n )\n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding,\n deformable=deformable,\n normalize=normalize\n )\n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding,\n deformable=deformable,\n normalize=normalize\n )\n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding,\n deformable=deformable,\n normalize=normalize\n )\n\n if frozen_layers is not None and len(frozen_layers) > 0:\n assert min(frozen_layers) >= 0, frozen_layers\n assert max(frozen_layers) <= 4, frozen_layers\n assert min(out_layers) >= 0, out_layers\n assert max(out_layers) <= 4, out_layers\n self.out_layers = out_layers\n self.out_strides = out_strides\n self.frozen_layers = frozen_layers\n midplanes = [64, 256, 512, 1024, 2048]\n self.out_planes = [midplanes[i] for i in self.out_layers]\n\n if initializer is not None:\n initialize_from_cfg(self, initializer)\n # It's IMPORTANT when you want to freeze part of your backbone.\n # ALWAYS remember freeze layers in __init__ to avoid passing freezed params\n # to optimizer\n self.freeze_layer()\n\n def get_segments(self, checkpoint):\n if isinstance(checkpoint, Iterable):\n segments = [int(x) for x in checkpoint]\n else:\n segments = [int(checkpoint)] * 5\n return segments\n\n def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,\n downsample_kernel_size=1, downsample_padding=0,\n deformable=False,\n normalize={'type': 'solo_bn'}):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=downsample_kernel_size, 
stride=stride,\n padding=downsample_padding, bias=False),\n build_norm_layer(planes * block.expansion, normalize)[1]\n )\n\n layers = []\n layers.append(block(self.inplanes, planes,\n groups, reduction, stride, downsample,\n normalize=normalize))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups,\n reduction, normalize=normalize))\n if deformable:\n layers[-1] = block(self.inplanes, planes, groups,\n reduction, deformable=deformable, normalize=normalize)\n\n return nn.Sequential(*layers)\n\n def get_outplanes(self):\n \"\"\"\n \"\"\"\n return self.out_planes\n\n def get_outstrides(self):\n return self.out_strides\n\n def train(self, mode=True):\n \"\"\"\n Sets the module in training mode.\n This has any effect only on modules such as Dropout or BatchNorm.\n\n Returns:\n Module: self\n \"\"\"\n self.training = mode\n for module in self.children():\n module.train(mode)\n self.freeze_layer()\n return self\n\n def freeze_layer(self):\n layers = [self.layer0, self.layer1, self.layer2, self.layer3, self.layer4]\n for layer_idx in self.frozen_layers:\n layer = layers[layer_idx]\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def checkpoint_fwd(self, layer, input, segments=2):\n \"\"\"checkpoint forward\"\"\"\n # Make sure that the input to checkpoint have requires_grad=True, so that\n # the autograd can take care of the checkpointed part of model\n if not input.requires_grad:\n input.detach_()\n input.requires_grad = True\n return fully_checkpoint_sequential(layer, segments, input)\n\n def forward(self, input):\n \"\"\"\n\n Arguments:\n - input (:obj:`dict`): output of\n :class:`~pod.datasets.base_dataset.BaseDataset`\n\n Returns:\n - out (:obj:`dict`):\n\n Output example::\n\n {\n 'features': [], # list of tenosr\n 'strides': [] # list of int\n }\n \"\"\"\n x = input['image']\n outs = []\n for layer_idx in range(0, 5):\n layer = getattr(self, f'layer{layer_idx}', None)\n if layer is not None: # layer4 is None for C4 backbone\n # Use checkpoint for learnable layer\n if self.segments[layer_idx] > 0 and layer_idx not in self.frozen_layers:\n x = self.checkpoint_fwd(layer, x, self.segments[layer_idx])\n else:\n x = layer(x)\n outs.append(x)\n\n features = [outs[i] for i in self.out_layers]\n return {'features': features, 'strides': self.get_outstrides()}\n\n\ndef senet154(**kwargs):\n model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16, **kwargs)\n return model\n\n\ndef se_resnet50(**kwargs):\n model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0, **kwargs)\n return model\n\n\ndef se_resnet101(**kwargs):\n model = SENet(SEResNetBottleneck, [3, 4, 23, 3], groups=1, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0, **kwargs)\n return model\n\n\ndef se_resnet152(**kwargs):\n model = SENet(SEResNetBottleneck, [3, 8, 36, 3], groups=1, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0, **kwargs)\n return model\n\n\ndef se_resnext50_32x4d(**kwargs):\n model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0, **kwargs)\n return model\n\n\ndef se_resnext101_32x4d(**kwargs):\n model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, 
downsample_padding=0, **kwargs)\n return model\n\n\ndef se_resnext101_64x4d(**kwargs):\n model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=64, reduction=16, inplanes=64, input_3x3=False,\n downsample_kernel_size=1, downsample_padding=0, **kwargs)\n return model\n","sub_path":"unn/models/backbones/senet.py","file_name":"senet.py","file_ext":"py","file_size_in_byte":18381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"119330739","text":"import csv\n\nfrom comgen.constants import docstring_header, ast_header, full_dataset_path\n\n\ndef load_dataset():\n docstring_data, ast_data = [], []\n try:\n with open(full_dataset_path, newline='') as dataset:\n reader = csv.DictReader(dataset)\n for row in reader:\n docstring_data.append(row[docstring_header])\n ast_data.append(row[ast_header])\n return docstring_data, ast_data\n except Exception as e:\n print('Error loading dataset', e)\n","sub_path":"deepdocstringgen/logic/model/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"152903014","text":"from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.forms import *\nfrom .models import *\nfrom django import forms\nfrom alunoespecial.avaliacao.models import *\nfrom django.contrib.admin.widgets import AdminDateWidget \nfrom django.core.urlresolvers import reverse\n\n\nclass AdicionarCategoriaForm(ModelForm):\n class Meta:\n model = Categoria\n fields = ['nome','descricao']\n exclude = ['created_at', 'upload_at'] \n\n def __init__(self, *args, **kwargs):\n\n super(AdicionarCategoriaForm, self).__init__(*args, **kwargs) \n #self.user = user\n self.fields['nome'].widget.attrs['class'] = 'form-control'\n self.fields['descricao'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n self.fields['descricao'].widget.attrs['class'] = 'form-control'\n\nclass AdicionarNivelDicenteForm(ModelForm):\n class Meta:\n model = NivelDicente\n fields = ['nome','descricao']\n exclude = ['created_at', 'upload_at'] \n\n def __init__(self, *args, **kwargs):\n\n super(AdicionarNivelDicenteForm, self).__init__(*args, **kwargs) \n #self.user = user\n self.fields['nome'].widget.attrs['class'] = 'form-control'\n self.fields['descricao'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n self.fields['descricao'].widget.attrs['class'] = 'form-control'\n\nclass AdicionarNivelDocenteForm(ModelForm):\n class Meta:\n model = NivelDocente\n fields = ['nome','descricao']\n exclude = ['created_at', 'upload_at'] \n\n def __init__(self, *args, **kwargs):\n\n super(AdicionarNivelDocenteForm, self).__init__(*args, **kwargs) \n #self.user = user\n self.fields['nome'].widget.attrs['class'] = 'form-control'\n self.fields['descricao'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n self.fields['descricao'].widget.attrs['class'] = 'form-control'\n\nclass AdicionarAvaliacaoForm(ModelForm):\n class Meta:\n model = Avaliacao\n fields = ['avaliacao_pro','avaliacao_contra','recomendacoes']\n exclude = ['created_at', 'upload_at'] \n\n def __init__(self, *args, **kwargs):\n\n super(AdicionarAvaliacaoForm, self).__init__(*args, **kwargs) \n #self.user = user\n \n self.fields['avaliacao_pro'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n self.fields['avaliacao_contra'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n self.fields['recomendacoes'].widget.attrs={ \n \n 'rows': '5',\n \n }\n\n 
self.fields['avaliacao_pro'].widget.attrs['class'] = 'form-control'\n        self.fields['avaliacao_contra'].widget.attrs['class'] = 'form-control'\n        self.fields['recomendacoes'].widget.attrs['class'] = 'form-control'\n\nclass AdicionarFerramentaForm(ModelForm):\n    class Meta:\n        model = Ferramenta \n        \n        fields = ['nome','categoria','nivel_professor','nivel_aluno','descricao','requisitos_sistema','indicacao_pedagogica','link_download','imagem']\n        #fields = ['nome','categoria','descricao','requisitos_sistema','indicacao_pedagogica','link_download','imagem']\n        exclude = ['created_at', 'upload_at'] \n\n    def __init__(self, *args, **kwargs):\n\n        super(AdicionarFerramentaForm, self).__init__(*args, **kwargs) \n        #self.user = user\n        self.fields['nome'].widget.attrs['class'] = 'form-control'\n        self.fields['link_download'].widget.attrs['class'] = 'form-control'\n        self.fields['categoria'].queryset = Categoria.objects.all().order_by('nome')\n        \n        \n        \n        self.fields['categoria'].widget.attrs={ \n            'class': 'selectpicker', \n            'data-style': 'btn btn-rose btn-round', \n            'data-size':'7',\n\n            } \n\n\n        \n        self.fields['nivel_professor'].widget.attrs={ \n            'class': 'selectpicker', \n            'data-style': 'btn btn-rose btn-round', \n            'data-size':'7',\n            \n            } \n\n        self.fields['nivel_aluno'].widget.attrs={ \n            'class': 'selectpicker', \n            'data-style': 'btn btn-rose btn-round', \n            'data-size':'7',\n            \n            }\n        \n        self.fields['requisitos_sistema'].widget.attrs={ \n            \n            'rows': '5',\n            \n            }\n\n        self.fields['indicacao_pedagogica'].widget.attrs={ \n            \n            'rows': '5',\n            \n            }\n\n        self.fields['descricao'].widget.attrs={ \n            \n            'rows': '5',\n            \n            }\n\n        \n\n        self.fields['requisitos_sistema'].widget.attrs['class'] = 'form-control'\n        self.fields['indicacao_pedagogica'].widget.attrs['class'] = 'form-control'\n        self.fields['descricao'].widget.attrs['class'] = 'form-control'\n        #self.fields[''].widget.attrs['class'] = 'form-control'\n        self.fields['imagem'].widget.attrs={ \n            \n            'data-provides':'fileinput',\n            'class':'btn btn-rose btn-round btn-file ',\n            \n            \n\n            \n            }\n        \n        \n        \n        \n\n\n","sub_path":"alunoespecial/avaliacao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"389813625","text":"import time, os, datetime, re, json, shutil\n\nstart_time = time.time() # initial timestamp\nprint(start_time)\n\nfile_dir = '/Users/alicewish/Pictures/'\nnew_file_dir = '/Users/alicewish/Desktop/临时上传/'\n\nfile_list = os.listdir(file_dir) # get the directory contents\nprint(file_list)\n\nall_info = []\nordinal_num = 0\nfor file_name in file_list:\n    file_path = file_dir + file_name\n    # ================ file info ================\n    is_dir = os.path.isdir(file_path) # check whether the target is a directory\n    extension = os.path.splitext(file_path)[1] # file extension\n    extension_list = [\".jpg\"]\n    if not is_dir and extension in extension_list and \"Injustice- Gods Among Us - Year Five (2015-)\" in file_name:\n        print(ordinal_num)\n        print(file_name)\n        # ================ rename by rule ================\n        issue_number = 30\n        new_file_name = \"第五年\" + str(issue_number).zfill(3) + \"-\" + str(ordinal_num).zfill(3) + \".jpg\"\n        new_file_path = new_file_dir + new_file_name\n        print(new_file_name)\n        # ================ move by rule ================\n        shutil.move(file_path, new_file_path)# works for both files and directories\n        ordinal_num += 1\n\n# ================ runtime timing ================\nrun_time = time.time() - start_time\nif run_time < 60: # seconds (two decimals)\n    print(\"耗时:{:.2f}秒\".format(run_time))\nelif run_time < 3600: # minutes + seconds (rounded)\n    print(\"耗时:{:.0f}分{:.0f}秒\".format(run_time // 60, 
run_time % 60))\nelse: # hours/minutes/seconds (rounded)\n    print(\"耗时:{:.0f}时{:.0f}分{:.0f}秒\".format(run_time // 3600, run_time % 3600 // 60, run_time % 60))\n","sub_path":"文件操作-Pictures-移动.py","file_name":"文件操作-Pictures-移动.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"78683710","text":"import os\nimport sys\nimport pytest\nfrom pprint import pprint\nfrom wes.main import clone_update_repo\nfrom wes.framework_plugins.common import PythonProcessor\nfrom typed_ast import ast3\nimport _ast3\n\n@pytest.fixture(scope=\"module\")\ndef processor(tmpdir_factory):\n    # Setup the object by cloning WEST and creating instance of JavaProcessor\n    workingDir = str(tmpdir_factory.getbasetemp())\n    project = {'baseUrl': 'http://west.example.com/', 'gitRepo': 'git@github.com:indeedsecurity/WEST.git'}\n    projectRepoPath = project['gitRepo'].split(':')[-1][:-4]\n    projectName = project['gitRepo'].split('/')[-1][:-4]\n    productGroup = projectRepoPath.split('/')[0]\n\n    groupFolder = os.path.join(workingDir, productGroup)\n    projectFolder = os.path.join(groupFolder, projectName)\n\n    # clone/update the repositories\n    clone_update_repo(projectFolder, project['gitRepo'])\n\n    return PythonProcessor(workingDir=projectFolder)\n\n\ndef test_processor_init(processor):\n    assert type(processor) is PythonProcessor\n    assert hasattr(processor, 'workingDir')\n    assert hasattr(processor, 'pythonFileAsts')\n\n# Still need a python project in WEST before we can test the load_python_project method\n# def test_load_python_project(processor):\n#     pass\n\ndef test_filter_ast(processor):\n    myAst = ast3.parse(\n        \"\"\"\ndef testing(request):\n    TEST1 = True\n    if TEST1:\n        print(\"Got here first!\")\n        TEST2 = \"Eat my dust\"\n        print(TEST2 + \"!\")\n    else:\n        print(\"No I got here first!\")\n        TEST3 = \"Oh yeah I won\"\n        TEST4 = \"Okay well yeah this is a string\"\n        print(TEST3 + \"!\")\nTEST5 = \"YOU CAN'T SEE ME\"\n        \"\"\"\n    )\n\n    assert len(list(processor.filter_ast(myAst, _ast3.Assign))) == 5\n    assert len(list(processor.filter_ast(myAst.body[0], _ast3.Assign))) == 4\n    assert len(list(processor.filter_ast(myAst.body[0], _ast3.Assign))) == 4\n    assert len(list(processor.filter_ast(myAst.body[0], _ast3.Assign))) == 4\n\ndef test_strip_work_dir(processor):\n    path = \"myawesomePath/Testing/workingDir/CANiMeSSItuP.TxT\"\n    fullPath = os.path.join(processor.workingDir, path)\n    assert processor.strip_work_dir(fullPath) == path, \"Check that it removes working dir\"\n    with pytest.raises(IndexError):\n        processor.strip_work_dir(\"Testing\")\n\ndef test_parse_python_method_args(processor):\n    myAst = ast3.parse(\n        \"\"\"\ntesting1(1, 2, 3)\ntesting2(name1=1, name2=2, name3=\"3\")\ntesting3(1, 2, 3, name=4)\ntesting4(1, 2, 3, 4, 5, 6, 7)\ntesting5(1, 2, 3, name3=4)\n        \"\"\"\n    )\n    args = processor.parse_python_method_args(myAst.body[0].value, ['arg1', 'arg2', 'arg3'])\n    assert args == {'arg1': 1, 'arg2': 2, 'arg3': 3}\n\n    args = processor.parse_python_method_args(myAst.body[1].value, ['name1', 'name2', 'name3'])\n    assert args == {'name1': 1, 'name2': 2, 'name3': '3'}\n\n    args = processor.parse_python_method_args(myAst.body[2].value, ['arg1', 'arg2', 'arg3', 'name'])\n    assert args == {'arg1': 1, 'arg2': 2, 'arg3': 3, 'name': 4}\n\n    args = processor.parse_python_method_args(myAst.body[3].value, ['arg1', 'arg2', 'arg3'])\n    assert args == {'arg1': 1, 'arg2': 2, 'arg3': 3}\n\n    args = processor.parse_python_method_args(myAst.body[4].value, ['arg1', 'arg2', 'arg3', 'name1', 'name2', 
'name3'])\n assert args == {'arg1': 1, 'arg2': 2, 'arg3': 3, 'name3': 4}","sub_path":"tests/test_common_python.py","file_name":"test_common_python.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43194824","text":"import csv, os\ndef csv2list(filename):\n with open(filename, 'r') as f:\n reader = csv.reader(f); seq_list = list(reader)\n my_seq = [];\n for i in range(len(seq_list)):\n my_seq.append(seq_list[i][0])\n return my_seq\n\nprint(csv2list(\"asdf.txt\"))\nprint(os.getcwd())","sub_path":"RunningViennaRNA/draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"426932127","text":"#\n# Copyright 2017 by Delphix\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n# This class has been automatically generated from:\n# /delphix-source.json\n#\n# Do not edit this file manually!\n#\n\nfrom delphixpy.v1_9_0.web.vo.UserObject import UserObject\nfrom delphixpy.v1_9_0 import factory\nfrom delphixpy.v1_9_0 import common\n\nclass __Undef(object):\n def __repr__(self):\n return \"undef\"\n\n_UNDEFINED = __Undef()\n\nclass Source(UserObject):\n \"\"\"\n *(extends* :py:class:`v1_9_0.web.vo.UserObject` *)* A source represents an\n external database instance outside the Delphix system.\n \"\"\"\n def __init__(self, undef_enabled=True):\n super(Source, self).__init__()\n self._type = (\"Source\", True)\n self._status = (self.__undef__, True)\n self._container = (self.__undef__, True)\n self._description = (self.__undef__, True)\n self._virtual = (self.__undef__, True)\n self._hosts = (self.__undef__, True)\n self._staging = (self.__undef__, True)\n self._runtime = (self.__undef__, True)\n self._config = (self.__undef__, True)\n self._linked = (self.__undef__, True)\n\n API_VERSION = \"1.9.0\"\n\n @classmethod\n def from_dict(cls, data, dirty=False, undef_enabled=True):\n obj = super(Source, cls).from_dict(data, dirty, undef_enabled)\n obj._status = (data.get(\"status\", obj.__undef__), dirty)\n if obj._status[0] is not None and obj._status[0] is not obj.__undef__:\n assert isinstance(obj._status[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._status[0]))\n assert obj._status[0] in [u'DEFAULT', u'PENDING_UPGRADE'], \"Expected enum [u'DEFAULT', u'PENDING_UPGRADE'] but got %s\" % obj._status[0]\n common.validate_format(obj._status[0], \"None\", None, None)\n obj._container = (data.get(\"container\", obj.__undef__), dirty)\n if obj._container[0] is not None and obj._container[0] is not obj.__undef__:\n assert isinstance(obj._container[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._container[0]))\n common.validate_format(obj._container[0], \"objectReference\", None, None)\n obj._description = (data.get(\"description\", obj.__undef__), dirty)\n if obj._description[0] is not None and obj._description[0] is not obj.__undef__:\n assert 
isinstance(obj._description[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._description[0]))\n common.validate_format(obj._description[0], \"None\", None, None)\n obj._virtual = (data.get(\"virtual\", obj.__undef__), dirty)\n if obj._virtual[0] is not None and obj._virtual[0] is not obj.__undef__:\n assert isinstance(obj._virtual[0], bool), (\"Expected one of [u'boolean'], but got %s\" % type(obj._virtual[0]))\n common.validate_format(obj._virtual[0], \"None\", None, None)\n obj._hosts = []\n for item in data.get(\"hosts\") or []:\n assert isinstance(item, basestring), (\"Expected one of [u'string'], but got %s\" % type(item))\n common.validate_format(item, \"objectReference\", None, None)\n obj._hosts.append(item)\n obj._hosts = (obj._hosts, dirty)\n obj._staging = (data.get(\"staging\", obj.__undef__), dirty)\n if obj._staging[0] is not None and obj._staging[0] is not obj.__undef__:\n assert isinstance(obj._staging[0], bool), (\"Expected one of [u'boolean'], but got %s\" % type(obj._staging[0]))\n common.validate_format(obj._staging[0], \"None\", None, None)\n if \"runtime\" in data and data[\"runtime\"] is not None:\n obj._runtime = (factory.create_object(data[\"runtime\"], \"SourceRuntime\"), dirty)\n factory.validate_type(obj._runtime[0], \"SourceRuntime\")\n else:\n obj._runtime = (obj.__undef__, dirty)\n obj._config = (data.get(\"config\", obj.__undef__), dirty)\n if obj._config[0] is not None and obj._config[0] is not obj.__undef__:\n assert isinstance(obj._config[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._config[0]))\n common.validate_format(obj._config[0], \"objectReference\", None, None)\n obj._linked = (data.get(\"linked\", obj.__undef__), dirty)\n if obj._linked[0] is not None and obj._linked[0] is not obj.__undef__:\n assert isinstance(obj._linked[0], bool), (\"Expected one of [u'boolean'], but got %s\" % type(obj._linked[0]))\n common.validate_format(obj._linked[0], \"None\", None, None)\n return obj\n\n def to_dict(self, dirty=False):\n dct = super(Source, self).to_dict(dirty)\n\n def dictify(obj):\n if isinstance(obj, list):\n return [dictify(o) for o in obj]\n elif hasattr(obj, \"to_dict\"):\n return obj.to_dict()\n else:\n return obj\n if \"status\" == \"type\" or (self.status is not self.__undef__ and not (dirty and not self._status[1])):\n dct[\"status\"] = dictify(self.status)\n if \"container\" == \"type\" or (self.container is not self.__undef__ and not (dirty and not self._container[1])):\n dct[\"container\"] = dictify(self.container)\n if \"description\" == \"type\" or (self.description is not self.__undef__ and not (dirty and not self._description[1])):\n dct[\"description\"] = dictify(self.description)\n if \"virtual\" == \"type\" or (self.virtual is not self.__undef__ and not (dirty and not self._virtual[1])):\n dct[\"virtual\"] = dictify(self.virtual)\n if \"hosts\" == \"type\" or (self.hosts is not self.__undef__ and not (dirty and not self._hosts[1])):\n dct[\"hosts\"] = dictify(self.hosts)\n if \"staging\" == \"type\" or (self.staging is not self.__undef__ and not (dirty and not self._staging[1])):\n dct[\"staging\"] = dictify(self.staging)\n if \"runtime\" == \"type\" or (self.runtime is not self.__undef__ and not (dirty and not self._runtime[1])):\n dct[\"runtime\"] = dictify(self.runtime)\n if \"config\" == \"type\" or (self.config is not self.__undef__ and not (dirty and not self._config[1])):\n dct[\"config\"] = dictify(self.config)\n if \"linked\" == \"type\" or (self.linked is not 
self.__undef__ and not (dirty and not self._linked[1])):\n dct[\"linked\"] = dictify(self.linked)\n return dct\n\n def dirty(self):\n return self.from_dict(self.to_dict(dirty=False), dirty=True)\n\n def force_dirty(self):\n self._status = (self._status[0], True)\n self._container = (self._container[0], True)\n self._description = (self._description[0], True)\n self._virtual = (self._virtual[0], True)\n self._hosts = (self._hosts[0], True)\n self._staging = (self._staging[0], True)\n self._runtime = (self._runtime[0], True)\n self._config = (self._config[0], True)\n self._linked = (self._linked[0], True)\n\n def is_dirty(self):\n return any([self._status[1], self._container[1], self._description[1], self._virtual[1], self._hosts[1], self._staging[1], self._runtime[1], self._config[1], self._linked[1]])\n\n def __eq__(self, other):\n if other is None:\n return False\n if not isinstance(other, Source):\n return False\n return super(Source, self).__eq__(other) and \\\n self.status == other.status and \\\n self.container == other.container and \\\n self.description == other.description and \\\n self.virtual == other.virtual and \\\n self.hosts == other.hosts and \\\n self.staging == other.staging and \\\n self.runtime == other.runtime and \\\n self.config == other.config and \\\n self.linked == other.linked\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return common.generate_repr_string(self)\n\n @property\n def status(self):\n \"\"\"\n Status of this source. *(permitted values: DEFAULT, PENDING_UPGRADE)*\n\n :rtype: ``basestring``\n \"\"\"\n return self._status[0]\n\n @status.setter\n def status(self, value):\n self._status = (value, True)\n\n @property\n def container(self):\n \"\"\"\n Reference to the container being fed by this source, if any.\n\n :rtype: ``basestring``\n \"\"\"\n return self._container[0]\n\n @container.setter\n def container(self, value):\n self._container = (value, True)\n\n @property\n def description(self):\n \"\"\"\n A user-provided description of the source.\n\n :rtype: ``basestring``\n \"\"\"\n return self._description[0]\n\n @description.setter\n def description(self, value):\n self._description = (value, True)\n\n @property\n def virtual(self):\n \"\"\"\n Flag indicating whether the source is a virtual source in the Delphix\n system.\n\n :rtype: ``bool``\n \"\"\"\n return self._virtual[0]\n\n @virtual.setter\n def virtual(self, value):\n self._virtual = (value, True)\n\n @property\n def hosts(self):\n \"\"\"\n Hosts that might affect operations on this source. Property will be\n null unless the includeHosts parameter is set when listing sources.\n\n :rtype: ``list`` of ``basestring``\n \"\"\"\n return self._hosts[0]\n\n @hosts.setter\n def hosts(self, value):\n self._hosts = (value, True)\n\n @property\n def staging(self):\n \"\"\"\n Flag indicating whether the source is used as a staging source for pre-\n provisioning. 
Staging sources are managed by the Delphix system.\n\n :rtype: ``bool``\n \"\"\"\n return self._staging[0]\n\n @staging.setter\n def staging(self, value):\n self._staging = (value, True)\n\n @property\n def runtime(self):\n \"\"\"\n Runtime properties of this source.\n\n :rtype: :py:class:`v1_9_0.web.vo.SourceRuntime`\n \"\"\"\n return self._runtime[0]\n\n @runtime.setter\n def runtime(self, value):\n self._runtime = (value, True)\n\n @property\n def config(self):\n \"\"\"\n Reference to the configuration for the source.\n\n :rtype: ``basestring``\n \"\"\"\n return self._config[0]\n\n @config.setter\n def config(self, value):\n self._config = (value, True)\n\n @property\n def linked(self):\n \"\"\"\n Flag indicating whether the source is a linked source in the Delphix\n system.\n\n :rtype: ``bool``\n \"\"\"\n return self._linked[0]\n\n @linked.setter\n def linked(self, value):\n self._linked = (value, True)\n\n","sub_path":"src/main/resources/delphixpy/v1_9_0/web/vo/Source.py","file_name":"Source.py","file_ext":"py","file_size_in_byte":11213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"318321610","text":"class Student:\n def details(self,name,marks) :\n self.name=name\n print(\"Name:\",name)\n for i in range(len(marks)) :\n print(\"Subject\",i,\"=\",marks[i])\nname=input(\"Enter Name\\n\")\nst=[]\nfor i in range(3) :\n stt=int(input(\"Enter marks\"))\n st.append(stt)\ns=Student()\ns.details(name,st)\nr=Student()\nname1=input(\"Enter name\\n\")\nl=[]\nfor i in range(3) :\n stt=int(input(\"Enter marks\"))\n l.append(stt)\n# call details once, after all marks are collected, with the second student's name\nr.details(name1,l)\n","sub_path":"manu.py","file_name":"manu.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"582704726","text":"#!/usr/bin/env python\n\n####################################################\n##Visualizes generic atom properties read from OEB##\n####################################################\n\nimport sys\nfrom openeye.oechem import *\nfrom openeye.oedepict import *\nfrom openeye.oegrapheme import *\n\n\ndef main(argv=[__name__]):\n\n itf = OEInterface(InterfaceData)\n OEConfigureImageWidth(itf, 400.0)\n OEConfigureImageHeight(itf, 400.0)\n OEConfigure2DMolDisplayOptions(itf, OE2DMolDisplaySetup_AromaticStyle)\n OEConfigureColor(itf, \"-negcolor\", \"-nc\", \"Color for negative values\", \"red\")\n OEConfigureColor(itf, \"-poscolor\", \"-pc\", \"Color for positive values\", \"blue\")\n\n if not OEParseCommandLine(itf, argv):\n return 1\n\n iname = itf.GetString(\"-in\")\n oname = itf.GetString(\"-out\")\n style = itf.GetString(\"-style\")\n\n # check input/output files\n\n ifs = oemolistream()\n if not ifs.open(iname):\n OEThrow.Fatal(\"Cannot open input file!\")\n\n if ifs.GetFormat() != OEFormat_OEB:\n OEThrow.Fatal(\"Only works for oeb input file!\")\n\n ext = OEGetFileExtension(oname)\n if not OEIsRegisteredImageFile(ext):\n OEThrow.Fatal(\"Unknown image type!\")\n\n ofs = oeofstream()\n if not ofs.open(oname):\n OEThrow.Fatal(\"Cannot open output file!\")\n\n # read a molecule\n\n mol = OEGraphMol()\n if not OEReadMolecule(ifs, mol):\n OEThrow.Fatal(\"Cannot read input file!\")\n\n # check atom properties\n\n tagname = itf.GetString(\"-tagname\")\n if not CheckAtomProperties(mol, tagname):\n OEThrow.Error(\"Cannot find tag %s on input molecule\" % tagname)\n\n # prepare depiction\n\n clearcoords = itf.GetBool(\"-clearcoords\")\n suppressH = itf.GetBool(\"-suppressH\")\n 
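# (added note) the two option flags read just above are handed straight to\n # OEPrepareDepiction below: clearcoords regenerates the 2D layout, and\n # suppressH folds explicit hydrogens away (hydrogens needed for tetrahedral\n # stereochemistry are kept, per the -suppressH help in InterfaceData below).\n 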
OEPrepareDepiction(mol, clearcoords, suppressH)\n\n # create image\n\n width, height = OEGetImageWidth(itf), OEGetImageHeight(itf)\n image = OEImage(width, height)\n\n # setup depiction options\n\n opts = OE2DMolDisplayOptions(width, height, OEScale_AutoScale)\n OESetup2DMolDisplayOptions(opts, itf)\n opts.SetAtomColorStyle(OEAtomColorStyle_WhiteMonochrome)\n\n negcolor = OEGetColor(itf, \"-negcolor\")\n poscolor = OEGetColor(itf, \"-poscolor\")\n\n DepictAtomProperty(image, mol, opts, tagname, negcolor, poscolor, style)\n\n OEWriteImage(oname, image)\n\n return 0\n\n\ndef DepictAtomProperty(image, mol, opts, tagname, negcolor, poscolor, style):\n\n mwidth, mheight = image.GetWidth(), image.GetHeight() * 0.9\n cwidth, cheight = image.GetWidth(), image.GetHeight() * 0.1\n\n mframe = OEImageFrame(image, mwidth, mheight, OE2DPoint(0.0, 0.0))\n cframe = OEImageFrame(image, cwidth, cheight, OE2DPoint(0.0, mheight))\n\n opts.SetDimensions(mwidth, mheight, OEScale_AutoScale)\n opts.SetScale(OEGetMoleculeSurfaceScale(mol, opts))\n\n colorg = GetColorGradient(mol, tagname, negcolor, poscolor)\n\n disp = OE2DMolDisplay(mol, opts)\n\n if style == \"propmap\":\n DepictAtomPropertyPropMap(disp, tagname, negcolor, poscolor)\n\n if style == \"atomglyph\":\n DepictAtomPropertyAtomGlyph(disp, tagname, colorg)\n\n if style == \"molsurface\":\n DepictAtomPropertyMolSurface(disp, tagname, colorg)\n\n OERenderMolecule(mframe, disp)\n\n OEDrawColorGradient(cframe, colorg)\n\n font = OEFont(OEFontFamily_Default, OEFontStyle_Default, 14, OEAlignment_Left, OEBlack)\n cframe.DrawText(OE2DPoint(10.0, -10.0), tagname, font)\n\n\ndef CheckAtomProperties(mol, tagname):\n\n tag = OEGetTag(tagname)\n\n for atom in mol.GetAtoms():\n if atom.HasData(tag):\n return True\n return False\n\n\ndef GetMinMaxAtomProperty(mol, tagname):\n\n minvalue = float(\"inf\")\n maxvalue = float(\"-inf\")\n\n tag = OEGetTag(tagname)\n\n for atom in mol.GetAtoms():\n if atom.HasData(tag):\n val = atom.GetData(tag)\n minvalue = min(minvalue, val)\n maxvalue = max(maxvalue, val)\n\n return minvalue, maxvalue\n\n\ndef GetColorGradient(mol, tagname, ncolor, pcolor):\n\n minvalue, maxvalue = GetMinMaxAtomProperty(mol, tagname)\n\n colorg = OELinearColorGradient(OEColorStop(0.0, OEWhite))\n if minvalue < 0.0:\n colorg.AddStop(OEColorStop(minvalue, ncolor))\n if maxvalue > 0.0:\n colorg.AddStop(OEColorStop(maxvalue, pcolor))\n\n return colorg\n\n\ndef DepictAtomPropertyPropMap(disp, tagname, negcolor, poscolor):\n\n opts = disp.GetOptions()\n propmap = OE2DPropMap(opts.GetBackgroundColor())\n propmap.SetLegendLocation(OELegendLocation_Hidden)\n propmap.SetNegativeColor(negcolor)\n propmap.SetPositiveColor(poscolor)\n propmap.Render(disp, tagname)\n\n\ndef DepictAtomPropertyAtomGlyph(disp, tagname, colorg):\n\n tag = OEGetTag(tagname)\n mol = disp.GetMolecule()\n\n for atom in mol.GetAtoms():\n if atom.HasData(tag):\n value = atom.GetDoubleData(tag)\n color = colorg.GetColorAt(value)\n pen = OEPen(color, color, OEFill_Off, 3.0)\n glyph = OEAtomGlyphCircle(pen, OECircleStyle_Default, 1.2)\n OEAddGlyph(disp, glyph, OEHasAtomIdx(atom.GetIdx()))\n\n\ndef DepictAtomPropertyMolSurface(disp, tagname, colorg):\n\n tag = OEGetTag(tagname)\n mol = disp.GetMolecule()\n\n for atom in mol.GetAtoms():\n if atom.HasData(tag):\n value = atom.GetDoubleData(tag)\n color = colorg.GetColorAt(value)\n pen = OEPen(color, color, OEFill_Off, 4.0)\n OESetSurfaceArcFxn(mol, atom, OEDefaultArcFxn(pen))\n\n OEDraw2DSurface(disp)\n\n\nInterfaceData = \"\"\"\n!CATEGORY 
\"input/output options\"\n\n !PARAMETER -in\n !ALIAS -i\n !TYPE string\n !REQUIRED true\n !KEYLESS 1\n !VISIBILITY simple\n !BRIEF Input filename\n !END\n\n !PARAMETER -out\n !ALIAS -o\n !TYPE string\n !REQUIRED true\n !KEYLESS 2\n !VISIBILITY simple\n !BRIEF Output filename\n !END\n\n!END\n\n!CATEGORY \"general options\"\n\n !PARAMETER -tagname\n !ALIAS -tag\n !TYPE string\n !REQUIRED true\n !KEYLESS 3\n !VISIBILITY simple\n !BRIEF Generic data tag name for atomic data.\n !END\n\n !PARAMETER -dispstyle\n !ALIAS -style\n !TYPE string\n !REQUIRED false\n !DEFAULT propmap\n !VISIBILITY simple\n !LEGAL_VALUE atomglyph\n !LEGAL_VALUE propmap\n !LEGAL_VALUE molsurface\n !BRIEF Display style\n !DETAIL\n atomglyph - atom properties visualized by using atom glyphs\n propmap - atom properties visualized by property map\n molsurface - atom properties visualized on molecule surface\n !END\n\n !PARAMETER -clearcoords\n !ALIAS -clear\n !TYPE bool\n !REQUIRED false\n !DEFAULT false\n !VISIBILITY simple\n !BRIEF Clear 2D coordinates of input structure\n !END\n\n !PARAMETER -suppressH\n !ALIAS -sh\n !TYPE bool\n !REQUIRED false\n !DEFAULT false\n !VISIBILITY simple\n !BRIEF Suppress explicit hydrogens of input structure\n !DETAIL\n Explicit hydrogens that are necessary to represent tetrahedral stereochemistry are kept\n !END\n\n!END\n\n\"\"\"\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"atom2img.py","file_name":"atom2img.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"578512554","text":"import tensorflow as tf\nfrom tensorflow.keras import backend as K\nimport LabelImage\nimport ImageInfoClass as IF\nimport CreateImagePatches as CIP\nimport numpy as np\nimport cv2\nimport CommonMath\nimport CommonStruct\nimport math\nimport ILBPLayer\nimport gc\nimport os\nfrom matplotlib import pyplot as plt\n'''keras import'''\nfrom tensorflow.keras.models import Sequential,Model\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten,Input,AveragePooling2D,concatenate\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nfrom ILBPLayer import MyLayer\nimport pickle\n#\nfrom sklearn import svm\nfrom skimage.feature import hog\nfrom sklearn import svm,neighbors\n#\nimport progressbar\n\ndef tryCreateDir(dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except Exception as e:\n pass\n\nclass KerasObj:\n def __init__(self, _ImageSize=[100, 100], _ImageChannel=1, _ImageFlatten=False):\n self.KerasMdl = []\n self.Layers = []\n self.labelDict = []\n self.LayerNum = 0\n self.label = 'none'\n # define input data\n self.ImageInfo = IF.ImageInfoClass()\n self.ImageInfo.Size = _ImageSize\n self.ImageInfo.Channel = _ImageChannel\n self.ImageInfo.NeedFlatten = _ImageFlatten\n self.ValidateSplit = 0.0\n\n def NewSeq(self):\n self.KerasMdl = Sequential()\n self.Layers = []\n\n def AddLayer(self, fn):\n if self.KerasMdl != []:\n self.KerasMdl.add(fn)\n self.Layers.append(fn._name)\n\n # load models\n def LoadAll(self, _loadPath, _name):\n _loadPath = LabelImage.PathCheck(_loadPath)\n self.KerasMdl = tf.keras.models.load_model(_loadPath+_name+\"_mdl.h5\")\n self.KerasMdl.load_weights(_loadPath+_name+'_weight.h5')\n\n def LoadMdl(self, _loadPath, _modelName):\n _loadPath = LabelImage.PathCheck(_loadPath)\n self.KerasMdl = 
tf.keras.models.load_model(\n _loadPath+_modelName+\"_mdl.h5\")\n\n # load weights (requires h5py; install it with pip)\n def LoadWeight(self, _loadPath, _weightName):\n _loadPath = LabelImage.PathCheck(_loadPath)\n self.KerasMdl.load_weights(_loadPath+_weightName+'_weight.h5')\n\n def SaveAll(self, _savePath, _name):\n _savePath = LabelImage.PathCheck(_savePath)\n self.KerasMdl.save(_savePath+_name+\"_mdl.h5\")\n self.KerasMdl.save_weights(_savePath+_name+\"_weight.h5\")\n\n def SaveMdl(self, _savePath, _modelName):\n _savePath = LabelImage.PathCheck(_savePath)\n self.KerasMdl.save(_savePath+_modelName+\"_mdl.h5\")\n\n # save weights (requires h5py; install it with pip)\n def SaveWeight(self, _savePath, _weightName):\n _savePath = LabelImage.PathCheck(_savePath)\n self.KerasMdl.save_weights(_savePath+_weightName+\"_weight.h5\")\n\n def ExtractFeature(self,imgObj):\n # tf.keras.Model expects the plural inputs=/outputs= keyword arguments\n lastModel = Model(inputs=self.KerasMdl.input, outputs=self.KerasMdl.get_layer('global_average_pooling2d').output)\n img,label = imgObj.RadomLoad(self.ImageInfo,PickSize=-1 , PreProcess = '',SeqLoad = True)\n f = lastModel.predict(img)\n return f\n\n def BenchMark(self,imgObj,PreProcess = '',divideSize = 1000):\n\n listSize = imgObj.GetListSize()\n DoTimes = math.ceil(listSize/divideSize)\n correct = 0\n loss = 0\n \n for i in range(DoTimes):\n pickIdx = list(range(i*divideSize,min((i+1)*divideSize,listSize)))\n img,label = imgObj.RadomLoad(self.ImageInfo,PickSize=len(pickIdx), Dim=4 , PreProcess = PreProcess,randIdx = pickIdx,kerasLabel=True)\n\n p = self.KerasMdl.evaluate(img,label) \n #l = self.KerasMdl.predict(img[0:10,:,:,:])\n\n \n print('current:' ,p)\n correct = p[1]+correct\n loss = loss+p[0]\n return correct/DoTimes,loss/DoTimes\n
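 # (added note) BenchMark evaluates in ceil(listSize/divideSize) chunks to keep\n # memory bounded, then averages the per-chunk metrics; the last chunk can be\n # smaller, so the unweighted mean is a close approximation rather than exact.\n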
\n def TrainBySvm(self, imgObj, SelectMethod='all',rdnSize = -1 ,\n global_epoche = 1,\n PreProcess = '',\n batch_size=32, epochs=1, verbose=0,valitationSplit = 0.0,\n savePath = ''):\n\n cellSize = 15\n cellNum = 2\n\n PreProcess = \"ILBPNet\"\n\n #if rdnSize == -1, use every image in the list\n if rdnSize == -1:\n rdnSize = imgObj.GetListSize()\n\n for i in range(global_epoche):\n print(\"Start Training\")\n print(\"==Total layer : \", len(self.KerasMdl.layers))\n print(\"==Global Epoch : \", i)\n print(\"Prepare data...-\")\n imageData,label = imgObj.RadomLoad(self.ImageInfo,\n PickSize=rdnSize, \n Dim=4 , \n PreProcess = \"none\",\n kerasLabel=False, \n SeqLoad=True)\n\n\n trainInHOG = imgObj.BuildLBP(imageData,cellSize=cellSize,cellNum=cellNum)\n infoName = savePath+'_'+str(epochs)+'_'+str(i) \n\n #fit the linear SVM\n print(\"SVM Learning\")\n clf = svm.LinearSVC(verbose=0)\n\n clf.fit(trainInHOG,label)\n predict = clf.predict(trainInHOG)\n accCount = np.count_nonzero(predict == label)\n print(\"-----------------acc on training sample\",accCount/imgObj.SetSize())\n \n testImg = LabelImage.DataObj() \n testImg.LoadList(\"D:\\\\DataSet\\\\ICDAR03\\\\testList.txt\",\n SortedClass=imgObj.MappedClass)\n \n tt,testLabel = testImg.RadomLoad(self.ImageInfo,PickSize=-1, Dim=4 , PreProcess = PreProcess,kerasLabel=False)\n testHOG = testImg.BuildLBP( tt,cellSize=cellSize,cellNum=cellNum )\n\n tpredict = clf.predict(testHOG)\n testNum = tpredict.shape[0]\n \n testAccCount = np.count_nonzero(tpredict == testLabel)\n print(\"testSet:\",testAccCount/testNum)\n \n\n for j in range(testNum):\n if tpredict[j] != testLabel[j]:\n cv2.imwrite(\"D:\\\\tmp\\\\errImg\\\\\"+str(j)+\"_\"+testImg.MappedClass[testLabel[j]]+\"_to_\"+testImg.MappedClass[tpredict[j]]+'.jpg', tt[j,:,:,0].reshape(100,100))\n\n \n \n\n def Train(self, imgObj, SelectMethod='all',rdnSize = -1 ,\n global_epoche = 1,\n PreProcess = '',\n batch_size=32, epochs=1, verbose=0,valitationSplit = 0.0,\n savePath = '',\n dataAug = False):\n\n #if rdnSize == -1, use every image in the list\n if rdnSize == -1:\n rdnSize = imgObj.GetListSize()\n\n for i in range(global_epoche):\n print(\"Start Training\")\n print(\"==Total layer : \", len(self.KerasMdl.layers))\n print(\"==Global Epoch : \", i)\n print(\"Prepare data...-\")\n imageData,label = imgObj.RadomLoad(self.ImageInfo,PickSize=rdnSize, Dim=3 , PreProcess = PreProcess,dataAug=dataAug)\n \n\n infoName = savePath+'_'+str(epochs)+'_'+str(i)\n \n tryCreateDir(\"D:\\\\TBData\\\\\"+infoName)\n #tbCallBack = tf.keras.callbacks.TensorBoard(log_dir=\"D:\\\\TBData\\\\\"+infoName, histogram_freq=0, write_graph=True, write_images=True)\n \n \n early_stopping = EarlyStopping(monitor='val_acc', patience=50,verbose=2,mode='max',baseline=0.9)\n\n singleResult = self.KerasMdl.fit(imageData,label,\n batch_size = batch_size,\n epochs=epochs,\n verbose=verbose,\n validation_split=self.ValidateSplit,\n callbacks=[] )\n\n if savePath != '':\n with open('TrainHistory\\\\'+infoName, 'wb') as file_pi:\n pickle.dump(singleResult.history, file_pi)\n\n if i == 0:\n epochs = int(epochs/4)\n \n gc.collect()#clear previous image data\n\n\n def TrainByGener(self, trainPath,\n IncludeSet=[],\n TrimName = '',\n LoadSize = 50, #number of subset images read in on each pass\n GenerSize = 3000,\n GlobalEpochs=10,\n GlobalBatchSize=3000,\n batch_size=-1, epochs=-1, verbose=-1):\n if IncludeSet == []:\n raise Exception('Data Set cannot be empty')\n\n imgObj = LabelImage.DataObj()\n if os.path.isfile( trainPath )==False:\n labels, imgPaths = imgObj.CreateAccessCache(trainPath, IncludeSet)\n else:\n labels, imgPaths = imgObj.CreateAccessCacheByFile(trainPath, IncludeSet,TrimName=TrimName)\n\n baseEpoch = GenerSize\n\n #prepare each learning size\n eachSize = []\n for i in range(0,len(IncludeSet)):\n eachSize.append(LoadSize)\n\n for i in range(GlobalEpochs):\n imgObj.GenImage(\n imgPaths, labels, IncludeSet, self.ImageInfo, eachSize=eachSize)\n imgObj.LoadFromList(\n 'D:/tempLabel.txt', (self.ImageInfo.Size[0], self.ImageInfo.Size[1]), IncludeSet)\n\n # get learning data\n x_train = imgObj.GetImageData(self.ImageInfo, Dim=4)\n y_train, _ = imgObj.ToIntLable(FinalTarget=\"ArrayExtend\")\n\n\n print(\"Global Epoch run:\",i)\n\n datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n datagen.fit(x_train)\n\n self.KerasMdl.fit_generator(datagen.flow(x_train, y_train, batch_size=64),\n steps_per_epoch=2000, epochs=epochs)\n\n # prepare for next round\n baseEpoch = baseEpoch+5\n\n gc.collect()\n continue # for next round\n\n return 1\n\n def Compile(self, _optimize, _loss, _metrics):\n self.KerasMdl.compile(optimizer=_optimize,\n loss=_loss,\n metrics=['accuracy'])\n\n def FindInImage(self, img, score, labels):\n imgPatch, BoxSize, boxR, boxC = CIP.CreatePatch(\n img, Stride=(80, 80), TargetSize=(200, 200))\n box = []\n MatchTable = self.KerasMdl.predict(imgPatch)\n for i in range(MatchTable.shape[1]):\n scoreMap = np.reshape(MatchTable[:, i], (BoxSize[0], BoxSize[1]))\n loc = np.where(scoreMap > score)\n\n for j in range(len(loc[0])):\n targetRow = loc[0][j]\n targetCol = loc[1][j]\n # local maxima means it is greater than neighbor\n if CommonMath.LocalMaxima(scoreMap, targetRow, targetCol):\n idx = targetRow*BoxSize[1]+targetCol\n CommonMath.CalcAccuracyPoint(\n scoreMap, targetRow, 
targetCol)\n\n # add a label object\n newLabel = CommonStruct.ObjectLabel()\n\n newLabel.Label = labels[i]\n newLabel.Score = scoreMap[targetRow, targetCol]\n newLabel.Box.r1 = boxR[idx]\n newLabel.Box.c1 = boxC[idx]\n newLabel.Box.r2 = boxR[idx]+100\n newLabel.Box.c2 = boxC[idx]+100\n\n box.append(newLabel)\n\n '''cv2.rectangle(\n img, (boxC[idx], boxR[idx]), (boxC[idx]+100, boxR[idx]+100), (0, 0, 0))\n cv2.imshow(\"test\", img)\n cv2.waitKey()'''\n\n return box\n # print(MatchTable.shape)\n\n def ToTargetImageFormat(self, img):\n img = LabelImage.ToTargetImageFormat(img, self.ImageInfo)\n return img\n\n def DataSetPredict(self, source):\n imgs = source.GetImageData(self.ImageInfo, Dim=4)\n labels, _ = source.ToIntLable(ArrayExtend=True)\n self.KerasMdl.evaluate(imgs, labels)\n return 1\n\n def ImagePredict(self, source):\n source = self.ToTargetImageFormat(source)\n # extend to four dimemsion array\n source = source.reshape((1,) + source.shape+(1,))\n return self.KerasMdl.predict(source)\n\n def Evaluate(self, source):\n if isinstance(source, LabelImage.DataObj):\n result = self.DataSetPredict(source)\n return result\n elif isinstance(source, str):\n source = cv2.imread(source)\n return self.ImagePredict(source)\n\n def Predict(self, source):\n imgs = source.GetImageData(self.ImageInfo, Dim=4)\n labels, _ = source.ToIntLable(ArrayExtend=True)\n return self.KerasMdl.predict(imgs)\n\n def LoadModelFromTxt(self, txtFile):\n with open(txtFile, 'r') as f:\n lines = f.readlines()\n \n\n for l in lines:\n try:\n l = l.strip('\\n')\n if l != '' and l[0]!='#':\n target = l\n exec(target)\n self.LayerNum = self.LayerNum+1\n except OSError as err:\n print(\"Error on adding \", str(err))\n pass\n \n exec('self.KerasMdl = model') \n\n def AssignModel(self,mdl):\n self.KerasMdl = mdl","sub_path":"MyKeras.py","file_name":"MyKeras.py","file_ext":"py","file_size_in_byte":13333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"609054083","text":"import pandas as pd\nfrom flask import Flask, jsonify, request\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\nimport joblib\n\napp = Flask(__name__)\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\treq \t\t\t\t= request.get_json()\n\tinput_data \t\t\t= req['data']\n\tinput_data_df \t\t= pd.DataFrame.from_dict(input_data)\n\n\tfeat_obj\t\t\t= joblib.load('feats.pkl')\n\tscale_obj \t\t\t= joblib.load('scale.pkl')\n\tohe_obj \t\t\t= joblib.load('ohe.pkl')\n\tmodel \t\t\t\t= joblib.load('model.pkl')\n\n\tinput_data_scaled \t= pd.DataFrame(scale_obj.transform(input_data_df[feat_obj['num_cols']]), columns=feat_obj['num_cols'])\n\tinput_data_ohe \t\t= pd.DataFrame(ohe_obj.transform(input_data_df[feat_obj['cat_cols']]).toarray(), columns=feat_obj['cat_ohe_cols'])\n\tinput_data_bin \t\t= input_data_df[feat_obj['bin_cols']]\n\tinput_data_df \t\t= pd.concat((input_data_bin,input_data_scaled,input_data_ohe), axis=1)\n\n\tprediction \t\t\t= model.predict(input_data_df)\n\n\tif prediction[0] == 1:\n\t\tresult = 'Принято'\n\telse:\n\t\tresult = 'Отказ'\n\n\treturn jsonify({'output':{'result':result}})\n\n@app.route('/')\ndef home():\n\treturn 'Система прогнозирования'\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port='3000')","sub_path":"deploy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517037450","text":"#coding:utf-8\nimport re, os ,sys,smtplib\n 
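# (added, hedged sketch) the re import above is currently unused by the script\n# that follows; one way to pull just the IPv4 addresses out of the raw\n# ipconfig dump would be:\n# addrs = re.findall(r'(\\d{1,3}(?:\\.\\d{1,3}){3})', ''.join(os.popen('ipconfig').readlines()))\n# (the pattern is illustrative only; ipconfig's labels vary with the Windows locale)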
\ndef get_ipconfig_ip(): \n match_ip_dict = {}\n ipconfig_result_list = os.popen('ipconfig').readlines()\n for i in range(len(ipconfig_result_list)):\n print(ipconfig_result_list[i])\n return ipconfig_result_list\nip = get_ipconfig_ip()\nf = open(os.getcwd()+'/ip.txt','w')\nfor i in range(len(ip)):\n f.writelines(ip[i]+'\\n')\n print(ip[i]+'\\n')\nf.close()\nimport smtplib\nimport email\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\n \nHOST = 'SMTP'#Your SMTP host\nSUBJECT = 'IP[YourIP]'#Title\nFROM = 'email'#Your email\nTo = 'email'#To someone\n\ndef add_image(path,imgid):\n \n data = open(path,'rb')\n msg_image = MIMEImage(data.read())\n data.close()\n msg_image.add_header('Content-ID',imgid)\n \n return msg_image\nmsg = MIMEMultipart('related')\nmsg_text = MIMEText('IP[report]','html','utf-8')\nmsg.attach(msg_text)\nattach = MIMEText(open(os.getcwd()+'/ip.txt','rb').read(),'base64','utf-8')\nattach['Content-type'] = 'application/octet-stream'\nattach['Content-Disposition'] = 'attachment;filename=\"ip.txt\"'\nmsg.attach(attach)\nmsg['From'] = FROM\nmsg['To'] = To\nmsg['Subject'] = SUBJECT\nsmtp_server = smtplib.SMTP()\nsmtp_server.set_debuglevel(1)\nsmtp_server.connect(HOST,'25')\nsmtp_server.starttls()\nsmtp_server.login(FROM,'passwd')#Your passwd\nsmtp_server.sendmail(FROM,To,msg.as_string())\nsmtp_server.quit()\n","sub_path":"raspi-ip-python-windows.py","file_name":"raspi-ip-python-windows.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"632188576","text":"from django.shortcuts import render, redirect\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .forms import TeamForm\nfrom .models import Player, Team, Game, Day, Rankings\nimport random\n\n\ndef home(request):\n return render(request, 'home.html')\n\n@login_required\ndef dashboard(request):\n try:\n team=Team.objects.get(owner_id=request.user.id)\n except Team.DoesNotExist:\n team={}\n try:\n teams=Team.objects.all().order_by('-owner_points')\n except Team.DoesNotExist:\n teams={}\n return render(request, 'dashboard/dashboard.html',{\n 'team':team,\n 'teams': teams,\n })\n\ndef create_team(request):\n team_form = TeamForm()\n return render(request, 'dashboard/team_form.html',{\n 'team_form':team_form,\n })\n\ndef add_team(request):\n form = TeamForm(request.POST)\n if form.is_valid():\n new_team=form.save(commit=False)\n new_team.owner = request.user\n new_team.save()\n return redirect('team_detail', team_id=new_team.id)\n\ndef team_detail(request, team_id):\n team = Team.objects.get(id=team_id)\n team_player_count=team.players.count()\n players = Player.objects.all()\n myplayers = Player.objects.filter(owner=request.user)\n \n return render(request, 'dashboard/team_detail.html', {\n 'team':team,\n 'team_player_count':team_player_count,\n 'players':players,\n 'myplayers':myplayers,\n })\n\ndef add_player(request, team_id, player_id):\n Team.objects.get(id=team_id).players.add(player_id)\n player = Player.objects.get(id=player_id)\n player.owner = request.user\n player.status = False\n player.save()\n return redirect('team_detail', team_id=team_id)\n\ndef drop_player(request, team_id, player_id):\n Team.objects.get(id=team_id).players.remove(player_id)\n player = Player.objects.get(id=player_id)\n player.owner=None\n player.status = True\n player.save()\n return redirect('team_detail', team_id=team_id)\n\ndef simulate_day(request):\n players=Player.objects.filter(status='False')\n day=Day.objects.get()\n day.day_counter+=1\n day.save()\n # print(players)\n for player in players:\n team=Team.objects.get(owner=player.owner)\n game=Game.objects.create()\n game.player=player\n game.owner=player.owner \n game.day_played=day.day_counter\n game.points=round(player.point_rating * random.random() * 5) + (player.point_rating)\n # print(game.points)\n game.rebounds=round(player.rebound_rating * random.random() * 2) + (round(player.rebound_rating/2))\n game.assists=round(player.assist_rating * random.random() * 1.5) + (round(player.assist_rating/2))\n 
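# (added note) each stat here is a uniform random draw scaled by the player's\n # rating, e.g. points = round(point_rating*random()*5) + point_rating, which\n # lands between 1x and roughly 6x the rating per simulated game\n 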
game.steals=round(player.steal_rating * random.random() * .5) + (round(random.random()*2))\n game.blocks=round(player.block_rating * random.random() * .5 + player.block_rating * random.random()*.5)\n game.turnovers=round((10-player.turnover_rating) * random.random())\n game.threepointers=round(player.threepointer_rating*random.random()+random.random())\n game.save()\n\n team.team_points+=game.points\n team.team_rebounds+=game.rebounds\n team.team_assists+=game.assists\n team.team_steals+=game.steals\n team.team_blocks+=game.blocks\n team.team_turnovers+=game.turnovers\n team.team_threepointers+=game.threepointers\n team.players.clear()\n team.save()\n\n player.owner=None\n player.status=True\n player.save()\n\n def assign_fantasy_points(list):\n for idx, team in enumerate(list):\n team.owner_points+=number_of_teams-idx \n team.save()\n\n points_ranking=Team.objects.all().order_by('-team_points')\n rebounds_ranking=Team.objects.all().order_by('-team_rebounds')\n steals_ranking=Team.objects.all().order_by('-team_steals')\n threepointers_ranking=Team.objects.all().order_by('-team_threepointers')\n assists_ranking=Team.objects.all().order_by('-team_assists')\n turnovers_ranking=Team.objects.all().order_by('-team_turnovers')\n blocks_ranking=Team.objects.all().order_by('-team_blocks')\n number_of_teams=points_ranking.count()\n \n assign_fantasy_points(points_ranking)\n assign_fantasy_points(rebounds_ranking)\n assign_fantasy_points(steals_ranking)\n assign_fantasy_points(threepointers_ranking)\n assign_fantasy_points(assists_ranking)\n assign_fantasy_points(turnovers_ranking)\n assign_fantasy_points(blocks_ranking)\n \n return redirect('dashboard')\n\ndef results(request):\n teams=Team.objects.all()\n games=Game.objects.all()\n day=Day.objects.get()\n days=[]\n for day in range(day.day_counter):\n days.append(day + 1)\n\n try:\n team=Team.objects.get(owner_id=request.user.id)\n except Team.DoesNotExist:\n team={}\n\n return render(request, 'dashboard/results.html', {\n 'team':team,\n 'teams':teams,\n 'games':games,\n 'days':days,\n })\n\ndef start_league(request):\n day = Day.objects.create()\n day.day_counter=0\n day.save()\n return redirect('dashboard')\n\ndef ranking_results(request):\n owner_point_rankings=Team.objects.all().order_by('-owner_points')\n\n points_ranking=Team.objects.all().order_by('-team_points')\n rebounds_ranking=Team.objects.all().order_by('-team_rebounds')\n steals_ranking=Team.objects.all().order_by('-team_steals')\n threepointers_ranking=Team.objects.all().order_by('-team_threepointers')\n assists_ranking=Team.objects.all().order_by('-team_assists')\n turnovers_ranking=Team.objects.all().order_by('-team_turnovers')\n blocks_ranking=Team.objects.all().order_by('-team_blocks')\n number_of_teams=points_ranking.count()\n\n try:\n team=Team.objects.get(owner_id=request.user.id)\n except Team.DoesNotExist:\n team={}\n\n return render(request, 'dashboard/ranking_results.html',{\n 'team':team,\n 'points_ranking' : points_ranking,\n 'rebounds_ranking':rebounds_ranking,\n 'steals_ranking' : steals_ranking,\n 'threepointers_ranking':threepointers_ranking,\n 'assists_ranking':assists_ranking,\n 'turnovers_ranking' : turnovers_ranking,\n 'blocks_ranking':blocks_ranking,\n 'number_of_teams': number_of_teams,\n 'owner_point_rankings' : owner_point_rankings,\n })\n\ndef signup(request):\n error_message=''\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('dashboard')\n else:\n 
error_message = 'Invalid sign up - Try Again'\n form = UserCreationForm()\n context = {'form': form, 'error_message':error_message}\n return render(request, 'registration/signup.html', context)\n","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"295146252","text":"\"\"\"\n7.\tIn a one-dimensional array of integers, find the two smallest elements.\nThey can be equal to each other (both being the minimum)\n or they can differ.\n\"\"\"\nimport random\n\n\ndef searchmin(len):\n a = [random.randrange(-1000, 1000, 1) for _ in range(len)]\n x2 = 1000 # starts above the largest possible element (999)\n x1 = min(a)\n for j in range(0, len):\n if a[j] < x2 and j != a.index(x1):\n x2 = a[j]\n print(a, x1, x2)\n\n\nif __name__ == '__main__':\n dig = int(input('Enter the array length: '))\n searchmin(dig)\n","sub_path":"Lesson_3/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"644230190","text":"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom comm import views\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.index),\n url(r'^index/', views.post),\n url(r'^monitor/(\\w+)', views.monitor),\n url(r'^delete/(\\w+)', views.delete_Module),\n url(r'^run/(\\w+)', views.exec_Module),\n\n]\n","sub_path":"Interface/webapp/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"588455538","text":"import random\n\n# n: total number of soldiers for this round (range 50~100)\n# game: tells you which game this is within the overall match\n# myn: total number of soldiers I currently hold\n# m: total number of turns played this round (range 3~n/5)\n# oparr: array storing the soldier counts the opponent has committed so far\n# a return value larger than the soldiers you hold is treated as sending out all of them\n# put the code you wrote into the strategy function to test it\n\narr1=[[0]*100 for _ in range(100)]\narr2=[[0]*100 for _ in range(100)]\nn,m=0,0\nn_a,n_b=0,0\nw_a,w_b=0,0\n\n#A strategy that returns the average value -- the most obvious one to think of\ndef s1(game,turn,myn,oparr):\n if turn==m-1:\n return myn\n return n//m\n\n
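#An extra example strategy (added for illustration; not one of the original\n#s1~s3): split whatever you still hold evenly over the turns that remain,\n#so the plan adapts after over- or under-spending on earlier turns\ndef s_extra(game,turn,myn,oparr):\n if turn==m-1:\n return myn\n return myn//(m-turn)\n\n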
#A strategy built to counter the one above: it concedes only turn 1, then ties or wins every remaining turn\ndef s2(game,turn,myn,oparr):\n if turn==0:\n return 0\n elif turn==m-1:\n return myn\n else:\n return n//(m-1)\n\n#A strategy that commits exactly as many soldiers as the opponent did last turn\ndef s3(game,turn, myn, oparr):\n if turn==0:\n return 0\n elif turn==m-1:\n return myn\n else:\n return oparr[game][turn-1]\n \n#Unfold your own ideas here\ndef strategy(game,turn, myn, oparr):\n return 0\n\nfor i in range(10):\n n=n_a=n_b=random.randint(50,100)\n m=random.randint(3,n//5)\n s_a,s_b=0,0\n for j in range(m):\n n1=s1(i,j,n_a,arr2) #swap s1 for s2 or s3 to test them\n n2=strategy(i,j,n_b,arr1) \n if n1>n_a:\n n1=n_a\n if n2>n_b:\n n2=n_b\n arr1[i][j]=n1\n arr2[i][j]=n2\n n_a-=n1\n n_b-=n2\n if n1>n2:\n s_a+=1\n elif n1<n2:\n s_b+=1\n if s_a>s_b:\n w_a+=1\n print(\"a win\")\n elif s_a<s_b:\n w_b+=1\n print(\"b win\")\nif w_a>w_b:\n print(\"p1 win!\")\nelif w_a<w_b:\n print(\"p2 win!\")
 ma200_start:\n c_4 = True\n else:\n c_4 = False\n break\n\n lowest = stock.df['close'].rolling(window=260).min().loc[stock.df.index[-1]]\n highest = stock.df['close'].rolling(window=260).max().loc[stock.df.index[-1]]\n\n\n c_1 = close > ma50\n c_2 = ma50 > ma150\n c_3 = ma150 > ma200\n c_5 = close / lowest > 1.3\n c_6 = close / highest > 0.75\n\n self.trending = c_1 and c_2 and c_3 and c_4 and c_5 and c_6\n\n\n if self.trending:\n print('{0:40} {1:<8.0%} {2:<8.0%}'.format(stock.name, close/highest, close/lowest))\n #self.plot(stock)\n #stock.plot_pivots()\n\n def plot(self, stock):\n fig = self.plt.figure(figsize=(14, 7))\n fig.suptitle('{}'.format(stock.name), fontsize=20)\n ax1 = self.plt.subplot2grid((6, 3), (0, 0), rowspan=5, colspan=3)\n ax2 = self.plt.subplot2grid((6, 3), (5, 0), rowspan=1, colspan=3, sharex=ax1)\n stock.df.plot(y=['close'], ax=ax1)\n stock.df.plot(y=['sma50'], linewidth=0.5, ax=ax1)\n stock.df.plot(y=['sma150'], linewidth=0.5, ax=ax1)\n stock.df.plot(y=['sma200'], linewidth=0.5, ax=ax1)\n stock.df.plot(y=['volume'], linewidth=1, color='b', ax=ax2)\n ax1.legend(['close', 'sma50', 'sma150', 'sma200'], loc='upper left')\n ax2.legend(['volume'], loc='upper left')\n self.plt.show()","sub_path":"screeners.py","file_name":"screeners.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"374376309","text":"#!/usr/bin/python3\n\n'''\n Run `chmod +x install.py` before you execute this script.\n Then `./install.py` will do the installation part.\n'''\n\ntry:\n from city_info import fetch\n from records import store_city_name_id\n from os import environ, getcwd, chmod\n from shutil import copytree\n from os.path import join, exists\nexcept ImportError as e:\n print('[!]Module Unavailable : {}'.format(str(e)))\n exit(1)\n\n\ndef __is_init_setup_done__():\n home_dir = environ.get('HOME', '')\n if(not home_dir):\n print('[!]Set environment variable HOME first')\n exit(1)\n data_dir = join(home_dir, '.imd_weather')\n if(exists(data_dir)):\n return True\n return False\n\n\ndef __init_setup__():\n resp = fetch()\n if(resp.get('error')):\n print('[!]Couldn\\'t download required data: \\'{}\\'\\n[!]Installation \\\n failed :/\\n'.format(resp.get('error', '???')))\n return False\n if(store_city_name_id(resp).get('status') != 'success'):\n return False\n home_dir = environ.get('HOME')\n try:\n copytree(getcwd(), join(home_dir, '.imd_weather'))\n chmod(join(home_dir, '.imd_weather', 'imd_weather_app.py'), 0o775) # making imd_weather_app.py executable.\n chmod(join(home_dir, '.imd_weather', 'install.py'), 0o664) # making install.py not executable.\n except Exception as e:\n print('[!]Error : {}'.format(str(e)))\n return False\n return 
True\n\n\ndef install():\n if(__is_init_setup_done__()):\n print('[!]Installation already done')\n return\n if(__init_setup__()):\n print('[+]Successful Installation\\n[+]Now add \\'{}\\' to your path variable and invoke \\'imd_weather_app.py\\' from anywhere in your directory tree.'.format(join(environ.get('HOME'), '.imd_weather')))\n return\n print('[!]Installation failed')\n return\n\n\nif __name__ == '__main__':\n try:\n install()\n except KeyboardInterrupt:\n print('\\n[!]Terminated')\n finally:\n exit(0)\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"558536999","text":"class Solution:\r\n def maxProfit(self, prices: List[int]) -> int:\r\n len_p = len(prices)\r\n if len_p == 1 or len_p == 0:\r\n return 0\r\n profit = 0\r\n temp = prices[0]\r\n i = 1\r\n while i < len_p:\r\n if prices[i] < temp:\r\n temp = prices[i]\r\n elif prices[i] - temp > profit:\r\n profit = prices[i] - temp\r\n i += 1\r\n return profit\r\n\r\n","sub_path":"121.买卖股票的最佳时机.py","file_name":"121.买卖股票的最佳时机.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"625863072","text":"\"\"\"\nBackground Subtraction\n\nThis is a very useful computer vision technique which allows \nus to separate foregrounds from the backgrounds in a video stream.\n\nThese algorithms essentially learn about the frame in view \n(video stream) and are able to accurate learn and identify \nthe foreground mask. What results is a binary segmentation of the \nimage which highlights regions of non-stationary objects.\n\nThere are a several Background subtraction algorithms in OpenCV \nspecifically for video analysis:\n\nBackgroundSubtractorMOG - Gaussian Mixture-based background/foreground \nSegmentation Algorithm.\n\nBackgroundSubtractorMOG2 – Another Gaussian Mixture-based background \nsubtraction method, however with better adaptability to illumination \nchanges and with better ability to detect shadows!\n\nGeometric Multigrid (GMG) -This method combines statistical background \nimage estimation and per-pixel Bayesian segmentation\n\n\"\"\"\n\n# OpenCV 2.4.13 only\nimport numpy as np\nimport cv2\n\n# Intialize Webcam\ncap = cv2.VideoCapture(0)\n\n# Initlaize background subtractor\nforeground_background = cv2.BackgroundSubtractorMOG()\n\nwhile True:\n \n ret, frame = cap.read()\n\n # Apply background subtractor to get our foreground mask\n foreground_mask = foreground_background.apply(frame)\n\n cv2.imshow('Output', foreground_mask)\n if cv2.waitKey(1) == 13: \n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Background_Subtraction/webcam-MOG.py","file_name":"webcam-MOG.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"333272321","text":"# -*- coding:utf-8 -*-\n\n# \n# English:\n# Given a characters array letters that is sorted in non-decreasing order and a character target, return the smallest character in the array that is larger than target.\n# Note that the letters wrap around.\n# For example, if target == 'z' and letters == ['a', 'b'], the answer is 'a'.\n# Example 1:\n# Input: letters = [\"c\",\"f\",\"j\"], target = \"a\" Output: \"c\"\n# Example 2:\n# Input: letters = [\"c\",\"f\",\"j\"], target = \"c\" Output: \"f\"\n# Example 3:\n# Input: letters = [\"c\",\"f\",\"j\"], target = \"d\" 
Output: \"f\"\n# Constraints:\n# 2 <= letters.length <= 104\n# letters[i] is a lowercase English letter.\n# letters is sorted in non-decreasing order.\n# letters contains at least two different characters.\n# target is a lowercase English letter.\n#\n# 中文:\n# 给你一个排序后的字符列表 letters ,列表中只包含小写英文字母。另给出一个目标字母 target,请你寻找在这一有序列表里比目标字母大的最小字母。\n# 在比较时,字母是依序循环出现的。举个例子:\n# 如果目标字母 target = 'z' 并且字符列表为 letters = ['a', 'b'],则答案返回 'a'\n# 示例 1:\n# 输入: letters = [\"c\", \"f\", \"j\"],target = \"a\" 输出: \"c\"\n# 示例 2:\n# 输入: letters = [\"c\",\"f\",\"j\"], target = \"c\" 输出: \"f\"\n# 示例 3:\n# 输入: letters = [\"c\",\"f\",\"j\"], target = \"d\" 输出: \"f\"\n# 提示:\n# 2 <= letters.length <= 104\n# letters[i] 是一个小写字母\n# letters 按非递减顺序排序\n# letters 最少包含两个不同的字母\n# target 是一个小写字母\n\n\n#\n# @lc app=leetcode.cn id=744 lang=python\n#\n# [744] 网络延迟时间\n#\n# https://leetcode-cn.com/problems/find-smallest-letter-greater-than-target/description/\n#\n# algorithms\n# Easy (41.37%)\n# Total Accepted: 3.9K\n# Total Submissions: 9.2K\n# Testcase Example: '[\"c\",\"f\",\"j\"]\\n\"a\"'\n#\n# 给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。\n#\n# 数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。\n#\n# 示例:\n#\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"a\"\n# 输出: \"c\"\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"c\"\n# 输出: \"f\"\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"d\"\n# 输出: \"f\"\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"g\"\n# 输出: \"j\"\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"j\"\n# 输出: \"c\"\n#\n# 输入:\n# letters = [\"c\", \"f\", \"j\"]\n# target = \"k\"\n# 输出: \"c\"\n#\n#\n# 注:\n#\n#\n# letters长度范围在[2, 10000]区间内。\n# letters 仅由小写字母组成,最少包含两个不同的字母。\n# 目标字母target 是一个小写字母。\n#\n#\n#\nclass Solution(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n def binary_search(nums, target):\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = (left + right) // 2\n if target < nums[mid]:\n right = mid - 1\n elif nums[mid] < target:\n left = mid + 1\n else:\n return mid # 查找到的位置加一\n return left\n\n next_letter = chr(ord(target)+1)\n idx = binary_search(letters, next_letter)\n return letters[idx%len(letters)]\n\nif __name__ == \"__main__\":\n s = Solution().nextGreatestLetter([\"e\",\"e\",\"e\",\"e\",\"e\",\"e\",\"n\",\"n\",\"n\",\"s\"], 'e')\n print(s)\n\n","sub_path":"questions/745-find-smallest-letter-greater-than-target/find-smallest-letter-greater-than-target.py","file_name":"find-smallest-letter-greater-than-target.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154701800","text":"\n#unchangable\ndef unChangeFunc(a):\n a += 10\n\nb = 2\nunChangeFunc(b)\nprint(b)\n\n#changable\ndef changeFunc(mylist):\n \"modify the input list\"\n mylist.append([1,2,3,4])\n print(\"in the function get value :\",mylist)\n return\n\n# use the changeList\nmylist = [10,20,30]\nchangeFunc(mylist)\nprint(\"outside the function value is :\",mylist)","sub_path":"module0711/unchangable_test.py","file_name":"unchangable_test.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"218713114","text":"import unittest\n\nfrom data.storage.fields import StringField\nfrom data.storage.fields import StructField\nfrom data.storage.model import Model\nfrom 
data.storage.model import storage\nfrom data.storage.storage import StorageMixin\nfrom test import TestCase\n\n\nclass TestModel(Model):\n title = StringField()\n\n\n@storage(provider='memory')\nclass TestMemoryModel(Model):\n title = StringField()\n\n\n@storage(provider='mongo', db='test', collection='test')\nclass TestMongoModel(Model):\n title = StringField()\n\n\n@storage(provider='mongo', db='test', collection='test')\nclass TestUnsetModel(Model):\n name = StringField()\n data = StructField(\n info=StructField(\n nickname=StringField()\n ),\n name=StringField(),\n surname=StringField()\n )\n\nclass TestApp(StorageMixin):\n pass\n\n\nclass TestStorage(TestCase):\n\n def check_storage(self, config, model_class):\n app = TestApp(config=config)\n storage = app.get_storage(model_class)\n model = storage.create_model({'title': 'Hello'})\n self.assertEqual(len(storage.get_all({})), 0)\n model.save()\n results = storage.get_all({})\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0].title, 'Hello')\n id1 = results[0].id\n model2 = storage.create_model({'title': 'Model 2'})\n model2.save()\n self.assertEqual(len(storage.get_all({})), 2)\n model1 = storage.get(id1)\n self.assertEqual(model1.id, id1)\n model1.delete()\n self.assertEqual(len(storage.get_all({})), 1)\n\n def test_default_provider(self):\n config = {\n 'storage': {\n 'providers': {\n 'memory': {\n 'class': 'data.storage.providers.MemoryProvider',\n 'default': True,\n }\n }\n },\n }\n self.check_storage(config, TestModel)\n\n def test_memory_storage(self):\n config = {\n 'storage': {\n 'providers': {\n 'memory': {\n 'class': 'data.storage.providers.MemoryProvider',\n }\n }\n },\n }\n self.check_storage(config, TestMemoryModel)\n\n @unittest.skip('TODO: clear test database before each test')\n def test_mongo_storage(self):\n config = {\n 'storage': {\n 'providers': {\n 'mongo': {\n 'class': 'data.storage.providers.MongoProvider',\n 'connection_string': 'mongodb://127.0.0.1:27017/?serverSelectionTimeoutMS=100',\n }\n }\n },\n }\n self.check_storage(config, TestMongoModel)\n\n def test_unset_none_fields(self):\n config = {\n 'storage': {\n 'providers': {\n 'mongo': {\n 'class': 'data.storage.providers.MongoProvider',\n 'connection_string': 'mongodb://127.0.0.1:27017/',\n }\n }\n },\n }\n app = TestApp(config=config)\n storage = app.get_storage(TestUnsetModel)\n model = storage.create_model({'id': 123})\n model.name = 'test'\n model.data.info.nickname = 'iv'\n model.data.name = 'Ivan'\n model.data.surname = 'Ivanov'\n model.save()\n self.assertEqual(model.id, 123)\n model.data.name = None\n model.data.info = None\n model.save()\n documents = storage.provider.get_all({'id': model.id})\n self.assertEqual(len(documents), 1)\n d = documents[0]\n self.assertEqual(d['name'], 'test')\n self.assertEqual(d['id'], 123)\n self.assertTrue('info' not in d['data'])\n self.assertTrue('name' not in d['data'])\n storage.provider.delete({'id': model.id})\n","sub_path":"src/data/storage/tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"131629020","text":"import redis\nimport datetime\nimport time\nimport asyncio\n# from './lib' import sendBirthdayEmail\n\nclass User(object):\n\n\tdef __init__(self,id=None,birthday=None):\n\t\tself.redisClient = redis.Redis(host='127.0.0.1',port=6379)\n\t\tself.id = id or self.redisClient.incr('age-app:user-ids');\n\t\tself.birthday = birthday \n\n\n\t# gets the 
highest value of ID in the existing cache to iterate over it\n\tasync def getHighestId(self,key):\n\t\ttry:\n\t\t\tvalue = await self.redisClient.get(key)\n\t\t\treturn value\n\t\texcept redis.RedisError:\n\t\t\traise\n\t#finds the user's cached value\n\tdef find(self,id):\n\t\ttry: \n\t\t\tcached_data = self.redisClient.get(id)\n\t\texcept redis.RedisError as e:\n\t\t\tprint(e)\n\n\t\tif cached_data is not None:\n\t\t\treturn cached_data\n\n\n\t# NOTE: redis.Redis from redis-py is synchronous; the awaits in this class\n\t# assume an asyncio-compatible client (e.g. aioredis) is swapped in\n\tasync def celebrateBirthday(self):\n\t\tkey = 'sent-'+self.id\n\t\tif (await self.hasEmailSent(key) == False):\n\t\t\t# sendBirthdayEmail comes from the lib import commented out at the top\n\t\t\tawait sendBirthdayEmail(key)\n\t\t\tawait self.setSentStatus(key)\n\t\t\tawait self.save()\n\n\t#Setting the status of the email sent to True, it will expire after a year -1 \n\tasync def setSentStatus(self,key):\n\t\texpiryTime = 60*60*24*364\n\t\tawait self.redisClient.setex(key,expiryTime,True)\n\t\treturn\n\n\t#check if the birthday email has already been sent to this person \n\tasync def hasEmailSent(self,key):\n\t\ttry:\n\t\t\tresult = await self.redisClient.get(key)\n\t\texcept redis.RedisError:\n\t\t\traise\n\t\tif result:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t#check if it's the birthday stored under the key\n\tdef isBirthday(self,birthday):\n\t\tif (birthday == str(datetime.datetime.now().date())):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t#saves the value to the cache\n\tasync def save(self):\n\t\ttry:\n\t\t\t# redis-py hset takes (name, key, value)\n\t\t\tresult = await self.redisClient.hset('users',self.id,self.birthday)\n\t\texcept redis.RedisError:\n\t\t\traise\n\t\tif (result == 0):\n\t\t\traise Exception(\"the value was not written\")\n\n\tdef sendEmail(self,highestId):\n\t\tfor userId in range(0,highestId):\n\t\t\tuser = self.find(userId)\n\t\t\tif (self.isBirthday(user.birthday)):\n\t\t\t\tself.celebrateBirthday()\n\nif __name__ == '__main__':\n r = User()\n # getHighestId is a coroutine; it needs an event loop, e.g. asyncio.run(...)\n highestId = r.getHighestId('age-app:user-ids')\n time.sleep(24*60*60)\n","sub_path":"birthdayEmail.py","file_name":"birthdayEmail.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"256678268","text":"import csv\n\nfrom django.shortcuts import render\n\ndef inflation_view(request):\n template_name = 'inflation.html'\n\n with open('inflation_russia.csv', 'r', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=';')\n data = []\n\n header = next(reader)\n for row in reader:\n tmp_data = [int(row[0])]\n for elem in row[1:]:\n try:\n val = float(elem)\n except ValueError:\n val = '-'\n finally:\n tmp_data.append(val)\n\n data.append(tmp_data)\n context = {'header': header, 'data': data}\n\n return render(request, template_name, context)","sub_path":"task1/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"13036651","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\"\"\"\nExample function for ODHQL. Copy this file and use a valid Python module name (e.g. 
my_function.py).\nYour function will then be automatically loaded and made available within the ODH Query Language Interpreter.\nSee hub.odhql.function package for more concrete implementation examples.\n\"\"\"\n\nfrom hub.odhql.functions.core import VectorizedFunction\n\n\nclass ExampleFunction(VectorizedFunction):\n # __doc__ is used to generate function documentation (formatted as reStructured Text)\n # By convention, function names and other keywords should be written in all-caps.\n \"\"\"\n Beispiel-Funktion für ODHQL, welche prüft, ob ein Integer-Feld den Wert 42 enthält.\n\n Parameter\n - `values`: Integer-Spalte\n\n Beispiel\n .. code:: sql\n\n IS42(t.some_field) AS is42\n \"\"\"\n name = 'IS42'\n\n def apply(self, values):\n self.assert_int(0, values)\n return values == 42\n","sub_path":"src/main/python/plugins/odhql_function.tmpl.py","file_name":"odhql_function.tmpl.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"184702133","text":"import atexit\nimport json\nfrom shutil import rmtree\n\nimport better_exceptions\nfrom zenlog import log\n\nfrom manga_py.libs import fs\nfrom manga_py.libs.modules import info\nfrom . import args\nfrom ._helper import CliHelper\nfrom .db import DataBase\n\n\nclass Cli(CliHelper):\n db = None\n\n def __init__(self):\n self._temp_path = fs.get_temp_path()\n atexit.register(self.exit)\n fs.make_dirs(self._temp_path)\n self.global_info = info.InfoGlobal()\n self.db = DataBase()\n\n def exit(self):\n # remove temp directory\n rmtree(self._temp_path)\n\n def run(self):\n better_exceptions.hook()\n _args = self._args.copy()\n if _args.get('title'):\n urls = self._search_for_title(_args.get('title'))\n else:\n self._print_cli_help()\n urls = _args.get('url', []).copy()\n _args.get('force_make_db', False) and self.db.clean()\n\n if self._args.get('update_all'):\n self._update_all()\n else:\n if len(urls) == 0:\n pass\n if len(urls) > 1:\n _args['name'] = None\n _args['skip_volumes'] = None\n _args['max_volumes'] = None\n self._run_normal(_args, urls)\n\n def _update_all(self):\n default_args = self.get_default_args()\n for manga in self.db.get_all(): # type Manga\n self.log() and log.info('Update %s', manga.url)\n _args = default_args.copy()\n data = json.loads(manga.data)\n data_args = data.get('args', {})\n del data_args['rewrite_exists_archives']\n del data_args['user_agent']\n del data_args['url']\n\n if not fs.is_dir(fs.path_join(data_args['destination'], data_args['name'])):\n self.log() and log.warn('Destination not exists. 
Skip')\n continue\n\n _args.update({ # re-init args\n 'url': manga.url,\n **data_args,\n })\n provider = self._get_provider(_args)\n if provider:\n provider = provider() # type Provider\n provider.before_provider(_args)\n provider.http.cookies = data.get('cookies')\n provider.http.ua = data.get('browser')\n provider.run(_args)\n provider.after_provider()\n provider.update_db()\n self.global_info.add_info(info)\n\n def _run_normal(self, _args, urls):\n for url in urls:\n _args['url'] = url\n provider = self._get_provider(_args)\n if provider:\n provider = provider() # type Provider\n provider.before_provider(_args)\n provider.run(_args)\n provider.after_provider()\n provider.update_db()\n self.global_info.add_info(info)\n","sub_path":"manga_py/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"266806020","text":"import random\n\n\n# A function to quasi-randomly shuffle up a game, #\n# for the sake of testing/starting #\n\ndef shuffle(board, n):\n dirs = [(0, 1), (0, -1), (-1, 0), (1, 0)]\n\n for i in range(n):\n while True:\n dir = dirs[random.randint(0, 3)]\n newBoard = board.slideBlank(dir)\n if newBoard is not None: # check the result once instead of sliding twice\n board = newBoard\n break\n return board\n","sub_path":"a3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"376566783","text":"# -*- coding: utf-8 -*-\n\"\"\"\nmetagene_finder.py\nincludes functions for finding the longest protein sequence shared by the\nmetagenome and the nitrogenase sequence\nneeds amino_acids.py, load.py, and gene_finder\n\nAll code is either from SoftDes class or personally written\n\nsome of the code is based on information found on the wikipedia page on the\nlongest substring problem,\nhttps://en.wikipedia.org/wiki/Longest_common_substring_problem\n\n@nrivkin: Noah Rivkin\n\n\"\"\"\n\nimport random\nfrom amino_acids import aa, codons, aa_table # you may find these useful\nimport load\nfrom gene_finder import get_complement, get_reverse_complement, find_all_ORFs\nfrom gene_finder import find_all_ORFs_oneframe, find_all_ORFs_both_strands, coding_strand_to_AA # these are useful, though possibly not overly efficient\nfrom gene_finder import gene_finder # assumed to be defined in gene_finder.py, since it is called below\n\n\n# Use find_all_ORFs and coding_strand_to_AA to find possible, then compare?\ndef gen_array(n,m):\n \"\"\"\n creates a list of n lists, each of which has m zeros\n this can be used for the dynamic method of finding substrings\n \"\"\"\n array = []\n for i in range(n):\n array.append([])\n for j in range(m):\n array[i].append(0)\n return array\n\n\ndef find_common_sub(str1,str2):\n \"\"\"\n finds the longest common substring from str1 and str2\n with dynamic method\n \"\"\"\n l1 = len(str1)\n l2 = len(str2)\n longest = 0\n longest_loc = 0\n matrix = gen_array(l1,l2)\n # marks location in matrix where there is a match with length of common suffix\n for i in range(l1):\n for j in range(l2):\n if str1[i] == str2[j]:\n if i == 0 or j == 0:\n matrix[i][j] = 1\n else:\n matrix[i][j] = matrix[i - 1][j - 1] + 1\n if matrix[i][j] > longest:\n longest = matrix[i][j]\n longest_loc = i\n # the longest common suffix ends at str1[longest_loc], so slice it out\n common = str1[longest_loc - longest + 1:longest_loc + 1]\n return common\n\n\nsubstrings = []\nnitrogenase = load.load_nitrogenase_seq()\nnitrogenase = nitrogenase.replace('\\n','')\n
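# (added sanity check, not in the original file) with the corrected\n# find_common_sub above, the longest substring these two strings share\n# is 'ATTCG':\nassert find_common_sub('GGATTCGA', 'CATTCGT') == 'ATTCG'\n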
nit_strand = gene_finder(nitrogenase)\nmetagenome = load.load_metagenome()\n# for i in range(10):\n# meta_strands = find_all_ORFs_both_strands(metagenome[i][1])\n# aa_meta_strands = []\n# for strand in meta_strands:\n# aa_meta_strands.append(coding_strand_to_AA(strand))\n# for j in range(len(meta_strands)):\n# substrings.append(find_common_sub(nit_strand[0], aa_meta_strands[j]))\nsubstrings","sub_path":"metagene_finder.py","file_name":"metagene_finder.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"103426481","text":"import os\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport cv2\n\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'srcnn'))\nimport srcnn\n\nflags = tf.flags\n\nflags.DEFINE_string('checkpoint_dir', 'results/64-32-3_9-1-5_128', 'Checkpoint directory.')\nflags.DEFINE_string('image_file', 'lr1.png', 'Sample image file.')\n\nFLAGS = flags.FLAGS\nFLAGS._parse_flags()\n\nexperiment = os.path.basename(FLAGS.checkpoint_dir)\nlayer_sizes = [int(k) for k in experiment.split(\"_\")[0].split(\"-\")]\nfilter_sizes = [int(k) for k in experiment.split(\"_\")[1].split(\"-\")]\nprint(layer_sizes)\nx = tf.placeholder(tf.float32, shape=(None, None, None, 3),\n name=\"input\")\ny = tf.placeholder(tf.float32, shape=(None, None, None, 3),\n name=\"label\")\nis_training = tf.placeholder_with_default(False, (), name='is_training')\n\nmodel = srcnn.SRCNN(x, y, layer_sizes, filter_sizes, is_training=is_training,\n device='/cpu:0', input_depth=3, output_depth=3)\n\nsaver = tf.train.Saver()\ninit_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\nsess = tf.Session()\nsess.run(init_op)\n\ncheckpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\nprint(\"checkpoint\", checkpoint)\nsaver.restore(sess, checkpoint)\n\nimg = cv2.imread(FLAGS.image_file)\nprint(img.shape)\nhr = img.copy()\n\nhr = cv2.resize(hr, (0, 0), fx=3, fy=3, interpolation=cv2.INTER_CUBIC)\nnew = hr.copy()\nfeed_dict = {x: hr[np.newaxis], is_training: False}\nhr = sess.run(model.prediction, feed_dict=feed_dict)[0]\n\n\n# NOTE: cv2.imread returns BGR, so these Rec.601 weights (0.299 for red)\n# are being applied to the channels in reverse order\ndef luminance(img):\n return 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]\n\n\ndef compute_psnr(x1, x2):\n x1_lum = luminance(x1)\n x2_lum = luminance(x2)\n mse = np.mean((x1_lum - x2_lum)**2)\n return 20 * np.log10(255 / np.sqrt(mse))\n\n\nimport matplotlib.pyplot as plt\nfig, axs = plt.subplots(1, 3)\naxs = np.ravel(axs)\naxs[0].imshow(img[:, :, [2, 1, 0]], interpolation='nearest', vmin=0, vmax=255)\naxs[0].axis('off')\naxs[0].set_title(\"Nearest\")\n\naxs[1].imshow(img[:, :, [2, 1, 0]], interpolation='bicubic', vmin=0, vmax=255)\naxs[1].axis('off')\naxs[1].set_title(\"Bicubic\")\n\naxs[2].imshow(hr.astype(np.uint8)[:, :, [2, 1, 0]], vmin=0, vmax=255)\naxs[2].axis('off')\naxs[2].set_title(\"SRCNN\")\n\nplt.savefig('result.png')\n# print(new.shape)\nprint(hr.shape)\n# print(compute_psnr(hr, new))\ncv2.imwrite('color_img.png', hr)\ncv2.imwrite('color_img_new.png', new)\nori = cv2.imread('hr1.png')\nprint(ori.shape)\nprint(compute_psnr(hr, ori))\nprint(compute_psnr(new, ori))\nprint(hr)\n","sub_path":"model/test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
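# (editor's worked example, not part of either dataset record) the PSNR
# helper in test_module.py above implements PSNR = 20*log10(255/sqrt(MSE))
# for 8-bit images; an MSE of 1.0 on the luminance channel therefore gives:
# >>> import numpy as np
# >>> round(20 * np.log10(255 / np.sqrt(1.0)), 2)
# 48.13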
range(1,can+1):\n num=float(input(f\"Ingrese el numero {i} : \"))\n total=total+num\n\nprint(f\"El resultado es: {total}\")\n\n\n","sub_path":"2.5-Suma_con_mas_numeros.py","file_name":"2.5-Suma_con_mas_numeros.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"609816564","text":"\"\"\"Regex Search\n\nOpens all .txt files in a folder and searches for any line that matches a\nuser-supplied regular expression. Results are printed to the screen.\n\nUsage:\n    python3 regex_search.py <folder> \"<regex>\"\n\"\"\"\n\nimport os\nimport re\nimport sys\n\nif len(sys.argv) != 3:\n    print('Usage: python3 regex_search.py <folder> \"<regex>\"')\n    sys.exit()\n\nFOLDER_PATH = os.path.abspath(sys.argv[1])\n\nfor text_folder in os.listdir(FOLDER_PATH):\n    with open(os.path.join(FOLDER_PATH, text_folder), \"r\") as text_file:\n        lines = text_file.readlines()\n    for line in lines:\n        if re.search(r\"{}\".format(sys.argv[2]), line):\n            print(text_folder, \"-> \" + line.strip())\n","sub_path":"automate-the-boring-stuff/chapter08/regex-search/regex_search.py","file_name":"regex_search.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"9545655","text":"import string\n\nimport numpy as np\nimport pytest\nfrom torch import nn\nfrom torchvision.transforms import ToTensor, Normalize\nfrom sklearn.model_selection import train_test_split\n\nfrom pytorch_widedeep.models import (\n    Wide,\n    DeepText,\n    WideDeep,\n    DeepDense,\n    DeepImage,\n)\n\n# Wide array\nX_wide = np.random.choice(50, (32, 10))\n\n# Deep Array\ncolnames = list(string.ascii_lowercase)[:10]\nembed_cols = [np.random.choice(np.arange(5), 32) for _ in range(5)]\nembed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]\ncont_cols = [np.random.rand(32) for _ in range(5)]\nX_deep = np.vstack(embed_cols + cont_cols).transpose()\n\n# Text Array\npadded_sequences = np.random.choice(np.arange(1, 100), (32, 48))\nX_text = np.hstack((np.repeat(np.array([[0, 0]]), 32, axis=0), padded_sequences))\nvocab_size = 100\n\n# Image Array\nX_img = np.random.choice(256, (32, 224, 224, 3))\nX_img_norm = X_img / 255.0\n\n# Target\ntarget = np.random.choice(2, 32)\n\n# train/validation split\n(\n    X_wide_tr,\n    X_wide_val,\n    X_deep_tr,\n    X_deep_val,\n    X_text_tr,\n    X_text_val,\n    X_img_tr,\n    X_img_val,\n    y_train,\n    y_val,\n) = train_test_split(X_wide, X_deep, X_text, X_img, target)\n\n# build model components\nwide = Wide(np.unique(X_wide).shape[0], 1)\ndeepdense = DeepDense(\n    hidden_layers=[32, 16],\n    dropout=[0.5, 0.5],\n    deep_column_idx={k: v for v, k in enumerate(colnames)},\n    embed_input=embed_input,\n    continuous_cols=colnames[-5:],\n)\ndeeptext = DeepText(vocab_size=vocab_size, embed_dim=32, padding_idx=0)\ndeepimage = DeepImage(pretrained=True)\n\n# transforms\nmean = [0.406, 0.456, 0.485] # BGR\nstd = [0.225, 0.224, 0.229] # BGR\ntransforms1 = [ToTensor, Normalize(mean=mean, std=std)]\ntransforms2 = [Normalize(mean=mean, std=std)]\n\ndeephead_ds = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 4))\ndeephead_dt = nn.Sequential(nn.Linear(64, 8), nn.Linear(8, 4))\ndeephead_di = nn.Sequential(nn.Linear(512, 8), nn.Linear(8, 4))\n\n# #############################################################################\n# Test many possible scenarios of data inputs I can think of. 
Surely users\n# will input something unexpected\n# #############################################################################\n\n\n@pytest.mark.parametrize(\n \"X_wide, X_deep, X_text, X_img, X_train, X_val, target, val_split, transforms, nepoch, null\",\n [\n (X_wide, X_deep, X_text, X_img, None, None, target, None, transforms1, 0, None),\n (X_wide, X_deep, X_text, X_img, None, None, target, None, transforms2, 0, None),\n (X_wide, X_deep, X_text, X_img, None, None, target, None, None, 0, None),\n (\n X_wide,\n X_deep,\n X_text,\n X_img_norm,\n None,\n None,\n target,\n None,\n transforms2,\n 0,\n None,\n ),\n (\n X_wide,\n X_deep,\n X_text,\n X_img_norm,\n None,\n None,\n target,\n None,\n transforms1,\n 0,\n None,\n ),\n (X_wide, X_deep, X_text, X_img_norm, None, None, target, None, None, 0, None),\n (X_wide, X_deep, X_text, X_img, None, None, target, 0.2, None, 0, None),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_deep\": X_deep,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n None,\n None,\n 0,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_deep\": X_deep,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n None,\n transforms1,\n 0,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_deep\": X_deep,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n 0.2,\n None,\n 0,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_deep\": X_deep,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n 0.2,\n transforms2,\n 0,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide_tr,\n \"X_deep\": X_deep_tr,\n \"X_text\": X_text_tr,\n \"X_img\": X_img_tr,\n \"target\": y_train,\n },\n {\n \"X_wide\": X_wide_val,\n \"X_deep\": X_deep_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n None,\n None,\n None,\n 0,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide_tr,\n \"X_deep\": X_deep_tr,\n \"X_text\": X_text_tr,\n \"X_img\": X_img_tr,\n \"target\": y_train,\n },\n {\n \"X_wide\": X_wide_val,\n \"X_deep\": X_deep_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n None,\n None,\n transforms1,\n 0,\n None,\n ),\n ],\n)\ndef test_widedeep_inputs(\n X_wide,\n X_deep,\n X_text,\n X_img,\n X_train,\n X_val,\n target,\n val_split,\n transforms,\n nepoch,\n null,\n):\n model = WideDeep(\n wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage\n )\n model.compile(method=\"binary\", transforms=transforms, verbose=0)\n model.fit(\n X_wide=X_wide,\n X_deep=X_deep,\n X_text=X_text,\n X_img=X_img,\n X_train=X_train,\n X_val=X_val,\n target=target,\n val_split=val_split,\n batch_size=16,\n )\n assert (\n model.history.epoch[0] == nepoch\n and model.history._history[\"train_loss\"] is not null\n )\n\n\n@pytest.mark.parametrize(\n \"X_wide, X_deep, X_text, X_img, X_train, X_val, target\",\n [\n (\n X_wide,\n X_deep,\n X_text,\n X_img,\n None,\n {\n \"X_wide\": X_wide_val,\n \"X_deep\": X_deep_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n target,\n ),\n ],\n)\ndef test_xtrain_xval_assertion(\n X_wide,\n X_deep,\n X_text,\n X_img,\n X_train,\n X_val,\n target,\n):\n model = WideDeep(\n wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage\n )\n model.compile(method=\"binary\", verbose=0)\n 
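# Mixing loose X_* arrays with an X_val dict while X_train is None is an invalid input combination for fit(), hence the AssertionError expected here.\n    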
with pytest.raises(AssertionError):\n model.fit(\n X_wide=X_wide,\n X_deep=X_deep,\n X_text=X_text,\n X_img=X_img,\n X_train=X_train,\n X_val=X_val,\n target=target,\n batch_size=16,\n )\n\n\n@pytest.mark.parametrize(\n \"wide, deepdense, deeptext, deepimage, X_wide, X_deep, X_text, X_img, target\",\n [\n (wide, None, None, None, X_wide, None, None, None, target),\n (None, deepdense, None, None, None, X_deep, None, None, target),\n (None, None, deeptext, None, None, None, X_text, None, target),\n (None, None, None, deepimage, None, None, None, X_img, target),\n ],\n)\ndef test_individual_inputs(\n wide, deepdense, deeptext, deepimage, X_wide, X_deep, X_text, X_img, target\n):\n model = WideDeep(\n wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage\n )\n model.compile(method=\"binary\", verbose=0)\n model.fit(\n X_wide=X_wide,\n X_deep=X_deep,\n X_text=X_text,\n X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(model.history._history) == 1\n\n\n###############################################################################\n#  test deephead is not None and individual components\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"deepdense, deeptext, deepimage, X_deep, X_text, X_img, deephead, target\",\n [\n (deepdense, None, None, X_deep, None, None, deephead_ds, target),\n (None, deeptext, None, None, X_text, None, deephead_dt, target),\n (None, None, deepimage, None, None, X_img, deephead_di, target),\n ],\n)\ndef test_deephead_individual_components(\n deepdense, deeptext, deepimage, X_deep, X_text, X_img, deephead, target\n):\n model = WideDeep(\n deepdense=deepdense, deeptext=deeptext, deepimage=deepimage, deephead=deephead\n ) # noqa: F841\n model.compile(method=\"binary\", verbose=0)\n model.fit(\n X_wide=X_wide,\n X_deep=X_deep,\n X_text=X_text,\n X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(model.history._history) == 1\n\n\n###############################################################################\n#  test deephead is None and head_layers is not None and individual components\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"deepdense, deeptext, deepimage, X_deep, X_text, X_img, target\",\n [\n (deepdense, None, None, X_deep, None, None, target),\n (None, deeptext, None, None, X_text, None, target),\n (None, None, deepimage, None, None, X_img, target),\n ],\n)\ndef test_head_layers_individual_components(\n deepdense, deeptext, deepimage, X_deep, X_text, X_img, target\n):\n model = WideDeep(\n deepdense=deepdense, deeptext=deeptext, deepimage=deepimage, head_layers=[8, 4]\n ) # noqa: F841\n model.compile(method=\"binary\", verbose=0)\n model.fit(\n X_wide=X_wide,\n X_deep=X_deep,\n X_text=X_text,\n X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(model.history._history) == 1\n","sub_path":"tests/test_model_functioning/test_data_inputs.py","file_name":"test_data_inputs.py","file_ext":"py","file_size_in_byte":10841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"484009000","text":"# reference url : https://www.analyticsvidhya.com/blog/2019/01/guide-pytorch-neural-networks-case-studies/\r\n\r\nimport torch\r\nn_input, n_hidden, n_output = 5, 3, 1\r\nprint(n_input, n_hidden, n_output)\r\n\r\n## initialize tensor for inputs, and 
outputs \r\nx = torch.randn((1, n_input))\r\ny = torch.randn((1, n_output))\r\nprint(x)\r\nprint(y)\r\n\r\n## initialize tensor variables for weights \r\nw1 = torch.randn(n_input, n_hidden) # weight for hidden layer\r\nw2 = torch.randn(n_hidden, n_output) # weight for output layer\r\nprint(w1)\r\nprint(w2)\r\n\r\n## initialize tensor variables for bias terms \r\nb1 = torch.randn((1, n_hidden)) # bias for hidden layer\r\nb2 = torch.randn((1, n_output)) # bias for output layer\r\nprint(b1)\r\nprint(b2)\r\n\r\n## sigmoid activation function using pytorch\r\ndef sigmoid_activation(z):\r\n return 1 / (1 + torch.exp(-z))\r\n## activation of hidden layer \r\nz1 = torch.mm(x, w1) + b1\r\na1 = sigmoid_activation(z1)\r\n## activation (output) of final layer \r\nz2 = torch.mm(a1, w2) + b2\r\noutput = sigmoid_activation(z2)\r\n\r\nloss = y - output\r\n\r\n## function to calculate the derivative of activation\r\ndef sigmoid_delta(x):\r\n return x * (1 - x)\r\n\r\n## compute derivative of error terms\r\ndelta_output = sigmoid_delta(output)\r\ndelta_hidden = sigmoid_delta(a1)\r\n## backpass the changes to previous layers \r\nd_outp = loss * delta_output\r\nloss_h = torch.mm(d_outp, w2.t())\r\nd_hidn = loss_h * delta_hidden\r\n\r\nlearning_rate = 0.1\r\n\r\nw2 += torch.mm(a1.t(), d_outp) * learning_rate\r\nw1 += torch.mm(x.t(), d_hidn) * learning_rate\r\nb2 += d_outp.sum() * learning_rate\r\nb1 += d_hidn.sum() * learning_rate\r\n\r\n#-----------------------------------------------------------\r\nfrom torchvision import transforms\r\n\r\n_tasks = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5,), (0.5,))\r\n ])\r\n \r\nfrom torchvision.datasets import MNIST\r\n\r\n## Load MNIST Dataset and apply transformations\r\nmnist = MNIST(\"data\", download=True, train=True, transform=_tasks)\r\n\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\n## create training and validation split \r\nsplit = int(0.8 * len(mnist))\r\nindex_list = list(range(len(mnist)))\r\ntrain_idx, valid_idx = index_list[:split], index_list[split:]\r\n\r\n#print(train_idx)\r\n## create sampler objects using SubsetRandomSampler\r\ntr_sampler = SubsetRandomSampler(train_idx)\r\nval_sampler = SubsetRandomSampler(valid_idx)\r\n#print(tr_sampler)\r\n## create iterator objects for train and valid datasets\r\ntrainloader = DataLoader(mnist, batch_size=256, sampler=tr_sampler)\r\nvalidloader = DataLoader(mnist, batch_size=256, sampler=val_sampler)\r\n#print(trainloader)\r\n\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.hidden = nn.Linear(784, 128)\r\n self.output = nn.Linear(128, 10)\r\n def forward(self, x):\r\n x = self.hidden(x)\r\n x = F.sigmoid(x)\r\n x = self.output(x)\r\n return x\r\nmodel = Model()\r\n\r\nfrom torch import optim\r\nloss_function = nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay= 1e-6, momentum = 0.9, nesterov = True)\r\n\r\n\r\nfor epoch in range(1, 11): ## run the model for 10 epochs\r\n train_loss, valid_loss = [], []\r\n ## training part \r\n model.train()\r\n for data, target in trainloader:\r\n optimizer.zero_grad()\r\n\r\n\r\n#-----------------------------------------------------------\r\nfrom torchvision import transforms\r\n## load the dataset \r\n_tasks = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5,), (0.5,))\r\n ])\r\nfrom torchvision.datasets import 
CIFAR10\r\ncifar = CIFAR10('data', train=True, download=True, transform=_tasks)\r\n## create training and validation split \r\nsplit = int(0.8 * len(cifar))\r\nindex_list = list(range(len(cifar)))\r\ntrain_idx, valid_idx = index_list[:split], index_list[split:]\r\n## create training and validation sampler objects\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\ntr_sampler = SubsetRandomSampler(train_idx)\r\nval_sampler = SubsetRandomSampler(valid_idx)\r\n## create iterator objects for train and valid datasets\r\ntrainloader = DataLoader(cifar, batch_size=512, sampler=tr_sampler)\r\nvalidloader = DataLoader(cifar, batch_size=512, sampler=val_sampler)\r\n\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n \r\n ## define the layers\r\n self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\r\n self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\r\n self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\r\n self.pool = nn.MaxPool2d(2, 2)\r\n self.linear1 = nn.Linear(1024, 512)\r\n self.linear2 = nn.Linear(512, 10)\r\n \r\n def forward(self, x):\r\n x = self.pool(F.relu(self.conv1(x)))\r\n x = self.pool(F.relu(self.conv2(x)))\r\n x = self.pool(F.relu(self.conv3(x)))\r\n x = x.view(-1, 1024) ## reshaping \r\n x = F.relu(self.linear1(x))\r\n x = self.linear2(x)\r\n return x\r\n\r\nmodel = Model()\r\n\r\nimport torch.optim as optim\r\nloss_function = nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay= 1e-6, momentum = 0.9, nesterov = True)\r\n## run for 30 Epochs\r\nfor epoch in range(1, 2):\r\n train_loss, valid_loss = [], []\r\n ## training part \r\n model.train()\r\n for data, target in trainloader:\r\n optimizer.zero_grad()\r\n output = model(data)\r\n loss = loss_function(output, target)\r\n loss.backward()\r\n optimizer.step()\r\n train_loss.append(loss.item()) \r\n \r\n ## evaluation part \r\n model.eval()\r\n for data, target in validloader:\r\n output = model(data)\r\n loss = loss_function(output, target)\r\n valid_loss.append(loss.item())\r\n print (\"Epoch:\", epoch)\r\n \r\n## dataloader for validation dataset \r\ndataiter = iter(validloader)\r\ndata, labels = dataiter.next()\r\noutput = model(data)\r\nprint(output)\r\noutput.shape\r\nimport numpy as np\r\n_, preds_tensor = torch.max(output, 1)\r\npreds = np.squeeze(preds_tensor.numpy())\r\nprint (\"Actual:\", labels)\r\nprint (\"Predicted:\", preds)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"pytorch_navdeep.py","file_name":"pytorch_navdeep.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156468630","text":"'''\nMODULE\n CommUtilFeeUI - This module prvoides Commitment fees details\n \nHISTORY\n Date Developer Notes\n 2017-10-09 Ntuthuko Matthews created\n'''\n\n\nfrom acm import FUxLayoutBuilder\n\n\ndef CommUtilFeePane(cls):\n b = FUxLayoutBuilder() \n b.BeginVertBox('None')\n b.AddSpace(5)\n b. BeginVertBox('None')\n b. BeginHorzBox('None')\n cls.txtTradeNumberCtrl.BuildLayoutPart(b, 'Trade Number')\n b. AddButton('btnInsertItems', '...', False, True)\n b. EndBox()\n cls.cboCalcFeeTypeCtrl.BuildLayoutPart(b, 'Calculate Fee Type*')\n b. AddCheckbox('chkCalcCommFeeCtrl', 'Calculate Comm Fee*')\n b. 
AddCheckbox('chkCalcUtilFeeCtrl', 'Calculate Utilization Fee*')\n b. AddCheckbox('chkLinkedCtrl', 'Credit Linked & Financial Covenant Linked*')\n cls.txtFacilityMaxCtrl.BuildLayoutPart(b, 'Facility Limit*')\n cls.txtThresholdCtrl.BuildLayoutPart(b, 'Threshold (0\")\n\tprint(\" pes-cpk-unpack [OPTIONS] --list\")\n\tprint(\"Options:\")\n\tprint(\" -r, --allow-replace Allow overwriting existing packed files\")\n\tprint(\" -d, --directory Unpack in directory \")\n\tprint(\" -l, --list List packed files\")\n\tprint(\" -h, --help Display this help\")\n\tsys.exit()\n\nallowOverwrite = False\ndirectory = None\nlistMode = False\ncpkFile = None\n\nindex = 1\nwhile index < len(sys.argv):\n\targ = sys.argv[index]\n\tindex += 1\n\tif arg in ['-r', '--allow-replace']:\n\t\tallowOverwrite = True\n\telif arg in ['-d', '--directory']:\n\t\tif index >= len(sys.argv):\n\t\t\tusage()\n\t\tif directory is not None:\n\t\t\tusage()\n\t\tdirectory = sys.argv[index]\n\t\tindex += 1\n\telif arg in ['-l', '--list']:\n\t\tlistMode = True\n\telif arg[0:1] == '-':\n\t\tusage()\n\telif cpkFile is None:\n\t\tcpkFile = arg\n\telse:\n\t\tusage()\n\nif cpkFile is None:\n\tusage()\n\nmain(cpkFile, listMode, allowOverwrite, directory)\n","sub_path":"tools/cpk/pes-cpk-unpack.py","file_name":"pes-cpk-unpack.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"302362710","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 14 14:21:02 2017\n\n@author: zhi\n\"\"\"\n\nfrom simple_detector import simple_detector\nimport pandas as pd\nimport matplotlib.image as mpimg\n #%%\nif __name__ == '__main__':\n dataset_path = '/home/zhi/Downloads/data1/'\n dataset_info_path = '/home/zhi/Downloads/data1/img_test_data.csv'\n table = pd.read_csv(dataset_info_path)\n pos_count = 0\n neg_count = 0\n neg_index = []\n for image_name,value in zip(table.image_name,table.value):\n #print(image_name,value)\n image_path = dataset_path+image_name\n raw_image = mpimg.imread(image_path)\n result = simple_detector(raw_image)\n if result==value:\n pos_count+=1\n else:\n neg_index.append(image_path)\n neg_count+=1\n print(pos_count)\n print(neg_count)\n print(neg_index)\n ","sub_path":"evaluation_speed.py","file_name":"evaluation_speed.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"468051748","text":"class TinyMCESettings(object):\n \"\"\" This sets the settings for TinyMCE.\n \"\"\"\n spellchecker = False\n\n def __getattr__(self, name):\n return self.a[name]\n\n def __init__(self, static_url):\n self.static_url = static_url\n\n @property\n def default_config(self):\n SEP = \"|\"\n cj = lambda l: ','.join(l)\n\n plugins = cj([\n \"media\", \"tabfocus\", \"inlinepopups\", \"paste\", \"safari\",\n \"spellchecker\", \"table\", \"fullscreen\",\n ])\n\n theme_advanced_buttons1 = cj([\n \"bold\", \"italic\", \"underline\", \"formatselect\", \"styleselect\", SEP,\n \"bullist\", \"numlist\", \"hr\", \"outdent\", \"indent\", \"blockquote\", SEP,\n \"link\", \"unlink\", \"anchor\",\n ])\n\n theme_advanced_buttons2 = cj([\n \"table\", \"delete_table\", \"row_props\", \"cell_props\", \"delete_col\",\n \"col_after\", \"col_before\", \"delete_row\", \"row_after\", \"row_before\",\n \"split_cells\", \"merge_cells\", SEP, \"selectall\", \"pastetext\",\n \"pasteword\", \"charmap\", SEP, \"cleanup\", \"undo\", \"redo\", \"image\",\n SEP, 
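\n            # the closing trio toggles fullscreen mode, the raw HTML/code view and format stripping\n            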
\"fullscreen\", \"code\", \"removeformat\"\n ])\n\n theme_advanced_buttons3 = \"\"\n\n style_formats = [\n {'title': \"Lead\", 'block': 'p', 'classes': 'lead'},\n ]\n\n content_css = \"{}stylesheets/admin/tinymce.css\".format(self.static_url)\n\n return {\n 'theme': \"advanced\",\n 'skin': \"modern\",\n 'inlinepopups_skin': \"modern\",\n 'relative_urls': False,\n 'height': \"350px\",\n 'width': \"620px\",\n 'plugins': plugins,\n 'theme_advanced_blockformats': cj([\n 'p', 'h2', 'h3', 'h4', 'h5', 'h6',\n ]),\n 'theme_advanced_buttons1': theme_advanced_buttons1,\n 'theme_advanced_buttons2': theme_advanced_buttons2,\n 'theme_advanced_buttons3': theme_advanced_buttons3,\n 'theme_advanced_toolbar_location': \"top\",\n 'theme_advanced_toolbar_align': \"left\",\n 'theme_advanced_statusbar_location': \"bottom\",\n 'theme_advanced_resizing': True,\n 'theme_advanced_resize_horizontal': \"\",\n 'tab_focus': ':prev,:next',\n 'button_tile_map': True,\n 'paste_create_linebreaks': False,\n 'paste_create_paragraphs': True,\n 'paste_auto_cleanup_on_paste': True,\n 'paste_convert_middot_lists': True,\n 'paste_convert_headers_to_strong': False,\n 'paste_remove_spans': True,\n 'paste_remove_styles': True,\n 'paste_strip_class_attributes': \"all\",\n 'paste_use_dialog': False,\n 'content_css': content_css,\n 'style_formats': style_formats,\n 'valid_elements': (\n \"@[id|class|style|title|dir 0:\n return store_list\n else:\n return False\n\n\n","sub_path":"masldb.py","file_name":"masldb.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"249152131","text":"from unittest import TestCase\nfrom unittest.mock import patch\nimport io\n\nfrom game import regen_health\n\n\nclass TestRegenHealth(TestCase):\n def test_full_health(self):\n actual = regen_health(20, 20)\n expected = 20\n\n self.assertEqual(actual, expected)\n\n @patch('sys.stdout', new_callable=io.StringIO)\n def test_full_amount_healed(self, mock_output):\n actual_health = regen_health(10, 20)\n actual_output = mock_output.getvalue()\n\n expected_health = 14\n expected_output = \"You repaired your ship by \\033[94m4\\033[0m points!\\n\"\n\n self.assertEqual(actual_health, expected_health)\n self.assertEqual(actual_output, expected_output)\n\n @patch('sys.stdout', new_callable=io.StringIO)\n def test_partial_amount_healed(self, mock_output):\n actual_health = regen_health(18, 20)\n actual_output = mock_output.getvalue()\n\n expected_health = 20\n expected_output = \"You repaired your ship \\033[94mcompletely\\033[0m!\\n\"\n\n self.assertEqual(actual_health, expected_health)\n self.assertEqual(actual_output, expected_output)\n","sub_path":"test_regen_health.py","file_name":"test_regen_health.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"432216910","text":"\"\"\" Implementation of Cosmic RIM estimator\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nprint(physical_devices)\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\nconfig = tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\nimport numpy as np\nimport os, sys, argparse, time\nfrom scipy.interpolate import InterpolatedUnivariateSpline as iuspline\nimport 
matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nfrom rim_utils import build_rim_parallel, myAdam\nfrom recon_models import Recon_DM\n\nimport flowpm\nfrom flowpm import linear_field, lpt_init, nbody, cic_paint\nfrom flowpm.utils import r2c3d, c2r3d\nsys.path.append('../../utils/')\nimport tools\n\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('--nc', type=int, default=16, help='Grid size')\nparser.add_argument('--bs', type=float, default=100, help='Box Size')\nparser.add_argument('--nsteps', type=int, default=3, help='')\nparser.add_argument('--niter', type=int, default=200, help='Number of iterations/Max iterations')\nparser.add_argument('--lr', type=float, default=0.01, help='Learning rate')\nparser.add_argument('--optimizer', type=str, default='adam', help='Which optimizer to use')\nparser.add_argument('--batch_size', type=int, default=8, help='Batch size')\nparser.add_argument('--nsims', type=int, default=100, help='Number of simulations')\nparser.add_argument('--nbody', type=str2bool, default=True, help='Number of simulationss')\nparser.add_argument('--lpt_order', type=int, default=1, help='Order of LPT Initial conditions')\nparser.add_argument('--input_size', type=int, default=16, help='Input layer channel size')\nparser.add_argument('--cell_size', type=int, default=16, help='Cell channel size')\nparser.add_argument('--rim_iter', type=int, default=10, help='Optimization iteration')\nparser.add_argument('--epochs', type=int, default=10, help='Number of epochs')\nparser.add_argument('--suffix', type=str, default='', help='Suffix for folder pathname')\nparser.add_argument('--batch_in_epoch', type=int, default=20, help='Number of batches in epochs')\nparser.add_argument('--RR', type=int, default=2, help='number of annealing steps')\n\n\n\nargs = parser.parse_args()\n\n\nnc, bs = args.nc, args.bs\nniter = args.niter\noptimizer = args.optimizer\nlr = args.lr\na0, af, nsteps = 0.1, 1.0, args.nsteps\nstages = np.linspace(a0, af, nsteps, endpoint=True)\n#anneal = True\nif args.RR == 5: RRs = [4., 2., 1., 0.5, 0.]\nelif args.RR == 4: RRs = [2., 1., 0.5, 0.]\nelif args.RR == 2: RRs = [1., 0.]\nelse: RRs = [0.]\n\n#\nklin = np.loadtxt('../../data/Planck15_a1p00.txt').T[0]\nplin = np.loadtxt('../../data//Planck15_a1p00.txt').T[1]\nipklin = iuspline(klin, plin)\n# Compute necessary Fourier kernels \nkvec = tools.fftk((nc, nc, nc), boxsize=nc, symmetric=False)\nkmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)\npriorwt = ipklin(kmesh)\n\n\n\n#RIM params\nparams = {}\nparams['input_size'] = args.input_size\nparams['cell_size'] = args.cell_size\nparams['strides'] = 2\nparams['middle_size'] = args.input_size // params['strides'] #lets divide by strides\nparams['cell_kernel_size'] = 5\nparams['input_kernel_size'] = 5\nparams['middle_kernel_size'] = 5\nparams['output_kernel_size'] = 5\nparams['rim_iter'] = args.rim_iter\nparams['input_activation'] = 'tanh'\nparams['output_activation'] = 'linear'\nparams['nc'] = nc\n\n\nadamiters, adamiters10 = max(10, params['rim_iter']), max(100, params['rim_iter']*10)\nadam = myAdam(adamiters)\nadam10 = myAdam(adamiters10)\nfid_recon = Recon_DM(nc, bs, a0=a0, af=af, nsteps=nsteps, nbody=args.nbody, lpt_order=args.lpt_order, 
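\n                    # assumption: anneal=True makes Recon_DM anneal over the RRs smoothing scales chosen above\n                    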
anneal=True)\n\nsuffpath = '_parallel' + args.suffix\nif args.nbody: ofolder = './models/L%04d_N%03d_T%02d%s/'%(bs, nc, nsteps, suffpath)\nelse: ofolder = './models/L%04d_N%03d_LPT%d%s/'%(bs, nc, args.lpt_order, suffpath)\ntry: os.makedirs(ofolder)\nexcept Exception as e: print(e)\n\n\n\n\ndef get_data(nsims=args.nsims):\n #if args.nbody: dpath = '/project/projectdirs/m3058/chmodi/rim-data/L%04d_N%03d_T%02d/'%(bs, nc, nsteps)\n #else: dpath = '/project/projectdirs/m3058/chmodi/rim-data/L%04d_N%03d_LPT%d/'%(bs, nc, args.lpt_order)\n if args.nbody: dpath = '../../data/rim-data/L%04d_N%03d_T%02d/'%(bs, nc, nsteps)\n else: dpath = '../../data/rim-data/L%04d_N%03d_LPT%d/'%(bs, nc, args.lpt_order)\n alldata = np.array([np.load(dpath + '%04d.npy'%i) for i in range(nsims)]).astype(np.float32)\n traindata, testdata = alldata[:int(0.9*nsims)], alldata[int(0.9*nsims):]\n return traindata, testdata\n\n\n@tf.function()\ndef pm_data(dummy):\n print(\"PM graph\")\n linear = flowpm.linear_field(nc, bs, ipklin, batch_size=args.batch_size)\n if args.nbody:\n print('Nobdy sim')\n state = lpt_init(linear, a0=a0, order=args.lpt_order)\n final_state = nbody(state, stages, nc)\n else:\n print('ZA/2LPT sim')\n final_state = lpt_init(linear, a0=af, order=args.lpt_order)\n tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])\n return linear, tfinal_field\n\n@tf.function()\ndef pm_data_test(dummy):\n print(\"PM graph\")\n linear = flowpm.linear_field(nc, bs, ipklin, batch_size=1)\n if args.nbody:\n print('Nobdy sim')\n state = lpt_init(linear, a0=a0, order=args.lpt_order)\n final_state = nbody(state, stages, nc)\n else:\n print('ZA/2LPT sim')\n final_state = lpt_init(linear, a0=af, order=args.lpt_order)\n tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])\n return linear, tfinal_field\n\n\n@tf.function\ndef pm(linear):\n if args.nbody:\n print('Nobdy sim')\n state = lpt_init(linear, a0=a0, order=args.lpt_order)\n final_state = nbody(state, stages, nc)\n else:\n print('ZA/2LPT sim')\n final_state = lpt_init(linear, a0=af, order=args.lpt_order)\n tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])\n return tfinal_field\n\n\n\n@tf.function\ndef recon_dm(linear, data):\n\n print('new graph')\n final_field = pm(linear)\n residual = final_field - data #.astype(np.float32)\n chisq = tf.multiply(residual, residual)\n chisq = tf.reduce_mean(chisq) \n lineark = r2c3d(linear, norm=nc**3)\n priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))\n prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))\n loss = chisq + prior\n\n return loss, chisq, prior\n\n\n@tf.function\ndef recon_dm_grad(x, y):\n with tf.GradientTape() as tape:\n tape.watch(x)\n loss = recon_dm(x, y)[0]\n grad = tape.gradient(loss, x)\n return grad\n\n\n\n\ndef check_im(xx, x_init, pred, fname=None):\n fig, ax = plt.subplots(1, 3, figsize = (12, 4))\n vmin, vmax = xx.sum(axis=0).min(), xx.sum(axis=0).max()\n ax[0].imshow(xx.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[0].set_title('Truth')\n ax[1].imshow(x_init.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[1].set_title('initial point')\n ax[2].imshow(pred.sum(axis=0), vmin=vmin, vmax=vmax)\n ax[2].set_title('RIM recon')\n if fname is not None: plt.savefig(fname)\n else: plt.savefig(ofolder + 'rim-im.png')\n plt.close()\n\n\n\ndef check_2pt(xx, yy, rim, grad_fn, compares, nrim=10, fname=None):\n truemesh = [xx[0], yy[0]]\n rimpreds = []\n for it in range(nrim):\n x_init = np.random.normal(size=np.prod(xx.shape)).reshape(xx.shape).astype(np.float32)\n #x_init = (yy - (yy.max() - 
yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n pred = rim(tf.constant(x_init), tf.constant(yy), grad_fn)[-1]\n rimpreds.append([pred[0].numpy(), pm(pred)[0].numpy()])\n\n fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True)\n for ip, preds in enumerate(rimpreds):\n k, pks = tools.get_ps(preds, truemesh, bs)\n for i in range(2):\n lbl = None\n if ip == 0 and i == 0: lbl = 'Linear'\n if ip == 0 and i == 1: lbl = 'Final'\n ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d-'%i, alpha=0.4, label=lbl)\n ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d-'%i, alpha=0.4)\n\n lss = ['-', '--', ':', '-.']\n lws = [ 1, 1, 2, 2]\n lbls = ['Adam', 'Adam 10x', 'Best recon']\n #for ip, preds in enumerate([pred_adam, pred_adam10]):\n for ip, preds in enumerate(compares):\n k, pks = tools.get_ps(preds, truemesh, bs)\n for i in range(2):\n lbl = None\n if i == 0: lbl = lbls[ip]\n ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%i, ls=lss[ip+1], lw=lws[ip+1])\n ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%i, label=lbl, ls=lss[ip+1], lw=lws[ip+1])\n \n for axis in ax: \n axis.semilogx()\n axis.grid(which='both')\n axis.legend(fontsize=12)\n axis.set_xlabel('k(h/Mpc)', fontsize=12)\n ax[0].set_ylim(-0.1, 1.2)\n ax[1].set_ylim(-0.5, 2.0)\n ax[0].set_ylabel('$r_c$', fontsize=12)\n ax[1].set_ylabel('$t_f$', fontsize=12)\n plt.tight_layout()\n if fname is not None: plt.savefig(fname)\n else: plt.savefig('rim-2pt.png')\n plt.close()\n\n\n\n\n\n\ndef main():\n \"\"\"\n Model function for the CosmicRIM.\n \"\"\"\n\n rim = build_rim_parallel(params)\n grad_fn = recon_dm_grad\n #\n\n#\n train_dataset = tf.data.Dataset.range(args.batch_in_epoch)\n train_dataset = train_dataset.map(pm_data)\n train_dataset = train_dataset.prefetch(-1)\n test_dataset = tf.data.Dataset.range(1).map(pm_data_test).prefetch(-1)\n#\n #traindata, testdata = get_data()\n #idx = np.random.randint(0, traindata.shape[0], 1)\n #xx, yy = traindata[idx, 0].astype(np.float32), traindata[idx, 1].astype(np.float32), \n #x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)\n #x_pred = rim(x_init, yy, grad_fn)\n\n \n\n #\n # @tf.function\n def rim_train(x_true, x_init, y):\n with tf.GradientTape() as tape:\n x_pred = rim(x_init, y, grad_fn)\n res = (x_true - x_pred)\n loss = tf.reduce_mean(tf.square(res))\n gradients = tape.gradient(loss, rim.trainable_variables)\n return loss, gradients\n\n\n ##Train and save\n piter, testiter = 10, 20\n losses = []\n lrs = [0.001, 0.0005, 0.0001]\n lepochs = [2, 20, 20]\n trainiter = 0 \n x_test, y_test = None, None\n\n for il in range(3):\n print('Learning rate = %0.3e'%lrs[il])\n opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])\n\n i = 0 \n for ie in range(lepochs[il]):\n\n start = time.time()\n for ii, ix in enumerate(train_dataset):\n xx, yy = ix\n x_init = np.random.normal(size=np.prod(xx.shape)).reshape(xx.shape).astype(np.float32)\n loss, gradients = rim_train(x_true=tf.constant(xx), \n x_init=tf.constant(x_init), \n y=tf.constant(yy))\n losses.append(loss.numpy()) \n opt.apply_gradients(zip(gradients, rim.trainable_variables))\n i+=1 \n trainiter +=1\n if ii > args.batch_in_epoch: break\n\n print(\"Time taken for %d iterations : \"%i, time.time() - start)\n print(\"Loss at iteration %d : \"%i, losses[-1])\n\n if i%testiter == 0: \n plt.plot(losses)\n plt.savefig(ofolder + 'losses.png')\n plt.close()\n\n if x_test is None:\n for x_test, y_test in test_dataset: \n print(\"shape of test set : \", x_test.shape)\n 
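# baseline reconstructions (plain Adam, 10x-iteration Adam, and the annealed fiducial recon) are computed once on this first test batch and reused in the 2pt comparison plots\n                        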
pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn)\n pred_adam = [pred_adam[0].numpy(), pm(pred_adam)[0].numpy()]\n pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn)\n pred_adam10 = [pred_adam10[0].numpy(), pm(pred_adam10)[0].numpy()]\n minic, minfin = fid_recon.reconstruct(tf.constant(y_test), RRs=RRs, niter=adamiters10, lr=0.1)\n compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]\n x_test, y_test = x_test.numpy(), y_test.numpy()\n print('Test set generated')\n break\n\n \n x_init = np.random.normal(size=np.prod(x_test.shape)).reshape(x_test.shape).astype(np.float32)\n #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)\n pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]\n check_im(x_test[0], x_init[0], pred.numpy()[0], fname=ofolder + 'rim-im-%04d.png'%trainiter)\n check_2pt(x_test, y_test, rim, grad_fn, compares, fname=ofolder + 'rim-2pt-%04d.png'%trainiter)\n\n rim.save_weights(ofolder + '/%d'%trainiter)\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"code/rim/cosmic_rim_parallel2.py","file_name":"cosmic_rim_parallel2.py","file_ext":"py","file_size_in_byte":13510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101341626","text":"\"\"\"\nPython makes performing file I/O simple. Take a look\nat how to read and write to files here:\n\nhttps://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files\n\"\"\"\n\n# Open up the \"foo.txt\" file (which already exists) for reading\n# Print all the contents of the file, then close the file\n\n# YOUR CODE HERE\nwith open('src/foo.txt') as f:\n read_data = f.read()\n print(read_data)\nf.closed\n# Open up a file called \"bar.txt\" (which doesn't exist yet) for\n# writing. Write three lines of arbitrary content to that file,\n# then close the file. Open up \"bar.txt\" and inspect it to make\n# sure that it contains what you expect it to contain\n\n# YOUR CODE HERE\nf = open(\"src/bar.txt\", \"w+\")\nf.write(\"Mr. and Mrs. Dursley, of number four, Privet Drive, \\nwere proud to say that they were perfectly normal, \\nthank you very much.\")\nf.close()\n\n# f = open(\"file\", \"w+\") creates and opens a new file by that file name\n# with open(\"file\") as f opens an already created file by that name","sub_path":"src/13_file_io.py","file_name":"13_file_io.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"271867695","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPurpose : Speech recognition(Using Google API)\r\nCreated by : Sathiya Raj Subburayan\r\nCreated date : 4/21/2019\r\n\r\nOther comments : \r\n \r\nRequired packages/modules : conda install -c conda-forge speechrecognition\r\n\r\nModification history:\r\nDate Modified by Modifications\r\n\r\n\r\n\"\"\"\r\n\r\nimport speech_recognition as sr\r\n#import numpy as np\r\n\r\nr = sr.Recognizer()\r\nwith sr.Microphone() as source:\r\n print(\"Please say something....\")\r\n audio = r.listen(source)\r\ntry:\r\n print(\"You said: \" + r.recognize_google(audio))\r\nexcept Exception:\r\n print(\"Oops... 
Something went wrong\")","sub_path":"Speech_Recog_20190421.py","file_name":"Speech_Recog_20190421.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"347480437","text":"import math\n\ntxa, tya, txb, tyb, t, v = map(int, input().split())\nn = int(input())\nx = []\ny = []\n\nfor i in range(n):\n X, Y = map(int, input().split())\n x.append(X)\n y.append(Y)\n distance_from = math.sqrt((txa - X) ** 2 + (tya - Y) ** 2)\n distance_to = math.sqrt((txb - X) ** 2 + (tyb - Y) ** 2)\n if distance_from + distance_to <= t * v:\n print('YES')\n exit(0)\n\nprint('NO')\n","sub_path":"python/abc/010/c/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"98704821","text":"import excel_to_text\r\nimport excel_to_table\r\n\r\ndef enter_choice():\r\n optional = ('1', '2')\r\n while True:\r\n try:\r\n print(\"功能选择:\\n1、以文本形式导出批量word\\n2、以表格形式导出批量word\\n请选择:(1/2):\")\r\n choice = input()\r\n if choice not in optional:\r\n raise ValueError('请输入\"1\"或\"2\"')\r\n break\r\n except Exception as err:\r\n print(err)\r\n return choice\r\n\r\ndef main():\r\n choice = enter_choice()\r\n if choice == '1':\r\n while True:\r\n print(\"请输入要处理的excel文件绝对路径:\")\r\n file_path = input()\r\n print(\"请输入模板文件绝对路径:\")\r\n formatpath = input()\r\n try:\r\n excel_to_text.main(file_path,formatpath)\r\n break\r\n except Exception as err:\r\n print(err)\r\n print(\"请检查文件路径是否正确\")\r\n elif choice == '2':\r\n while True:\r\n print(\"请输入要处理的excel文件绝对路径:\")\r\n file_path = input()\r\n try:\r\n excel_to_table.main(file_path)\r\n break\r\n except Exception as err:\r\n print(err)\r\n print(\"请检查文件路径是否正确\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n choice = enter_choice()\r\n file_path = '学生信息表.xlsx' # 要处理的excel文件路径\r\n if choice == '1':\r\n formatpath = 'template_text.docx'\r\n excel_to_text.main(file_path,formatpath)\r\n elif choice == '2':\r\n excel_to_table.main(file_path)\r\n\r\n\r\n\r\n\r\n","sub_path":"CoolTurnCodes/ExcelToWord/excel_to_word_test.py","file_name":"excel_to_word_test.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"120066862","text":"import numpy as np \nimport matplotlib.pyplot as plt \nimport scipy.io as sio\nfrom scipy.stats.stats import pearsonr \n\n\ndef single_trial(Tau_1, Tau_2, Tau_3, Tau_5, a, Pi_1, Pi_2, Pi_3, z_thrshld, evkd_amp, z_init, isplot, fig_num, is_save, save_name):\n\ttimestep = 10000\n\tnoise_std_1 = 0.003 #standard deviation of noise \n\tnoise_std_2 = 0.003\n\tnoise_std_3 = 0.003\n\t#initialize the array with size: timestep*1 \n\te_1 = np.zeros((timestep,1),dtype = np.float32) \n\tx = np.zeros((timestep,1),dtype = np.float32)\n\tz = np.zeros((timestep,1),dtype = np.float32)\n\tu = np.zeros((timestep,1),dtype = np.float32)\n\te_2 = np.zeros((timestep,1),dtype = np.float32)\n\tv = np.zeros((timestep,1),dtype = np.float32)\n\tz[0] = z_init\n\tx[np.int64(.4*timestep):np.int64(.45*timestep)] = x[np.int64(.4*timestep):np.int64(.45*timestep)] + evkd_amp\n\t#use Euler method to calculate Ordinal Differential Equations\n\tis_reset_z = 0\n\tfor i in range(1,timestep):\t\n\n\t\t# define tau_4, I chose 3*x[i] because 3 makes results look good. 
You can try other values.\n\t\tTau_4 = a/(1+np.exp(x[i])) \n\n\t\t# before the integration of z reaches z_threshold\n\t\tif (np.sum(z[max(i-3*Tau_5, 0):i,:]))< z_thrshld and is_reset_z < 1:\n\t\t\t#if timestep is less than Tau_5, then x(t-Tau_5) = 0 in the original equations.\n\t\t\tif i < Tau_5:\n\t\t\t\tz[i,0] = z[i-1,0]*(1 - 1./Tau_4) + np.random.normal(0,noise_std_3) #Euler method\n\t\t\telse:\n\t\t\t\tz[i,0] = z[i-1,0]*(1 - 1./Tau_4) + 1./Tau_4*x[i-Tau_5,0] + np.random.normal(0,noise_std_3) #Euler method\n\t\t#after the integration of z reaches z_threshold, set z=0\n\t\telse:\n\t\t\tis_reset_z = 1\n\t\t\tz[i,0] = 0\n\n\t\te_1[i,0] = np.abs(x[i,0] - z[i,0])\n\t\tu[i,0] = (1 - 1./Tau_1)*u[i-1] + Pi_1*e_1[i-1,0]*1./Tau_1 + np.random.normal(0,noise_std_1) #Euler method\n\n\t\t#if timestep is less than Tau_2, then x(t- Tau_2) = 0 in the original equations.\n\t\tif i > Tau_2:\n\t\t\te_2[i,0] = u[i-Tau_2,0] \n\t\t\tv[i,0] = (1 - 1./Tau_3)*v[i-1] + Pi_2*e_2[i-1,0]*1./Tau_3 + Pi_3*z[i-1,0]*1./Tau_3+np.random.normal(0,noise_std_2) #Euler method\n\t\telse:\n\t\t\tv[i,0] = (1 - 1./Tau_3)*v[i-1] + Pi_3*z[i-1,0]*1./Tau_3 + np.random.normal(0,noise_std_2) #Euler method\n\n\t#show the withdraw timepoint\n\tj = np.max(np.where(z>0))\n\tprint('Paw withdraw at timestep: '+str(j))\n\n\tif isplot > 0:\n\t\tt = (np.arange(timestep)-j)/1000.\n\t\tplt.figure(fig_num)\n\t\tplt.plot(t,u,c = 'b',label = 'u')\n\t\tplt.plot(t,v,c = 'r',label = 'v')\n\t\tplt.plot(t,z,c = 'g',label = 'z')\n\t\tplt.plot(t,x, c= 'm',label = 'x')\n\t\tplt.legend()\n\tif is_save > 0:\n\t\tsio.savemat(save_name, {'u':u, 'v':v, 'z':z, 'x':x})\n\n\treturn sum(u[1:j])/j, sum(v[j:-1])/(timestep-j), j","sub_path":"pred_coding.py","file_name":"pred_coding.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"213744985","text":"import sys\n\ndef duplicates( values ):\n for i in values:\n count=0\n for j in values:\n if(i==j):\n count+=1\n if(count>1):\n return True\n\n return False\n\ndef main():\n # === print user instructions ======\n print( \"Enter a series of numbers, one at a time.\" )\n\n # === get user input ======\n inputs = []\n for i in range(0,5):\n num=input(\"\")\n inputs.append(num)\n\n # === confirm to the user what they entered ======\n print( \"You entered :\", ', '.join( inputs ) )\n if(duplicates(inputs)==True):\n print(\"There are duplicates\")\n else:\n print(\"There are no duplicates\")\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"lab_duplicates.py","file_name":"lab_duplicates.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"130552403","text":"#program to compute grades using a computegrade function\ndef computegrade(score):\n if score > 1.0:\n return 'Bad score'\n elif score >= 0.9:\n return 'A'\n elif score >= 0.8:\n return 'B'\n elif score >= 0.7:\n return 'C'\n elif score >= 0.6:\n return 'D'\n elif score < 0.6:\n return 'F'\n\n\ntry:\n score = input('Enter score: ')\n score = float(score)\n grade = computegrade(score)\n print(grade)\n\nexcept Exception:\n print('Bad score')\n","sub_path":"src/chapter 4/exercise7.py","file_name":"exercise7.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"479967241","text":"from enum import Enum\n\nclass OrbitalRatio(Enum):\n EARTH = 1\n MERCURY = 0.2408467\n VENUS = 
0.61519726\n MARS = 1.8808158\n JUPITER = 11.862615\n SATURN = 29.447498\n URANUS = 84.016846\n NEPTUNE = 164.79132\n\nclass SpaceAge(object):\n SECONDS_IN_YEAR = 31557600\n\n def __init__(self, seconds):\n self.seconds = seconds\n for name, member in OrbitalRatio.__members__.items():\n _method = self._make_method(member.value)\n setattr(self, \"on_\" + name.lower(), _method)\n\n def _make_method(self, value):\n def _method():\n return round(self.seconds / self.SECONDS_IN_YEAR / value, 2)\n return _method\n","sub_path":"python/space-age/space_age.py","file_name":"space_age.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"481282250","text":"from pygametk.actions.helpers import Call\nfrom pygametk.dialog import DialogResponse, DialogStatus, WidgetDialog\nfrom pygametk.geometry import Size, EXPAND\nfrom pygametk.input import KeyBinding\nfrom pygametk.widgets.geometry import VSpace, HSpace, VExpand\nfrom pygametk.widgets.layout import LayoutGridBox, LayoutVBox, MarginLayout\n\n\nclass MpdSearchDialog(WidgetDialog):\n\n def __init__(self, theme):\n super().__init__(theme[\"bg\"])\n self.theme = theme\n\n def _make_entry_box(self, theme, item_height):\n entries = {}\n entry_box = LayoutGridBox(3)\n\n width, height = self.surface.get_size()\n\n for term in [\"Any\", \"Artist\", \"Album\", \"Title\"]:\n label = self._make_label(term + \":\", item_height)\n hspace = HSpace(0.005 * width)\n entry = self._make_entry(\"\", item_height)\n entry_box.append_row_with_widgets([label, hspace, entry])\n\n vspace = VSpace(0.05 * height)\n entry_box.append_row_with_widgets([vspace])\n\n entries[term] = entry\n\n return (entry_box, entries)\n\n def _get_search_args(self, entries):\n args = []\n\n for k,v in {k.lower(): v.text for k,v in entries.items() if v.text}.items():\n args.append(k)\n args.append(v)\n return args\n\n def _search_button_activated(self):\n self._finish_dialog(DialogResponse(DialogStatus.OK, self._get_search_args(self.entries)))\n\n def _init_root_widget(self):\n hmargin, vmargin = 0.05, 0.05\n\n width, height = self.surface.get_size()\n\n theme = self.theme\n\n item_height = int(0.045 * height)\n\n button_size = Size(int(0.08 * width), int(0.055 * height))\n grid, entries = self._make_entry_box(theme, item_height)\n search_button = self._make_button(\"Search\", item_height, button_size,\n EXPAND, EXPAND)\n search_button.connect(\"activated\", self._search_button_activated)\n\n grid.append_row_with_widgets([VSpace(0), VSpace(0), search_button])\n\n vbox = LayoutVBox([VExpand(), grid, VExpand()])\n\n margin = MarginLayout(hmargin, hmargin, vmargin, vmargin)\n margin.add(vbox)\n margin.show_all()\n\n self._set_root_widget(margin)\n self.entries = entries\n\n entries[\"Any\"].focus()\n entries[\"Any\"].set_textmode()\n\n def activate():\n search_button.activate()\n\n self.key_handler.add_binding(KeyBinding(\"s\", Call(activate)))\n","sub_path":"mpd/dialogs/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"334246658","text":"\"\"\"\nFile: testbandw.py\nTests a function for converting a color image to\nblack and white.\n\"\"\"\n\nfrom images import Image\n\ndef blackAndWhite(image):\n \"\"\"Converts the argument image to black and white.\"\"\"\n blackPixel = (0, 0, 0)\n whitePixel = (255, 255, 255)\n for y in range(image.getHeight()):\n for x in 
range(image.getWidth()):\n (r, g, b) = image.getPixel(x, y)\n average = (r + g + b) / 3\n if average < 128:\n image.setPixel(x, y, blackPixel)\n else:\n image.setPixel(x, y, whitePixel)\n\ndef main(filename = \"smokey.gif\"):\n image = Image(filename)\n print(\"Close the image window to continue. \")\n image.draw()\n blackAndWhite(image)\n print(\"Close the image window to quit. \")\n image.draw()\n\nmain()\n","sub_path":"WA170 - Programming with Python/WA170_textbook_example_programs/Ch_07_Student_Files/testblackandwhite.py","file_name":"testblackandwhite.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"259853709","text":"import pygame\nfrom pygame.locals import *\nimport util\n\nclass Paleta(pygame.sprite.Sprite):\n def __init__(self, x, dir, width, height):\n pygame.sprite.Sprite.__init__(self)\n\n # Imagen de la paleta\n self.image = util.load_image(\"paleta.png\", dir, alpha=True)\n\n # Tamaño que ocupa\n self.rect = self.image.get_rect()\n\n # Se posiciona la paleta\n self.rect.centerx = x\n self.rect.centery = height / 2\n\n # Resto de propiedades necesarias\n self.points = 0\n self.width = width\n self.height = height\n\n def update(self):\n # Se controla que la paleta no salga de la pantalla\n if self.rect.bottom >= self.height:\n self.rect.bottom = self.height\n elif self.rect.top <= 0:\n self.rect.top = 0","sub_path":"paleta.py","file_name":"paleta.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"470987654","text":"'''\nCreated on 11.03.2015\n\n@author: Waetzold Plaum\n'''\nimport copy\nfrom PyQt4 import QtGui, QtCore\n\nfrom PyLinXData import * \nimport PX_Templates as PX_Templ\nfrom PyLinXGui import BEasyWidget\n\nclass PX_Dialogue_SelectDataViewer(QtGui.QDialog):\n \n def __init__(self, parent, variable, mainController, drawWidget):\n \n super(PX_Dialogue_SelectDataViewer, self).__init__(parent)\n \n self.mainController = mainController\n listSelectedDispObj = variable.get(u\"listSelectedDispObj\")\n layout = QtGui.QVBoxLayout(self)\n listDataDispObj = mainController.get(u\"listDataDispObj\")\n listSelectionDisp = list(set(listDataDispObj).intersection(set(listSelectedDispObj)))\n if len(listSelectionDisp ) > 0:\n listSelectionDisp.sort()\n \n idxLastSelectedDataViewer = self.mainController.get(u\"idxLastSelectedDataViewer\")\n \n init_list = []\n for item in listDataDispObj:\n dict_cache = {}\n dict_cache[u\"Name\"] = u\"bDataViewer_\" + str(item)\n dict_cache[u\"DisplayName\"] = u\"data viewer \" + str(item)\n dict_cache[u\"ValueType\"] = u\"bool\"\n if item in listSelectionDisp or \\\n ((item == idxLastSelectedDataViewer) and len(listSelectionDisp)== 0):\n value = True\n else:\n value = False\n dict_cache[u\"Value\"] = value\n init_list.append(dict_cache)\n \n\n dict_cache = {}\n dict_cache[u\"Name\"] = u\"bNewDataViewer\"\n dict_cache[u\"DisplayName\"] = u\"New data viewer\"\n dict_cache[u\"ValueType\"] = u\"bool\"\n\n if len(listSelectionDisp ) == 0 and idxLastSelectedDataViewer < 0:\n dict_cache[u\"Value\"] = True\n else:\n dict_cache[u\"Value\"] = False\n \n init_list.append(dict_cache)\n\n \n self.setLayout(layout)\n self.drawWidget = drawWidget\n self.variable = variable\n easyWidget = BEasyWidget.EasyWidget(init_list, True)\n self.layout().addWidget(easyWidget)\n self.formWidget = easyWidget\n\n # OK and Cancel buttons\n self.buttons = QtGui.QDialogButtonBox(\n 
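# standard Ok/Cancel pair laid out horizontally; accepted/rejected are wired to on_accept/on_reject below\n            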
QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,\n QtCore.Qt.Horizontal, self)\n self.buttons.accepted.connect(self.on_accept)\n self.buttons.rejected.connect(self.on_reject)\n self.layout().addWidget(self.buttons)\n self.result = False\n \n def on_reject(self):\n self.hide()\n \n def on_accept(self):\n \n self.result = True\n values = self.formWidget.getValues()\n listSelectedDispObj_new = []\n idx = self.mainController.get(u\"idxLastSelectedDataViewer\")\n listSelectedDispObj = self.variable.get(u\"listSelectedDispObj\")\n \n for key in values:\n if u\"bDataViewer_\" in key:\n if values[key]:\n listSelectedDispObj_new.append(int(key[12:]))\n \n if values[u\"bNewDataViewer\"]:\n execStr = u\"new dataViewer 50 50\"\n newVarDispObj = self.mainController.execCommand(execStr)\n idx = newVarDispObj.get(u\"idxDataDispObj\")\n listSelectedDispObj_new.append(idx)\n \n execStr = u\"set ./\" + self.variable.get(u\"Name\") + u\".listSelectedDispObj \" +\\\n unicode(repr(listSelectedDispObj_new))\n self.mainController.execCommand(execStr)\n self.mainController.set(u\"idxLastSelectedDataViewer\", idx)\n self.hide()\n \n @staticmethod\n def getParams(parent, variable, mainController, drawWidget):\n dialog = PX_Dialogue_SelectDataViewer(parent, variable, mainController, drawWidget)\n result = dialog.exec_()\n drawWidget.repaint() \n return dialog.result\n ","sub_path":"PyLinXGui/PX_Dialogue_SelectDataViewer.py","file_name":"PX_Dialogue_SelectDataViewer.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"95403075","text":"import pymysql\r\nclass admission:\r\n def __init__(self):\r\n self.__servername=\"localhost\"\r\n self.__username=\"root\"\r\n self.__password=\"\"\r\n self.__dbname=\"student\"\r\n try:\r\n self.con=pymysql.connect(self.__servername,self.__username,self.__password,self.__dbname)\r\n print(\"\")\r\n except:\r\n print(\"\\t\\t\\tConnection Error\")\r\n def add(self):\r\n self.id=int(input(\"\\t\\t\\tEnter Student' ID : \"))\r\n self.roll=int(input(\"\\t\\t\\tEnter Student's ROLL NO. 
: \"))\r\n self.name=input(\"\\t\\t\\tEnter Student's NAME : \")\r\n self.std=input(\"\\t\\t\\tEnter Student's CLASS (FY/SY) : \")\r\n self.addr=input(\"\\t\\t\\tEnter Student's ADDRESS : \")\r\n self.pincode=int(input(\"\\t\\t\\tEnter PINCODE : \"))\r\n self.ph=int(input(\"\\t\\t\\tEnter Student's CONTACT : \"))\r\n self.fph=int(input(\"\\t\\t\\tEnter Father's CONTACT : \"))\r\n self.tot=60000\r\n print(\"\\t\\t\\tTotal Fees : \",self.tot)\r\n self.paidfee=int(input(\"\\t\\t\\tCurrent Paid Fees : \"))\r\n self.pendfee=self.tot-self.paidfee\r\n print(\"\\t\\t\\tPending Fees : \",self.pendfee)\r\n query=\"INSERT INTO studentadm(ID,ROLL,NAME,STD,ADDR,PINCODE,PHONE,FPHONE,TOTAL,PAIDFEE,PEFEE) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\r\n val=(self.id,self.roll,self.name,self.std,self.addr,self.pincode,self.ph,self.fph,self.tot,self.paidfee,self.pendfee)\r\n cur=self.con.cursor()\r\n try:\r\n cur.execute(query,val)\r\n except:\r\n print(\"\\t\\t\\tERROR IN QUERY \")\r\n else:\r\n self.con.commit()\r\n print(\"\\t\\t\\tRecord Insert Successfully \")\r\n print(\"\")\r\n def delete(self):\r\n self.id=int(input(\"\\t\\t\\tEnter Student's ID to DELTE : \"))\r\n query=\"DELETE FROM studentadm WHERE ID=%s\"\r\n cur=self.con.cursor()\r\n try:\r\n cur.execute(query,self.id)\r\n except:\r\n print(\"\\t\\t\\tRecord Not Found \")\r\n else:\r\n self.con.commit()\r\n print(\"\\t\\t\\tRecord Deleted Successfully \")\r\n print(\"\\t\\t\\t\")\r\n def update(self):\r\n self.id=int(input (\"\\t\\t\\tEnter Student's ID for UPDATE : \"))\r\n print(\"\\t\\t\\t1. Enter 1 for Update Roll Number \\n\\t\\t\\t2. Enter 2 for Update Name\\n\\t\\t\\t3. Enter 3 for Update Class \\n\\t\\t\\t4. Enter 4 for Update Address \\n\\t\\t\\t 5. Enter 5 for Update Contact\\n\\t\\t\\t6. Enter 6 for Update Father's Contact\\n\\t\\t\\t7.Enter 7 for Exit \")\r\n while(True):\r\n print(\"\\t\\t\\t=====================================\")\r\n ch=int(input(\"\\t\\t\\tEnter your Choice for Updation : \"))\r\n print(\"\\t\\t\\t=====================================\")\r\n if(ch==1):\r\n self.roll=int(input(\"\\t\\t\\tEnter New Roll Number :\"))\r\n query=\"update studentadm set ROLL=%s where ID=%s\"\r\n cur=self.con.cursor()\r\n val=(self.roll,self.id)\r\n try:\r\n cur.execute(query,val)\r\n self.con.commit()\r\n print(\"\\t\\t\\t -- New Roll Number is Updated -- \")\r\n except:\r\n print(\"\\t\\t\\t Oops ,Record Not Found !\")\r\n if(ch==2):\r\n self.name=input(\"\\t\\t\\tEnter New Student's Name : \")\r\n query=\"update studentadm set NAME=%s where ID=%s\"\r\n cur=self.con.cursor()\r\n val=(self.name,self.id)\r\n try:\r\n cur.execute(query,val)\r\n self.con.commit()\r\n print(\" \\t\\t\\t-- New Name is Updated --\") \r\n except:\r\n print(\" \\t\\t\\tOops ,Record Not Found !\")\r\n if(ch==3):\r\n self.cls=input(\"\\t\\t\\tEnter New Student's Class : \")\r\n query=\"update studentadm set STD=%s where ID=%s\"\r\n cur=self.con.cursor()\r\n val=(self.cls,self.id)\r\n try:\r\n cur.execute(query,val)\r\n self.con.commit()\r\n print(\" \\t\\t\\t-- New Class is Updated --\")\r\n except:\r\n print(\"\\t\\t\\t Oops ,Record Not Found !\")\r\n if(ch==4):\r\n self.adr=input(\"\\t\\t\\tEnter New Student's Address : \")\r\n query=\"update studentadm set ADDR=%s where ID=%s\"\r\n cur=self.con.cursor()\r\n val=(self.adr,self.id)\r\n try:\r\n cur.execute(query,val)\r\n self.con.commit()\r\n print(\"\\t\\t\\t -- New Address is Updated -- \")\r\n except:\r\n print(\"\\t\\t\\t Oops ,Record Not Found !\")\r\n if(ch==5):\r\n self.ph=int(input(\"\\t\\t\\tEnter New 
Student's Contact : \"))\r\n                query=\"update studentadm set PHONE=%s where ID=%s\"\r\n                cur=self.con.cursor()\r\n                val=(self.ph,self.id)\r\n                try:\r\n                    cur.execute(query,val)\r\n                    self.con.commit()\r\n                    print(\"\\t\\t\\t -- New Contact is Updated -- \")\r\n                except:\r\n                    print(\"\\t\\t\\tOops, Record Not Found!\")\r\n            if(ch==6):\r\n                self.fph=int(input(\"\\t\\t\\tEnter New Father's Contact : \"))\r\n                query=\"update studentadm set FPHONE=%s where ID=%s\"\r\n                cur=self.con.cursor()\r\n                val=(self.fph,self.id)\r\n                try:\r\n                    cur.execute(query,val)\r\n                    self.con.commit()\r\n                    print(\"\\t\\t\\t -- New Father's Contact is Updated -- \")\r\n                except:\r\n                    print(\"\\t\\t\\tOops, Record Not Found!\")\r\n            if(ch==7):\r\n                print(\"\\t\\t\\t -- All Updates Done -- \")\r\n                break\r\n    def show(self):\r\n        query=\"SELECT * FROM studentadm\"\r\n        try:\r\n            cur=self.con.cursor()\r\n            cur.execute(query)\r\n        except:\r\n            print(\"\\t\\t\\tError in Query \")\r\n        else:\r\n            result=cur.fetchall()\r\n            for row in result:\r\n                print(\"\\t\\t\\t\",row)\r\n    \r\n    def __del__(self):\r\n        self.con.close()\r\n        print(\"\\t\\t\\t\\tGood Day\")\r\n        print(\"\\t\\t\\t   ******* Thank You ******* \")\r\n","sub_path":"Student Management/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"370806586","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\n\n\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',\n          'content-type': \"application/json\"}\nr = requests.get(\"https://www.smzdm.com/\",headers=header)\nr.encoding = \"utf-8\"\nbs = BeautifulSoup(r.text,'lxml')\n\n#smzdm_produce = bs.find('ul', id='feed-main-list')\n\n\n#smzdm_produce = bs.xpath(\"body/div[@id='content']\")\n\nxml = etree.HTML(r.text)\nprint(xml)\nsmzdm_produce = xml.xpath(\"body/div[@id='content']/div/div[@id='feed-wrap']/div\"\n                          \"/div[@class='feed-main-con']/ul[@id='feed-main-list']\")\nul = smzdm_produce[0].xpath('./li')\nfor u in ul:\n    web = u.xpath('./h5/a/@href')\n    print(web)\n\n#lists = smzdm_produce.find_all(\"li\",\"\")\n#print(type(smzdm_produce))\n#print(ul)","sub_path":"test_request/test_request_smzdm.py","file_name":"test_request_smzdm.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281084053","text":"from telegram import ReplyKeyboardMarkup\nfrom application.models import Mood\nfrom application.moodbot import messages\n\ndef get_mood_keyboard():\n    buttons = []\n    # reversed() must wrap the item sequence itself, not the dict\n    for rate, mood in reversed(list(Mood.get_available().items())):\n        buttons.append([\n            messages.MOOD_TEMPLATE.format(emoji=mood['emoji'], name=mood['name'], rate=rate)\n        ])\n    return ReplyKeyboardMarkup(buttons, resize_keyboard=True)\n","sub_path":"application/moodbot/keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"23636208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author: 王树根\n# email: wangshugen@ict.ac.cn\n# date: 2018-12-12 20:08\nimport subprocess\n\nfrom options import get_project_root\nfrom options import parse_convert_arguments\n\n\ndef main(args):\n    if args.output_path is None:\n        setattr(args, 'output_path', '{}.tok'.format(args.file_path))\n    script_path = get_project_root()\n    kwargs = {\n        'class_path': '{}/vendor/stanford-postagger-3.9.1.jar'.format(script_path),\n        
'main_class': 'edu.stanford.nlp.process.PTBTokenizer',\n        'input': args.file_path,\n        'output': args.output_path,\n        'options': ' '.join([\n            '-preserveLines',\n            '-lowerCase'\n        ])\n    }\n    cmd_str = 'java -cp {class_path} {main_class} {options} < {input} > {output}'.format(**kwargs)\n    if args.verbose:\n        print(cmd_str)\n    subprocess.call(cmd_str, shell=True)\n\n\ndef cli_main():\n    arguments = parse_convert_arguments()\n    main(arguments)\n\n\nif __name__ == '__main__':\n    cli_main()\n","sub_path":"tok.py","file_name":"tok.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"606309957","text":"class Wallet:\n\n    def __init__(self, walletId, userId, type, balance, updatedTimestamp, status):\n        self.walletId = walletId\n        self.userId = userId\n        self.type = type\n        self.balance = balance\n        self.updatedTimestamp = updatedTimestamp\n        self.status = status\n\n\n    def __repr__(self):\n        return \"walletId:%s userId:%s type:%s balance:%s updatedTimestamp:%s status:%s\" % (self.walletId, self.userId, self.type, self.balance, self.updatedTimestamp, self.status)","sub_path":"src/main/py/pozo/Wallet.py","file_name":"Wallet.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"106789612","text":"import os\nimport smtplib\nimport mimetypes\nimport optparse\nfrom email.message import EmailMessage\n\naws_ses_user = os.environ['AWS_EMAIL_USER']\naws_ses_pwd = os.environ['AWS_EMAIL_PWD']\n\ndef path_leaf(path):\n    head, tail = os.path.split(path)\n    return tail or os.path.basename(head)\n\npsr = optparse.OptionParser(usage=\"Usage: %prog [--exp=name] attachments\")\npsr.add_option(\"-e\", \"--exp\", action=\"store\", type=\"string\", dest=\"exp\", default=\"\",\n               help=\"Specify name of the experiment.\")\n\nopts, args = psr.parse_args()\ntitle = \"untitled\"\nif (opts.exp != \"\"):\n    title = opts.exp\n\ncontent = 'Experiment: {} is done.'.format(title)\n\nmsg = EmailMessage()\n\nfor filename in args:\n    path = os.path.join('./', filename)\n    if not os.path.isfile(path):\n        content += '\\nInfo: File \\\"{}\\\" not found, not attached.'.format(filename)\nmsg.set_content(content)\n\nmsg['Subject'] = 'Experiment Finished'\nmsg['From'] = 'AWS '\nmsg['To'] = 'hanx@g.harvard.edu'\n\nfor filename in args:\n    path = os.path.join('./', filename)\n    if os.path.isfile(path):\n        ctype, encoding = mimetypes.guess_type(path)\n        if ctype is None or encoding is not None:\n            ctype = 'application/octet-stream'\n        maintype, subtype = ctype.split('/', 1)\n        with open(path, 'rb') as fp:\n            msg.add_attachment(fp.read(),\n                               maintype=maintype,\n                               subtype=subtype,\n                               filename=path_leaf(path))\n\ns = smtplib.SMTP_SSL(host='email-smtp.us-east-1.amazonaws.com')\ns.login(aws_ses_user, aws_ses_pwd)\ns.send_message(msg)\ns.quit()\n","sub_path":"send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"520518914","text":"from pipeline.MyCNNLibrary import * #this is my own \"keras\" extension onto tensorflow\nfrom pipeline.Hyperparameters import Hyperparameters\nfrom pipeline.DatasetMaker_Universal import DatasetMaker_Universal\nfrom pipeline.DataParser_Universal import DataParser_Universal\nfrom housekeeping.csv_to_mat import ConfusionMatrixVisualizer\n\nDP = DataParser_Universal()\n\nname = \"Vanilla\"\n\nversion = \"AllDataCNN\" + 
Hyperparameters.MODE_OF_LEARNING\n\nweight_bias_list = list() #this is the weights and biases matrix\n\nbase_directory = \"../Graphs_and_Results/\" + name + \"/\" + version + \"/\"\ntry:\n os.mkdir(base_directory)\n print(\"made directory {}\".format(base_directory)) #this can only go one layer deep\nexcept:\n print(\"directory exists!\")\n pass\n\n\npool_size = (int(Hyperparameters.sizedict[Hyperparameters.MODE_OF_LEARNING]/4.0 + 0.99))**2 * 8\nclass Model():\n def __init__(self, DM):\n self.cnn_1 = Convolve(weight_bias_list, [3, 3, 1, 4], \"Layer_1_CNN\")\n self.cnn_2 = Convolve(weight_bias_list, [3, 3, 4, 4], \"Layer_2_CNN\")\n self.pool_1 = Pool()\n\n self.cnn_3 = Convolve(weight_bias_list, [3, 3, 4, 8], \"Layer_2_CNN\")\n self.pool_2 = Pool()\n\n self.flat = Flatten([-1, pool_size], \"Fully_Connected\")\n self.fc_1 = FC(weight_bias_list, [pool_size, DM.num_labels()], \"Layer_1_FC\")\n self.softmax = Softmax()\n\n def build_model_from_pickle(self, file_dir):\n big_list = unpickle(file_dir)\n #weights and biases are arranged alternating and in order of build\n self.cnn_1.build(from_file = True, weights = big_list[0:2])\n self.cnn_2.build(from_file = True, weights = big_list[2:4])\n self.cnn_3.build(from_file=True, weights=big_list[4:6])\n self.fc_1.build(from_file = True, weights = big_list[6:8])\n\n def build_model(self):\n self.cnn_1.build()\n self.cnn_2.build()\n self.cnn_3.build()\n self.fc_1.build()\n\n @tf.function\n def call(self, input):\n print(\"I am in calling {}\".format(np.shape(input)))\n x= self.cnn_1.call(input)\n l2 = self.cnn_1.l2loss()\n x = self.cnn_2.call(x)\n l2 += self.cnn_2.l2loss()\n x = self.pool_1.call(x)\n\n x = self.cnn_3.call(x)\n l2 += self.cnn_3.l2loss()\n x = self.pool_2.call(x)\n\n x = self.flat.call(x)\n x = self.fc_1.call(x)\n output = self.softmax.call(x)\n return output, l2\n\ndef Big_Train():\n logger = Logging(base_directory, 10, 20, 100) # makes logging object\n print(\"Is there a GPU available: \"),\n print(tf.test.is_gpu_available())\n print(\"*****************Training*****************\")\n\n print(\"loading dataset\")\n DM = DatasetMaker_Universal(DP, Hyperparameters.MODE_OF_LEARNING)\n\n optimizer = tf.keras.optimizers.Adam(learning_rate = Hyperparameters.LEARNING_RATE) #can use a changing learning rate\n loss_function = tf.keras.losses.CategoricalCrossentropy()\n\n\n summary_writer = tf.summary.create_file_writer(logdir=base_directory)\n print(\"starting training\")\n\n print(\"Making model\")\n model = Model(DM)\n try:\n semantic = input(\"restore model? 
(y,n)\")\n if semantic == \"y\":\n model.build_model_from_pickle(base_directory + \"SAVED_WEIGHTS.pkl\")\n else:\n model.build_model()\n except:\n model.build_model()\n\n\n tf.summary.trace_on(graph=True, profiler=False)\n\n\n for epoch in range(1001):\n data, label = DM.next_epoch_batch()\n\n\n with tf.GradientTape() as tape:\n predictions, l2_loss = model.call(data) #this is the big call\n\n pred_loss = loss_function(label, predictions) #this is the loss function\n pred_loss = pred_loss + Hyperparameters.L2WEIGHT * l2_loss #this implements lasso regularization\n\n if epoch == 0: #creates graph\n with summary_writer.as_default():\n tf.summary.trace_export(name=\"Graph\", step=0, profiler_outdir=base_directory)\n\n if epoch % 50 == 0: #takes care of validation accuracy\n valid_accuracy = Validation(model, DM)\n with summary_writer.as_default():\n logger.log_valid(valid_accuracy, epoch)\n\n with summary_writer.as_default(): #this is the big player logger and printout\n logger.log_train(epoch, predictions, label, pred_loss, l2_loss, weight_bias_list)\n\n gradients = tape.gradient(pred_loss, weight_bias_list)\n optimizer.apply_gradients(zip(gradients, weight_bias_list))\n\n Test_live(model, DM)\n\ndef Validation(model, datafeeder):\n print(\"\\n##############VALIDATION##############\\n\")\n\n data, label = datafeeder.valid_batch()\n\n predictions, l2loss = model.call(data)\n assert len(label) == len(predictions)\n valid_accuracy = accuracy(predictions, label)\n print(\"This is the validation set accuracy: {}\".format(valid_accuracy))\n return valid_accuracy\n\n\ndef Test_live(model, datafeeder):\n print(\"\\n##############TESTING##############\\n\")\n\n data, label = datafeeder.test_batch()\n\n predictions, l2loss = model.call(data)\n Logging.test_log(base_directory, predictions, label, \"\")\n\n print(\"This is the test set accuracy: {}\".format(accuracy(predictions, label)))\n right, wrong, wrong_index = record_error_with_labels(data, label, predictions)\n ConfusionMatrixVisualizer(name=name, version=version, testTag = \"\")\n return right, wrong, wrong_index\n\ndef Test():\n print(\"Making model\")\n testTag = \"_bigbedroom_only\"\n DM = DatasetMaker_Universal(DP, Hyperparameters.MODE_OF_LEARNING)\n model = Model(DM)\n model.build_model_from_pickle(base_directory + \"SAVED_WEIGHTS.pkl\")\n\n data, label = DM.test_batch()\n\n #data = data[0] # this is because we now have multiple images in the pickle\n predictions, l2loss = model.call(data)\n Logging.test_log(base_directory, predictions, label, testTag)\n\n print(\"This is the test set accuracy: {}\".format(accuracy(predictions, label)))\n ConfusionMatrixVisualizer(name = name, version = version, testTag = testTag)\n\n\ndef main():\n print(\"Starting the program!\")\n query = input(\"What mode do you want? 
Train (t) or Test from model (m)?\\n\")\r\n    if query == \"t\":\r\n        Big_Train()\r\n    if query == \"m\":\r\n        Test()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"models/BasicCNN.py","file_name":"BasicCNN.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"455709527","text":"import telebot\r\nimport config\r\nimport translator\r\n\r\nimport database\r\nfrom telebot import types\r\n\r\n\r\nbot = telebot.TeleBot(config.BOT_TOKEN)\r\nusers_dict = database.server_start(config.FILE_WITH_USERS)\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef welcome(message):\r\n    global users_dict\r\n\r\n    database.search_lang(users_dict, message.chat.id, config.FILE_WITH_USERS)\r\n\r\n    bot.send_message(message.chat.id,\r\n                     \"Welcome!\"\r\n                     \"\\n\\nI am a bot built to translate the text you enter into one of 16 available languages.\"\r\n                     \"\\n\\nAll you need to do is choose the language to translate the text into, and I will do the rest.\\n\"\r\n                     \"English is set by default.\"\r\n                     \"\\n\\n/lang - to change the language\".format(\r\n                         message.from_user, bot.get_me()),\r\n                     parse_mode='html')\r\n\r\n\r\n@bot.message_handler(commands=['lang'])\r\ndef change_language(message):\r\n    bot.delete_message(message.chat.id, message.message_id)\r\n    markup = types.InlineKeyboardMarkup(row_width=4)\r\n    item1 = types.InlineKeyboardButton(\"English\", callback_data='English en')\r\n    item2 = types.InlineKeyboardButton(\"Italian\", callback_data='Italian it')\r\n    item3 = types.InlineKeyboardButton(\"Spanish\", callback_data='Spanish es')\r\n    item4 = types.InlineKeyboardButton(\"Chinese\", callback_data='Chinese zh')\r\n    item5 = types.InlineKeyboardButton(\"German\", callback_data='German de')\r\n    item6 = types.InlineKeyboardButton(\r\n        \"Norwegian\", callback_data='Norwegian no')\r\n    item7 = types.InlineKeyboardButton(\"Polish\", callback_data='Polish pl')\r\n    item8 = types.InlineKeyboardButton(\r\n        \"Portuguese\", callback_data='Portuguese pt')\r\n    item9 = types.InlineKeyboardButton(\"Russian\", callback_data='Russian ru')\r\n    item10 = types.InlineKeyboardButton(\"Turkish\", callback_data='Turkish tr')\r\n    item11 = types.InlineKeyboardButton(\r\n        \"Ukrainian\", callback_data='Ukrainian uk')\r\n    item12 = types.InlineKeyboardButton(\"French\", callback_data='French fr')\r\n    item13 = types.InlineKeyboardButton(\"Czech\", callback_data='Czech cs')\r\n    item14 = types.InlineKeyboardButton(\"Hindi\", callback_data='Hindi hi')\r\n    item15 = types.InlineKeyboardButton(\r\n        \"Esperanto\", callback_data='Esperanto eo')\r\n    item16 = types.InlineKeyboardButton(\r\n        \"Japanese\", callback_data='Japanese ja')\r\n    markup.add(item1, item2, item3, item4, item5, item6, item7, item8,\r\n               item9, item10, item11, item12, item13, item14, item15, item16)\r\n    bot.send_message(message.chat.id,\r\n                     \"Choose a language, currently set to \" + database.search_lang(users_dict,\r\n                                                                                   message.chat.id,\r\n                                                                                   config.FILE_WITH_USERS)[0]\r\n                     + \":\".format(message.from_user, bot.get_me()),\r\n                     parse_mode='html', reply_markup=markup)\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef translate(message):\r\n    bot.send_message(message.chat.id,\r\n                     translator.trans(message.text,\r\n                                      users_dict.get(str(message.chat.id))[1]))\r\n\r\n\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef callback_inline(call):\r\n    global users_dict\r\n    database.update(str(call.message.chat.id),\r\n                    call.data, config.FILE_WITH_USERS)\r\n    
users_dict.update({str(call.message.chat.id): call.data.split()})\r\n    bot.delete_message(call.message.chat.id, call.message.message_id)\r\n    bot.send_message(call.message.chat.id,\r\n                     \"You selected \" + users_dict.get(str(call.message.chat.id))[0])\r\n\r\n\r\nprint(\"To shut down the server correctly: CTRL+C and then press ENTER\")\r\nbot.polling(none_stop=True)\r\n","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"397858335","text":"import matplotlib\nmatplotlib.use('Qt5Agg')\n\nimport queue\nimport sys\n\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.pylab as plt\nimport numpy as np\nimport sounddevice as sd\n\n\ndef int_or_str(text):\n    \"\"\"Helper function for argument parsing.\"\"\"\n    try:\n        return int(text)\n    except ValueError:\n        return text\n\n\nchannels = [1]      # input channels to plot\ndevice = 6          # input device (numeric ID or substring)\nwindow = 100        # visible time slot (ms)\ninterval = 30       # minimum time between plot updates (ms)\nblocksize = None    # block size (in samples)\nsamplerate = None   # sampling rate of audio device\ndownsample = 1      # display every Nth sample\n\nmapping = [c - 1 for c in channels]  # Channel numbers start with 1\nq = queue.Queue()\n\n\ndef audio_callback(indata, frames, time, status):\n    \"\"\"This is called (from a separate thread) for each audio block.\"\"\"\n    if status:\n        print(status, file=sys.stderr)\n    # Fancy indexing with mapping creates a (necessary!) copy:\n    q.put(indata[::downsample, mapping])\n\n\ndef update_plot(frame):\n    \"\"\"This is called by matplotlib for each plot update.\n\n    Typically, audio callbacks happen more frequently than plot updates,\n    therefore the queue tends to contain multiple blocks of audio data.\n\n    \"\"\"\n    global plotdata\n    while True:\n        try:\n            data = q.get_nowait()\n        except queue.Empty:\n            break\n        shift = len(data)\n        plotdata = np.roll(plotdata, -shift, axis=0)\n        plotdata[-shift:, :] = data\n    for column, line in enumerate(lines):\n        line.set_ydata(plotdata[:, column])\n    return lines\n\n\ntry:\n    if samplerate is None:\n        device_info = sd.query_devices(device, 'input')\n        samplerate = device_info['default_samplerate']\n\n    length = int(window * samplerate / (1000 * downsample))\n    plotdata = np.zeros((length, len(channels)))\n\n    fig, ax = plt.subplots()\n    lines = ax.plot(plotdata)\n    if len(channels) > 1:\n        ax.legend(['channel {}'.format(c) for c in channels],\n                  loc='lower left', ncol=len(channels))\n    ax.axis((0, len(plotdata), -1, 1))\n    ax.set_yticks([0])\n    ax.yaxis.grid(True)\n    ax.tick_params(bottom=False, top=False, labelbottom=False,\n                   right=False, left=False, labelleft=False)\n    fig.tight_layout(pad=0)\n\n    stream = sd.InputStream(\n        device=device, channels=max(channels),\n        samplerate=samplerate, callback=audio_callback)\n    ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)\n    with stream:\n        plt.show()\nexcept Exception as e:\n    print(type(e).__name__ + ': ' + str(e))\n","sub_path":"plotters/plottest.py","file_name":"plottest.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291448952","text":"from flask import Flask, escape, request\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom json import dumps\nfrom flask_jsonpify import jsonify\nimport requests\nimport os\n\nfrom config import app_config\n\n\ndb_connect = 
create_engine('sqlite:///bravo.db')\n\n# config_name = os.getenv('FLASK_CONFIG')\n\nconfig_name = 'development'\n\napp = Flask(__name__)\napp.config.from_object(app_config[config_name])\napp.config.from_pyfile('config.py')\n\napi = Api(app)\n\n\n@app.route('/')\ndef index():\n    name = request.args.get(\"name\", \"API\")\n    return f'Bravo Currency Conversion, {escape(name)}!'\n\n\nclass UpdateTables(Resource):\n    def get(self):\n        response = requests.get('https://api.github.com')\n        if response.status_code == 200:\n            result = {\n                \"data\": {\n                    \"message\": \"SUCCESS\"\n                }\n            }\n        elif response.status_code == 404:\n            result = {\n                \"data\": {\n                    \"message\": \"NOT FOUND\"\n                }\n            }\n        return jsonify(result)\n\n\nclass CurrencyConverter(Resource):\n    def get(self):\n        conn = db_connect.connect()\n        query = conn.execute(\"Select * from CurrencyConverter\")\n        return {'Price': [i[0] for i in query.cursor.fetchall()]}\n\n\nclass CurrencyList(Resource):\n    def get(self):\n        conn = db_connect.connect()\n        query = conn.execute(\"Select * from CurrencyList\")\n        result = {'data': [dict(zip(tuple(query.keys()), i))\n                           for i in query.cursor]}\n        return jsonify(result)\n\n\napi.add_resource(CurrencyConverter, '/price')\napi.add_resource(CurrencyList, '/list')\napi.add_resource(UpdateTables, '/update')\n\nif __name__ == '__main__':\n    app.run(port='5052',\n            debug=True)\n","sub_path":"python_api/bravoapi.py","file_name":"bravoapi.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"553204555","text":"#!/usr/bin/python3\n#--coding:utf-8--\nclass Restaurant():\n\n# Initialize parameters and define instance variables\n    def __init__(self, restaurant_name, cuisine_type):\n        self.restaurant_name = restaurant_name\n        self.cuisine_type = cuisine_type\n\n    def describe_restaurant(self):\n        print(\"Our restaurant's name is \" + self.restaurant_name.title() + \".\")\n        print(\"And our restaurant's cuisine type is \" + self.cuisine_type + \".\\n\")\n\n    def open_restaurant(self):\n        print(\"Now our restaurant is opening.\")\n        print(\"Welcome to \" + self.restaurant_name.title() +\".\\n\")\n\n# Create instances of the Restaurant class (pass arguments to the class and assign the result to a variable)\nrestaurant = [\n    Restaurant(\"liuyishou\",\"chuanwei\"),\n    Restaurant(\"laobeijing\",\"jingwei\"),\n    Restaurant(\"long jiang zhu jiao\",\"yueshi\"),\n    ]\n\n# Call the methods of the Restaurant class\nfor rt in restaurant:\n    rt.describe_restaurant()\n    rt.open_restaurant()\n","sub_path":"ex19.py","file_name":"ex19.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403015544","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 13 13:53:06 2017\n\n@author: rush\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport logging\nimport os \n\nfrom tqdm import tqdm as tqdm\nfrom sqlalchemy import create_engine\nfrom datetime import datetime as dt\n\nlogging.basicConfig(format='%(asctime)s %(message)s', \n                    handlers=[logging.FileHandler(\"etl_mta.log\"),\n                              logging.StreamHandler()], level=logging.DEBUG)\n\ndef convert_to_datetime(row):\n    return pd.to_datetime(row['date']+\" \"+row['time'], format=\"%m/%d/%Y %H:%M:%S\")\n\ndef return_key(x):\n    return x[0] + x[1] + x[2]\n\ndef process_df(df):\n    df.index = df.apply(convert_to_datetime, axis=1)\n    \n    df['a_key'] = df[['C_A','UNIT','SCP']].apply(return_key, axis=1)\n    \n    if not os.path.isfile('MTA_q3.csv'):\n        df.to_csv('MTA_q3.csv',index_label='timestamp')\n    else:\n        df.to_csv('MTA_q3.csv',mode='a', 
index_label='timestamp', header=None)\n \n\nchunk_size = 20000\nj = 0\n\n\nquarter_3_range = [str(item.date()) for item in pd.date_range(start='07-01-2013', \n end='09-30-2013')]\n\n# # Takes 588 seconds: completed 2855479 (2860000) rows\n# start = dt.now()\n# for df in pd.read_csv('Final_MTA_data.txt', chunksize=chunk_size, iterator=True, encoding='utf-8'):\n# process_df(df)\n\n# j+=1\n# print('{} seconds: completed {} rows'.format((dt.now() - start).seconds, j*chunk_size))\n\n\ndata = pd.read_csv('MTA_q3.csv')\n \ndata['timestamp'] = pd.to_datetime(data['timestamp'])\n\ndata.index = data['timestamp']\n\ndata.drop('timestamp', axis =1, inplace=True)\n\ndef process_tallies(lst_days):\n for day in lst_days:\n logging.info(\"Processing date : {}\".format(day))\n process_day(data, day)\n\n\ndef process_day(data, day):\n df = data[day]\n\n for key in tqdm(df['a_key'].unique()):\n \n observations = df[df['a_key'] == key].copy()\n \n observations['val_entries'] = observations.entries - observations.entries.shift(1)\n \n observations['val_exits'] = observations.exits - observations.exits.shift(1)\n \n mask = (observations['val_entries'] <= 0) & (observations['val_exits'] <= 0) | \\\n ( (observations['val_entries'] >= 5000) & (observations['val_exits'] >= 5000))\n \n observations = observations[mask]\n observations.drop(['entries','exits','date','time'], inplace = True, axis = 1)\n \n if not os.path.isfile('messy_data/{}.csv'.format(day)):\n observations.to_csv('messy_data/{}.csv'.format(day),index_label='timestamp')\n else:\n observations.to_csv('messy_data/{}.csv'.format(day),mode = 'a', index_label='timestamp', header=None)\n\n\ndef chunks(L, n):\n \"\"\" Yield successive n-sized chunks from L.\n \"\"\"\n for i in range(0, len(L), n):\n yield L[i:i+n]\n\ngenerate_dates = chunks(quarter_3_range,23)\n\nimport multiprocessing as mp\n\nprocesses = [mp.Process(target=process_tallies, args=(x,)) for x in generate_dates]\n\n\n# Run processes\nfor p in processes:\n p.start()\n\n# Exit the completed processes\nfor p in processes:\n p.join()\n\n\n# process_tallies(['2013-09-28','2013-09-29','2013-09-30'])?\n\n\n\n\n\n\n\n\n\n","sub_path":"etl_messy.py","file_name":"etl_messy.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"442339213","text":"import redis\n# import logging.handlers\nimport time\nimport multiprocessing as mp\nimport flask\nfrom flask_cors import CORS\n# from flask_wtf.csrf import CSRFProtect\n# from flask_cache import Cache\nimport twitter\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport json\nfrom raven.contrib.flask import Sentry\n\nfrom .config import *\n\n\n# Bridge Object ##\nclass Bridge(object):\n app = None\n services = []\n flask = None\n # csrf = None\n sentry = None\n # cache = None\n updater_request_status = 0\n\n def __init__(self):\n # Load Needed Flask Functions for Bridge Extensions\n self.flask = flask\n self.app = flask.Flask(__name__)\n self.sentry = Sentry(\n self.app,\n dsn='https://7b8c5989c8ce4e0e8a596864aecb630c:bd9a209aba8547c4a6689c6f4428ee13@sentry.io/198489')\n CORS(self.app)\n # CORS(\n # self.app,\n # resources={\n # r\"/get/*\": {\n # \"origins\": \"*\"}})\n # WTF_CSRF_SECRET_KEY = 'TOKEN'\n # self.csrf = CSRFProtect(self.app)\n # self.cache = Cache(self.app, config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_HOST': REDIS_HOST})\n\n def apps(self):\n # @self.csrf.exempt\n @self.app.route(\"/update/trigger\")\n def updater():\n return 
self.updater()\n\n def start(self):\n self.apps()\n self.app.run(host=BRIDGE_HOST)\n\n def updater(self):\n logging.info('Updater Started')\n r = self._updater_request(UPDATER_URL)\n if r:\n logging.debug('Updater lst file received')\n self._updater_lst(r.json())\n\n def _updater_lst(self, lst):\n errors = []\n for fi, uri in list(lst.items()):\n r = self._updater_request(uri)\n if not r:\n errors.append((\n self.updater_request_status,\n fi,\n uri\n ))\n continue\n if not self._updater_json_write(fi, r.json()):\n errors.append((\n None,\n fi,\n r.json()\n ))\n logging.error(\n \"!!!THE FILE %s MAY BE CORRUPTED OR MISSING!!!\" % fi.upper()\n )\n continue\n if len(errors):\n logging.warning('%d errors found.' % len(errors))\n\n def _updater_request(self, _url):\n r = requests.get(\n \"%s://%s:%s/%s\" % (\n UPDATER_PROTOCOL,\n UPDATER_HOST,\n UPDATER_PORT,\n _url\n ),\n auth=HTTPBasicAuth(\n UPDATER_USER,\n UPDATER_PASS\n ),\n timeout=60\n )\n if r.status_code == 200:\n return r\n else:\n self.updater_request_status = r.status_code\n return False\n\n @staticmethod\n def _updater_json_write(_file, _json):\n try:\n with open(\n '%s/config/%s' % (BASE_DIR, _file),\n \"w\"\n ) as _cfg_file:\n json.dump(_json, _cfg_file)\n except:\n return False\n return True\n\n\nclass Service(object):\n api = None\n api_tokens = None\n queue = None\n TwitterError = None\n\n def __init__(self):\n # Connect to REDIS\n self.queue = redis.Redis(\n host=REDIS_HOST,\n port=REDIS_PORT,\n decode_responses=True)\n # Load Twitter Exceptions Objects\n self.TwitterError = twitter.TwitterError\n\n def get_json(self, json_path):\n try:\n with open('%s/%s' % (BASE_DIR, json_path), \"r\") as json_file:\n return json.load(json_file)\n except:\n return False\n\n def get_key_json(self, key, json_path, default=0):\n json_file = self.get_json(json_path)\n if key in json_file:\n return json_file[key]\n else:\n logging.log(\n logging.WARN,\n 'Keyword %s not found in %s. Returning default #%d' % (key, json_path, default))\n return list(json_file.items())[default]\n\n def connect_twitter_api(self, key=STREAM_API):\n self.api_tokens = self.get_key_json(key, CONFIG_APIKEYS)\n self.api = twitter.Api(\n self.api_tokens['consumer_key'],\n self.api_tokens['consumer_secret'],\n self.api_tokens['access_token'],\n self.api_tokens['access_token_secret'],\n sleep_on_rate_limit=GLOBAL_RATESLEEP)\n\n\nclass Process(object):\n process = []\n loaded = {}\n\n def __init__(self):\n pass\n\n def start(self):\n logging.log(logging.INFO, 'Starting Process Sentinel Service')\n for pr in self.process:\n pr.daemon = True\n # Don't use try here. 
So if errors occurs, the service wont start a useless loop\n pr.start()\n for pr in self.process:\n pr.join()\n self.sentinel()\n\n def add(self, name, target, args=None, kwargs=None):\n all_args = {\n 'target': target,\n 'name': name\n }\n if args:\n all_args['args'] = args\n if kwargs:\n all_args['kwargs'] = kwargs\n self.process.append(mp.Process(**all_args))\n self.loaded[name] = {\n 'target': target,\n 'name': name,\n 'args': args,\n 'kwargs': kwargs\n }\n logging.log(logging.INFO, 'Loaded %s to process list' % name)\n\n def sentinel(self, sleep=60):\n while True:\n errors = 0\n # Checking Process Status\n for pr in self.process:\n if not pr.is_alive():\n errors += 1\n # Found an error in a process\n logging.log(logging.ERROR, 'Process %s is dead' % pr.name)\n # Removing from active list\n self.process.remove(pr)\n try:\n # Try to load the process again\n self.process.append(self.loaded[pr.name])\n self.process[-1].start()\n # The process started success\n errors -= 1\n except:\n # Process Didn't Started\n logging.critical(\n 'Cant Restart Process %s' % pr.name,\n self.loaded[pr.name],\n pr)\n sleep_time = 10 if errors else sleep\n time.sleep(int(sleep_time))","sub_path":"i4media/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"174561885","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 01:34:46 2019\n\n@author: dr_d3mz\n\"\"\"\n\nimport pickle\nimport wikipedia\nimport random\nimport pandas as pd\nimport numpy as np\nfrom ast import literal_eval\nimport nltk\nimport enchant\nimport pycountry\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nimport urllib.request\nimport dummy_data_funcs as dm\nimport re\n\ndef get_wiki_url_from_search_term(word):\n \"\"\"\n Search a word meaning on wikipedia.\n \"\"\"\n wikipedia.set_lang('en')\n results = wikipedia.search(word) \n\n # get first result\n if results:\n page = wikipedia.page(results[0])\n url = page.url\n summary = page.summary\n content = page.content\n else:\n url = 'NA'\n summary = 'NA'\n content = 'NA'\n return url\n\n\n\n\"\"\"\nFunctions related to the wiki_entity_country_from_url.py\n\"\"\"\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\ndef text_from_html(body):\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts)\n\ndef get_entity_points(index):\n points = 0\n if 0 <= index < 5:\n points = 10\n elif 5<= index < 10:\n points = 5\n elif 10 <= index < 20:\n points = 2\n elif index > 20:\n points = 1\n return points\n\n# Get the total points for the entity across the whole article\ndef get_keyword_total_points(keyword, headline, text):\n import nltk\n sentences = nltk.sent_tokenize(text)\n points = 0\n if keyword in headline:\n points += 20\n for i in range(len(sentences)):\n temp = sentences[i]\n if keyword in temp:\n pnt = get_entity_points(i)\n points += pnt\n return points\n\n\ndef get_top_5_entity_countries(country_list, text): # there is a slight bug here in cases where an country may have multiple variations within the text, like united states for example, however its giving the right answer so we good\n \n country_points_list = []\n top_5_country_list = []\n for country in 
country_list:\n points = get_keyword_total_points(country, '', text)\n country_points_list.append(points)\n sorted_ind = sorted(range(len(country_points_list)), key=lambda k: country_points_list[k], reverse=True)\n sorted_country_list = [country_list[i] for i in sorted_ind]\n sorted_points_list = [country_points_list[i] for i in sorted_ind]\n \n try:\n top_5_countries = sorted_country_list[0:5]\n coverage = round(sorted_points_list[0]/sum(sorted_points_list),2)\n for countries in top_5_countries:\n top_5_country_list.append(countries)\n \n return top_5_country_list, coverage\n except:\n return ['Unknown'], 0\n\ndef get_name_from_wiki_url(url): # Strips out the name of an entity from its Wikipedia URL\n text = re.search(r'(?<=wiki/)[^.\\s]*',url)\n names1 = text.group(0)\n names2 = names1.split('_')\n if len(names2) > 1:\n names2 = ' '.join(names2)\n else:\n names2 = names2[0]\n return names2\n\ndef generate_country_alpha_dicts(): # This generates a dict of world countries and maps to their 2-digit ISO codes\n import pycountry\n #Create dictionary of countries and their 2 name list\n gb_countries = ['England', 'Scotland', 'Northern Ireland', 'Wales']\n country_names_list = []\n country_alpha_list = []\n for country in gb_countries:\n country_names_list.append(country)\n country_alpha_list.append('GB')\n \n # Accomodate for other ways america is said (may need to add some more exceptions here)\n us_countries = ['America', 'U.S', 'U.S.A']\n for country in us_countries:\n country_names_list.append(country)\n country_alpha_list.append('US')\n \n for country in pycountry.countries:\n country_names_list.append(country.name)\n country_alpha_list.append(country.alpha_2)\n \n # Add in the edge case for when its unknown\n country_names_list.append('Unknown')\n country_alpha_list.append('Unknown')\n \n country_alpha_dict = dict(zip(country_names_list, country_alpha_list))\n alpha_to_country_dict = dict(zip(country_alpha_list, country_names_list))\n \n # Save the country_alpha dict, and alpha_country dict into a pickle *** Change this filename to your current one\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"wb\") as output_file:\n pickle.dump(country_alpha_dict, output_file)\n \n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/alpha_country_dict.pickle', \"wb\") as output_file:\n pickle.dump(alpha_to_country_dict, output_file)\n \n return None\n\n\n\ndef get_infobox_text(soup): # This extracts all the text from the infobox card of a wiki page\n infobox_list = ['infobox', 'infobox vcard', 'infobox biography vcard', 'infobox geography vcard']\n for vcards in infobox_list: # loops through a number of infobox vcards to find the one that works\n try:\n info_text = soup.find_all(\"table\",vcards)[0]\n if len(info_text) > 0:\n break\n except:\n continue\n try:\n col_len = len(info_text)\n except:\n info_text = []\n return info_text\n\ndef get_entity_country_from_infobox(url): # Get the country of an entity from just the infobox text\n try:\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html,'lxml')\n \n except:\n text = re.search(r'(?<=wiki/)[^.\\s]*',url)\n encoded_name = urllib.parse.quote(text.group(0))\n url = url.replace(text.group(0),encoded_name)\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html,'lxml')\n\n \n info_text = get_infobox_text(soup)\n \n if len(info_text) > 0:\n info_text_list = []\n for cols in info_text:\n info_text_list.append(cols.text)\n \n text = '. 
'.join(info_text_list)\n \n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n country_alpha_dict = pickle.load(input_file)\n \n country_list_full = []\n \n for country in country_alpha_dict.keys(): # Find a list of countries appearing in the text\n if country in text:\n country_list_full.append(country)\n \n countries, confidence = get_top_5_entity_countries(country_list_full, text)\n \n for country in countries: # CLean up the list of countries\n for cntry in countries:\n if country in cntry:\n if country != cntry:\n countries.remove(country)\n \n country_final = [] # Convert the countries into their 2 digits ISO codes\n for country in countries:\n country_final.append(country_alpha_dict[country])\n #print(country_final)\n\n if len(country_final) > 0:\n unique_countries = list(set(country_final))\n return unique_countries[0], confidence, unique_countries[1:], unique_countries\n else:\n return None\n else:\n return None\n\ndef get_entity_country_from_wiki_text(url):\n\n try:\n name = get_name_from_wiki_url(url)\n text = wikipedia.summary(name, sentences=2)\n except:\n try:\n html = urllib.request.urlopen(url).read()\n text = text_from_html(html)\n except:\n text = re.search(r'(?<=wiki/)[^.\\s]*',url)\n encoded_name = urllib.parse.quote(text.group(0))\n url = url.replace(text.group(0),encoded_name)\n html = urllib.request.urlopen(url).read()\n text = text_from_html(html)\n\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n country_alpha_dict = pickle.load(input_file)\n \n country_list_full = []\n for country in country_alpha_dict.keys():\n if country in text:\n country_list_full.append(country)\n \n countries, confidence = get_top_5_entity_countries(country_list_full, text)\n #print(countries)\n \n country_final = [] # Convert the countries into their 2 digits ISO codes\n for country in countries:\n country_final.append(country_alpha_dict[country])\n #print(country_final)\n\n if len(country_final) > 0:\n unique_countries = list(set(country_final))\n return unique_countries[0], confidence, unique_countries[1:], unique_countries\n else:\n return None\n\ndef get_wiki_country(url):\n name = get_name_from_wiki_url(url)\n try:\n main_country, confidence, other_countries, all_countries = get_entity_country_from_infobox(url)\n if len(all_countries) > 2:\n main_country, confidence, other_countries, all_countries = get_entity_country_from_wiki_text(url)\n except:\n main_country, confidence, other_countries, all_countries = get_entity_country_from_wiki_text(url)\n \n if len(all_countries) > 2:\n main_country = 'Multiple'\n other_countries = all_countries\n return main_country, confidence, other_countries, all_countries\n\ndef get_wiki_summary(url):\n #print(url) \n try:\n name = get_name_from_wiki_url(url)\n# print(name)\n summ = wikipedia.summary(name, sentences=3)\n# print('yass1')\n except:\n# print('yass2')\n try:\n text = re.search(r'(?<=wiki/)[^.\\s]*',url)\n encoded_name = urllib.parse.quote(text.group(0))\n url2 = url.replace(text.group(0),encoded_name)\n html = urllib.request.urlopen(url2).read()\n text = text_from_html(html)\n sentences = nltk.sent_tokenize(text)\n summ = ' '.join(sentences[2:4])\n except:\n summ = 'Not Available'\n \n return summ\n\n\n\n\n\"\"\"\nObjective: To build functionality that allows you to put in a search term, and \nget the wikipedia page link for that search term, then analyse the wikipedia page\nfor the 
search term and determine what country the individual is from\n\n- Get the country\n- Look at the country of its most related entities and see if it agrees with what you have determined\n\"\"\"\n## Load country_alpha and alpha_country dicts\ntry:\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n country_alpha_dict = pickle.load(input_file)\nexcept:\n generate_country_alpha_dicts()\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n country_alpha_dict = pickle.load(input_file)\n print('Country dicts have been generated and saved')\n \ntry:\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/alpha_country_dict.pickle', \"rb\") as input_file:\n alpha_to_country_dict = pickle.load(input_file)\nexcept:\n generate_country_alpha_dicts()\n with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/alpha_country_dict.pickle', \"rb\") as input_file:\n alpha_to_country_dict = pickle.load(input_file)\n print('Country dicts have been generated and saved')\n \ndef get_entity_country_from_wikipedia(entity):\n \n url = get_wiki_url_from_search_term(entity)\n\n # Bespoke function to get summary built by myself that allows for error handling if the wikipedia approach doesnt work\n summ = get_wiki_summary(url)\n# print(summ)\n\n # Run the function that takes a wikipedia URL and return the that the entity belongs to (@Nas make sure you look into this and work to ensure you understand how it works)\n main_country, confidence, other_countries, all_countries = get_wiki_country(url)\n \n return main_country, all_countries, summ, url\n\n\n\n\n\n\n\n\n\n\"\"\"\n******************************************************************************************************************************************************\nStart\n******************************************************************************************************************************************************\n\"\"\"\n\n\"\"\"\nExample implementation\n\"\"\"\n\n# Get the url and summary for a search term\nexample_entity = 'Melania Trump'\n\nentity_country, entity_all_countries, entity_summary, entity_url = get_entity_country_from_wikipedia(example_entity)\nprint(entity_country)\nprint()\nprint(entity_all_countries)\nprint()\nprint(entity_summary)\nprint()\nprint(entity_url)\n\n\"\"\"\nDo a webhose extraction for Nigeria and run the functionality\n\"\"\"\n\nNG = \"\"\"\n (site:vanguardngr.com OR site:punchng.com OR site:naijaloaded.com.ng OR site:guardian.ng OR site:dailypost.ng OR site:pulse.ng OR \n site:nigerianeye.com OR site:gistmania.com OR site:targist.com OR site:liveofofo.com ) is_first:true has_video:false \n \"\"\"\n \noutput_df = dm.get_webhouse_articles(NG, 0.25)\noutput_df['country'] = 'NG' # sets the country to NG since this is where we have extracted the content from\nprint(len(output_df))\nprint(output_df['source'].value_counts()) # look at the number of returns for each news source and then decide \nprint(output_df.columns)\n\n\n## Run through the articles, get the entities, get the entity countries and then determine\n## if that article belongs to the country\n## improvement: Do the analysis for only the top 3 entities on that article in order to save you time\n## improvement: Work on script that runs through the entities found that day, and updates the entity document if a new entity has been found\n## Investigate why some entity 
classifications don't seem to be making sense, e.g Davido doesnt seem to be detected as naija\nfor i in range(len(output_df)):\n print('____________________')\n print(i)\n print('____________________')\n \n entity_name_list = []\n entity_type_list = []\n entity_sentiment_list = []\n \n # Get entities, article country and article title\n entities = output_df.iloc[i]['entities']\n country = output_df.iloc[i]['country']\n title = output_df.iloc[i]['title']\n \n persons = entities['persons']\n for ent in persons:\n entity_name_list.append(ent['name'])\n entity_type_list.append('Person')\n entity_sentiment_list.append(ent['sentiment'])\n \n organisations = entities['organizations']\n for ent in organisations:\n entity_name_list.append(ent['name'])\n entity_type_list.append('Organisation')\n entity_sentiment_list.append(ent['sentiment'])\n \n locations = entities['locations']\n for ent in locations:\n entity_name_list.append(ent['name'])\n entity_type_list.append('Location')\n entity_sentiment_list.append(ent['sentiment'])\n \n entity_country_list = []\n for ents in entity_name_list:\n try:\n entity_country, entity_all_countries, entity_summary, entity_url = get_entity_country_from_wikipedia(ents)\n entity_country_list += entity_all_countries\n except:\n# print('Unable to get country for %s' % ents)\n pass\n \n print(title)\n print(country)\n print(entity_country_list)\n print()\n if country in entity_country_list:\n print('The article belongs to Nigeria')\n else:\n print('The article does not belong to Nigeria')\n print()\n print()\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"Development/.ipynb_checkpoints/get_entity_country_from_wikipedia-checkpoint.py","file_name":"get_entity_country_from_wikipedia-checkpoint.py","file_ext":"py","file_size_in_byte":15756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"372872807","text":"#!/usr/bin/env python\nimport sys\nimport pickle\nimport demod\nimport rtlsdr\nimport player\nimport constants\nimport multiprocessing\n\nmax_calls = 7\n@rtlsdr.limit_calls(max_calls)\ndef sdr_callback(samples, sdr):\n fm = demod.demod_fm(samples)\n aud_que.put(fm)\n rds_que.put(fm)\n\ndef read_file(filename):\n with open(filename, 'rb') as f:\n samples = pickle.load(f)\n fm = demod.demod_fm(samples)\n aud_que.put(fm)\n rds_que.put(fm)\n\ndef read_rtlsdr(station):\n sdr = rtlsdr.RtlSdr()\n sdr.gain = 400\n sdr.sample_rate = constants.fs\n sdr.center_freq = float(station) * 1e6\n sdr.read_samples_async(\n callback=sdr_callback, num_samples=constants.ns, context=sdr)\n sdr.close()\n\nif __name__ == \"__main__\":\n arg1 = sys.argv[1]\n arg2 = sys.argv[2]\n\n aud_que = multiprocessing.Queue()\n rds_que = multiprocessing.Queue()\n aud_proc = multiprocessing.Process(target=player.receive, args=(aud_que,))\n rds_proc = multiprocessing.Process(target=demod.receive, args=(rds_que,))\n\n aud_proc.start()\n rds_proc.start()\n\n if arg1 == '-f':\n read_file(arg2)\n\n elif arg1 == '-r':\n read_rtlsdr(arg2)\n\n aud_que.put(None)\n rds_que.put(None)\n\n aud_proc.join()\n rds_proc.join()\n\n#geeksforgeeks.com/multiprocessing\n","sub_path":"fm/fmrx.py","file_name":"fmrx.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247740978","text":"from abc import ABC\nfrom datetime import datetime\n\n\n__all__ = [\"File\", \"Folder\", \"FileVersion\"]\n\n\nclass Component(ABC):\n \"\"\"\n 
Base class for creating local components of the remote file storage system.\n\n .. note:: Subclasses have to overload the following methods:\n - ``shallow_update_content``\n - ``get_component``\n - ``is_empty``\n\n .. seealso:: File(), Folder()\n \"\"\"\n\n def __init__(self, name, refresh_date=None, parent=None):\n \"\"\"\n Initialize.\n\n :param name: The name of the component\n :param refresh_date: The last refresh date of the component\n :param parent: The parent component\n\n :type name: str\n :type refresh_date: datetime, None\n :type parent: Component, None\n \"\"\"\n\n assert isinstance(name, str), \"Component name has to be a str object\"\n self._name = name\n assert isinstance(refresh_date, datetime) or refresh_date is None, \"Refresh date has to be a datetime object or None\"\n self._refresh_date = refresh_date\n assert isinstance(parent, Component) or parent is None, \"Parent has to be None or a Component object\"\n self._parent = parent\n\n def __eq__(self, other):\n \"\"\"\n Define equal operator.\n\n :param other: Component's name\n :type other: str\n\n :return True if names are identical else False\n :rtype: bool\n \"\"\"\n\n assert isinstance(other, str), \"Object has to be a str object\"\n return True if self._name == other else False\n\n def get_location(self, header=None):\n \"\"\"\n Generate the Component's path.\n\n :param header: Initial path (used by the recursive calls)\n :type header: list, None\n\n :return The component's path\n :rtype: list, None\n \"\"\"\n\n assert isinstance(header, list) or header is None, \"Header has to be a list object or None\"\n header = list() if header is None else header\n header = [self._name] + header\n if self._parent is not None:\n return self._parent.get_location(header)\n else:\n assert header is not None, \"The component cannot at the same time has no parent and no header\"\n return header\n\n def shallow_update_content(self, component):\n raise NotImplementedError()\n\n def get_component(self, location=None):\n raise NotImplementedError()\n\n def shallow_update(self, component):\n \"\"\"\n Update superficial attributes with another component.\n\n .. note:: Superficial attributes are:\n - ``name``\n - ``refresh_date``\n\n :param component: The reference component\n :type component: Component\n \"\"\"\n\n assert isinstance(component, self.__class__), \"Component has to be a {} object\".format(self.__class__.__name__)\n assert component.name == self._name, \"Component's name doesn't correspond\"\n self._refresh_date = component.refresh_date\n\n @property\n def name(self):\n \"\"\"\n :return The name of the component\n :rtype: str\n \"\"\"\n\n return self._name\n\n @property\n def refresh_date(self):\n \"\"\"\n :return The last refresh date of the component\n :rtype: datetime\n \"\"\"\n\n return self._refresh_date\n\n @property\n def parent(self):\n \"\"\"\n :return The parent component of the component\n :rtype: Component\n \"\"\"\n\n return self._parent\n\n @parent.setter\n def parent(self, value):\n \"\"\"\n :param value: The new parent component\n :type: value: Component\n \"\"\"\n\n assert isinstance(value, Folder) or value is None, \"Parent has to be None or a Component object\"\n self._parent = value\n\n @property\n def is_empty(self):\n raise NotImplementedError()\n\n\nclass File(Component):\n \"\"\"\n Local representation of files in the remote file storage system.\n\n It contains file versions arranged by creation date in the remote file storage system.\n\n .. 
seealso:: FileVersion(), Folder(), Component()\n \"\"\"\n\n def __init__(self, name, refresh_date=None, parent=None):\n \"\"\"\n Initialize.\n\n :param name: The name of the component\n :param refresh_date: The last refresh date of the component\n :param parent: The parent component\n\n :type name: str\n :type refresh_date: datetime, None\n :type parent: Component, None\n \"\"\"\n\n super(File, self).__init__(name, refresh_date, parent)\n self._file_versions = {}\n\n def add_file_version(self, file_version):\n \"\"\"\n Add a new file version.\n\n :param file_version: The new file version\n :type file_version: FileVersion\n \"\"\"\n\n assert isinstance(file_version, FileVersion), \"File version has to be a FileVersion object\"\n assert file_version.creation_date not in self._file_versions, \"File version with this creation date already exists\"\n self._file_versions[file_version.creation_date] = file_version\n\n def remove_file_version(self, creation_date):\n \"\"\"\n Remove a file version.\n\n :param creation_date: creation date of the selected file version\n :type creation_date: datetime\n \"\"\"\n\n assert isinstance(creation_date, datetime), \"Creation_date has to be a Datetime object\"\n del self._file_versions[creation_date]\n\n def shallow_update_content(self, component):\n \"\"\"\n Update file versions according to the file versions in the reference file.\n\n :param component: The reference file\n :type component: File\n \"\"\"\n\n assert isinstance(component, File), \"Component has to be a File object\"\n self.shallow_update(component)\n\n current_file_versions_dates = set(self._file_versions.keys())\n update_file_versions_dates = set(component._file_versions.keys())\n\n unknown_file_versions_dates = update_file_versions_dates - current_file_versions_dates\n [self.add_file_version(component._file_versions[file_version_date]) for file_version_date in unknown_file_versions_dates]\n\n missing_file_versions_dates = current_file_versions_dates - update_file_versions_dates\n [self.remove_file_version(file_version_date) for file_version_date in missing_file_versions_dates]\n\n def get_component(self, location=None):\n \"\"\"\n Find a component according to the location.\n\n :param location: The component's path\n :type location: None\n :return: The selected component\n :rtype: Component\n \"\"\"\n\n assert location is None, \"Location has to be None\"\n return self\n\n @property\n def file_versions(self):\n \"\"\"\n :return: The file versions of the file\n :rtype: dict\n \"\"\"\n\n return self._file_versions\n\n @property\n def is_empty(self):\n \"\"\"\n :return: True if the file doesn't have file versions else False\n :rtype: bool\n \"\"\"\n\n return False if len(self._file_versions) > 0 else True\n\n\nclass Folder(Component):\n \"\"\"\n Local representation of folders in the remote file storage system.\n \"\"\"\n\n def __init__(self, name, refresh_date=None, parent=None):\n \"\"\"\n Initialize.\n\n :param name: The name of the component\n :param refresh_date: The last refresh date of the component\n :param parent: The parent component\n\n :type name: str\n :type refresh_date: datetime, None\n :type parent: Component, None\n \"\"\"\n\n super(Folder, self).__init__(name, refresh_date, parent)\n self._content = {}\n\n def add_content(self, *new_content):\n \"\"\"\n Add new content in the folder.\n\n :param new_content: Added component\n :type new_content: Component\n \"\"\"\n\n assert any([isinstance(new_component, Component) for new_component in new_content]), \"New content has to contain 
Component objects\"\n for new_component in new_content:\n assert new_component.name not in self._content, \"'{}' component already exists\".format(new_component.name)\n self._content[new_component.name] = new_component\n self._content[new_component.name].parent = self\n\n def remove_content(self, *names):\n \"\"\"\n Remove content in the folder.\n\n :param names: Removed component name\n :type names: str\n \"\"\"\n\n assert any([isinstance(name, str) for name in names]), \"Names have to be a str object\"\n for name in names:\n assert name in self._content, \"'{}' file doesn't exist\".format(name)\n del self._content[name]\n\n def shallow_update_content(self, component):\n \"\"\"\n Update content according to the content in the reference folder.\n\n :param component: The reference folder\n :type component: Folder\n \"\"\"\n\n assert isinstance(component, Folder), \"Component has to be a Folder object\"\n self.shallow_update(component)\n\n current_components_names = set(self._content.keys())\n update_components_names = set(component.content.keys())\n\n unknown_components_names = set.difference(update_components_names, current_components_names)\n [self.add_content(component.content[component_name]) for component_name in unknown_components_names]\n\n obsolete_components_names = set.difference(current_components_names, update_components_names)\n [self.remove_content(component_name) for component_name in obsolete_components_names]\n\n def get_component(self, location=None):\n \"\"\"\n Find a component according to the location.\n\n :param location: The component's path\n :type location: list, None\n :return: The selected component\n :rtype: Component\n \"\"\"\n\n if location is not None:\n assert isinstance(location, list), \"Location has to be a list object\"\n assert len(location) >= 1, \"Location's length has to be greater than or equal to 1\"\n return self._content[location[0]].get_component(location[1:] if len(location) > 1 else None)\n else:\n return self\n\n def __contains__(self, item):\n \"\"\"\n Check if the folder contains the component.\n\n .. 
note:: This is a recursive method.\n\n :param item: The path of the component\n :type item: list\n\n :return: True if the folder contains the component else False\n :rtype: bool\n \"\"\"\n\n assert isinstance(item, list), \"Item has to be a list object\"\n assert len(item) >= 1, \"Item's length has to be greater than or equal to 1\"\n assert isinstance(item[0], str), \"Elements contained in item have to be 'str' type\"\n\n copy_item = item.copy()\n\n for component in self._content.values():\n if copy_item[0] == component:\n if len(copy_item) == 1:\n return True\n else:\n if isinstance(component, Folder):\n copy_item = copy_item[1:]\n if copy_item in component:\n return True\n else:\n return False\n\n return False\n\n def create_hierarchy(self, location, final_component_is_file=False):\n \"\"\"\n Create a component and its folder tree.\n\n :param location: The path of the final component\n :param final_component_is_file: Define if the final component is a file or a folder\n\n :type location: list\n :type final_component_is_file: bool\n\n :return: The final component\n :rtype: Component\n \"\"\"\n\n assert isinstance(location, list), \"Item has to be a list object\"\n assert len(location) >= 1, \"Item's length has to be greater than or equal to 1\"\n assert isinstance(location[0], str), \"Elements contained in item have to be 'str' type\"\n if location[0] not in self._content:\n component = File(location[0]) if (len(location) == 1 and final_component_is_file is True) else Folder(location[0])\n self.add_content(component)\n else:\n if len(location) > 1:\n assert isinstance(self._content[location[0]], Folder), \"{} component has to be a Folder object but the current component is a file object\".format(location[0])\n component = self._content[location[0]]\n if len(location) == 1:\n return component\n else:\n new_location = location.copy()\n new_location = new_location[1:]\n return component.create_hierarchy(new_location, final_component_is_file=final_component_is_file)\n\n @property\n def content(self):\n \"\"\"\n :return: Folder's content\n :rtype: dict\n \"\"\"\n\n return self._content\n\n @property\n def is_empty(self):\n \"\"\"\n :return: True if the folder doesn't have components else False\n :rtype: bool\n \"\"\"\n\n return False if len(self._content) > 0 else True\n\n\nclass FileVersion:\n \"\"\"\n Local representation of file versions in the remote file storage system.\n \"\"\"\n\n def __init__(self, creation_date, identifier, checksum, content_type, additional_info):\n \"\"\"\n Initialize.\n\n :param creation_date: The creation date of the file version in the remote file storage system\n :type creation_date: datetime\n\n :param identifier: The unique identifier of the file version in the remote file storage system\n :type identifier: str\n\n :param checksum: The control element of the file version content\n :type checksum: str\n\n :param content_type: The content type of the file version\n :type content_type: str\n\n :param additional_info: Additional information\n :type additional_info: dict\n \"\"\"\n\n assert isinstance(creation_date, datetime), \"Creation date has to be datetime object\"\n self._creation_date = creation_date\n assert isinstance(identifier, str), \"File identifier has to be a str object\"\n self._identifier = identifier\n assert isinstance(checksum, str), \"File checksum has to be a str object\"\n self._checksum = checksum\n assert isinstance(content_type, str), \"File content type has to be a str object\"\n self._content_type = content_type\n assert 
isinstance(additional_info, dict), \"Additional information have to be contained in a dictionary\"\n self._additional_info = additional_info\n\n @property\n def creation_date(self):\n \"\"\"\n :return: The creation date of the file version in the remote file storage system\n :rtype: datetime\n \"\"\"\n\n return self._creation_date\n\n @property\n def identifier(self):\n \"\"\"\n :return: The unique identifier of the file version in the remote file storage system\n :rtype: str\n \"\"\"\n\n return self._identifier\n\n @property\n def checksum(self):\n \"\"\"\n :return: The control element of the file version content\n :rtype: str\n \"\"\"\n\n return self._checksum\n\n @property\n def content_type(self):\n \"\"\"\n :return: The content type of the file version\n :rtype: str\n \"\"\"\n\n return self._content_type\n\n @property\n def additional_info(self):\n \"\"\"\n :return: Additional information\n :rtype: dict\n \"\"\"\n\n return self._additional_info\n","sub_path":"storagehub/core/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":16060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"294096359","text":"from functools import wraps\nimport logging\nimport application.settings as settings\nfrom application.src import databases\nfrom application.src import utils\n\n# Logging handler\nlog = logging.getLogger(__name__)\n\n\ndef request_id(get_request_uid_func, log_hash=\"\"):\n \"\"\"\n Creates request_uid inside Redis if it does not exist. It avoids duplicate requests.\n \"\"\"\n def wrap(func):\n @wraps(func)\n def wrapped_f(self, *args):\n try:\n redis = databases.Redis.get_instance(\"redis-request-id\")\n except Exception as e:\n log.error(\"Could not send request to partner. Failed to check or create request id. \"\n \"Error: {0}. \"\n \"Requested URL: {1}. \"\n \"Operation Hash: {2}. \"\n .format(e, self.request.uri, log_hash))\n return self.error({\"success\": 0, \"message\": \"Failed to check or create request id. Request aborted.\"}, 500)\n \n try:\n uid = get_request_uid_func(self)\n if not uid:\n log.error(\"Could not send request to partner. Failed to check or create request id. \"\n \"Error: request id is mandatory. \"\n \"Requested URL: {0}. \"\n \"Operation Hash: {1}. \".format(self.request.uri, log_hash))\n return self.error({\"success\": 0,\n \"message\": \"Request aborted: request id is mandatory.\"}, 400)\n request_uid = redis.get(uid)\n request_uid = utils.decode(request_uid) if request_uid else None\n if request_uid == '0':\n log.error(\"Could not send request: duplicated operation for request id {0}. \"\n \"Requested URL: {1}. \"\n \"Operation Hash: {2}. \"\n .format(uid, self.request.uri, log_hash))\n return self.error({\"success\": 0,\n \"message\": \"Could not send request: duplicated operation for request id {0}. \"\n .format(uid)\n }, 400)\n if request_uid == '1':\n log.error(\"Could not send request: operation already successful by request id {0}. \"\n \"Requested URL: {1}. \"\n \"Operation Hash: {2}. \"\n .format(uid, self.request.uri, log_hash))\n return self.error(\n {\n \"success\": 0,\n \"message\": \"Could not send request: operation already successful by request id {0}. \"\n .format(uid)\n }, 403)\n\n log.info(\"Request Id key created with value 0 for uid: {0}. \"\n \"Requested URL: {1}. \".format(uid, self.request.uri, log_hash))\n redis.setex(uid, 0, settings.REQUEST_ID_PERIODICITY)\n \n except Exception as e:\n log.error(\"Could not check or create request id. 
Request aborted. \"\n \"Error: {0}. \"\n \"Requested URL: {1}. \"\n \"Operation Hash: {2}. \".format(e, self.request.uri, log_hash))\n return self.error({\"success\": 0, \"message\": \"Could not check or create request id. Request aborted.\"}, 500)\n\n return func(self, *args)\n return wrapped_f\n return wrap\n","sub_path":"globalsdp/Code/application/src/request_id.py","file_name":"request_id.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"108485369","text":"def merge_list(*args):\n \"\"\"\n 用于将多个列表合并成一个, 返回新列表并去重\n :param args: list\n :return: new list\n \"\"\"\n new_list = []\n for a in args:\n assert isinstance(a, list), [\"merge_list's args [%s] must be list\"]\n for s in a:\n new_list.append(s)\n return list(set(new_list))\n","sub_path":"utils/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"305213028","text":"from django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom .models import PDU, Powered\nfrom .views import get_pdu\n\nfrom rest_framework import serializers\n\n\n@receiver(pre_save, sender=Powered)\ndef check_pdu(sender, instance, *args, **kwargs):\n num_powered = len(Powered.objects.filter(asset=instance.asset))\n if instance not in Powered.objects.all() and num_powered == instance.asset.itmodel.power_ports:\n raise serializers.ValidationError(\"All the power port connections have already been used.\")\n if instance.pdu.assets.count() > 24:\n raise serializers.ValidationError(\"This PDU is already full.\")\n if instance.pdu.rack != instance.asset.rack:\n raise serializers.ValidationError(\n \"PDU must be on the same rack as the asset.\")\n\n\n@receiver(pre_save, sender=PDU)\ndef set_connected(sender, instance, *args, **kwargs):\n if instance.rack.site.abbr.lower() == 'rtp1':\n response = get_pdu(instance.rack.rack, instance.position)\n instance.networked = response[1] < 400\n else:\n instance.networked = False\n","sub_path":"app/hyposoft/power/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"572982369","text":"from selenium import webdriver\r\nimport time,os, platform\r\n\r\n# Pick chromedriver file according to the OS\r\nplat = platform.system()\r\nend = r'/chromedriverL'\r\nif plat != 'Linux':\r\n end = r'\\chromedriverW'\r\n\r\nwith open('grades.csv') as file:\r\n \r\n # Skip first line (column names)\r\n file.readline()\r\n # Directory to the chromedriver file (current directory)\r\n\r\n driver = os.getcwd() + end\r\n url = 'https://forms.gle/ViHMJutEngJBguDg9' \r\n browser = webdriver.Chrome(driver) \r\n for each in file:\r\n browser.get(url)\r\n line = each.split(',')\r\n # Pair columns in file to HTML tags on the web form\r\n # for instance, line[0] is 'name'\r\n data = [\r\n \t(line[0],'entry.1130254001'),(line[1],'entry.990310996'),\r\n \t(line[2],'entry.88868606'),(line[3],'entry.1513875359')\r\n \t]\r\n for i in data:\r\n browser.find_element_by_name(i[1]).send_keys(i[0])\r\n # Click on the submit button\r\n browser.find_element_by_class_name('freebirdFormviewerViewNavigationButtons').click()\r\n # Wait 3 seconds before inserting the next record\r\n time.sleep(3)\r\n # Close navegator at the end\r\n 
browser.quit()\r\n","sub_path":"auto_web_form.py","file_name":"auto_web_form.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"547984499","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 23:38:23 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def isAnagram(self, s, t):\r\n \"\"\"\r\n :type s: str\r\n :type t: str\r\n :rtype: bool\r\n \"\"\"\r\n s1, t1 = list(s), list(t)\r\n s1.sort()\r\n t1.sort()\r\n \r\n return s1 == t1","sub_path":"Leetcode/#242 Valid Anagram.py","file_name":"#242 Valid Anagram.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"47963517","text":"# -*- encoding: utf-8 -*-\n\nfrom openerp.osv import osv, fields\n\nclass account_invoice(osv.osv):\n _inherit = \"account.invoice\"\n\n def _numero_factura(self, cr, uid, ids, field_name, arg, context):\n result = {}\n\n for factura in self.browse(cr, uid, ids):\n if factura.state != \"cancel\":\n result[factura.id] = factura.number\n else:\n result[factura.id] = factura.internal_number\n\n return result\n\n def _validar_factura_proveedor(self, cr, uid, ids, context=None):\n obj = self.browse(cr, uid, ids[0], context=context)\n\n if not obj.supplier_invoice_number:\n return True\n\n facturas = self.search(cr, uid, [('supplier_invoice_number','=',obj.supplier_invoice_number), ('partner_id','=',obj.partner_id.id), ('type','=','in_invoice')])\n if len(facturas) > 1:\n return False\n else:\n return True\n\n def copy(self, cr, uid, id, default=None, context=None):\n if default is None:\n default = {}\n default.update({\n 'supplier_invoice_number': False,\n })\n return super(account_invoice, self).copy(cr, uid, id, default, context)\n\n _constraints = [\n (_validar_factura_proveedor, 'La factura está duplicada', ['supplier_invoice_number']),\n ]\n\n _columns = {\n 'tipo_gasto': fields.selection((('compra', 'Compra/Bien'), ('servicio', 'Servicio'), ('importacion', 'Importación/Exportación'), ('combustible', 'Combustible'), ('mixto', 'Mixto')), 'Tipo de Gasto', required=True),\n 'pequenio_contribuyente': fields.boolean('Pequeño contribuyente'),\n 'numero_factura': fields.function(_numero_factura, type='char', method=True, string='Numero Factura'),\n }\n\n _defaults = {\n 'tipo_gasto': lambda *a: 'compra',\n }\n\nclass account_journal(osv.osv):\n _inherit = \"account.journal\"\n\n _columns = {\n 'direccion': fields.many2one('res.partner', 'Dirección'),\n }\n","sub_path":"account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"499982060","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport scipy.stats as st\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\npd.set_option('max_columns', 1000)\npd.set_option('max_info_columns', 1000)\npd.set_option('expand_frame_repr', False)\npd.set_option('display.max_rows', 40000)\npd.set_option('max_colwidth', 4000)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\ndef data_in():\n return pd.read_csv('/Users/thowe/Downloads/RawData.csv').\\\n assign(\n time=lambda x: pd.to_datetime(x['TransDateTime']),\n month=lambda x: x['time'].dt.month,\n day=lambda x: x['time'].dt.day,\n hour=lambda x: x['time'].dt.hour,\n minute=lambda x: x['time'].dt.minute,\n second=lambda x: x['time'].dt.second,\n 
dow=lambda x: x['time'].dt.weekday,\n weekend=lambda x: x['dow'].apply(lambda y: 1 if y in [5, 6] else 0)\n ).\\\n sort_values('time', ascending=True). \\\n assign(time_between=lambda x: (x['time'] - x['time'].shift()).dt.seconds)\n\n\ndef hist_plotter(df):\n fig = plt.figure(figsize=(12, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.hist(df['RespTime'], bins=100)\n plt.savefig('/Users/thowe/Downloads/hist.png')\n plt.show()\n\n\ndef confidence_intervals(df):\n print(np.mean(df['RespTime']))\n print(np.median(df['RespTime']))\n print(df['RespTime'].describe())\n print(st.norm.interval(0.975, loc=np.mean(df['RespTime']), scale=st.sem(df['RespTime'])))\n print(st.norm.interval(0.99, loc=np.mean(df['RespTime']), scale=st.sem(df['RespTime'])))\n\n\ndef median_ols(df):\n print(df.head())\n df_agg = df[['RespTime', 'minute', 'hour']].groupby(['minute', 'hour']).agg(['count', 'median']).\\\n reset_index(drop=False).\\\n assign(\n quarter_binary=lambda x: x['minute'].apply(lambda y: 1 if y in [0, 15, 30, 45] else 0),\n quarter_traffic_interaction=lambda x: x['RespTime']['count'] * x['quarter_binary'],\n outcome=lambda x: x['RespTime']['median'],\n traffic=lambda x: x['RespTime']['count'],\n intercept=1\n ) \\\n [['outcome', 'intercept', 'traffic', 'quarter_binary', 'quarter_traffic_interaction']]\n print(df_agg.head(10))\n print(df_agg.tail(10))\n\n model = sm.OLS(df_agg['outcome'], df_agg[['intercept', 'traffic', 'quarter_binary', 'quarter_traffic_interaction']])\n results = model.fit()\n print(results.summary())\n\n\ndef max_ols(df):\n print(df.head())\n df_agg = df[['RespTime', 'minute', 'hour']].groupby(['minute', 'hour']).agg(['count', 'max']).\\\n reset_index(drop=False).\\\n assign(\n quarter_binary=lambda x: x['minute'].apply(lambda y: 1 if y in [0, 15, 30, 45] else 0),\n quarter_traffic_interaction=lambda x: x['RespTime']['count'] * x['quarter_binary'],\n outcome=lambda x: x['RespTime']['max'],\n traffic=lambda x: x['RespTime']['count'],\n intercept=1\n ) \\\n [['outcome', 'intercept', 'traffic', 'quarter_binary', 'quarter_traffic_interaction']]\n print(df_agg.head(10))\n print(df_agg.tail(10))\n\n model = sm.OLS(df_agg['outcome'], df_agg[['intercept', 'traffic', 'quarter_binary', 'quarter_traffic_interaction']])\n results = model.fit()\n print(results.summary())\n\n\ndef diags(df):\n print(df.sort_values('RespTime', ascending=False).head(1000))\n\n # print(df[['RespTime', 'minute']].groupby('minute').agg(['count', 'median']))\n # print(df[['RespTime', 'hour']].groupby('hour').agg(['count', 'median']))\n # print(df[['RespTime', 'day']].groupby('day').agg(['count', 'median']))\n\n\n # pd.crosstab(index=df['hour'], columns=df['minute']).to_csv('/Users/thowe/Downloads/pivot_count.csv')\n # pd.pivot_table(df, values='RespTime', index='hour', columns='minute', aggfunc=np.max).to_csv('/Users/thowe/Downloads/pivot_max.csv')\n # pd.pivot_table(df, values='RespTime', index='hour', columns='minute', aggfunc=np.median).to_csv('/Users/thowe/Downloads/pivot.csv')\n\n# todo: correlation between count by hour of day and response time, median and max?\n# traffic causes increase in response times. (might it be the opposite?)\n# todo: there are regularities at 0, 15, 30, and 45.\n# todo: other spikes that are unpredicatable given this data. What drives these? 
What are the consequences?\n\n# todo: regress RespTime on and (0, 15, 30, 45)-binary\n# data: row representing hour and minute; columns (0, 15, 30, 45)-binary, count, interaction of the two, ...and outcome (1) median RespTime, and (2) max RespTime\n\n\n\n# need other descriptions of distributions.\n# how do I figure out if there is some regularity in quarter hour verses not extreme values. Crosstab...regression...what should I do?\n\n# what about other aspects of the distribution?\ndef agg_plotter(df, covar):\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n sns.barplot(x=covar, y='RespTime', data=df, ax=ax, estimator=np.max)\n # sns.barplot(x=covar, y='RespTime', data=df, ax=ax, estimator=np.median, ci=99)\n plt.show()\n\n # df_agg = df[['RespTime', covar]].\\\n # groupby(covar).median().\\\n # reset_index()\n #\n # sns.set_style(\"whitegrid\")\n # fig = plt.figure(figsize=(20, 8))\n # ax = fig.add_subplot(1, 1, 1)\n # sns.barplot(x=covar, y='RespTime', data=df_agg, ax=ax, ci='sd')\n # plt.show()\n\n\ndef time_series_overall_plotter(df):\n df = df.sort_values('time', ascending=True)\n print(df.head(10))\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['time'], df['RespTime'])\n plt.savefig('/Users/thowe/Downloads/time_series_overall.png')\n plt.show()\n\n\ndef time_series_day_plotter(df):\n # df = df.query('(month == 6) and (day == 11) and (hour == 3)').sort_values('time', ascending=True)\n df = df.query('(month == 6) and (day == 1)').sort_values('time', ascending=True)\n print(df.head(10))\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['time'], df['RespTime'])\n # plt.savefig('/Users/thowe/Downloads/time_series_day.png')\n plt.show()\n\ndef time_series_hour_plotter(df):\n # average time between\n # df = df.sort_values('time', ascending=True)\n # df = df.query('\"2017-06-01\" <= time < \"2017-06-30\"').sort_values('time', ascending=True)\n # df = df.query('\"2017-06-01\" <= time < \"2017-06-02\" and (hour == 21)').sort_values('time', ascending=True)\n # df = df.query('\"2017-07-24\" <= time <= \"2017-07-31\"').sort_values('time', ascending=True)\n # df = df.query('(month == 6)').sort_values('time', ascending=True)\n # df = df.query('(month == 6) and (day == 11)').sort_values('time', ascending=True)\n # df = df.query('(month == 6) and (day == 11) and (hour == 7)').sort_values('time', ascending=True)\n df = df.query('(month == 6) and (day == 1) and (hour == 9)').sort_values('time', ascending=True)\n print(df.head(10))\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['time'], df['RespTime'])\n plt.savefig('/Users/thowe/Downloads/time_series_hour.png')\n plt.show()\n\ndef time_between(df_in):\n df = df_in.query('(month == 6) and (day == 6) and (hour == 9)')\n\n print(df[['minute', 'time_between']].groupby('minute').mean())\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['minute'], df['time_between'])\n plt.show()\n\ndef count_per_minute(df_in):\n df = df_in.query('(month == 6) and (day == 6) and (hour == 9)')\n\n print(df[['RespTime', 'minute']].groupby('minute').count())\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['minute'], df['RespTime'])\n plt.show()\n\ndef count_by_date_plotter(df):\n # todo: number doesn't 
change over time, but is seasonality weekly\n df = df.\\\n assign(date=lambda x: x['time'].dt.date).\\\n sort_values('time', ascending=True) \\\n [['time', 'date']].\\\n groupby('date').count().\\\n reset_index()\n print(df.head(10))\n # sys.exit()\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['date'], df['time'])\n plt.show()\n\ndef time_between_plotter(df_in):\n # todo: the only abnormality looks like 6/11\n df = df_in.sort_values('time', ascending=True).\\\n query('(month == 6) and (day == 6) and (hour == 9)')\n print(df.head(10))\n # sys.exit()\n\n sns.set_style(\"whitegrid\")\n fig = plt.figure(figsize=(20, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(df['time'], df['time_between'])\n plt.show()\n\n print(df.query('\"2017-06-11\" <= time < \"2017-06-12\"').query('time_between > 100'))\n# I don't exactly know what to make of this.\n\n\n# todo: are some days worse? does it not happen some days?\n\ndef main():\n df = data_in()\n\n # hist_plotter(df)\n confidence_intervals(df)\n sys.exit()\n\n # time_series_overall_plotter(df)\n # time_series_day_plotter(df)\n # time_series_hour_plotter(df)\n\n # count_by_date_plotter(df) # use this?\n # time_between_plotter(df) # use this?\n # time_between(df) # use this?\n\n # median_ols(df)\n # max_ols(df)\n\n diags(df)\n\n # for covar in ['minute', 'day']:\n for covar in ['month', 'hour', 'minute', 'weekend', 'dow', 'day']:\n agg_plotter(df, covar)\n\n\n\nif __name__ == '__main__':\n main()\n\n# print(df.info())\n# print(df.head())\n# print(df.tail())\n# print(df.describe())\n\n# mean = np.mean(df['RespTime'])\n# median = np.median(df['RespTime'])\n# mad = np.mean(np.abs(median - df['RespTime']))\n# print(mean)\n# print(median)\n# print(mad)\n#\n#\n# fig = plt.figure(figsize=(12, 8))\n# ax = fig.add_subplot(1, 1, 1)\n# ax.hist(df['RespTime'], bins=25)\n# plt.show()\n\n\n\n\n","sub_path":"projects/nic/nic.py","file_name":"nic.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"87254105","text":"\nimport random\n\nchoices = [\"rock\", 'paper', 'scissors']\n\n\n\n\ndef playainput():\n while True:\n playerinput = input(\"Input rock, paper, or scissors: \")\n if playerinput in choices:\n print (\"you chose\", playerinput)\n break\n\n\n\n print (\"oops try again: \")\n return playerinput\n \n \n\n\n\n\n\ndef choose(x):\n choice = random.choice(x)\n print (\"I chose %s\" %choice)\n return choice\n\nx = playainput()\ny = choose(choices)\n\n\noutcomes = {\n (\"rock\", \"rock\"): \"Tie!\",\n (\"rock\", \"paper\"): \"you lose\",\n (\"rock\", \"scissors\"): \"you win\",\n (\"paper\", \"rock\"): \"you win\",\n (\"paper\", \"paper\"): \"tie!\",\n (\"paper\", \"scissors\"): \"you lose\",\n (\"scissors\", \"rock\"): \"you lose\",\n (\"scissors\", \"paper\"): \"you win\",\n (\"scissors\", \"scissors\"): \"tie!\",\n}\nprint(outcomes[x, y])\n","sub_path":"rockpaperscissors.py","file_name":"rockpaperscissors.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"599262492","text":"import os\nimport csv\n\nclass CSVToSQL:\n def __init__(self, csvfile):\n self.csvfile = csvfile\n self.table_name = os.path.splitext(os.path.basename(self.csvfile))[0]\n self.cols = self.extract_column_names()\n \n def insert_records(self):\n \"\"\" Reads each row of data, returning a prepared statement and list of database input values 
for each row \"\"\"\n with open(self.csvfile) as f:\n reader = csv.reader(f)\n rowlen = len(next(reader, None)) # skip the headers and get the rowlength\n line = [(row) for row in reader]\n stmt = self.build_insert_statement(rowlen)\n return stmt, line\n \n def build_insert_statement(self, num_cols):\n \"\"\" Builds escaped SQLite insert statement \"\"\"\n from itertools import repeat\n params = ','.join(list(repeat('?', num_cols)))\n cols = ', '.join(self.cols)\n return f\"INSERT INTO {self.table_name} ({cols}) VALUES ({params})\"\n \n def create_table_sql(self):\n \"\"\" Wrapper around the 'build_create_table_string' method \"\"\"\n col_types = self.infer_column_dtypes()\n assert len(self.cols) == len(col_types)\n return self.build_create_table_string(col_types)\n \n \n def build_create_table_string(self, col_types):\n \"\"\" Makes the CREATE TABLE statement \"\"\"\n sql = f\"CREATE TABLE IF NOT EXISTS {self.table_name} (id INTEGER PRIMARY KEY AUTOINCREMENT,\"\n for col, dtype in zip(self.cols, col_types):\n sql += f\" {col} {dtype},\"\n sql = sql.rstrip(\",\")\n sql += \")\"\n return sql\n\n def extract_column_names(self):\n \"\"\" Gets the column names for creating the database table \"\"\"\n with open(self.csvfile) as f:\n reader = csv.reader(f)\n return [col.replace(\"%\",\"\").replace(\" \", \"_\") for col in next(reader)]\n \n def infer_column_dtypes(self):\n \"\"\" Uses data in the CSV columns to infer the datatype\n Limited to strings, integers and floats for now... \"\"\"\n dtypes = []\n python_sql_typemap = {str: \"VARCHAR\", int: \"INTEGER\", float: \"FLOAT\"} # extend for datetimes, etc\n dtype_test_order = [int, float, str]\n with open(self.csvfile) as f:\n reader = csv.DictReader(f)\n for header, value in next(reader).items():\n for dtype in dtype_test_order:\n try:\n dtype(value) # Test this works without error: if so, valid dtype\n dtypes.append(dtype)\n break\n except ValueError:\n continue\n \n return [python_sql_typemap[d] for d in dtypes]\n ","sub_path":"main/csvparser.py","file_name":"csvparser.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"52518943","text":"from django import forms\nfrom note.models import Note\n\nclass NoteForm(forms.ModelForm):\n anonymous = forms.BooleanField(required=False)\n class Meta:\n model = Note\n fields = ('title', 'body')\n \n def __init__(self, instance=None, initial=None, **kwargs):\n if instance is not None and instance.author is None:\n if initial is None:\n initial = {}\n initial['anonymous'] = True\n \n super(forms.ModelForm, self).__init__(instance=instance, initial=initial, **kwargs)\n","sub_path":"note/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"355017454","text":"import netCDF4 as nc\nimport os\nimport shutil\n\nfrom TimeStepping import TimeStepping\n\nfrom Grid import Grid, Zmin, Zmax, Center, Node\nimport numpy as np\n\nclass NetCDFIO_Stats:\n def __init__(self, namelist, paramlist, grid, root_dir):\n self.root_grp = None\n self.profiles_grp = None\n self.ts_grp = None\n\n self.last_output_time = 0.0\n self.uuid = str(namelist['meta']['uuid'])\n\n self.frequency = namelist['stats_io']['frequency']\n\n # Setup the statistics output path\n p = []\n p.append(namelist['output']['output_root'])\n p.append('Output.')\n p.append(namelist['meta']['simname'])\n p.append('.')\n 
p.append(self.uuid[len(self.uuid )-5:len(self.uuid)])\n self.outpath = str(os.path.join(root_dir, ''.join(p))) + os.sep\n self.figpath = str(os.path.join(root_dir, ''.join(p))) + os.sep + 'figs'+os.sep\n\n os.makedirs(self.outpath, exist_ok=True)\n os.makedirs(self.figpath, exist_ok=True)\n\n self.stats_path = str(os.path.join(self.outpath, namelist['stats_io']['stats_dir']))\n\n os.makedirs(self.stats_path, exist_ok=True)\n\n self.path_plus_file = str( self.stats_path + '/' + 'Stats.' + namelist['meta']['simname'] + '.nc')\n if os.path.exists(self.path_plus_file):\n for i in range(100):\n res_name = 'Restart_'+str(i)\n print(\"Here \" + res_name)\n if os.path.exists(self.path_plus_file):\n self.path_plus_file = str( self.stats_path + '/' + 'Stats.' + namelist['meta']['simname']\n + '.' + res_name + '.nc')\n else:\n break\n\n shutil.move(os.path.join( './', namelist['meta']['simname'] + '.in'),\n os.path.join( self.outpath, namelist['meta']['simname'] + '.in'))\n\n shutil.move(os.path.join( './paramlist_'+paramlist['meta']['casename']+ '.in'),\n os.path.join( self.outpath, 'paramlist_'+paramlist['meta']['casename']+ '.in'))\n self.setup_stats_file(grid)\n return\n\n def open_files(self):\n self.root_grp = nc.Dataset(self.path_plus_file, 'r+', format='NETCDF4')\n self.profiles_grp = self.root_grp.groups['profiles']\n self.ts_grp = self.root_grp.groups['timeseries']\n return\n\n def close_files(self):\n self.root_grp.close()\n return\n\n def setup_stats_file(self, grid):\n k_b_1 = grid.boundary(Zmin())\n k_b_2 = grid.boundary(Zmax())\n k_1 = k_b_1\n k_2 = k_b_2\n # k_1 = grid.first_interior(Zmin()) # IO assumes full and half fields are equal sizes\n # k_2 = grid.first_interior(Zmax()) # IO assumes full and half fields are equal sizes\n\n root_grp = nc.Dataset(self.path_plus_file, 'w', format='NETCDF4')\n\n # Set profile dimensions\n profile_grp = root_grp.createGroup('profiles')\n profile_grp.createDimension('z', grid.nz)\n profile_grp.createDimension('t', None)\n z = profile_grp.createVariable('z', 'f8', ('z'))\n z[:] = np.array(grid.z[k_b_1:k_b_2])\n z_half = profile_grp.createVariable('z_half', 'f8', ('z'))\n z_half[:] = np.array(grid.z_half[k_1:k_2])\n profile_grp.createVariable('t', 'f8', ('t'))\n del z\n del z_half\n\n reference_grp = root_grp.createGroup('reference')\n reference_grp.createDimension('z', grid.nz)\n z = reference_grp.createVariable('z', 'f8', ('z'))\n z[:] = np.array(grid.z[k_b_1:k_b_2])\n z_half = reference_grp.createVariable('z_half', 'f8', ('z'))\n z_half[:] = np.array(grid.z_half[k_1:k_2])\n del z\n del z_half\n\n ts_grp = root_grp.createGroup('timeseries')\n ts_grp.createDimension('t', None)\n ts_grp.createVariable('t', 'f8', ('t'))\n\n root_grp.close()\n return\n\n def add_profile(self, var_name):\n\n root_grp = nc.Dataset(self.path_plus_file, 'r+', format='NETCDF4')\n profile_grp = root_grp.groups['profiles']\n new_var = profile_grp.createVariable(var_name, 'f8', ('t', 'z'))\n\n root_grp.close()\n\n return\n\n def add_reference_profile(self, var_name):\n root_grp = nc.Dataset(self.path_plus_file, 'r+', format='NETCDF4')\n reference_grp = root_grp.groups['reference']\n new_var = reference_grp.createVariable(var_name, 'f8', ('z',))\n root_grp.close()\n\n return\n\n def add_ts(self, var_name):\n\n root_grp = nc.Dataset(self.path_plus_file, 'r+', format='NETCDF4')\n ts_grp = root_grp.groups['timeseries']\n new_var = ts_grp.createVariable(var_name, 'f8', ('t',))\n\n root_grp.close()\n return\n\n def write_profile(self, var_name, data):\n var = 
self.profiles_grp.variables[var_name]\n var[-1, :] = np.array(data)\n return\n\n def write_profile_new(self, var_name, grid, data):\n var = self.profiles_grp.variables[var_name]\n k_1 = grid.boundary(Zmin())\n k_2 = grid.boundary(Zmax())\n var[-1, :] = np.array(data[k_1:k_2])\n return\n\n def write_reference_profile(self, var_name, data):\n '''\n Writes a profile to the reference group NetCDF Stats file. The variable must have already been\n added to the NetCDF file using add_reference_profile\n :param var_name: name of variables\n :param data: data to be written to file\n :return:\n '''\n\n root_grp = nc.Dataset(self.path_plus_file, 'r+', format='NETCDF4')\n reference_grp = root_grp.groups['reference']\n var = reference_grp.variables[var_name]\n var[:] = np.array(data)\n root_grp.close()\n return\n\n def write_ts(self, var_name, data):\n var = self.ts_grp.variables[var_name]\n var[-1] = data\n return\n\n def write_simulation_time(self, t):\n # Write to profiles group\n profile_t = self.profiles_grp.variables['t']\n profile_t[profile_t.shape[0]] = t\n\n # Write to timeseries group\n ts_t = self.ts_grp.variables['t']\n ts_t[ts_t.shape[0]] = t\n\n return\n\n","sub_path":"src/NetCDFIO.py","file_name":"NetCDFIO.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26280085","text":"from skills.skill import Skill, Ability\n\n\nclass Strength(Skill):\n\n def __init__(self, experience: int):\n super().__init__(\n 'Strength',\n experience, [\n Ability('Beginner', 'You are beginning to learn to damage', 5),\n Ability('Intermediate', 'You are good at dealing damage', 10),\n Ability('Pro', 'You are pro at dealing damage', 20),\n ])\n","sub_path":"dragex/skills/strength.py","file_name":"strength.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"28896454","text":"import uuid\n\nimport falcon\n\nfrom api.helpers import validate_schema\nfrom api.items.logic import (\n get_next_position,\n remove_item_artifacts\n)\nfrom api.items.schemas import (\n item_create_schema,\n item_save_schema,\n item_delete_schema,\n switch_state_schema\n)\nfrom api.items_sets.logic import (\n get_item_set,\n get_project_by_set_key,\n get_project_set,\n get_item_set_by_id\n)\nfrom api.logic import validate_auth\nfrom api.permissions import can_edit_set, can_read_set\nfrom api.item_states.logic import get_states_workflow\nfrom api.items_review.logic import start_item_review\nfrom db.models import Item, ItemType, ItemState, ItemStatesTransition, User\nfrom decorators import with_db_session\n\n\ndef item_as_dict(item):\n return {\n 'summary': item.summary,\n 'description': item.description,\n 'key': item.key,\n 'history': [],\n 'version': item.version,\n 'position': item.position,\n 'is_header': item.is_header\n }\n\n\n@falcon.before(validate_auth)\nclass ItemsResource:\n @staticmethod\n def fill_requirement(item_db, user_db, state_db):\n item = item_db.as_dict()\n item.update({\n 'state': state_db.as_dict(),\n 'creator': user_db.as_dict()\n })\n return item\n\n @with_db_session\n def handle_get_items(self, set_key, user, db_session=None):\n\n set_db = get_item_set(db_session, set_key, True)\n if can_read_set(db_session, set_db.id, user) is False:\n raise falcon.HTTPForbidden()\n\n workflow = get_states_workflow(db_session, set_db.workflow_id)\n items_db = db_session.query(Item, User, ItemState).filter(\n Item.set_id == set_db.id,\n 
).filter(\n User.id == Item.creator_id\n ).filter(\n ItemState.id == Item.state_id\n ).order_by(Item.position.asc()).all()\n\n return {\n 'workflow': workflow,\n 'items': [self.fill_requirement(*i) for i in items_db]\n }\n\n def on_get(self, req, resp, item_key=None):\n user = req.context['user']\n\n if item_key is None:\n params = falcon.uri.parse_query_string(req.query_string)\n project_key = params.get('project_key', None)\n set_key = params.get('set_key', None)\n\n if set_key is None or project_key is None:\n resp.body = []\n else:\n resp.body = self.handle_get_items(\n set_key=set_key,\n user=user\n )\n else:\n resp.body = 'not realised yet'\n\n @staticmethod\n @with_db_session\n def handle_create_post(\n user, set_key, summary, description, db_session=None\n ):\n set_db = get_item_set(db_session, set_key, True)\n\n if can_edit_set(db_session, set_db.id, user) is False:\n raise falcon.HTTPForbidden()\n\n project_db = get_project_by_set_key(db_session, set_key, True)\n\n item_position = get_next_position(db_session, set_db.id)\n item_type_db = db_session.query(ItemType).filter(\n ItemType.id == set_db.item_type_id\n ).one()\n\n state_db = db_session.query(ItemState).filter(\n ItemState.workflow_id == set_db.workflow_id,\n ItemState.start.is_(True)\n ).one()\n\n new_item = Item(\n key=uuid.uuid4().hex,\n position=item_position,\n summary=summary,\n description=description,\n item_type_id=item_type_db.id,\n state_id=state_db.id\n )\n new_item.creator_id = user.id\n new_item.set_id = set_db.id\n db_session.add(new_item)\n db_session.flush()\n new_item.key = '{}-{}-{}'.format(project_db.key, item_type_db.key, new_item.id).upper()\n db_session.commit()\n result = new_item.as_dict()\n result.update({\n 'state': state_db.as_dict()\n })\n return result\n\n @staticmethod\n @with_db_session\n def handle_save_post(\n user, key, summary, description, db_session=None\n ):\n item_db = db_session.query(Item).filter(Item.key == key).one_or_none()\n if item_db is None:\n raise falcon.HTTPNotFound(\n 'Item with key {} not found.'.format(key)\n )\n\n state_db = db_session.query(ItemState).filter(\n ItemState.id == item_db.state_id\n ).one()\n\n if state_db.conditions.get('disable_edit', False):\n raise falcon.HTTPConflict('Item edit disabled')\n\n if can_edit_set(db_session, item_db.set_id, user) is False:\n raise falcon.HTTPForbidden()\n\n item_db.summary = summary\n item_db.description = description\n\n db_session.commit()\n\n item = item_db.as_dict()\n if state_db.conditions.get('change_request', False):\n item.update({\n 'debug_info': 'Mark all children as suspected',\n })\n return item\n\n @staticmethod\n @with_db_session\n def handle_delete_post(\n user, key, db_session=None\n ):\n item_query = db_session.query(Item).filter(Item.key == key)\n item_db = item_query.one_or_none()\n if item_db is None:\n raise falcon.HTTPNotFound(\n 'Item with key {} not found.'.format(key)\n )\n\n if can_edit_set(db_session, item_db.set_id, user) is False:\n raise falcon.HTTPForbidden()\n\n remove_item_artifacts(db_session, item_db.id)\n key = item_db.key\n db_session.commit()\n return {\n 'key': key\n }\n\n @staticmethod\n @with_db_session\n def handle_switch_state_post(\n user, key, transition_id, db_session=None\n ):\n item_query = db_session.query(Item).filter(Item.key == key)\n item_db = item_query.one_or_none()\n if item_db is None:\n raise falcon.HTTPNotFound(\n 'Item with key {} not found.'.format(key)\n )\n\n if can_edit_set(db_session, item_db.set_id, user) is False:\n raise falcon.HTTPForbidden()\n\n 
set_db = get_item_set_by_id(db_session, item_db.set_id, True)\n transition_db = db_session.query(ItemStatesTransition).filter(\n ItemStatesTransition.workflow_id == set_db.workflow_id,\n ItemStatesTransition.id == transition_id\n ).one_or_none()\n\n if transition_db is None:\n raise falcon.HTTPNotFound(\n 'Transition with id {} not found.'.format(transition_id)\n )\n\n state_db = db_session.query(ItemState).filter(\n ItemState.workflow_id == set_db.workflow_id,\n ItemState.id == transition_db.end_state_id\n ).one_or_none()\n\n if state_db is None:\n raise falcon.HTTPInternalServerError(\n 'State with id {} not found.'.format(transition_db.end_state_id)\n )\n\n if state_db.conditions.get('need_review', False):\n start_item_review(db_session, item_db.id)\n\n item_db.state_id = state_db.id\n db_session.commit()\n return {\n 'key': key,\n 'state': state_db.as_dict()\n }\n\n def on_post(self, req, resp):\n body = req.context['body']\n user = req.context['user']\n action = body.get('action', None)\n\n if action == 'create':\n validate_schema(body, item_create_schema)\n resp.body = self.handle_create_post(\n user=user,\n set_key=body.get('set_key'),\n summary=body.get('summary'),\n description=body.get('description', None)\n )\n elif action == 'update':\n validate_schema(body, item_save_schema)\n resp.body = self.handle_save_post(\n user=user,\n key=body.get('key'),\n summary=body.get('summary', ''),\n description=body.get('description', '')\n )\n elif action == 'delete':\n validate_schema(body, item_delete_schema)\n resp.body = self.handle_delete_post(\n user=user,\n key=body['key'],\n )\n elif action == 'switch_state':\n validate_schema(body, switch_state_schema)\n resp.body = self.handle_switch_state_post(\n user=user,\n key=body['key'],\n transition_id=body['transition_id'],\n )\n else:\n raise falcon.HTTPBadRequest(title='Wrong action {}'.format(action))","sub_path":"src/api/items/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"341197697","text":"# -*- coding: utf-8 -*-\n# 80\n\nfrom sys import argv\n\ntry:\n\tfrom argparse import ArgumentParser\nexcept ImportError:\n\terrwrite(argv[0] + \": argparse module not found!\\n\")\n\nclass CommandLineParser:\n\tclass __clpI(ArgumentParser):\n\t\tdef __init__(self, args):\n\t\t\tif args is None:\n\t\t\t\targs = argv[1:]\n\n\t\t\tself.args = args\n\t\t\tself.namespace = None\n\n\t\t\tArgumentParser.__init__(self, description='Basic overlay services')\n\n\t\t\tArgumentParser.add_argument(self, '-v', dest='verbose',\n\t\t\t\t\t\t\t\t\t\taction='store_true',\n\t\t\t\t\t\t\t\t\t\tdefault=False,\n\t\t\t\t\t\t\t\t\t\thelp='Turn verbose mode on')\n\n\t\t\tArgumentParser.add_argument(self, '-c', '--connectTo',\n\t\t\t\t\t\t\t\t\t\tdest='mhost', nargs='+',\n\t\t\t\t\t\t\t\t\t\tdefault=[ ], metavar='PEER',\n\t\t\t\t\t\t\t\t\t\thelp='Already running peers to connect')\n\n\t\t\tArgumentParser.add_argument(self, '-l', '--listePort',\n\t\t\t\t\t\t\t\t\t\tdest='lport', type=int,\n\t\t\t\t\t\t\t\t\t\tnargs='+', default=[ 8001 ],\n\t\t\t\t\t\t\t\t\t\thelp='Ports to listen for connection')\n\n\t\t\tArgumentParser.add_argument(self, 'PeerName',\n\t\t\t\t\t\t\t\t\t\thelp='The name of this peer')\n\n\t\tdef parse_args(self):\n\t\t\tself.namespace = ArgumentParser.parse_args(self, self.args)\n\n\t\tdef getArguments(self):\n\t\t\treturn self.namespace\n\n\t__instance = None\n\n\tdef __init__(self, args=None):\n\t\tif 
CommandLineParser.__instance is None:\n\t\t\tCommandLineParser.__instance = CommandLineParser.__clpI(args)\n\n\t\tself.__dict__['_CommandLineParser__instance'] =\\\n\t\t\t\t\t\t\t\t\t\t\t\t\tCommandLineParser.__instance\n\n\tdef __getattr__(self, attr):\n\t\treturn getattr(self.__instance, attr)\n\n\tdef __setattr__(self, attr, value):\n\t\treturn setattr(self.__instance, attr, value)\n\nif __name__ == '__main__':\n\tfrom sys import argv\n\n\tap = CommandLineParser(argv[1:])\n\tap.parse_args()\n","sub_path":"old/commandLineParser.py","file_name":"commandLineParser.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"239128569","text":"if userInput.lower() == \"s\":\n\n while True:\n respuesta = input(\"Respecto a: (1)Interfaz? (2)Metodos? (3)Funciones? (4)misceláneos (5)Volver? \")\n\n if respuesta == \"1\":\n while True:\n DetalleRespuesta = input(\"para interfaz tenemos (1)Teclado y (2)Mouse,*(3)Volver*, Cual de ellos le iteresa?: \")\n\n if DetalleRespuesta == \"1\":\n print(\"Teclado\")\n\n elif DetalleRespuesta == \"2\": \n print(\"Mouse\")\n\n elif DetalleRespuesta == \"3\":\n break # Salimos de insterfaz\n\n else:\n print(\"Opción inválida\")\n\n elif respuesta == \"2\":\n print(\"Métodos\")\n\n elif respuesta == \"3\":\n print(\"Funciones\")\n\n elif respuesta == \"4\":\n print(\"Misceláneos\")\n\n elif respuesta == \"5\":\n break # Salimos de la opción \"s\"\n\n else:\n print(\"Opción inválida\")","sub_path":"PRACTICA 1/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257487836","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/odin/bay/distributions/zero_inflated.py\n# Compiled at: 2019-09-17 08:51:50\n# Size of source mod 2**32: 11720 bytes\n\"\"\"The ZeroInflated distribution class.\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nfrom tensorflow_probability.python.distributions import Bernoulli, Independent, distribution\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n__all__ = [\n 'ZeroInflated']\n\ndef _broadcast_rate(probs, *others):\n others = list(others)\n others_ndims = [o.shape.ndims for o in others]\n assert len(set(others_ndims)) == 1\n others_ndims = others_ndims[0]\n probs_ndims = probs.shape.ndims\n if others_ndims < probs_ndims:\n for i in range(probs_ndims - others_ndims):\n others = [tf.expand_dims(o, -1) for o in others]\n\n else:\n if others_ndims > probs_ndims:\n for i in range(others_ndims - probs_ndims):\n probs = tf.expand_dims(probs, -1)\n\n return [\n probs] + others\n\n\nclass ZeroInflated(distribution.Distribution):\n __doc__ = 'zero-inflated distribution.\\n\\n The `zero-inflated` object implements batched zero-inflated distributions.\\n The zero-inflated model is defined by a zero-inflation rate\\n and a python list of `Distribution` objects.\\n\\n Methods supported include `log_prob`, `prob`, `mean`, `sample`, and\\n `entropy_lower_bound`.\\n '\n\n def __init__(self, count_distribution, inflated_distribution=None, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name='ZeroInflated'):\n \"\"\"Initialize a 
zero-inflated distribution.\n\n A `ZeroInflated` is defined by a zero-inflation rate (`inflated_distribution`,\n representing the probabilities of excess zeros) and a `Distribution` object\n having matching dtype, batch shape, event shape, and continuity\n properties (the dist).\n\n Parameters\n ----------\n count_distribution : A `tfp.distributions.Distribution` instance.\n The instance must have `batch_shape` matching the zero-inflation\n distribution.\n\n inflated_distribution: `tfp.distributions.Bernoulli`-like instance.\n Manages the probability of excess zeros, the zero-inflated rate.\n Must have either scalar `batch_shape` or `batch_shape` matching\n `count_distribution.batch_shape`.\n\n logits: An N-D `Tensor` representing the log-odds of a excess zeros\n A zero-inflation rate, where the probability of excess zeros is\n sigmoid(logits).\n Only one of `logits` or `probs` should be passed in.\n\n probs: An N-D `Tensor` representing the probability of a zero event.\n Each entry in the `Tensor` parameterizes an independent\n ZeroInflated distribution.\n Only one of `logits` or `probs` should be passed in.\n\n validate_args: Python `bool`, default `False`. If `True`, raise a runtime\n error if batch or event ranks are inconsistent between pi and any of\n the distributions. This is only checked if the ranks cannot be\n determined statically at graph construction time.\n\n allow_nan_stats: Boolean, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n\n name: A name for this distribution (optional).\n\n References\n ----------\n Liu, L. & Blei, D.M.. (2017). Zero-Inflated Exponential Family Embeddings.\n Proceedings of the 34th International Conference on Machine Learning,\n in PMLR 70:2140-2148\n\n \"\"\"\n parameters = dict(locals())\n self._runtime_assertions = []\n with tf.compat.v1.name_scope(name) as (name):\n if not isinstance(count_distribution, distribution.Distribution):\n raise TypeError('count_distribution must be a Distribution instance but saw: %s' % count_distribution)\n else:\n self._count_distribution = count_distribution\n if inflated_distribution is None:\n inflated_distribution = Bernoulli(logits=logits, probs=probs,\n dtype=(tf.int32),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name='ZeroInflatedRate')\n elif not isinstance(inflated_distribution, distribution.Distribution):\n raise TypeError('inflated_distribution must be a Distribution instance but saw: %s' % inflated_distribution)\n else:\n self._inflated_distribution = inflated_distribution\n if self._count_distribution.batch_shape.ndims is None:\n raise ValueError('Expected to know rank(batch_shape) from count_disttribution')\n if self._inflated_distribution.batch_shape.ndims is None:\n raise ValueError('Expected to know rank(batch_shape) from inflated_distribution')\n inflated_batch_ndims = self._inflated_distribution.batch_shape.ndims\n count_batch_ndims = self._count_distribution.batch_shape.ndims\n if count_batch_ndims < inflated_batch_ndims:\n self._inflated_distribution = Independent((self._inflated_distribution),\n reinterpreted_batch_ndims=(inflated_batch_ndims - count_batch_ndims),\n name='ZeroInflatedRate')\n elif count_batch_ndims > inflated_batch_ndims:\n raise ValueError('count_distribution has %d-D batch_shape, which smallerthan %d-D batch_shape of inflated_distribution' % (\n 
count_batch_ndims, inflated_batch_ndims))\n if validate_args:\n self._runtime_assertions.append(tf.assert_equal((self._count_distribution.batch_shape_tensor()),\n (self._inflated_distribution.batch_shape_tensor()),\n message='dist batch shape must match logits|probs batch shape'))\n reparameterization_type = [\n self._count_distribution.reparameterization_type,\n self._inflated_distribution.reparameterization_type]\n if any(i == reparameterization.NOT_REPARAMETERIZED for i in reparameterization_type):\n reparameterization_type = reparameterization.NOT_REPARAMETERIZED\n else:\n reparameterization_type = reparameterization.FULLY_REPARAMETERIZED\n super(ZeroInflated, self).__init__(dtype=(self._count_distribution.dtype), reparameterization_type=reparameterization_type,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=(self._count_distribution._graph_parents + self._inflated_distribution._graph_parents),\n name=name)\n\n @property\n def logits(self):\n \"\"\"Log-odds of a `1` outcome (vs `0`).\"\"\"\n if isinstance(self._inflated_distribution, Independent):\n return self._inflated_distribution.distribution.logits_parameter()\n else:\n return self._inflated_distribution.logits_parameter()\n\n @property\n def probs(self):\n \"\"\"Probability of a `1` outcome (vs `0`).\"\"\"\n if isinstance(self._inflated_distribution, Independent):\n return self._inflated_distribution.distribution.probs_parameter()\n else:\n return self._inflated_distribution.probs_parameter()\n\n @property\n def count_distribution(self):\n return self._count_distribution\n\n @property\n def inflated_distribution(self):\n return self._inflated_distribution\n\n def _batch_shape_tensor(self):\n return self._count_distribution._batch_shape_tensor()\n\n def _batch_shape(self):\n return self._count_distribution._batch_shape()\n\n def _event_shape_tensor(self):\n return self._count_distribution._event_shape_tensor()\n\n def _event_shape(self):\n return self._count_distribution._event_shape()\n\n def _mean(self):\n with tf.compat.v1.control_dependencies(self._runtime_assertions):\n probs, d_mean = _broadcast_rate(self.probs, self._count_distribution.mean())\n return (1 - probs) * d_mean\n\n def _variance(self):\n \"\"\"\n (1 - pi) * (d.var + d.mean^2) - [(1 - pi) * d.mean]^2\n\n Note: mean(ZeroInflated) = (1 - pi) * d.mean\n where:\n - pi is zero-inflated rate\n - d is count distribution\n \"\"\"\n with tf.compat.v1.control_dependencies(self._runtime_assertions):\n d = self._count_distribution\n probs, d_mean, d_variance = _broadcast_rate(self.probs, d.mean(), d.variance())\n return (1 - probs) * (d_variance + tf.square(d_mean)) - tf.math.square(self._mean())\n\n def _log_prob(self, x):\n with tf.compat.v1.control_dependencies(self._runtime_assertions):\n eps = tf.cast(1e-08, x.dtype)\n x = tf.convert_to_tensor(x, name='x')\n d = self._count_distribution\n pi = self.probs\n log_prob = d.log_prob(x)\n prob = tf.math.exp(log_prob)\n pi, prob, log_prob = _broadcast_rate(pi, prob, log_prob)\n y_0 = tf.math.log(pi + (1 - pi) * prob)\n y_1 = tf.math.log(1 - pi) + log_prob\n return tf.where(x <= eps, y_0, y_1)\n\n def _prob(self, x):\n return tf.math.exp(self._log_prob(x))\n\n def _sample_n(self, n, seed):\n with tf.compat.v1.control_dependencies(self._runtime_assertions):\n seed = SeedStream(seed, salt='ZeroInflated')\n mask = self.inflated_distribution.sample(n, seed())\n samples = self.count_distribution.sample(n, seed())\n mask, samples = _broadcast_rate(mask, samples)\n return 
samples * tf.cast(1 - mask, samples.dtype)\n\n def denoised_mean(self):\n return self.count_distribution.mean()\n\n def denoised_variance(self):\n return self.count_distribution.variance()","sub_path":"pycfiles/odin_ai-1.2.0-py3.6/zero_inflated.cpython-36.py","file_name":"zero_inflated.cpython-36.py","file_ext":"py","file_size_in_byte":10269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568158683","text":"import ctypes\n\ndef BiDArray_to_PointerPointer(array,dim,type='double'):\n\n\tbidimensionalArray=tuple(tuple(i) for i in array)\n\tprint(bidimensionalArray)\n\n\tif type=='double':\n\n\t\tTypeArray=ctypes.c_double*dim\n\t\tTypeArrayArray=TypeArray*dim\n\t\tTypePointer=ctypes.POINTER(ctypes.c_double)\n\t\tTypePointerPointer=ctypes.POINTER(TypePointer)\n\t\tTypePointerArray=TypePointer*dim\n\n\tif type=='int':\n\n\t\tTypeArray=ctypes.c_int*dim\n\t\tTypeArrayArray=TypeArray*dim\n\t\tTypePointer=ctypes.POINTER(ctypes.c_int)\n\t\tTypePointerPointer=ctypes.POINTER(TypePointer)\n\t\tTypePointerArray=TypePointer*dim\n\n\tbidimensionalArray=TypeArrayArray(*bidimensionalArray)\n\tbidimensionalArray_2=TypePointerArray(*(ctypes.cast(i,TypePointer) for i in bidimensionalArray))\n\tbidimensionalArray_2=ctypes.cast(bidimensionalArray_2,TypePointerPointer)\n\n\treturn bidimensionalArray_2\n\n\n","sub_path":"Estudo/Metodos numericos/MultiDArray_to_PointerPointer.py","file_name":"MultiDArray_to_PointerPointer.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"509432168","text":"from Interfaces import AzureOMSInterface, SqlInterface, GraylogInterface, PRTGInterface, FileInterface, \\\n AzureTableInterface, AzureBlobInterface\nimport AuditLogSubscriber\nimport ApiConnection\nimport os\nimport sys\nimport yaml\nimport time\nimport json\nimport logging\nimport datetime\nimport argparse\nimport collections\nimport threading\n\n\nclass AuditLogCollector(ApiConnection.ApiConnection):\n\n def __init__(self, content_types=None, resume=True, fallback_time=None, skip_known_logs=True,\n log_path='collector.log', debug=False, auto_subscribe=False, max_threads=20, retries=3,\n retry_cooldown=3, file_output=False, sql_output=False, graylog_output=False, azure_table_output=False,\n azure_blob_output=False, azure_oms_output=False, prtg_output=False, **kwargs):\n \"\"\"\n Object that can retrieve all available content blobs for a list of content types and then retrieve those\n blobs and output them to a file or Graylog input (i.e. send over a socket).\n :param content_types: list of content types to retrieve (e.g. 
'Audit.Exchange', 'Audit.Sharepoint')\n :param resume: Resume from last known run time for each content type (Bool)\n :param fallback_time: if no last run times are found to resume from, run from this start time (Datetime)\n :param retries: Times to retry retrieving a content blob if it fails (int)\n :param retry_cooldown: Seconds to wait before retrying retrieving a content blob (int)\n :param skip_known_logs: record retrieved content blobs and log ids, skip them next time (Bool)\n :param file_output: path of file to output audit logs to (str)\n :param log_path: path of file to log to (str)\n :param debug: enable debug logging (Bool)\n :param auto_subscribe: automatically subscribe to audit log feeds for which content is retrieved (Bool)\n :param graylog_output: Enable graylog Interface (Bool)\n :param azure_oms_output: Enable Azure workspace analytics OMS Interface (Bool)\n :param prtg_output: Enable PRTG output (Bool)\n \"\"\"\n super().__init__(**kwargs)\n self.content_types = content_types or collections.deque()\n self.resume = resume\n self._fallback_time = fallback_time or datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(\n hours=23)\n self.retries = retries\n self.retry_cooldown = retry_cooldown\n self.max_threads = max_threads\n self.skip_known_logs = skip_known_logs\n self.log_path = log_path\n self.debug = debug\n self.auto_subscribe = auto_subscribe\n self.filters = {}\n\n self.file_output = file_output\n self.file_interface = FileInterface.FileInterface(**kwargs)\n self.azure_table_output = azure_table_output\n self.azure_table_interface = AzureTableInterface.AzureTableInterface(**kwargs)\n self.azure_blob_output = azure_blob_output\n self.azure_blob_interface = AzureBlobInterface.AzureBlobInterface(**kwargs)\n self.azure_oms_output = azure_oms_output\n self.azure_oms_interface = AzureOMSInterface.AzureOMSInterface(**kwargs)\n self.sql_output = sql_output\n self.sql_interface = SqlInterface.SqlInterface(**kwargs)\n self.graylog_output = graylog_output\n self.graylog_interface = GraylogInterface.GraylogInterface(**kwargs)\n self.prtg_output = prtg_output\n self.prtg_interface = PRTGInterface.PRTGInterface(**kwargs)\n\n self._last_run_times = {}\n self._known_content = {}\n self._known_logs = {}\n\n self.blobs_to_collect = collections.defaultdict(collections.deque)\n self.monitor_thread = threading.Thread()\n self.retrieve_available_content_threads = collections.deque()\n self.retrieve_content_threads = collections.deque()\n self.run_started = None\n self.logs_retrieved = 0\n self.errors_retrieving = 0\n\n @property\n def all_interfaces(self):\n\n return {self.file_interface: self.file_output, self.azure_table_interface: self.azure_table_output,\n self.azure_blob_interface: self.azure_blob_output, self.azure_oms_interface: self.azure_oms_output,\n self.sql_interface: self.sql_output, self.graylog_interface: self.graylog_output,\n self.prtg_interface: self.prtg_output}\n\n @property\n def all_enabled_interfaces(self):\n\n return [interface for interface, enabled in self.all_interfaces.items() if enabled]\n\n @property\n def all_content_types(self):\n \"\"\"\n :return: list of str\n \"\"\"\n return ['Audit.General', 'Audit.AzureActiveDirectory', 'Audit.Exchange', 'Audit.SharePoint', 'DLP.All']\n\n def load_config(self, path):\n \"\"\"\n Load a YML config containing settings for this collector and its' interfaces.\n :param path: str\n \"\"\"\n with open(path, 'r') as ofile:\n config = yaml.safe_load(ofile)\n self._load_log_config(config=config)\n 
self._load_collect_config(config=config)\n self._load_filter_config(config=config)\n self._load_output_config(config=config)\n\n def _load_log_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n az_logger = logging.getLogger(\"azure.core.pipeline.policies.http_logging_policy\")\n az_logger.setLevel(logging.WARNING)\n if 'log' in config['collect']:\n if 'path' in config['collect']['log']:\n self.log_path = config['collect']['log']['path']\n if 'debug' in config['collect']['log']:\n self.debug = config['collect']['log']['debug']\n\n def _load_collect_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'collect' in config:\n if 'contentTypes' in config['collect']:\n self.content_types = [\n x for x in self.all_content_types if x in config['collect']['contentTypes'] and\n config['collect']['contentTypes'][x] is True]\n if 'maxThreads' in config['collect']:\n self.max_threads = config['collect']['maxThreads']\n if 'retries' in config['collect']:\n self.retries = config['collect']['retries']\n if 'retryCooldown' in config['collect']:\n self.retry_cooldown = config['collect']['retryCooldown']\n if 'autoSubscribe' in config['collect']:\n self.auto_subscribe = config['collect']['autoSubscribe']\n if 'skipKnownLogs' in config['collect']:\n self.skip_known_logs = config['collect']['skipKnownLogs']\n if 'resume' in config['collect']:\n self.resume = config['collect']['resume']\n if 'hoursToCollect' in config['collect']:\n self._fallback_time = datetime.datetime.now(datetime.timezone.utc) -\\\n datetime.timedelta(hours=config['collect']['hoursToCollect'])\n\n def _load_filter_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'filter' in config and config['filter']:\n self.filters = config['filter']\n\n def _load_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'output' in config:\n self._load_file_output_config(config=config)\n self._load_azure_log_analytics_output_config(config=config)\n self._load_azure_table_output_config(config=config)\n self._load_azure_blob_output_config(config=config)\n self._load_sql_output_config(config=config)\n self._load_graylog_output_config(config=config)\n self._load_prtg_output_config(config=config)\n\n def _load_file_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'file' in config['output']:\n if 'enabled' in config['output']['file']:\n self.file_output = config['output']['file']['enabled']\n if 'path' in config['output']['file']:\n self.file_interface.output_path = config['output']['file']['path']\n if 'separateByContentType' in config['output']['file']:\n self.file_interface.separate_by_content_type = config['output']['file']['separateByContentType']\n if 'separator' in config['output']['file']:\n self.file_interface.separator = config['output']['file']['separator']\n if 'cacheSize' in config['output']['file']:\n self.file_interface.cache_size = config['output']['file']['cacheSize']\n\n def _load_azure_log_analytics_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'azureLogAnalytics' in config['output']:\n if 'enabled' in config['output']['azureLogAnalytics']:\n self.azure_oms_output = config['output']['azureLogAnalytics']['enabled']\n if 'workspaceId' in config['output']['azureLogAnalytics']:\n self.azure_oms_interface.workspace_id = config['output']['azureLogAnalytics']['workspaceId']\n if 'sharedKey' in config['output']['azureLogAnalytics']:\n self.azure_oms_interface.shared_key = config['output']['azureLogAnalytics']['sharedKey']\n\n def 
_load_sql_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'sql' in config['output']:\n if 'enabled' in config['output']['sql']:\n self.sql_output = config['output']['sql']['enabled']\n if 'cacheSize' in config['output']['sql']:\n self.sql_interface.cache_size = config['output']['sql']['cacheSize']\n if 'chunkSize' in config['output']['sql']:\n self.sql_interface.chunk_size = config['output']['sql']['chunkSize']\n\n def _load_azure_table_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'azureTable' in config['output']:\n if 'enabled' in config['output']['azureTable']:\n self.azure_table_output = config['output']['azureTable']['enabled']\n if 'tableName' in config['output']['azureTable']:\n self.azure_table_interface.table_name = config['output']['azureTable']['tableName']\n\n def _load_azure_blob_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'azureBlob' in config['output']:\n if 'enabled' in config['output']['azureBlob']:\n self.azure_blob_output = config['output']['azureBlob']['enabled']\n if 'containerName' in config['output']['azureBlob']:\n self.azure_blob_interface.container_name = config['output']['azureBlob']['containerName']\n if 'blobName' in config['output']['azureBlob']:\n self.azure_blob_interface.blob_name = config['output']['azureBlob']['blobName']\n if 'tempPath' in config['output']['azureBlob']:\n self.azure_blob_interface.output_path = config['output']['azureBlob']['tempPath']\n if 'separateByContentType' in config['output']['azureBlob']:\n self.azure_blob_interface.separate_by_content_type = config['output']['azureBlob']['separateByContentType']\n if 'separator' in config['output']['azureBlob']:\n self.azure_blob_interface.separator = config['output']['azureBlob']['separator']\n if 'cacheSize' in config['output']['azureBlob']:\n self.azure_blob_interface.cache_size = config['output']['azureBlob']['cacheSize']\n\n def _load_graylog_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'graylog' in config['output']:\n if 'enabled' in config['output']['graylog']:\n self.graylog_output = config['output']['graylog']['enabled']\n if 'address' in config['output']['graylog']:\n self.graylog_interface.gl_address = config['output']['graylog']['address']\n if 'port' in config['output']['graylog']:\n self.graylog_interface.gl_port = config['output']['graylog']['port']\n\n def _load_prtg_output_config(self, config):\n \"\"\"\n :param config: str\n \"\"\"\n if 'prtg' in config['output']:\n if 'enabled' in config['output']['prtg']:\n self.prtg_output = config['output']['prtg']['enabled']\n self.prtg_interface.config = config['output']['prtg']\n\n def init_logging(self):\n \"\"\"\n Start logging to file and console. 
If PRTG output is enabled do not log to console, as this will interfere with\n the sensor result.\n \"\"\"\n logger = logging.getLogger()\n file_handler = logging.FileHandler(self.log_path, mode='w')\n if not self.prtg_output:\n stream_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO if not self.debug else logging.DEBUG)\n\n def _prepare_to_run(self):\n \"\"\"\n Make sure that self.run_once can be called multiple times by resetting to an initial state.\n \"\"\"\n if self.auto_subscribe:\n self._auto_subscribe()\n if self.resume:\n self._get_last_run_times()\n if self.skip_known_logs:\n self._known_content.clear()\n self._known_logs.clear()\n self._clean_known_content()\n self._clean_known_logs()\n self.logs_retrieved = 0\n for interface in self.all_enabled_interfaces:\n interface.successfully_sent = 0\n interface.unsuccessfully_sent = 0\n self.run_started = datetime.datetime.now()\n\n def run_once(self, start_time=None):\n \"\"\"\n Check available content and retrieve it, then exit.\n \"\"\"\n logging.log(level=logging.INFO, msg='Starting run @ {}. Content: {}.'.format(\n datetime.datetime.now(), self.content_types))\n self._prepare_to_run()\n self._start_monitoring()\n self._get_all_available_content(start_time=start_time)\n self.monitor_thread.join()\n self._finish_run()\n\n def _finish_run(self):\n \"\"\"\n Save relevant information and output PRTG result if the interface is enabled. The other interfaces output\n while collecting.\n \"\"\"\n if self.skip_known_logs:\n self._add_known_log()\n self._add_known_content()\n if self.resume and self._last_run_times:\n with open('last_run_times', 'w') as ofile:\n json.dump(fp=ofile, obj=self._last_run_times)\n self._log_statistics()\n\n def _log_statistics(self):\n \"\"\"\n Write run statistics to log file / console.\n \"\"\"\n logging.info(\"Finished. Total logs retrieved: {}. Total logs with errors: {}. Run time: {}.\".format(\n self.logs_retrieved, self.errors_retrieving, datetime.datetime.now() - self.run_started))\n for interface in self.all_enabled_interfaces:\n logging.info(\"{} reports: {} successfully sent, {} errors\".format(\n interface.__class__.__name__, interface.successfully_sent, interface.unsuccessfully_sent))\n\n def _get_last_run_times(self):\n \"\"\"\n Load last_run_times file and interpret the datetime for each content type.\n \"\"\"\n if os.path.exists('last_run_times'):\n try:\n with open('last_run_times', 'r') as ofile:\n self._last_run_times = json.load(ofile)\n except Exception as e:\n logging.error(\"Could not read last run times file: {}.\".format(e))\n for content_type, last_run_time in self._last_run_times.items():\n try:\n self._last_run_times[content_type] = datetime.datetime.strptime(last_run_time, \"%Y-%m-%dT%H:%M:%SZ\")\n except Exception as e:\n logging.error(\"Could not read last run time for content type {}: {}.\".format(content_type, e))\n del self._last_run_times[content_type]\n\n @property\n def done_retrieving_content(self):\n \"\"\"\n Returns True if there are no more content blobs to be collected. 
Used to determine when to exit the script.\n :return: Bool\n \"\"\"\n for content_type in self.blobs_to_collect:\n if self.blobs_to_collect[content_type]:\n return False\n return True\n\n @property\n def done_collecting_available_content(self):\n \"\"\"\n Once a call is made to retrieve content for a particular type, and there is no 'NextPageUri' in the response,\n the type is removed from 'self.content_types' to signal that all available content has been retrieved for that\n type.\n \"\"\"\n return not bool(self.content_types)\n\n def _start_monitoring(self):\n \"\"\"\n Start a thread monitoring the list containing blobs that need collecting.\n \"\"\"\n self.monitor_thread = threading.Thread(target=self._monitor_blobs_to_collect, daemon=True)\n self.monitor_thread.start()\n\n def _auto_subscribe(self):\n \"\"\"\n Subscribe to all content types that are set to be retrieved.\n \"\"\"\n subscriber = AuditLogSubscriber.AuditLogSubscriber(tenant_id=self.tenant_id, client_key=self.client_key,\n secret_key=self.secret_key)\n status = subscriber.get_sub_status()\n if status == '':\n raise RuntimeError(\"Auto subscribe enabled but could not get subscription status\")\n unsubscribed_content_types = self.content_types.copy()\n for s in status:\n if s['contentType'] in self.content_types and s['status'].lower() == 'enabled':\n unsubscribed_content_types.remove(s['contentType'])\n for content_type in unsubscribed_content_types:\n logging.info(\"Auto subscribing to: {}\".format(content_type))\n subscriber.set_sub_status(content_type=content_type, action='start')\n\n def _get_all_available_content(self, start_time=None):\n \"\"\"\n Start a thread to retrieve available content blobs for each content type to be collected.\n :param start_time: DateTime\n \"\"\"\n for content_type in self.content_types.copy():\n if not start_time:\n if self.resume and content_type in self._last_run_times.keys():\n start_time = self._last_run_times[content_type]\n else:\n start_time = self._fallback_time\n self.retrieve_available_content_threads.append(threading.Thread(\n target=self._get_available_content, daemon=True,\n kwargs={'content_type': content_type, 'start_time': start_time}))\n self.retrieve_available_content_threads[-1].start()\n\n def _get_available_content(self, content_type, start_time):\n \"\"\"\n Retrieve available content blobs for a content type. If the response contains a\n 'NextPageUri' there is more content to be retrieved; rerun until all has been retrieved.\n \"\"\"\n try:\n logging.log(level=logging.DEBUG, msg='Getting available content for type: \"{}\"'.format(content_type))\n current_time = datetime.datetime.now(datetime.timezone.utc)\n formatted_end_time = str(current_time).replace(' ', 'T').rsplit('.', maxsplit=1)[0]\n formatted_start_time = str(start_time).replace(' ', 'T').rsplit('.', maxsplit=1)[0]\n logging.info(\"Retrieving {}. Start time: {}. 
End time: {}.\".format(\n content_type, formatted_start_time, formatted_end_time))\n response = self.make_api_request(url='subscriptions/content?contentType={0}&startTime={1}&endTime={2}'.format(\n content_type, formatted_start_time, formatted_end_time))\n self.blobs_to_collect[content_type] += response.json()\n while 'NextPageUri' in response.headers.keys() and response.headers['NextPageUri']:\n logging.log(level=logging.DEBUG, msg='Getting next page of content for type: \"{0}\"'.format(content_type))\n self.blobs_to_collect[content_type] += response.json()\n response = self.make_api_request(url=response.headers['NextPageUri'], append_url=False)\n logging.log(level=logging.DEBUG, msg='Got {0} content blobs of type: \"{1}\"'.format(\n len(self.blobs_to_collect[content_type]), content_type))\n except Exception as e:\n logging.log(level=logging.DEBUG, msg=\"Error while getting available content: {}: {}\".format(\n content_type, e))\n self.content_types.remove(content_type)\n else:\n self.content_types.remove(content_type)\n self._last_run_times[content_type] = start_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n def _start_interfaces(self):\n\n for interface in self.all_enabled_interfaces:\n interface.start()\n\n def _stop_interfaces(self):\n\n for interface in self.all_enabled_interfaces:\n interface.stop()\n\n def _monitor_blobs_to_collect(self):\n \"\"\"\n Wait for the 'retrieve_available_content' function to retrieve content URI's. Once they become available\n start retrieving in a background thread.\n \"\"\"\n self._start_interfaces()\n threads = collections.deque()\n while True:\n threads = [thread for thread in threads if thread.is_alive()]\n if self.done_collecting_available_content and self.done_retrieving_content and not threads:\n break\n if not self.blobs_to_collect:\n continue\n for content_type, blobs_to_collect in self.blobs_to_collect.copy().items():\n if len(threads) >= self.max_threads:\n break\n if self.blobs_to_collect[content_type]:\n blob_json = self.blobs_to_collect[content_type].popleft()\n self._collect_blob(blob_json=blob_json, content_type=content_type, threads=threads)\n self._stop_interfaces()\n\n def _collect_blob(self, blob_json, content_type, threads):\n \"\"\"\n Collect a single content blob in a thread.\n :param blob_json: JSON\n :param content_type: str\n :param threads: list\n \"\"\"\n if blob_json and 'contentUri' in blob_json:\n logging.log(level=logging.DEBUG, msg='Retrieving content blob: \"{0}\"'.format(blob_json))\n threads.append(threading.Thread(\n target=self._retrieve_content, daemon=True,\n kwargs={'content_json': blob_json, 'content_type': content_type, 'retries': self.retries}))\n threads[-1].start()\n\n def _retrieve_content(self, content_json, content_type, retries):\n \"\"\"\n Get an available content blob. If it exists in the list of known content blobs it is skipped to ensure\n idempotence.\n :param content_json: JSON dict of the content blob as retrieved from the API (dict)\n :param content_type: Type of API being retrieved for, e.g. 
'Audit.Exchange' (str)\n :param retries: Times to retry retrieving a content blob if it fails (int)\n \"\"\"\n if self.skip_known_logs and self.known_content and content_json['contentId'] in self.known_content:\n return\n try:\n results = self.make_api_request(url=content_json['contentUri'], append_url=False).json()\n if not results:\n return\n except Exception as e:\n if retries:\n time.sleep(self.retry_cooldown)\n return self._retrieve_content(content_json=content_json, content_type=content_type, retries=retries - 1)\n else:\n self.errors_retrieving += 1\n logging.error(\"Error retrieving content: {}\".format(e))\n return\n else:\n self._handle_retrieved_content(content_json=content_json, content_type=content_type, results=results)\n\n def _handle_retrieved_content(self, content_json, content_type, results):\n \"\"\"\n Check known logs, filter results and output what remains.\n :param content_json: JSON dict of the content blob as retrieved from the API (dict)\n :param content_type: Type of API being retrieved for, e.g. 'Audit.Exchange' (str)\n :param results: list of JSON\n \"\"\"\n\n if self.skip_known_logs:\n self._known_content[content_json['contentId']] = content_json['contentExpiration']\n for log in results.copy():\n if self.skip_known_logs:\n if log['Id'] in self.known_logs:\n results.remove(log)\n continue\n self.known_logs[log['Id']] = log['CreationTime']\n if self.filters and not self._check_filters(log=log, content_type=content_type):\n results.remove(log)\n self.logs_retrieved += len(results)\n self._output_results(results=results, content_type=content_type)\n\n def _output_results(self, results, content_type):\n \"\"\"\n :param content_type: Type of API being retrieved for, e.g. 'Audit.Exchange' (str)\n :param results: list of JSON\n \"\"\"\n for interface in self.all_enabled_interfaces:\n interface.send_messages(*results, content_type=content_type)\n\n def _check_filters(self, log, content_type):\n \"\"\"\n :param log: JSON\n :param content_type: Type of API being retrieved for, e.g. 'Audit.Exchange' (str)\n :return: True if log matches filter, False if not (Bool)\n \"\"\"\n if content_type in self.filters and self.filters[content_type]:\n for log_filter_key, log_filter_value in self.filters[content_type].items():\n if log_filter_key not in log or log[log_filter_key].lower() != log_filter_value.lower():\n return False\n return True\n\n def _add_known_log(self):\n \"\"\"\n Add a content ID to the known content file to avoid saving messages more than once.\n :return:\n \"\"\"\n with open('known_logs', 'w') as ofile:\n for log_id, creation_time in self.known_logs.items():\n ofile.write('{},{}\\n'.format(log_id, creation_time))\n\n def _add_known_content(self):\n \"\"\"\n Add a content ID to the known content file to avoid saving messages more than once.\n :return:\n \"\"\"\n with open('known_content', 'w') as ofile:\n for content_id, content_expiration in self.known_content.items():\n ofile.write('{0},{1}\\n'.format(content_id, content_expiration))\n\n def _clean_known_logs(self):\n \"\"\"\n Remove any known content ID's that have expired. 
Can't download a duplicate if it is not available for\n download.\n \"\"\"\n known_logs = self.known_logs\n if os.path.exists('known_logs'):\n os.remove('known_logs')\n for log_id, creation_time in known_logs.copy().items():\n try:\n date = datetime.datetime.strptime(creation_time.strip()+'Z', \"%Y-%m-%dT%H:%M:%S%z\")\n expire_date = date + datetime.timedelta(days=7)\n if not datetime.datetime.now(datetime.timezone.utc) < expire_date:\n del self.known_logs[log_id]\n except Exception as e:\n logging.debug(\"Could not parse known logs: {}\".format(e))\n del self.known_logs[log_id]\n if not known_logs:\n return\n with open('known_logs', mode='w') as ofile:\n for log_id, creation_time in known_logs.items():\n ofile.write(\"{},{}\\n\".format(log_id, creation_time.strip()))\n\n def _clean_known_content(self):\n \"\"\"\n Remove any known content ID's that have expired. Can't download a duplicate if it is not available for\n download.\n \"\"\"\n known_content = self.known_content\n if os.path.exists('known_content'):\n os.remove('known_content')\n for content_id, expire_date in known_content.copy().items():\n try:\n date = datetime.datetime.strptime(expire_date, \"%Y-%m-%dT%H:%M:%S.%f%z\")\n if not datetime.datetime.now(datetime.timezone.utc) < date:\n del known_content[content_id]\n except Exception as e:\n logging.debug(\"Could not parse known content: {}\".format(e))\n del known_content[content_id]\n if not known_content:\n return\n with open('known_logs', 'w') as ofile:\n for content_id, expire_date in known_content.items():\n ofile.write(\"{},{}\\n\".format(content_id, expire_date))\n\n @property\n def known_logs(self):\n \"\"\"\n Parse and return known content file.\n :return: {content_id: content_expiration_date} dict\n \"\"\"\n if not self._known_logs and os.path.exists('known_logs'):\n with open('known_logs', 'r') as ofile:\n for line in ofile.readlines():\n if not line.strip():\n continue\n try:\n self._known_logs[line.split(',')[0].strip()] = line.split(',')[1]\n except:\n continue\n return self._known_logs\n\n @property\n def known_content(self):\n \"\"\"\n Parse and return known content file.\n :return: {content_id: content_expiration_date} dict\n \"\"\"\n if not self._known_content and os.path.exists('known_content'):\n with open('known_content', 'r') as ofile:\n for line in ofile.readlines():\n if not line.strip():\n continue\n try:\n self._known_content[line.split(',')[0].strip()] = line.split(',')[1]\n except:\n continue\n return self._known_content\n\n\nif __name__ == \"__main__\":\n\n description = \\\n \"\"\"\n Retrieve audit log contents from Office 365 API and save to file or Graylog.\n Example: Retrieve all available content and send it to Graylog (using mock ID's and keys):\n \"AuditLogCollector.py 123 456 789 --general --exchange --azure_ad --sharepoint --dlp -g -gA 10.10.10.1 -gP 5000\n \"\"\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('tenant_id', type=str, help='Tenant ID of Azure AD', action='store')\n parser.add_argument('client_key', type=str, help='Client key of Azure application', action='store')\n parser.add_argument('secret_key', type=str, help='Secret key generated by Azure application', action='store')\n parser.add_argument('--config', metavar='config', type=str, help='Path to YAML config file',\n action='store', dest='config')\n parser.add_argument('--table-string', metavar='table_string', type=str,\n help='Connection string for Azure Table output interface', action='store', dest='table_string')\n 
parser.add_argument('--blob-string', metavar='blob_string', type=str,\n help='Connection string for Azure Blob output interface', action='store', dest='blob_string')\n parser.add_argument('--sql-string', metavar='sql_string', type=str,\n help='Connection string for SQL output interface', action='store', dest='sql_string')\n parser.add_argument('--interactive-subscriber', action='store_true',\n help='Manually (un)subscribe to audit log feeds', dest='interactive_subscriber')\n parser.add_argument('--general', action='store_true', help='Retrieve General content', dest='general')\n parser.add_argument('--exchange', action='store_true', help='Retrieve Exchange content', dest='exchange')\n parser.add_argument('--azure_ad', action='store_true', help='Retrieve Azure AD content', dest='azure_ad')\n parser.add_argument('--sharepoint', action='store_true', help='Retrieve SharePoint content', dest='sharepoint')\n parser.add_argument('--dlp', action='store_true', help='Retrieve DLP content', dest='dlp')\n parser.add_argument('-p', metavar='publisher_id', type=str, help='Publisher GUID to avoid API throttling',\n action='store', dest='publisher_id',\n default=os.path.join(os.path.dirname(__file__), 'AuditLogCollector.log'))\n parser.add_argument('-r',\n help='Look for last run time and resume looking for content from there (takes precedence over '\n '-tH and -tD)', action='store_true', dest='resume')\n parser.add_argument('-tH', metavar='time_hours', type=int, help='Amount of hours to go back and look for content',\n action='store', dest='time_hours')\n parser.add_argument('-s',\n help='Keep track of each retrieved log ID and skip it in the future to prevent duplicates',\n action='store_true', dest='skip_known_logs')\n parser.add_argument('-l', metavar='log_path', type=str, help='Path of log file', action='store', dest='log_path',\n default=os.path.join(os.path.dirname(__file__), 'AuditLogCollector.log'))\n parser.add_argument('-d', action='store_true', dest='debug_logging',\n help='Enable debug logging (generates large log files and decreases performance).')\n parser.add_argument('-f', help='Output to file.', action='store_true', dest='file')\n parser.add_argument('-fP', metavar='file_output_path', type=str, help='Path of directory of output files',\n default=os.path.join(os.path.dirname(__file__), 'output'), action='store',\n dest='output_path')\n parser.add_argument('-P', help='Output to PRTG with PrtgConfig.yaml.', action='store_true', dest='prtg')\n parser.add_argument('-a', help='Output to Azure Log Analytics workspace.', action='store_true', dest='azure')\n parser.add_argument('-aC', metavar='azure_workspace', type=str, help='ID of log analytics workspace.',\n action='store', dest='azure_workspace')\n parser.add_argument('-aS', metavar='azure_key', type=str, help='Shared key of log analytics workspace.',\n action='store', dest='azure_key')\n parser.add_argument('-g', help='Output to graylog.', action='store_true', dest='graylog')\n parser.add_argument('-gA', metavar='graylog_address', type=str, help='Address of graylog server.', action='store',\n dest='graylog_addr')\n parser.add_argument('-gP', metavar='graylog_port', type=str, help='Port of graylog server.', action='store',\n dest='graylog_port')\n args = parser.parse_args()\n argsdict = vars(args)\n\n if argsdict['interactive_subscriber']:\n subscriber = AuditLogSubscriber.AuditLogSubscriber(\n tenant_id=argsdict['tenant_id'], secret_key=argsdict['secret_key'], client_key=argsdict['client_key'])\n subscriber.interactive()\n quit(0)\n\n 
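# --- Illustrative sketch only (not part of the original record): subscribing
# programmatically rather than through --interactive-subscriber, reusing the
# AuditLogSubscriber calls shown in _auto_subscribe above. The tenant, client
# and secret values are placeholders.
subscriber = AuditLogSubscriber.AuditLogSubscriber(
    tenant_id='<tenant-guid>', client_key='<client-guid>', secret_key='<secret>')
status = subscriber.get_sub_status()
enabled = {s['contentType'] for s in status if s['status'].lower() == 'enabled'}
for content_type in ('Audit.General', 'Audit.Exchange'):
    if content_type not in enabled:
        subscriber.set_sub_status(content_type=content_type, action='start')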
content_types = []\n if argsdict['general']:\n content_types.append('Audit.General')\n if argsdict['exchange']:\n content_types.append('Audit.Exchange')\n if argsdict['sharepoint']:\n content_types.append('Audit.Sharepoint')\n if argsdict['azure_ad']:\n content_types.append('Audit.AzureActiveDirectory')\n if argsdict['dlp']:\n content_types.append('DLP.All')\n\n fallback_time = None\n if argsdict['time_hours']:\n fallback_time = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=argsdict['time_hours'])\n\n collector = AuditLogCollector(\n tenant_id=argsdict['tenant_id'], secret_key=argsdict['secret_key'], client_key=argsdict['client_key'],\n content_types=content_types, publisher_id=argsdict['publisher_id'], resume=argsdict['resume'],\n fallback_time=fallback_time, skip_known_logs=argsdict['skip_known_logs'], log_path=argsdict['log_path'],\n file_output=argsdict['file'], path=argsdict['output_path'], debug=argsdict['debug_logging'],\n prtg_output=argsdict['prtg'],\n azure_oms_output=argsdict['azure'], workspace_id=argsdict['azure_workspace'],\n shared_key=argsdict['azure_key'],\n gl_address=argsdict['graylog_addr'], gl_port=argsdict['graylog_port'],\n graylog_output=argsdict['graylog'],\n sql_connection_string=argsdict['sql_string'], table_connection_string=argsdict['table_string'],\n blob_connection_string=argsdict['blob_string'])\n if argsdict['config']:\n collector.load_config(path=argsdict['config'])\n collector.init_logging()\n collector.run_once()\n\n\n","sub_path":"Source/AuditLogCollector.py","file_name":"AuditLogCollector.py","file_ext":"py","file_size_in_byte":36891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"573472609","text":"import pandas as pd\n\ncsv = 'btc'\n\ndef pandasclean(csv):\n df = pd.read_csv('cryptodata/'+ csv +'usd.csv',parse_dates=['Date'],usecols=[1,2,3,4,5])\n df = df.set_index('Date').sort_index(ascending=True)\n df.dropna(inplace=True)\n return df\n\nprint(pandasclean(csv))","sub_path":"cryptodata/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"391378822","text":"import sys\nimport pickle\nfrom collections import defaultdict\nfrom random import shuffle\n\nsys.path.insert(0, '../')\n\nimport config\nfrom data_utils import (make_conll_format2, make_embedding, make_vocab,\n make_vocab_from_dm, process_file)\n\n\n\n\ndef make_sent_dataset():\n train_src_file = \"./para-train.txt\"\n train_trg_file = \"./tgt-train.txt\"\n\n embedding_file = \"./glove.840B.300d.txt\"\n embedding = \"./embedding.pkl\"\n word2idx_file = \"./word2idx.pkl\"\n # make vocab file\n word2idx = make_vocab(train_src_file, train_trg_file, word2idx_file, config.vocab_size)\n make_embedding(embedding_file, embedding, word2idx)\n\n\ndef make_para_dataset():\n embedding_file = \"./glove.840B.300d.txt\"\n embedding = \"./embedding.pkl\"\n src_word2idx_file = \"./word2idx.pkl\"\n\n train_squad = \"../squad/train-v1.1.json\"\n dev_squad = \"../squad/dev-v1.1.json\"\n\n train_src_file = \"../squad/para-train.txt\"\n train_trg_file = \"../squad/tgt-train.txt\"\n dev_src_file = \"../squad/para-dev.txt\"\n dev_trg_file = \"../squad/tgt-dev.txt\"\n\n test_src_file = \"../squad/para-test.txt\"\n test_trg_file = \"../squad/tgt-test.txt\"\n\n # pre-process training data\n # train_examples have passage question pairs, counter is the word frequency across all passages\n # question and passages are 
represented as a list of tokens\n with open('../cnn-dailymail/cnn_examples.pkl', 'rb') as f:\n cnn = pickle.load(f)\n print('loaded cnn')\n # with open('../cnn-dailymail/dm_examples.pkl','rb') as f:\n # dm = pickle.load(f)\n# print('loaded dailymail')\n\n counter = defaultdict(int)\n\n examples = cnn\n shuffle(examples)\n train_size = int(len(examples) * 0.92)\n train_examples = examples[:train_size]\n print(len(train_examples))\n dev_test_examples = examples[train_size:]\n print(len(train_examples), len(dev_test_examples))\n for e in train_examples:\n for token in e['context_tokens']:\n counter[token] += 1\n make_conll_format2(train_examples, train_src_file, train_trg_file)\n # make a dict mapping word to unique index\n word2idx = make_vocab_from_dm(src_word2idx_file, counter, config.vocab_size)\n # makes a dict mapping words from all passages to embedding vectors\n make_embedding(embedding_file, embedding, word2idx)\n\n # split dev into dev and test\n # random.shuffle(dev_test_examples)\n num_dev = len(dev_test_examples) // 2\n dev_examples = dev_test_examples[:num_dev]\n test_examples = dev_test_examples[num_dev:]\n make_conll_format2(dev_examples, dev_src_file, dev_trg_file)\n make_conll_format2(test_examples, test_src_file, test_trg_file)\n\n\nif __name__ == \"__main__\":\n # make_sent_dataset()\n make_para_dataset()\n\n","sub_path":"data/dm_process.py","file_name":"dm_process.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156924266","text":"#!/usr/bin/env python\n#\n# Author: Thamme Gowda [tg at isi dot edu] \n# Created: 10/17/18\nimport torch\nimport rtg\nfrom rtg import log, yaml, TranslationExperiment as Experiment, device, BatchIterable\nfrom rtg.module import NMTModel\nfrom rtg.utils import IO\nfrom rtg.module import criterion as criteria\n\nfrom abc import abstractmethod\nfrom typing import Optional, Callable, List\nfrom dataclasses import dataclass, field\nimport time\n\nfrom torch import optim\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.tensorboard import SummaryWriter\nfrom enum import Enum\nimport inspect\nfrom pathlib import Path\nfrom rtg.distrib import DistribTorch\n\n\n\ndtorch = DistribTorch.instance()\n\n\n\nclass NoamOpt(Optimizer):\n \"\"\"\n Optimizer wrapper that implements learning rate as a function of step.\n\n If inv_sqrt==True:\n - Linear warmup followed by inverse sqrt decay.\n - Uses learning rate in conf.yml as maximum learning rate after warmup\n\n Modeled after FairSeq's Inverse Square Root LR Scheduler:\n https://github.com/pytorch/fairseq/blob/master/fairseq/optim/lr_scheduler/\n inverse_square_root_schedule.py\n\n Else:\n - Independent of learning rate set in conf.yml\n\n Modeled after The Annotated Transformer's LR Scheduler:\n https://nlp.seas.harvard.edu/2018/04/03/attention.html\n \"\"\"\n\n def __init__(self, model_size, factor, warmup, optimizer: Optimizer, step=0, inv_sqrt=False):\n super().__init__(params=optimizer.param_groups, defaults=dict(warmup=warmup, step=step))\n self.optimizer = optimizer\n self._step = step\n self.warmup = warmup\n self.factor = factor\n self.model_size = model_size\n\n self.inv_sqrt = inv_sqrt\n lr = optimizer.defaults['lr']\n self.warmup_rate = lr / warmup\n self.decay_factor = lr * warmup ** 0.5\n\n self._rate = 0\n log.info(f\"model_size={model_size}, factor={factor}, warmup={warmup}, step={step}, \"\n f\"inv_sqrt={inv_sqrt}\")\n\n def step(self, closure=None):\n \"Update parameters and 
rate\"\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step(closure=closure)\n\n @property\n def curr_step(self):\n return self._step\n\n @property\n def curr_lr(self):\n return self._rate\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def rate(self, step=None):\n \"Implement `lrate` above\"\n if step is None:\n step = self._step\n if self.inv_sqrt:\n if step < self.warmup:\n lr = self.warmup_rate * step\n else:\n lr = self.decay_factor * step ** (-0.5)\n else:\n lr = self.factor * self.model_size ** (-0.5) * min(step ** (-0.5),\n step * self.warmup ** (-1.5))\n return lr\n\n @staticmethod\n def get_std_opt(model):\n return NoamOpt(model.src_embed[0].d_model, 2, 4000,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\n\nclass Optims(Enum):\n ADAM = optim.Adam\n ADAMW = optim.AdamW\n SGD = optim.SGD\n\n def new(self, parameters, lr=0.001, **args):\n log.info(f\"Creating {self.value} optimizer with lr={lr} and extra args:{args}\")\n log.info(f\" {self.value}, default arguments {inspect.signature(self.value)}\")\n return self.value(parameters, lr=lr, **args)\n\n @staticmethod\n def names():\n return list(Optims.__members__.keys())\n\n\n@dataclass\nclass TrainerState:\n \"\"\"\n A dataclass for storing any running stats the trainer needs to keep track during training\n \"\"\"\n\n model: NMTModel\n check_point: int\n total_toks: int = 0\n total_loss: float = 0.0\n steps: int = 0\n start: float = time.time()\n\n def running_loss(self):\n return self.total_loss / self.steps if self.steps > 0 else float('inf')\n\n def reset(self):\n loss = self.running_loss()\n self.total_toks = 0\n self.total_loss = 0.0\n self.steps = 0\n self.start = time.time()\n return loss\n\n def train_mode(self, mode: bool):\n torch.set_grad_enabled(mode)\n self.model.train(mode)\n\n def step(self, toks, loss):\n self.steps += 1\n self.total_toks += toks\n self.total_loss += loss\n return self.progress_bar_msg(), self.is_check_point()\n\n def progress_bar_msg(self):\n elapsed = time.time() - self.start\n return f'Loss:{self.running_loss():.4f},' \\\n f' {int(self.total_toks / elapsed)}toks/s'\n\n def is_check_point(self):\n return self.steps == self.check_point\n\n\n@dataclass\nclass EarlyStopper:\n \"\"\"\n A data model to track early stopping state\n \"\"\"\n enabled: bool = True\n by: str = 'loss'\n patience: int = 15\n min_steps: int = 0\n cur_step: int = 0\n signi_round: int = 4 # integer either positive or negative\n # these many digits are significant round(100, -1) => 30.0 round(100, 1) => 33.3\n measures: List[float] = field(default_factory=list) # could be loss or accuracy\n\n buf = 3 # take average of these many points; avoids weird dips and surges as stop\n minimizing = True # minimize loss, maximize accuracy\n\n def __post_init__(self):\n if self.enabled:\n assert self.patience > 0, f'early_stop.patience > 0 ? 
given={self.patience}'\n assert 1 <= self.buf <= self.patience\n log.info(f\"Early Stop Enabled;\")\n\n if self.by in {'loss'}:\n self.minimizing = True\n elif self.by in {'bleu', 'accuracy'}:\n self.minimizing = False # maximizing\n else:\n raise Exception(f'{self.by} is not supported')\n\n def step(self):\n self.cur_step += 1\n return self.cur_step\n\n def validation(self, val):\n self.measures.append(val)\n\n def is_stop(self):\n if not self.enabled:\n return False\n if self.cur_step < self.min_steps:\n # hasn't reached minimum steps; dont stop\n return False\n if len(self.measures) < (self.patience + self.buf + 1):\n # hasn't accumulated enough data points, dont stop\n return False\n\n # The old value; with some buffer around to avoid weird dips and surges\n old = (self.measures[-self.patience - self.buf: -self.patience])\n old = sum(old) / len(old) # mean\n recent = self.measures[-self.patience:] # the patience of seeing the post mark\n\n if self.minimizing:\n # older value is smaller than or same as best of recent => time to stop\n should_stop = round(old, self.signi_round) <= round(min(recent), self.signi_round)\n else:\n # older value is bigger than or same as best of recent => time to stop\n should_stop = round(old, self.signi_round) >= round(max(recent), self.signi_round)\n return should_stop\n\n\nclass NoOpSummaryWriter(SummaryWriter):\n \"\"\"\n A No-Op TensorBordX for tests and such experiments that doesnt want to leave\n footprints on file system.\n Note: that this does not extend all methods of SummaryWriter\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n #super().__init__(*args, **kwargs)\n # super will create dirs, which we dont want\n pass\n\n def add_text(self, *args, **kwargs):\n pass\n\n def add_scalar(self, *args, **kwargs):\n pass\n\n def add_scalars(self, *args, **kwargs):\n pass\n\n def add_embedding(self, *args, **kwargs):\n pass\n\n\nclass SteppedTrainer:\n \"\"\"\n A base class for Trainers that use step based training (not epoch based training)\n \"\"\"\n default_optim_args = {\n 'lr': 0.01,\n 'betas': [0.9, 0.98],\n 'eps': 1e-9,\n 'amsgrad': False,\n 'weight_decay': 0,\n 'criterion': 'smooth_kld',\n 'label_smoothing': 0.1,\n 'warmup_steps': 8000,\n 'inv_sqrt': False,\n 'constant': 2\n }\n\n def __init__(self, exp: Experiment,\n model: Optional[NMTModel] = None,\n model_factory: Optional[Callable] = None,\n optim: str = 'ADAM',\n **optim_args):\n self.last_step = -1\n self.exp = exp\n optim_state = None\n if model:\n self.model = model\n else:\n args = exp.model_args\n assert args\n assert model_factory\n self.model, args = model_factory(exp=exp, **args)\n exp.model_args = args\n last_model, self.last_step = self.exp.get_last_saved_model()\n if last_model:\n log.info(f\"Resuming training from step:{self.last_step}, model={last_model}\")\n state = torch.load(last_model, map_location=device) \n model_state = state['model_state'] if 'model_state' in state else state\n\n if 'optim_state' in state:\n optim_state = state['optim_state']\n self.model.load_state_dict(model_state)\n if 'amp_state' in state and dtorch.fp16:\n log.info(\"Restoring AMP state\")\n dtorch._scaler.load_state_dict(state['amp_state'])\n else:\n log.info(\"No earlier check point found. 
Looks like this is a fresh start\")\n\n # optimizer : default args for missing fields\n for k, v in self.default_optim_args.items():\n optim_args[k] = optim_args.get(k, v)\n\n self.n_gpus = torch.cuda.device_count()\n self.device_ids = list(range(self.n_gpus))\n\n inner_opt_args = {k: optim_args[k] for k in\n ['lr', 'betas', 'eps', 'weight_decay', 'amsgrad']}\n\n self.core_model = self.model.to(device)\n\n \n trainable_params = self.exp.config['optim'].get('trainable', {})\n if trainable_params:\n if drtorch.is_distributed: # model is wrapped in DP or DistributedDP\n log.warning(f\">> Using more than 1 GPU with 'trainable' params is NOT tested\")\n trainable_params = self.core_model.get_trainable_params(include=trainable_params.get('include'),\n exclude=trainable_params.get('exclude'))\n else:\n trainable_params = self.model.parameters()\n\n inner_opt = Optims[optim].new(trainable_params, **inner_opt_args)\n self.model = dtorch.maybe_distributed(self.core_model)\n \n if optim_state:\n log.info(\"restoring optimizer state from checkpoint\")\n try:\n inner_opt.load_state_dict(optim_state) \n except Exception:\n log.exception(\"Unable to restore optimizer, skipping it.\")\n self.opt = NoamOpt(self.core_model.model_dim, optim_args['constant'], optim_args['warmup_steps'],\n inner_opt, step=self.start_step, inv_sqrt=optim_args['inv_sqrt'])\n\n if self.exp.read_only:\n self.tbd = NoOpSummaryWriter()\n else:\n self.tbd = SummaryWriter(log_dir=str(exp.work_dir / 'tensorboard' ))\n\n self.exp.optim_args = optim, optim_args\n if not self.exp.read_only:\n self.exp.persist_state()\n self.samples = None\n if exp.samples_file and exp.samples_file.exists():\n with IO.reader(exp.samples_file) as f:\n self.samples = [line.strip().split('\\t') for line in f]\n log.info(f\"Found {len(self.samples)} sample records\")\n if self.start_step == 0:\n for samp_num, sample in enumerate(self.samples):\n self.tbd.add_text(f\"sample/{samp_num}\", \" || \".join(sample), 0)\n\n from rtg.module.decoder import Decoder\n self.decoder = Decoder.new(self.exp, self.core_model)\n\n if self.start_step <= 1:\n self.maybe_init_model()\n\n self.criterion = self.create_criterion(optim_args['criterion'])\n\n @property\n def start_step(self):\n _, step = self.exp.get_last_saved_model()\n if self.exp._trained_flag.exists():\n # noinspection PyBroadException\n try:\n step = max(step, yaml.load(self.exp._trained_flag.read_text())['steps'])\n except Exception as _:\n pass\n assert step >= 0\n return step\n\n def create_criterion(self, criterion):\n log.info(f\"Criterion = {criterion}\")\n\n optim_args = self.exp.optim_args[1]\n smoothing = optim_args.get('label_smoothing', 0.0)\n margin = optim_args.get('margin', 0.0)\n mode = optim_args.get('mode', 'dot')\n neg_sampling = optim_args.get('neg_sampling', 'random')\n neg_region = optim_args.get('neg_region', 0.05)\n alpha = optim_args.get('alpha', 1.0)\n\n pad_idx = self.exp.tgt_vocab.pad_idx\n if criterion == 'smooth_kld':\n return criteria.SmoothKLD(vocab_size=self.core_model.vocab_size, smoothing=smoothing,\n pad_idx=pad_idx)\n elif criterion == 'cross_entropy':\n return criteria.CrossEntropy(pad_idx=pad_idx)\n elif criterion == 'binary_cross_entropy':\n return criteria.BinaryCrossEntropy(smoothing=smoothing, pad_idx=pad_idx)\n elif criterion == 'triplet_loss':\n tgt_embedding = self.core_model.tgt_embed[0].lut\n return criteria.TripletLoss(embedding=tgt_embedding, margin=margin,\n neg_region=neg_region,\n mode=mode, neg_sampling=neg_sampling, pad_idx=pad_idx)\n elif criterion == 
'smooth_kld_and_triplet_loss':\n tgt_embedding = self.core_model.tgt_embed[0].lut\n return criteria.SmoothKLDAndTripletLoss(\n embedding=tgt_embedding, margin=margin, neg_region=neg_region, mode=mode,\n neg_sampling=neg_sampling, smoothing=smoothing, alpha=alpha, pad_idx=pad_idx)\n else:\n raise Exception(f'criterion={criterion} is not supported')\n\n def maybe_init_model(self):\n def load_matrix(path: Path):\n return torch.load(path) if path.exists() else None\n\n src_emb_mat = load_matrix(self.exp.emb_src_file)\n if src_emb_mat is None:\n log.info(\"NOT initializing pre-trained source embedding\")\n else:\n self.core_model.init_src_embedding(src_emb_mat)\n\n tgt_emb_mat = load_matrix(self.exp.emb_tgt_file)\n if tgt_emb_mat is None:\n log.info(\"NOT Initializing pre-trained target embeddings\")\n else:\n self.core_model.init_tgt_embedding(tgt_emb_mat)\n self.core_model.maybe_init_from_parent(exp=self.exp)\n\n def show_samples(self, beam_size=3, num_hyp=3, max_len=30):\n \"\"\"\n Logs the output of model (at this stage in training) to a set of samples\n :param beam_size: beam size\n :param num_hyp: number of hypothesis to output\n :param max_len: maximum length to decode\n :return:\n \"\"\"\n if not self.samples:\n log.info(\"No samples are chosen by the experiment\")\n return\n for i, (line, ref) in enumerate(self.samples):\n step_num = self.opt.curr_step\n result = self.decoder.decode_sentence(line, beam_size=beam_size, num_hyp=num_hyp,\n max_len=max_len)\n outs = [f\"hyp{j}: {score:.3f} :: {out}\" for j, (score, out) in enumerate(result)]\n self.tbd.add_text(f'sample/{i}', \" || \".join(outs), step_num)\n outs = '\\n'.join(outs)\n log.info(f\"==={i}===\\nSRC:{line}\\nREF:{ref}\\n{outs}\")\n\n def make_check_point(self, train_loss: float, val_loss: float, keep_models: int,\n log_embedding=False):\n \"\"\"\n Check point the model\n :param train_loss: training loss value\n :param val_loss: loss on validation set\n :param keep_models: how many checkpoints to keep on file system\n :return:\n \"\"\"\n\n step_num = self.opt.curr_step\n if step_num == self.last_step:\n log.warning(\"Ignoring checkpt request\")\n return # calling multiple times doesnt save\n log.info(f\"Checkpoint at optimizer step {step_num}. 
Training Loss {train_loss:g},\"\n f\" Validation Loss:{val_loss:g}\")\n self.show_samples()\n\n self.tbd.add_scalars(f'losses', {'train_loss': train_loss,\n 'valid_loss': val_loss}, step_num)\n if log_embedding:\n # TODO: add metadata (text) of each subword\n # TODO: Update tag to include tie configuration\n self.tbd.add_embedding(self.model.generator.proj.weight,\n global_step=step_num, tag=f'Target embeddings')\n\n # Unwrap model state from DataParallel and persist\n model = (self.model.module if hasattr(self.model, 'module') else self.model)\n state = {\n 'model_state': model.state_dict(),\n 'optim_state': self.opt.optimizer.state_dict(),\n 'step': step_num,\n 'train_loss': train_loss,\n 'val_loss': val_loss,\n 'time': time.time(),\n 'rtg_version': rtg.__version__,\n 'model_type': self.exp.model_type,\n 'model_args': self.exp.model_args,\n }\n if dtorch.fp16:\n state['amp_state'] = dtorch._scaler.state_dict()\n\n self.exp.store_model(step_num, state, train_score=train_loss,\n val_score=val_loss, keep=keep_models)\n self.last_step = step_num\n\n @abstractmethod\n def run_valid_epoch(self, data_iter: BatchIterable) -> float:\n \"\"\"\n Run a validation epoch\n :param data_iter: data iterator, either training or validation\n :return: score which is a loss\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def train(self, steps: int, check_point: int, batch_size: int,\n check_pt_callback: Optional[Callable] = None, **args):\n \"\"\"\n Train the model\n :param steps: number of steps to train\n :param check_point: how often to take check points\n :param batch_size: what is the batch size to use (depends on GPU RAM and model size)\n :param check_pt_callback: the function to call when a check point is taken\n :param args: any extra args\n :return:\n \"\"\"\n raise NotImplementedError()\n","sub_path":"rtg/module/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":18645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454463037","text":"__author__ = 'caimiao'\n\nfrom django.conf.urls import patterns, url\nfrom archdb import views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index, name='index'),\n url(r'^test$', views.test, name='test'),\n url(r'test404$', views.for404, name='test404')\n)","sub_path":"archdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"382965882","text":"adrian = {\n \"nombre\": \"Adrian\"\n}\narregloNumeros = [1, 2]\ntry:\n arregloNumeros[\"1\"] = 0\n asd = 21 + \"a\"\nexcept (KeyError, TypeError) as errorQueSalte: # For keys\n print(errorQueSalte)\nexcept Exception as err: # For keys\n print(\"Error in types\")\n print(err.__traceback__)\n\n\n\"\"\"\nexcept TypeError as type: # For keys\n print(\"Error in types\")\n print(type)\n print(f\"Linea del error: {type.__traceback__.tb_lineno}\")\n\"\"\"","sub_path":"01-Python/06_exceptions.py","file_name":"06_exceptions.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"319703890","text":"import pandas as pd\nimport numpy as np\nimport sklearn\nfrom sklearn import linear_model, preprocessing, svm, metrics\nfrom sklearn.utils import shuffle\n\ndata = pd.read_csv(\"GTExData.csv\") #load data in dataframe\nDatPatLiv = pd.read_csv(\"PathologylistLiver.csv\")\nDatPatPan = 
pd.read_csv(\"PathologylistPancreas.csv\")\n\n#entries with both cirrhosis and steatosis. Edit data so it only reflects worst stage of the condition. Then check average fat content for these stages.\n#for i in range(len(DatPatLiv)):\n# if data[\"Fat.Percentage_liver\"][i]>=50:\n# print(data[\"Subject.ID\"][i])\n#steatosis, inflammation, fibrosis, cirrhosis, necrosis (Here we work under the assumption inflammation is unique to these stages)\nNAFLD=[0]*len(DatPatLiv)\nfor i in range(len(DatPatLiv)):\n if DatPatLiv[\"necrosis\"][i]==1:\n NAFLD[i]=5\n elif DatPatLiv[\"cirrhosis\"][i]==1:\n NAFLD[i]=4\n elif DatPatLiv[\"fibrosis\"][i]==1:\n NAFLD[i]=3\n elif DatPatLiv[\"inflammation\"][i]==1:\n NAFLD[i]=2\n elif DatPatLiv[\"steatosis\"][i]==1:\n NAFLD[i]=1\n else:\n NAFLD[i]=0\n\nle = preprocessing.LabelEncoder() #For converting label entries to integer entries, like female to 0 and male to 1.\nGender=le.fit_transform(list(data[\"Sex\"]))\nAge=le.fit_transform(list(data[\"Age.Bracket\"]))\nDeath=le.fit_transform(list(data[\"Hardy.Scale\"]))\n\n#Put data back in one dataframe, remove obsoletes.\ndatsam={\"Stage\": NAFLD, \"Gender\": Gender, \"Age\": Age, \"congestion\": DatPatLiv[\"congestion\"],\n \"hyperplasia\": DatPatLiv[\"hyperplasia\"], \"nodularity\": DatPatLiv[\"nodularity\"],\n \"hemorrhage\": DatPatLiv[\"hemorrhage\"], \"atrophy\": DatPatLiv[\"atrophy\"], \"infarction\": DatPatLiv[\"infarction\"],\n \"no_abnormalities\": DatPatLiv[\"no_abnormalities\"], \"hepatitis\": DatPatLiv[\"hepatitis\"],\n \"sclerotic\": DatPatLiv[\"sclerotic\"], \"scarring\": DatPatLiv[\"scarring\"], \"hyalinization\": DatPatLiv[\"hyalinization\"],\n \"pigment\": DatPatLiv[\"pigment\"], \"ischemic_changes\": DatPatLiv[\"ischemic_changes\"]}\nRevisedData=pd.DataFrame(data=datsam)\n\n#data = data[[\"G1\",\"G2\",\"G3\",\"studytime\",\"failures\",\"absences\"]] #trim data to the attributes we want\n#We wish to predict grade 3 from the other data.\npredict=\"Fat.Percentage_liver\"\nX = np.array(RevisedData) #data, but without G3\nY = np.array(data[predict]) #Only liver values.\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X,Y,test_size=0.1)\nfor i in range(len(y_train)):\n y_train[i]=int(round(y_train[i]))\nfor i in range(len(y_test)):\n y_test[i]=int(round(y_test[i]))\n#lab_enc = preprocessing.LabelEncoder()\n#print(len(x_train), len(y_train))\n#y_train1 = lab_enc.fit_transform(y_train)\n#x_train1 = lab_enc.fit_transform(x_train)\n#print(x_train1, y_train1)\nclf = svm.SVC(kernel=\"poly\")\nclf.fit(x_train, y_train)\nprint(x_train)\ny_pred = clf.predict(x_test)\n\nacc= metrics.accuracy_score(y_test, y_pred)\n\nprint(acc)\n\n\n#splits data into training and test data, makes (0.1=10%) of the data test data, so we train\n# the model on the majority of data, but have some unknown to test on after.\n#linear = linear_model.LinearRegression()\n#linear.fit(x_train, y_train)\n#acc = linear.score(x_test, y_test)\n#print(acc)\n#prediction = linear.predict(x_test) #use model to predict y value corresponding to xtest\n","sub_path":"InitialML.py","file_name":"InitialML.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"50330633","text":"import numpy as np\r\nimport keras\r\nfrom keras import applications\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense\r\n\r\ntop_model_weights_path = 
'D:\\\\Programming\\\\Self-drive\\\\bottleneck_fc_model.h5'\r\nbatch_size = 128\r\nepochs = 20\r\n\r\ndata_y = np.load('training_data_y.npy')\r\ndata_x = np.load('training_data_x.npy')\r\n\r\ndata_x = data_x.astype(float)/255\r\n\r\ndata_x_train = data_x[:26400]\r\ndata_y_train = data_y[:26400]\r\n\r\ndata_x_val = data_x[26401:]\r\ndata_y_val = data_y[26401:]\r\n\r\n\r\nmodel = applications.InceptionV3(weights='imagenet', include_top=False)\r\n#print(model.output_shape[1:][1:][1:])\r\n\r\n\r\ntop_model = Sequential()\r\ntop_model.add(Flatten(input_shape=(model.output_shape[1:][1:][1:])))\r\ntop_model.add(Dense(256, activation= 'relu'))\r\ntop_model.add(Dropout(0.5))\r\ntop_model.add(Dense(9, activation='softmax'))\r\n\r\ntop_model.load_weights(top_model_weights_path)\r\n\r\n# add the model on top of the convolutional base\r\nmodel.add(top_model)\r\n\r\nfor layer in model.layers[:25]:\r\n layer.trainable = False\r\n\r\n# compile the model with a SGD/momentum optimizer\r\n# and a very slow learning rate.\r\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['categorical_accuracy'])\r\n\r\nmodel.fit(data_x_train, data_y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(data_x_val, data_y_val))\r\nscore = model.evaluate(data_x_val, data_y_val, verbose=0)\r\nprint('Test loss: ', score[0])\r\nprint('Test accuracy: ', score[1])\r\n","sub_path":"mainmodel.py","file_name":"mainmodel.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"545005503","text":"import numpy as np\n\nclass FMMC:\n\tdef initData(self, cfg, data, k):\n\t\tself.K = k\n\t\tself.cfg = cfg\n\t\tself.data = data\n\t\tself.init_req = True\n\n\tdef initClusters(self, c1, c2):\n\t\tself.alphaT = 1.0 / self.cfg.L2\n\t\tself.assignments = np.zeros((len(self.data),self.K))\n\t\tself.pi = np.ones(self.K) * (1.0 / self.K)\n\t\tself.thetaI = np.zeros((self.K, self.cfg.L2))\n\t\tself.thetaT = np.zeros((self.K, self.cfg.L2, self.cfg.L2))\n\n\t\tself.init_req = False\n\t\tself.thetaI[0,:] = c1[-1,:]\n\t\tself.thetaI[1,:] = c2[-1,:]\n\t\tself.thetaT[0,:,:] = c1[:-1,:]\n\t\tself.thetaT[1,:,:] = c2[:-1,:]\n\n\tdef getClusters(self):\n\t\treturn self.result\n\n\tdef run(self):\n\t\titNum = 10\n\t\tif (self.init_req):\n\t\t\tself.__init()\n\t\tself.result = np.zeros((self.thetaT.shape[0],self.thetaT.shape[1]+1,self.thetaT.shape[2]))\n\t\titeration = 0\n\n\t\twhile (iteration < itNum):\n\t\t\tself.__assign()\n\t\t\tself.__updatePriors()\n\t\t\titeration += 1\n\t\tself.result[:,:-1,:] = self.thetaT\n\t\tself.result[:,-1,:] = self.thetaI\n\t\treturn self.assignments[:,1] > self.assignments[:,0]\n\n\tdef __updatePriors(self):\n\t\t# update pi\n\t\tself.pi = np.sum(self.assignments, axis=0)\n\t\tself.pi /= np.sum(self.pi)\n\n\t\t# update thetas\n\t\tself.thetaI = np.zeros((self.K, self.cfg.L2)) + self.alphaT\n\t\tself.thetaT = np.zeros((self.K, self.cfg.L2, self.cfg.L2)) + self.alphaT\n\t\tfor seq in np.arange(len(self.data)):\n\t\t\t# if not(len(self.data[seq]) == 0):\n\t\t\tself.thetaI[:,self.data[seq][0]] += self.assignments[seq,:]\n\t\t\tsequence = self.data[seq]\n\t\t\tfor pos in np.arange(1,len(sequence)):\n\t\t\t\tself.thetaT[:, sequence[pos-1], sequence[pos]] += self.assignments[seq,:]\n\n\t\t# normalize\n\t\tself.thetaI = np.divide(self.thetaI, np.sum(self.thetaI,axis=1).reshape(self.K,1))\n\t\tfor mix in range(self.K):\n\t\t\tself.thetaT[mix,:,:] = 
np.divide(self.thetaT[mix,:,:], np.sum(self.thetaT[mix,:,:],axis=1).reshape(self.cfg.L2,1))\n\n\tdef __assign(self):\n\t\tfor seq in range(len(self.data)):\n\t\t\tif isinstance(self.data[seq], float):\n\t\t\t\tself.data[seq] = [self.data[seq]]\n\t\t\tfor mix in range(self.K):\n\t\t\t\tself.assignments[seq, mix] = self.__computePosterior(self.data[seq], mix)\n\t\t\tself.assignments[seq, :] += 1E-60\n\t\t\tself.assignments[seq, :] /= np.sum(self.assignments[seq, :])\n\n\tdef __init(self):\n\t\tself.alphaT = 1.0 / self.cfg.L2\n\t\tself.assignments = np.zeros((len(self.data),self.K))\n\t\tself.pi = np.ones(self.K) * (1.0 / self.K)\n\t\tself.thetaI = np.zeros((self.K, self.cfg.L2))\n\t\tself.thetaT = np.zeros((self.K, self.cfg.L2, self.cfg.L2))\n\n\t\tsel_seq = self.data[np.random.randint(0,len(self.data))]\n\t\tself.thetaI[0,sel_seq[0]] += 1\n\t\tfor pos in np.arange(1,len(sel_seq)):\n\t\t\tself.thetaT[0,sel_seq[pos-1],sel_seq[pos]] += 1\n\n\t\t# normalize\n\t\tself.thetaI[0,:] = np.divide(self.thetaI[0,:], (np.sum(self.thetaI[0,:]) + 1E-2))\n\t\tself.thetaT[0,:,:] = np.divide(self.thetaT[0,:,:], (np.sum(self.thetaT[0,:,:],axis=1).reshape(self.cfg.L2,1) + 1E-20))\n\n\t\tscore = np.zeros(len(self.data))\n\t\tfor seqID in np.arange(len(self.data)):\n\t\t\tseq = self.data[seqID]\n\t\t\tif not(len(seq) == 0):\n\t\t\t\tscore[seqID] = self.thetaI[0,seq[0]]\n\t\t\t\tprev = -1\n\t\t\t\tfor obs in seq:\n\t\t\t\t\tif (prev >= 0):\n\t\t\t\t\t\tscore[seqID] += self.thetaT[0,prev,obs]\n\t\t\t\t\tprev = obs\n\t\t\t\tscore[seqID] /= float(len(seq))\n\n\t\tscore = np.nan_to_num(score)\n\t\tsel_seq = self.data[np.argmin(score)]\n\t\twhile (len(sel_seq) == 0):\n\t\t\tscore[np.argmin(score)] = np.inf\n\t\t\tsel_seq = self.data[np.argmin(score)]\n\n\t\tself.thetaI[1,sel_seq[0]] += 1\n\t\tfor pos in np.arange(1,len(sel_seq)):\n\t\t\tself.thetaT[1,sel_seq[pos-1],sel_seq[pos]] += 1\n\n\t\t# normalize\n\t\tself.thetaI[1,:] = np.divide(self.thetaI[1,:], (np.sum(self.thetaI[1,:]) + 1E-20))\n\t\tself.thetaT[1,:,:] = np.divide(self.thetaT[1,:,:], (np.sum(self.thetaT[1,:,:],axis=1).reshape(self.cfg.L2,1) + 1E-20))\n\n\n\tdef __computePosterior(self, data, mix):\n\t\tprob = self.pi[mix] * self.thetaI[mix,data[0]]\n\t\tfor pos in range(1,len(data)):\n\t\t\tprob *= self.thetaT[mix,data[pos-1],data[pos]]\n\t\treturn prob\n\n\n\n\t\t\n","sub_path":"fIMMC/fmmc.py","file_name":"fmmc.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"328810337","text":"import tensorflow as tf\nimport utils\nimport config\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typicallly: [N, 4], but could be any shape.\n \"\"\"\n diff = tf.abs(y_true - y_pred)\n less_than_one = tf.cast(tf.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\ndef soft_focal_loss(logits,labels,number_cls=20):\n labels = tf.one_hot(labels,number_cls)\n loss = tf.reduce_sum(labels*(-(1 - tf.nn.softmax(logits))**2*tf.log(tf.nn.softmax(logits))),axis=1)\n return loss\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n\n anchor_class = tf.cast(tf.equal(rpn_match, 1), tf.int32)\n total_pos = tf.reduce_sum(anchor_class)\n\n indices = tf.where(tf.not_equal(rpn_match, -1))\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n loss = soft_focal_loss(labels=anchor_class,logits=rpn_class_logits 
,number_cls=2)\n loss = tf.keras.backend.switch(tf.cast(tf.size(loss) > 0,tf.bool), tf.reduce_sum(loss)/tf.cast(total_pos,tf.float32), tf.constant(0.0))\n return loss\n\ndef rpn_class_loss_multi_task(rpn_match, rpn_class_logits):\n total_loss = []\n for x in range(config.batch_size):\n b_rpn_match = rpn_match[x]\n b_rpn_logits = rpn_class_logits[x]\n anchor_class = tf.cast(tf.greater(b_rpn_match, 0), tf.int32)\n pos_num = tf.reduce_sum(anchor_class)\n indices_neg = tf.where(tf.equal(b_rpn_match, 0))\n indices_pos = tf.where(tf.greater(b_rpn_match, 0))\n indices_neg = tf.random_shuffle(indices_neg)[:3*pos_num]\n total_index = tf.concat([indices_neg, indices_pos],axis=0)\n\n b_rpn_class_logits = tf.gather(b_rpn_logits, total_index)\n anchor_class = tf.gather(anchor_class, total_index)\n\n loss = soft_focal_loss(labels=anchor_class,logits=b_rpn_class_logits ,number_cls=2)\n loss = tf.keras.backend.switch(tf.cast(tf.size(loss) > 0,tf.bool), tf.reduce_sum(loss), tf.constant(0.0))\n loss = loss/(tf.cast(pos_num, tf.float32)+1e-5)\n total_loss.append(loss)\n return tf.reduce_mean(total_loss)\n\ndef rpn_class_loss_graph1(rpn_match, rpn_class_logits):\n\n\n anchor_class = tf.cast(tf.equal(rpn_match, 1), tf.int32)\n\n indices = tf.where(tf.not_equal(rpn_match, -1))\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n loss = soft_focal_loss(labels=anchor_class,logits=rpn_class_logits ,number_cls=2)\n loss = tf.keras.backend.switch(tf.cast(tf.size(loss) > 0,tf.bool), tf.reduce_mean(loss), tf.constant(0.0))\n return loss\n\ndef rpn_bbox_loss_graph( input_rpn_deltas, input_rpn_label, pred_rpn_deltas):\n input_rpn_deltas = tf.reshape(input_rpn_deltas,(-1,4))\n pred_rpn_deltas = tf.reshape(pred_rpn_deltas, (-1,4))\n input_rpn_label = tf.reshape(input_rpn_label, (-1,))\n\n indices = tf.where(tf.greater(input_rpn_label, 0))\n\n true_rpn_delatas = tf.gather(input_rpn_deltas, indices)\n pred_rpn_deltas = tf.gather(pred_rpn_deltas, indices)\n\n diff = tf.abs(true_rpn_delatas - pred_rpn_deltas)\n less_than_one = tf.cast(tf.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n loss = tf.keras.backend.switch(tf.cast(tf.size(loss) > 0,tf.bool), tf.reduce_mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits, rois):\n print(target_class_ids, pred_class_logits, rois)\n #pred_bbox = tf.reshape(rois, shape=(-1, 4))\n #ix = tf.where(tf.reduce_sum(tf.abs(pred_bbox),axis=1)>0)[:,0]\n\n #target_class_ids = tf.gather(target_class_ids, ix)\n #pred_class_logits = tf.gather(pred_class_logits,ix)\n\n\n tf.summary.scalar('target_shape', tf.shape(target_class_ids)[0])\n target_class_ids = tf.reshape(target_class_ids, shape=(-1,))\n target_class_ids = tf.cast(target_class_ids, 'int64')\n pos_num = tf.reduce_sum(tf.cast(tf.greater(target_class_ids,0),tf.int32))\n tf.summary.scalar('pos_num', pos_num)\n loss = soft_focal_loss(labels=target_class_ids, logits=pred_class_logits, number_cls=2)\n\n loss = tf.keras.backend.switch(tf.cast(tf.size(loss) > 0, tf.bool), tf.reduce_sum(loss)/tf.cast(pos_num,tf.float32), tf.constant(0.0))\n\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n\n print(pred_bbox)\n pred_bbox = tf.reshape(pred_bbox,(-1, 2, 4))\n target_class_ids = tf.reshape(target_class_ids, (-1,))\n target_bbox = tf.reshape(target_bbox, (-1, 4))\n # pred_bbox = tf.reshape(pred_bbox, (-1, tf.shape(pred_bbox)[2], 
4))\n\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indicies.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n print(target_bbox,pred_bbox)\n loss = tf.keras.backend.switch(tf.cast(tf.size(target_bbox) > 0,tf.bool),\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = tf.reduce_mean(loss)\n return loss\n\n\n\n\ndef mrcnn_bbox_loss_graph_dsl(target_bbox, target_class_ids, pred_bbox):\n\n target_class_ids = tf.reshape(target_class_ids, (-1,))\n target_bbox = tf.reshape(target_bbox, (-1, 4))\n\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather(pred_bbox, positive_roi_ix)\n\n loss = tf.keras.backend.switch(tf.cast(tf.size(target_bbox) > 0,tf.bool),\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = tf.reduce_mean(loss)\n return loss","sub_path":"losses/rcnn_losses.py","file_name":"rcnn_losses.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"393490484","text":"\"\"\"\r\nAuthor: Yogaraja Gopal\r\nThis module contains the Template tags for env App\r\n\"\"\"\r\nimport re\r\nfrom django import template\r\nfrom esmt.env.conf.properties import FINAL_OCTET\r\nfrom esmt.env.conf.properties_r2 import FINAL_OCTET as R2_FINAL_OCTET\r\nregister = template.Library()\r\n\r\n\r\n@register.filter(name=\"sort_list\")\r\ndef sort_list(values):\r\n \"\"\"\r\n This template tag is used to sort list\r\n \"\"\"\r\n return sorted(values)\r\n\r\n\r\n@register.filter(name=\"dict_val\")\r\ndef dict_val(values, arg):\r\n \"\"\"\r\n This template tag is used to return dictionary value\r\n \"\"\"\r\n if arg in values:\r\n return values[arg]\r\n return \"\"\r\n\r\n\r\n@register.filter(name=\"dict_items\")\r\ndef dict_items(values):\r\n \"\"\"\r\n This template tag is used to return dictionary items\r\n \"\"\"\r\n return values.items()\r\n\r\n\r\ndef check_status(line, text):\r\n \"\"\"\r\n This template tag is used to return the status\r\n \"\"\"\r\n search_run = re.search(r\"\" + text, line, re.I)\r\n return search_run\r\n\r\n\r\n@register.filter(name=\"repl_status\")\r\ndef repl_status(value):\r\n \"\"\"\r\n This template tag is used to check the rep_log and return css_class\r\n \"\"\"\r\n soc_log = (value['soc_log'] if 'soc_log' in value else \"\")\r\n plato_log = (value['plato_log'] if 'plato_log' in value else \"\")\r\n\r\n if check_status(soc_log, \"Completed OK\") and check_status(plato_log, \"Completed OK\"):\r\n return \"btn-success\"\r\n elif check_status(soc_log, \"Failed\") or check_status(plato_log, \"Failed\"):\r\n return \"btn-danger\"\r\n return \"btn-warning\"\r\n\r\n\r\n@register.filter(name='mount_sts')\r\ndef mount_sts(mnt_detail):\r\n \"\"\"\r\n This template tag is used to check the mount details and return the css_class\r\n \"\"\"\r\n sts = \"btn-success\"\r\n if mnt_detail != '':\r\n mount_data = mnt_detail[\"mount\"]\r\n if mount_data:\r\n for mounts in 
mount_data:\r\n                mnt = mounts[4].split(\"%\")\r\n                mnt_per = int(mnt[0])\r\n                if mnt_per >= 95:\r\n                    sts = 'btn-danger'\r\n                elif 89 < mnt_per < 95:\r\n                    sts = ('btn-warning' if sts != 'btn-danger' else sts)\r\n                else:\r\n                    sts = ('btn-success' if sts != 'btn-danger' and sts != 'btn-warning' else sts)\r\n        else:\r\n            sts = 'btn-danger'\r\n    else:\r\n        sts = 'btn-danger'\r\n    return sts\r\n\r\n\r\n@register.filter(name='region_name')\r\ndef region_name(region_code):\r\n    \"\"\"\r\n    This template tag returns the region name based on region code\r\n    \"\"\"\r\n    region_code_name = {\r\n        '020304': 'UK',\r\n        '020321': 'Scottish',\r\n        '020322': 'Northern Ireland',\r\n        '020323': 'Guernsey & Jersey',\r\n        '1315': 'Spain',\r\n        '0218': 'ROI',\r\n        '0708': 'NL'\r\n    }\r\n    return region_code_name[region_code]\r\n\r\n\r\n@register.filter(name='split2')\r\ndef split2(value):\r\n    \"\"\"\r\n    This template tag is used to split the values\r\n    \"\"\"\r\n    return value.split(\".\")[2]\r\n\r\n\r\n@register.simple_tag(name='clientIp')\r\ndef clientIp(store, client, *args):\r\n    \"\"\"\r\n    This template tag forms the IP address and returns it based on the client name\r\n    \"\"\"\r\n    r2_r3 = args[0]\r\n    if r2_r3 == \"R3\":\r\n        return \"10.34.\" + store + \".\" + FINAL_OCTET[client]\r\n    elif r2_r3 == \"R2\":\r\n        return \"10.34.\" + store + \".\" + R2_FINAL_OCTET[client]\r\n    return \"Invalid region supplied\"\r\n","sub_path":"django/scripts/src/esmt/env/templatetags/env_filters.py","file_name":"env_filters.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"538666008","text":"'''\nConfiguration settings for Grapher gui\n'''\nimport pyqtgraph as pg\npg.setConfigOption('background', 'k')\npg.setConfigOption('foreground', 'y')\n\nclass traceListConfig():\n    def __init__(self, background_color = 'white'):\n        self.background_color = background_color\n\nclass graphConfig():\n    def __init__(self, name, ylim=[0,1], isScrolling=False, max_datasets = 6,\n                 show_points = True):\n        self.name = name\n        self.ylim = ylim\n        self.isScrolling = isScrolling\n        self.max_datasets = max_datasets\n        self.graphs = 1 # just a single graph\n        self.show_points = show_points\n\nclass gridGraphConfig():\n    def __init__(self, tab, config_list):\n        self.tab = tab\n        self.config_list = config_list[0::3]\n        self.row_list = config_list[1::3]\n        self.column_list = config_list[2::3]\n\n        self.graphs = len(self.config_list)\n\n\ntabs =[\n    gridGraphConfig('current', [graphConfig('current', max_datasets = 1), 0, 0]),\n    gridGraphConfig('pmt', [graphConfig('pmt', ylim=[0,30], isScrolling=True, max_datasets = 1, show_points = False), 0, 0]),\n    gridGraphConfig('spectrum', [graphConfig('spectrum'), 0, 0]),\n    gridGraphConfig('rabi', [graphConfig('rabi'), 0, 0]),\n    gridGraphConfig('calibrations', [\n        graphConfig('car1'), 0, 0,\n        graphConfig('car2'), 0, 1, \n        graphConfig('radial1'), 1, 0,\n        graphConfig('radial2'), 1, 1]),\n    gridGraphConfig('molmer-sorensen',[\n        graphConfig('ms_time'), 0, 0,\n        graphConfig('local_stark'), 0, 1]),\n\n    gridGraphConfig('vaet',[\n        graphConfig('vaet_time'), 0, 0,\n        graphConfig('vaet_delta'), 0, 1]),\n    gridGraphConfig('parity', [graphConfig('parity'), 0, 0]),\n    gridGraphConfig('ramsey', [graphConfig('ramsey'), 0, 0])\n]\n\n# gridGraphConfig('testgrid',\n#                    [\n#                     graphConfig('fig1'), 0, 0,\n#                     graphConfig('fig2'), 0, 1,\n#                     graphConfig('fig3'), 2, 2,\n#                     graphConfig('fig4'), 1, 2\n#                     ]),\n# gridGraphConfig('testgrid2',\n#                     [\n#                     graphConfig('fig1123'), 0, 0,\n# 
])\n","sub_path":"devel/RealSimpleGrapher/GUIConfig.py","file_name":"GUIConfig.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"155918991","text":"from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM\nfrom sitesngine.hosts.utils import current_site_id\n\n__author__ = 'fearless' # \"from birth till death\"\n\n\nclass CurrentSiteManager(DjangoCSM):\n \"\"\"\n Extends Django's site manager to first look up site by ID stored in\n the request, the session, then domain for the current request\n (accessible via threadlocals in ``sitesngine.hosts.request``), the\n environment variable ``SITESNGINE_SITE_ID`` (which can be used by\n management commands with the ``--site`` arg, finally falling back\n to ``settings.SITE_ID`` if none of those match a site.\n \"\"\"\n def __init__(self, field_name=None, *args, **kwargs):\n super(DjangoCSM, self).__init__(*args, **kwargs)\n self.__field_name = field_name\n self.__is_validated = False\n\n def get_query_set(self):\n if not self.__is_validated:\n self._validate_field_name()\n lookup = {self.__field_name + \"__id__exact\": current_site_id()}\n return super(DjangoCSM, self).get_query_set().filter(**lookup)","sub_path":"sitesngine/hosts/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"433451321","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProduction Configurations\n\n- Use djangosecure\n- Use mailgun to send emails\n- Use Redis on Heroku\n- Use sentry for error logging\n- Use opbeat for error reporting\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport logging\n\nfrom .common import * # noqa\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env('DJANGO_SECRET_KEY')\n\n\n# This ensures that Django will be able to detect a secure connection\n# properly on Heroku.\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# raven sentry client\n# See https://docs.getsentry.com/hosted/clients/python/integrations/django/\nINSTALLED_APPS += ('raven.contrib.django.raven_compat', )\nRAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware', )\nMIDDLEWARE_CLASSES = RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy\n\nSECURE_HSTS_SECONDS = 31536000\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\n 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = False\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = 'DENY'\n\n# SITE CONFIGURATION\n# ------------------------------------------------------------------------------\n# Hosts/domain names that are valid for this site\n# See 
https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts\nALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['*'])\n# END SITE CONFIGURATION\n\n# Static Assets\n# ------------------------\nMEDIA_ROOT = \"/data/media\"\n\n# COMPRESSOR\n# ------------------------------------------------------------------------------\nCOMPRESS_URL = STATIC_URL\nCOMPRESS_ENABLED = True\nCOMPRESS_OFFLINE = True\n\n# EMAIL\n# ------------------------------------------------------------------------------\nDEFAULT_FROM_EMAIL = env('DJANGO_SERVER_EMAIL')\nSERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)\n\n# Anymail with Mailgun\nINSTALLED_APPS += (\"anymail\", )\nANYMAIL = {\n \"MAILGUN_API_KEY\": env('DJANGO_MAILGUN_API_KEY'),\n}\nCELERY_EMAIL_BACKEND = env(\"CELERY_EMAIL_BACKEND\", default=\"anymail.backends.mailgun.MailgunBackend\")\n\n# TEMPLATE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See:\n# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader\nTEMPLATES[0]['OPTIONS']['loaders'] = [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),\n]\n\n# Sentry Configuration\nSENTRY_DSN = env('DJANGO_SENTRY_DSN')\nSENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'django.security.DisallowedHost': {\n 'level': 'ERROR',\n 'handlers': ['console', 'sentry'],\n 'propagate': False,\n },\n },\n}\nSENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)\nRAVEN_CONFIG = {\n 'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),\n 'DSN': SENTRY_DSN\n}\n\n# Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}\nADMIN_URL = env('DJANGO_ADMIN_URL')\nACCOUNT_DEFAULT_HTTP_PROTOCOL = \"https\"\nSTATIC_ROOT = '/data/static'\n\nPINAX_STRIPE_PUBLIC_KEY = env('PINAX_STRIPE_PUBLIC_KEY')\nPINAX_STRIPE_SECRET_KEY = env('PINAX_STRIPE_SECRET_KEY')\n\nOCTOBAT_IMAGE = \"http://localhost:8000/static/images/logo_icon.png\"\n\nOCTOBAT_PUBLIC_KEY = env('OCTOBAT_PUBLIC_KEY')\nOCTOBAT_PRIVATE_KEY = env('OCTOBAT_PRIVATE_KEY')\n\nMAILCHIMP_API_KEY = env.str(\"MAILCHIMP_API_KEY\")\nMAILCHIMP_LIST = env.str(\"MAILCHIMP_LIST\")\n\n# WEBPACK\n# ------------------------------------------------------------------------------\n# Webpack config\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': '{{ cookiecutter.project_slug }}/static/{{ cookiecutter.project_slug }}/dist/',\n 'STATS_FILE': '/data/webpack/webpack-stats-production.json'\n }\n}\n\nNEW_RELIC_LICENSE_KEY = env('NEW_RELIC_LICENSE_KEY')\nNEW_RELIC_APP_NAME = '{{ cookiecutter.project_slug }}'\n\n# 
Your production stuff: Below this line define 3rd party library settings\n","sub_path":"{{cookiecutter.project_slug}}/config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"155001343","text":"import os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport numpy as np\nimport argparse\nfrom matplotlib import pyplot as plt\nimport cv2\nif torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\nfrom ssd import build_ssd\nfrom data import VOC_CLASSES as labels\n\nparser = argparse.ArgumentParser(description='Single Shot Detection Demo')\nparser.add_argument('-i', '--img', type=str, dest='image_path',\n default='demo/street.jpg', help='Image path')\nparser.add_argument('-cp', '--checkpoint', type=str, dest='checkpoint',\n default=os.path.join('checkpoints', 'ssd300_VOC_115000.pth'), help='Checkpoint path')\nparser.add_argument('-rgb', '--mean_rgb', type=str, dest='mean_rgb',\n default='104.0, 117.0, 123.0', help='Mean value of RGB channels')\nparser.add_argument('-d', '--data_shape', type=int, dest='data_shape',\n default=300, help='Size of network input (not the actual image size)')\nparser.add_argument('-n', '--num_class', type=int, dest='num_class',\n default=21, help='Number of classes (including ground truth)')\nargs = parser.parse_args()\n\n\n# Initialize SSD\nnet = build_ssd('test', args.data_shape, args.num_class)\nnet.load_weights(args.checkpoint)\n\n# Load image\nimage = cv2.imread(args.image_path, cv2.IMREAD_COLOR)\nrgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nx = cv2.resize(image, (args.data_shape, args.data_shape)).astype(np.float32)\nmean_rgb = [float(n) for n in args.mean_rgb.split(',')]\nx -= tuple(mean_rgb)\nx = x.astype(np.float32)\nx = x[:, :, ::-1].copy()\nx = torch.from_numpy(x).permute(2, 0, 1)\n\n# wrap tensor in Variable\nxx = Variable(x.unsqueeze(0))\nif torch.cuda.is_available():\n xx = xx.cuda()\ny = net(xx)\n\nplt.figure(figsize=(6,6))\ncolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\nplt.imshow(rgb_image)\ncurrentAxis = plt.gca()\n\ndetections = y.data\n# scale each detection back up to the image\nscale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)\nfor i in range(detections.size(1)):\n j = 0\n while detections[0,i,j,0] >= 0.6:\n score = detections[0,i,j,0]\n label_name = labels[i-1]\n display_txt = '%s: %.2f'%(label_name, score)\n pt = (detections[0,i,j,1:]*scale).cpu().numpy()\n coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1\n color = colors[i]\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})\n j+=1\n\nplt.show()\n\n\n\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"377437143","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Lee A. 
Congdon \nDate : 2021-07-29\nPurpose: Tiny Python Project Twelve Days of Christmas exercise\n\"\"\"\n\nimport argparse\nimport sys\nimport emoji\n\n\ndef get_args():\n \"\"\"Parse arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Twelve Days of Christmas\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument(\n \"-n\",\n \"--num\",\n default=12,\n type=int,\n help=\"Number of days to sing\",\n metavar=\"days\",\n )\n\n parser.add_argument(\n \"-e\", \"--emoji\", action=\"store_true\", help=\"Use emoji in output\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--outfile\",\n default=sys.stdout,\n type=argparse.FileType(\"wt\"),\n help=\"Outfile\",\n metavar=\"FILE\",\n )\n\n args = parser.parse_args()\n\n if args.num < 1 or args.num > 12:\n parser.error(f'--num \"{args.num}\" must be between 1 and 12')\n\n return args\n\n\ndef verse(day, emoji_flag=False):\n \"\"\"Return the verse for day\"\"\"\n ordinal = [\n \"first\",\n \"second\",\n \"third\",\n \"fourth\",\n \"fifth\",\n \"sixth\",\n \"seventh\",\n \"eighth\",\n \"ninth\",\n \"tenth\",\n \"eleventh\",\n \"twelfth\",\n ]\n phrase = [\n \"And a partridge in a pear tree.\",\n \"Two turtle doves,\",\n \"Three French hens,\",\n \"Four calling birds,\",\n \"Five gold rings,\",\n \"Six geese a laying,\",\n \"Seven swans a swimming,\",\n \"Eight maids a milking,\",\n \"Nine ladies dancing,\",\n \"Ten lords a leaping,\",\n \"Eleven pipers piping,\",\n \"Twelve drummers drumming,\",\n ]\n alt_phrase = \"A partridge in a pear tree.\"\n emoji_phrase = [\n \"And a :bird: in a pear tree.\",\n \"Two turtle :dove:s,\",\n \"Three French :chicken:s,\",\n \"Four calling birds,\",\n \"Five gold :ring:s,\",\n \"Six :bird:s a laying,\",\n \"Seven :swan:s a swimming,\",\n \"Eight :woman: milking,\",\n \"Nine :woman: dancing,\",\n \"Ten :man: a leaping,\",\n \"Eleven :man: piping,\",\n \"Twelve :drum:s drumming,\",\n ]\n emoji_alt_phrase = \"A :bird: in a pear tree.\"\n\n result = [\n f\"On the {ordinal[day - 1]} day of Christmas,\",\n \"My true love gave to me,\",\n ]\n if not emoji_flag:\n result.extend(reversed(phrase[1:day]))\n result.append(f\"{phrase[0] if day != 1 else alt_phrase}\")\n else:\n result.extend(map(emoji.emojize, reversed(emoji_phrase[1:day])))\n result.append(\n f\"{emoji.emojize(emoji_phrase[0]) if day != 1 else emoji.emojize(emoji_alt_phrase)}\"\n )\n return \"\\n\".join(result)\n\n\ndef test_verse():\n assert verse(1) == \"\\n\".join(\n [\n \"On the first day of Christmas,\",\n \"My true love gave to me,\",\n \"A partridge in a pear tree.\",\n ]\n )\n assert verse(2) == \"\\n\".join(\n [\n \"On the second day of Christmas,\",\n \"My true love gave to me,\",\n \"Two turtle doves,\",\n \"And a partridge in a pear tree.\",\n ]\n )\n\n\ndef main():\n \"\"\"Main program\"\"\"\n\n args = get_args()\n # for day in range(1, args.num + 1):\n # print(verse(day), end='\\n\\n')\n args.outfile.write(\n \"\\n\\n\".join(verse(day, args.emoji) for day in range(1, args.num + 1)) + \"\\n\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"13_twelve_days/twelve_days.py","file_name":"twelve_days.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"283623533","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nimport time\nimport os\nfrom sklearn.model_selection import train_test_split\n\nfrom demo.Emotion.em_network.utils import device, AverageMeter, dir_path, write_log, 
\\\n sensor_imag_data_loader, accuracy\nfrom demo.Emotion.em_network.models.PHRNN import PHRNN\n\nos.chdir('C:/Users/Zber/Documents/Dev_program/OpenRadar/')\n\n# set seed, make result reporducable\nSEED = 1234\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\n# facial parts slices\n# alignment landmark\nface_parts = {\n 'eyebrow': slice(17, 27),\n 'eye': slice(36, 48),\n 'nose': slice(27, 36),\n 'lips': slice(48, 68),\n}\n\n\ndef train(model, data_loader, criterion, optimizer, epoch=0, to_log=None, print_freq=5):\n # create Average Meters\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n train_loss = []\n\n # switch to train mode\n model.train()\n # record start time\n start = time.time()\n\n for i, (inputs, target) in enumerate(data_loader):\n # prepare input and target\n inputs = inputs.to(device)\n # target = target.type(torch.LongTensor)\n target = target.long()\n target = target.to(device)\n\n eyebrow, eye, nose, mouth = split_parts(inputs)\n eyebrow = reshape_parts(eyebrow)\n eye = reshape_parts(eye)\n nose = reshape_parts(nose)\n mouth = reshape_parts(mouth)\n\n # measure data loading time\n data_time.update(time.time() - start)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # gradient and do SGD step\n output = model(eyebrow, eye, nose, mouth)\n loss = criterion(output, target)\n\n train_loss.append(loss.item())\n loss.backward()\n optimizer.step()\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec1.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - start)\n start = time.time()\n\n # print training info\n if i % print_freq == 0:\n str = ('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:3.3f} ({top1.avg:3.3f})\\t'\n 'Prec@5 {top5.val:3.3f} ({top5.avg:3.3f})'.format(\n epoch, i, len(data_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n print(str)\n\n if to_log is not None:\n write_log(str + '\\n', to_log)\n\n return train_loss\n\n\ndef test(model, test_loader, criterion, to_log=None):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for (inputs, target) in test_loader:\n target = target.long()\n inputs, target = inputs.to(device), target.to(device)\n\n eyebrow, eye, nose, mouth = split_parts(inputs)\n eyebrow = reshape_parts(eyebrow)\n eye = reshape_parts(eye)\n nose = reshape_parts(nose)\n mouth = reshape_parts(mouth)\n\n output = model(eyebrow, eye, nose, mouth)\n loss = criterion(output, target)\n test_loss += loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.sampler)\n test_loss *= test_loader.batch_size\n acc = 100. 
* correct / len(test_loader.sampler)\n format_str = 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.sampler), acc\n )\n print(format_str)\n if to_log is not None:\n write_log(format_str, to_log)\n return test_loss.item(), acc\n\n\ndef scale_range(input, min=0, max=1):\n input += -(np.min(input))\n input /= np.max(input) / (max - min)\n input += min\n return input\n\n\ndef load_data(landmark_dir):\n x = np.zeros((num_data, num_frames, num_landmarks, n_dim))\n y = np.zeros((num_data))\n\n # load numpy data\n i = 0\n for label in range(1, 8):\n landmark_label_dir = os.path.join(landmark_dir, str(label))\n\n for npy_path in os.listdir(landmark_label_dir):\n d = np.load(os.path.join(landmark_label_dir, npy_path))\n x[i] = d\n y[i] = label - 1\n i += 1\n return x, y\n\n\ndef landmark_normalization(landmark):\n # nose point\n nose_index = 33\n\n for data_index in range(num_data):\n for frame_index in range(num_frames):\n frame_landmark = landmark[data_index, frame_index]\n nose_landmark = frame_landmark[nose_index]\n landmark[data_index, frame_index] = (frame_landmark-nose_landmark)/ np.std(frame_landmark, axis=0)\n\n return landmark\n\n\ndef reshape_parts(data):\n batch_size, frame_size = data.size()[0], data.size()[1]\n data_view = data.view((batch_size, frame_size, -1))\n return data_view\n\n\ndef split_parts(data):\n eyebrow = data[:, :, face_parts['eyebrow'], :]\n eye = data[:, :, face_parts['eye'], :]\n nose = data[:, :, face_parts['nose'], :]\n lips = data[:, :, face_parts['lips'], :]\n\n return eyebrow, eye, nose, lips\n\n\nif __name__ == \"__main__\":\n\n N_EPOCHS = 100\n LR = 0.0005\n BATCH_SIZE = 32\n num_classes = 6\n num_data = 327\n num_frames = 30\n # num_frames = 20\n num_landmarks = 68\n n_dim = 2\n\n emotion_list = ['Joy', 'Surprise', 'Anger', 'Sadness', 'Fear', 'Disgust']\n\n emotion_classes = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']\n\n # results dir\n result_dir = \"demo/Emotion/results\"\n # landmark_dir = \"G:/My Drive/mmWave/mmWave-Emotion/mmWave Vision Datasets/CK+/CK+/emotion_images\"\n landmark_dir = \"G:/My Drive/mmWave/mmWave-Emotion/mmWave Vision Datasets/CK+/CK+/alignment_landmarks_L30/\"\n # landmark_dir = \"G:/My Drive/mmWave/mmWave-Emotion/mmWave Vision Datasets/CK+/CK+/alignment_landmarks_L20/\"\n\n # model configure\n model_config = {\n 'eye_size': (face_parts['eye'].stop - face_parts['eye'].start) * n_dim,\n 'eyebrow_size': (face_parts['eyebrow'].stop - face_parts['eyebrow'].start) * n_dim,\n 'nose_size': (face_parts['nose'].stop - face_parts['nose'].start) * n_dim,\n 'mouth_size': (face_parts['lips'].stop - face_parts['lips'].start) * n_dim,\n 'h1_size': 30,\n 'h2_size': 30,\n 'h3_size': 60,\n 'h4_size': 60,\n 'h5_size': 90,\n 'h6_size': 90,\n 'total_length': num_frames,\n 'num_classes': len(emotion_classes)\n }\n\n # load data\n x, y = load_data(landmark_dir)\n # merge numpy file with same lable\n\n x = landmark_normalization(x)\n\n # Landmark normalization\n\n # azi_data_path = \"demo/Emotion/data/Heatmap_D0_S1_L0_B4-14_I0-80_azi.npy\"\n # ele_data_path = \"demo/Emotion/data/Heatmap_D0_S1_L0_B4-14_I0-80_ele.npy\"\n # label_path = 'demo/Emotion/data/sensor_b8r3_c5_y.npy'\n # label_path_1 = 'demo/Emotion/data/sensor_b8r3_c5_y_s40_e80.npy'\n\n # split data\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=25, stratify=y)\n\n train_loader = sensor_imag_data_loader(x_train, y_train, batch_size=BATCH_SIZE)\n test_loader = 
sensor_imag_data_loader(x_test, y_test, batch_size=np.shape(x_test)[0])\n\n    # log path\n    path = dir_path(\"vision_landmark_PHRNN_alignment_landmark_normalized_nobias\", result_dir)\n\n    # create model\n    model = PHRNN(**model_config)\n    model = model.to(device)\n\n    # initialize criterion and optimizer\n    # could add weighted loss e.g. pos_weight = torch.ones([64])\n    # criterion = nn.BCELoss()\n    criterion = nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(model.parameters(), lr=LR)\n\n    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.2)\n\n    metrics_dic = {\n        'loss': [],\n        'precision': [],\n        'recall': []\n    }\n\n    for epoch in range(N_EPOCHS):\n        train_loss = train(model, data_loader=train_loader, criterion=criterion, optimizer=optimizer, epoch=epoch,\n                           to_log=path['log'])\n        test_loss, acc = test(model, test_loader=test_loader, criterion=criterion, to_log=path['log'])\n\n        lr_scheduler.step()\n\n        metrics_dic['loss'].append(test_loss)\n        metrics_dic['precision'].append(acc)\n        # metrics_dic['recall'].append(rec)\n\n    # save final model\n    torch.save(model.state_dict(), path['model'])\n\n    # save metrics dic\n","sub_path":"demo/Emotion/em_network/train_PHRNN.py","file_name":"train_PHRNN.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"270985682","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\nfrom . import views\nadmin.autodiscover()\n\nurlpatterns = [\n    # Examples:\n    url(r'^$', views.home),\n    url(r'^rules/',views.rules),\n    url(r'^tutorial/', views.videotutorial ),\n    url(r'^about/', views.about ),\n    url(r'^friends/',include('friends.urls')),\n    url(r'^verification/',include('verification.urls')),\n    url(r'^rooms/',include('rooms.urls')),\n    url(r'^game/',include('game.urls')),\n    url(r'^history/',include('history.urls')),\n    #url(r'^admin/', include(admin.site.urls)),\n    url(r'^emote/',include('emote.urls')),\n    url(r'^shop/',include('shop.urls')),\n    # url(r'^admin/',include('admin.urls'))\n]\n","sub_path":"meetain/meetain/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"459316210","text":"# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n                        unicode_literals, with_statement)\n\nimport logging\nimport os\nfrom contextlib import contextmanager\n\nfrom pants.util.contextutil import temporary_file\nfrom pants.util.dirutil import safe_open\n\n\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef safe_args(args,\n              options,\n              max_args=None,\n              argfile=None,\n              delimiter='\\n',\n              quoter=None,\n              delete=True):\n  \"\"\"Yields args if there are fewer than the limit; otherwise writes args to an argfile and yields an\n  argument list with one argument formed from the path of the argfile.\n\n  :param args: The args to work with.\n  :param OptionValueContainer options: scoped options object for this task\n  :param max_args: The maximum number of args to let through without writing an argfile. 
If not\n specified then the maximum will be loaded from the --max-subprocess-args option.\n :param argfile: The file to write args to when there are too many; defaults to a temporary file.\n :param delimiter: The delimiter to insert between args written to the argfile, defaults to '\\n'\n :param quoter: A function that can take the argfile path and return a single argument value;\n defaults to: lambda f: '@' + f\n :param delete: If True deletes any arg files created upon exit from this context; defaults to\n True.\n \"\"\"\n max_args = max_args or options.max_subprocess_args\n if len(args) > max_args:\n def create_argfile(f):\n logger.debug('Creating argfile {} with contents {}'.format(f.name, ' '.join(args)))\n f.write(delimiter.join(args))\n f.close()\n return [quoter(f.name) if quoter else '@{}'.format(f.name)]\n\n if argfile:\n try:\n with safe_open(argfile, 'w') as fp:\n yield create_argfile(fp)\n finally:\n if delete and os.path.exists(argfile):\n os.unlink(argfile)\n else:\n with temporary_file(cleanup=delete) as fp:\n yield create_argfile(fp)\n else:\n yield args\n","sub_path":"src/python/pants/backend/jvm/argfile.py","file_name":"argfile.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"148601671","text":"import time\nimport argparse\nimport urllib3\nimport bs4\n\nstart_time = time.time()\n\n\nparser = argparse.ArgumentParser(description = \"Read from URL\")\nparser.add_argument(\"myurl\")\nparser.add_argument(\"tofind\")\n\nargs = parser.parse_args()\nmyurl = args.myurl\ntofind = args.tofind\n\n#webbrowser.open_new(myurl)\n#print(\"myurl: \" + myurl)\n\n\ni = 0\nlines = []\nhttp = urllib3.PoolManager()\n#page = http.request('GET', myurl)\n#textfile = page.data\npage = http.urlopen('GET',myurl)\ntext = page.data\ntext2 = page.encode('ascii', 'ignore').decode('ascii')\nprint(text2)\nsoup = bs4.BeautifulSoup(text2, 'html5lib')\n\n#soup = bs4.BeautifulSoup(page.data.decode('utf-8','ignore'),'html5lib')\nprint(soup.prettify())\n#lines = page.readlines()\n\n#if tofind in textfile:\n#\tprint(tofind + \" ist in \" + myurl + \" enthalten.\")\n#\tprint(\"Zeilenzahl: \", len(lines))\n#\twhile i != len(lines):\n#\t\tif tofind in lines[i]:\n#\t\t\tprint(\"Text found in line \" + str((i+1)))\n#\t\ti += 1\n#else:\n#\tprint(\"Habe '\" + tofind + \"' nicht in \" + myurl + \" gefunden.\")\n\n#page = requests.get(myurl)\n#print(page.text)\npage.close()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nexit()\n","sub_path":"readfrominet.py","file_name":"readfrominet.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"330968086","text":"import setuptools\nimport numpy.distutils.core\nimport os\n\nfrom numpy.distutils.core import Extension\n\n# create extension for calling IGRF\nextensions = [Extension(name='OMMBV.igrf',\n sources=[os.path.join('OMMBV', 'igrf13.f')]),\n Extension(name='OMMBV.fortran_coords',\n sources=[os.path.join('OMMBV', '_coords.f')])]\n\nhere = os.path.abspath(os.path.dirname(__file__))\nversion_filename = os.path.join('OMMBV', 'version.txt')\nwith open(os.path.join(here, version_filename)) as version_file:\n version = version_file.read().strip()\n\n# call setup\nnumpy.distutils.core.setup(\n name='OMMBV',\n version=version,\n packages=['OMMBV', 'OMMBV.tests'],\n description='Orthogonal geomagnetic vector basis and field-line mapping for multipole magnetic fields.',\n 
url='http://github.com/rstoneback/OMMBV',\n\n # Author details\n author='Russell Stoneback',\n author_email='rstoneba@utdallas.edu',\n data_files=[('OMMBV', ['OMMBV/version.txt'])],\n include_package_data=True,\n\n # required modules\n install_requires=['numpy', 'scipy'],\n ext_modules=extensions,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353858348","text":"import json\nimport math\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom .exceptions import NoDataError\n\n_intraday_pattern = re.compile(\"var (?P.*)=(?P\\[[^;]*\\]);\")\n\n\ndef _load_script_vars_from_web(symbol_id, year, month, day):\n d = f'{year:04}{month:02}{day:02}'\n script_variables = {}\n\n # get data from url\n daily_content = requests.get(f'http://cdn.tsetmc.com/Loader.aspx?ParTree=15131P&i={symbol_id}&d={d}',\n timeout=20, verify=False).text\n\n # find and get script tags\n all_scripts = BeautifulSoup(daily_content, 'lxml').find_all('script')\n first_script = str(all_scripts[-3])\n second_script = str(all_scripts[-2])\n third_script = str(all_scripts[-1])\n\n # extract first script variables\n matches = _intraday_pattern.findall(first_script)\n for match in matches:\n script_variables[match[0]] = json.loads(match[1].replace('\\'', '\"'))\n\n # extract second script variables\n matches = _intraday_pattern.findall(second_script)\n for match in matches:\n script_variables[match[0]] = json.loads(match[1].replace('\\'', '\"'))\n\n # extract third script variables\n matches = _intraday_pattern.findall(third_script)\n for match in matches:\n script_variables[match[0]] = json.loads(match[1].replace('\\'', '\"'))\n\n return script_variables\n\n\ndef _extract_symbol_details_data(script_vars):\n symbol_details = {\n 'full_name': script_vars['InstSimpleData'][0],\n 'short_name': script_vars['InstSimpleData'][1],\n 'market_short_name': script_vars['InstSimpleData'][2],\n 'market_full_name': script_vars['InstSimpleData'][3],\n 'isin': script_vars['InstSimpleData'][7],\n 'shares_count': script_vars['InstSimpleData'][8],\n 'base_volume': script_vars['InstSimpleData'][9],\n }\n\n return symbol_details\n\n\ndef _extract_price_data(script_vars):\n # State Data\n state_data = []\n for isd in script_vars['InstrumentStateData']:\n state_data.append(isd[1:])\n\n last_state_i = 0\n next_state_change_time = state_data[0][0] if len(state_data) > 1 else math.inf\n last_state = state_data[0][1] if len(state_data) > 1 else 'A ' # todo: convert to enum\n\n # Price Data\n price_data = []\n for cpd in script_vars['ClosingPriceData']:\n continuous_time = int(cpd[12])\n while continuous_time >= next_state_change_time:\n if len(state_data) > (last_state_i + 1):\n last_state_i += 1\n next_state_change_time = state_data[last_state_i][0]\n last_state = state_data[last_state_i][1] # todo: convert to enum\n else:\n next_state_change_time = math.inf\n\n price_data.append({\n 't': continuous_time,\n 'lst': int(cpd[2]),\n 'y': int(cpd[5]),\n 'o': int(cpd[4]),\n 'c': int(cpd[3]),\n 'h': int(cpd[6]),\n 'l': int(cpd[7]),\n 'cnt': int(cpd[8]),\n 'v': int(cpd[9]),\n 's': last_state,\n })\n\n price_data.sort(key=lambda x: x['t'])\n return price_data\n\n\ndef _extract_orders_data(script_vars):\n orders = []\n for bld in script_vars['BestLimitData']:\n order = {\n 't': bld[0],\n 'rank': int(bld[1]),\n 'bcnt': int(bld[2]),\n 'bv': int(bld[3]),\n 'bp': int(bld[4]),\n 'scnt': int(bld[7]),\n 'sv': 
int(bld[6]),\n 'sp': int(bld[5]),\n }\n\n orders.append(order)\n\n orders.sort(key=lambda x: x['t'])\n return orders\n\n\ndef _extract_trade_data(script_vars):\n trades = [\n {\n 'order': int(x[0]),\n 't': int(f'{x[1][:2]}{x[1][3:5]}{x[1][6:]}'),\n 'v': int(x[2]),\n 'p': int(x[3]),\n }\n for x in script_vars['IntraTradeData']\n ]\n\n return trades\n\n\ndef _extract_share_holders_data(script_vars):\n share_holders = []\n for shd in script_vars['ShareHolderData']:\n share_holders.append({\n 'id': shd[0],\n 'shares_count': shd[2],\n 'percentage': shd[3],\n 'name': shd[5],\n })\n\n previous_share_holders = []\n for shd in script_vars['ShareHolderDataYesterday']:\n previous_share_holders.append({\n 'id': shd[0],\n 'shares_count': shd[2],\n 'percentage': shd[3],\n 'name': shd[5],\n })\n\n return previous_share_holders, share_holders\n\n\ndef load_intraday_data(symbol_id, year, month, day):\n script_vars = _load_script_vars_from_web(symbol_id, year, month, day)\n\n symbol_details = _extract_symbol_details_data(script_vars)\n price_data = _extract_price_data(script_vars)\n\n if not price_data:\n raise NoDataError('there is no data for this symbol in the specified date')\n\n previous_shareholders, shareholders = _extract_share_holders_data(script_vars)\n trades = _extract_trade_data(script_vars)\n orders_data = _extract_orders_data(script_vars)\n\n return {\n 'symbol_details': symbol_details,\n 'price_data': price_data,\n 'previous_shareholders': previous_shareholders,\n 'shareholders': shareholders,\n 'trades': trades,\n 'orders_data': orders_data,\n }\n","sub_path":"lib/tsetmc_api/day_details/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281297362","text":"#! 
/usr/bin/env python\ndef helper(r):\n if not r:\n return 0\n left = f(r.left)\n right = f(r.right)\n if left == -1:\n return -1\n if right == -1:\n return -1\n if abs(left - right) > 1:\n return -1\n else:\n return 1 + max(left, right)\ndef f(r):\n res = helper(r)\n if res == -1:\n return False\n else:\n return True\ndef max_height(r):\n if not r:\n return 0\n else:\n return max(max_height(r.left), max_height(r.right))+1\ndef min_height(r):\n if not r:\n return 0\n else:\n return min(min_height(r.left), min_height(r.right))+1\ndef f(r):\n if not r:\n return\n mx = max(max_height(r.left), max_height(r.right))\n mi = min(min_height(r.left), min_height(r.right))\n if mx-mi >= 2:\n return False\n return True\n","sub_path":"python/balanced_binary_tree.py","file_name":"balanced_binary_tree.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"227705187","text":"from app import db\n\nclass Image(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n category = db.Column(db.String(48))\n caption = db.Column(db.String(140))\n date = db.Column(db.Date)\n image_path = db.Column(db.String(140))\n\n def __init__(self, category, caption, date, image_path):\n self.category = category\n self.caption = caption\n self.date = date\n self.image_path = image_path\n\n def __repr__(self):\n return ' 60) and (Player.getDead() == 0):\n print(people[-1],\"has died.\")\n people.remove(people[-1])\n print(\"People Left:\")\n Player.setDead(Player.getDead() + 1)\n for x in people:\n print(x)\n \n elif (40 0 and Player.getFood() > 0:\n itemLost = random.choice(itemsList)\n if (itemLost == itemsList[0]) or (itemLost == itemsMinusFood):\n itemsLost = random.randint(1, 300)\n Player.setFood(Player.getFood() - itemsLost)\n print(\"You forded the river, but lost\",itemsLost,itemLost,\". You have\",Player.getFood(),itemLost,\".\")\n \n elif (itemLost == itemsList[1]) or (itemLost == itemsMinusAmmo):\n itemsLost = random.randint(1, 60)\n Player.setAmmo(Player.getAmmo() - itemsLost)\n print(\"You forded the river, but lost\",itemsLost,itemLost,\". You have\",Player.getAmmo(),itemLost,\".\")\n flag = False\n elif inputRiver == 2:\n \n print(\"Attempting to float across river.\")\n c = random.randint(0, 1)\n d = random.randint(0, 1)\n if c == d:\n print(\"Congratulations, you successfully floated across the river.\")\n flag = False\n \n else:\n if Player.getAmmo() == 0:\n itemLost = random.choice(itemsMinusAmmo)\n elif Player.getFood() == 0:\n itemLost = random.choice(itemsMinusFood)\n if Player.getAmmo() > 0 and Player.getFood() > 0:\n itemLost = random.choice(itemsList)\n if (itemLost == itemsList[0]) or (itemLost == itemsMinusFood):\n itemsLost = random.randint(1, 300)\n Player.setFood(Player.getFood() - itemsLost)\n print(\"You floated across the river, but lost\",itemsLost,itemLost,\". You have\",Player.getFood(),itemLost,\".\")\n flag = False\n return Player.getFood()\n elif itemLost == (itemsList[1]) or (itemLost == itemsMinusFood):\n itemsLost = random.randint(1, 60)\n Player.setAmmo(Player.getAmmo() - itemsLost)\n print(\"You floated across the river, but lost\",itemsLost,itemLost,\". 
You have\",Player.getAmmo(),itemLost,\".\")\n flag = False\n return Player.getAmmo()\n\n elif inputRiver == 3:\n numDaySkip = random.randint(1, 4)\n skipDays(numDaySkip)\n print(\"You have lost\",numDaySkip,\"days.\")\n return numDaySkip\n \n except ValueError:\n print(\"Invalid.\")\n except NameError:\n print(\"Invalid.\")\n \n else:\n print(\"Nothing happened today.\")\n","sub_path":"EVENTS.py","file_name":"EVENTS.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"288947888","text":"import sys\nimport config\nfrom nltk.stem import PorterStemmer\n\ndef readDocIds(docids_file):\n file_pointer = open(docids_file, \"r\", encoding=\"utf8\", errors='ignore')\n file_data = file_pointer.read()\n file_lines = file_data.split('\\n')\n documents = dict()\n for line in file_lines:\n if len(line) > 1:\n arr = line.split('\\t')\n documents[arr[1]] = int(arr[0]) # \n return documents\n\n\ndef readTermIds(termids_file):\n file_pointer = open(termids_file, \"r\", encoding=\"utf8\", errors='ignore')\n file_data = file_pointer.read()\n file_lines = file_data.split('\\n')\n terms = dict()\n for line in file_lines:\n if len(line) > 1:\n arr = line.split('\\t')\n terms[arr[1]] = int(arr[0]) # \n return terms\n\n\ndef readTermIndex():\n file_pointer = open(config.TERM_INDEX_FILE, \"r\", encoding=\"utf8\", errors='ignore')\n file_data = file_pointer.read()\n file_lines = file_data.split('\\n')\n index = dict() # index = <>\n for line in file_lines:\n arr = line.split(' ')\n if len(arr[0]) < 1:\n continue\n i = 0\n term_id = int(arr[0])\n firstDocId = True\n totalId = 0\n positions = []\n index[term_id] = dict() # index = >\n for part in arr:\n if i > 2: # skipping the first 3 numbers which are term_id, count_in_corpus, document_count\n doc_pos = part.split(',') # split doc_id and position\n if firstDocId: # no decoding is needed in first doc_id\n try:\n index[term_id][int(doc_pos[0])].append(int(doc_pos[1])) # index = >\n except:\n index[term_id][int(doc_pos[0])] = []\n index[term_id][int(doc_pos[0])].append(int(doc_pos[1]))\n totalId = int(doc_pos[0])\n firstDocId = False\n else:\n totalId += int(doc_pos[0])\n try:\n index[term_id][int(totalId)].append(int(doc_pos[1]))\n except:\n index[term_id][int(totalId)] = []\n index[term_id][int(totalId)].append(int(doc_pos[1]))\n i+=1\n\n return index\n\n\nps = PorterStemmer()\nsearched_term = ps.stem(str(sys.argv[1]))\n\nprint(\"Listing for term (stemmed): \" + searched_term)\nterms = readTermIds(config.TERMID_FILE)\n\nindex = readTermIndex()\n\nterm_id = terms[searched_term]\n\nif term_id is None:\n print(\"Term not found.\")\nelse:\n try:\n res = index[term_id]\n print(\"TERMID: \" + str(term_id))\n print(\"Number of documents containing term: \" + str(len(res)))\n\n total_occ = 0\n for doc in res:\n for pos in res[doc]:\n total_occ += 1\n\n print(\"Term frequency in corpus: \" + str(total_occ))\n except:\n print(\"not found\")\n\n\n\n\n","sub_path":"read_index.py","file_name":"read_index.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75175896","text":"class Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n nums1 = self.mergesort(nums1)\n nums2 = self.mergesort(nums2)\n res = []\n i=j=0\n while i '2003001':\n tr=soup.find_all('tr')[i]\n# print(tr)\n td=tr.find_all('td')\n 
print('Date: ' + td[0].text)\n #row这个数组是要写入csv文件的\n row=[td[0].text]\n print('SeqNum: '+ td[1].text)\n row.append(td[1].text)\n MarkSeqNum=td[1].text\n \n # 这个字段存放中奖球的号码。\n ball='' \n for k in range(0,7):\n em=td[2].find_all('em')[k]\n ball=ball +' ' +em.text\n row.append(em.text)\n print(ball)\n \n print('Revenue: '+ td[3].text)\n row.append(td[3].text)\n strong=td[4].find('strong')\n print('First Reward: '+strong.text)\n row.append(strong.text)\n # print('First Reward: '+td[4].text.strip())\n # row.append(td[4].text.strip())\n print('Second Reward: '+td[5].text)\n row.append(td[5].text)\n print('')\n \n #在csv文件中写入\n writer.writerow(row)\n \ncsvfile.close()\n","sub_path":"SSQDatafromZHCW.py","file_name":"SSQDatafromZHCW.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"366851970","text":"from urllib.request import Request, urlopen\nimport json\nfrom dateutil import parser\nimport datetime\nimport sys\n\n\ndef get_data(forecast, item):\n value = float(forecast[item])\n if value == -99.0:\n value = None\n\n return value\n\n\n# Obtenção dos valores de interesse para fazer o display, de acordo com o tipo\n# de previsão horária / diária\ndef clean_forecast(forecast):\n idPeriodo = forecast['idPeriodo']\n dataPrev = forecast['dataPrev']\n\n # Probabilidade de precipitação\n probPrec = get_data(forecast, 'probabilidadePrecipita')\n # Id's do tipo de tempo\n idTempo = forecast['idTipoTempo']\n idPrec = forecast['idIntensidadePrecipita']\n\n dic_out = {\n 'dataPrev': dataPrev,\n 'probPrec': probPrec,\n 'idTempo': idTempo,\n 'idPrec': idTempo\n }\n \n # Medições específicas para cada tipo de tempo\n if idPeriodo == 1:\n t = get_data(forecast, 'tMed')\n dic_out['t'] = t\n elif idPeriodo == 24:\n tMin = get_data(forecast, 'tMin')\n tMax = get_data(forecast, 'tMax')\n dic_out['tMin'] = tMin\n dic_out['tMax'] = tMax\n\n return dic_out\n \n\n\n\n\n# Função para obter a lista de forecasts\n# O input 'forecast_type' indica o tipo de previsão a pedir:\n# forecast_type = 1 -> horária\n# forecast_type = 24 -> diária\ndef getWeatherForecast(forecast_type):\n \n weather_type_dict = {\n '1': 'Clear',\n '2': 'Slightly cloudy',\n '3': 'Partly Cloudy',\n '4': 'Overcast',\n '5': 'High clouds',\n '6': 'Light rain',\n '7': 'Drizzle',\n '9': 'Rain',\n '11': 'Heavy rain',\n }\n\n # 1010500 é o código de Aveiro\n source = 'https://api.ipma.pt/json/alldata/1010500.json'\n\n request = Request(source)\n stream = urlopen(request).read().decode('utf-8')\n stream_data = json.loads(stream)\n\n forecast_list = []\n\n # Separação em 2 tipos de previsão (diária e horária)\n # Existe mais outra trihorária, mas vou ignorar.\n for forecast in stream_data:\n dt = forecast['idPeriodo']\n if dt == forecast_type:\n forecast_list += [clean_forecast(forecast)]\n \n return forecast_list\n","sub_path":"engine_ipma.py","file_name":"engine_ipma.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"423177106","text":"from multiprocessing import Process\nimport logging\nfrom time import time\n\n\nclass AverageFilter(Process):\n name = \"AverageFilter\"\n\n description = \"Calculates average values of floats or ints passed to it.\"\n\n # Types of data this module accepts\n inTypes = [\"float\", \"int\"]\n\n # Additional arguments this module requires\n arguments = None\n\n def __init__(self, task_queue):\n Process.__init__(self)\n self.task_queue = 
task_queue\n\n logging.info(\"Created AverageFilter\")\n\n self.time = time()\n\n self.total = 0\n self.len = 0.\n\n self.outlist = []\n\n def run(self):\n while True:\n next_task = self.task_queue.get()\n\n # Poison pill means shutdown\n if next_task is None:\n for tq in self.outlist:\n if self.len:\n tq.put(self.total / self.len)\n else:\n tq.put(0)\n tq.put(None)\n\n # Wait for them to notice it\n for tq in self.outlist:\n tq.join()\n\n # Notify upstream module\n self.task_queue.task_done()\n logging.info(self.name + \" exiting\")\n break\n\n if not type(next_task).__name__ in self.inTypes:\n logging.warning(\"Wrong type!\")\n self.task_queue.task_done()\n continue\n\n if next_task:\n self.total += next_task\n self.len += 1.\n\n elapsed = time() - self.time\n\n if elapsed > 1:\n self.time = time()\n for tq in self.outlist:\n if self.len:\n tq.put(self.total / self.len)\n else:\n tq.put(0)\n\n self.task_queue.task_done()\n return\n","sub_path":"python/modules/filters/averagefilter.py","file_name":"averagefilter.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"335798136","text":"# %load q02_best_k_features/build.py\n# Default imports\n\nimport pandas as pd\nimport numpy as np\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n#print(data.info())\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.feature_selection import f_regression\n\n\n# Write your solution here:\n\ndef percentile_k_features(data,k=20):\n y=data['SalePrice']\n X=data.iloc[:,:-1]\n f_test, _ = f_regression(X, y)\n f_normalized=f_test/np.max(f_test)\n featureVar=X.columns.values\n\n topK=pd.DataFrame({'col1':f_normalized,'col2':featureVar})\n topK_sorted=topK.sort_values(by=['col1'],ascending=False)\n return topK_sorted.nlargest(7, 'col1')['col2'].tolist()\n #Selector_f.fit(X,y)\n #x_new = Selector_f.transform(X).sort()\n\n\n #print(X.columns.values)\n #return pd.DataFrame(list(zip(X.columns.values, f_normalized)))\n# Selector_f= SelectPercentile(f_regression, k)\n# Selector_f.fit_transform(X,y)\n# scores = score_filter.scores_\n# support = score_filter.get_support()\n #print(np.sort(scores[support]))\n #print(Selector_f.transform(y))\n #cols=Selector_f.get_support()\n #print(cols)\n #return list(data.columns.values[cols])\n\n\n#feature_names =percentile_k_features(data)\n\n#print((feature_names))\n","sub_path":"q02_best_k_features/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"530975060","text":"from mymodules.models import Student #imports the student class\nfrom mymodules.math_utils import average_grade #imports the average_grade method\n\n#create a roster of 10 students, print the list of students,\n#and print the average score of the current roster.\n#must use the Student class, and the average_grade method you defined the mymodules module.\n\ndef main():\n s1 = Student(\"Selina\", 95)\n s2 = Student(\"Alex\", 93)\n s3 = Student(\"Danny\", 85)\n s4 = Student(\"John\", 72)\n s5 = Student(\"Emily\", 75)\n s6 = Student(\"Hannah\", 88)\n s7 = Student(\"Liz\", 79)\n s8 = Student(\"Lex\", 82)\n s9 = Student(\"Bill\", 86)\n s10 = Student(\"Phil\", 90)\n\n roster = [s1,s2,s3,s4,s5,s6,s7,s8,s9,s10]\n\n for student in roster:\n student.print_student_info()\n\n avg = average_grade(roster)\n print(f\"Average score of roster: {avg}\")\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"cus1166_lab1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"85493469","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.response.AlipayResponse import AlipayResponse\nfrom alipay.aop.api.domain.ItapResponsePayload import ItapResponsePayload\n\n\nclass AlipayMsaasItapUserConfirmResponse(AlipayResponse):\n\n    def __init__(self):\n        super(AlipayMsaasItapUserConfirmResponse, self).__init__()\n        self._payload = None\n        self._request_id = None\n\n    @property\n    def payload(self):\n        return self._payload\n\n    @payload.setter\n    def 
payload(self, value):\n if isinstance(value, list):\n self._payload = list()\n for i in value:\n if isinstance(i, ItapResponsePayload):\n self._payload.append(i)\n else:\n self._payload.append(ItapResponsePayload.from_alipay_dict(i))\n @property\n def request_id(self):\n return self._request_id\n\n @request_id.setter\n def request_id(self, value):\n self._request_id = value\n\n def parse_response_content(self, response_content):\n response = super(AlipayMsaasItapUserConfirmResponse, self).parse_response_content(response_content)\n if 'payload' in response:\n self.payload = response['payload']\n if 'request_id' in response:\n self.request_id = response['request_id']\n","sub_path":"alipay/aop/api/response/AlipayMsaasItapUserConfirmResponse.py","file_name":"AlipayMsaasItapUserConfirmResponse.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"437761872","text":"import os\nimport subprocess\n\nimport database\nimport spider\nimport service\nfrom scanners.dns_records import DNSRecords\n\ndef run_spider(domain, spider_name):\n spider.run(domain, spider_name)\n\ndef run_service(domain, service_name):\n service.run(domain, service_name)\n\ndef run_enumerator(domain):\n filename = '/tmp/out'\n with open(filename, 'w') as log:\n p = subprocess.Popen(\n [\n './dnssearch', '-domain', domain\n ],\n stdout=log,\n stderr=log,\n stdin=log\n )\n p.wait()\n\n with open(filename) as f:\n subdomains = [domain.strip() for domain in f.readlines()]\n database.add('posts', domain, subdomains)\n os.remove(filename)\n\n\ndef run_subjack(domain):\n filename = '/tmp/out'\n filename_input = '/tmp/input'\n with open(filename_input, 'w') as f:\n f.writelines((url + '\\n' for url in database.get('posts', domain)['subdomains']))\n\n p = subprocess.Popen(\n [\n './subjack', '-w', filename_input, '-o', filename\n ],\n stdout=None,\n stderr=None,\n stdin=None\n )\n p.wait()\n try:\n with open(filename) as f:\n takeover_subdomains = [line.strip().split(\" \")[-1] for line in f.readlines()]\n database.add('posts', domain, takeover_subdomains, 'takeover')\n os.remove(filename)\n except:\n pass\n\n os.remove(filename_input)\n\n\ndef run_cname_scanner(domain):\n database.add_cname(\n os.environ['CNAME_COLLECTION'],\n domain,\n {\n 'domain': domain,\n 'subdomains': DNSRecords(\n database.get(\n os.environ['DEFAULT_COLLECTION'],\n domain)['subdomains']\n ).get_records(['CNAME'])\n }\n )\n","sub_path":"workers/app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541918795","text":"#!/usr/bin/env python\nimport argparse\nimport json\nimport logging\nimport os\n\nfrom requests.packages import urllib3\n\nfrom avi.sdk.utils.f5_converter import f5_config_converter_v11, \\\n f5_config_converter_v10, f5_parser, upload_config\n\nurllib3.disable_warnings()\n\n\ndef dict_merge(dct, merge_dct):\n for k, v in merge_dct.iteritems():\n if (k in dct and isinstance(dct[k], dict) and\n isinstance(merge_dct[k], dict)):\n dict_merge(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--bigip_config_file',\n help='F5 config file location')\n parser.add_argument('-v', '--f5_config_version',\n help='version of f5 config', default=11)\n parser.add_argument('-o', '--output_file_path',\n help='output file path', default='output')\n 
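# example invocation (hypothetical input file): python f5_converter.py -f bigip.conf -v 11 -O cli-upload\n    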
parser.add_argument('-O', '--option', choices=['cli-upload', 'api-upload'],\n help='Output option', default='cli-upload')\n parser.add_argument('-u', '--user', help='controller user', default='admin')\n parser.add_argument('-p', '--password', help='controller user password',\n default='avi123')\n parser.add_argument('-t', '--tenant', help='tenant name', default='admin')\n parser.add_argument('-c', '--controller_ip', help='controller ip')\n parser.add_argument('-s', '--vs_state', choices=['enable', 'disable'],\n help='state of created VS', default='disable')\n parser.add_argument('-l', '--input_folder_location',\n help='location of input files', default='.')\n parser.add_argument('--controller_version',\n help='target controller version', default='16.2')\n\n args = parser.parse_args()\n if not os.path.exists(args.output_file_path):\n os.mkdir(args.output_file_path)\n output_file_path = os.path.normpath(args.output_file_path)\n input_folder_location = os.path.normpath(args.input_folder_location)\n\n LOG = logging.getLogger(\"converter-log\")\n LOG.setLevel(logging.DEBUG)\n fh = logging.FileHandler(args.output_file_path +\n os.path.sep + \"converter.log\",\n mode='a', encoding=None, delay=False)\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n LOG.addHandler(fh)\n\n source_file = open(args.bigip_config_file, \"r\")\n source_str = source_file.read()\n LOG.debug('Reading source file:'+source_file.name)\n f5_config_dict = f5_parser.parse_config(source_str, args.f5_config_version)\n LOG.debug('File Parsed successfully')\n avi_config_dict = None\n LOG.debug('Conversion started')\n if int(args.f5_config_version) == 11:\n defaults_file = open(\"f5_v11_defaults.conf\", \"r\")\n f5_defaults_dict = f5_parser.parse_config(defaults_file.read(), 11)\n dict_merge(f5_defaults_dict, f5_config_dict)\n f5_config_dict = f5_defaults_dict\n avi_config_dict = f5_config_converter_v11.\\\n convert_to_avi_dict(f5_config_dict, output_file_path, args.vs_state,\n input_folder_location, args.option)\n elif int(args.f5_config_version) == 10:\n defaults_file = open(\"f5_v10_defaults.conf\", \"r\")\n f5_defaults_dict = f5_parser.parse_config(defaults_file.read(), 10)\n dict_merge(f5_defaults_dict, f5_config_dict)\n f5_config_dict = f5_defaults_dict\n avi_config_dict = f5_config_converter_v10.\\\n convert_to_avi_dict(f5_config_dict, output_file_path, args.vs_state,\n input_folder_location, args.option)\n\n if args.option == \"cli-upload\":\n avi_config_dict[\"META\"] = {\n \"supported_migrations\": {\n \"versions\": [\n \"14_2\",\n \"15_1\",\n \"15_1_1\",\n \"15_2\",\n \"15_2_3\",\n \"15_3\",\n \"current_version\"\n ]\n },\n \"version\": {\n \"Product\": \"controller\",\n \"Version\": args.controller_version,\n \"min_version\": 15.2,\n \"ProductName\": \"Avi Cloud Controller\"\n },\n \"upgrade_mode\": False,\n \"use_tenant\": args.tenant\n }\n text_file = open(output_file_path+os.path.sep+\"Output.json\", \"w\")\n json.dump(avi_config_dict, text_file, indent=4)\n text_file.close()\n LOG.info('written avi config file ' +\n output_file_path+os.path.sep+\"Output.json\")\n else:\n text_file = open(output_file_path+\"Output.json\", \"w\")\n json.dump(avi_config_dict, text_file, indent=4)\n text_file.close()\n upload_config.upload_config_to_controller(\n avi_config_dict, args.controller_ip,\n args.user, args.password, args.tenant)\n LOG.info('Config uploaded to 
controller')\n","sub_path":"python/avi/sdk/utils/f5_converter/f5_converter.py","file_name":"f5_converter.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286497274","text":"N = input()\nres = sum(map(int, N))\nres = max(res, max(0, int(N[0]) - 1 + 9 * (len(N) - 1)))\nfor i in range(1, len(N) - 1):\n    if int(N[i]) > 0:\n        t = 0\n        for j in range(i):\n            t += int(N[j])\n        t += int(N[i]) - 1 + 9 * (len(N) - i - 1)\n        res = max(res, t)\nprint(res)\n","sub_path":"Python_codes/p03427/s639731446.py","file_name":"s639731446.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"191024542","text":"# coding=utf8\n\nfrom subprocess import Popen, PIPE\nfrom threading import Thread\ntry:\n    from queue import Queue\nexcept ImportError:\n    from Queue import Queue\n\nimport sublime\nimport os\nimport json\n\nfrom ..Tss import TSS\nfrom ..display.Panel import PANEL\nfrom ..display.Errors import ERRORS\nfrom ..system.Settings import SETTINGS\nfrom ..Utils import debounce, dirname, read_file, get_kwargs, Debug\n\n\n# ----------------------------------------- UTILS --------------------------------------- #\n\ndef show_output(window,line):\n    PANEL.show(window)\n    PANEL.update(line['output'])\n\ndef clear_panel(window):\n    PANEL.clear(window)\n\n\n# --------------------------------------- COMPILER -------------------------------------- #\n\nclass Refactor(Thread):\n\n    def __init__(self, window, member, refs, root):\n        self.window = window\n        self.member = member\n        self.refs = refs\n        self.root = root\n        Thread.__init__(self)\n\n    def run(self):\n        clear_panel(self.window)\n\n        node = SETTINGS.get_node(self.root)\n        kwargs = get_kwargs()\n        p = Popen([node, os.path.join(dirname,'bin','refactor.js'), self.member, json.dumps(self.refs)], stdin=PIPE, stdout=PIPE, **kwargs)\n        reader = RefactorReader(self.window,p.stdout,Queue())\n        reader.daemon = True\n        reader.start()\n\n\nclass RefactorReader(Thread):\n\n    def __init__(self,window,stdout,queue):\n        self.window = window\n        self.stdout = stdout\n        self.queue = queue\n        Thread.__init__(self)\n\n    def run(self):\n        delay = 1000\n        previous = \"\"\n        for line in iter(self.stdout.readline, b''):\n            line = json.loads(line.decode('UTF-8'))\n            if 'output' in line:\n                show_output(self.window,line)\n            elif 'file' in line:\n                filename = line['file']\n                content = read_file(filename)\n                lines = len(content.split('\\n'))-1\n                if previous != filename:\n                    self.send(filename,lines,content,delay)\n                    delay+=100\n\n                previous = filename\n            else:\n                print('refactor error')\n\n\n        self.stdout.close()\n\n    def send(self,filename,lines,content,delay):\n        sublime.set_timeout(lambda:self.update(filename,lines,content),delay)\n\n    def update(self,filename,lines,content):\n        TSS.update(filename, lines, content)\n        ERRORS.start_recalculation(filename)\n\n\n\n","sub_path":"lib/commands/Refactor.py","file_name":"Refactor.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395228644","text":"def merge_two_sorted(la,lb):\n    # if len(la) == 0:\n    #     return lb\n    # if len(lb) == 0:\n    #     return la\n    i, j= 0,0\n    result = []\n    while i= len(arr1):\n                res += arr2[j],\n                j += 1\n            elif j >= len(arr2):\n                res += arr1[i],\n                i += 1\n            else:\n                if arr1[i] > arr2[j]:\n                    res += arr2[j],\n                    j += 1\n                else:\n                    res += arr1[i],\n                    i += 1\n        return res\n\n        # while arr1 or arr2:\n        #     if 
not arr1:\n # res += arr2.pop(0),\n # elif not arr2:\n # res += arr1.pop(0),\n # else: # arr1 and arr2 not empty\n # v = arr1.pop(0) if arr1[0] < arr2[0] else arr2.pop(0)\n # res += v,\n # return res\n\n def helper(l, r):\n if l + 1 == r:\n return arr[l:r]\n m = (l + r + 1)//2\n l_arr = helper(l, m)\n r_arr = helper(m, r)\n return merge_sorted_lists(l_arr, r_arr)\n\n return helper(0, len(arr))\n\nif __name__ == '__main__':\n from minitest import *\n\n # with test(merge_two_sorted):\n # la = [1,3,5,7,9]\n # lb = [2,4,6]\n # merge_two_sorted(la,lb).must_equal(\n # [1, 2, 3, 4, 5, 6, 7, 9])\n\n # with test(merge_sort):\n # merge_sort([4,3,5,7,8,2,1]).must_equal(\n # [1, 2, 3, 4, 5, 7, 8])\n\n with test(merge_sort_inplace):\n merge_sort_inplace([4,3,5,7,8,2,1]).must_equal(\n [1, 2, 3, 4, 5, 7, 8])\n\n\n","sub_path":"python/leetcode/sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"555801047","text":"#! /usr/local/bin/python\n\nimport sys\nimport cv2\nimport numpy as np\n\nimgpath = sys.argv[1]\nexportTmpPath, expression = imgpath.rsplit(\".\", 1)\nexportpath = exportTmpPath+\"_brend.\"+expression\nblurDeg = 101\n\nimg = cv2.imread(imgpath)\nblurImg = cv2.GaussianBlur(img, (blurDeg, blurDeg), 0)\n\nwhite = np.zeros(img.shape, dtype=img.dtype)\nwhite[::] = 255\n\nbrend = cv2.addWeighted(white, 0.5, blurImg, 0.5, 0)\n\ncv2.imwrite(exportpath, brend)\n","sub_path":"pyt/genBlurImg.py","file_name":"genBlurImg.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"377772957","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nimport subprocess\nimport cStringIO\n\n\ndef import_cif_list_cif(feed, confidence):\n IPlist = []\n cif_command = ['/usr/bin/perl', '/opt/cif/bin/cif', '-C', '/home/cif/.cif', '-q', feed, '-c', confidence, '-p',\n 'json']\n p = subprocess.Popen(cif_command, stdout=subprocess.PIPE)\n fh = cStringIO.StringIO(p.communicate()[0])\n line = fh.readline()\n while line:\n obj = json.loads(line)\n IPlist.append(obj['address'])\n line = fh.readline()\n return IPlist\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Update CIF IP feeds on webserver',\n epilog='http://xkcd.com/353/')\n parser.add_argument('--directory', '-d', default='/home/httpuser/Public',\n help='Base directory to write files to; defaults to \"/home/httpuser/Public\"')\n options = parser.parse_args()\n\n # https://code.google.com/p/collective-intelligence-framework/wiki/Feeds_v1\n INFRASTRUCTURE = ['infrastructure/botnet', 'infrastructure/malware', 'infrastructure/scan']\n\n # Generate 85 and 65 confidence feeds for each type\n for feed, confidence in [(f, c) for f in INFRASTRUCTURE for c in ['65', '85']]:\n scan_file = options.directory + '/CIF_infrastructure_' + feed.split('/')[1] + '_' + confidence + '.txt'\n mylist = import_cif_list_cif(feed, confidence)\n if mylist:\n with open(scan_file, 'w') as f:\n for item in mylist:\n f.write('%s\\n' % item)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cif_generate_IP_lists.py","file_name":"cif_generate_IP_lists.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"519401974","text":"import csv\n\n\ndef export_to_file(results, new_file_name):\n column_names = [[\"Date\", \"Title\", \"Price\", \"Link\"]]\n\n with 
open(new_file_name, 'w') as new_file:\n        writeCSV = csv.writer(new_file, delimiter=',')\n\n        # Set the column names\n        for names in column_names:\n            writeCSV.writerow(names)\n\n        # Set all rows\n        for i in range(0, results[\"length\"]):\n            columns = []\n            for col in column_names[0]:\n                columns.append(results[i][col.lower()])\n\n            writeCSV.writerow(columns)\n\n    return \"Success\"\n\n\n\n\n# def read_and_convert_to_list(file):\n#     list = []\n#     with open(file, 'r') as csvfile:\n#         readCSV = csv.reader(csvfile)\n#\n#         for row in readCSV:\n#             list.append(row)\n#     return list\n\n\n# def write_to_file(list, new_file_name):\n#     with open(new_file_name, 'w') as new_file:\n#         writeCSV = csv.writer(new_file, delimiter=',')\n#\n#         for row in list:\n#             writeCSV.writerow(row)\n#     return\n","sub_path":"export_file.py","file_name":"export_file.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"63161385","text":"# Copyright (C) 2015 UCSC Computational Genomics Lab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import str\nfrom past.utils import old_div\nimport logging\nimport os\nfrom pipes import quote\nfrom toil import subprocess\nimport time\nimport math\nimport sys\nimport shlex\nimport xml.etree.ElementTree as ET\nimport tempfile\ntry:\n    from queue import Empty\nexcept ImportError:  # Python 2\n    from Queue import Empty\n\nfrom toil.batchSystems import MemoryString\nfrom toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\nclass TorqueBatchSystem(AbstractGridEngineBatchSystem):\n\n\n    # class-specific Worker\n    class Worker(AbstractGridEngineBatchSystem.Worker):\n\n        def __init__(self, newJobsQueue, updatedJobsQueue, killQueue, killedJobsQueue, boss):\n            super(self.__class__, self).__init__(newJobsQueue, updatedJobsQueue, killQueue, killedJobsQueue, boss)\n            self._version = self._pbsVersion()\n\n        def _pbsVersion(self):\n            \"\"\" Determines PBS/Torque version via pbsnodes\n            \"\"\"\n            try:\n                out = subprocess.check_output([\"pbsnodes\", \"--version\"]).decode('utf-8')\n\n                if \"PBSPro\" in out:\n                    logger.debug(\"PBS Pro proprietary Torque version detected\")\n                    self._version = \"pro\"\n                else:\n                    logger.debug(\"Torque OSS version detected\")\n                    self._version = \"oss\"\n            except subprocess.CalledProcessError as e:\n                if e.returncode != 0:\n                    logger.error(\"Could not determine PBS/Torque version\")\n\n            return self._version\n\n        \"\"\"\n        Torque-specific AbstractGridEngineWorker methods\n        \"\"\"\n        def getRunningJobIDs(self):\n            times = {}\n            with self.runningJobsLock:\n                currentjobs = dict((str(self.batchJobIDs[x][0].strip()), x) for x in self.runningJobs)\n            logger.debug(\"getRunningJobIDs current jobs are: \" + str(currentjobs))\n            # Skip running qstat if we don't have any current jobs\n            if not currentjobs:\n                return times\n            # Only query for job IDs to avoid clogging the batch system on heavily loaded clusters\n            # PBS plain qstat will 
return every running job on the system.\n jobids = sorted(list(currentjobs.keys()))\n if self._version == \"pro\":\n process = subprocess.Popen(['qstat', '-x'] + jobids, stdout=subprocess.PIPE)\n elif self._version == \"oss\":\n process = subprocess.Popen(['qstat'] + jobids, stdout=subprocess.PIPE)\n\n\n stdout, stderr = process.communicate()\n\n # qstat supports XML output which is more comprehensive, but PBSPro does not support it \n # so instead we stick with plain commandline qstat tabular outputs\n for currline in stdout.decode('utf-8').split('\\n'):\n items = currline.strip().split()\n if items:\n jobid = items[0].strip()\n if jobid in currentjobs:\n logger.debug(\"getRunningJobIDs job status for is: \" + items[4])\n if jobid in currentjobs and items[4] == 'R':\n walltime = items[3]\n logger.debug(\"getRunningJobIDs qstat reported walltime is: \" + walltime)\n # normal qstat has a quirk with job time where it reports '0'\n # when initially running; this catches this case\n if walltime == '0':\n walltime = time.mktime(time.strptime(walltime, \"%S\"))\n else:\n walltime = time.mktime(time.strptime(walltime, \"%H:%M:%S\"))\n times[currentjobs[jobid]] = walltime\n\n logger.debug(\"Job times from qstat are: \" + str(times))\n return times\n\n def getUpdatedBatchJob(self, maxWait):\n try:\n logger.debug(\"getUpdatedBatchJob: Job updates\")\n pbsJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)\n self.updatedJobsQueue.task_done()\n jobID, retcode = (self.jobIDs[pbsJobID], retcode)\n self.currentjobs -= {self.jobIDs[pbsJobID]}\n except Empty:\n logger.debug(\"getUpdatedBatchJob: Job queue is empty\")\n pass\n else:\n return jobID, retcode, None\n\n def killJob(self, jobID):\n subprocess.check_call(['qdel', self.getBatchSystemID(jobID)])\n\n def prepareSubmission(self, cpu, memory, jobID, command):\n return self.prepareQsub(cpu, memory, jobID) + [self.generateTorqueWrapper(command, jobID)]\n\n def submitJob(self, subLine):\n process = subprocess.Popen(subLine, stdout=subprocess.PIPE)\n so, se = process.communicate()\n return so\n\n def getJobExitCode(self, torqueJobID):\n if self._version == \"pro\":\n args = [\"qstat\", \"-x\", \"-f\", str(torqueJobID).split('.')[0]]\n elif self._version == \"oss\":\n args = [\"qstat\", \"-f\", str(torqueJobID).split('.')[0]]\n\n process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in process.stdout:\n line = line.strip()\n #logger.debug(\"getJobExitCode exit status: \" + line)\n # Case differences due to PBSPro vs OSS Torque qstat outputs\n if line.startswith(\"failed\") or line.startswith(\"FAILED\") and int(line.split()[1]) == 1:\n return 1\n if line.startswith(\"exit_status\") or line.startswith(\"Exit_status\"):\n status = line.split(' = ')[1]\n logger.debug('Exit Status: ' + status)\n return int(status)\n if 'unknown job id' in line.lower():\n # some clusters configure Torque to forget everything about just\n # finished jobs instantly, apparently for performance reasons\n logger.debug('Batch system no longer remembers about job {}'.format(torqueJobID))\n # return assumed success; status files should reveal failure\n return 0\n return None\n\n \"\"\"\n Implementation-specific helper methods\n \"\"\"\n def prepareQsub(self, cpu, mem, jobID):\n\n # TODO: passing $PWD on command line not working for -d, resorting to\n # $PBS_O_WORKDIR but maybe should fix this here instead of in script?\n\n qsubline = ['qsub', '-S', '/bin/sh', '-V', '-N', 'toil_job_{}'.format(jobID)]\n\n if self.boss.environment:\n 
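# illustrative: appends e.g. '-v', 'FOO=bar,BAZ=qux' (hypothetical variables; values quoted via pipes.quote)\n                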
qsubline.append('-v')\n                qsubline.append(','.join(k + '=' + quote(os.environ[k] if v is None else v)\n                                        for k, v in self.boss.environment.items()))\n\n            reqline = list()\n            if mem is not None:\n                memStr = str(old_div(mem, 1024)) + 'K'\n                reqline.append('mem=' + memStr)\n\n            if cpu is not None and math.ceil(cpu) > 1:\n                reqline.append('nodes=1:ppn=' + str(int(math.ceil(cpu))))\n\n            # Other resource requirements can be passed through the environment (see man qsub)\n            reqlineEnv = os.getenv('TOIL_TORQUE_REQS')\n            if reqlineEnv is not None:\n                logger.debug(\"Additional Torque resource requirements appended to qsub from \"\\\n                        \"TOIL_TORQUE_REQS env. variable: {}\".format(reqlineEnv))\n                if (\"mem=\" in reqlineEnv) or (\"nodes=\" in reqlineEnv) or (\"ppn=\" in reqlineEnv):\n                    raise ValueError(\"Incompatible resource arguments ('mem=', 'nodes=', 'ppn='): {}\".format(reqlineEnv))\n\n                reqline.append(reqlineEnv)\n\n            if reqline:\n                qsubline += ['-l',','.join(reqline)]\n\n            # All other qsub parameters can be passed through the environment (see man qsub).\n            # No attempt is made to parse them out here and check that they do not conflict\n            # with those that we already constructed above\n            arglineEnv = os.getenv('TOIL_TORQUE_ARGS')\n            if arglineEnv is not None:\n                logger.debug(\"Native Torque options appended to qsub from TOIL_TORQUE_ARGS env. variable: {}\".\\\n                        format(arglineEnv))\n                if (\"mem=\" in arglineEnv) or (\"nodes=\" in arglineEnv) or (\"ppn=\" in arglineEnv):\n                    raise ValueError(\"Incompatible resource arguments ('mem=', 'nodes=', 'ppn='): {}\".format(arglineEnv))\n                qsubline += shlex.split(arglineEnv)\n\n            return qsubline\n\n        def generateTorqueWrapper(self, command, jobID):\n            \"\"\"\n            A very simple script generator that just wraps the command given; for\n            now this goes to default tempdir\n            \"\"\"\n            stdoutfile = self.boss.formatStdOutErrPath(jobID, 'torque', '$PBS_JOBID', 'std_output')\n            stderrfile = self.boss.formatStdOutErrPath(jobID, 'torque', '$PBS_JOBID', 'std_error')\n\n            _, tmpFile = tempfile.mkstemp(suffix='.sh', prefix='torque_wrapper')\n            fh = open(tmpFile, 'w')\n            fh.write(\"#!/bin/sh\\n\")\n            fh.write(\"#PBS -o {}\\n\".format(stdoutfile))\n            fh.write(\"#PBS -e {}\\n\".format(stderrfile))\n            fh.write(\"cd $PBS_O_WORKDIR\\n\\n\")\n            fh.write(command + \"\\n\")\n\n            fh.close()  # close (and flush) the wrapper script before qsub reads it\n\n            return tmpFile\n\n\n    @classmethod\n    def obtainSystemConstants(cls):\n\n        # See: https://github.com/BD2KGenomics/toil/pull/1617#issuecomment-293525747\n        logger.debug(\"PBS/Torque does not need obtainSystemConstants to assess global cluster resources.\")\n\n\n        #return maxCPU, maxMEM\n        return None, None\n","sub_path":"src/toil/batchSystems/torque.py","file_name":"torque.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215957821","text":"'''\nScript to plot ogives from EddyPro output folder.\nDaniel Metzen, 23/07/2019\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\n\n\ndef build_ogive_df(path_to_ogive_folder, ogive='og(w_ts)'):\n    \"\"\"\n    Function to build single dataframe merging all ogives in EddyPro output\n    folder.\n\n    Parameters\n    ----------\n    path_to_ogive_folder: string\n        path to folder containing ogive files\n    ogive (optional): string\n        columnname of ogive to merge\n\n    Returns:\n    --------\n    df: pd.DataFrame\n        Dataframe with frequency as index and ogives of each file as columns\n    \"\"\"\n    path_to_ogive_folder = Path(path_to_ogive_folder)\n    df_out = pd.DataFrame()
\n    # append data from files as columns with timestamp as name\n    for f in tqdm(path_to_ogive_folder.glob('*binned_ogives*.csv')):\n        obs_time = f.stem[:13]\n        _df = pd.read_csv(f, skiprows=11, index_col=1, na_values=-9999)\n        _df = _df.dropna(subset=[ogive])\n        df_out[obs_time] = _df[ogive]\n    return df_out\n\n\ndef plot_ogives(df, outfile=None):\n    \"\"\"\n    Function to plot ogives contained in Dataframe.\n\n    Parameters\n    ----------\n    df: pd.DataFrame\n        dataframe containing ogives\n    outfile (optional): string\n        filepath for saving plot\n\n    Returns:\n    --------\n    Pyplot figure and optionally saves figure to file\n    \"\"\"\n    # plot data\n    plt.plot(df.median(axis=1), 'k-', label='median')\n    plt.fill_between(df.index, df.quantile(q=.95, axis=1),\n                     df.quantile(q=.05, axis=1), color='k', alpha=.1,\n                     label='5th-95th percentile')\n    # plot indicator lines for 30, 60 and 120min\n    plt.axvline((1/(30*60)), c='.5', ls=':', label='30 min')\n    plt.axvline(1/(60*60), c='.5', ls='-.', label='60 min')\n    plt.axvline(1/(120*60), c='.5', ls='--', label='120 min')\n    # tweak plot\n    plt.legend()\n    plt.xscale('log')\n    plt.xlabel('f (Hz)')\n    plt.ylabel('ogives')\n    plt.tight_layout()\n    plt.show()\n    # save plot if desired\n    if outfile:\n        plt.savefig(outfile, dpi=300, bbox_inches='tight')\n\n\ndef main():\n    df = build_ogive_df(\n        r'E:\\flux_data_processing\\10hz_data\\MOFO_understory\\ep_output\\ogive_check\\eddypro_binned_ogives')\n    plot_ogives(df)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"eddy_pro_files/ogives/plot_ogives.py","file_name":"plot_ogives.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"536820843","text":"'''\nEach pair of factors 2 x 5 produces a trailing 0 in n!.\nAfter prime factorization, the number of 2s is always at least the number of 5s,\nso it suffices to count the factors of 5.\nFor example, for 11, count how many 5s 11! contains: 11 // 5.\nBut a multiple of 25 contributes one extra 0, so // 25 adds one more,\nand by the same reasoning 125 adds yet another, and so on.\n'''\n\nclass Solution:\n    \"\"\"\n    @param: n: An integer\n    @return: An integer, denote the number of trailing zeros in n!\n    \"\"\"\n    def trailingZeros(self, n):\n        # write your code here, try to do it without arithmetic operators.\n        res = 0\n        num = 5\n        while n >= num:\n            res += n // num\n            num *= 5\n        return res\n\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    print(solution.trailingZeros(21))","sub_path":"Lintcode-ladder/MathBitManipulation/trailingZeros.py","file_name":"trailingZeros.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"295396396","text":"from fastapi import FastAPI, Query\nfrom typing import Optional, List\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nfrom enum import Enum\n\n\nclass MyModel(str, Enum):\n    firstname = \"Namatullah\"\n    surname = \"Wahidi\"\n\n\n@app.get(\"/models/{model}\")\nasync def get_models(model: MyModel):\n    return {\"model_name\": model.name, \"model_value\": model.value}\n\n\n@app.get(\"/items/\")\nasync def read_items(q: Optional[str] = Query(None, max_length=50)):\n    results = {\"items\": [{\"item_id\": \"Foo\"}, {\"item_id\": \"Bar\"}]}\n    if q:\n        results.update({\"q\": q})\n    return results\n\n\n@app.get(\"/items/more/\")\nasync def read_items_more(q: Optional[List[str]] = Query(None)):\n    query_items = {\"q\": q}\n    return query_items\n","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"377120902","text":"from django.urls import path, re_path\n\nfrom 
.views import cart_home, cart_update, checkout_home\n\nurlpatterns = [\n\tpath('', cart_home, name='cart_home'),\n\tpath('checkout/', checkout_home, name='checkout'),\n\tpath('update/', cart_update, name='update'),\n\t\n]","sub_path":"njumu/carts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"424538958","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\nclass MobileYolo_skip:\n\tdef __init__(self,images,labels,anchors,cr=0.8,is_training=True,img_size=512,scope='Yolo_MobileNet_v2'):\n\t\tself.images = images\n\t\tself.labels = labels\n\t\tself.anchors = anchors\n\t\tself.cr = cr\n\t\tself.is_training = is_training\n\t\tself.img_size = img_size\n\n\t\tself.anchors_per_unit = self.anchors.shape[0]\n\t\tself.final_channels = self.anchors_per_unit*8\n\t\tself.output_size = [16,16]\n\t\tself.scales = [1,1,1,1]\n\n\t\tself.lstm_cell=tf.nn.rnn_cell.LSTMCell(num_units=10,num_proj=1,name='lstm_cell')\n\t\tself.layer_masks = []\n\n\t\tself.build_model(scope,is_training)\n\n\t\tself.loss_yolo = self.compute_loss()\n\n\t\tflops = {'conv1':154140672,'16':128974848,'24_1':76283904,'24':67239936,'32_1':40402944,'32':28704768,\n\t\t\t\t '64_1':19759104,'64':26935296,'96_1':33226752,'96':59277312,'160_1':40771584,'160':40427520,\n\t\t\t\t '320':60088320,'conv2':20971520,'conv3':5120}\n\n\t\tself.flops_fix = flops['conv1']+flops['16']+flops['24_1']+flops['32_1']+flops['64_1']+flops['96_1']+flops['160_1']+flops['320']+flops['conv2']+flops['conv3']\n\t\tself.flops_dyn = flops['24']+flops['32']*2+flops['64']*3+flops['96']*2+flops['160']*2\n\t\tself.flops_sum = self.flops_fix+self.flops_dyn\n\n\t\tself.computation_cost = tf.reduce_mean(flops['24']*self.layer_masks[0]+flops['32']*tf.reduce_sum(self.layer_masks[1:3],axis=0)\n\t\t\t\t\t\t\t\t\t\t\t+flops['64']*tf.reduce_sum(self.layer_masks[3:6],axis=0)\n\t\t\t\t\t\t\t\t\t\t\t+flops['96']*tf.reduce_sum(self.layer_masks[6:8],axis=0)\n\t\t\t\t\t\t\t\t\t\t\t+flops['160']*tf.reduce_sum(self.layer_masks[8:],axis=0))+self.flops_fix\n\n\t\tself.compress_ratio = self.computation_cost/self.flops_sum\n\n\t\talpha = 1e-9\n\t\tsign = tf.cond(self.compress_ratio>self.cr,lambda:1.0,lambda:-1.0)\n\t\tself.loss = self.loss_yolo + alpha*sign*self.computation_cost\n\n\t\tself.optimizer = tf.train.AdamOptimizer(1e-4).minimize(self.loss)\n\n\n\tdef build_model(self,scope,is_training=True):\n\t\tx = self.images\n\n\t\twith tf.variable_scope(scope):\n\t\t\tx = slim.conv2d(x,32,[7,7],2,padding='SAME',activation_fn=None,scope='conv1')\n\t\t\tx = tf.nn.relu6(self.batch_norm(x,is_training,scope='bn1'))\n\n\t\t\tx = self.residual(x,out_channels=16,multi=1,stride=1,is_training=self.is_training,scope='residual_16_1')\n\n\t\t\tx = self.residual(x,out_channels=24,multi=6,stride=2,is_training=self.is_training,scope='residual_24_1')\n\t\t\tnext_x = self.residual(x,out_channels=24,multi=6,stride=1,is_training=self.is_training,scope='residual_24_2')\n\t\t\tmask = self.skip_layer(x,scope='skip_layer_24_1')\n\t\t\tone_tensor = tf.ones_like(mask)\n\t\t\tx = mask*next_x+(one_tensor-mask)*x\n\t\t\tself.layer_masks.append(tf.reshape(mask,[-1]))\n\n\t\t\tx = self.residual(x,out_channels=32,multi=6,stride=2,is_training=self.is_training,scope='residual_32_1')\n\t\t\tfor i in range(2):\n\t\t\t\tnext_x = self.residual(x,out_channels=32,multi=6,stride=1,is_training=self.is_training,scope='residual_32_'+str(i+2))\n\t\t\t\tmask = 
self.skip_layer(x,scope='skip_layer_32_'+str(i+1))\n\t\t\t\tx = mask*next_x+(one_tensor-mask)*x\n\t\t\t\tself.layer_masks.append(tf.reshape(mask,[-1]))\n\n\t\t\tx = self.residual(x,out_channels=64,multi=6,stride=2,is_training=self.is_training,scope='residual_64_1')\n\t\t\tfor i in range(3):\n\t\t\t\tnext_x = self.residual(x,out_channels=64,multi=6,stride=1,is_training=self.is_training,scope='residual_64_'+str(i+2))\n\t\t\t\tmask = self.skip_layer(x,scope='skip_layer_64_'+str(i+1))\n\t\t\t\tx = mask*next_x+(one_tensor-mask)*x\n\t\t\t\tself.layer_masks.append(tf.reshape(mask,[-1]))\n\n\t\t\tx = self.residual(x,out_channels=96,multi=6,stride=1,is_training=self.is_training,scope='residual_96_1')\n\t\t\tfor i in range(2):\n\t\t\t\tnext_x = self.residual(x,out_channels=96,multi=6,stride=1,is_training=self.is_training,scope='residual_96_'+str(i+2))\n\t\t\t\tmask = self.skip_layer(x,scope='skip_layer_96_'+str(i+1))\n\t\t\t\tx = mask*next_x+(one_tensor-mask)*x\n\t\t\t\tself.layer_masks.append(tf.reshape(mask,[-1]))\n\n\t\t\tx = self.residual(x,out_channels=160,multi=6,stride=2,is_training=self.is_training,scope='residual_160_1')\n\t\t\tfor i in range(2):\n\t\t\t\tnext_x = self.residual(x,out_channels=160,multi=6,stride=1,is_training=self.is_training,scope='residual_160_'+str(i+2))\n\t\t\t\tmask = self.skip_layer(x,scope='skip_layer_160_'+str(i+1))\n\t\t\t\tx = mask*next_x+(one_tensor-mask)*x\n\t\t\t\tself.layer_masks.append(tf.reshape(mask,[-1]))\n\n\t\t\tx = self.residual(x,out_channels=320,multi=6,stride=1,is_training=self.is_training,scope='residual_320_1')\n\n\t\t\tx = slim.conv2d(x,128,[1,1],1,padding='SAME',activation_fn=None,scope='conv2')\n\t\t\tx = tf.nn.relu6(self.batch_norm(x,is_training,scope='bn2'))\n\n\t\t\tx = slim.conv2d(x,self.final_channels,[1,1],1,padding='SAME',activation_fn=None,scope='conv3')\n\n\t\t\tself.model_output = tf.reshape(x,[-1,x.shape[1].value*x.shape[2].value,self.anchors_per_unit,8],name='model_output')\n\n\n\tdef residual(self,x,out_channels,multi=6,stride=1,is_training=True,scope='residual'):\n\t\tin_channels = x.shape[-1].value\n\n\t\tif stride==1:\n\t\t\torig_x = x\n\n\t\t\twith tf.variable_scope(scope):\n\t\t\t\tx = slim.conv2d(x,in_channels*multi,[1,1],1,padding='SAME',activation_fn=None,scope='rconv1')\n\t\t\t\tx = tf.nn.relu6(self.batch_norm(x,is_training,scope='rbn1'))\n\n\t\t\t\twith tf.variable_scope('depthwise_conv'):\n\t\t\t\t\tdw_filter = tf.get_variable('dw_filter',[3,3,in_channels*multi,1],initializer=tf.truncated_normal_initializer(stddev=0.1))\n\t\t\t\t\tx = tf.nn.depthwise_conv2d(x,dw_filter,strides=[1,1,1,1],padding='SAME')\n\t\t\t\t\tx = tf.nn.relu6(self.batch_norm(x,is_training,scope='rbn2'))\n\n\t\t\t\tx = slim.conv2d(x,out_channels,[1,1],1,padding='SAME',activation_fn=None,scope='rconv3')\n\n\t\t\t\tif in_channels != out_channels:\n\t\t\t\t\torig_x = slim.conv2d(orig_x,out_channels,[1,1],1,padding='SAME',activation_fn=None,scope='orig_conv')\n\n\t\t\t\tx = x+orig_x\n\n\t\telse:\n\t\t\twith tf.variable_scope(scope):\n\t\t\t\tx = slim.conv2d(x,in_channels*multi,[1,1],1,padding='SAME',activation_fn=None,scope='rconv1')\n\t\t\t\tx = tf.nn.relu6(self.batch_norm(x,is_training,scope='rbn1'))\n\n\t\t\t\twith tf.variable_scope('depthwise_conv'):\n\t\t\t\t\tdw_filter = tf.get_variable('dw_filter',[3,3,in_channels*multi,1],initializer=tf.truncated_normal_initializer(stddev=0.1))\n\t\t\t\t\tx = tf.nn.depthwise_conv2d(x,dw_filter,strides=[1,stride,stride,1],padding='SAME')\n\t\t\t\t\tx = 
tf.nn.relu6(self.batch_norm(x,is_training,scope='rbn2'))\n\n\t\t\t\tx = slim.conv2d(x,out_channels,[1,1],1,padding='SAME',activation_fn=None,scope='rconv3')\n\n\t\treturn x\n\n\n\tdef skip_layer(self,feature_map,scope='skip_layer'):\n\t\tx = feature_map\n\t\twith tf.variable_scope(scope):\n\t\t\tx = slim.avg_pool2d(x,[x.shape[1].value, x.shape[2].value],stride=1,padding='VALID',scope='avg_pool')\n\t\t\tx = slim.conv2d(x,32,[1,1],1,padding='SAME',activation_fn=None,scope='conv')\n\t\t\tx = tf.reshape(x,[-1,1,32])\n\n\t\t\tx,state = tf.nn.dynamic_rnn(cell=self.lstm_cell,inputs=x,dtype=tf.float32,scope='rnn')\n\n\t\t\tx = tf.reshape(x,[-1,1,1,1])\n\t\t\tzero_tensor = tf.zeros_like(x)\n\t\t\tone_tensor = tf.ones_like(x)\n\n\t\t\treturn tf.nn.sigmoid(x)+tf.stop_gradient(tf.where(x>0,one_tensor,zero_tensor)-tf.nn.sigmoid(x))\n\n\n\tdef batch_norm(self,x,is_training=True,scope='bn',moving_decay=0.9,eps=1e-6):\n\t\twith tf.variable_scope(scope):\n\t\t\tgamma = tf.get_variable('gamma',x.shape[-1],initializer=tf.constant_initializer(1))\n\t\t\tbeta = tf.get_variable('beta', x.shape[-1],initializer=tf.constant_initializer(0))\n\n\t\t\taxes = list(range(len(x.shape)-1))\n\t\t\tbatch_mean, batch_var = tf.nn.moments(x,axes,name='moments')\n\n\t\t\tema = tf.train.ExponentialMovingAverage(moving_decay)\n\n\t\t\tdef mean_var_with_update():\n\t\t\t\tema_apply_op = ema.apply([batch_mean,batch_var])\n\t\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\t\tmean, var = tf.cond(tf.equal(is_training,True),mean_var_with_update,\n\t\t\t\t\tlambda:(ema.average(batch_mean),ema.average(batch_var)))\n\n\t\t\treturn tf.nn.batch_normalization(x,mean,var,beta,gamma,eps)\n\n\n\tdef compute_loss(self):\n\t\tanchors = tf.reshape(self.anchors,[1,1,self.anchors_per_unit,2])\n\t\tsprob,sconf,snoob,scoor = self.scales\n\n\t\t_coords = self.labels[:,:,:,0:4]\n\t\t_confs = self.labels[:,:,:,4]\n\t\t_wh = tf.pow(_coords[:,:,:,2:4],2)*np.reshape(self.output_size,[1,1,1,2])\n\t\t_area = _wh[:,:,:,0]*_wh[:,:,:,1]\n\t\t_centers = _coords[:,:,:,0:2]\n\t\t_left_top,_right_bottom = _centers-_wh/2,_centers+_wh/2\n\n\t\tcoords = self.model_output[:,:,:,0:4]\n\t\tcoords_xy = tf.nn.sigmoid(coords[:,:,:,0:2])\n\t\tcoords_wh = tf.sqrt(tf.exp(coords[:,:,:,2:4])*anchors)\n\t\tcoords = tf.concat([coords_xy,coords_wh],axis=3)\n\t\t\n\t\tconfs = tf.nn.sigmoid(self.model_output[:,:,:,4])\n\t\tconfs = tf.expand_dims(confs,-1)\n\t\t\n\t\tprobs = tf.nn.softmax(self.model_output[:,:,:,5:])\n\n\t\tself.predict = tf.concat([coords,confs,probs],axis=3,name='predict')\n\n\t\twh = tf.pow(coords[:,:,:,2:4],2)*np.reshape(self.output_size,[1,1,1,2])\n\t\tarea = wh[:,:,:,0]*wh[:,:,:,1]\n\t\tcenters = coords[:,:,:,0:2]\n\t\tleft_top,right_bottom = centers-wh/2,centers+wh/2\n\n\t\tinter_left_top = tf.maximum(left_top,_left_top)\n\t\tinter_right_bottom = tf.minimum(right_bottom,_right_bottom)\n\t\tinter_wh = tf.maximum(inter_right_bottom-inter_left_top,0.0)\n\t\tinter_area = inter_wh[:,:,:,0]*inter_wh[:,:,:,1]\n\t\tious = tf.truediv(inter_area,area+_area-inter_area)\n\n\t\tbest_iou_mask = tf.equal(ious,tf.reduce_max(ious,axis=2,keepdims=True))\n\t\tbest_iou_mask = tf.cast(best_iou_mask,tf.float32)\n\t\tmask = best_iou_mask*_confs\n\t\tmask = tf.expand_dims(mask,-1)\n\n\t\tconfs_w = snoob*(1-mask)+sconf*mask\n\t\tcoords_w = scoor*mask\n\t\tprobs_w = sprob*mask\n\t\tweights = tf.concat([coords_w,coords_w,coords_w,coords_w,confs_w,probs_w,probs_w,probs_w],axis=3)\n\n\t\tloss = 
tf.pow(self.predict-self.labels,2)*weights\n\t\tloss = tf.reduce_sum(loss, axis=[1, 2, 3])\n\t\tloss = 0.5*tf.reduce_mean(loss)\n\n\t\treturn loss\n\ndef model_size():\n\tparams = tf.trainable_variables()\n\tsize = 0\n\tfor x in params:\n\t\tsz = 1\n\t\tfor dim in x.get_shape():\n\t\t\tsz *= dim.value\n\t\tsize += sz\n\treturn size\n\n\nif __name__ == '__main__':\n\timages = tf.placeholder(tf.float32,[None,512,512,3],name='images')\n\tlabels = tf.placeholder(tf.float32,[None,16*16,5,8],name ='labels')\n\tanchors = np.ones([5,2])\n\n\twith tf.Session() as sess:\n\t\tmobile = MobileYolo_skip(images,labels,anchors)\n\t\tsess.run(tf.global_variables_initializer())\n\t\tprint(mobile.predict.shape.as_list())\n\t\tprint('Size:',model_size())","sub_path":"MobileYolo_skip.py","file_name":"MobileYolo_skip.py","file_ext":"py","file_size_in_byte":10125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615154018","text":"import pickle\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.svm import SVC, LinearSVC\n\n\ndef learn(X, y, algs, out):\n classes = y.unique()\n\n for alg in algs:\n if \"bernoulli\" == alg:\n clf = BernoulliNB()\n elif \"random_forest\" == alg:\n clf = RandomForestClassifier(n_estimators=20, verbose=2)\n elif \"svc\" == alg:\n clf = SVC(verbose=2)\n elif \"linear_svc\" == alg:\n clf = LinearSVC(max_iter=5000, verbose=2)\n else:\n raise KeyError(\"Unknown algorithm: {}\".format(alg))\n\n # X is too big to fit in memory: partial fits\n i = 0\n if len(X)*len(X.columns) > 50000**2:\n while i < len(y):\n clf.partial_fit(X[i:i+5000], y[i:i+5000], classes=classes)\n i += 5000\n\n # X fits in memory: direct computation\n else:\n clf.fit(X, y)\n\n pickle.dump(clf, \"{}/{}.zip\".format(out, alg), protocol=pickle.HIGHEST_PROTOCOL)\n","sub_path":"hw1/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"393241364","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test Neural Network.\"\"\"\n\nimport unittest\n\nfrom test import QiskitMachineLearningTestCase\n\nimport numpy as np\nfrom ddt import ddt, data\n\nfrom qiskit_machine_learning.neural_networks import NeuralNetwork\n\n\nclass _NeuralNetwork(NeuralNetwork):\n \"\"\"Dummy implementation to test the abstract neural network class.\"\"\"\n\n def _forward(self, input_data, weights):\n \"\"\"Expects as input either None, or a 2-dim array and returns.\"\"\"\n\n # handle None input\n if self.num_inputs == 0 and input_data is None:\n return np.zeros(self.output_shape)\n\n return np.zeros(self.output_shape)\n\n def _backward(self, input_data, weights):\n # return None if there are no weights\n input_grad = None\n if self.num_inputs > 0:\n input_grad = np.zeros((*self.output_shape, self.num_inputs))\n\n weight_grad = None\n if self.num_weights > 0:\n weight_grad = np.zeros((*self.output_shape, self.num_weights))\n\n return input_grad, weight_grad\n\n\n@ddt\nclass TestNeuralNetwork(QiskitMachineLearningTestCase):\n \"\"\"Neural Network Tests.\"\"\"\n\n @data(\n # no input\n ((0, 0, 1), None),\n ((0, 1, 1), None),\n ((0, 1, 2), None),\n ((0, 1, (2, 2)), None),\n\n # 1d input\n ((1, 0, 1), 0),\n ((1, 1, 1), 0),\n ((1, 1, 2), 0),\n ((1, 1, (2, 2)), 0),\n\n # multi-dimensional input and weights\n ((2, 2, (2, 2)), [0, 0])\n )\n def test_forward_shape(self, params):\n \"\"\"Test forward shape.\"\"\"\n\n config, input_data = params\n network = _NeuralNetwork(*config)\n\n shape = network.forward(input_data, np.zeros(network.num_weights)).shape\n self.assertEqual(shape, network.output_shape)\n\n @data(\n # no input\n ((0, 0, 1), None),\n ((0, 1, 1), None),\n ((0, 1, 2), None),\n ((0, 1, (2, 2)), None),\n\n # 1d input\n ((1, 0, 1), 0),\n ((1, 1, 1), 0),\n ((1, 1, 2), 0),\n ((1, 1, (2, 2)), 0),\n\n # multi-dimensional input and weights\n ((2, 2, (2, 2)), [0, 0])\n )\n def test_backward_shape(self, params):\n \"\"\" Test backward shape \"\"\"\n\n config, input_data = params\n network = _NeuralNetwork(*config)\n\n input_grad, weights_grad = network.backward(input_data, np.zeros(network.num_weights))\n\n if network.num_inputs > 0:\n self.assertEqual(input_grad.shape, (*network.output_shape, network.num_inputs))\n else:\n self.assertEqual(input_grad, None)\n\n if network.num_weights > 0:\n self.assertEqual(weights_grad.shape, (*network.output_shape, network.num_weights))\n else:\n self.assertEqual(weights_grad, None)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/neural_networks/test_neural_network.py","file_name":"test_neural_network.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"495083622","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 11 16:49:45 2018\r\n\r\n@author: ashwin.monpur\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pylab as plt\r\nfrom statsmodels.tsa.stattools import adfuller\r\nimport numpy as np\r\nfrom statsmodels.tsa.stattools import acf, pacf\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\n\r\n# Importing 
data\r\n\r\ndata=pd.read_csv('Train_Franklin_india.csv')\r\ndata['date']=pd.to_datetime(data['date'])\r\n\r\n# Geting required data\r\n\r\nt_series=data['nav'] # Mutual Funt NAV\r\nt_series.index=data['date'] # creating Index\r\n\r\ndef test_stationarity(timeseries):\r\n \r\n #Determing rolling statistics\r\n rolmean = pd.rolling_mean(timeseries, window=7) #Cont Mean\r\n rolstd = pd.rolling_std(timeseries, window=7) #Cont Std\r\n \r\n #Plot rolling statistics:\r\n plt.plot(timeseries, color='blue',label='Original')\r\n plt.plot(rolmean, color='red', label='Rolling Mean')\r\n plt.plot(rolstd, color='black', label = 'Rolling Std')\r\n plt.legend(loc='best')\r\n plt.title('Rolling Mean & Standard Deviation')\r\n plt.show(block=False)\r\n \r\n #Perform Dickey-Fuller test:\r\n print ('Results of Dickey-Fuller Test:')\r\n dftest = adfuller(timeseries, autolag='AIC') \r\n dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\r\n for key,value in dftest[4].items():\r\n dfoutput['Critical Value (%s)'%key] = value\r\n print (dfoutput)\r\n\r\n#test_stationarity(t_series)\r\n\r\n# removing trend and seasonality\r\n\r\nts_logtransformed=(np.log(t_series))\r\n#plt.plot(ts_logtransformed)\r\n#test_stationarity(ts_logtransformed)\r\nts_diff_logtrans = ts_logtransformed -ts_logtransformed.shift(7)\r\n\r\nts_diff_logtrans.head(10)\r\n#test_stationarity(ts_diff_logtrans)\r\n\r\nts_diff_logtrans.dropna(inplace=True)\r\n#test_stationarity(ts_diff_logtrans)\r\n#plt.plot(ts_diff_logtrans)\r\n\r\n#ACF and PACF plots:\r\nlag_acf = acf(ts_diff_logtrans, nlags=30)\r\nlag_pacf = pacf(ts_diff_logtrans, nlags=50, method='ols')\r\n\r\n#Plot ACF: \r\nplt.subplot(121) \r\nplt.plot(lag_acf)\r\nplt.axhline(y=0,linestyle='--',color='gray')\r\nplt.axhline(y=-1.96/np.sqrt(len(ts_diff_logtrans)),linestyle='--',color='gray')\r\nplt.axhline(y=1.96/np.sqrt(len(ts_diff_logtrans)),linestyle='--',color='gray')\r\nplt.title('Autocorrelation Function')\r\n\r\n#Plot PACF:\r\nplt.subplot(122)\r\nplt.plot(lag_pacf)\r\nplt.axhline(y=0,linestyle='--',color='gray')\r\nplt.axhline(y=-1.96/np.sqrt(len(ts_diff_logtrans)),linestyle='--',color='gray')\r\nplt.axhline(y=1.96/np.sqrt(len(ts_diff_logtrans)),linestyle='--',color='gray')\r\nplt.title('Partial Autocorrelation Function')\r\nplt.tight_layout()\r\n\r\nfrom statsmodels.graphics.tsaplots import plot_acf\r\nfrom statsmodels.graphics.tsaplots import plot_pacf\r\nfrom matplotlib import pyplot\r\n#pyplot.figure()\r\npyplot.subplot(211)\r\nplot_acf(ts_diff_logtrans, ax=pyplot.gca(),lags=40)\r\npyplot.subplot(212)\r\nplot_pacf(ts_diff_logtrans, ax=pyplot.gca(), lags=50)\r\npyplot.show()\r\n\r\nmodel = ARIMA(ts_logtransformed, order=(3,2, 7)) \r\n\r\nresults_ARIMA = model.fit() \r\npyplot.subplot(311)\r\nplt.plot(ts_diff_logtrans)\r\npyplot.subplot(312)\r\nplt.plot(results_ARIMA.fittedvalues, color='red', label = 'order 2')\r\n\r\nRSS = results_ARIMA.fittedvalues-ts_diff_logtrans\r\nRSS.dropna(inplace=True)\r\nplt.title('RSS: %.4f'% sum(RSS**2))\r\nplt.legend(loc ='best')\r\n\r\npredictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)\r\nprint(predictions_ARIMA_diff.head())\r\n\r\npredictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\r\nprint(predictions_ARIMA_diff_cumsum.head())\r\n\r\npredictions_ARIMA_log = pd.Series(ts_logtransformed.iloc[0], index=ts_logtransformed.index)\r\npredictions_ARIMA_log = 
predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)\r\npredictions_ARIMA_log.head()\r\n\r\npredictions_ARIMA = np.exp(predictions_ARIMA_log)\r\nplt.plot(t_series)\r\nplt.plot(predictions_ARIMA)\r\nplt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-t_series)**2)/len(t_series)))\r\n\r\nprint(results_ARIMA.summary())\r\n","sub_path":"Intern Project/Arima model - Forecasting/new_arima.py","file_name":"new_arima.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154767513","text":"# train the model using conditional Gaussian Classifiers\n\n# Siqi Dai\n# Oct 8, 2018\n\nimport glob\nimport cv2\nimport math\nimport csv\nfrom DataPreprocessing import preprocess, dataAugmentation\nfrom multiprocessing.pool import ThreadPool\n\n\ndef train_model(percent, info, num_of_classes):\n    # percent = % of training data in the dataset;\n    # info: 1 -> print data info; 0 -> not print data info;\n    # num_of_classes (total number of classes for an object): 4 -> eye; 3 -> mouth\n\n    processImagesBeforeTraining(num_of_classes)\n    train_set, valid_set = getImageSets(percent, info, num_of_classes)\n\n    # access pixel values of the images in the respective folders\n    train_data = []\n    flag = 1\n\n    for train in train_set:\n        pixels = accessImgPixelVal(train, num_of_classes)\n        train_data.append(pixels)\n        if len(train) == 0:\n            flag = 0\n\n    if flag == 1:  # there are training images for all classes\n        miu_list = computeMiu(train_data, num_of_classes)  # miu\n        theta = computeTheta(train_data, miu_list, num_of_classes)  # shared vector theta (pixel noise standard deviation)\n        if info == 1:\n            print('Prediction model was successfully built! Press the \"Testing\" button.\\n')\n\n        # save the model parameters in a csv file\n        fn = 'model_param_eyes.csv' if num_of_classes == 4 else 'model_param_mouth.csv'\n        writeModelParamInCSV(fn, theta, miu_list)\n\n    else:  # missing training images for at least one class\n        miu_list, theta = [], 0\n        if info == 1:\n            print('Missing training data for at least one class.\\n')\n\n    return miu_list, theta\n\n\ndef accessImgPixelVal(imgset, num_of_classes):\n    list = []\n    if len(imgset) != 0:\n        for im in imgset:\n            if num_of_classes == 4:  # for eyes\n                img = cv2.imread(im, 0)\n                img = cv2.equalizeHist(img)  # histogram equalization\n            else:  # for mouth\n                img = cv2.imread(im)\n\n            row, col = img.shape[0], img.shape[1]\n            if num_of_classes == 4:  # for eyes\n                pixel_vals = [img[i, j] for i in range(row) for j in range(col)]\n            else:  # for mouth\n                pixel_vals = [img[i, j, 0] for i in range(row) for j in range(col)]\n            list.append(pixel_vals)\n\n    return list\n\n\n# compute the vector miu\ndef computeMiu(train_data, num_of_classes):\n    miu_list = []\n    for k in range(0, num_of_classes):\n        num_of_attribute = len(train_data[k][0])\n        num_imgs = len(train_data[k])\n        miu = []\n        for i in range(0, num_of_attribute):\n            x_kji = 0\n            for j in range(num_imgs):\n                x_kji = x_kji + train_data[k][j][i]\n            miu.append(x_kji / num_imgs)\n        miu_list.append(miu)\n    return miu_list\n\n\n# compute the shared vector theta (pixel noise standard deviation)\ndef computeTheta(train_data, miu_list, num_of_classes):\n    sum = 0\n    num_of_attribute = len(train_data[0][0])\n    for k in range(0, num_of_classes):\n        for j in range(0, len(train_data[k])):\n            for i in range(0, num_of_attribute):\n                sum = sum + (train_data[k][j][i] - miu_list[k][i])**2\n        theta = math.sqrt(sum/num_of_attribute/len(train_data[k]))\n    return theta\n\n\ndef split(list, percent): # split a list 
into two sub-lists\n length = int(len(list) * percent / 100)\n sublist1 = [list[i] for i in range(length)]\n sublist2 = [list[i] for i in range(length, len(list))]\n return sublist1, sublist2\n\n\ndef processImagesBeforeTraining(num_of_classes):\n thresh = 0.8 # threshold for outliers (delete bad samples)\n img_to_generate = 1000 # num of files to generate after data augmentation\n\n data_path = './CurrentData/' if num_of_classes == 4 else './MouthDetector/CurrentData/'\n # read csv\n file = open(data_path + 'Dataset.csv')\n contents = file.readlines()\n folders = []\n for i in range(len(contents)):\n if contents[i] != \"\\n\":\n folders.append(contents[i].rstrip(\"\\n\"))\n\n if num_of_classes == 4: # for eye images\n up, down, left, right = [], [], [], []\n for file in folders:\n img_path = data_path + file + '/'\n\n # delete bad samples\n print(\"Data preprocessing: deleting outliers ...\\n\")\n pool = ThreadPool(processes=4)\n async_1 = pool.apply_async(preprocess, (img_path + 'up/', thresh))\n async_2 = pool.apply_async(preprocess, (img_path + 'down/', thresh))\n async_3 = pool.apply_async(preprocess, (img_path + 'left/', thresh))\n async_4 = pool.apply_async(preprocess, (img_path + 'right/', thresh))\n val_1 = async_1.get()\n print(\"1/4 completed ...\")\n val_2 = async_2.get()\n print(\"2/4 completed ...\")\n val_3 = async_3.get()\n print(\"3/4 completed ...\")\n val_4 = async_4.get()\n print(\"Done.\\n\")\n\n # data augmentation\n print(\"Data augmentation:\", img_to_generate, \"new images will be generated ...\\n\")\n pool = ThreadPool(processes=4)\n async_1 = pool.apply_async(dataAugmentation, (img_path + 'up/', img_to_generate))\n async_2 = pool.apply_async(dataAugmentation, (img_path + 'down/', img_to_generate))\n async_3 = pool.apply_async(dataAugmentation, (img_path + 'left/', img_to_generate))\n async_4 = pool.apply_async(dataAugmentation, (img_path + 'right/', img_to_generate))\n val_1 = async_1.get()\n print(\"1/4 completed ...\")\n val_2 = async_2.get()\n print(\"2/4 completed ...\")\n val_3 = async_3.get()\n print(\"3/4 completed ...\")\n val_4 = async_4.get()\n print(\"Done.\\n\")\n\n else: # for mouth images\n mouth_open, mouth_line, mouth_nothing = [], [], []\n for file in folders:\n img_path = data_path + file + '/'\n\n # delete bad samples\n print(\"Data preprocessing: deleting outliers ...\\n\")\n pool = ThreadPool(processes=3)\n async_1 = pool.apply_async(preprocess, (img_path + 'click/', thresh))\n async_2 = pool.apply_async(preprocess, (img_path + 'ForceNoOp/', thresh))\n async_3 = pool.apply_async(preprocess, (img_path + 'nothing/', thresh))\n val_1 = async_1.get()\n print(\"1/3 completed ...\")\n val_2 = async_2.get()\n print(\"2/3 completed ...\")\n val_3 = async_3.get()\n print(\"Done.\\n\")\n\n\n # data augmentation\n print(\"Data augmentation: \", img_to_generate, \" new images will be generated ...\\n\")\n pool = ThreadPool(processes=3)\n async_1 = pool.apply_async(dataAugmentation, (img_path + 'click/', img_to_generate))\n async_2 = pool.apply_async(dataAugmentation, (img_path + 'ForceNoOp/', img_to_generate))\n async_3 = pool.apply_async(dataAugmentation, (img_path + 'nothing/', img_to_generate))\n val_1 = async_1.get()\n print(\"1/3 completed ...\")\n val_2 = async_2.get()\n print(\"2/3 completed ...\")\n val_3 = async_3.get()\n print(\"Done.\\n\")\n\n\ndef getImageSets(percent, info, num_of_classes): # get train and validation image sets\n\n data_path = './CurrentData/' if num_of_classes == 4 else './MouthDetector/CurrentData/'\n # read csv\n file = 
open(data_path + 'Dataset.csv')\n contents = file.readlines()\n folders = []\n for i in range(len(contents)):\n if contents[i] != \"\\n\":\n folders.append(contents[i].rstrip(\"\\n\"))\n\n if num_of_classes == 4: # for eye images\n up, down, left, right = [], [], [], []\n for file in folders:\n img_path = data_path + file + '/'\n\n up = up + [f for f in glob.glob(img_path + 'up/' + '*.jpg')]\n down = down + [f for f in glob.glob(img_path + 'down/' + '*.jpg')]\n left = left + [f for f in glob.glob(img_path + 'left/' + '*.jpg')]\n right = right + [f for f in glob.glob(img_path + 'right/' + '*.jpg')]\n train_up, valid_up = split(up, percent)\n train_down, valid_down = split(down, percent)\n train_left, valid_left = split(left, percent)\n train_right, valid_right = split(right, percent)\n\n if info == 1: # print information\n print('Total Number of Data: \\n up: ', len(up), '; down: ', len(down), '; left: ', len(left), '; right: ',\n len(right))\n print('Number of Training Data: \\n up: ', len(train_up), '; down: ', len(train_down), '; left: ',\n len(train_left), '; right: ', len(train_right))\n print('Number of Test Data: \\n up: ', len(valid_up), '; down: ', len(valid_down), '; left: ', len(valid_left),\n '; right: ', len(valid_right))\n print(\"\\nStart training ...\\n\")\n\n train_set = [train_up, train_down, train_left, train_right]\n valid_set = [valid_up, valid_down, valid_left, valid_right]\n\n else: # for mouth images\n mouth_open, mouth_line, mouth_nothing = [], [], []\n for file in folders:\n img_path = data_path + file + '/'\n\n mouth_open = mouth_open + [f for f in glob.glob(img_path + 'click/' + '*.jpg')]\n mouth_line = mouth_line + [f for f in glob.glob(img_path + 'ForceNoOp/' + '*.jpg')]\n mouth_nothing = mouth_nothing + [f for f in glob.glob(img_path + 'nothing/' + '*.jpg')]\n train_mouth_open, valid_mouth_open = split(mouth_open, percent)\n train_mouth_line, valid_mouth_line = split(mouth_line, percent)\n train_mouth_nothing, valid_mouth_nothing = split(mouth_nothing, percent)\n\n if info == 1: # print information\n print('Total Number of Data: \\n mouth_open: ', len(mouth_open), '; mouth_force_no_op: ', len(mouth_line),\n '; mouth_nothing: ', len(mouth_nothing))\n print('Number of Training Data: \\n mouth_open: ', len(train_mouth_open), '; mouth_force_no_op: ',\n len(train_mouth_line), '; mouth_nothing: ', len(train_mouth_nothing))\n print('Number of Test Data: \\n mouth_open: ', len(valid_mouth_open), '; mouth_force_no_op: ',\n len(valid_mouth_line), '; mouth_nothing: ', len(valid_mouth_nothing))\n print(\"\\nStart training ...\\n\")\n\n train_set = [train_mouth_open, train_mouth_line, train_mouth_nothing]\n valid_set = [valid_mouth_open, valid_mouth_line, valid_mouth_nothing]\n\n return train_set, valid_set\n\n\ndef writeModelParamInCSV(fn, theta, miu_list):\n with open(fn, \"w\") as write_f:\n writer = csv.writer(write_f)\n writer.writerow([theta])\n writer.writerows(miu_list)\n write_f.close()","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":9908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"318065247","text":"import click\nimport pytest\nfrom pygitguardian.models import Detail\n\nfrom ggshield.scan.scannable_errors import handle_scan_error\n\n\ndef test_handle_scan_error_api_key():\n detail = Detail(\"Invalid API key.\")\n detail.status_code = 401\n with pytest.raises(click.UsageError):\n handle_scan_error(detail, [])\n\n\n@pytest.mark.parametrize(\n \"detail, 
status_code, chunk\",\n [\n pytest.param(\n Detail(\"Too many documents to scan\"),\n 400,\n [{\"document\": \"\", \"filename\": \"/example\"} for _ in range(21)],\n id=\"too many documents\",\n ),\n pytest.param(\n Detail(\n \"[\\\"filename:: [ErrorDetail(string='Ensure this field has no more than 256 characters.', code='max_length')]\\\", '', '', '']\" # noqa\n ),\n 400,\n [\n {\n \"document\": \"still valid\",\n \"filename\": \"/home/user/too/long/file/name\",\n },\n {\"document\": \"\", \"filename\": \"valid\"},\n {\"document\": \"\", \"filename\": \"valid\"},\n {\"document\": \"\", \"filename\": \"valid\"},\n ],\n id=\"single file exception\",\n ),\n ],\n)\ndef test_handle_scan_error(detail, status_code, chunk, capsys, snapshot):\n detail.status_code = 400\n handle_scan_error(detail, chunk)\n captured = capsys.readouterr()\n snapshot.assert_match(captured.err)\n","sub_path":"tests/scan/test_scannable_errors.py","file_name":"test_scannable_errors.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"167610741","text":"import os\nimport math\nimport pickle\n\nimport tokenizer\n\n''' class Ngram\nThis class represents the language model derived from some\ntraining corpus. It stores the n-gram database of the given\ntraining file in a trie (prefix tree) data structure and\nprovides the interface for querying probabilities, etc.\n'''\nclass Ngram:\n\n def __init__(self, filename, degree=4, tokenlist=None):\n self.__filename = filename\n self.degree = degree\n if filename==None:\n self.tokenlist = tokenlist\n self.grams = [{}]\n self.rgrams = [{}]\n self.vocabcount = [0]*(self.degree+1)\n self.totalcount = [0]*(self.degree+1)\n self.tset = []\n self.tdict = {}\n if tokenlist!=None:\n self.addData(tokenlist)\n else:\n self.tokenlist = []\n if os.path.isfile(filename+\".ngram\"):\n print(\"Loading Ngram Model.\", end=' ')\n self.grams = pickle.load(open(filename+\".ngram\", \"rb\"))\n self.vocabcount = self.grams[2]\n self.totalcount = self.grams[3]\n self.tset = self.grams[4]\n self.tdict = dict([(x,self.tset.index(x)) for x in self.tset])\n self.rgrams = self.grams[1]\n self.grams = self.grams[0]\n print(\"Done!\")\n else:\n print(\"Generating Ngram Model.\", end=' ')\n self.tokenlist = tokenizer.tokenize(open(filename, 'r').read())\n self.grams = []\n self.rgrams = []\n self.tset = []\n self.tdict = {}\n self._buildModel()\n pickle.dump([self.grams, self.rgrams, self.vocabcount,\n self.totalcount, self.tset],\n open(filename+\".ngram\",\"wb\"))\n print(\"Done!\")\n # del self.tokenlist\n\n def printTree(self,j=0,level=0):\n ''' Prints the underlying prefix tree in which the language\n model is stored. The argument j specifies the subtree to\n print and level specifies the indentation level to start\n from.\n '''\n for item in self.grams[j]:\n tabs=\"\"\n for i in range(level):\n tabs += \"\\t\"\n print(tabs+self.tset[item]+\":\")\n if self.grams[j][item][1]!=-1:\n self.printTree(self.grams[j][item][1],level+1)\n\n def _buildModel(self):\n ''' Build the language model database from the corpus.\n '''\n self.tset = list(set(self.tokenlist))\n self.tdict = dict([(x,self.tset.index(x)) for x in self.tset])\n # Initialize empty ngram trie\n self.grams.append({})\n self.rgrams.append({})\n # Initialize vocabcount list. This stores the number of unique\n # n-grams for all n frm 1 to self.degree\n self.vocabcount = [0]*(self.degree+1)\n # Total counts for all gram sizes. 
Straightforward to calculate\n # from the token list size.\n self.totalcount = [0]+[len(self.tokenlist)-x for\n x in range(self.degree)]\n self._buildTrie(self.grams,self.tokenlist,self.tset,self.tdict)\n self._buildTrie(self.rgrams,self.tokenlist[::-1],\n self.tset,self.tdict,False)\n self.vocabcount = [0]+self.vocabcount\n self.totalcount = [0]+self.totalcount\n\n def _buildTrie(self,grams,tlist,tset,tdict,countvocab=True,weight=1):\n ''' Build the n-gram model trie. The argument tlist is a list\n of tokens, tset is list where any possible token occurs\n only once, and tdict is a dictionary which inverts the\n list tset. That is, tdict maps the elements in tset to\n thier indices (positions) in tset.\n '''\n j = 1\n # Aliases for some data members\n degree = self.degree\n vocabcounts = self.vocabcount\n for i in range(len(tlist)):\n nxtSubTree = 0\n if i+degree>len(tlist):\n degree = len(tlist)-i\n for k in range(degree):\n if tdict[tlist[i+k]] not in grams[nxtSubTree]:\n if k!=(degree-1):\n grams[nxtSubTree][tdict[tlist[i+k]]] = (weight,j)\n grams.append({})\n j+=1\n else:\n grams[nxtSubTree][tdict[tlist[i+k]]] = (weight,-1)\n if countvocab:\n vocabcounts[k]+=1\n else:\n dtup = grams[nxtSubTree][tdict[tlist[i+k]]]\n dtup = (dtup[0]+weight,dtup[1])\n grams[nxtSubTree][tdict[tlist[i+k]]] = dtup\n nxtSubTree = grams[nxtSubTree][tdict[tlist[i+k]]][1]\n\n def addData(self, tokenlist, weight=1):\n ''' Add text to the ngram database. This function read\n the given string updates the ngram trie incorporating\n information from the text.\n '''\n if tokenlist == []:\n return\n for token in tokenlist:\n if token not in self.tset:\n self.tset.append(token)\n self.tdict[token] = len(self.tset)-1\n self._buildTrie(self.grams,tokenlist,self.tset,self.tdict,True,weight)\n self._buildTrie(self.rgrams,tokenlist[::-1],\n self.tset,self.tdict,False,weight)\n for i in range(self.degree):\n self.totalcount[i+1] += len(tokenlist)-i\n\n def count(self, sequence):\n ''' Returns the number of times the given token sequence\n appears in the training corpus.\n '''\n lseq=len(sequence)\n if lseq>0 and lseq<=self.degree:\n subTree = 0\n for i in range(lseq):\n if sequence[i] not in self.tdict:\n return 0\n indx = self.tdict[sequence[i]]\n if indx in self.grams[subTree]:\n if i==lseq-1:\n return self.grams[subTree][indx][0]\n else:\n subTree = self.grams[subTree][indx][1]\n else:\n return 0\n else:\n return 0\n\n def probability(self, sequence, log=False, smooth=True):\n ''' Returns the probability of occurence of the token sequence\n specified by the argument sequence. If log is True, the\n base-10 log of the probability is returned. If smooth\n is true, the (add-k) smoothed probability is returned.\n '''\n l = len(sequence)\n thisGram = self.count(sequence)\n return self.countToProb(thisGram,l,log,smooth)\n\n\n def countToProb(self, count, gramsize, log=False, smooth=True):\n ''' Returns the probability of occurence of a certain token\n sequence when its count and length of the token sequence\n (gramsize) is given.\n '''\n # FIXME: Error thrown if smooth=False and log=True\n if gramsize < 1 or gramsize > self.degree:\n return 0.0\n\n gramCount = self.vocabcount[gramsize]\n total = self.totalcount[gramsize]\n thisGram = count\n\n k = 0.01 if smooth else 0.0\n p = (thisGram+k)/float(total+k*gramCount)\n p = math.log10(p) if log else p\n return p\n\n def nextBest(self, sequence, n=1, grams=None):\n ''' Returns the n most probable tokens that come after\n the token sequence given. 
If n is 0, all tokens with\n            a similar log-probability (similar order of probability)\n            to the best one are returned. Optionally, if a trie\n            is given as grams, then that trie is used for the result.\n        '''\n        l = len(sequence)\n        if l<1 or l+1>self.degree:\n            return []\n\n        if grams: theGrams = grams\n        else: theGrams = self.grams\n\n        subTree = 0\n        for i in range(l):\n            if sequence[i] not in self.tdict:\n                return []\n            idx = self.tdict[sequence[i]]\n            if idx in theGrams[subTree]:\n                subTree = theGrams[subTree][idx][1]\n                if subTree==-1:\n                    return []\n            else:\n                return []\n\n        candidates = [(self.tset[x],self.countToProb(theGrams[subTree][x][0],\n                      l+1,log=True,smooth=True)) for x in theGrams[subTree]]\n        candidates.sort(key=lambda x: x[1], reverse=True)\n\n        # if n is zero, return values with order around the best candidate\n        if n<=0:\n            value = math.floor(candidates[0][1])\n            candidates = [(x,y) for (x,y) in candidates if y+1 >= value]\n        # else return specified number of values\n        elif len(candidates)>=n:\n            candidates = candidates[:n]\n\n        return candidates\n\n    def prevBest(self,sequence,n=1,reverse=False):\n        ''' Returns the n most probable tokens that come before\n            the token sequence given. Except that, it behaves\n            identically to the method nextBest().\n        '''\n        if not reverse:\n            sequence = tuple(list(sequence)[::-1])\n        return self.nextBest(sequence,n,self.rgrams)\n\n    def getUnigrams(self):\n        ''' Returns all the unigrams and their counts as a dict.\n        '''\n        return dict([(self.tset[x],self.grams[0][x][0]) for x in self.grams[0]])\n","sub_path":"ngram/ngram.py","file_name":"ngram.py","file_ext":"py","file_size_in_byte":9371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"550494857","text":"# coding:utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\nclass AlexNet(nn.Module):\n    def __init__(self):\n        super(AlexNet, self).__init__()\n        # (in_channels, out_channels, kernel_size) for each conv layer\n        self.conv1 = nn.Conv2d(3, 96, 11, stride=4)\n        self.conv2 = nn.Conv2d(96, 256, 5, padding=2)\n        self.conv3 = nn.Conv2d(256, 384, 3, padding=1)\n        self.conv4 = nn.Conv2d(384, 384, 3, padding=1)\n        self.conv5 = nn.Conv2d(384, 256, 3, padding=1)\n        # 256 channels x 6 x 6 spatial map after the last pooling stage\n        self.fc6 = nn.Linear(256 * 6 * 6, 4096)\n        self.fc7 = nn.Linear(4096, 4096)\n        self.fc8 = nn.Linear(4096, 1000)\n\n    def forward(self, x):\n        # pool only after conv1, conv2 and conv5, as in AlexNet\n        x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=3, stride=2)\n        x = F.max_pool2d(F.relu(self.conv2(x)), kernel_size=3, stride=2)\n        x = F.relu(self.conv3(x))\n        x = F.relu(self.conv4(x))\n        x = F.max_pool2d(F.relu(self.conv5(x)), kernel_size=3, stride=2)\n        x = x.view(x.size(0), -1)  # flatten before the fully connected layers\n        x = F.dropout(F.relu(self.fc6(x)))\n        x = F.dropout(F.relu(self.fc7(x)))\n        out = self.fc8(x)\n        return out\n","sub_path":"models/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"339770436","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Author: Neverland_LY\r\n# Date: 2018-11-21\r\n# Version: v1.0\r\n#\r\n# Description:\r\n# 1. This code is used to calculate the average of the P, T and R values of the\r\n#    meteorological data per day by month.\r\n# 2. The script can only deal with files ending with the suffix .xlsx (not .xls).\r\n#\r\n# Usage:\r\n# 1. Modify the input file path to your own path.\r\n# 2. 
DON'T modify the base_block number unless necessary; otherwise the program may crash or\r\n#    produce undefined results!\r\n\r\nimport openpyxl\r\nimport os  # Operating System library\r\n\r\n# define the input path (it may contain both folders and files)\r\ninput_file_path = \"E:\\\\xiaowei\"  # do not add a trailing \\\\\r\n# define the output path (any folder; it must already have been created by the user)\r\noutput_path = \"E:\\\\2018\"  # do not add \\\\ at the end of the path\r\n# define the block size (number of rows averaged into one)\r\nbase_block = 120 # WARNING #\r\n\r\n# define the counter\r\ncount = 1\r\n# walk the input path and process every file whose name ends with the suffix [.xlsx]\r\nfor root_path, sub_directory_list, un_sub_directory_list in os.walk(input_file_path):\r\n    for file in un_sub_directory_list:\r\n        if not file.endswith('.xlsx'):  # skip anything that is not an .xlsx workbook\r\n            continue\r\n        # open the [.xlsx] file\r\n        wb = openpyxl.load_workbook(root_path + \"\\\\\" + file)\r\n        # get the active sheet\r\n        active_sheet = wb.active\r\n        # define the output workbook\r\n        result_wb = openpyxl.Workbook()\r\n        result_sheet = result_wb.active\r\n        # merge every base_block rows into one averaged row\r\n        for block in range(int(active_sheet.max_row / base_block)):\r\n            sum_list = []\r\n            for col in range(13):\r\n                sum_col = 0.0\r\n                for row in range(base_block):\r\n                    sum_col += active_sheet.cell(row = block * base_block + row + 1, column = col + 1).value\r\n                sum_list.append(str(sum_col / base_block))\r\n            for index in range(len(sum_list)):\r\n                result_sheet.cell(row = block + 1, column = index + 1, value = sum_list[index])\r\n        result_wb.save(output_path + '\\\\' + file[:-5] + '-LY.xlsx')\r\n        print(\" [ No.\" + str(count) + \" ] \" + file + \" processed successfully~\")\r\n        count += 1\r\n","sub_path":"Python/工程项目/小威严格求平均数.py","file_name":"小威严格求平均数.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"323558246","text":"import sys\r\nsys.path.append('../Grupo1/Instrucciones')\r\nsys.path.append('../Grupo1/Utils')\r\nsys.path.append('../Grupo1/Librerias/storageManager')\r\n\r\nfrom instruccion import *\r\nfrom Lista import *\r\nfrom TablaSimbolos import *\r\nfrom Primitivo import *\r\nfrom Error import *\r\nfrom jsonMode import *\r\nfrom c3dGen import *\r\n\r\nfunciones=list()\r\nclass pl_Funcion(Instruccion):\r\n\r\n    def __init__(self,arg0,arg1,arg2,arg3, nombre,parametros, tipo, cuerpof):\r\n        self.nombre = nombre\r\n        self.parametros = parametros\r\n        self.tipo = tipo\r\n        self.arg0 = arg0\r\n        self.arg1 = arg1\r\n        self.arg2 = arg2\r\n        self.arg3 = arg3\r\n        self.cuerpof = cuerpof\r\n\r\n    def execute(self, data):\r\n        cad=\"\"\r\n        def ExisteFunc(key, dicObj):\r\n            if key in dicObj:\r\n                cad = \"La funcion: \"+str(self.nombre)+\" Ya Existe!!\"\r\n                return cad\r\n            else:\r\n                createFunctionC3D(data.databaseSeleccionada,self.nombre,self.arg0,self.parametros,self.arg2,self.arg3, self.cuerpof)\r\n                funciones.append(self.nombre)\r\n                data.tablaSimbolos[self.nombre] = {'nombre' : self.nombre, 'parametros' : self.parametros, 'tipo':self.tipo,'cuerpo':self.cuerpof}\r\n                cad= \"Se creo la funcion: \"+str(self.nombre)\r\n                return cad\r\n\r\n        a=ExisteFunc(self.nombre,data.tablaSimbolos)\r\n        return a\r\n\r\n\r\n    def __repr__(self):\r\n        return str(self.__dict__)\r\n\r\nclass pl_CuerpoFuncion(Instruccion):\r\n    def __init__(self, arg0,arg1,declare,instrucciones):\r\n        self.arg0 = arg0\r\n        self.arg1 = arg1\r\n        self.declare = declare\r\n        self.instrucciones = instrucciones\r\n\r\n    def 
__repr__(self):\r\n return str(self.__dict__)\t\r\nclass pl_CuerpoFuncion2(Instruccion):\r\n def __init__(self, declaraciones,instrucciones):\r\n self.declaraciones = declaraciones\r\n self.instrucciones =instrucciones\r\n\r\n def __repr__(self):\r\n return str(self.__dict__)\r\n\t\t\r\nclass pl_Declarar3(Instruccion):\r\n def __init__(self, arg0,arg1,nombre1, tipo,nombre2):\r\n self.arg0 = arg0\r\n self.arg1 = arg1 \r\n self.nombre1 = nombre1\r\n self.tipo =tipo\r\n self.nombre2 =nombre2\r\n\r\n def __repr__(self):\r\n return str(self.__dict__)\t\r\n\t\t\r\n\t\t\r\nclass pl_callFuncion(Instruccion):\r\n def __init__(self,arg0, arg1, tipo,nombre,argumentos):\r\n self.tipo =tipo\r\n self.nombre = nombre\r\n self.argumentos = argumentos\r\n self.arg0 = arg0\r\n self.arg1 = arg1\r\n\r\n def execute(self, data):\r\n self.argumentos\r\n select_functionC3D(self.nombre,self.argumentos)\r\n \r\n return 'se ejecuto la funcion: '+str(self.nombre)\r\n\r\n def __repr__(self):\r\n return str(self.__dict__)\t\t\r\n\t\t\r\n\t\t\r\nclass pl_dropFuncion(Instruccion):\r\n\r\n def __init__(self, arg0, arg1,nombre,parametros):\r\n self.nombre = nombre\r\n self.parametros =parametros\r\n self.arg0 = arg0\r\n self.arg1 = arg1\r\n \r\n def execute(self, data): \r\n cad=\"\"\r\n def ExisteFunc(key, dicObj):\r\n if key in dicObj:\r\n for i in funciones:\r\n funciones.remove(str(key))\r\n \r\n del data.tablaSimbolos[key]\r\n cad = \"Se elimino la funcion: \"+str(self.nombre)\r\n return cad\r\n else:\r\n cad= \"Not existe la funcion: \"+str(self.nombre)\r\n return cad\r\n \r\n a=ExisteFunc(self.nombre,data.tablaSimbolos) \r\n return a\r\n \r\n \r\n def __repr__(self):\r\n return str(self.__dict__)\t\t\r\n\t\t","sub_path":"parser/fase2/team01/Grupo1/Instrucciones/PL/pl_funciones.py","file_name":"pl_funciones.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"534088261","text":"# coding=utf8\nfrom topfive.models import Thread, Post, Like, PostAnswerRelationship\nfrom django.http import HttpResponseRedirect, HttpResponse, Http404\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom forms import PostForm\nfrom django.core.context_processors import csrf\n\ndef all_threads(request):\n\tif not request.user.is_authenticated():\n\t\treturn HttpResponseRedirect('/')\n\tthreads = Thread.objects.all()\n\treturn render_to_response('threads.html',\n\t\t{'threads_list' : threads})\n\ndef every_post(request, thread_id):\n\tif not request.user.is_authenticated():\n\t\treturn HttpResponseRedirect('/')\n\tposts = Post.objects.filter(thread = thread_id).order_by('created')\n\tparentThread = Thread.objects.get(id = thread_id)\n\tthread_id = int(thread_id)\n\n\tif request.POST:\n\t\tusuario = request.user\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tmadePost = form.save(commit=False)\n\t\t\tmadePost.thread = parentThread\n\t\t\tmadePost.creator = usuario\n\t\t\tmadePost.save()\n\t\t\treturn HttpResponseRedirect('/thread/get/%i/' % thread_id)\n\telse:\n\t\tform = PostForm()\n\t\t#parte do csrf mais o resto do contexto 'cntxt'\n\t\tcntxt = {}\n\t\tcntxt.update(csrf(request))\n\t\tcntxt['form'] = form\n\t\tcntxt['thread_id'] = thread_id\n\t\tcntxt['posts_list'] = posts\n\t\tcntxt['thread'] = parentThread\n\t\treturn render_to_response('thread.html', cntxt)\n\ndef create_answer(request, thread_id, post_id):\n\tposts = Post.objects.filter(thread=thread_id).order_by('created')\n\tthread_id, post_id = 
int(thread_id), int(post_id)\n\tif request.POST:\n\t\tusuario = request.user\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tmadeAnswer = form.save(commit=False)\n\t\t\tmadeAnswer.creator = usuario\n\t\t\tmadeAnswer.save()\n\t\t\t#incluir o manytomany post\n\t\t\tparentPost = Post.objects.get(id = post_id)\n\t\t\tans = PostAnswerRelationship(parentPost=parentPost, childPost=madeAnswer)\n\t\t\tans.save()\n\t\t\treturn HttpResponseRedirect('/thread/get/%i/' % thread_id)\n\n\ndef vote(request):\n\tuser = request.user\n\tif request.method == u'GET':\n\t\tGET = request.GET\n\t\tif GET.has_key(u'id') and GET.has_key(u'tipo'):\n\t\t\tid = int(GET[u'id'])\n\t\t\ttipo = GET[u'tipo']\n\t\t\tactualPost = get_object_or_404(Post, id=id)\n\t\t\tvotos = actualPost.totalLikes()\n\t\t\ttry:\n\t\t\t\topinion, created = Like.objects.get_or_create(person=user, post=actualPost)\n\t\t\texcept:\n\t\t\t\tcreated = True\n\t\t\tif tipo == u'up':\n\t\t\t\t#incrementa\n\t\t\t\tif not created:\n\t\t\t\t\t#o usuário já deu 'opinião' no post\n\t\t\t\t\tif opinion.dislike == True:\n\t\t\t\t\t\topinion.dislike = False\n\t\t\t\t\t\topinion.save()\n\t\t\t\t\t\tvotos += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\topinion.delete()\n\t\t\t\t\t\tvotos -= 1\n\t\t\t\telse:\n\t\t\t\t\t#cria\n\t\t\t\t\topinion = Like(post=actualPost)\n\t\t\t\t\topinion.save()\n\t\t\t\t\topinion.person.add(user)\n\t\t\t\t\tvotos += 1\n\t\t\telif tipo == u'down':\n\t\t\t\tif not created:\n\t\t\t\t\t#o usuário já deu 'opinião' no post\n\t\t\t\t\tif opinion.dislike == False:\n\t\t\t\t\t\topinion.dislike = True\n\t\t\t\t\t\topinion.save()\n\t\t\t\t\t\tvotos -= 2\n\t\t\t\t\telse:\n\t\t\t\t\t\topinion.delete()\n\t\t\t\t\t\tvotos += 1\n\t\t\t\telse:\n\t\t\t\t\t#cria\n\t\t\t\t\topinion = Like(post=actualPost, dislike=True)\n\t\t\t\t\topinion.save()\n\t\t\t\t\topinion.person.add(user)\n\t\t\t\t\tvotos -= 1\n\t\telse:\n\t\t\treturn Http404\n\t\n\treturn HttpResponse(str(votos))\n\n\ndef mk_paginator(request, items, num_items):\n\t\"\"\"Create and return a paginator.\"\"\"\n\tpaginator = Paginator(items, num_items)\n\ttry: page = int(request.GET.get(\"page\",'1'))\n\texcept ValueError: page = 1\n\n\ttry:\n\t\titems = paginator.page(page)\n\texcept (InvalidPage, EmptyPage):\n\t\titems = paginator.page(paginator.num_pages)\n\treturn items\n","sub_path":"topfive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"12995684","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport json\nimport time\nimport re\n\ndef req(url,headers):\n response = session.get(url, headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup\n\ndef content(soup,n):\n title = soup.select('a[class=\"_1Zdp\"]')[n]['title'].replace('<',' ').replace('>',' ').replace('/','').replace('#','').replace('?','').replace('*','_').replace('\\\"','_').replace(':','_').replace('\\n','_')\n article_id = soup.select('a[class=\"_1Zdp\"]')[n]['href']\n id = article_id.split('/')[-1]\n href = 'https://news.cnyes.com' + article_id\n date = soup.select('a[class=\"_1Zdp\"]')[n].time['datetime']\n content_url = 'https://news.cnyes.com/api/v6/news/' + id\n content_response = session.get(content_url, headers=headers)\n content_json = content_response.json()\n tag = content_json['items']['keywords']\n content = content_json['items']['content']\n content = text_clean(content)\n output = {'date': date, 'title': title, 'content': content, 
'href': href, 'tag': tag, 'clicks': 'NA'}\n time.sleep(1)\n return title, output\n\ndef text_clean(content):\n if re.search(r'()', content) != None:\n content = re.sub(r\"(\\(\\))\", '', content, count=0, flags=re.IGNORECASE)\n if re.search(r'(&l.+?gt;)', content) != None:\n content = re.sub(r'(&l.+?gt;)', '', content, count=0)\n if re.search(r'(&a.+?sp;)', content) != None:\n content = re.sub(r'(&a.+?sp;)', '', content, count=0)\n if re.search(r'(\\n)', content) != None:\n content = content.replace('\\r', '')\n content = content.replace('\\n', '')\n return content\n\ndef file_save(path,title):\n with open(path % (title) + '.json', 'w', encoding='utf8') as f:\n json.dump(output, f)\n\n\nif not os.path.exists(r'./cnyesnewstoday'):\n os.mkdir(r'./cnyesnewstoday')\nheaders={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'}\nurl_news_index = 'https://news.cnyes.com/news/cat/tw_stock?exp=a'\nsession = requests.session()\nsoup = req(url_news_index, headers)\npath =r'./cnyesnewstoday/%s'\nfor n in range(len(soup.select('a[class=\"_1Zdp\"]'))):\n title, output = content(soup,n)\n if os.path.exists(path %(title) + '.json'):\n break\n if not os.path.exists(path %(title)):\n file_save(path, title)\nsession.close()\n","sub_path":"cnyes_news_hightlight.py","file_name":"cnyes_news_hightlight.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"81988469","text":"# importing necessary packages\nfrom preprocessing.preprocessing import AspectAwarePreprocessor\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\nfrom keras.utils import to_categorical\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import SGD\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.core import Activation, Flatten, Dropout, Dense\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport glob\nimport cv2\nimport os\n\n# path to the survey CSV\ndata = pd.read_csv('/home/stu15/s15/ts6442/Capstone/codes/HIT_results.csv')\npath = '/home/stu15/s15/ts6442/Capstone/faces/'\n# reading all the images from the file\nimage = glob.glob(path + '*.jpg')\nprint('[INFO] length of images', len(image))\n\nimages = []\nlabels = []\ni = 0\nsid = 0\n# defining classes as dictionary\nemotions = {'Happy': 0, 'Sad': 1, 'Anger': 2, 'Neutral': 3, 'Surprise': 4, 'Disgust': 5, 'Scared': 6, 'Not clear': 7}\n\nfor item in image:\n # getting label corresponding to the image\n a = data['Answer.Q7Answer'].loc[data['Input.image_url'] == 'https://lijingyang.me/images/AmazonMTurk/' + item.split('/')[-1]]\n try:\n # removing more than one entry\n image = cv2.imread('/home/stu15/s15/ts6442/Capstone/faces/' + item.split('/')[-1])\n ap = AspectAwarePreprocessor(128, 128)\n image = ap.preprocess(image)\n image = img_to_array(image)\n b = a.values.tolist()[0].split('|')[0]\n images.append(image)\n labels.append(emotions[b])\n sid += 1\n except:\n # deleting files with label as \"NaN\"\n # os.remove('/home/stu15/s15/ts6442/Capstone/Labelled_images/' + item.split('/')[-1])\n i += 1\n print('[INFO] Exception at image number', sid)\n pass\n if sid % 500 == 0:\n print('[INFO] {} images loaded...'.format(sid))\n\nimages = np.array(images)\nlabels = np.array(labels).reshape(len(labels), 
1)\nprint('[INFO] {} images not loaded'.format(i))\nprint('[INFO] shape of images is', images.shape)\nprint('[INFO] shape of labels is', labels.shape)\n\n(trainX, testX, trainY, testY) = train_test_split(images, labels, test_size=0.2)\n\nmodel_path = '/home/stu15/s15/ts6442/Capstone/codes/final_model.h5'\nnumber = 4\n\n\ndef fineTune(model, num):\n model = load_model(model)\n clip = Model(model.inputs, model.layers[num].output)\n\n # defining a new model\n new = Sequential()\n new.add(clip)\n # new.add(Dense(500, activation='relu'))\n new.add(Conv2D(256, (3, 3)))\n new.add(Activation('relu'))\n new.add(BatchNormalization())\n new.add(Dropout(0.5))\n # new.add(Conv2D(128, (3, 3)))\n # new.add(Activation('relu'))\n # new.add(BatchNormalization())\n # new.add(MaxPooling2D(pool_size=(2, 2)))\n # new.add(Dropout(0.5))\n # new.add(Conv2D(64, (3, 3), padding='same'))\n # new.add(Activation('relu'))\n # new.add(BatchNormalization())\n # new.add(Dropout(0.5))\n # new.add(Conv2D(128, (3, 3), padding='same'))\n # new.add(Activation('relu'))\n # new.add(BatchNormalization())\n # new.add(MaxPooling2D(pool_size=(2, 2)))\n # new.add(Dropout(0.5))\n # new.add(Conv2D(256, (3, 3), padding='same'))\n # new.add(Activation('relu'))\n # new.add(BatchNormalization())\n # new.add(Dropout(0.5))\n # new.add(Conv2D(512, (3, 3), padding='same'))\n # new.add(Activation('relu'))\n # new.add(BatchNormalization())\n # new.add(MaxPooling2D(pool_size=(2, 2)))\n # new.add(Dropout(0.5))\n new.add(Flatten())\n new.add(Dense(500, activation='relu'))\n new.add(Dropout(0.25))\n new.add(Dense(300, activation='relu'))\n new.add(Dropout(0.25))\n new.add(Dense(100, activation='relu'))\n new.add(Dropout(0.25))\n new.add(Dense(8, activation='softmax'))\n\n # selecting which layers to train\n # for layer in new.layers:\n # layer.trainable = False\n # break\n\n return new\n\n\ndef as_keras_metric(method):\n import functools\n from keras import backend as K\n import tensorflow as tf\n\n @functools.wraps(method)\n def wrapper(self, args, **kwargs):\n \"\"\" Wrapper for turning tensorflow metrics into keras metrics \"\"\"\n value, update_op = method(self, args, **kwargs)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([update_op]):\n value = tf.identity(value)\n return value\n return wrapper\n\n\nprecision = as_keras_metric(tf.metrics.precision)\nrecall = as_keras_metric(tf.metrics.recall)\n\n\nprint('[INFO] creating model...')\nmodel = fineTune(model_path, number)\nmodel.summary()\n\ntrainY = to_categorical(trainY)\ntestY = to_categorical(testY)\n\n# opt = SGD(lr=0.01, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', precision, recall])\n\nH = model.fit(trainX, trainY, epochs=40, verbose=1, validation_data=(testX, testY))\np = model.predict(testX)\nfor item in p[0]:\n print(item.argmax())\n","sub_path":"CODE/cropped_faces.py","file_name":"cropped_faces.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296707808","text":"\"\"\"\nvirtualenv -p python 3 \"folder_name\" && cd \"folder_name\"\nsource bin/activate\npython --version\npip install requests bs4\npip install python-twitter\n\"\"\"\nimport twitter\n\nconsumer_key= '4pTMlfX5q9KOKbWh7i0W47be8'\nconsumer_secret = '4MvFAJIztonzmTljquXAmxF3uUJ42SeKK1mhCc89bFynwf5NQb'\naccess_token = '934521371656417280-qAdxEbVfRw6NZEfZhEhQpneyjiw8xfz'\naccess_secret = 
'dGwIIwbgKIqKexrRnZC9WElwzlCDabVmGUfnCHI893ywS'\n\napi = twitter.Api(consumer_key=consumer_key,\n consumer_secret = consumer_secret,\n access_token_key= access_token,\n access_token_secret=access_secret)\n\nprint(api.VerifyCredentials())\n\n#get users and followers of user account\nfollowers = api.GetFollowers()\n#friends = api.GetFriends()\n\n#tweeting to my professor\n#post_update = api.PostUpdates(status='@tyleransom tweet sent from python #Econ5970')\n#print(post_update)\n\nfilename = \"Followers.csv\"\nf = open(filename, \"w\")\nheaders = \"User_Screen_Name, \\n\"\nf.write(headers)\n\n#pull all followers User Names\nfor u in followers:\n print(u.screen_name)\n\n f.write(u.screen_name + \"\\n\")\n\nf.close\n","sub_path":"ProblemSets/PS5/PS5b_Hoehne.py","file_name":"PS5b_Hoehne.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"303754535","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport os\n\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\n\nimport logging\nimport os\nfrom cleverhans.attacks import DeepFool\nfrom cleverhans.utils import pair_visual, grid_visual, AccuracyReport\nfrom cleverhans.utils import set_log_level\nfrom cleverhans.utils_tf import model_train, model_eval, tf_model_load\n\n#from classifier_extend import classifier_extend\nfrom wrap_classifier import ClfWrapper\nsys.path.append('/home/chiba/research/master/classifiers/')\nsys.path.append('/home/chiba/research/tensorflow/dl_utils/')\nfrom utils import *\nfrom data import *\n\nFLAGS = flags.FLAGS\n\n\ndef cw_attack(dataset_name, nn_type, save_dir, eps, gpu_list,\n batch_size=128, nb_classes=10, source_samples=10000,\n model_path=os.path.join(\"models\", \"mnist\")):\n\n # Object used to keep track of (and return) key accuracies\n report = AccuracyReport()\n\n if dataset_name == 'fmnist':\n input_shape = (28, 28, 1)\n cnn_dim = 4\n augment = False\n dataset = FashionMnistDataset(code_dim=0, code_init=None)\n elif dataset_name == 'cifar10':\n input_shape = (24, 24, 3)\n cnn_dim = 16\n augment = True\n dataset = Cifar10Dataset('/home/chiba/data/cifar10/cifar-10-batches-py', code_dim=0, code_init=None)\n\n if nn_type == 'resnet':\n from classifiers.resnet import Classifier\n elif nn_type == 'vgg':\n from classifiers.vgg import Classifier\n else:\n raise ValueError('Neural Network %s is unsupported.'%FLAGS.nn_type)\n\n # MNIST-specific dimensions\n img_rows, img_cols, channels = input_shape\n\n # Set TF random seed to improve reproducibility\n tf.set_random_seed(1234)\n\n # Create TF session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = gpu_list\n sess = tf.Session(config=config)\n print(\"Created TensorFlow session.\")\n\n set_log_level(logging.DEBUG)\n\n # Define input TF placeholder\n x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))\n y = tf.placeholder(tf.float32, shape=(None, nb_classes))\n\n X_test, Y_test = dataset.test_images, dataset.test_labels\n if augment:\n X_test = crop_images(X_test, input_shape)\n\n # Define TF model graph\n #model = make_basic_cnn()\n model = Classifier(sess, batch_size, input_shape, cnn_dim, save_dir, augment)\n model.build_model()\n #model = classifier_extend(model)\n model = ClfWrapper(model)\n 
preds = model.get_probs(x)\n print(\"Defined TensorFlow model graph.\")\n\n rng = np.random.RandomState([2017, 8, 30])\n # check if we've trained before, and if we have, use that pre-trained model\n if os.path.exists(model_path + \".meta\"):\n tf_model_load(sess, model_path)\n\n # Evaluate the accuracy of the MNIST model on legitimate test examples\n eval_params = {'batch_size': batch_size}\n #accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)\n #print('Test accuracy on legitimate test examples: {0}'.format(accuracy))\n #report.clean_train_clean_eval = accuracy\n\n ###########################################################################\n # Craft adversarial examples using Carlini and Wagner's approach\n ###########################################################################\n nb_adv_per_sample = '1'\n print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +\n ' adversarial examples')\n print(\"This could take some time ...\")\n\n # Instantiate a CW attack object\n df = DeepFool(model, back='tf', sess=sess)\n\n adv_inputs = X_test[:source_samples]\n\n adv_ys = None\n yname = \"y\"\n\n #df_params = {'clip_min': 0., 'clip_max': 1., 'overshoot': 0.0}\n df_params = {'clip_min': 0., 'clip_max': 1., 'overshoot': 0.02}\n\n #adv = fgm.generate_np(adv_inputs,\n # **fgm_params)\n #adv = np.clip(adv, adv-eps, adv+eps)\n\n n_batches = source_samples // batch_size\n adv = []\n for i in xrange(n_batches):\n batch = adv_inputs[i*batch_size:(i+1)*batch_size]\n adv_ = df.generate_np(batch,\n **df_params)\n adv_ = np.clip(adv_, batch-eps, batch+eps)\n adv.extend(list(adv_))\n adv = np.asarray(adv)\n\n adv.dump(os.path.join(save_dir, 'adversarials.pkl'))\n adv_inputs.dump(os.path.join(save_dir, 'originals.pkl'))\n\n eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}\n adv_accuracy = 1 - \\\n model_eval(sess, x, y, preds, adv, Y_test[\n :source_samples], args=eval_params)\n\n print('--------------------------------------')\n\n # Compute the number of adversarial examples that were successfully found\n print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))\n report.clean_train_adv_eval = 1. - adv_accuracy\n\n # Compute the average distortion introduced by the algorithm\n percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,\n axis=(1, 2, 3))**.5)\n print('Avg. 
L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))\n\n # Close TF session\n sess.close()\n\n return report\n\n\ndef main(argv=None):\n if not os.path.exists(FLAGS.save_dir):\n os.makedirs(FLAGS.save_dir)\n\n eps = float(FLAGS.eps) / 255.\n cw_attack(FLAGS.dataset, FLAGS.nn_type, FLAGS.save_dir, eps,\n gpu_list=FLAGS.gpu_list,\n batch_size=FLAGS.batch_size,\n nb_classes=FLAGS.nb_classes,\n source_samples=FLAGS.source_samples,\n model_path=FLAGS.model_path)\n\nif __name__ == '__main__':\n flags.DEFINE_integer('batch_size', 100, 'Size of training batches')\n flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')\n flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')\n flags.DEFINE_integer('eps', 2, 'integer value in [0, 255] which indicates the strength of a pertubation')\n flags.DEFINE_string('dataset', 'fmnist', 'dataset name [fmnist, cifar10]')\n flags.DEFINE_string('save_dir', 'save', 'directory to save adversarial examples')\n flags.DEFINE_string('model_path', os.path.join(\"models\", \"mnist\"),\n 'Path to save or load the model file')\n flags.DEFINE_string('gpu_list', '0', 'gpu numbers to use')\n flags.DEFINE_string('nn_type', 'resnet', 'neural networks type [resnet, vgg]')\n\n tf.app.run()\n","sub_path":"cleverhans/deepfool.py","file_name":"deepfool.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"459583364","text":"import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport SimpleITK as sitk\n\n# from UNet_2D.data_set import MyDataset, testDataset\nfrom unet2d_znw import UNet2D\nfrom data_set import MyDataset, testDataset\nfrom MYLOSS import myLoss, FocalLoss, DSC_LOSS\n\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nimport sys\n# sys.path.append(r'/data/zhangnaiwen442/all_fold_thr40_1205/fold_2/XG_Net/UNet_2D')\n\n\n\ndef DSC(pred, Y):\n N = pred.size(0)\n\n smooth = 1e-10\n\n pred_flat = pred.view(N, -1)\n Y_flat = Y.view(N, -1)\n Dice = 0\n for i in range(1):\n Y_C = Y_flat\n Y_C[Y_C != i + 1] = -1\n intersection = torch.eq(pred_flat, Y_C).float()\n\n dice = 2 * (intersection.sum(1)) / (\n pred_flat[pred_flat == i + 1].sum().float() + Y_flat[Y_flat == i + 1].sum().float() + smooth)\n dice = dice.sum() / N\n\n # print('C {} dice is {:.4f}'.format(i + 1, dice))\n\n # return Dice/3\n return dice\n\n\ndef DSC_2(pred, Y):\n pred_gt_data = pred.data.cpu().numpy()*Y.data.cpu().numpy()\n pred_sum = np.sum(pred.data.cpu().numpy())\n gt_sum = np.sum(Y.data.cpu().numpy())\n pred_gt_sum = np.sum(pred_gt_data)\n\n return (2*pred_gt_sum)/(pred_sum+gt_sum)\n\n\nif __name__ == \"__main__\":\n\n train_dataset = MyDataset(path_image='/data/zhangnaiwen442/testNorm/fold_2/Vessel_Training/VesselSeg')\n train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n test_dataset = testDataset(path_image='/data/zhangnaiwen442/testNorm/fold_2/Vessel_Test/VesselSeg')\n test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n\n model = UNet2D(n_class=2)\n # device = torch.device(\"cuda: 2\" if torch.cuda.is_available() else \"cpu\")\n device = torch.device('cuda')\n model.to(device)\n\n optimizer = torch.optim.SGD(model.parameters(), 1e-4, momentum=0.3, weight_decay=0.0, nesterov=False)\n\n best_loss = 100000\n best_dice = 0\n dicesum = 0\n bestepoch = 0\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 
step_size=int(50 * 0.8), gamma=0.5)\n # train the model\n for epoch in range(2000):\n model.train()\n all_loss = 0\n index = 0\n for batch_ndx, x in enumerate(train_dataloader):\n optimizer.zero_grad()\n data = x['vein']\n label = x['label']\n data, label = data.to(device), label.to(device).long()\n pred = model(data)\n # pred = torch.softmax(pred, 1)\n # print(\"training pred.shape:\", pred.shape, \"label.shape:\", label.shape)\n # loss = DSC(pred[:, 1, :, :], label)\n loss = F.cross_entropy(pred, label)\n # print('**** ', epoch, ' **** loss:', loss)\n # lossFun = FocalLoss()\n # loss = lossFun(pred, label)\n loss.backward()\n optimizer.step()\n all_loss += loss\n index += 1\n\n print(\"Epoch {}, training loss is {:.4f}\".format(epoch + 1, all_loss/index))\n\n if epoch % 10 == 0:\n model.eval()\n with torch.no_grad():\n alltestdsc = 0\n for valnum, TestData in enumerate(test_dataloader):\n test_image, test_label = TestData['vein'].to(device), TestData['label'].to(device).long()\n\n test_label = np.squeeze(test_label)\n vxpred = torch.zeros(test_label.shape)\n vxpred = vxpred.to(device)\n for vallayer in range(0, test_image.shape[2]):\n validpred = model(test_image[:, :, vallayer, :, :])\n validpred = torch.softmax(validpred, 1)\n # validpred = validpred[:, 1, :, :]\n # validpred[validpred > 0.5] = 1\n # validpred[validpred <= 0.5] = 0\n _, validpredmax = torch.max(validpred, 1)\n vxpred[vallayer, :, :] = np.squeeze(validpredmax)\n Dice = DSC_2(vxpred, test_label)\n print('test_label.sum', np.sum(test_label.data.cpu().numpy()), 'predimage.sum', np.sum(vxpred.data.cpu().numpy()))\n predimage = sitk.GetImageFromArray(vxpred.data.cpu().numpy())\n save_path = '/data/zhangnaiwen442/testNorm/fold_2/predImage_IN/' + str(epoch) + '_1209_' + str(valnum) + '.nii.gz'\n sitk.WriteImage(predimage, save_path)\n\n print(\"testing dice is {:.4f}\".format(Dice))\n print(\" \")\n alltestdsc += Dice\n\n if alltestdsc > best_dice:\n best_dice = alltestdsc\n bestepoch = epoch\n torch.save({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, \"./lr4_IN_2d.pt\")\n # print(\"Saving model Fold1.pt\")\n\n print('Epoch {},best mean dice is {:.4f}'.format(bestepoch + 1, best_dice / (valnum + 1)))\n # alltestdsc = 0\n\n\n","sub_path":"XG_Net_IN/UNet_2D/train_and_test.py","file_name":"train_and_test.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"327481328","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Parameters.\ninput_filename = \"/home/craig/Code/ccir/output/images/2017222_139Ce57Co_osem0.img\"\nshape = (400, 400, 400) # matrix size\ndtype = np.dtype('>u2') # big-endian unsigned integer (16bit)\noutput_filename = \"/home/craig/Code/ccir/output/images/2017222_139Ce57Co_osem0.png\"\n\n# Reading.\nfid = open(input_filename, 'rb')\ndata = np.fromfile(fid, dtype)\nimage = data.reshape(shape)\n\n# Display.\nplt.imshow(image, cmap = \"gray\")\nplt.savefig(output_filename)\nplt.show()\n","sub_path":"analysis_programs/lorentzian_fit/read_image.py","file_name":"read_image.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261057405","text":"\"\"\"\nCreated on Mon Jun 6 11:20:35 2016\n\n@author: chc\n\"\"\"\n\nimport numpy as _np\nimport copy as _copy\n\nfrom scipy.signal import savgol_filter as _sg\n\nfrom crikit.cri.algorithms.kk import hilbertfft 
as _hilbert\n\nfrom crikit.preprocess.algorithms.als import AlsCvxopt as _AlsCvxopt\n\nfrom crikit.utils.datacheck import _rng_is_pix_vec\n\n\nclass PhaseErrCorrectALS:\n \"\"\"\n Phase error correction using alternating least squares (ALS)\n\n Reference\n ---------\n * C H Camp Jr, Y J Lee, and M T Cicerone, JRS (2016).\n \"\"\"\n def __init__(self, smoothness_param=1, asym_param=1e-2,\n redux=10, order=2, rng=None, fix_end_points=False,\n fix_rng=None, max_iter=100, min_diff=1e-5, **kwargs):\n\n\n self.rng = _rng_is_pix_vec(rng)\n self._k = kwargs\n\n self._k.update({'smoothness_param' : smoothness_param,\n 'asym_param' : asym_param,\n 'redux' : redux,\n 'order' : order,\n 'fix_end_points' : fix_end_points,\n 'fix_rng' : fix_rng,\n 'max_iter' : max_iter,\n 'min_diff' : min_diff})\n\n\n def _calc(self, data, ret_obj, **kwargs):\n\n self._inst_als = _AlsCvxopt(**kwargs)\n\n try:\n shp = data.shape[0:-2]\n total_num = _np.array(shp).prod()\n\n counter = 1\n for idx in _np.ndindex(shp):\n print('Detrended iteration {} / {}'.format(counter, total_num))\n ph = _np.unwrap(_np.angle(data[idx]))\n if self.rng is None:\n err_phase = self._inst_als.calculate(ph)\n else:\n err_phase = self._inst_als.calculate(ph[..., self.rng])\n\n h = _np.zeros(err_phase.shape)\n h += _hilbert(err_phase)\n\n correction_factor = 1/_np.exp(h) * _np.exp(-1j*err_phase)\n\n if self.rng is None:\n ret_obj[idx] *= correction_factor\n else:\n ret_obj[idx][..., self.rng] *= correction_factor\n counter += 1\n except:\n return False\n else:\n# print(self._inst_als.__dict__)\n return True\n\n def calculate(self, data, **kwargs):\n\n data_copy = _copy.deepcopy(data)\n self._k.update(kwargs)\n\n success = self._calc(data, ret_obj=data_copy, **self._k)\n if success:\n return data_copy\n else:\n return None\n\n def transform(self, data, **kwargs):\n self._k.update(kwargs)\n\n success = self._calc(data, ret_obj=data, **self._k)\n return success\n\nclass ScaleErrCorrectSG:\n \"\"\"\n Scale error correction using Savitky-Golay\n\n Reference\n ---------\n * C H Camp Jr, Y J Lee, and M T Cicerone, JRS (2016).\n \"\"\"\n def __init__(self, win_size=601, order=2, rng=None):\n self.win_size = win_size\n self.order = order\n self.rng = _rng_is_pix_vec(rng)\n\n def _calc(self, data, ret_obj):\n try:\n if self.rng is None:\n correction_factor = _sg(data.real, window_length=self.win_size,\n polyorder=self.order, axis=-1)\n else:\n correction_factor = _sg(data[..., self.rng].real,\n window_length=self.win_size,\n polyorder=self.order, axis=-1)\n\n correction_factor[correction_factor == 0] = 1\n correction_factor **= -1\n\n if self.rng is None:\n ret_obj *= correction_factor\n else:\n ret_obj[..., self.rng] *= correction_factor\n except:\n return False\n else:\n return True\n\n def calculate(self, data):\n\n data_copy = _copy.deepcopy(data)\n success = self._calc(data, ret_obj=data_copy)\n if success:\n return data_copy\n else:\n return None\n\n def transform(self, data):\n success = self._calc(data, ret_obj=data)\n return success\n\n\nif __name__ == '__main__': # pragma: no cover\n import matplotlib.pyplot as plt\n from crikit.cri.kk import KramersKronig\n import timeit\n\n SPECT_LEN = 878\n WN = _np.linspace(4000, 500, SPECT_LEN)\n chi = (1 / ((WN - 1000 - 1j * 10)) +\n 1 / ((WN - 1020 - 1j * 10)) +\n 1 / ((WN - 2800 - 1j * 10)))\n chiNR = 0*chi + 0.055\n exc = WN\n sig = _np.abs(chi + chiNR)**2\n\n sigNR = _np.abs(chiNR)**2\n sigRef = chiNR*(WN/1e3)**.5\n\n NUM_REPS = 10\n\n kk = KramersKronig()\n kkd = kk.calculate(sig, sigRef)\n kkd = 
_np.dot(_np.random.rand(NUM_REPS,NUM_REPS,1)*_np.ones((NUM_REPS, NUM_REPS, 1)), kkd[None, :])\n\n# plt.plot(chi.imag/chiNR.real, label='Ideal')\n plt.plot(kkd[5, 5, :].imag, label='Before Correction')\n\n start = timeit.default_timer()\n phase_err_correct_als = PhaseErrCorrectALS(fix_end_points=True,\n smoothness_param=1e8,\n asym_param=1e-3,\n redux=1)\n success = phase_err_correct_als.transform(kkd, verbose=False)\n print('Success? : {}'.format(success))\n stop = timeit.default_timer()\n print('Sec/spectrum: {:.3g}'.format((stop-start)/NUM_REPS**2))\n\n\n#\n plt.plot(kkd[5,5,:].imag, label='After PhErr Corr.')\n plt.legend()\n plt.show()\n# scale_err_correct_sg = ScaleErrCorrectSG()\n# success = scale_err_correct_sg.transform(kkd)\n# print('Success? : {}'.format(success))\n# plt.plot(kkd[5, 5, :].imag, label='After Correction')\n# plt.legend(loc='best')\n# plt.show()\n\n# scale_err_correct_sg = ScaleErrCorrectSG()\n# out = scale_err_correct_sg.calculate(kkd[0,0,:])\n# plt.plot(out.imag)\n#\n# scale_err_correct_sg = ScaleErrCorrectSG(win_size=11, order=2)\n# out = scale_err_correct_sg.calculate(kkd[0,0,:])\n# plt.plot(out.imag)\n#\n# plt.show()\n\n#\n# phase_err_correct_als = PhaseErrCorrectALS(print_iteration=False)\n# out = phase_err_correct_als.calculate(kkd)\n#\n# plt.plot(out[0,0,:].imag)\n#\n# phase_err_correct_als = PhaseErrCorrectALS(smoothness_param=1e1,\n# asym_param=1e-2,\n# redux_factor=1)\n\n\n# print(phase_err_correct_als._k)","sub_path":"crikit/cri/error_correction.py","file_name":"error_correction.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"97505479","text":"import argparse\nfrom googleapiclient import discovery\nimport httplib2\nfrom flask import json, jsonify\nfrom oauth2client.client import GoogleCredentials\nfrom cassandra.cluster import Cluster,named_tuple_factory\nimport traceback\nimport requests\nimport re\nfrom datetime import datetime\nimport uuid\n\nepoch = datetime.utcfromtimestamp(0)\nprint(epoch)\ndef unix_time_millis(dt):\n return (dt - epoch).total_seconds() * 1000.0\n\ndef extract_entity_info(entity):\n \"\"\"Extract information about an entity.\"\"\"\n type = str(entity['type'])\n name = entity['name']\n metadata = entity['metadata']\n salience = entity['salience']\n wiki_url = metadata.get('wikipedia_url', None)\n return (type, name, salience, wiki_url)\n\ndef extract_article_info(article):\n \"\"\"Extract information about an entity.\"\"\"\n article_description = article['description']\n article_title = article['title']\n article_publishedAt = article['publishedAt']\n article_url = article['url']\n author = article['author']\n return (article_title,article_description, article_publishedAt,article_url,author)\n\ntry:\n # connect to database\n cluster = Cluster(['162.243.58.135'])\n session = cluster.connect('myka_keyspace')\n session.row_factory = named_tuple_factory\n insertNewsTickDataStmt = session.prepare(\"INSERT INTO tick_news_day (tick_code,tick_date,tick_time,article_title,sent_polarity,sent_magnitude,article_desc, \"\n \"published_at,author,news_url) VALUES (?,?,?,?,?,?,?,?,?,?) \")\n news_look_up_stmt = session.prepare(\"SELECT * FROM tick_news_day WHERE tick_date = ? and published_at = ? and author = ? and news_url = ? 
\")\n tick_date = datetime.now().strftime('%Y%m%d')\n newsLink = \"https://newsapi.org/v1/articles?source=cnbc&sortBy=top&apiKey=114fdc0b1c8741c79ce2fa3668365f92\"\n ticker_code = open(\"ticker.csv\", 'r').read()\n r = requests.get(newsLink)\n data = json.loads(r.content)\n for article in data['articles']:\n highest_salience = 0.0\n inLoopFirstTime = True\n article_title,article_description,article_publishedAt,article_url,author = extract_article_info(article)\n articleContent = article_title + article_description\n print(\" New Article Title --->> \"+article_title)\n print(\" New Article Description --->> \"+article_description)\n DISCOVERY_URL = ('https://{api}.googleapis.com/'\n '$discovery/rest?version={apiVersion}')\n '''Run a sentiment analysis request on text within a passed filename'''\n http = httplib2.Http()\n credentials = GoogleCredentials.get_application_default().create_scoped(\n ['https://www.googleapis.com/auth/cloud-platform'])\n http=httplib2.Http()\n credentials.authorize(http)\n service = discovery.build('language', 'v1beta1',\n http=http, discoveryServiceUrl=DISCOVERY_URL)\n body={\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': str(articleContent),\n }\n }\n entity_service_request = service.documents().analyzeEntities(body=body)\n entity_response = entity_service_request.execute()\n entities = entity_response['entities']\n for entity in entities:\n type, name, current_salience, wiki_url = extract_entity_info(entity)\n if(inLoopFirstTime):\n highest_salience = current_salience\n inLoopFirstTime = False\n if(type in (\"ORGANIZATION\")):\n if(current_salience >= highest_salience):\n highest_salience = current_salience\n text_search = re.search( r'(.*)'+name+'(.*?) .*', ticker_code, re.M|re.I)\n if(text_search):\n tick_code = re.search( r'(.*?),', text_search.group(), re.M|re.I).group(0)\n sentiment_service_request = service.documents().analyzeSentiment(body=body)\n sentiment_response = sentiment_service_request.execute()\n polarity = sentiment_response['documentSentiment']['polarity']\n magnitude = sentiment_response['documentSentiment']['magnitude']\n print(\" New Article Title --->> \"+article_title)\n print(\" New Article Description --->> \"+article_description)\n print(\" Ticker Code related to the news Article --->>\"+tick_code)\n print('Sentiment: polarity of %s with magnitude of %s' % (polarity, magnitude))\n tick_code = tick_code.strip( ',' )\n newsPresent = session.execute(news_look_up_stmt,[tick_date,article_publishedAt,author,article_url])\n if not newsPresent:\n tickerNewsData = session.execute(insertNewsTickDataStmt,(tick_code,tick_date,uuid.uuid1(),article_title,\n polarity,magnitude,article_description,article_publishedAt,author,article_url))\nexcept Exception as e:\n print('Generic exception: ' + traceback.format_exc()+ \" link is \"+str(e))\n\n","sub_path":"myka.data.provider/src/sentiment-analysis.py","file_name":"sentiment-analysis.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"280941831","text":"import subprocess\nimport time\n\ndef main () :\n startTime = time.time()\n repeatTimes = 1\n start = 1\n end = 1000\n MAX_LOGS = 1000\n #MAX_LOGS = 1102363703\n filename = \"testFile.txt\"\n #timeLogger = []\n\n for i in range(repeatTimes):\n iterationStartTime = time.time()\n for index in range(1,MAX_LOGS,1000):\n start = index\n end = start + 1000\n process = subprocess.run([f\"./ctclient -first {start} -last {end} getentries >> {filename}\"], 
shell=True)\n \n print(f\"Iteration Time for {MAX_LOGS}: {round(time.time()-iterationStartTime,2)} running time: {round(time.time()-startTime,2)}\")\n #timeLogger.append(time.time()-iterationStartTime)\n \n print(\"total time: \", round(time.time() - startTime,2))\n print(f\"Average time for {MAX_LOGS} for {repeatTimes} iterations: {round((time.time() - startTime)/repeatTimes,2)}\") \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"client/ctclient/schedulingScript.py","file_name":"schedulingScript.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"525423161","text":"import os, io\r\nfrom google.cloud import vision\r\nimport pandas as pd\r\n\r\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r\"VisionAPIKey.json\"\r\n\r\nclient = vision.ImageAnnotatorClient()\r\n\r\nimage_path = r'C:\\Python27\\Image9.JPG'\r\n\r\nwith io.open(image_path, 'rb') as image_file:\r\n content = image_file.read()\r\n\r\ntext_file = open(\"Image9.txt\",\"w+\")\r\n# construct an image instance\r\nimage = vision.types.Image(content=content)\r\n\r\n# annotate Image Response\r\nresponse = client.text_detection(image=image) # returns TextAnnotation\r\ndf = pd.DataFrame(columns=['locale', 'description'])\r\n\r\ntexts = response.text_annotations\r\nfor text in texts:\r\n df = df.append(\r\n dict(\r\n locale=text.locale,\r\n description=text.description.encode('utf-8')\r\n ),\r\n ignore_index=True\r\n )\r\ntext_file.write(df['description'][0])\r\ntext_file.close() \r\n\r\n\r\n","sub_path":"Vision_API.py","file_name":"Vision_API.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"108583704","text":"# On a broken calculator that has a number showing on its display, we can perform two operations:\n#\n# Double: Multiply the number on the display by 2, or;\n# Decrement: Subtract 1 from the number on the display.\n# Initially, the calculator is displaying the number X.\n#\n# Return the minimum number of operations needed to display the number Y.\n#\n#\n#\n# Example 1:\n#\n# Input: X = 2, Y = 3\n# Output: 2\n# Explanation: Use double operation and then decrement operation {2 -> 4 -> 3}.\n\n\nclass Solution:\n def brokenCalc(self, X: int, Y: int) -> int:\n count = 0\n while True:\n if Y < X:\n count += (X-Y)\n break\n elif Y > X:\n if Y % 2 != 0:\n Y += 1\n count += 1\n else:\n Y //= 2\n count += 1\n else:\n break\n return count\n\ns = Solution()\nprint(s.brokenCalc(3, 4))\n","sub_path":"Python_Projects/6-Google codejam/092_broken_calculator.py","file_name":"092_broken_calculator.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"649406924","text":"from flask import Flask, request, Response, render_template, jsonify\nimport requests\nimport itertools\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, IntegerField\nfrom wtforms.validators import Regexp, NumberRange, Optional, Required\nimport os \n\nclass RequiredIf(object):\n # a validator which makes a field required if\n # another field is set and has a truthy value\n\n def __init__(self, *args, **kwargs):\n self.conditions = kwargs\n\n def __call__(self, form, field):\n for name, data in self.conditions.items():\n if name not in form._fields:\n Optional(form, field)\n else:\n condition_field = 
form._fields.get(name)\n if condition_field.data == data and not field.data:\n Required()(form, field)\n Optional()(form,field)\n \nclass WordForm(FlaskForm):\n avail_letters = StringField(\"Letters\", validators= [ RequiredIf(avail_pattern=''),\n Regexp(r'^[a-z]+$', message=\"must contain letters only\")\n ])\n\n avail_length = IntegerField(\"WordLength\", validators= [ Optional(),\n NumberRange(min=3, max=10, message=\"Wordlength must be equal to or between 3 & 10 characters\")\n ])\n\n avail_pattern = StringField(\"Pattern\", validators= [RequiredIf(avail_letters='')])\n\n submit = SubmitField(\"Go\")\n\ncsrf = CSRFProtect()\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"row the boat\"\ncsrf.init_app(app)\nlength = 0\n\n@app.route('/index')\ndef index():\n form = WordForm()\n return render_template(\"index.html\", form=form)\n\ndef filterLength(word):\n return len(word) == length\n\ndef filterPattern(word):\n return re.search(\"^\"+pattern+\"$\",word) \n\n@app.route('/words', methods=['POST','GET'])\ndef letters_2_words():\n\n form = WordForm()\n if form.validate_on_submit():\n letters = form.avail_letters.data\n global length;\n length = form.avail_length.data\n global pattern;\n pattern = form.avail_pattern.data\n else:\n return render_template(\"index.html\", form=form)\n\n with open('sowpods.txt') as f:\n good_words = set(x.strip().lower() for x in f.readlines())\n\n if (length != None):\n # filter words that are wordlength long\n good_words = set(filter(filterLength, good_words))\n\n if (pattern != \"\"):\n # filter words that fit a pattern\n good_words = set(filter(filterPattern, good_words))\n\n word_set = set()\n\n if (letters != \"\"):\n for l in range(3,len(letters)+1):\n for word in itertools.permutations(letters,l):\n w = \"\".join(word)\n if w in good_words:\n word_set.add(w)\n else: \n word_set = set(good_words)\n\n # sort alphabetically\n word_set = sorted(word_set)\n return render_template('wordlist.html',\n wordlist=sorted(word_set, key=len),\n name=\"CS4131\")\n\n\n@app.route('/proxy/', methods=['GET'])\ndef proxy(var):\n result = requests.get('https://www.dictionaryapi.com/api/v3/references/collegiate/json/'+var+'?key='+ os.environ[\"PROJECT_API_KEY\"])\n \n resp = Response(result.text)\n\n if result.status_code != 200:\n return \"Error with API\"\n else:\n jsonresponse = result.json()\n resp.headers['Content-Type'] = 'application/json'\n return str(jsonresponse[0]['shortdef'][0])\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395054054","text":"from .base import *\n\nMEDIA_ROOT = '/var/www/trololo/media/prod/'\nSTATIC_ROOT = '/var/www/trololo/static/prod/'\n\nALLOWED_HOSTS = ['127.0.0.1', 'worddict.net']\n\n# LOGGING\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging\n# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---\nLOGGING = {\n 'version': 1,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',\n 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'file': 
{\n 'level': 'INFO',\n #'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/home/ubuntu/projects/trololo/logs/prod_app.log',\n 'maxBytes': 1024 * 1024 * 10, # 10 MB\n 'backupCount': 2,\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['file'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'app': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'trololo_prod',\n 'USER': 'trololo_user',\n 'PASSWORD': 'louShoote6',\n 'HOST': '127.0.0.1',\n 'PORT': '5432',\n }\n}\n\nSITE_ID = 3","sub_path":"trololo/trololo/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54009686","text":"#/usr/bin/python\n\"\"\"zip wraps two or more iterators with a lazy generator. The zip generator yields\ntuples containing the next value from each iterator.\"\"\"\n\n#iterate both lists in parallel\n\nlist_test = [\"veera\",\"brahmam\",\"vannem\",\"reddi\"]\nlist_test_lengths = [ len(i) for i in list_test] \nfor name,lens in zip( list_test , list_test_lengths):\n print (\" Name : %s length is %d\"%(name,lens))\n\ncount=0\nlongest_name=\"\"\nfor name,lens in zip( list_test , list_test_lengths):\n if lens > count:\n count = lens\n longest_name = name\nprint (longest_name)\n \n","sub_path":"usage_of_zip.py","file_name":"usage_of_zip.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"432289121","text":"import pyspark\nfrom pyspark.sql import SparkSession\n\n# Creating a SparkSession\nspark = SparkSession.builder \\\n .master('local[*]') \\\n .appName('first_spark_application') \\\n .getOrCreate()\n\nflights = spark.read.csv('/Users/wel51x/Box Sync/MyBox/Code/DataCamp/data/flights.csv',\n sep=',',\n header=True,\n inferSchema=True,\n nullValue='NA')\n\n# Get number of records\nprint(\"The data contain %d records.\" % flights.count(), '\\n')\n\n# Remove the 'flight' column\nflights = flights.drop('flight')\n\n# Number of records with missing 'delay' values\nprint(\"Flights with no value in delay field:\", flights.filter('delay IS NULL').count())\n\n# Remove records with missing 'delay' values\nflights = flights.filter('delay IS NOT NULL')\n\n# Remove records with missing values in any column and get the number of remaining rows\nflights = flights.dropna()\nprint(\"\\nThe data contains %d records after dropping records with na values.\" % flights.count())\n\nspark.stop()\n","sub_path":"Chapter_2/Ex2a.1.py","file_name":"Ex2a.1.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"74919958","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
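# The zip example above rebuilds the longest-name search with a manual
# counter; the same result drops out of max() with a key function, and the
# separate lengths list is unnecessary. Illustrative only:
names = ["veera", "brahmam", "vannem", "reddi"]
for name, length in zip(names, map(len, names)):
    print("Name : %s length is %d" % (name, length))
print(max(names, key=len))  # brahmam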
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass MongoDbMigrationSettings(Model):\n \"\"\"Describes how a MongoDB data migration should be performed.\n\n All required parameters must be populated in order to send to Azure.\n\n :param boost_rus: The RU limit on a CosmosDB target that collections will\n be temporarily increased to (if lower) during the initial copy of a\n migration, from 10,000 to 1,000,000, or 0 to use the default boost (which\n is generally the maximum), or null to not boost the RUs. This setting has\n no effect on non-CosmosDB targets.\n :type boost_rus: int\n :param databases: Required. The databases on the source cluster to migrate\n to the target. The keys are the names of the databases.\n :type databases: dict[str,\n ~azure.mgmt.datamigration.models.MongoDbDatabaseSettings]\n :param replication: Describes how changes will be replicated from the\n source to the target. The default is OneTime. Possible values include:\n 'Disabled', 'OneTime', 'Continuous'\n :type replication: str or\n ~azure.mgmt.datamigration.models.MongoDbReplication\n :param source: Required. Settings used to connect to the source cluster\n :type source: ~azure.mgmt.datamigration.models.MongoDbConnectionInfo\n :param target: Required. Settings used to connect to the target cluster\n :type target: ~azure.mgmt.datamigration.models.MongoDbConnectionInfo\n :param throttling: Settings used to limit the resource usage of the\n migration\n :type throttling:\n ~azure.mgmt.datamigration.models.MongoDbThrottlingSettings\n \"\"\"\n\n _validation = {\n 'databases': {'required': True},\n 'source': {'required': True},\n 'target': {'required': True},\n }\n\n _attribute_map = {\n 'boost_rus': {'key': 'boostRUs', 'type': 'int'},\n 'databases': {'key': 'databases', 'type': '{MongoDbDatabaseSettings}'},\n 'replication': {'key': 'replication', 'type': 'str'},\n 'source': {'key': 'source', 'type': 'MongoDbConnectionInfo'},\n 'target': {'key': 'target', 'type': 'MongoDbConnectionInfo'},\n 'throttling': {'key': 'throttling', 'type': 'MongoDbThrottlingSettings'},\n }\n\n def __init__(self, **kwargs):\n super(MongoDbMigrationSettings, self).__init__(**kwargs)\n self.boost_rus = kwargs.get('boost_rus', None)\n self.databases = kwargs.get('databases', None)\n self.replication = kwargs.get('replication', None)\n self.source = kwargs.get('source', None)\n self.target = kwargs.get('target', None)\n self.throttling = kwargs.get('throttling', None)\n","sub_path":"src/dms-preview/azext_dms/vendored_sdks/datamigration/models/mongo_db_migration_settings.py","file_name":"mongo_db_migration_settings.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"70838663","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport glob\nimport datetime\nimport multiprocessing as mp\nimport h5py\nimport numpy as np\nfrom scipy import interpolate\nfrom scipy.io import readsav\nfrom pre_SHARE import GLINK_NAMES\nfrom cfg import *\nimport netCDF4 as nc4\n\nfname_netrc = os.path.expanduser('~/.netrc')\nif not os.path.isfile(fname_netrc):\n os.system('touch %s' % fname_netrc)\n os.system('echo \"machine urs.earthdata.nasa.gov login %s 
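# The OCO2 downloader above shells out to append Earthdata credentials to
# ~/.netrc; the standard library can read that same file back, which avoids
# string-splitting it by hand. A small sketch (the host below matches the
# entry written by the snippet):
import netrc
import os

auth_file = os.path.expanduser("~/.netrc")
if os.path.isfile(auth_file):
    entry = netrc.netrc(auth_file).authenticators("urs.earthdata.nasa.gov")
    if entry:
        login, _account, _password = entry
        print("found Earthdata credentials for", login)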
password %s\" >> %s' % (cfg['username'], cfg['password'], fname_netrc))\n\nfname_urs = os.path.expanduser('~/.urs_cookies')\nif not os.path.isfile(fname_urs):\n os.system('touch %s' % fname_urs)\n\ndef GDATA_OCO2(\n date,\n fdirOut=os.getcwd(),\n httpSite='https://oco2.gesdisc.eosdis.nasa.gov/data/s4pa/OCO2_DATA/OCO2_L2_Lite_FP.7r',\n verbose=True\n ):\n\n linkFull = '%s/%4.4d' % (httpSite, date.year)\n dateTag = '%2.2d%2.2d%2.2d' % (date.year-2000, date.month, date.day)\n searchTags = ['oco2', dateTag, 'nc4']\n fileNames = GLINK_NAMES(linkFull, searchTags=searchTags)\n NFile = len(fileNames)\n if NFile > 0:\n print('Message [GDATA_OCO2]: start download for OCO2 ...')\n else:\n exit('Error [GDATA_OCO2]: data is not available for the requested date.')\n\n for fileName in fileNames:\n dataLink = '%s/%s' % (linkFull, fileName)\n fname0 = '%s/%s' % (fdirOut, fileName)\n if glob.glob(fname0):\n print('Warning [GDATA_OCO2]: %s already exists under %s, skipping...' % (fileName, fdirOut))\n else:\n cmdStr = 'wget --load-cookies ~/.urs_cookies --save-cookies ~/.urs_cookies --keep-session-cookies -P %s/ -q %s' % (fdirOut, dataLink)\n os.system(cmdStr)\n if verbose and glob.glob(fname0):\n print('Message [GDATA_OCO2]: %s has been downloaded under %s.' %(fileName, fdirOut))\n\n NFile_check = len(glob.glob('%s/*' % fdirOut))\n if NFile_check == NFile:\n print('Message [GDATA_OCO2]: complete download for OCO2 ...')\n else:\n print('Warning [GDATA_OCO2]: incomplete download for OCO2, please check ...')\n\nclass RDATA_OCO2:\n def __init__(self, fname, xyRange=None):\n f = nc4.Dataset(fname, 'r')\n self.lon = f.variables['longitude'][...].ravel()\n self.lat = f.variables['latitude'][...].ravel()\n self.xco2= f.variables['xco2'][...].ravel()\n f.close()\n\n if xyRange is not None:\n self.UPDATE(xyRange)\n\n def UPDATE(self, xyRange):\n logic = (self.lon>=xyRange[0])&(self.lon<=xyRange[2]) & (self.lat>=xyRange[1])&(self.lat<=xyRange[3])\n self.lon = self.lon[logic]\n self.lat = self.lat[logic]\n self.xco2= self.xco2[logic]\n\nif __name__ == \"__main__\":\n # date = datetime.datetime(2017, 3, 5)\n # GDATA_OCO2(date)\n fname = '/Users/hoch4240/Chen/work/03_OCO2/pre/data/20151206/OCO2/oco2_LtCO2_151206_B7305Br_160712000329s.nc4'\n f_oco2 = RDATA_OCO2(fname)\n print(f_oco2.lon)\n","sub_path":"pre_OCO2.py","file_name":"pre_OCO2.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528688630","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app, flags\nFLAGS = flags.FLAGS\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport glob\nimport cv2\nfrom PIL import Image\nimport numpy as np\nfrom multiprocessing import Pool, Manager\n\nimport melt\nimport gezi\nfrom gezi import tqdm\n\nNUM_CLASSES = 8\nimgs = None\n\ndef build_features(index):\n global imgs\n fold = index % FLAGS.num_folds\n out_dir = f'{FLAGS.out_dir}/{FLAGS.records_name}/{FLAGS.mark}/{fold}'\n gezi.try_mkdir(out_dir)\n ofile = f'{out_dir}/{index}.tfrec'\n with melt.tfrecords.Writer(ofile) as writer:\n num_imgs = len(imgs) if not FLAGS.small else 100\n for i in tqdm(range(num_imgs), ascii=True, desc=f'{FLAGS.mark}_{index}_{fold}'):\n if i % FLAGS.num_records != index:\n continue\n\n if FLAGS.mark != 'train':\n img, label = imgs[i], None\n else:\n img, label = imgs[i]\n \n feature = {}\n feature['id'] = 
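# RDATA_OCO2.UPDATE above keeps only the soundings that fall inside a
# [lon0, lat0, lon1, lat1] box using one boolean mask; the same pattern in
# isolation, with made-up points:
import numpy as np

lon = np.array([10.0, 20.0, 30.0, 40.0])
lat = np.array([-5.0, 0.0, 5.0, 10.0])
box = (15.0, -1.0, 35.0, 8.0)  # lon0, lat0, lon1, lat1
keep = (lon >= box[0]) & (lon <= box[2]) & (lat >= box[1]) & (lat <= box[3])
print(lon[keep], lat[keep])  # [20. 30.] [0. 5.]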
os.path.splitext(os.path.basename(img))[0]\n if feature['id'].isdigit():\n feature['id'] = int(feature['id'])\n ## 输入是tiff转成png存储\n # feature['image'] = melt.read_image(img) \n feature['image'] = melt.read_image_as(img, 'png')\n dtype = np.uint16 if FLAGS.data_version == 1 else np.uint8\n mask = convert_mask(cv2.imread(label, cv2.IMREAD_UNCHANGED).astype(dtype)) if label else ''\n feature['mask'] = melt.image.convert_image(Image.fromarray(mask), 'png') if mask != '' else ''\n if mask != '':\n feature['bins'] = np.bincount(mask.reshape(-1), minlength=NUM_CLASSES)\n feature['components'] = tf.reduce_max(tfa.image.connected_components(mask)).numpy()\n else:\n feature['bins'] = [0] * NUM_CLASSES\n feature['components'] = 0\n\n writer.write_feature(feature)\n\ndef get_img_label_paths(images_path, labels_path):\n res = []\n for dir_entry in os.listdir(images_path):\n if os.path.isfile(os.path.join(images_path, dir_entry)):\n file_name, _ = os.path.splitext(dir_entry)\n res.append((os.path.join(images_path, file_name + \".tif\"),\n os.path.join(labels_path, file_name + \".png\")))\n return res\n\ndef main(data_dir):\n assert FLAGS.num_classes\n global NUM_CLASSES\n NUM_CLASSES = FLAGS.num_classes\n\n FLAGS.num_folds = FLAGS.num_folds_\n FLAGS.seed = FLAGS.seed_\n \n np.random.seed(FLAGS.seed)\n image_dir = f'{FLAGS.in_dir}/{FLAGS.mark}/image'\n label_dir = f'{FLAGS.in_dir}/{FLAGS.mark}/label'\n \n print(image_dir, label_dir)\n global imgs\n if FLAGS.mark == 'train':\n imgs = get_img_label_paths(image_dir, label_dir)\n np.random.shuffle(imgs)\n else:\n imgs = glob.glob(f'{image_dir}/*')\n print(imgs[0], len(imgs))\n\n if FLAGS.debug:\n build_features(0)\n else:\n with Pool(FLAGS.num_records) as p:\n p.map(build_features, range(FLAGS.num_records))\n\nif __name__ == '__main__':\n flags.DEFINE_string('in_dir', '../input', '')\n flags.DEFINE_string('out_dir', '../input', '')\n flags.DEFINE_string('mark', 'train', 'train or test')\n flags.DEFINE_integer('num_classes', None, '')\n flags.DEFINE_integer('num_records', 30, '6 gpu to infer')\n flags.DEFINE_integer('num_folds_', 10, '')\n flags.DEFINE_integer('seed_', 12345, '')\n flags.DEFINE_string('records_name', 'tfrecords', '')\n flags.DEFINE_bool('small', False, '')\n flags.DEFINE_integer('data_version', 2, '1 初赛 2 复赛')\n \n app.run(main) \n","sub_path":"projects/ai/suichang/prepare/gen-records-seg.py","file_name":"gen-records-seg.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473335604","text":"import random\nimport pygame\n\nBLACK = (0,0,0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\npygame.init()\nsize = (700, 500)\nscreen = pygame.display.set_mode(size)\n\n\nclass Ball(object):\n def __init__(self, screen, radius, x, y):\n self.__screen = screen\n self._radius = radius\n self._xLoc = x\n self._yLoc = y\n self.__xVel = 7\n self.__yVel = 2\n w, h = pygame.display.get_surface().get_size()\n self.__width = w\n self.__height = h\n\n def getXVel(self):\n return self.__xVel\n\n def getYVel(self):\n return self.__yVel\n\n def draw(self):\n pygame.draw.circle(screen, (255, 0, 0),\n (self._xLoc, self._yLoc), self._radius)\n\n def update(self, paddle, brickwall):\n self._xLoc += self.__xVel\n self._yLoc += self.__yVel\n if self._xLoc <= self._radius:\n self.__xVel *= -1\n elif self._xLoc >= self.__width - self._radius:\n self.__xVel *= -1\n if self._yLoc <= self._radius:\n self.__yVel *= -1\n elif self._yLoc >= self.__width - 
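# The TFRecord writer above stores a per-image class histogram with
# np.bincount(..., minlength=NUM_CLASSES) so that every class keeps a slot
# even when it never appears in the mask. The same call in miniature:
import numpy as np

mask = np.array([[0, 0, 2], [2, 2, 1]])
bins = np.bincount(mask.reshape(-1), minlength=8)
print(bins)  # [2 1 3 0 0 0 0 0] -- pixel counts for classes 0..7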
self._radius:\n return True\n if brickwall.collide(self):\n self.__yVel *= -1\n paddleY = paddle._yLoc\n paddleW = paddle._width\n paddleH = paddle._height\n paddleX = paddle._xLoc\n ballX = self._xLoc\n ballY = self._yLoc\n if ((ballX + self._radius) >= paddleX and ballX <= (paddleX + paddleW)) \\\n and ((ballY + self._radius) >= paddleY and ballY <= (paddleY + paddleH)):\n self.__yVel *= -1\n return False\n\n\n\nclass Paddle(object):\n def __init__(self, screen, width, height, x, y):\n self.__screen = screen\n self._width = width\n self._height = height\n self._xLoc = x\n self._yLoc = y\n w, h = pygame.display.get_surface().get_size()\n self.__W = w\n self.__H = h\n\n def draw(self):\n pygame.draw.rect(screen, (255, 255, 255), (self._xLoc,\n self._yLoc, self._width, self._height), 0)\n\n def update(self):\n x, y = pygame.mouse.get_pos()\n if x >= 0 and x <= (self.__W - self._width):\n self._xLoc = x\n\n\nclass Brick(pygame.sprite.Sprite):\n def __init__(self, screen, width, height, x, y):\n self.__screen = screen\n self._width = width\n self._height = height\n self._xLoc = x\n self._yLoc = y\n w, h = pygame.display.get_surface().get_size()\n self.__W = w\n self.__H = h\n self.__isInGroup = False\n\n def draw(self):\n pygame.draw.rect(screen, (255 ,177, 237), (self._xLoc,\n self._yLoc, self._width, self._height), 0)\n\n def add(self, group):\n group.add(self)\n self.__isInGroup = True\n\n def remove(self, group):\n group.remove(self)\n self.__isInGroup = False\n\n def alive(self):\n return self.__isInGroup\n\n def collide(self, ball):\n brickX = self._xLoc\n brickY = self._yLoc\n brickW = self._width\n brickH = self._height\n ballX = ball._xLoc\n ballY = ball._yLoc\n ballXVel = ball.getXVel()\n ballYVel = ball.getYVel()\n if ((ballX + ball._radius) >= brickX and (ballX + ball._radius) <= (brickX + brickW)) \\\n and ((ballY - ball._radius) >= brickY and (ballY - ball._radius)\n <= (brickY + brickH)):\n return True\n else:\n return False\n\n\n\n\nclass BrickWall(pygame.sprite.Group):\n def __init__(self, screen, x, y, width, height):\n self.__screen = screen\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self._bricks = []\n X = x\n Y = y\n for i in range(3):\n for j in range(4):\n self._bricks.append(Brick(screen, width, height, X, Y))\n X += width + (width / 7.0)\n Y += height + (height / 7.0)\n X = x\n\n def add(self, brick):\n self._bricks.append(brick)\n\n def remove(self, brick):\n self._bricks.remove(brick)\n\n def draw(self):\n for brick in self._bricks:\n if brick != None:\n brick.draw()\n\n def update(self, ball):\n for i in range(len(self._bricks)):\n if ((self._bricks[i] != None) and self._bricks[i].collide(ball)):\n self._bricks[i] = None\n for brick in self._bricks:\n if brick == None:\n self._bricks.remove(brick)\n\n def hasWin(self):\n return len(self._bricks) == 0\n\n def collide(self, ball):\n for brick in self._bricks:\n if brick.collide(ball):\n return True\n return False\n\n\nball = Ball(screen, 25, random.randint(1, 700), 250)\npaddle = Paddle(screen, 100, 20, 250, 450)\nbrickWall = BrickWall(screen, 25, 25, 150, 50)\nisGameOver = False\ngameStatus = True\nscore = 0\npygame.display.set_caption(\"Brickout-game\")\ndone = False\nclock = pygame.time.Clock()\npygame.font.init()\nmgGameOver = pygame.font.SysFont('courier', 32)\nmgWin = pygame.font.SysFont('courier', 32)\nmgScore = pygame.font.SysFont('courier', 32)\ntextsurfaceGameOver = mgGameOver.render('Game Over!', False, WHITE)\ntextsurfaceWin = mgWin.render(\"You win!\", False, 
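# The paddle and brick hit tests above compare the ball centre (offset by its
# radius) against box corners, which can miss grazing contact on the box
# sides. The standard circle-vs-rectangle test clamps the centre onto the box
# first -- a sketch, independent of the classes above:
def circle_hits_rect(cx, cy, radius, rx, ry, rw, rh):
    """True if a circle at (cx, cy) overlaps an axis-aligned box."""
    nearest_x = max(rx, min(cx, rx + rw))  # closest box point to the centre
    nearest_y = max(ry, min(cy, ry + rh))
    dx, dy = cx - nearest_x, cy - nearest_y
    return dx * dx + dy * dy <= radius * radius

print(circle_hits_rect(0, 0, 5, 3, 3, 10, 10))  # True: corner within reach
print(circle_hits_rect(0, 0, 5, 4, 4, 10, 10))  # False: just out of reach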
WHITE)\ntextsurfaceScore = mgScore.render(\"score: \" + str(score), False, WHITE)\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n screen.fill(BLACK)\n if gameStatus:\n brickWall.draw()\n if brickWall.collide(ball):\n score += 10\n textsurfaceScore = mgScore.render(\n \"score: \" + str(score), False, WHITE)\n screen.blit(textsurfaceScore, (300, 0))\n brickWall.update(ball)\n paddle.draw()\n paddle.update()\n if ball.update(paddle, brickWall):\n isGameOver = True\n gameStatus = False\n if brickWall.hasWin():\n gameStatus = False\n ball.draw()\n else:\n if isGameOver:\n screen.blit(textsurfaceGameOver, (0, 0))\n textsurfaceScore = mgScore.render(\n \"score: \" + str(score), False, WHITE)\n screen.blit(textsurfaceScore, (300, 0))\n elif brickWall.hasWin():\n screen.blit(textsurfaceWin, (0, 0))\n textsurfaceScore = mgScore.render(\n \"score: \" + str(score), False, WHITE)\n screen.blit(textsurfaceScore, (300, 0))\n pygame.display.flip()\n clock.tick(60)\npygame.quit()\n","sub_path":"Brickout.py","file_name":"Brickout.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"268699816","text":"# WIP\n# https://leetcode.com/problems/alien-dictionary/description/\n# Refer https://leetcode.co m/problems/alien-dictionary/discuss/70115/3ms-Clean-Java-Solution-(DFS)\n\nclass Solution:\n N = 26 # Length of alphabet\n res = \"\" # Result of topological sort\n\n def get_adjacency(self, words, visited):\n adj = [[False for j in range(self.N)] for i in range(self.N)]\n for i in range(len(words)):\n for c in words[i]:\n visited[ord(c)-ord('a')] = 0 # Mark that character exists\n if i > 0: # From the second word onwards\n w1, w2 = words[i-1], words[i]\n l = min(len(w1), len(w2)) # Useful length is minimum of length of both words\n for j in range(l):\n c1, c2 = w1[j], w2[j] # c1 comes before c2\n if c1 is not c2:\n adj[ord(c1)-ord('a')][ord(c2)-ord('a')] = True # Topological path exists from c1 --> c2\n return adj\n\n def display_adj(self, adj):\n for i in range(self.N):\n for j in range(self.N):\n if adj[i][j]:\n print(\"{0}-->{1}\".format(chr(i + ord('a')), chr(j + ord('a'))))\n\n # def dfs(self, adj, visited, result, i):\n def dfs(self, adj, visited, i):\n print(\"dfs-ing at \",chr(i + ord('a')))\n visited[i] = 1\n for j in range(self.N):\n if adj[i][j]:\n if visited[j] == 1:\n print(\"Cycle detected at \",chr(j + ord('a')))\n return False # Cycle detected\n elif visited[j] == 0:\n # if not self.dfs(adj, visited, result, j):\n if not self.dfs(adj, visited, j):\n return False\n visited[i] = 2\n self.res += chr(i + ord('a'))\n print(\"Result is \", self.res)\n return True\n\n def alienOrder(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: str\n \"\"\"\n visited = [-1 for _ in range(self.N)] # -1 : Doesnt exist, 0 : Exists & Not Visited, 1 -Visiting, 2 - Visited\n adj = self.get_adjacency(words, visited)\n self.display_adj(adj)\n # res = \"\"\n for i in range(self.N):\n if visited[i] == 0:\n # if not self.dfs(adj, visited, res, i):\n if not self.dfs(adj, visited, i):\n return \"\"\n print(\"Final result \",self.res)\n return self.res[::-1] \n \ns = Solution()\n# a1 = [\n# \"wrt\",\n# \"wrf\",\n# \"er\",\n# \"ett\",\n# \"rftt\"\n# ]\n# print(s.alienOrder(a1))\n# print()\n# a2 = [\n# \"z\",\n# \"x\"\n# ]\n# print(s.alienOrder(a2))\n# print()\n# a3 = [\n# \"z\",\n# \"x\",\n# \"z\"\n# ]\n# print(s.alienOrder(a3))\nt1 = [\n \"za\",\n \"zb\",\n \"ca\",\n 
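# The alien-dictionary solution above DFS-post-orders the letters and then
# reverses the result; Kahn's algorithm produces the order directly and
# reports a cycle whenever the queue drains early. A compact sketch (the
# prefix-validity check from the full problem, e.g. "abc" before "ab", is
# omitted here):
from collections import deque

def alien_order_kahn(words):
    nodes = {c for w in words for c in w}
    succ = {c: set() for c in nodes}
    indeg = {c: 0 for c in nodes}
    for w1, w2 in zip(words, words[1:]):
        for a, b in zip(w1, w2):
            if a != b:
                if b not in succ[a]:
                    succ[a].add(b)
                    indeg[b] += 1
                break
    queue = deque(c for c in nodes if indeg[c] == 0)
    order = []
    while queue:
        c = queue.popleft()
        order.append(c)
        for nxt in succ[c]:
            indeg[nxt] -= 1
            if indeg[nxt] == 0:
                queue.append(nxt)
    return "".join(order) if len(order) == len(nodes) else ""  # "" on cycle

print(alien_order_kahn(["wrt", "wrf", "er", "ett", "rftt"]))  # e.g. wertf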
\"cb\"\n]\nprint(s.alienOrder(t1))","sub_path":"Leetcode/alien_dictionary.py","file_name":"alien_dictionary.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"426883092","text":"# BOJ 2294 - 동전 2\nimport sys\nr = sys.stdin.readline\n\nN, K = map(int, r().split())\ncoins = sorted([int(r()) for _ in range(N)])\n\narr = [10001] * (K+1)\narr[0] = 0\n\nfor i in range(N):\n for j in range(coins[i], K+1):\n arr[j] = min(arr[j], arr[j-coins[i]] + 1)\n\narr[-1] = arr[-1] if arr[-1] != 10001 else -1\nprint(arr[-1])\n","sub_path":"baekjoon/2294_coin2.py","file_name":"2294_coin2.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"273436439","text":"\n\nfrom xai.brain.wordbase.nouns._tinderbox import _TINDERBOX\n\n#calss header\nclass _TINDERBOXES(_TINDERBOX, ):\n\tdef __init__(self,): \n\t\t_TINDERBOX.__init__(self)\n\t\tself.name = \"TINDERBOXES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"tinderbox\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tinderboxes.py","file_name":"_tinderboxes.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"225020940","text":"import pandas as pd\nimport numpy as np\nfrom datetime import date\n\nInterest_Rate = 0.03875/12\nMonths = 159.17\nPrincipal = 31058.7\nAddl_Principal = 0\nstart_date = (date(2017,8,21))\n\nper = 120\n\nprint(\"Total Payment:\")\npmt = np.pmt(Interest_Rate, Months, Principal)\nprint(pmt)\n\nprint(\"Interest Payment:\")\nipmt = np.ipmt(Interest_Rate, per, Months, Principal)\nprint(ipmt)\n\nprint(\"Calculate the Principal:\")\nppmt = np.ppmt(Interest_Rate, per, Months, Principal)\nprint(ppmt)\n\nrng = pd.date_range(start_date, periods=Months, freq='MS')\nrng.name = \"Payment_Date\"\n\ndf = pd.DataFrame(index=rng,columns=['Payment', 'Principal', 'Interest', 'Addl_Principal', 'Balance'], dtype='float')\ndf.reset_index(inplace=True)\ndf.index += 1\ndf.index.name = \"Period\"\n\ndf[\"Payment\"] = np.pmt(Interest_Rate, Months, Principal)\ndf[\"Principal\"] = np.ppmt(Interest_Rate, df.index, Months, Principal)\ndf[\"Interest\"] = np.ipmt(Interest_Rate, df.index, Months, Principal)\ndf[\"Addl_Principal\"] = -Addl_Principal\ndf = df.round(2)\n\nprint(df)","sub_path":"scrapwork/annuity.py","file_name":"annuity.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"272187993","text":"# %load q02_teams/build.py\n# default imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n# solution\nlst = []\n\ndef teams(data):\n lst.append(data['info']['teams'])\n # write your code here\n \n return lst[0]\n\nteams(data)\nlst[0]\n\n","sub_path":"q02_teams/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"77450087","text":"import telebot\r\nimport datetime\r\nfrom kunapipy.kundelik import kundelik\r\nimport random\r\nimport requests\r\nimport json\r\nfrom firebase import firebase\r\n\r\n\r\nTOKEN = '792704732:AAHBTUYMzth_hk4gaptyTUUJaA5sxghTrYY'\r\nchat_id = None\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\nfirebase = firebase.FirebaseApplication('https://kundelik-7f9c2.firebaseio.com/', 
None)\r\n\r\n\r\nrandom_answers = ['Бла-бла-бла, не понимаю',\r\n 'Сори, не знаю такой команды',\r\n 'Проверь правописание',\r\n 'И что ты хотел этим сказать?',\r\n 'Непонел']\r\n\r\n\r\n@bot.message_handler(commands=['i_need_help'], content_types=['text'])\r\ndef i_need_help(message):\r\n bot.send_message(message.chat.id, str(message.from_user.first_name) + firebase.get('/i_need_help', ''))\r\n\r\n\r\n@bot.message_handler(commands=['start'], content_types=['text'])\r\ndef welcome(message):\r\n sticker = open('C:\\Kundelik\\hello_sticker.webp', 'rb')\r\n\r\n firebase.put('/' + str(message.from_user.id), 'first_name', message.from_user.first_name)\r\n firebase.put('/' + str(message.from_user.id), 'login', '')\r\n firebase.put('/' + str(message.from_user.id), 'password', '')\r\n firebase.put('/' + str(message.from_user.id), 'user_token', '')\r\n\r\n bot.send_sticker(message.chat.id, sticker)\r\n bot.send_message(message.chat.id, \"Привет, я Oqu_bot! Я умею удаленно работать с системой Kundelik \"\r\n \"и помогу упростить твоё взаимодействие с ней через понятную и \"\r\n \"привычную всем социальную сеть Telegramm. Хочу уточнить, что \"\r\n \"ты можешь использовать команду /summative_marks вместе с предметом, \"\r\n \"чтобы получить оценки именно это данный предмет. Для этого надо в одном \"\r\n \"сообщении набрать команду и через пробел название предмета. Чтобы не ошибиться,\"\r\n \" ты можешь проверить название своих предметов с помощью функции /my_subjects.\"\r\n \" Также, чтобы использовать функцию /login, в том же сообщении ты должен ввести\"\r\n \" свои данные в формате “login:mylogin password:mypassword” через пробел после\"\r\n \" самой команды (в одном сообщении!). Пример - \"\r\n \"«/login login:ivanivanov password:test123».\"\r\n \" Что ж, теперь мы точно знакомы, приятно познакомится! :wave:\")\r\n\r\n\r\n@bot.message_handler(commands=['summative_marks'], content_types=['text'])\r\ndef summative_marks(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['class_id', 'person_id', 'user_token'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id,'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. 
'\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n\r\n class_id = data['class_id']\r\n person_id = data['person_id']\r\n user_token = data['user_token']\r\n\r\n bot.send_message(message.chat.id, message.from_user.id)\r\n bot.send_message(message.chat.id, \"Что ж, посмотрим на твои оценки...\" + '\\n')\r\n\r\n if len(message.text.split()) == 1:\r\n url = 'https://api.kundelik.kz/v2.0/edu-group/' + class_id + '/person/' + person_id + '/criteria-marks'\r\n res = requests.get(url, headers={'Access-Token': user_token}).json()\r\n for subject in res:\r\n bot.send_message(message.chat.id,\r\n get_subject_name(class_id=class_id, user_token=user_token, subject_id=subject['subject']))\r\n if subject['personmarks']:\r\n str_marks = ''\r\n for mark in subject['personmarks'][0]['criteriamarks']:\r\n str_marks += str(mark['value']) + ' '\r\n bot.send_message(message.chat.id, str_marks)\r\n else:\r\n bot.send_message(message.chat.id, 'За этот предмет нет оценок')\r\n\r\n elif len(message.text.split()) == 2:\r\n subject_name = message.text.split()[1]\r\n print(subject_name)\r\n url = 'https://api.kundelik.kz/v2.0/edu-group/' + class_id + '/person/' + person_id + '/criteria-marks'\r\n res = requests.get(url, headers={'Access-Token': user_token}).json()\r\n if not get_subject_id(subject_name=subject_name, user_token=user_token, class_id=class_id):\r\n bot.send_message(message.chat.id, 'Кажется ты неправильно ввел имя предмета, '\r\n 'воспользуйся функцией и попробуй заново.')\r\n return 0\r\n\r\n for subject in res:\r\n if subject['subject'] == get_subject_id(subject_name=subject_name, user_token=user_token, class_id=class_id):\r\n if subject['personmarks']:\r\n str_marks = ''\r\n for mark in subject['personmarks'][0]['criteriamarks']:\r\n str_marks += 'Балл: ' + str(mark['value']) + ' дата: ' + str(mark['date'])[:10] + '\\n'\r\n bot.send_message(message.chat.id, str_marks)\r\n else:\r\n bot.send_message(message.chat.id, 'За этот предмет нет оценок')\r\n\r\n\r\n@bot.message_handler(commands=['schedule'], content_types=['text'])\r\ndef schedule(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['person_id', 'class_id', 'user_token'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. '\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n\r\n person_id = data['person_id']\r\n class_id = data['class_id']\r\n user_token = data['user_token']\r\n\r\n bot.send_message(message.chat.id, \"Что у нас завтра?\" + '\\n')\r\n start_date = str(datetime.date.today() + datetime.timedelta(days=1))\r\n end_date = str(datetime.date.today() + datetime.timedelta(days=2))\r\n url = 'https://api.kundelik.kz/v2.0/persons/' + person_id + '/groups/' + class_id + '/' \\\r\n 'schedules?startDate=' + start_date + '&endDate=' + end_date\r\n schedule = requests.get(url, headers={'Access-Token': user_token}).json()['days'][0]['lessons']\r\n\r\n for lesson in schedule:\r\n bot.send_message(message.chat.id,str(lesson['number']) + ')' + str(lesson['hours']) + ' - ' +\r\n get_lesson_information(user_token=user_token, lesson_id=lesson['id'])['subject']['name'])\r\n\r\n\r\n@bot.message_handler(commands=['my_subjects'], content_types=['text'])\r\ndef my_subjects(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['class_id', 'user_token'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. 
'\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n class_id = data['class_id']\r\n user_token = data['user_token']\r\n\r\n bot.send_message(message.chat.id, \"Список всех предметов твоей школы\" + '\\n')\r\n subjects = get_subject_name(class_id=class_id, user_token=user_token)\r\n bot.send_message(message.chat.id, 'Твои предметы:' + '\\n' + '\\n')\r\n answer = ''\r\n print(subjects)\r\n for subject in subjects:\r\n answer += subject['name'] + '\\n'\r\n bot.send_message(message.chat.id, answer)\r\n\r\n\r\n\r\n@bot.message_handler(commands=['week_grades', 'month_grades', 'day_grades'], content_types=['text'])\r\ndef week_grades(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['school_id', 'user_token', 'person_id'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. '\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n\r\n person_id = data['person_id']\r\n school_id = data['school_id']\r\n user_token = data['user_token']\r\n\r\n bad = False\r\n sat = False\r\n\r\n if message.text == '/week_grades':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=7))\r\n if message.text == '/month_grades':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=30))\r\n if message.text == '/day_grades':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=1))\r\n\r\n end_date = str(datetime.date.today())\r\n current_marks = show_marks_in_period(person_id=person_id, start_date=start_date,\r\n school_id=school_id, end_date=end_date, user_token=user_token)\r\n for mark in current_marks:\r\n url = 'https://api.kundelik.kz/v2.0/lessons/' + mark['lesson_str']\r\n subject = requests.get(url, headers={'Access-Token': user_token}).json()\r\n if str(mark['value']) == 'ПЛХ':\r\n bad = True\r\n if str(mark['value']) == 'УДВ':\r\n sat = True\r\n marks_answer = str(subject['subject']['name']) + ' - ' + str(mark['value'])\r\n bot.send_message(message.chat.id, marks_answer)\r\n\r\n if bad:\r\n bot.send_message(message.chat.id, 'Похоже на то, что у тебя есть двойки! Но не расстраивайся, '\r\n 'все твои пятерки еще впереди, надо лишь приложить немного усилий!')\r\n\r\n if not bad and not sat and current_marks:\r\n bot.send_message(message.chat.id, 'Похоже на то, что у тебя ни одной тройки и двойки! '\r\n 'Машинка, что сказать. Так держать.')\r\n if not current_marks:\r\n bot.send_message(message.chat.id, 'Ты еще не успел получить ни одной оценки')\r\n\r\n\r\n@bot.message_handler(commands=['week_attend', 'month_attend', 'day_attend'], content_types=['text'])\r\ndef attendance(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['person_id', 'user_token'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. 
'\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n\r\n person_id = data['person_id']\r\n user_token = data['user_token']\r\n\r\n if message.text == '/week_attend':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=7))\r\n if message.text == '/month_attend':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=30))\r\n if message.text == '/day_attend':\r\n start_date = str(datetime.date.today() - datetime.timedelta(days=1))\r\n\r\n end_date = str(datetime.date.today())\r\n current_attendance = show_attendance_in_period(person_id=person_id, start_date=start_date,\r\n end_date=end_date, user_token=user_token)\r\n try:\r\n logEntries = current_attendance['logEntries']\r\n\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Нет записей за этот период')\r\n return 0\r\n\r\n if not logEntries:\r\n bot.send_message(message.chat.id, 'Нет записей за этот период')\r\n else:\r\n for note in logEntries:\r\n status = note['status']\r\n subject = get_lesson_information(lesson_id=str(note['lesson']), user_token=user_token)['subject']['name']\r\n if status == 'Pass':\r\n bot.send_message(message.chat.id, 'прогулял урок: ' + subject)\r\n if status == 'Absent':\r\n bot.send_message(message.chat.id, 'пропустил урок: ' + subject)\r\n if status == 'NotSet':\r\n pass\r\n if status == 'Ill':\r\n bot.send_message(message.chat.id, 'не присутствовал по болезни: ' + subject)\r\n if status == 'Late':\r\n bot.send_message(message.chat.id, 'опоздал на урок: ' + subject)\r\n\r\n\r\n@bot.message_handler(commands=['class_average_mark'], content_types=['text'])\r\ndef class_average_mark(message):\r\n try:\r\n data = get_data_from_db(tele_id=message.from_user.id, data_type=['class_id', 'user_token'])\r\n except Exception as e:\r\n bot.send_message(message.chat.id, 'Упс! Возможно ты не вошел в систему и мы с тобой еще не знакомы. 
'\r\n 'Воспользуйся функцией /login и попробуй снова!')\r\n\r\n class_id = data['class_id']\r\n user_token = data['user_token']\r\n\r\n bot.send_message(message.chat.id, 'Будет показан средний балл класса с начала года то сегодняшнего дня')\r\n url = 'https://api.kundelik.kz/v2.0/edu-groups/' + class_id + '/avg-marks/2020-04-01/2020-05-01'\r\n average_mark = requests.get(url, headers={'Access-Token': user_token}).json()\r\n all_marks = []\r\n for student in average_mark:\r\n for subject in student['per-subject-averages']:\r\n all_marks.append(float(subject['avg-mark-value'].replace(',', '.',)))\r\n answer = sum(all_marks) / len(all_marks)\r\n url = 'https://api.kundelik.kz/v2.0/edu-groups/1565042653527550944'\r\n class_name = (requests.get(url, headers={'Access-Token': user_token}).json())['name']\r\n bot.send_message(message.chat.id, 'Средний балл класса ' + str(class_name) + ' равен ' + str(answer))\r\n\r\n\r\n@bot.message_handler(commands=['login'], content_types=['text'])\r\ndef login(message):\r\n if len(message.text.split()) == 3 and (message.text.split())[0] == '/login':\r\n login = (message.text.split())[1][6:]\r\n password = (message.text.split())[2][9:]\r\n try:\r\n\r\n dn = kundelik.KunAPI(login=login, password=password)\r\n firebase.put('/' + str(message.from_user.id), 'login', login)\r\n firebase.put('/' + str(message.from_user.id), 'password', password)\r\n user_token = dn.get_token(login=login, password=password)\r\n firebase.put('/' + str(message.from_user.id), 'user_token', user_token)\r\n user_id = str(dn.get_info()['id'])\r\n firebase.put('/' + str(message.from_user.id), 'user_id', user_id)\r\n person_id = str(dn.get_info()['personId'])\r\n firebase.put('/' + str(message.from_user.id), 'person_id', person_id)\r\n data = get_user_information(user_token=user_token, user_id=user_id)\r\n school_id = str(data['schools'][0]['id'])\r\n firebase.put('/' + str(message.from_user.id), 'school_id', school_id)\r\n class_id = data['eduGroups'][0]['id_str']\r\n firebase.put('/' + str(message.from_user.id), 'class_id', class_id)\r\n bot.send_message(message.chat.id, text=\"Вход успешно произведен! 
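# Nearly every handler above repeats
# requests.get(url, headers={'Access-Token': ...}).json(); a tiny wrapper
# removes the duplication. A sketch against the same API base (the call at
# the bottom is illustrative, reusing names from the bot code):
import requests

API_BASE = "https://api.kundelik.kz/v2.0"

def api_get(path, token):
    """GET an API path with the Access-Token header and decode the JSON."""
    response = requests.get(API_BASE + path, headers={"Access-Token": token})
    response.raise_for_status()
    return response.json()

# e.g. inside a handler: lesson = api_get("/lessons/" + lesson_id, user_token)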
Что ты хочешь сделать дальше?\")\r\n\r\n except Exception as e:\r\n firebase.put('/' + str(message.from_user.id), 'login', '')\r\n firebase.put('/' + str(message.from_user.id), 'password', '')\r\n bot.send_message(message.chat.id, e)\r\n\r\n elif message.text == 'Спасибо!':\r\n bot.send_message(message.chat.id, 'Обращайся!')\r\n else:\r\n bot.send_message(message.chat.id, random.choice(random_answers))\r\n\r\ndef get_lesson_information(lesson_id, user_token):\r\n url = 'https://api.kundelik.kz/v2.0/lessons/' + str(lesson_id)\r\n res = requests.get(url, headers={'Access-Token': user_token}).json()\r\n\r\n return res\r\n\r\n\r\ndef get_user_information(user_token, user_id):\r\n url = 'https://api.kundelik.kz/v2.0/users/' + str(user_id) + '/context'\r\n res = requests.get(url, headers={'Access-Token': user_token})\r\n\r\n return json.loads(res.text)\r\n\r\n\r\ndef show_marks_in_period(person_id, school_id, start_date, end_date, user_token):\r\n url_mark = 'https://api.kundelik.kz/v2.0/persons/' + person_id + '/schools' \\\r\n '/' + school_id + '/marks/' + start_date + '/' \\\r\n + end_date\r\n res = requests.get(url_mark, headers={'Access-Token': user_token}).json()\r\n\r\n return res\r\n\r\n\r\ndef show_attendance_in_period(person_id, start_date, end_date, user_token):\r\n url = 'https://api.kundelik.kz/v2.0/persons/' + person_id + '/lesson-log-entries?startDate=' \\\r\n + start_date + '&endDate=' + end_date\r\n res = requests.get(url, headers={'Access-Token': user_token}).json()\r\n\r\n return res\r\n\r\n\r\ndef get_subject_name(class_id, user_token, subject_id=0):\r\n url = 'https://api.kundelik.kz/v2.0/edu-groups/' + class_id + '/subjects'\r\n res = requests.get(url, headers={'Access-Token': str(user_token)}).json()\r\n if subject_id == 0:\r\n return res\r\n for subject in res:\r\n if subject['id'] == subject_id:\r\n return subject['name']\r\n\r\n return None\r\n\r\n\r\ndef get_subject_id(subject_name, user_token, class_id):\r\n url = 'https://api.kundelik.kz/v2.0/edu-groups/' + class_id + '/subjects'\r\n res = requests.get(url, headers={'Access-Token': user_token}).json()\r\n for subject in res:\r\n if subject['name'] == subject_name:\r\n return subject['id']\r\n\r\n return False\r\n\r\n\r\ndef get_data_from_db(tele_id, data_type=None):\r\n if not data_type:\r\n return firebase.get('/' + str(tele_id), '')\r\n data = {}\r\n for name in data_type:\r\n data[name] = firebase.get('/' + str(tele_id), name)\r\n\r\n return data\r\n\r\n\r\nbot.polling(none_stop=True, timeout=100)\r\n\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"88280684","text":"import json\n\ndef items():\n with open('./database/items.json', encoding='utf-8-sig') as json_file:\n items = json.load(json_file)\n\n with open('./database/item_names.json', encoding='utf-8') as json_file:\n item_names = json.load(json_file)\n\n for key, value in items.items():\n if key in item_names.keys():\n value['name'] = item_names[key]\n \n if \"U\" in items[key].keys():\n items[key][\"npcs\"] = [int(unit_key) for unit_key in items[key][\"U\"].keys()]\n del items[key][\"U\"]\n\n if \"O\" in items[key].keys():\n items[key][\"objects\"] = [int(unit_key) for unit_key in items[key][\"O\"].keys()]\n del items[key][\"O\"]\n\n if \"V\" in items[key].keys():\n items[key][\"vendors\"] = [int(unit_key) for unit_key in items[key][\"V\"].keys()]\n del items[key][\"V\"]\n\n if \"R\" in 
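# items() above renames the compact single-letter keys ("U", "O", "V") to
# descriptive ones and int-casts the contained ids; that reshaping step on
# its own, applied to a toy record:
item = {"U": {"448": 1, "449": 1}, "V": {"777": 2}}
rename = {"U": "npcs", "O": "objects", "V": "vendors"}
for short, descriptive in rename.items():
    if short in item:
        item[descriptive] = [int(key) for key in item.pop(short)]
print(item)  # {'npcs': [448, 449], 'vendors': [777]}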
items[key].keys():\n del items[key][\"R\"]\n \n\n with open('./src/resources/items.json', 'w') as json_file: \n json.dump(items, json_file, indent=4)\n\n\ndef objects():\n with open('./database/objects.json', encoding='utf-8') as json_file:\n objects = json.load(json_file)\n\n with open('./database/object_names.json', encoding='utf-8') as json_file:\n object_names = json.load(json_file)\n\n new_objects = dict()\n for key, value in objects.copy().items():\n if \"coords\" in value.keys() and key in object_names.keys():\n new_objects[key] = dict([\n (\"name\", object_names[key]),\n (\"locations\", [dict([(\"coords\", dict([(\"x\", coord[0]), (\"y\", coord[1])])), (\"zone\", coord[2])]) for coord in value['coords']])\n ])\n\n with open('./src/resources/objects.json', 'w') as json_file: \n json.dump(new_objects, json_file, indent=4)\n\n\ndef quests():\n with open('./database/quests.json') as json_file:\n quests = json.load(json_file)\n\n with open('./database/quest_names.json') as json_file:\n quest_names = json.load(json_file)\n\n new_quests = dict()\n\n for key, value in quests.items():\n if key in quest_names.keys():\n new_quests[key] = dict([(\"name\", quest_names[key][\"T\"])])\n\n if \"start\" in value.keys():\n if \"U\" in value[\"start\"].keys():\n new_quests[key][\"start\"] = dict([(\"id\", value[\"start\"][\"U\"][0]), (\"type\", \"npc\")])\n\n elif \"O\" in value[\"start\"].keys():\n new_quests[key][\"start\"] = dict([(\"id\", value[\"start\"][\"O\"][0]), (\"type\", \"object\")])\n\n elif \"start\" not in value.keys() and \"I\" in value[\"obj\"].keys():\n new_quests[key][\"start\"] = dict([(\"id\", value[\"obj\"][\"I\"][0]), (\"type\", \"item\")])\n\n if \"end\" in value.keys():\n if \"U\" in value[\"end\"].keys():\n new_quests[key][\"end\"] = dict([(\"id\", value[\"end\"][\"U\"][0]), (\"type\", \"npc\")])\n \n elif \"O\" in value[\"end\"].keys():\n new_quests[key][\"end\"] = dict([(\"id\", value[\"end\"][\"O\"][0]), (\"type\", \"object\")])\n\n if \"start\" in value.keys() and \"end\" in value.keys() and \"obj\" in value.keys():\n if \"I\" in value[\"obj\"].keys():\n new_quests[key][\"objective\"] = [dict([(\"id\", item), (\"type\", \"item\")]) for item in value[\"obj\"][\"I\"]]\n\n if \"U\" in value[\"obj\"].keys():\n new_quests[key][\"objective\"] = [dict([(\"id\", item), (\"type\", \"npc\")]) for item in value[\"obj\"][\"U\"]]\n\n\n \n\n with open('./src/resources/quests.json', 'w') as json_file:\n json.dump(new_quests, json_file, indent=4)\n\n\ndef units():\n with open('./database/units.json', encoding='utf-8') as json_file:\n units = json.load(json_file)\n\n with open('./database/unit_names.json', encoding='utf-8') as json_file:\n unit_names = json.load(json_file)\n\n new_units = dict()\n for key, value in units.copy().items():\n if \"coords\" in value.keys() and key in unit_names.keys():\n new_units[key] = dict([\n (\"name\", unit_names[key]),\n (\"locations\", [dict([(\"coords\", dict([(\"x\", coord[0]), (\"y\", coord[1])])), (\"zone\", coord[2])]) for coord in value['coords']])\n ])\n\n\n with open('./src/resources/units.json', 'w') as json_file: \n json.dump(new_units, json_file, indent=4)\n\n\nif __name__ == \"__main__\":\n items()\n objects()\n quests()\n units()","sub_path":"scripts/format_database.py","file_name":"format_database.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541448000","text":"import testinfra.utils.ansible_runner\nimport pytest\nimport 
os\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ[\"MOLECULE_INVENTORY_FILE\"]\n).get_hosts(\"all\")\n\n\ndef test_verify_conf_file(host):\n c_file = host.file(\"/etc/aerospike/aerospike.conf\")\n assert c_file.exists\n assert c_file.user == \"root\"\n assert c_file.group == \"root\"\n\n\ndef test_verify_log_file(host):\n l_file = host.file(\"/var/log/aerospike/aerospike.log\")\n assert l_file.exists\n\n\ndef test_aerospike_service(host):\n c_service = host.service(\"aerospike\")\n assert c_service.is_running\n assert c_service.is_enabled\n\n\ndef test_aerospike_cluster_size(host):\n hostname = host.run(\"hostname\").stdout\n if \"aerospike-node-\" in hostname:\n # Mesh cluster\n cluster_info = host.run(\"asinfo --no-config-file -v statistics\").stdout\n assert \"cluster_size=3;\" in cluster_info\n else:\n # Multicast cluster\n cluster_info = host.run(\"asinfo --no-config-file -v statistics\").stdout\n assert \"cluster_size=1;\" in cluster_info\n\n\n@pytest.mark.parametrize(\n \"nodename,local_address\",\n [\n # All Aerospike nodes\n (\"aerospike-\", \"0.0.0.0:3000\"), # Service\n (\"aerospike-\", \"0.0.0.0:3001\"), # Fabric\n (\"aerospike-\", \"0.0.0.0:3003\"), # Info\n # Only Aerospike mesh cluster nodes\n (\"aerospike-node-\", \"0.0.0.0:3002\"), # Mesh Heartbeat\n ],\n)\ndef test_aerospike_listening_ports(host, nodename, local_address):\n listening_ports = host.run(\"netstat -ant\").stdout\n if nodename in host.run(\"hostname\").stdout:\n assert local_address in listening_ports\n\n\n@pytest.mark.parametrize(\n \"nodename,teststring\",\n [\n # All nodes\n (\"aerospike\", \"file /var/log/aerospike/aerospike.log\"),\n (\"aerospike\", \"service-threads 2\"),\n (\"aerospike\", \"proto-fd-max 15000\"),\n (\"aerospike\", \"proto-fd-idle-ms 60000\"),\n (\"aerospike\", \"high-water-memory-pct 60\"),\n (\"aerospike\", \"high-water-disk-pct 50\"),\n (\"aerospike\", \"nsup-period 30m\"),\n (\"aerospike\", \"replication-factor 2\"),\n (\"aerospike\", \"interval 250\"),\n (\"aerospike\", \"timeout 10\"),\n (\"aerospike\", \"paxos-single-replica-limit 1\"),\n # Multicast nodes\n (\"aerospike-multicast-node\", \"file /opt/aerospike/data/file1\"),\n (\"aerospike-multicast-node\", \"data-in-memory false\"),\n (\"aerospike-multicast-node\", \"scheduler-mode noop\"),\n (\"aerospike-multicast-node\", \"write-block-size 128K\"),\n (\"aerospike-multicast-node\", \"mode multicast\"),\n (\"aerospike-multicast-node\", \"port 9917\"),\n (\"aerospike-multicast-node\", \"filesize 2G\"),\n (\"aerospike-multicast-node\", \"default-ttl 30d\"),\n (\"aerospike-multicast-node\", \"multicast-group 239.1.99.2\"),\n (\"aerospike-multicast-node\", \"storage-engine device\"),\n # Mesh nodes\n (\"aerospike-node-\", \"mode mesh\"),\n (\"aerospike-node-\", \"port 3002\"),\n (\"aerospike-node-\", \"address any\"),\n (\"aerospike-node-\", \"mesh-seed-address-port \"),\n (\"aerospike-node-\", \"default-ttl 4d\"),\n (\"aerospike-node-\", \"storage-engine memory\"),\n ],\n)\ndef test_aerospike_config(host, teststring, nodename):\n c_file = host.file(\"/etc/aerospike/aerospike.conf\")\n if nodename in host.run(\"hostname\").stdout:\n assert c_file.contains(teststring)\n\n\ndef test_aerospike_access_port(host):\n ip_address = host.run(\"hostname -I\").stdout\n c_file = host.file(\"/etc/aerospike/aerospike.conf\")\n assert c_file.contains(\"access-address \" + 
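# The aerospike checks above lean on pytest.mark.parametrize to expand one
# test body into a collected case per (nodename, teststring) tuple. The
# mechanics in miniature -- three tuples become three independent tests:
import pytest

@pytest.mark.parametrize(
    "value,expected",
    [
        (2, 4),
        (3, 9),
        (4, 16),
    ],
)
def test_square(value, expected):
    assert value * value == expected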
ip_address)\n","sub_path":"molecule/default/tests/test_aerospike.py","file_name":"test_aerospike.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"330250419","text":"from datetime import datetime, timedelta\nfrom typing import List\n\nfrom model.City import City\n\n\ndef customize(axis, **kwargs):\n axis.clear()\n axis.set_title(kwargs['title'])\n axis.tick_params(labelsize=6)\n axis.set_ylabel(kwargs['y_label'])\n return axis\n\n\ndef plot_temperature(axis, forecast_data):\n temperature = [forecast.temp for forecast in forecast_data]\n x = [data.datetime_txt[11:16] for data in forecast_data]\n axis = customize(axis, title=\"Temperature\", y_label=\"°C\")\n axis.plot(x, temperature, '.-', color=\"red\")\n\n\ndef plot_pressure(axis, forecast_data):\n pressure = [forecast.grnd_level for forecast in forecast_data]\n x = [data.datetime_txt[11:16] for data in forecast_data]\n axis = customize(axis, title=\"Pressure\", y_label=\"hPa\")\n axis.set_ylim([min(pressure), max(pressure) + 1])\n axis.plot(x, pressure, '.-', color=\"yellow\")\n d = [min(pressure)] * len(pressure)\n axis.fill_between(x, pressure, where=pressure >= d, interpolate=True, color=\"yellow\")\n\n\ndef plot_humidity(axis, forecast_data):\n humidity = [forecast.humidity for forecast in forecast_data]\n x = [data.datetime_txt[11:16] for data in forecast_data]\n axis = customize(axis, title=\"Humidity\", y_label=\"%\")\n axis.set_ylim([min(humidity) - 10, 100])\n axis.plot(x, humidity, '.-')\n d = [min(humidity)] * len(humidity)\n axis.fill_between(x, humidity, where=humidity >= d, interpolate=True)\n\n\ndef plot_cloud(axis, forecast_data):\n cloud = [forecast.cloud_percentage for forecast in forecast_data]\n x = [data.datetime_txt[11:16] for data in forecast_data]\n axis = customize(axis, title=\"Clouds\", y_label=\"%\")\n axis.plot(x, cloud, '.-', color=\"gray\")\n axis.set_ylim([0, 100])\n d = [0] * len(cloud)\n axis.fill_between(x, cloud, where=cloud >= d, interpolate=True, color=\"gray\")\n\n\ndef plot_rain(axis, forecast_data):\n rain = [forecast.rain_in_last_3_hours for forecast in forecast_data]\n x = [data.datetime_txt[11:16] for data in forecast_data]\n axis = customize(axis, title=\"Rain\", y_label=\"mm\")\n axis.bar(x, rain, width=1)\n\n\ndef plot2D(self):\n cities: List[City] = self.adapter.get_basic_forecast_data()\n city_id = [city.city_id for city in cities if city.name == self.city_combo_box.currentText()][0]\n date = self.date_combo_box.currentText()\n begin = datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\")\n\n city = [x for x in cities if x.city_id == city_id][0]\n forecast_data = city.forecast.get_forecast(begin - timedelta(hours=3),\n begin + timedelta(days=1))\n\n self.lat_lon_label.setText(f\"{city.name}: latitude {city.latitude:4.4f}, longitude {city.longitude:4.4f}.\")\n self.title_label.setText(\"WEATHER DETAILS\")\n self.temperature_label.setText(f\"Temperature: {forecast_data[0].temp:5.2f}°C,\"\n f\" max: {forecast_data[0].temp_max:5.2f}°C,\"\n f\" min: {forecast_data[0].temp_min:5.2f}°C.\")\n self.pressure_label.setText(f\"Pressure: {forecast_data[0].grnd_level:5.2f} hPa,\"\n f\" {forecast_data[0].sea_level:5.2f}hPa.\")\n self.precipitation_label.setText(f\"In next 3 hours: rain - {forecast_data[1].rain_in_last_3_hours:5.2f} mm,\"\n f\" snow - {forecast_data[1].snow_in_last_3_hours:5.2f} mm.\")\n\n # create an axis\n temperature_axis = self.figure.add_subplot(911)\n pressure_axis = 
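# plot2D above stacks five panels by taking rows 1, 3, 5, 7 and 9 of a 9x1
# add_subplot grid, so the skipped rows act as vertical spacing. The layout
# skeleton on its own (Agg backend assumed so the sketch runs headless):
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig = plt.figure()
panels = [fig.add_subplot(9, 1, row) for row in (1, 3, 5, 7, 9)]
titles = ["Temperature", "Pressure", "Humidity", "Clouds", "Rain"]
for axis, title in zip(panels, titles):
    axis.set_title(title)
fig.savefig("layout.png")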
self.figure.add_subplot(913)\n humidity_axis = self.figure.add_subplot(915)\n cloud_axis = self.figure.add_subplot(917)\n rain_axis = self.figure.add_subplot(919)\n\n plot_temperature(temperature_axis, forecast_data)\n plot_pressure(pressure_axis, forecast_data)\n plot_humidity(humidity_axis, forecast_data)\n plot_cloud(cloud_axis, forecast_data)\n plot_rain(rain_axis, forecast_data)\n\n # refresh canvas\n self.canvas.draw()\n","sub_path":"PPDV_SP_PW/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"430220271","text":"import contextlib\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nfrom typing import Callable, Sequence, Tuple\n\n\nfrom otupdate.buildroot.file_actions import (unzip_update,\n hash_file,\n ROOTFS_NAME,\n ROOTFS_HASH_NAME,\n HashMismatch)\nfrom .constants import DATA_DIR_NAME\n\nBOOT_NAME = 'boot.vfat'\nBOOT_HASH_NAME = 'boot.vfat.hash'\n\nUPDATE_FILES = [BOOT_NAME, BOOT_HASH_NAME, ROOTFS_NAME, ROOTFS_HASH_NAME]\n\nLOG = logging.getLogger(__name__)\n\n\ndef validate_update(\n filepath: str,\n progress_callback: Callable[[float], None]) -> Tuple[str, str]:\n \"\"\" Like otupdate.buildroot.file_actions.validate_update but moreso\n\n Checks for the rootfs, rootfs hash, bootfs, and bootfs hash.\n\n Returns the path to the rootfs and the path to the bootfs\n \"\"\"\n filenames = [ROOTFS_NAME, ROOTFS_HASH_NAME, BOOT_NAME, BOOT_HASH_NAME]\n\n def zip_callback(progress):\n progress_callback(progress/3.0)\n files, sizes = unzip_update(filepath, zip_callback, filenames, filenames)\n\n def rootfs_hash_callback(progress):\n progress_callback(progress/3.0 + 0.33)\n\n rootfs = files.get(ROOTFS_NAME)\n assert rootfs\n rootfs_calc_hash = hash_file(rootfs, rootfs_hash_callback,\n file_size=sizes[ROOTFS_NAME])\n rootfs_hashfile = files.get(ROOTFS_HASH_NAME)\n assert rootfs_hashfile\n rootfs_packaged_hash = open(rootfs_hashfile, 'rb').read().strip()\n if rootfs_calc_hash != rootfs_packaged_hash:\n msg = f\"Hash mismatch (rootfs): calculated {rootfs_calc_hash!r} != \"\\\n f\"packaged {rootfs_packaged_hash!r}\"\n LOG.error(msg)\n raise HashMismatch(msg)\n\n def bootfs_hash_callback(progress):\n progress_callback(progress/3.0 + 0.66)\n\n bootfs = files.get(BOOT_NAME)\n assert bootfs\n bootfs_calc_hash = hash_file(bootfs, bootfs_hash_callback,\n file_size=sizes[BOOT_NAME])\n bootfs_hashfile = files.get(BOOT_HASH_NAME)\n assert bootfs_hashfile\n bootfs_packaged_hash = open(bootfs_hashfile, 'rb').read().strip()\n if bootfs_calc_hash != bootfs_packaged_hash:\n msg = f\"Hash mismatch (bootfs): calculated {bootfs_calc_hash!r} != \"\\\n f\"packged {bootfs_packaged_hash!r}\"\n LOG.error(msg)\n raise HashMismatch(msg)\n\n return rootfs, bootfs\n\n\ndef _get_proc_cmdline() -> bytes:\n \"\"\" Load /proc/cmdline \"\"\"\n return open('/proc/cmdline', 'rb').read()\n\n\ndef find_active_sysroot() -> str:\n \"\"\" Parse /proc/cmdline to find the active sysroot. Return the path \"\"\"\n cmdline = _get_proc_cmdline()\n match = re.search(b'root=([/a-zA-Z0-9.]+)', cmdline)\n if not match:\n raise RuntimeError(f\"Couldn't find bootpart from {cmdline!r}\")\n return match.group(1).decode()\n\n\ndef find_inactive_sysroot() -> str:\n \"\"\" Parse /proc/cmdline to find the inactive sysroot. 
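# find_active_sysroot above pulls root=... out of the kernel command line
# with a single regex; the parse step in isolation, against a canned cmdline:
import re

cmdline = b"console=ttyS0 root=/dev/mmcblk0p3 rootwait quiet"
match = re.search(b"root=([/a-zA-Z0-9.]+)", cmdline)
active = match.group(1).decode()
assert active == "/dev/mmcblk0p3"
# The inactive partition is then the other entry of the p2/p3 pair:
inactive = {"/dev/mmcblk0p3": "/dev/mmcblk0p2",
            "/dev/mmcblk0p2": "/dev/mmcblk0p3"}[active]
print(inactive)  # /dev/mmcblk0p2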
Return the path \"\"\"\n active = find_active_sysroot()\n return {'/dev/mmcblk0p3': '/dev/mmcblk0p2',\n '/dev/mmcblk0p2': '/dev/mmcblk0p3'}[active]\n\n\ndef patch_connection_file_paths(connection: str) -> str:\n \"\"\"\n Patch any paths in a connection to remove the balena host paths\n\n Undoes the changes applied by\n :py:meth:`opentrons.system.nmcli._rewrite_key_path_to_host_path`\n\n :param connection: The contents of a NetworkManager connection file\n :return: The patches contents, suitable for writing somewher\n \"\"\"\n new_conn_lines = []\n for line in connection.split('\\n'):\n if '=' in line:\n parts = line.split('=')\n path_matches = re.search(\n '/mnt/data/resin-data/[0-9]+/(.*)', parts[1])\n if path_matches:\n new_path = f'/data/{path_matches.group(1)}'\n new_conn_lines.append(\n '='.join([parts[0], new_path]))\n LOG.info(\n f\"migrate_connection_file: {parts[0]}: \"\n f\"{parts[1]}->{new_path}\")\n continue\n new_conn_lines.append(line)\n return '\\n'.join(new_conn_lines)\n\n\n@contextlib.contextmanager\ndef mount_state_partition():\n \"\"\" Mount the active sysroot partition somewhere and yield it \"\"\"\n with mount_partition('/dev/mmcblk0p5', '/mnt/resin-state') as mountpath:\n yield mountpath\n\n\n@contextlib.contextmanager\ndef mount_boot_partition():\n \"\"\" Mount the balena boot partition somewhere and yield it \"\"\"\n with mount_partition('/dev/mmcblk0p1', '/mnt/boot') as mountpath:\n yield mountpath\n\n\n@contextlib.contextmanager\ndef mount_data_partition():\n \"\"\" Mount the balena data partition somewhere and yield it \"\"\"\n with mount_partition('/dev/mmcblk0p6', '/mnt/data') as mountpath:\n yield mountpath\n\n\n@contextlib.contextmanager\ndef mount_partition(partition: str, mountpath: str):\n os.makedirs(mountpath, exist_ok=True)\n subprocess.check_call(['mount', partition, mountpath])\n try:\n yield mountpath\n finally:\n subprocess.check_call(['umount', mountpath])\n\n\ndef migrate(ignore: Sequence[str], name: str):\n \"\"\" Copy everything in the app data to the root of the new partition\n\n :param ignore: Files to ignore in the root. 
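# patch_connection_file_paths above rewrites balena host paths of the form
# /mnt/data/resin-data/<app-id>/... to /data/... inside key=value lines. A
# before/after round trip of that rule, with a fabricated connection line:
import re

def patch_line(line):
    key, _, value = line.partition("=")
    hit = re.search("/mnt/data/resin-data/[0-9]+/(.*)", value)
    return "%s=/data/%s" % (key, hit.group(1)) if hit else line

print(patch_line("ca-cert=/mnt/data/resin-data/1234/keys/ca.pem"))
# -> ca-cert=/data/keys/ca.pem
print(patch_line("id=wifi-home"))  # left untouched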
This should be populated\n with the names (with no directory elements) of the migration\n update zipfile and everything unzipped from it.\n :param str: The name of the robot\n \"\"\"\n try:\n with mount_data_partition() as datamount:\n migrate_data(ignore, datamount, DATA_DIR_NAME)\n migrate_connections(datamount)\n migrate_hostname(datamount, name)\n except Exception:\n LOG.exception(\"Exception during data migration\")\n raise\n\n\ndef migrate_files_to_ignore(src, names):\n if src.endswith('jupyter') and 'jupyter' in names:\n return ['jupyter']\n\n return []\n\n\ndef migrate_data(ignore: Sequence[str],\n new_data_path: str,\n old_data_path: str):\n \"\"\" Copy everything in the app data to the root of the main data part\n\n :param ignore: A list of files that should be ignored in the root of /data\n :param new_data_path: Where the new data partition is mounted\n :param old_data_path: Where the old date files are\n \"\"\"\n # the new ’data’ path is actually /var and /data is in /var/data\n dest_data = os.path.join(new_data_path, 'data')\n LOG.info(f\"migrate_data: copying {old_data_path} to {dest_data}\")\n os.makedirs(dest_data, exist_ok=True)\n with os.scandir(old_data_path) as scanner:\n for entry in scanner:\n if entry.name in ignore:\n LOG.info(f\"migrate_data: ignoring {entry.name}\")\n continue\n src = os.path.join(old_data_path, entry.name)\n dest = os.path.join(dest_data, entry.name)\n if os.path.exists(dest):\n LOG.info(f\"migrate_data: removing dest tree {dest}\")\n shutil.rmtree(dest, ignore_errors=True)\n if entry.is_dir():\n LOG.info(f\"migrate_data: copying tree {src}->{dest}\")\n shutil.copytree(src, dest, symlinks=True,\n ignore=migrate_files_to_ignore)\n else:\n LOG.info(f\"migrate_data: copying file {src}->{dest}\")\n shutil.copy2(src, dest)\n\n\ndef migrate_system_connections(src_sc: str, dest_sc: str) -> bool:\n \"\"\" Migrate the contents of a system-connections dir\n\n :param dest_sc: The system-connections to copy to. 
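# migrate_data above hands shutil.copytree an ignore= callable; copytree
# invokes it once per visited directory with (src, names) and skips whatever
# names it returns. The jupyter rule above, exercised on throwaway dirs:
import os
import shutil
import tempfile

def skip_jupyter(src, names):
    return ["jupyter"] if src.endswith("jupyter") and "jupyter" in names else []

root = tempfile.mkdtemp()
src = os.path.join(root, "jupyter")
os.makedirs(os.path.join(src, "jupyter"))  # nested dir that should be skipped
open(os.path.join(src, "kept.txt"), "w").close()
dest = os.path.join(root, "out")
shutil.copytree(src, dest, ignore=skip_jupyter)
print(sorted(os.listdir(dest)))  # ['kept.txt'] -- the nested jupyter dir is gone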
Will be created if it\n does not exist\n :param src_sc: The system-connections to copy from\n :return: True if anything was moved\n \"\"\"\n found = False\n LOG.info(f\"migrate_system_connections: checking {dest_sc}\")\n os.makedirs(dest_sc, exist_ok=True)\n with os.scandir(src_sc) as scanner:\n for entry in scanner:\n # ignore readme and sample\n if entry.name.endswith('.ignore'):\n continue\n # ignore the hardwired connection added by api server\n if entry.name == 'static-eth0':\n continue\n # ignore weird remnants of boot partition connections\n if entry.name.startswith('._'):\n continue\n patched = patch_connection_file_paths(\n open(os.path.join(src_sc, entry.name), 'r').read())\n open(os.path.join(dest_sc, entry.name), 'w').write(patched)\n LOG.info(f\"migrate_connections: migrated {entry.name}\")\n found = True\n return found\n\n\ndef migrate_connections(new_data_path: str):\n \"\"\" Migrate wifi connection files to new locations and patch them\n\n :param new_data_path: The path to where the new data partition is mounted\n \"\"\"\n dest_connections = os.path.join(\n new_data_path, 'lib', 'NetworkManager', 'system-connections')\n os.makedirs(dest_connections, exist_ok=True)\n\n with mount_state_partition() as state_path:\n src_connections = os.path.join(\n state_path, 'root-overlay', 'etc', 'NetworkManager',\n 'system-connections')\n LOG.info(f\"migrate_connections: moving nmcli connections from\"\n f\" {src_connections} to {dest_connections}\")\n found = migrate_system_connections(src_connections, dest_connections)\n\n if found:\n return\n\n LOG.info(\n \"migrate_connections: No connections found in state, checking boot\")\n\n with mount_boot_partition() as boot_path:\n src_connections = os.path.join(\n boot_path, 'system-connections')\n LOG.info(f\"migrate_connections: moving nmcli connections from\"\n f\" {src_connections} to {dest_connections}\")\n found = migrate_system_connections(src_connections, dest_connections)\n if not found:\n LOG.info(\"migrate_connections: No connections found in boot\")\n\n\ndef migrate_hostname(dest_data: str, name: str):\n \"\"\" Write the machine name to a couple different places\n\n :param dest_data: The path to the root of ``/var`` in buildroot\n :param name: The name\n\n The hostname gets written to:\n - dest_path/hostname (bind mounted to /etc/hostname)\n (https://www.freedesktop.org/software/systemd/man/hostname.html#)\n - dest_path/machine-info as the PRETTY_HOSTNAME (bind mounted to\n /etc/machine-info)\n (https://www.freedesktop.org/software/systemd/man/machine-info.html#)\n - dest_path/serial since we assume the resin name is the serial number\n\n We also create some basic defaults for the machine-info.\n \"\"\"\n if name.startswith('opentrons-'):\n name = name[len('opentrons-'):]\n LOG.info(\n f\"migrate_hostname: writing name {name} to {dest_data}/hostname,\"\n f\" {dest_data}/machine-info, {dest_data}/serial\")\n with open(os.path.join(dest_data, 'hostname'), 'w') as hn:\n hn.write(name + \"\\n\")\n with open(os.path.join(dest_data, 'machine-info'), 'w') as mi:\n mi.write(f'PRETTY_HOSTNAME={name}\\nDEPLOYMENT=production\\n')\n with open(os.path.join(dest_data, 'serial'), 'w') as ser:\n ser.write(name)\n","sub_path":"update-server/otupdate/migration/file_actions.py","file_name":"file_actions.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"372088821","text":"# encoding: utf-8\nimport re\n\nfrom lxml import etree\nimport pydash\nfrom 
download_css import get_css_links\nfrom parse_css import get_css_class_dict\nfrom utils import read_file, to_string\n\n\ndef parse_comment_page(doc, css_urls):\n \"\"\"\n 解析评论页并提取数据\n \"\"\"\n tmp_class_dict = {}\n\n def get_class_dict(css_url):\n tmp_class_dict_keys = list(tmp_class_dict.keys())\n if pydash.includes(tmp_class_dict_keys, css_url):\n return tmp_class_dict[css_url]\n else:\n class_dict = get_css_class_dict(css_url)\n tmp_class_dict[css_url] = class_dict\n return class_dict\n\n def get_class_means(class_name):\n for css_url in css_urls:\n print(\"css_url\", css_url)\n class_dict = get_class_dict(css_url)\n if pydash.includes(list(class_dict.keys()), class_name):\n return class_dict[class_name]\n return None\n\n datas = []\n for li in doc.xpath('//*[@class=\"mod comment\"]/ul/li'):\n print(\"li\", li)\n name = li.xpath('.//a[@class=\"name\"]/text()')[0].strip('\\n\\r \\t')\n try:\n star = li.xpath('.//span[contains(./@class, \"sml-rank-stars\")]/@class')[0]\n print(\"star1\", star)\n star = re.search(r'sml-str(\\d+)', star).group(1)\n print(\"star2\", star)\n except IndexError:\n star = 0\n time = li.xpath('.//span[@class=\"time\"]/text()')[0].strip('\\n\\r \\t')\n # test = li.xpath('.//p[@class=\"desc J-desc\"]/text()')\n # comment = pydash.join(test, \"\")\n # print(\"comment=\")\n # print(comment)\n test = li.xpath('.//p[@class=\"desc J-desc\"]')\n print(\"test\", type(test), test)\n for comment_elem in test:\n # just one comment.\n print(\"comment_elem\", type(comment_elem), comment_elem)\n comment = to_string(comment_elem)\n\n class_set = set()\n # \n for class_name in re.findall(r'', comment):\n origin_string = '' % class_name\n meaning = get_class_means(class_name)\n print(\"class_name\", class_name, \"meaning\", meaning, origin_string)\n print(\"comment\", comment)\n comment = pydash.replace(comment, origin_string, get_class_means(class_name))\n\n class_set.add(class_name)\n\n print(\"comment=\")\n print(comment)\n\n score = ' '.join(map(lambda s: s.strip('\\n\\r \\t'), li.xpath('.//span[@class=\"score\"]//text()')))\n\n data = {\n 'name': name,\n 'comment': comment,\n 'star': star,\n 'score': score,\n 'time': time,\n }\n datas.append(data)\n print(\"data\", data)\n exit()\n\n return datas\n\n\ndef parse_one_html(file_path):\n html = read_file(file_path)\n\n css_urls = get_css_links(html)\n print(\"css_url\", css_urls)\n\n doc = etree.HTML(html)\n result = parse_comment_page(doc, css_urls)\n print(\"result\", result)\n\n\n# file_path = \"/Users/huang/Desktop/workpyt/hon/jayin-python-app/src/dianping/htmls/504634.html\"\nfile_path = \"htmls/【THE C·HOUSE】电话,地址,价格,营业时间(图) - 上海美食 - 大众点评网.htm\"\nparse_one_html(file_path)\n","sub_path":"src/dianping_back/parse_shop.py","file_name":"parse_shop.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"511262659","text":"import operator\nCount = {}\nwords = []\n\n\n\nwith open('word_search.tsv') as file:\n\tfor row in file:\n\t\t# splitting into word\n\t\tword, frequency = row.split('\\t')\n\t\t#insert word as key and frequency as value into a dictionary\n\t\tCount[word] = int(frequency.strip())\n\t\twords.append(word)\n\n\n#check the input text is present in any word of words list.\ndef searchwords(letter):\n\tresults = []\n\tfor word in words:\n\t\tif letter in word:\n\t\t\tresults.append(word)\n\treturn results\n\n\n# sort the words based on constrains( search keyword.\ndef sortwords(results, incompleteWord):\n\twordresults = 
[(result, result.find(incompleteWord), Count[result], len(result)) for result in results]\n\twordresults.sort(key=operator.itemgetter(1))\n\twordresults.sort(key=operator.itemgetter(3))\n\tsearchResults = [wordresult[0] for wordresult in wordresults][:25]\n\treturn searchResults","sub_path":"Autocompleteapp/Autocomplete.py","file_name":"Autocomplete.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"463711611","text":"from tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dropout, Lambda\nfrom tensorflow.keras.layers import Conv2D, Conv2DTranspose, BatchNormalization\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nimport datetime\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten\n\n\n\ndef get_model(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, do_compile=False, out_activation='sigmoid'):\n\n inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))\n\n conv1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (inputs)\n conv1 = BatchNormalization() (conv1)\n conv1 = Dropout(0.1) (conv1)\n conv1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv1)\n conv1 = BatchNormalization() (conv1)\n pooling1 = MaxPooling2D((2, 2)) (conv1)\n\n conv2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (pooling1)\n conv2 = BatchNormalization() (conv2)\n conv2 = Dropout(0.1) (conv2)\n conv2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv2)\n conv2 = BatchNormalization() (conv2)\n pooling2 = MaxPooling2D((2, 2)) (conv2)\n\n conv3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (pooling2)\n conv3 = BatchNormalization() (conv3)\n conv3 = Dropout(0.2) (conv3)\n conv3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv3)\n conv3 = BatchNormalization() (conv3)\n pooling3 = MaxPooling2D((2, 2)) (conv3)\n\n conv4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (pooling3)\n conv4 = BatchNormalization() (conv4)\n conv4 = Dropout(0.2) (conv4)\n conv4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv4)\n conv4 = BatchNormalization() (conv4)\n pooling4 = MaxPooling2D(pool_size=(2, 2)) (conv4)\n\n conv5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (pooling4)\n conv5 = BatchNormalization() (conv5)\n conv5 = Dropout(0.3) (conv5)\n conv5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv5)\n conv5 = BatchNormalization() (conv5)\n pooling5 = MaxPooling2D(pool_size=(2, 2)) (conv5)\n\n conv6 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pooling5)\n conv6 = BatchNormalization()(conv6)\n conv6 = Dropout(0.3)(conv6)\n conv6 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv6)\n conv6 = BatchNormalization()(conv6)\n pooling6 = MaxPooling2D(pool_size=(2, 2)) (conv6)\n\n conv7 = Conv2D(1024, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pooling6)\n 
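# [Editor's aside: a hedged refactoring sketch, not part of the original file.]
# Every encoder level in get_model() repeats the same
# Conv-BatchNorm-Dropout-Conv-BatchNorm stack with only the filter count and
# dropout rate changing. Were one refactoring, the repetition could be folded
# into a helper like this (it assumes the keras layer imports at the top of
# this file; the name conv_block is hypothetical):
def conv_block(inputs, filters, dropout):
    # Two 3x3 convolutions with batch norm, and dropout in between,
    # exactly as written out longhand above.
    x = Conv2D(filters, (3, 3), activation='relu',
               kernel_initializer='he_normal', padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Dropout(dropout)(x)
    x = Conv2D(filters, (3, 3), activation='relu',
               kernel_initializer='he_normal', padding='same')(x)
    return BatchNormalization()(x)
# e.g. conv1 = conv_block(inputs, 16, 0.1); pooling1 = MaxPooling2D((2, 2))(conv1)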
conv7 = BatchNormalization()(conv7)\n conv7 = Dropout(0.3)(conv7)\n conv7 = Conv2D(1024, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv7)\n conv7 = BatchNormalization()(conv7)\n\n upsample8 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same') (conv7)\n upsample8 = concatenate([upsample8, conv6])\n conv8 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample8)\n conv8 = BatchNormalization() (conv8)\n conv8 = Dropout(0.2) (conv8)\n conv8 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv8)\n conv8 = BatchNormalization() (conv8)\n\n upsample9 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same') (conv8)\n upsample9 = concatenate([upsample9, conv5])\n conv9 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample9)\n conv9 = BatchNormalization() (conv9)\n conv9 = Dropout(0.2) (conv9)\n conv9 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv9)\n conv9 = BatchNormalization() (conv9)\n \n upsample10 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (conv9)\n upsample10 = concatenate([upsample10, conv4])\n conv10 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample10)\n conv10 = BatchNormalization() (conv10)\n conv10 = Dropout(0.2) (conv10)\n conv10 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv10)\n conv10 = BatchNormalization() (conv10)\n\n upsample11 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (conv10)\n upsample11 = concatenate([upsample11, conv3])\n conv11 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample11)\n conv11 = BatchNormalization() (conv11)\n conv11 = Dropout(0.2) (conv11)\n conv11 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv11)\n conv11 = BatchNormalization() (conv11)\n\n upsample12 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (conv11)\n upsample12 = concatenate([upsample12, conv2])\n conv12 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample12)\n conv12 = BatchNormalization() (conv12)\n conv12 = Dropout(0.1) (conv12)\n conv12 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv12)\n conv12 = BatchNormalization() (conv12)\n\n upsample13 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (conv12)\n upsample13 = concatenate([upsample13, conv1], axis=3)\n conv13 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (upsample13)\n conv13 = BatchNormalization() (conv13)\n conv13 = Dropout(0.1) (conv13)\n conv13 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (conv13)\n conv13 = BatchNormalization() (conv13)\n\n outputs = Conv2D(1, (1, 1), activation=out_activation) (conv13)\n\n model = Model(inputs=[inputs], outputs=[outputs])\n\n return model\n\n\ndef fit(X_train, Y_train, model, epochs=100, validation_split=0.1, validation_data=None, class_weight=None,\n checkpoint_datetime=False,checkpoint_suffix=\"\",batch_size=8):\n \"\"\"\n\n :param X_train: The training data\n :param Y_train: The training labels\n :param model: The tf keras model to train\n :param epochs: the number of epochs to train\n :param validation_split: percentage of data to use as validation\n 
:param validation_data: tuple of (X_val, y_val) validation data to use\n :param class_weight: weights for the classes in the loss function\n :param checkpoint_datetime: whether or not to append the current date and time to the checkpoints\n :param checkpoint_suffix: a optional string to append to the checkpoints\n :return: the results object\n \"\"\"\n earlystopper = EarlyStopping(patience=20, verbose=1)\n suffix = checkpoint_suffix\n if checkpoint_datetime:\n suffix += str(datetime.datetime.now())\n checkpointer = ModelCheckpoint('checkpoints/unet{}.h5'.format(suffix), verbose=1, save_best_only=True)\n results = model.fit(X_train, Y_train, validation_split=validation_split, batch_size=batch_size, epochs=epochs,\n validation_data=validation_data,\n callbacks=[earlystopper, checkpointer], class_weight=class_weight)\n return results\n\n\ndef load(filename='checkpoints/unet.h5'):\n return load_model(filename)\n\n # # Fit model\n # earlystopper = EarlyStopping(patience=15, verbose=1)\n # checkpointer = ModelCheckpoint('model_unet_checkpoint.h5', verbose=1, save_best_only=True)\n # results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=16, epochs=100,\n # callbacks=[earlystopper, checkpointer])\n #\n # # Predict on train, val and test\n # model = load_model('model_unet_checkpoint.h5')\n # preds_train = model.predict(X_train[:int(X_train.shape[0] * 0.9)], verbose=1)\n # preds_val = model.predict(X_train[int(X_train.shape[0] * 0.9):], verbose=1)\n # preds_test = model.predict(X_test, verbose=1)\n #\n # # Threshold predictions\n # preds_train_t = (preds_train > 0.5).astype(np.uint8)\n # preds_val_t = (preds_val > 0.5).astype(np.uint8)\n # preds_test_t = (preds_test > 0.5).astype(np.uint8)\n #\n # # Create list of upsampled test masks\n # preds_test_upsampled = []\n # for i in range(len(preds_test_t)):\n # preds_test_upsampled.append(resize(np.squeeze(preds_test_t[i]),\n # (sizes_test[i][0], sizes_test[i][1]),\n # mode='constant', preserve_range=True))\n","sub_path":"models/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"202367874","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include('eartraining.urls')),\n url(r'^pitch/', include('pitch.urls')),\n url(r'^intervals/', include('intervals.urls')),\n url(r'^melodic_dictation/', include('melodic_dictation.urls')),\n url(r'^triads/', include('triads.urls')),\n url(r'^seventh_chords/', include('seventh_chords.urls')),\n url(r'^extended_chords/', include('extended_chords.urls')),\n url(r'^progressions/', include('progressions.urls')),\n]\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"225366351","text":"import requests\r\nimport json\r\nimport xlrd,xlwt\r\nimport datetime,logging\r\nfrom xlutils.copy import copy\r\nimport time\r\n\r\n\r\n\r\ndef read_excel(sheet,row,col):\r\n '''Shared helper for reading a cell from the Excel workbook'''\r\n a = xlrd.open_workbook('G:/RasyncadminApi/datas/apiTestCase.xls')\r\n b = a.sheet_by_name(sheet)\r\n c = b.cell_value(row,col)\r\n return c\r\n\r\n\r\ndef write_excel(list,row,col,data):\r\n '''Shared helper for writing a cell to the Excel workbook'''\r\n file = r\"G:/RasyncadminApi/datas/apiTestCase.xls\"\r\n rb = xlrd.open_workbook(file, formatting_info=True)\r\n wb = copy(rb)\r\n ws = wb.get_sheet(list)\r\n ws.write(row, col, data)\r\n wb.save(file)\r\n print('Data written successfully')\r\n #list: which sheet (0-based), row: row index, col: column index, data: the value to write\r\n\r\ndef Headers(token,data):\r\n '''Merge the common header fields into the request parameters'''\r\n headers = {\r\n \"id\":str(time.time()),\r\n \"device\":\"\",\r\n \"module\":\"WEBADMIN\",\r\n \"token\":token,\r\n \"version\": \"3.0.3.2\"\r\n }\r\n data=json.loads(data)\r\n data.update(headers)\r\n return json.dumps(data)\r\n\r\n\r\n\r\ndef setlogging():\r\n '''Shared helper that configures logging'''\r\n t = str(datetime.datetime.now()).split(' ')[0] # create a logger\r\n\r\n logger = logging.getLogger()\r\n logger.setLevel(level=logging.DEBUG)\r\n handler = logging.FileHandler('G:/RasyncadminApi/logs/' + t + \".log\") # log_path = os.path.dirname(os.getcwd()) + '/Logs/' # logs are saved under /Logs in the project root\r\n handler.setLevel(logging.DEBUG)\r\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') # define the handler's output format\r\n handler.setFormatter(formatter)\r\n\r\n console = logging.StreamHandler()\r\n console.setLevel(logging.DEBUG)\r\n\r\n logger.addHandler(handler) # attach the handlers to the logger\r\n logger.addHandler(console)\r\n\r\n return logger\r\n\r\n\r\n#module-level shared logger\r\nlogger = setlogging()\r\n\r\n","sub_path":"RasyncadminApi/public_data/public_data.py","file_name":"public_data.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"54141794","text":"import os\npath = \"/home/\" + os.getlogin() + \"/ece411/mp3\" # changeme\n\n# print(\"then compile() to compile\")\n# print(\"then test() next\")\n\nfrom subprocess import PIPE, Popen\nimport time\nfrom os import getcwd, chdir\n\npipe_all = {\"stdin\" : PIPE, \"stdout\" : PIPE}#, \"stderr\" : PIPE}\n\ndef compile():\n\tvsim_pipe = Popen([\"/software/altera/13.1/modelsim_ase/bin/vsim\", \"-c\"], **pipe_all)\n\tchdir(path+\"/simulation/modelsim\")\n\n\tdolines = open(path + \"/simulation/modelsim/cpu_run_msim_rtl_verilog.do\", 'r').readlines()\n\tinitdo = \"\".join(dolines[:-2])\n\n\t# Opens a command line for modelsim.\n\tvsim_pipe = Popen([\"/software/altera/13.1/modelsim_ase/bin/vsim\", \"-c\"], 
**pipe_all)\n\tvsim_pipe.communicate(initdo)\n\tvsim_pipe.wait()\n\n\tprint (\"\\nCompilation complete!\\n\")\n\treturn\n\ndef test():\n\n\tchdir(path+\"/simulation/modelsim\")\n\tvsim_pipe = Popen([\"/software/altera/13.1/modelsim_ase/bin/vsim\", \"-c\"], **pipe_all)\n\n\tf = \"\".join(open(\"cpu_run_msim_rtl_verilog.do\").readlines())\n\tcommand1 = \\\n\t\t'radix hex;'\\\n\t\t'add list halt ;'\\\n\t\t'when {halt == \"1\"} {write list test1.lst} ;'\\\n\t\t'run -all;exit;\\n'\n\n\tvsim_pipe.communicate( f+command1 )\n\n\t# command2 = \\\n\t# \t'delete list *;'\\\n\t# \t'add list pmem_write;'\\\n\t# \t'restart -f;'\\\n\t# \t'nowhen *;'\\\n\t# \t'when {halt == \"1\"} {write list test2.lst; exit} ;'\\\n\t# \t'run -all\\n'\n\n\ttime = open(\"test1.lst\").readlines()[-1].split()[0][0:-3] + \" nanoseconds\\n\"\n\tprint(time)\n\n\treturn \n\n\t# vsim_pipe.wait()\n\n\t# THIS IS REALLY COOL\n\t# add list -notrigger write_data;\\\n\t# add list write;\\\n\t# add list -notrigger write_address;\\\n\n\n\n\n","sub_path":"mp3/testcode/python/jlb_run_tests.py","file_name":"jlb_run_tests.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"189992831","text":"# https://cloud.tencent.com/developer/article/1584953\nimport requests\nimport json\nimport requests\nfrom pyecharts.charts import Map, Geo\nfrom pyecharts import options as opts\nfrom pyecharts.globals import GeoType,RenderType\nimport json\n\n\nclass Map:\n def __init__(self, parameter_list=None):\n self.all_data , self.data_pair = self.get_data()\n # print(self.data)\n self.geo = self.get_geo()\n\n\n def get_data(self, url='https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'):\n '''获取数据'''\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n data = json.loads(requests.get(url=url).json()['data'])\n china = data['areaTree'][0]['children']\n confirm_china = []\n for i in range(len(china)):\n confirm_china.append([china[i]['name'],china[i]['total']['confirm']])\n return data ,confirm_china\n\n def get_geo(self):\n china_total = \"确诊:\"+ str(self.all_data['chinaTotal']['confirm']) + \\\n \" 疑似:\" +str( self.all_data['chinaTotal']['suspect']) + \\\n \" 死亡:\" + str( self.all_data['chinaTotal']['dead']) + \\\n \" 治愈:\" + str(self.all_data['chinaTotal']['heal']) + \\\n \" 更新日期:\" + str(self.all_data['lastUpdateTime'])\n\n geo = (\n Geo(init_opts = opts.InitOpts(width=\"1200px\",height=\"600px\",bg_color=\"#404a59\",page_title=\"全国疫情实时报告\",renderer=RenderType.SVG,theme=\"white\"))#设置绘图尺寸,背景色,页面标题,绘制类型\n .add_schema(maptype=\"china\",itemstyle_opts=opts.ItemStyleOpts(color=\"rgb(49,60,72)\",border_color=\"rgb(0,0,0)\"))#中国地图,地图区域颜色,区域边界颜色\n .add(series_name=\"geo\",data_pair=self.data_pair,type_=GeoType.EFFECT_SCATTER)#设置地图数据,动画方式为涟漪特效effect scatter\n .set_series_opts(#设置系列配置\n label_opts=opts.LabelOpts(is_show=False),#不显示Label\n effect_opts = opts.EffectOpts(scale = 6))#设置涟漪特效缩放比例\n .set_global_opts(#设置��局系列配置\n visualmap_opts=opts.VisualMapOpts(min_=0,max_=100),#设置视觉映像配置,最大值为平均值\n title_opts=opts.TitleOpts(title=\"全国疫情地图\", subtitle=china_total,pos_left=\"center\",pos_top=\"10px\",title_textstyle_opts=opts.TextStyleOpts(color=\"#fff\")),#设置标题,副标题,标题位置,文字颜色\n legend_opts = opts.LegendOpts(is_show=False),#不显示图例\n )\n )\n\n return geo\n\n def gen_html(self):\n self.geo.render(path=\"./html/render.html\")\n\nif __name__ == \"__main__\":\n map = Map()\n 
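# [Editor's sketch, not part of the original record.] get_data() above decodes
# the Tencent payload twice: the HTTP body is JSON, and its 'data' field is
# itself a JSON-encoded string. A self-contained illustration, with a canned
# payload (hypothetical sample values) standing in for the real endpoint:
import json
body = '{"ret": 0, "data": "{\\"chinaTotal\\": {\\"confirm\\": 81054}}"}'
outer = json.loads(body)             # first decode: the HTTP response body
inner = json.loads(outer['data'])    # second decode: the embedded JSON string
print(inner['chinaTotal']['confirm'])  # -> 81054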
map.gen_html()","sub_path":"迭代2/需求细化及核心用例1实现/疫情地图demo/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"497272568","text":"from simanim import *\n\ndef setup(m):\n PixelsPerUnit(5)\n ViewBox((-10, -10), 120, 60)\n UpdatesPerFrame(100)\n\n m.h = InputFloat(30, (10,40))\n m.v = 0\n m.g = 9.81\n\ndef update(m):\n dv = m.g * m.dt \n dh = - (m.v * m.dt + m.g * m.dt ** 2 / 2)\n\n m.v += dv\n m.h += dh\n\n if m.h <= 0:\n m.h = 0\n Finish()\n\ndef draw(m):\n podloga = Box((-10,-10),120,10)\n podloga.fill_color = '#00AA00'\n telo = Box((0,m.h),10,10)\n telo.fill_color = '#777777'\n Draw(podloga,telo)\n\n txt_h = Text((50, 35), f'h ={m.h:6.3f}m')\n txt_h.pen_color = '#009900'\n txt_t = Text((50, 30), f't ={m.t:6.3f}s')\n Draw(txt_t, txt_h)\n\nRun(setup, update, draw)","sub_path":"02_slobodni_pad/pad2.py","file_name":"pad2.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"381797645","text":"from . import FilterField\r\nfrom . import sort\r\nfrom .utils import *\r\nfrom io import FileIO\r\nfrom typing import List, Tuple\r\n\r\n\r\ndef get_tasks(service_id: int = None, fields: List[str] = None, search: str = None, archive: bool = None,\r\n inactive: bool = None, filter_id: int = None, sort_list: List[sort.SortType] = None,\r\n filter_fields: List[FilterField] = None, pagesize: int = None, page: int = None,\r\n include: List[str] = None) -> json or List[json]:\r\n params = {}\r\n if service_id is not None:\r\n params['service_id'] = service_id\r\n if fields is not None:\r\n params['fields'] = list_to_str(fields)\r\n if search is not None:\r\n params['search'] = search\r\n if archive is not None:\r\n params['archive'] = archive\r\n if inactive is not None:\r\n params['inactive'] = inactive\r\n if filter_id is not None:\r\n params['filterid'] = filter_id\r\n if sort_list is not None and sort:\r\n params['sort'] = sort.make_sort(sort_list)\r\n if filter_fields is not None:\r\n for ff in filter_fields:\r\n if ff.value:\r\n params[ff.type] = ff.value\r\n if pagesize is not None:\r\n params['pagesize'] = pagesize\r\n if page is not None:\r\n params['page'] = page\r\n if include is not None and include:\r\n params['include'] = list_to_str(include)\r\n return make_request('get', 'task', params)\r\n else:\r\n result = make_request('get', 'task', params)\r\n return result['Tasks'] if type(result) is not APIError else result\r\n\r\n\r\ndef get_task(id_: int, include: List[str] = None):\r\n params = {}\r\n if include is not None and include:\r\n params['include'] = list_to_str(include)\r\n return make_request('get', f'task/{id_}', params)\r\n else:\r\n result = make_request('get', f'task/{id_}', params)\r\n return result['Task'] if type(result) is not APIError else result\r\n\r\n\r\ndef get_new_task_example(service_id: int, tasktypeid: int, include: List[str] = None):\r\n params = {'serviceid': service_id,\r\n 'tasktypeid': tasktypeid}\r\n if include is not None and include:\r\n params['include'] = list_to_str(include)\r\n return make_request('get', f'newtask', params)\r\n\r\n\r\ndef load_files(files: List[Tuple[str, FileIO]]):\r\n multipart_form_data = {}\r\n for i in range(len(files)):\r\n multipart_form_data[f'file{i}'] = (files[i][0], files[i][1], \"multipart/form-data\")\r\n result = make_request('post', 'TaskFile', files=multipart_form_data)\r\n return result['FileTokens'] if type(result) 
is not APIError else result\r\n\r\n\r\ndef create_task(name: str, service_id: int, status_id: int, priority_id: int, type_id: int,\r\n comment: str = None, deadline: datetime = None, description: str = None, parent_id: int = None,\r\n creator_id: int = None, service_stage_id: int = None, is_private_comment: bool = None,\r\n is_mass_incident: bool = None, completion_status: int = None, asset_ids: List[int] = None,\r\n category_ids: List[int] = None, executor_ids: List[int] = None, coordinator_ids: List[int] = None,\r\n executor_group_id: int = None, observer_ids: List[int] = None, files: List[Tuple[str, FileIO]] = None,\r\n deleted_files: List[str] = None): # ToDo: Fields ?\r\n task = get_new_task_example(service_id, type_id)['Task']\r\n task['Name'] = name\r\n task['ServiceId'] = service_id\r\n task['StatusId'] = status_id\r\n task['PriorityId'] = priority_id\r\n task['TypeId'] = type_id\r\n if comment is not None:\r\n task['Comment'] = comment\r\n if deadline is not None:\r\n task['Deadline'] = datetime_to_str(deadline)\r\n if description is not None:\r\n task['Description'] = description\r\n if parent_id is not None:\r\n task['ParentId'] = parent_id\r\n if creator_id is not None:\r\n task['CreatorId'] = creator_id\r\n if service_stage_id is not None:\r\n task['ServiceStageId'] = service_stage_id\r\n if is_private_comment is not None:\r\n task['IsPrivateComment'] = is_private_comment\r\n if is_mass_incident is not None:\r\n task['IsMassIncident'] = is_mass_incident\r\n if completion_status is not None:\r\n task['CompletionStatus'] = completion_status\r\n if asset_ids is not None:\r\n task['AssetIds'] = asset_ids\r\n if category_ids is not None:\r\n task['CategoryIds'] = category_ids\r\n if executor_ids is not None and executor_ids:\r\n task['ExecutorIds'] = list_to_str(executor_ids)\r\n if coordinator_ids is not None and coordinator_ids:\r\n task['CoordinatorIds'] = list_to_str(coordinator_ids)\r\n if executor_group_id is not None:\r\n task['ExecutorGroupId'] = executor_group_id\r\n if observer_ids is not None and observer_ids:\r\n task['ObserverIds'] = list_to_str(observer_ids)\r\n if files is not None and files:\r\n task['FileTokens'] = load_files(files)\r\n if deleted_files is not None and deleted_files:\r\n task['DeletedFiles'] = list_to_str(deleted_files, '|')\r\n return make_request('post', 'task', json_k=task)\r\n\r\n\r\ndef change_task(id_: int, name: str = None, reason: int = None, service_id: int = None, status_id: int = None,\r\n priority_id: int = None, type_id: int = None, comment: str = None, deadline: datetime = None,\r\n description: str = None, parent_id: int = None, creator_id: int = None, service_stage_id: int = None,\r\n is_private_comment: bool = None, is_mass_incident: bool = None, completion_status: int = None,\r\n asset_ids: List[int] = None, category_ids: List[int] = None, executor_ids: List[int] = None,\r\n coordinator_ids: List[int] = None, executor_group_id: int = None, observer_ids: List[int] = None,\r\n files: List[Tuple[str, FileIO]] = None, deleted_files: List[str] = None, evaluation_id: int = None,\r\n reaction_date: datetime = None, reaction_date_fact: datetime = None,\r\n resolution_date_fact: datetime = None, coordinate: bool = None):\r\n task = {}\r\n if name is not None:\r\n task['Name'] = name\r\n if reason is not None:\r\n task[\"field1129\"] = reason\r\n if service_id is not None:\r\n task['ServiceId'] = service_id\r\n if status_id is not None:\r\n task['StatusId'] = status_id\r\n if priority_id is not None:\r\n task['PriorityId'] = priority_id\r\n 
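# [Editor's aside: a hypothetical refactoring sketch, not part of this API
# wrapper.] The long "if value is not None: task[key] = value" chains in
# create_task and change_task implement "send a field only when the caller
# supplied it". For the plain pass-through fields, a dict comprehension
# expresses the same rule; fields that need conversion (datetime_to_str,
# list_to_str) would still be handled separately.
def optional_fields(**kwargs):
    # Keep only the parameters that were actually provided.
    return {key: value for key, value in kwargs.items() if value is not None}

print(optional_fields(Name='Reboot server', StatusId=None, PriorityId=2))
# -> {'Name': 'Reboot server', 'PriorityId': 2}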
if type_id is not None:\r\n task['TypeId'] = type_id\r\n if comment is not None:\r\n task['Comment'] = comment\r\n if deadline is not None:\r\n task['Deadline'] = datetime_to_str(deadline)\r\n if description is not None:\r\n task['Description'] = description\r\n if parent_id is not None:\r\n task['ParentId'] = parent_id\r\n if creator_id is not None:\r\n task['CreatorId'] = creator_id\r\n if service_stage_id is not None:\r\n task['ServiceStageId'] = service_stage_id\r\n if is_private_comment is not None:\r\n task['IsPrivateComment'] = is_private_comment\r\n if is_mass_incident is not None:\r\n task['IsMassIncident'] = is_mass_incident\r\n if completion_status is not None:\r\n task['CompletionStatus'] = completion_status\r\n if asset_ids is not None:\r\n task['AssetIds'] = asset_ids\r\n if category_ids is not None:\r\n task['CategoryIds'] = category_ids\r\n if executor_ids is not None and executor_ids:\r\n task['ExecutorIds'] = list_to_str(executor_ids)\r\n if coordinator_ids is not None and coordinator_ids:\r\n task['CoordinatorIds'] = list_to_str(coordinator_ids)\r\n if executor_group_id is not None:\r\n task['ExecutorGroupId'] = executor_group_id\r\n if observer_ids is not None and observer_ids:\r\n task['ObserverIds'] = list_to_str(observer_ids)\r\n if files is not None and files:\r\n task['FileTokens'] = load_files(files)\r\n if deleted_files is not None and deleted_files:\r\n task['DeletedFiles'] = list_to_str(deleted_files, '|')\r\n if evaluation_id is not None:\r\n task['EvaluationId'] = evaluation_id\r\n if reaction_date is not None:\r\n task['ReactionDate'] = datetime_to_str(reaction_date)\r\n if reaction_date_fact is not None:\r\n task['ReactionDateFact'] = datetime_to_str(reaction_date_fact)\r\n if resolution_date_fact is not None:\r\n task['ResolutionDateFact'] = datetime_to_str(resolution_date_fact)\r\n if coordinate is not None:\r\n task['Coordinate'] = coordinate\r\n return make_request('put', f'task/{id_}', json_k=task)\r\n\r\n\r\ndef get_task_url(id_: int) -> str:\r\n return f'{config.BASIC_URL}/Task/View/{id_}'\r\n\r\n\r\ndef get_task_lifetime(taskid: int, include: List[str] = None, last_comments_on_top: bool = None):\r\n params = dict()\r\n params['taskid'] = taskid\r\n if last_comments_on_top is not None:\r\n params['lastcommentsontop'] = last_comments_on_top\r\n if include is not None and include:\r\n return make_request('get', 'tasklifetime', params=params)\r\n else:\r\n result = make_request('get', 'tasklifetime', params=params)\r\n return result['TaskLifetimes'] if type(result) is not APIError else result\r\n","sub_path":"IntraServiceAPI/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"271317264","text":"import warnings\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import LinearSVR\nfrom sklearn.svm import SVC\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n# Load dataset\niris = datasets.load_iris()\nX = iris\nX = iris[\"data\"][:, (2, 3)]\n\n############################\n# 
Show data\n############################\n# plt.plot(X[:,0], X[:,1], \"o\", label=\"SGD\")\n# plt.show()\n\ny = (iris[\"target\"] == 2).astype(np.float64) # Iris-Virginica\n# Linear classification\nsvm_clf = Pipeline(((\"scaler\", StandardScaler()), (\"linear_svc\", LinearSVC(C=1, loss=\"hinge\")),))\nsvm_clf.fit(X, y)\nprint(\"yes\" if svm_clf.predict([[5.5, 1.7]]) == 1.0 else \"no\")\n\n# Add a pypeline using polynomial features\npolynomial_svm_clf = Pipeline(((\"poly_features\", PolynomialFeatures(degree=3)), (\"scaler\", StandardScaler()),\n (\"svm_clf\", LinearSVC(C=10, loss=\"hinge\"))))\npolynomial_svm_clf.fit(X, y)\nprint(\"yes\" if polynomial_svm_clf.predict([[5.5, 1.7]]) == 1.0 else \"no\")\n\npoly_kernel_svm_clf = Pipeline(((\"scaler\", StandardScaler()), (\"svm_clf\", SVC(kernel=\"poly\", degree=3, coef0=1, C=5))))\npoly_kernel_svm_clf.fit(X, y)\n\n# It is also possible to handle regression tasks\n# with a SVM. Let's try:\n# Linear / SVR is the regression equivalent of the SVC class\nsvm_reg = LinearSVR(epsilon=1.5)\nsvm_reg.fit(X, y)\n\n############################\n# Experiment with PCA\n############################\nfrom sklearn.decomposition import PCA\n\n# Plot 2D and 3D against each other\ncopy = datasets.load_iris()\nX = copy[\"data\"]\npca = PCA(n_components=2)\nX2D = pca.fit_transform(X)\nfig = plt.figure(figsize=(8, 6))\nax1 = fig.add_subplot(121, projection='3d')\nax1.plot3D(X[:, 0], X[:, 1], X[:, 2], 'o')\nax2 = fig.add_subplot(122)\nax2.plot(X2D[:, 0], X2D[:, 1], 'o')\nplt.show()\n\n# Find the dimension in which 95% of the\n# variance is preserved\npca = PCA()\npca.fit(X)\nprint(pca.explained_variance_ratio_)\ncumsum = np.cumsum(pca.explained_variance_ratio_)\nprint(cumsum)\nd = np.argmax(cumsum >= 0.95) + 1\n# OR\npca = PCA(n_components=0.95)\nX_reduced = pca.fit_transform(X)\nprint(d)\n\n############################\n# Use grid search to find best kernel and gamma value\n# for PCA\n############################\nclf = Pipeline([(\"kpca\", KernelPCA(n_components=2)), (\"log_reg\", LogisticRegression())])\n\nparam_grid = [{\n \"kpca__gamma\": np.linspace(0.03, 0.05, 10),\n \"kpca__kernel\": [\"rbf\", \"sigmoid\"]\n}]\ngrid_search = GridSearchCV(clf, param_grid, cv=3)\ngrid_search.fit(X, y)\n","sub_path":"Hands On Machine Learning Exercises/HoML SVMs.py","file_name":"HoML SVMs.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"161989937","text":"#!/usr/bin/env python\n#coding=utf-8\n\nfrom struct import Struct\n\ndef write_records(records, format, f):\n record_struct = Struct(format)\n for r in records:\n f.write(record_struct.pack(*r))\n\n#读取这个文件并返回一个元组列表,以块的形式增量读取文件\ndef read_records(format, f):\n record_struct = Struct(format)\n chunks = iter(lambda: f.read(record_struct.size), b'')\n return (record_struct.unpack(chunk) for chunk in chunks)\n\n#将整个文件一次性读取到一个字节字符串中,然后在分片解析\ndef unpack_records(format, data):\n record_struct = Struct(format)\n return (record_struct.unpack_from(data, offset)\n for offset in range(0, len(data), record_struct.size))\n\n\nif __name__ == '__main__':\n records = [ (1, 2.3, 4.5),\n (6, 7.8, 9.0),\n (12, 13.4, 56.7) ]\n\n # with open('struct1.bin', 'wb') as f:\n # write_records(records, ' QTMProtocol:\n return QTMProtocol(loop=event_loop)\n\n\n@pytest.mark.asyncio\nasync def test_send_command_not_connected(qtmprotocol: QTMProtocol):\n\n with pytest.raises(QRTCommandException):\n await 
qtmprotocol.send_command(\"dummy\")\n\n\n@pytest.mark.asyncio\nasync def test_await_any_event_timeout(qtmprotocol: QTMProtocol):\n awaitable = qtmprotocol.await_event(timeout=0.1)\n with pytest.raises(asyncio.TimeoutError):\n await awaitable\n\n\n@pytest.mark.asyncio\nasync def test_await_any_event(event_loop, qtmprotocol: QTMProtocol):\n awaitable = qtmprotocol.await_event(timeout=1)\n event_loop.call_later(0, lambda: qtmprotocol._on_event(QRTEvent.EventConnected))\n result = await awaitable\n\n assert result == QRTEvent.EventConnected\n\n\n@pytest.mark.asyncio\nasync def test_await_specific_event(event_loop, qtmprotocol: QTMProtocol):\n awaitable = qtmprotocol.await_event(event=QRTEvent.EventConnected, timeout=1)\n event_loop.call_later(0, lambda: qtmprotocol._on_event(QRTEvent.EventConnected))\n result = await awaitable\n\n assert result == QRTEvent.EventConnected\n\n\n@pytest.mark.asyncio\nasync def test_await_event_multiple(event_loop, qtmprotocol: QTMProtocol):\n awaitable = qtmprotocol.await_event(event=QRTEvent.EventConnected, timeout=1)\n\n event_loop.call_later(\n 0, lambda: qtmprotocol._on_event(QRTEvent.EventConnectionClosed)\n )\n event_loop.call_later(0.1, lambda: qtmprotocol._on_event(QRTEvent.EventConnected))\n\n result = await awaitable\n\n assert result == QRTEvent.EventConnected\n\n\n@pytest.mark.asyncio\nasync def test_await_multiple(qtmprotocol: QTMProtocol):\n loop = asyncio.get_event_loop()\n awaitable1 = loop.create_task(qtmprotocol.await_event(event=QRTEvent.EventConnected))\n awaitable2 = loop.create_task(qtmprotocol.await_event(event=QRTEvent.EventConnectionClosed))\n\n done, _ = await asyncio.wait(\n [awaitable1, awaitable2], return_when=asyncio.FIRST_EXCEPTION\n )\n\n print(done)\n \n with pytest.raises(Exception):\n done.pop().result()\n","sub_path":"test/qtmprotocol_test.py","file_name":"qtmprotocol_test.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"625916627","text":"#! /usr/bin/python\n\n\"\"\"Copyright 2018 SopraSteria.\nThis work is licensed under my intership in SopraSteria\"\"\"\n\n__author__=\"Ouafik Salaheddine\"\n__version__=\"1.0.0\"\n__date__ =\"26/07/2018 19:00h\"\n\n#Basic imports\nimport sys\nfrom time import sleep\nimport random\n\n# Import Adafruit IO MQTT client.\nfrom Adafruit_IO import MQTTClient\n\n# Set to your Adafruit IO key.\nADAFRUIT_IO_KEY = 'c527e85da54a4d8cab58f70de6a7d7f3'\n\n# Set to your Adafruit IO username.\nADAFRUIT_IO_USERNAME = 'salahEo'\n\n\n# Define callback functions which will be called when certain events happen.\ndef connected(client):\n print('Connected to Adafruit IO! 
Listening for changes...')\n # Subscribe to changes on a feed named Weight.\n for feed_id in ['poidruche' , 'gx' , 'gy' , 'position']:\n client.subscribe(feed_id)\n\ndef disconnected(client):\n # Disconnected function will be called when the client disconnects.\n print('Disconnected from Adafruit IO!')\n sys.exit(1)\n\ndef message(client, feed_id, payload):\n print('Feed {0} received new value: {1}'.format(feed_id, payload))\n\ndef afficheTable(table):\n print('Valeur du tableau :')\n for element in table :\n print(element)\n\ndef remplirTable(table):\n index1 = random.randint(0,3)\n index2 = random.randint(0,3)\n value1 = random.randint(30,50)\n value2 = random.randint(30,50)\n\n table[index1]= value1\n table[index2]= value2\n\ndef centreGx(table,poidRuche):\n Gx=((table[2]+table[3])-(table[0]+table[1]))/poidRuche\n return round(Gx,2)\n\ndef centreGy(table,poidRuche):\n Gy=((table[0]+table[3])-(table[1]+table[2]))/poidRuche\n return round(Gy,2)\n\ndef gestionPosition(Gx,Gy):\n position=-1\n if(Gx*Gy>0):\n if Gx>=0 :\n position=3\n else:\n position=1\n else:\n if Gx<0 :\n position=0\n else:\n position=2\n return position\n\ntable=[0,1,2,3]\n\ndef flushData(client):\n remplirTable(table)\n afficheTable(table)\n poidRuche=table[0]\n for i in range(1,4):\n poidRuche+= table[i] \n print('Publishing {0} to PoidRuche.'.format(poidRuche))\n client.publish('poidruche', poidRuche)\n Gx=centreGx(table,poidRuche)\n Gy= centreGy(table,poidRuche)\n position=gestionPosition(Gx,Gy)\n print('Publishing {0} to Gx.'.format(Gx))\n client.publish('gx', Gx)\n print('Publishing {0} to Gy.'.format(Gy))\n client.publish('gy', Gy)\n print('Publishing {0} to Position.'.format(position))\n client.publish('position', position)\n \n# Create an MQTT client instance.\nclient = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\n\n# Setup the callback functions defined above.\nclient.on_connect = connected\nclient.on_disconnect = disconnected\nclient.on_message = message\n\n# Connect to the Adafruit IO server.\nclient.connect()\nclient.loop_background()\nsleep(10)\n # Now send new values every 15 seconds.\nprint('Publishing a new message every 10 seconds (press Ctrl-C to quit)...')\nfor x in range(0,20):\n flushData(client)\n sleep(10)\nprint('finish')\n\n\n\n","sub_path":"hive_data_simulation.py","file_name":"hive_data_simulation.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"204149399","text":"import pytest\nimport pytest_mock\n\nfrom ex02.motion import Translation, Rotation\nfrom ex02.robot import Robot, Transmitter, MotionController, Navigator, EnergySupplier\nfrom ex02.telecom import Telecom, Command\n\n\nclass TestRobotSolution_2_1:\n\n def test_mocking_a_robot(self, mocker):\n robot = mocker.Mock(spec=Robot)\n\n @pytest.fixture()\n def init_robot(self, mocker):\n transmitter = mocker.Mock(spec=Transmitter)\n motion_controller = mocker.Mock(spec=MotionController)\n navigator = mocker.Mock(spec=Navigator)\n energy_supplier = mocker.Mock(spec=EnergySupplier)\n\n robot = Robot(transmitter=transmitter,\n motion_controller=motion_controller,\n navigator=navigator,\n energy_supplier=energy_supplier)\n\n return robot, transmitter, motion_controller, navigator, energy_supplier\n\n\n\n def test_is_moving_default_not(self, init_robot):\n # -- given --\n robot, *_ = init_robot\n # -- then --\n assert not robot.is_moving()\n\n def test_is_moving(self, init_robot):\n # -- given --\n robot, *_ = init_robot\n # -- when --\n 
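# [Editor's sketch for the hive-monitoring record above; it is not part of the
# surrounding test file.] gestionPosition() maps the sign pattern of the
# centre-of-mass offsets (Gx, Gy) to a quadrant index 0-3. A pure-Python
# restatement with a quick check covering all four quadrants:
def quadrant(gx, gy):
    # Same branch structure as gestionPosition in hive_data_simulation.py.
    if gx * gy > 0:
        return 3 if gx >= 0 else 1
    return 0 if gx < 0 else 2

print([quadrant(x, y) for x, y in [(1, 1), (-1, -1), (-1, 1), (1, -1)]])
# -> [3, 1, 0, 2]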
robot.status = Robot.STATUS_MOVING\n # -- then --\n assert robot.is_moving()\n\n\n def test_exchange_through_transmitter(self, init_robot):\n # -- given --\n robot, transmitter, *_ = init_robot\n # -- when --\n robot.exchange(Telecom(command=Command.MOVING))\n # -- then --\n transmitter.exchange.assert_called_once()\n\n def test_load_positions_when_energy_supplier_has_enough_energy(self, init_robot):\n # -- given --\n robot, _, _, navigator, energy_supplier = init_robot\n motions = []\n navigator.compute_motions.return_value = motions\n # -- when--\n energy_supplier.has_enough.return_value = True\n robot.load_positions([])\n # -- then --\n assert robot.motions is motions\n\n\n def test_load_positions_when_energy_supplier_has_NOT_enough_energy(self, init_robot):\n # -- given --\n robot, _, _, navigator, energy_supplier = init_robot\n motions = []\n navigator.compute_motions.return_value = motions\n # -- when--\n energy_supplier.has_enough.return_value = False\n with pytest.raises(ValueError):\n robot.load_positions(motions)\n # -- then --\n assert len(robot.motions) == 0\n\n def test_load_positions_calls(self, init_robot):\n # -- given --\n robot, transmitter, motion_controller, navigator, energy_supplier = init_robot\n # -- when--\n energy_supplier.has_enough.return_value = True\n robot.load_positions([])\n # -- then --\n navigator.compute_motions.assert_called_once\n navigator.compute_total_distance.assert_called_once\n motion_controller.get_required_energy_for.assert_called_once\n energy_supplier.has_enough.assert_called_once\n\n\n def test_run_without_motions(self, init_robot):\n # --given--\n robot, *_ = init_robot\n\n # -- when--\n robot.motions = []\n with pytest.raises(ValueError):\n robot.run()\n\n\n def test_run_with_motions(self, mocker, init_robot):\n # -- given --\n robot, transmitter, motion_controller, navigator, energy_supplier = init_robot\n # -- when--\n translation = mocker.Mock(spec=Translation)\n rotation = mocker.Mock(spec=Rotation)\n robot.motions = [translation, rotation, translation]\n robot.run()\n # -- then --\n # - get the arguments\n assert motion_controller.move.call_count == 3\n","sub_path":"test/test_robot_solution_2_1.py","file_name":"test_robot_solution_2_1.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"379289080","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"augmentation params settings\"\"\"\n\nimport os\nfrom copy import deepcopy\n\nimport numpy as np\nfrom src.nnunet.training.data_augmentation.custom_transforms import Convert3DTo2DTransform, Convert2DTo3DTransform, \\\n MaskTransform, ConvertSegmentationToRegionsTransform\nfrom src.nnunet.training.data_augmentation.pyramid_augmentations import MoveSegAsOneHotToData, \\\n ApplyRandomBinaryOperatorTransform, \\\n 
RemoveRandomConnectedComponentFromOneHotEncodingTransform\nfrom batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter\nfrom batchgenerators.transforms.abstract_transforms import Compose\nfrom batchgenerators.transforms.channel_selection_transforms import DataChannelSelectionTransform, \\\n SegChannelSelectionTransform\nfrom batchgenerators.transforms.color_transforms import GammaTransform\nfrom batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform\nfrom batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor\nfrom batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d\ndefault_3D_augmentation_params = {\n \"selected_data_channels\": None,\n \"selected_seg_channels\": None,\n\n \"do_elastic\": True,\n \"elastic_deform_alpha\": (0., 900.),\n \"elastic_deform_sigma\": (9., 13.),\n \"p_eldef\": 0.2,\n\n \"do_scaling\": True,\n \"scale_range\": (0.85, 1.25),\n \"independent_scale_factor_for_each_axis\": False,\n \"p_independent_scale_per_axis\": 1,\n \"p_scale\": 0.2,\n\n \"do_rotation\": True,\n \"rotation_x\": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n \"rotation_y\": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n \"rotation_z\": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n \"rotation_p_per_axis\": 1,\n \"p_rot\": 0.2,\n\n \"random_crop\": False,\n \"random_crop_dist_to_border\": None,\n\n \"do_gamma\": True,\n \"gamma_retain_stats\": True,\n \"gamma_range\": (0.7, 1.5),\n \"p_gamma\": 0.3,\n\n \"do_mirror\": True,\n \"mirror_axes\": (0, 1, 2),\n\n \"dummy_2D\": False,\n \"mask_was_used_for_normalization\": None,\n \"border_mode_data\": \"constant\",\n\n \"all_segmentation_labels\": None, # used for cascade\n \"move_last_seg_chanel_to_data\": False, # used for cascade\n \"cascade_do_cascade_augmentations\": False, # used for cascade\n \"cascade_random_binary_transform_p\": 0.4,\n \"cascade_random_binary_transform_p_per_label\": 1,\n \"cascade_random_binary_transform_size\": (1, 8),\n \"cascade_remove_conn_comp_p\": 0.2,\n \"cascade_remove_conn_comp_max_size_percent_threshold\": 0.15,\n \"cascade_remove_conn_comp_fill_with_other_class_p\": 0.0,\n\n \"do_additive_brightness\": False,\n \"additive_brightness_p_per_sample\": 0.15,\n \"additive_brightness_p_per_channel\": 0.5,\n \"additive_brightness_mu\": 0.0,\n \"additive_brightness_sigma\": 0.1,\n\n \"num_threads\": 12 if 'nnUNet_n_proc_DA' not in os.environ else int(os.environ['nnUNet_n_proc_DA']),\n \"num_cached_per_thread\": 1,\n}\n\ndefault_2D_augmentation_params = deepcopy(default_3D_augmentation_params)\n\ndefault_2D_augmentation_params[\"elastic_deform_alpha\"] = (0., 200.)\ndefault_2D_augmentation_params[\"elastic_deform_sigma\"] = (9., 13.)\ndefault_2D_augmentation_params[\"rotation_x\"] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)\ndefault_2D_augmentation_params[\"rotation_y\"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)\ndefault_2D_augmentation_params[\"rotation_z\"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)\n\n# sometimes you have 3d data and a 3d net but cannot augment them properly in 3d due to anisotropy (which is currently\n# not supported in batchgenerators). 
In that case you can 'cheat' and transfer your 3d data into 2d data and\n# transform them back after augmentation\ndefault_2D_augmentation_params[\"dummy_2D\"] = False\ndefault_2D_augmentation_params[\"mirror_axes\"] = (0, 1) # this can be (0, 1, 2) if dummy_2D=True\n\n\ndef get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):\n \"\"\"get patch size\"\"\"\n if isinstance(rot_x, (tuple, list)):\n rot_x = max(np.abs(rot_x))\n if isinstance(rot_y, (tuple, list)):\n rot_y = max(np.abs(rot_y))\n if isinstance(rot_z, (tuple, list)):\n rot_z = max(np.abs(rot_z))\n rot_x = min(90 / 360 * 2. * np.pi, rot_x)\n rot_y = min(90 / 360 * 2. * np.pi, rot_y)\n rot_z = min(90 / 360 * 2. * np.pi, rot_z)\n\n coords = np.array(final_patch_size)\n final_shape = np.copy(coords)\n if len(coords) == 3:\n final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0)\n final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0)\n final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0)\n elif len(coords) == 2:\n final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)\n final_shape /= min(scale_range)\n return final_shape.astype(int)\n\n\ndef get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=None,\n border_val_seg=-1, pin_memory=True,\n seeds_train=None, seeds_val=None, regions=None):\n \"\"\"get default augmentation\"\"\"\n assert params.get('mirror') is None, \"old version of params, use new keyword do_mirror\"\n if params is None:\n params = default_3D_augmentation_params\n tr_transforms = []\n\n if params.get(\"selected_data_channels\") is not None:\n tr_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n\n if params.get(\"selected_seg_channels\") is not None:\n tr_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!\n if params.get(\"dummy_2D\") is not None and params.get(\"dummy_2D\"):\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None, do_elastic_deform=params.get(\"do_elastic\"),\n alpha=params.get(\"elastic_deform_alpha\"), sigma=params.get(\"elastic_deform_sigma\"),\n do_rotation=params.get(\"do_rotation\"), angle_x=params.get(\"rotation_x\"), angle_y=params.get(\"rotation_y\"),\n angle_z=params.get(\"rotation_z\"), do_scale=params.get(\"do_scaling\"), scale=params.get(\"scale_range\"),\n border_mode_data=params.get(\"border_mode_data\"), border_cval_data=0, order_data=3, border_mode_seg=\"constant\",\n border_cval_seg=border_val_seg,\n order_seg=1, random_crop=params.get(\"random_crop\"), p_el_per_sample=params.get(\"p_eldef\"),\n p_scale_per_sample=params.get(\"p_scale\"), p_rot_per_sample=params.get(\"p_rot\"),\n independent_scale_for_each_axis=params.get(\"independent_scale_factor_for_each_axis\")\n ))\n if params.get(\"dummy_2D\") is not None and params.get(\"dummy_2D\"):\n tr_transforms.append(Convert2DTo3DTransform())\n\n if params.get(\"do_gamma\"):\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), False, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=params[\"p_gamma\"]))\n\n if params.get(\"do_mirror\"):\n 
tr_transforms.append(MirrorTransform(params.get(\"mirror_axes\")))\n\n if params.get(\"mask_was_used_for_normalization\") is not None:\n mask_was_used_for_normalization = params.get(\"mask_was_used_for_normalization\")\n tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))\n\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"):\n tr_transforms.append(MoveSegAsOneHotToData(1, params.get(\"all_segmentation_labels\"), 'seg', 'data'))\n if params.get(\"cascade_do_cascade_augmentations\") and not None and params.get(\n \"cascade_do_cascade_augmentations\"):\n tr_transforms.append(ApplyRandomBinaryOperatorTransform(\n channel_idx=list(range(-len(params.get(\"all_segmentation_labels\")), 0)),\n p_per_sample=params.get(\"cascade_random_binary_transform_p\"),\n key=\"data\",\n strel_size=params.get(\"cascade_random_binary_transform_size\")))\n tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(\n channel_idx=list(range(-len(params.get(\"all_segmentation_labels\")), 0)),\n key=\"data\",\n p_per_sample=params.get(\"cascade_remove_conn_comp_p\"),\n fill_with_other_class_p=params.get(\"cascade_remove_conn_comp_max_size_percent_threshold\"),\n dont_do_if_covers_more_than_X_percent=params.get(\"cascade_remove_conn_comp_fill_with_other_class_p\")))\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n\n tr_transforms = Compose(tr_transforms)\n\n batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"), seeds=seeds_train,\n pin_memory=pin_memory)\n\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n if params.get(\"selected_data_channels\") is not None:\n val_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n if params.get(\"selected_seg_channels\") is not None:\n val_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"):\n val_transforms.append(MoveSegAsOneHotToData(1, params.get(\"all_segmentation_labels\"), 'seg', 'data'))\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n\n batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"), seeds=seeds_val,\n pin_memory=pin_memory)\n return batchgenerator_train, batchgenerator_val\n","sub_path":"research/cv/nnUNet/src/nnunet/training/data_augmentation/default_data_augmentation.py","file_name":"default_data_augmentation.py","file_ext":"py","file_size_in_byte":11505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505870445","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 Mobicage NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nimport datetime\nimport logging\n\nimport cloudstorage\nfrom framework.utils import put_in_chunks, get_epoch_from_datetime\nfrom plugins.trash_calendar.consts import TrashCollector, TrashActivity, \\\n    DEFAULT_COUNTRY\nfrom plugins.trash_calendar.models.common import Street, UnconnectedStreet, Collection\nfrom plugins.trash_calendar.utils import read_csv\n\n\ndef import_streets(collection_key, country, postal_code, gcs_path):\n    # an assert on a parenthesized tuple is always truthy; the condition and message must be separate\n    assert '-' in collection_key, 'Invalid collection key %s' % collection_key\n    streets = _read_csv(gcs_path)\n    logging.info(\"import_streets for %s has %s streets\", collection_key, len(streets))\n\n    current_streets = {}\n    for s in Street.list_by_postal_code(TrashCollector.IDM, country, postal_code):\n        for a in s.aliases:\n            current_streets[a] = s.key\n\n    to_put = []\n    for s in streets:\n        if s['Straat'] in current_streets:\n            continue\n        us_key = UnconnectedStreet.create_key(TrashCollector.IDM, collection_key, s['Straat'])\n        us = us_key.get()\n        if not us:\n            us = UnconnectedStreet(key=us_key)\n            us.manual_conversion = False\n            us.country_code = DEFAULT_COUNTRY\n            us.collection_key = collection_key\n            us.name = s['Straat']\n            us.postal_code = s['Postcode']\n            us.city = s['Gemeente']\n            to_put.append(us)\n\n    if to_put:\n        logging.info('put %s items', len(to_put))\n        put_in_chunks(to_put)\n\n\ndef import_collection_data(country, collection_key, gcs_path):\n    collections = _read_csv(gcs_path)\n    logging.info(\"import_collection_data for %s has %s collections\", collection_key, len(collections))\n\n    to_put = []\n    for c in collections:\n        activities = []\n        for k in c:\n            activity = _get_activity_name(k)\n            if not activity:\n                continue\n            if c[k] != u'x':\n                continue\n            activities.append(activity)\n\n        if not activities:\n            continue\n\n        _date = c['Datum'].split('/')\n        _year = int(_date[2])\n        _month = int(_date[1])\n        _day = int(_date[0])\n        epoch = get_epoch_from_datetime(datetime.date(_year, _month, _day))\n        to_put.append(Collection(\n            key=Collection.create_key(TrashCollector.IDM, country, epoch, collection_key),\n            activities=activities,\n            epoch=epoch,\n            year=_year,\n            month=_month,\n            day=_day\n        ))\n\n    if to_put:\n        logging.info('put %s items', len(to_put))\n        put_in_chunks(to_put)\n\n\ndef _read_csv(gcs_path):\n    with cloudstorage.open(gcs_path, 'r') as f:\n        return read_csv(f.read(), delimiter=';', quotechar='\"')\n\n\ndef _get_activity_name(name):\n    if name in (u'Dag', u'Datum', u'WE - Feestdag'):\n        return None\n    mapping = {\n        'PMD': TrashActivity.PLASTIC_METAL_CARTONS,\n        'RESTAFVAL': TrashActivity.REST,\n        'P-K': TrashActivity.PAPER_CARDBOARD,\n        'TEXTIEL': TrashActivity.TEXTILE,\n        'SNOEIAFVAL': TrashActivity.PRUNING_WASTE,\n        'KERSTBOOM': TrashActivity.CHRISTMAS_TREE,\n        'GFT': TrashActivity.VEGETABLE_FRUIT_GARDEN_WASTE,\n    }\n    if name not in mapping:\n        raise Exception('Failed to get _get_activity_name for %s' % name)\n    return 
mapping[name]\n","sub_path":"plugins/trash_calendar/bizz/idm.py","file_name":"idm.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"143990141","text":"def histogram(fdata, **kwargs):\n '''\n Plots the Histrogram for a given Frequency Distribution\n \n Parameters:\n * fdata: dict containing items as key and their frequency as value\n * kwargs: customizable parameters\n * design [default: ===] - design of the bar\n * label [default: True] - weather to show frequency after bar or not\n * align [default:] - weather to align label or not\n * not align will be False if label is False\n \n >>> histogram({2: 6, 9: 4, 4: 1, 3: 1, 1: 2})\n 2\t|============ 6\n 9\t|======== 4\n 4\t|== 1\n 3\t|== 1\n 1\t|==== 2\n \n >>> histogram({2: 6, 9: 4, 4: 1, 3: 1, 1: 2}, label=False)\n 2\t|============ \n 9\t|======== \n 4\t|== \n 3\t|== \n 1\t|==== \n \n >>> histogram({2: 6, 9: 4, 4: 1, 3: 1, 1: 2}, align=True)\n 2\t|============ 6\n 9\t|======== 4\n 4\t|== 1\n 3\t|== 1\n 1\t|==== 2\n\n >>> histogram({2: 6, 9: 4, 4: 1, 3: 1, 1: 2}, align=True, design='::::')\n 2\t|:::::::::::::::::::::::: 6\n 9\t|:::::::::::::::: 4\n 4\t|:::: 1\n 3\t|:::: 1\n 1\t|:::::::: 2\n '''\n \n design=kwargs.get('design','==') \n label=kwargs.get('label',True) \n \n align=kwargs.get('align',False) if label else False\n\n hi=max(fdata.values())*len(design) if align else None\n \n \n for key,value in fdata.items():\n lbl = value if label else '' \n bar= (design * value)\n if hi!=None :\n bar=bar.ljust(hi)\n \n print('{}\\t|{} {}'.format(key,bar,lbl))\n\n\n#help(histogram)\nhistogram({2: 6, 9: 4, 4: 1, 3: 1, 1: 2})","sub_path":"basic-demos/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"421204233","text":"from summit.apps.projects.models import Project\nfrom summit.apps.projects.models import Location\nfrom summit.libs.auth.models import CESU\nfrom summit.libs.auth.models import Organization\nfrom summit.libs.auth.models import UserProfile\nimport csv\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# only non user people are deleted because \n# deleting profiles that have users attached breaks the system\nnon_user_people = UserProfile.objects.exclude(user__isnull=False)\nnon_user_people.delete()\nfor user in UserProfile.objects.all():\n user.cesu = CESU.objects.get(id=1)\nProject.objects.all().delete()\nLocation.objects.all().delete()\nOrganization.objects.all().delete()\n\n# read line\nwith open('CPCESU_Summit-Data-Import_FINAL.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile, quotechar='\"')\n for row in reader:\n is_before_22217 = False\n if (row['Start Date'] != \"\"):\n is_before_22217 = datetime.strptime(row['Start Date'], '%m/%d/%y') < datetime(2017, 2, 22)\n fed_agency = None\n project_partner = None\n project_location = None\n ppi = None\n techrep = None\n pm = None\n if (row['Federal Agency'] != \"\"):\n fed_agency = Organization.objects.get_or_create(\n name = row['Federal Agency'],\n type = 'Federal Agency'\n )[0]\n if (row['Partner'] != \"\"):\n project_partner = Organization.objects.get_or_create(\n name = row['Partner'],\n type = 'Partner'\n )[0]\n if (row['Partner Principal Investigator First Name'] != \"\"):\n ppi = UserProfile.objects.get_or_create(\n first_name = row['Partner Principal Investigator 
First Name'],\n last_name = row['Partner Principal Investigator Last Name'],\n assigned_group = project_partner\n )[0]\n if (row['Federal Tech Rep First Name'] != \"\"):\n techrep = UserProfile.objects.get_or_create(\n first_name = row['Federal Tech Rep First Name'],\n last_name = row['Federal Tech Rep Last Name'],\n assigned_group = fed_agency\n )[0]\n if (row['Federal Project Lead First Name'] != \"\"):\n pm = UserProfile.objects.get_or_create(\n first_name = row['Federal Project Lead First Name'],\n last_name = row['Federal Project Lead Last Name'], \n assigned_group = fed_agency\n )[0]\n new_proj = Project.objects.create(\n cesu_unit = CESU.objects.get(id=1),\n federal_agency = fed_agency,\n partner = project_partner,\n fiscal_year = row['Fiscal Year'],\n p_num = row['Award Number'],\n local_num = row['Local Number'],\n #location = project_location,\n project_title = row['Project Title'],\n type = row['Type'],\n discipline = row['Discipline'],\n \n budget = float(row['Initial Amount'].strip(\"$\").replace(',','')) if row['Initial Amount'] != \"\" else 0,\n added_amount = float(row['Added Amount'].strip(\"$\").replace(',','')) if row['Added Amount'] != \"\" else 0,\n total_amount = float(row['Total Amount'].strip(\"$\").replace(',','')) if row['Total Amount'] != \"\" else 0,\n pp_i = ppi,\n project_manager = pm,\n tech_rep = techrep,\n num_of_students = 0 if row['Students Involved'] == \"No\" else 1,\n sensitive = False if row['Sensitive Data'] == \"No\" else True,\n description = row['Description'],\n notes = row['Notes'],\n init_start_date = datetime.strptime(row['Received Date'], '%m/%d/%y') if row['Received Date'] != \"\" else None,\n reviewed = datetime.strptime(row['Reviewed Date'], '%m/%d/%y') if row['Reviewed Date'] != \"\" else None,\n task_agreement_start_date = datetime.strptime(row['Approved Date'], '%m/%d/%y') if row['Approved Date'] != \"\" else None,\n tent_start_date = datetime.strptime(row['Start Date'], '%m/%d/%y') if row['Start Date'] != \"\" else None,\n tent_end_date = datetime.strptime(row['End Date'], '%m/%d/%y') if row['End Date'] != \"\" else None,\n status = \"CLOSED\" if is_before_22217 else \"AWARDED\",\n award_office = row['Awarding Office']\n )\n if (row['Location'] != \"\"):\n location_arr = (row['Location']).split(\"| \")\n print(location_arr)\n for entry in location_arr:\n new_location = Location.objects.get_or_create(\n abbrv = entry.strip()\n )[0]\n new_proj.location.add(new_location)","sub_path":"add-files.py","file_name":"add-files.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604278345","text":"#\n# @lc app=leetcode id=92 lang=python3\n#\n# [92] Reverse Linked List II\n#\n# https://leetcode.com/problems/reverse-linked-list-ii/description/\n#\n# algorithms\n# Medium (34.66%)\n# Likes: 1251\n# Dislikes: 92\n# Total Accepted: 198.8K\n# Total Submissions: 565K\n# Testcase Example: '[1,2,3,4,5]\\n2\\n4'\n#\n# Reverse a linked list from position m to n. 
Do it in one-pass.\n# \n# Note: 1 ≤ m ≤ n ≤ length of list.\n# \n# Example:\n# \n# \n# Input: 1->2->3->4->5->NULL, m = 2, n = 4\n# Output: 1->4->3->2->5->NULL\n# \n# \n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:\n dummy = cur = ListNode(None)\n cur.next = head\n \n for _ in range(m - 1):\n cur = cur.next\n # the node before the interval\n before = cur\n # nodes in the interval\n nodes = []\n for i in range(n - m + 1):\n cur = cur.next\n nodes.append(cur)\n # the node after the interval\n after = cur.next\n # reconnect\n for n in nodes[::-1]:\n before.next = n\n before = n\n before.next = after\n\n return dummy.next \n\n def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:\n dummy = cur = ListNode(None)\n cur.next = head\n for _ in range(m - 1):\n cur = cur.next\n # the node before the interval\n before = cur\n # reverse nodes between [m, n]\n nodeBefore = None\n cur = cur.next\n for _ in range(n - m + 1):\n nodeAfter = cur.next\n cur.next = nodeBefore\n nodeBefore = cur\n cur = nodeAfter\n # connect\n before.next.next = cur\n before.next = nodeBefore\n\n return dummy.next\n\n","sub_path":"92.reverse-linked-list-ii.py","file_name":"92.reverse-linked-list-ii.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"436605245","text":"'''\n--- Day 4: High-Entropy Passphrases ---\n\n#PART 1\nA new system policy has been put in place that requires all accounts to use a passphrase instead of simply a password. A passphrase consists of a series of words (lowercase letters) separated by spaces.\n\nTo ensure security, a valid passphrase must contain no duplicate words.\n\nFor example:\n\naa bb cc dd ee is valid.\naa bb cc dd aa is not valid - the word aa appears more than once.\naa bb cc dd aaa is valid - aa and aaa count as different words.\nThe system's full passphrase list is available as your puzzle input. How many passphrases are valid?\n\nYour puzzle answer was 477.\n\n _ _\n ((\\o/))\n .-----//^\\\\-----.\n | /`| |`\\ |\n | | | |\n | | | |\n | | | |\n '------===------'\n\n'''\n\n\n\nimport re\nf = open('input.txt')\nspreadsheet = []\nfor line in f:\n\tspreadsheet.append([x for x in line.split()])\n\nnumRows = len(spreadsheet)\n# [x//y for x in line for y in line if (x != y and not x%y)]\nnumValid = 0\n'''\nfor phrase in spreadsheet:\n\tinvalids = []\n\tinvalids = [x for x in phrase for y in phrase if(x=y)]\n\tprint(invalids)\n\tif len(invalids) == 0:\n\t\tnumValid += 1\n\tinvalids.clear()\n'''\nfor phrase in spreadsheet:\n\tinvalids = 0\n\tfor i in range(0, len(phrase)):\n\t\tfor j in range(i+1, len(phrase)):\n\t\t\tif phrase[i] == phrase[j]:\n\t\t\t\tif i != j:\n\t\t\t\t\tinvalids += 1\n\tif invalids == 0:\n\t\t numValid +=1\t\t\t\t\nprint(numValid)\n\n\n'''\n--- Part Two ---\n\nFor added security, yet another system policy has been put in place. 
Now, a valid passphrase must \ncontain no two words that are anagrams of each other - that is, a passphrase is invalid \nif any word's letters can be rearranged to form any other word in the passphrase.\n\nFor example:\n\nabcde fghij is a valid passphrase.\nabcde xyz ecdab is not valid - the letters from the third word can be rearranged to form the first word.\na ab abc abd abf abj is a valid passphrase, because all letters need to be used when forming another word.\niiii oiii ooii oooi oooo is valid.\noiii ioii iioi iiio is not valid - any of these words can be rearranged to form any other word.\nUnder this new system policy, how many passphrases are valid?\n''' \nnumValid = 0\nfor phrase in spreadsheet:\n\tinvalids = 0\n\tfor i in range(0, len(phrase)):\n\t\tfor j in range(0, len(phrase)):\n\t\t\tif sorted(phrase[i]) == sorted(phrase[j]):\n\t\t\t\tif i != j:\n\t\t\t\t\tinvalids +=1\n\tif invalids == 0:\n\t\tnumValid += 1\nprint(numValid)\n\n\n\n\n\n\n\n","sub_path":"Day4/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"28943433","text":"import cv2\n\n#Create object to read images from camera 0\ncam = cv2.VideoCapture(0)\n\nwhile True:\n #Get image from webcam and convert to greyscale\n ret, img = cam.read()\n\n #Display colour image with detected features\n cv2.imshow(\"Camera\", img)\n\n #Sleep infinite loop for ~10ms\n #Exit if user presses \n if cv2.waitKey(10) == 27:\n break\n","sub_path":"test_webcam.py","file_name":"test_webcam.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"262730995","text":"import heapq\n\ndef dijkstra(self, environment):\n\n \"\"\"\n Performs one iteration of Dijkstra's algorithm based on agent's current state.\n Args:\n environment: The current environment\n \"\"\"\n\n # Clean the logs\n self.logs = []\n\n # First iteration\n if self.waitList == None:\n sourceCell = environment.grid[self.location.x][self.location.y]\n self.waitList = [(0, sourceCell)]\n self.distances[sourceCell] = 0\n\n # Exhausted all possible moves\n if len(self.waitList) == 0:\n return\n\n # Pop the minimum element and log the changes\n minElement = heapq.heappop(self.waitList)\n nextCell = minElement[1]\n self.logs.append([self, nextCell, 'visited'])\n\n\n # Iterate over valid neighbours\n for nx, ny in nextCell.location.neighbours:\n\n if not self.isValidMove(environment, nextCell, nx, ny, False):\n continue\n\n # Check if a better path is possible\n neighbour = environment.grid[nx][ny]\n newDistance = self.distances[nextCell] + environment.distance(nextCell, neighbour)\n if neighbour in self.distances and self.distances[neighbour] <= newDistance:\n continue\n\n # Add neighbour to the heap and log the changes\n heapq.heappush(self.waitList, (newDistance, neighbour))\n self.path[neighbour] = nextCell\n self.distances[neighbour] = newDistance\n self.logs.append([self, neighbour, 'waitList'])\n","sub_path":"src/actions/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"380708836","text":"import os\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom utils_asc_3class import *\r\nimport sklearn.metrics as metrics\r\nfrom plots import 
plot_confusion_matrix\r\n\r\n#=========================================================================================================#\r\ndef evaluate_model(interpreter, test_images, test_labels, num_class, is_eval=False):\r\n input_index = interpreter.get_input_details()[0][\"index\"]\r\n output_index = interpreter.get_output_details()[0][\"index\"]\r\n \r\n prediction_digits = []\r\n pred_output_all = np.empty([1, num_class])\r\n for test_image in test_images:\r\n test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\r\n interpreter.set_tensor(input_index, test_image)\r\n \r\n interpreter.invoke()\r\n \r\n output = interpreter.get_tensor(output_index)\r\n pred_output = output[0]\r\n pred_output.reshape([1, num_class])\r\n pred_output_all = np.vstack((pred_output_all, pred_output))\r\n digit = np.argmax(output[0])\r\n prediction_digits.append(digit)\r\n \r\n pred_output_all = pred_output_all[1:,:]\r\n \r\n if is_eval:\r\n return pred_output_all, prediction_digits\r\n else:\r\n accurate_count = 0\r\n for index in range(len(prediction_digits)):\r\n if prediction_digits[index] == test_labels[index]:\r\n accurate_count += 1\r\n accuracy = accurate_count * 1.0 / len(prediction_digits)\r\n return accuracy, pred_output_all, prediction_digits\r\n\r\n#=========================================================================================================#\r\nis_eval = False\r\n\r\nif not is_eval:\r\n data_path = '/home/soonshin/sss/dataset/ASC/2020task1b/TAU-urban-acoustic-scenes-2020-3class-development/'\r\n val_csv = data_path + 'evaluation_setup/fold1_evaluate.csv'\r\n feat_path = 'features/valid_asc_3class_48k_logmel128_norm'\r\n model_path = sys.argv[1]\r\n csv_path = sys.argv[2].replace('.csv','-asc-3class.csv')\r\n \r\nelse:\r\n data_path = '/home/soonshin/sss/dataset/ASC/2020task1b/TAU-urban-acoustic-scenes-2020-3class-development/'\r\n val_csv = data_path + 'evaluation_setup/fold1_evaluate.csv'\r\n feat_path = 'features/valid_asc_3class_48k_logmel128_norm'\r\n model_path = sys.argv[1]\r\n csv_path = sys.argv[2].replace('.csv','-eval.csv')\r\n\r\nnum_freq_bin = 128\r\nnum_classes = 3\r\n\r\nprint (model_path)\r\nprint (csv_path)\r\n\r\n#=========================================================================================================#\r\nif not is_eval:\r\n data_val, y_val = load_data_2020(feat_path, val_csv, num_freq_bin, 'logmel')\r\n #data_deltas_val = deltas(data_val)\r\n #data_deltas_deltas_val = deltas(data_deltas_val)\r\n #data_val = np.concatenate((data_val[:,:,4:-4,:], data_deltas_val[:,:,2:-2,:], data_deltas_deltas_val), axis=-1)\r\n y_val_onehot = tf.keras.utils.to_categorical(y_val, num_classes)\r\n print(data_val.shape)\r\n print(y_val.shape)\r\n \r\n dev_test_df = pd.read_csv(val_csv, sep='\\t', encoding='ASCII')\r\n wav_paths = dev_test_df['filename'].tolist()\r\n class_list = np.unique(dev_test_df['scene_label'])\r\n \r\nelse:\r\n data_val = load_data_2020_evaluate(feat_path, val_csv, num_freq_bin, 'logmel')\r\n #data_deltas_val = deltas(data_val)\r\n #data_deltas_deltas_val = deltas(data_deltas_val)\r\n #data_val = np.concatenate((data_val[:,:,4:-4,:], data_deltas_val[:,:,2:-2,:], data_deltas_deltas_val), axis=-1)\r\n print(data_val.shape)\r\n \r\n dev_test_df = pd.read_csv(val_csv, sep='\\t', encoding='ASCII')\r\n wav_paths = dev_test_df['filename'].tolist()\r\n \r\n for idx, elem in enumerate(wav_paths):\r\n wav_paths[idx] = wav_paths[idx].split('/')[-1]\r\n 
\r\n#=========================================================================================================#\r\ninterpreter_quant = tf.lite.Interpreter(model_path=model_path)\r\ninterpreter_quant.allocate_tensors()\r\n\r\n#=========================================================================================================#\r\n\r\nif not is_eval:\r\n overall_acc, preds, preds_class_idx = evaluate_model(interpreter_quant, \r\n data_val, \r\n y_val, \r\n num_class=num_classes,\r\n is_eval=False)\r\n\r\n over_loss = metrics.log_loss(y_val_onehot, preds)\r\n print(\"\\nval acc: \", \"{0:.4f}\".format(overall_acc))\r\n print(\"val log loss: \", \"{0:.4f}\\n\".format(over_loss))\r\n\r\n y_pred_val = np.argmax(preds, axis=1)\r\n conf_matrix = metrics.confusion_matrix(y_val, y_pred_val)\r\n plot_confusion_matrix(y_val, y_pred_val, class_list, normalize=True, title=None, png_name=csv_path.replace('.csv','.png'))\r\n \r\n overall_accuracy = metrics.accuracy_score(y_val, y_pred_val)\r\n precision_mat = metrics.precision_score(y_val, y_pred_val, average=None, zero_division='warn')\r\n recall_mat = metrics.recall_score(y_val, y_pred_val, average=None, zero_division='warn')\r\n f1_score_mat = metrics.f1_score(y_val, y_pred_val, average=None, zero_division='warn')\r\n precision = metrics.precision_score(y_val, y_pred_val, average='weighted', zero_division='warn')\r\n recall = metrics.recall_score(y_val, y_pred_val, average='weighted', zero_division='warn')\r\n f1_score = metrics.f1_score(y_val, y_pred_val, average='weighted', zero_division='warn')\r\n\r\n print(metrics.classification_report(y_val, y_pred_val))\r\n print(metrics.confusion_matrix(y_val, y_pred_val))\r\n\r\n print(\"\\nper-class precision: \", precision_mat)\r\n print(\"\\nper-class recall: \", recall_mat)\r\n print(\"\\nper-class f1-score: \", f1_score_mat)\r\n\r\n print(\"\\naccuracy :\", overall_accuracy)\r\n print(\"precision :\", precision)\r\n print(\"recall :\", recall)\r\n print(\"f1 score :\", f1_score)\r\n\r\nelse:\r\n preds, preds_class_idx = evaluate_model(interpreter_quant, \r\n data_val, \r\n test_labels=None, \r\n num_class=num_classes,\r\n is_eval=True)\r\n y_pred_val = np.argmax(preds, axis=1)\r\n\r\n#=========================================================================================================#\r\n\r\nscene_map_str = \"\"\"\r\nindoor 0\r\noutdoor 1 \r\ntransportation 2\r\n\"\"\"\r\n\r\nscene_index_map={}\r\nfor line in scene_map_str.strip().split('\\n'):\r\n ch, index = line.split()\r\n scene_index_map[int(index)] = ch\r\nlabels = [str(scene_index_map[c]) for c in y_pred_val]\r\nfilename = [str(a[:]) for a in wav_paths]\r\nleft = {'filename': filename, 'scene_label': labels}\r\nleft_df = pd.DataFrame(left)\r\nright_df = pd.DataFrame(preds, columns = ['indoor',\r\n 'outdoor',\r\n 'transportation'])\r\nmerge = pd.concat([left_df, right_df], axis=1, sort=False)\r\nmerge.to_csv(csv_path, sep = '\\t', index=False)\r\n","sub_path":"3class/eval_quantize_asc_3class.py","file_name":"eval_quantize_asc_3class.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588060295","text":"import smtplib\nfrom email.mime.text import MIMEText\n\n\ndef sentemail(sender_mail,sender_name,sender_msg):\n host = 'smtp.163.com'\n port = 465 # 设置发件服务器端口号。\n sender = '15xxxxxx@163.com' # 设置发件邮箱,一定要自己注册的邮箱\n pwd = 'xxxxx' # 授权码\n receiver = 'Theo_hui@163.com' # 设置邮件接收人\n\n\n #构建body\n body=\"
<h1>\"+sender_mail+\"</h1>\"+\"<h2>\"+sender_name+\" 联系您了\"+\"</h2>\"+\"<p>\"+sender_msg+\"</p>
\"\n\n\n msg = MIMEText(body, 'html') # 设置正文为符合邮件格式的HTML内容\n msg['subject'] = '你有新的一条消息' # 设置邮件标题\n msg['from'] = sender# 设置发送人\n msg['to'] = receiver# 设置接收人\n try:\n s = smtplib.SMTP_SSL(host, port)# 注意!如果是使用SSL端口,这里就要改为SMTP_SSL\n s.login(sender, pwd)# 登陆邮箱\n s.sendmail(sender, receiver, msg.as_string())# 发送邮件!\n except smtplib.SMTPException:\n print(\"邮件发送失败\")","sub_path":"myblog/resume/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291852328","text":"from tkinter import *\r\nfrom threading import Thread\r\nimport threading\r\nimport os\r\nimport time \r\nfrom tkinter import messagebox\r\nimport tkinter.scrolledtext as tkscrolled\r\nfrom WiFiClient import NodeMCU\r\n\r\nroot=Tk()\r\nroot.title('Proyecto II')\r\nroot.minsize(800,400)\r\nroot.resizable(width=NO,height=NO)\r\n\r\nC_root=Canvas(root, width=800,height=600)\r\nC_root.place(x=0,y=0)\r\n\r\nL_Titulo = Label(C_root,text=\"Mensajes Enviados\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=100,y=5)\r\n\r\nL_Titulo = Label(C_root,text=\"Respuesta Mensaje\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=490,y=5)\r\n\r\nSentCarScrolledTxt = tkscrolled.ScrolledText(C_root, height=10, width=45)\r\nSentCarScrolledTxt.place(x=10,y=40)\r\n\r\nRevCarScrolledTxt = tkscrolled.ScrolledText(C_root, height=10, width=45)\r\nRevCarScrolledTxt.place(x=400,y=40)\r\n\r\nmyCar = NodeMCU()\r\nmyCar.start()\r\n\r\n\r\ndef get_log():\r\n indice = 0\r\n # Variable del carro que mantiene el hilo de escribir.\r\n while(myCar.loop):\r\n while(indice < len(myCar.log)):\r\n mnsSend = \"[{0}] cmd: {1}\\n\".format(indice,myCar.log[indice][0])\r\n SentCarScrolledTxt.insert(END,mnsSend)\r\n SentCarScrolledTxt.see(\"end\")\r\n\r\n mnsRecv = \"[{0}] result: {1}\\n\".format(indice,myCar.log[indice][1])\r\n RevCarScrolledTxt.insert(END, mnsRecv)\r\n RevCarScrolledTxt.see('end')\r\n\r\n indice+=1\r\n time.sleep(0.200)\r\n \r\np = Thread(target=get_log)\r\np.start()\r\n \r\nL_Titulo = Label(C_root,text=\"Comandos:\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=330,y=250)\r\n\r\n\r\nL_Titulo = Label(C_root,text=\"Sense; \\t \\t \\t Infinite;\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=220,y=280)\r\n\r\nL_Titulo = Label(C_root,text=\"ZigZag; \\t \\t \\t Indeciso;\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=220,y=310)\r\n\r\nL_Titulo = Label(C_root,text=\"Parpadeo; \\t \\t girarFacil;\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=220,y=340)\r\n\r\n\r\nL_Titulo = Label(C_root,text=\"Mensaje:\",font=('Arial',14),fg='black')\r\nL_Titulo.place(x=100,y=210)\r\n\r\nE_Command = Entry(C_root,width=30,font=('Arial',14))\r\nE_Command.place(x=200,y=210)\r\n\r\n\r\ndef send (event):\r\n mns = str(E_Command.get())\r\n if(len(mns)>0 and mns[-1] == \";\"):\r\n E_Command.delete(0, 'end')\r\n myCar.send(mns)\r\n else:\r\n messagebox.showwarning(\"Error del mensaje\", \"Mensaje sin caracter de finalización (';')\") \r\n\r\n\r\ndef sendShowID():\r\n mns = str(E_Command.get())\r\n if(len(mns)>0 and mns[-1] == \";\"):\r\n E_Command.delete(0, 'end')\r\n mnsID = myCar.send(mns)\r\n messagebox.showinfo(\"Mensaje pendiente\", \"Intentando enviar mensaje, ID obtenido: {0}\\n\\\r\nLa respuesta definitiva se obtine en un máximo de {1}s\".format(mnsID, myCar.timeoutLimit))\r\n \r\n else:\r\n messagebox.showwarning(\"Error del mensaje\", \"Mensaje sin caracter de finalización (';')\")\r\n\r\ndef read():\r\n mnsID = str(E_read.get())\r\n 
if(len(mnsID)>0 and \":\" in mnsID):\r\n mns = myCar.readById(mnsID)\r\n if(mns != \"\"):\r\n messagebox.showinfo(\"Resultado Obtenido\", \"El mensaje con ID:{0}, obtuvo de respuesta:\\n{1}\".format(mnsID, mns))\r\n E_read.delete(0, 'end')\r\n else:\r\n messagebox.showerror(\"Error de ID\", \"No se obtuvo respuesta\\n\\\r\nEl mensaje no ha sido procesado o el ID es invalido\\n\\\r\nAsegurese que el ID: {0} sea correcto\".format(mnsID))\r\n\r\n else:\r\n messagebox.showwarning(\"Error en formato\", \"Recuerde ingresar el separador (':')\")\r\n\r\nroot.bind('', send)\r\n\r\nBtn_ConnectControl = Button(C_root,text='Enviar',command=lambda:send(None),fg='white',bg='black', font=('Arial',12))\r\nBtn_ConnectControl.place(x=550,y=210)\r\n\r\nroot.mainloop()\r\n","sub_path":"Interfaz.py","file_name":"Interfaz.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"400246858","text":"import unittest\nfrom contenedores import *\ndef vol_calcmock(input1,input2,input3):\n return input1*input2*input3\n\nclass test_contenedores(unittest.TestCase):\n def test_volCalc(self):\n test_cases = [\n {\n 'name': 'PRUEBA DE CONTENEDORES 3 x 3 x 3',\n 'input': ([3.0,3.0,3.0]),\n 'expected_out': 27.0\n },\n {\n 'name': 'PRUEBA DE CONTENEDORES 4 x 4 x 4',\n 'input': ([4.0,4.0,4.0]),\n 'expected_out': 64.0\n },\n {\n 'name': 'PRUEBA DE CONTENEDORES 5 x 5 x 5',\n 'input': ([5.0,5.0,5.0]),\n 'expected_out': 125.0\n },\n {\n 'name': 'PRUEBA DE CONTENEDORES 2 x 2 x 2',\n 'input': ([2.0,2.0,2.0]),\n 'expected_out': 8.0\n },\n {\n 'name': 'PRUEBA DE CONTENEDORES 1 x 1 x 1',\n 'input': [1.0,1.0,1.0],\n 'expected_out': 1.0\n }\n ]\n for i in test_cases:\n actual=vol_calc(i['input'][0],i['input'][1],i['input'][2])\n self.assertEqual(actual,i['expected_out'])\n def test_totalVol(self):\n test_cases = [\n {\n 'name': 'PRUEBA 3 contenedores de Volumen 1, 2 de 4 y 5 de 3',\n 'input': ['3,1','2,4','5,3'],\n 'expected_out':26.0\n },\n {\n 'name': 'PRUEBA 4 contenedores de Volumen 2, 5 de 1 y 5 de 6',\n 'input': ['4,2','5,1','5,6'],\n 'expected_out':43.0\n },\n {\n 'name': 'PRUEBA 2 contenedores de Volumen 2, 4 de 4 y 5 de 5',\n 'input': ['2,2','4,4','5,5'],\n 'expected_out':45.0\n }\n ]\n for i in test_cases:\n actual = total_vol(i['input'])\n self.assertEqual(actual,i['expected_out'])\n\n def test_savingstratUnit(self): #USANDO UN MOCK\n test_cases = [\n {\n 'name': 'cajas de 1, 2 y 3 para un volumen de 100',\n 'input': [[1,2,3],100],\n 'expected':['(3,3)','(2,2)','(1,3)']\n },\n {\n 'name': 'cajas de 2, 4 y 6 para un volumen de 500',\n 'input': [[2,4,6],500],\n 'expected':['(6,2)','(4,1)','(2,1)']\n },\n {\n 'name': 'cajas de 1, 2 y 3 para un volumen de 200',\n 'input': [[1,2,3],200],\n 'expected':['(3,7)','(2,1)','(1,3)']\n }\n ]\n for i in test_cases:\n actual = saving_strat(i['input'][0],i['input'][1],vol_calcmock)\n self.assertEqual(actual,i['expected'])\n \n def test_savingstrat(self): ##INTEGRANDO FUNCIONES A USAR\n test_cases = [\n {\n 'name': 'cajas de 1, 2 y 3 para un volumen de 100',\n 'input': [[1,2,3],100],\n 'expected':['(3,3)','(2,2)','(1,3)']\n },\n {\n 'name': 'cajas de 2, 4 y 6 para un volumen de 500',\n 'input': [[2,4,6],500],\n 'expected':['(6,2)','(4,1)','(2,1)']\n },\n {\n 'name': 'cajas de 1, 2 y 3 para un volumen de 200',\n 'input': [[1,2,3],200],\n 'expected':['(3,7)','(2,1)','(1,3)']\n }\n ]\n for i in test_cases:\n actual = saving_strat(i['input'][0],i['input'][1],vol_calc)\n 
self.assertEqual(actual,i['expected'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"ene-jun-2020/EmilioBarreraGonzalez/practica5/contenedores_test.py","file_name":"contenedores_test.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"213608781","text":"import os\nimport matplotlib.image as mpimg\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.multiprocessing as mp\nimport numpy as np\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import datasets, models, transforms\nfrom sklearn.model_selection import StratifiedKFold\nimport matplotlib.pyplot as plt\nimport PIL\nfrom PIL import Image\nimport time\nimport math\nimport cv2\n\nfrom parseData import parseData\nfrom efficientnet_pytorch import EfficientNet\n\nfrom visualize_model import visualize_model\nfrom run_model import run_model\nfrom CellDataset import CellDataset\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser(description='PyTorch EfficientNet Training')\nparser.add_argument('--data', metavar='DIR', default=\"\",\n help='path to KI-Dataset folder')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='efficientnet-b0',\n help='model architecture (default: efficientnet-b0)')\nparser.add_argument('-j', '--workers', default=1, type=int, metavar='N',\n help='number of data loading workers (default: 1)')\nparser.add_argument('--epochs', default=5, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=64, type=int,\n metavar='N',\n help='mini-batch size (default: 64), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on test set')\nparser.add_argument('-val', '--validate', dest='validate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--feature_extract', dest='feature_extract',\n action='store_true',\n help=\"Train only last layer (otherwise full model)\")\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. 
')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--image_size', default=32, type=int,\n help='image size')\nparser.add_argument('--advprop', default=False, action='store_true',\n help='use advprop or not')\nparser.add_argument('--upsample', default=False, action='store_true',\n help='upsample, else use class weights')\n\n# Static config\nnum_classes = 4\nclass_names = ['inflammatory', 'lymphocyte', 'fibroblast and endothelial',\n 'epithelial', 'apoptosis / civiatte body']\nshuffle = True\nk = 5 # Cross-validation splits\n\nclass AddGaussianNoise(object):\n def __init__(self, mean=0., std=1.):\n self.std = std\n self.mean = mean\n\n def __call__(self, tensor):\n return tensor + torch.randn(tensor.size()) * self.std + self.mean\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\ndef lambdaTransform(image):\n return image * 2.0 - 1.0\n\ndef main():\n args = parser.parse_args()\n\n mp.set_start_method('spawn')\n\n # Normalize using dataset mean + std or advprop settings\n #if args.advprop:\n # normalize = transforms.Lambda(lambdaTransform)\n #else:\n # normalize = transforms.Normalize(mean=[0.72482513, 0.59128926, 0.76370454],\n # std=[0.18745105, 0.2514997, 0.15264913])\n\n image_size = args.image_size\n print('Using image size', image_size)\n\n # Image transforms used for training and validation\n print(image_size+math.floor(0.1*image_size))\n train_tsfm = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize(image_size+math.floor(0.1*image_size), interpolation=PIL.Image.BICUBIC),\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., 1.)\n # normalize,\n ])\n\n val_tsfm = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n # normalize,\n ])\n\n # Load and split datasets\n\n images, labels = parseData(basePath=args.data, fileCount=5)\n exit()\n\n for i in range(len(labels)-1, -1, -1):\n if(labels[i] == 4):\n labels.pop(i)\n images.pop(i)\n\n for i in range(num_classes):\n print('The count of {} is: {}'.format(class_names[i], labels.count(i)))\n\n # Test images from different slices than training images\n # Roughly 80-20 split with 20390 being the closest threshold between two WSIs\n # len(images)-20 only relevant if parsing a small set of the data for debugging\n train_images = images[:min(20390, len(images)-20)]\n train_labels = labels[:min(20390, len(images)-20)]\n test_images = images[min(20390, len(images)-20):]\n test_labels = labels[min(20390, len(images)-20):]\n\n '''\n Train data without sampling:\n The count of inflammatory is: 1387\n The count of lymphocyte is: 2470\n The count of fibroblast and endothelial is: 6257\n The count of epithelial is: 10276\n '''\n if args.upsample:\n c0_ind = [i for i, x in enumerate(train_labels) if x == 0]\n c1_ind = [i for i, x in enumerate(train_labels) if x == 1]\n c2_ind = [i for i, x in enumerate(train_labels) if x == 2]\n c3_ind = [i for i, x in enumerate(train_labels) if x == 3]\n print(train_labels)\n for i in range(8):\n for idx, val in enumerate(c0_ind):\n print(val)\n train_labels.append(train_labels[val])\n train_images.append(train_images[val])\n for i in range(4):\n for idx, val in enumerate(c1_ind):\n train_labels.append(train_labels[val])\n 
train_images.append(train_images[val])\n for i in range(1):\n for idx, val in enumerate(c2_ind):\n train_labels.append(train_labels[val])\n train_images.append(train_images[val])\n\n for idx, val in enumerate(c3_ind):\n if idx < 2000:\n train_labels.append(train_labels[val])\n train_images.append(train_images[val])\n\n for i in range(num_classes):\n print('The count of {} is: {}'.format(class_names[i], train_labels.count(i)))\n\n '''\n Train data with sampling\n The count of inflammatory is: 12483\n The count of lymphocyte is: 12350\n The count of fibroblast and endothelial is: 12514\n The count of epithelial is: 12276\n '''\n\n # Using Stratified K-Fold with shuffle to evenly distribute the data between validation splits\n skf = StratifiedKFold(n_splits=k, shuffle=shuffle, random_state=args.seed)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Transform test data to torch tensor\n tensor_test_x = torch.tensor(test_images, dtype=torch.float32, device=device)\n tensor_test_y = torch.tensor(test_labels, dtype=torch.long, device=device)\n tensor_test_x = tensor_test_x.permute(0, 3, 1, 2)\n\n split = 0\n\n # Run once using each train-validation split\n for train, val in skf.split(train_images, train_labels):\n # Transform training and validation data to torch tensor\n tensor_train_x = torch.tensor([train_images[i] for i in train], dtype=torch.float32, device=device)\n tensor_val_x = torch.tensor([train_images[i] for i in val], dtype=torch.float32, device=device)\n tensor_train_y = torch.tensor([train_labels[i] for i in train], dtype=torch.long, device=device)\n tensor_val_y = torch.tensor([train_labels[i] for i in val], dtype=torch.long, device=device)\n\n # Order image array dimensions to pytorch standard format\n tensor_train_x = tensor_train_x.permute(0, 3, 1, 2)\n tensor_val_x = tensor_val_x.permute(0, 3, 1, 2)\n\n # Generate datasets\n train_dataset = CellDataset(tensors=(tensor_train_x, tensor_train_y),\n transform=train_tsfm)\n val_dataset = CellDataset(tensors=(tensor_val_x, tensor_val_y),\n transform=val_tsfm)\n test_dataset = CellDataset(tensors=(tensor_test_x, tensor_test_y),\n transform=val_tsfm)\n\n # Sizes of datasets\n train_dataset_size = len(train_dataset)\n val_dataset_size = len(val_dataset)\n test_dataset_size = len(test_dataset)\n print(\"train size: {}\".format(train_dataset_size))\n print(\"val size: {}\".format(val_dataset_size))\n print(\"test size: {}\".format(test_dataset_size))\n\n # Create Dataloaders\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=shuffle,\n num_workers=args.workers, pin_memory=False)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=False)\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=False)\n\n loaders = {\n \"train\": train_loader,\n \"val\": val_loader,\n \"test\": test_loader\n }\n\n # Run the training\n model = run_model(loaders, split, args)\n split += 1\n\n '''\n # View results of model\n visualize_model(model, my_dataloader)\n plt.show()\n '''\n\n '''\n # View single image\n crop = Image.fromarray(images[5814])\n crop.show()\n print(labels[5814])\n '''\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"efficientNet.py","file_name":"efficientNet.py","file_ext":"py","file_size_in_byte":10995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"553979321","text":"from SSH import *\nimport matplotlib.pyplot as plt\nimport argparse\nimport pickle\nimport numpy as np\nimport time\nfrom mpi4py.futures import MPIPoolExecutor\nfrom copy import copy\n\ndef run_Born(p):\n delta,r,LA,es,delta_i,L_i=p\n params=Params(delta=delta,L=np.inf,bc=-1,dmax=(2*LA+r)*2)\n x=np.array([0,LA,LA+r,2*LA+r])\n subA=np.arange(x[0],x[1])\n subB=np.arange(x[2],x[3])\n subAp=np.arange(x[1],x[2],2)\n params.measure_all_Born(proj_range=subAp,type='link')\n MI=params.mutual_information_m(subA,subB)\n LN=params.log_neg(subA,subB)\n return MI,LN,L_i,delta_i,es\n\nif __name__==\"__main__\":\n parser=argparse.ArgumentParser()\n parser.add_argument('--es',default=200,type=int)\n parser.add_argument('--delta_min',default=-.1,type=float)\n parser.add_argument('--delta_max',default=.1,type=float)\n parser.add_argument('--delta_num',default=101,type=int)\n parser.add_argument('--L_min',default=16,type=int)\n parser.add_argument('--L_max',default=64,type=int)\n args=parser.parse_args()\n\n \n delta_list=np.linspace(args.delta_min,args.delta_max,args.delta_num)\n L_list=np.arange(args.L_min,args.L_max+1,16)\n MI_list=np.zeros((L_list.shape[0],delta_list.shape[0],args.es))\n LN_list=np.zeros((L_list.shape[0],delta_list.shape[0],args.es))\n inputs=[(delta,L,L,es,delta_i,L_i) for delta_i,delta in enumerate(delta_list) for (L_i,L) in enumerate(L_list) for es in range(args.es)]\n\n executor=MPIPoolExecutor()\n pool=executor.map(run_Born,inputs)\n for result in pool:\n MI,LN,L_i,delta_i,es=result\n MI_list[L_i,delta_i,es]=MI\n LN_list[L_i,delta_i,es]=LN\n\n executor.shutdown()\n\n with open('SSH_Born_es{:d}.pickle'.format(args.es),'wb') as f:\n pickle.dump([delta_list,L_list,MI_list],f)\n\n","sub_path":"SSH_inf_L.py","file_name":"SSH_inf_L.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541078536","text":"#!/usr/bin/env python\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import users\nimport logging\nimport vdb, msgparser, ui\n\nclass MainHandler(webapp.RequestHandler):\n def get(self):\n vdb.ns_set()\n text = ui.get_menu()\n self.response.out.write(ui.format_page('admin', text))\n\nclass ResetHandler(webapp.RequestHandler):\n def get(self):\n vdb.ns_set()\n vdb.msg_reset_parsed()\n self.redirect('/')\n\nclass ParseHandler(webapp.RequestHandler):\n def get(self):\n vdb.ns_set()\n list = vdb.msg_get_unparsed()\n for item in list[:100]:\n if item.parent_id > 0:\n vdb.msg_delete(item.key().id())\n else:\n records = msgparser.parse(item.body)\n if records[0][1] == 'common':\n records = msgparser.parse(item.subject)\n vdb.msg_update_from(item, records[0])\n vdb.stat_update(item)\n if len(records) > 1:\n for record in records[1:]:\n rec = vdb.msg_create_from(item)\n vdb.msg_update_from(rec, record)\n vdb.stat_update(rec)\n self.redirect('/')\n\nclass ViewHandler(webapp.RequestHandler):\n def get(self):\n vdb.ns_set()\n text = ui.get_menu()\n pgstate = ui.parse_state(self.request.get('s'))\n namespaces = vdb.ns_get_all()\n for ns in namespaces:\n text += \"[%s]
\\n\" % ns.namespace_name\n q = vdb.msg_get_by_ns(ns.namespace_name)\n# q = q.filter('name !=', 'M').filter('name !=', 'F')\n list = []\n list.append(('id', 'pid', 'date', 'place', 'name',\n 'atag', 'amount', 'parsed'))\n for item in q:\n list.append((item.key().id(), item.parent_id, item.date, item.place, item.name,\n item.atag, item.amount, item.parsed))\n text += ui.format_table(1, list, pgstate)\n self.response.out.write(ui.format_page('viewer', text))\n\ndef main():\n application = webapp.WSGIApplication([\n ('/admin/', MainHandler),\n ('/admin/view', ViewHandler),\n ('/admin/reset', ResetHandler),\n ('/admin/parse', ParseHandler)],\n debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"538802979","text":"import itertools\n\nT = int(input())\nfor test_case in range(1, T + 1):\n origin_str, can_change = input().split()\n can_change = int(can_change)\n n = len(origin_str)\n if n == 1:\n print('#{} {}'.format(test_case, origin_str))\n continue\n origin = list(origin_str)\n goal_list = sorted(origin, reverse=True)\n goal = int(''.join(goal_list))\n\n can_change_case = list(itertools.combinations(range(n), 2))\n stack = [origin]\n i = 0\n ans = int(origin_str)\n for i in range(can_change):\n p = 0\n new_stack = []\n for now_prise in stack:\n prise = int(''.join(now_prise))\n for x, y in can_change_case:\n next_prise = now_prise.copy()\n next_prise[x], next_prise[y] = next_prise[y], next_prise[x]\n p = int(''.join(next_prise))\n if p < ans:\n continue\n if next_prise in new_stack:\n continue\n else:\n new_stack.append(next_prise)\n if ans < p:\n ans = p\n if p == goal:\n break\n if p == goal:\n break\n if p == goal:\n break\n stack = new_stack\n\n if (can_change-i) % 2:\n print('#{} {}'.format(test_case, ans))\n else:\n goal_list[-1], goal_list[-2] = goal_list[-2], goal_list[-1]\n print('#{} {}'.format(test_case, ''.join(goal_list)))","sub_path":"10월/10_01/1244.py","file_name":"1244.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580886069","text":"from grid import Grid\nfrom grid import Entity\nfrom grid import Item \nfrom grid import Console\nfrom grid import Room\n\n'''\nThis is a prototype\nThe map contins a 10x10 grid with a 'Y' shaped moveable area\nThe objective is to get the key from one leg of the Y and use it\nto unlock the door on the other end of the Y\n'''\nclass Map(object):\n\tgrid = Grid(10, 10)\n\tKEY_ROOM = None\n\tDOOR_ROOM = None\n\tSTARTING_POSITION = None\n\n\tdef __init__(self):\n\t\tfor yValue in range(5, 10):\n\t\t\tself.grid.addRoom(Room(x = 5, y = yValue))\n\t\tMap.STARTING_POSITION = {'x' : 5, 'y' : 5}\n\n\t\tX = 4\n\t\tY = 4\n\t\t\n\t\tfor each in range(4):\n\t\t\tself.grid.addRoom(Room(x = X, y = Y))\n\t\t\tX = X - 1\n\t\t\tY = Y - 1\n\t\tMap.DOOR_ROOM = self.grid.get(x = 1, y = 1)\n\t\tMap.DOOR_ROOM.description = '''This room is huge, wide, and empty, except for a large door stationed in the middle'''\n\t\tX = 6\n\t\tY = 4\n\n\t\tfor each in range(5):\n\t\t\tself.grid.addRoom(Room(x = X, y = Y, description = 'This is the right leg of the Y. 
There are pictures of flamingos on the wall and the occasional goose'))\n\t\t\tX = X + 1\n\t\t\tY = Y - 1\n\t\tMap.KEY_ROOM = self.grid.get(x = 6, y = 4)\n\t\tMap.KEY_ROOM.description = '''This is the key room. It holds keys of all shapes and sizes. They line the walls'''\n\t\tkey = Item(x = Map.KEY_ROOM.x, y = Map.KEY_ROOM.y, name = 'key', description = 'a shiny key')\n\n\t\tpOfTrueB = Item(x = Map.KEY_ROOM.x, y = Map.KEY_ROOM.y, name = 'potion of true belief', \n\t\t\tdescription = '''This potion is in a shiny green bottle. It bubbles loudly in a way that can only be described as ominous. \nThere is a big white label across the middle saying:\n'DANGER: DO NOT OPERATE HEAVY MACHINRY, FIREARMS, OR GOVERNMENTS WHILE UNDER THE INFLUENCE OF THIS SUBSTANCE' ''')\n\t\tfor foo in (key, pOfTrueB):\n\t\t\tself.grid.get(foo.x, foo.y).inventory.add(foo)\n\t\t\n\t\tself.player = Entity(**Map.STARTING_POSITION)\n\t\tself.player.name = 'Mr Foo'\n\t\tself.grid.get(self.player.x, self.player.y).inventory.add(self.player)\n\n\t\tself.grid.draw()\t\n\ngameMap = Map()\nconsole = Console(member = gameMap.player, grid = gameMap.grid)\n\nwhile(True):\n\tconsole.prompt()\t\t\n","sub_path":"textgame/textgame.py","file_name":"textgame.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"460466843","text":"#!/usr/bin/env python\n\n\"\"\"Observatory Management Service to keep track of observatories sites, logical platform sites, instrument sites,\nand the relationships between them\"\"\"\n\nimport string\nimport time\nfrom collections import defaultdict\n\nfrom ooi.logging import log\n\nfrom pyon.core.exception import NotFound, BadRequest, Inconsistent\nfrom pyon.public import CFG, IonObject, RT, PRED, LCS, LCE, OT\nfrom pyon.ion.resource import ExtendedResourceContainer\n\nfrom ion.services.sa.instrument.rollx_builder import RollXBuilder\nfrom ion.services.sa.instrument.status_builder import AgentStatusBuilder\nfrom ion.services.sa.observatory.deployment_activator import DeploymentActivatorFactory, DeploymentResourceCollectorFactory\nfrom ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient\nfrom ion.services.sa.observatory.observatory_util import ObservatoryUtil\nfrom ion.util.geo_utils import GeoUtils\nfrom ion.util.related_resources_crawler import RelatedResourcesCrawler\nfrom ion.services.sa.observatory.deployment_util import describe_deployments\n\nfrom interface.services.sa.iobservatory_management_service import BaseObservatoryManagementService\nfrom interface.objects import OrgTypeEnum, ComputedValueAvailability, ComputedIntValue, ComputedListValue, ComputedDictValue, AggregateStatusType, DeviceStatusType\nfrom interface.objects import MarineFacilityOrgExtension, NegotiationStatusEnum, NegotiationTypeEnum, ProposalOriginatorEnum\n\nINSTRUMENT_OPERATOR_ROLE = 'INSTRUMENT_OPERATOR'\nOBSERVATORY_OPERATOR_ROLE = 'OBSERVATORY_OPERATOR'\nDATA_OPERATOR_ROLE = 'DATA_OPERATOR'\nSTATUS_UNKNOWN = {1:1, 2:1, 3:1, 4:1}\n\n\nclass ObservatoryManagementService(BaseObservatoryManagementService):\n\n def on_init(self):\n IonObject(\"Resource\") # suppress pyflakes error\n CFG, log, RT, PRED, LCS, LCE, NotFound, BadRequest, log #suppress pyflakes errors about \"unused import\"\n\n self.override_clients(self.clients)\n self.outil = ObservatoryUtil(self)\n self.agent_status_builder = AgentStatusBuilder(process=self)\n\n\n self.HIERARCHY_DEPTH = {RT.InstrumentSite: 3,\n RT.PlatformSite: 2,\n RT.Subsite: 
1,\n RT.Observatory: 0,\n }\n \n self.HIERARCHY_LOOKUP = [RT.Observatory, \n RT.Subsite, \n RT.PlatformSite, \n RT.InstrumentSite]\n\n #todo: add lcs methods for these??\n# # set up all of the policy interceptions\n# if self.container and self.container.governance_controller:\n# reg_precondition = self.container.governance_controller.register_process_operation_precondition\n# reg_precondition(self, 'execute_observatory_lifecycle',\n# self.RR2.policy_fn_lcs_precondition(\"observatory_id\"))\n# reg_precondition(self, 'execute_subsite_lifecycle',\n# self.RR2.policy_fn_lcs_precondition(\"subsite_id\"))\n# reg_precondition(self, 'execute_platform_site_lifecycle',\n# self.RR2.policy_fn_lcs_precondition(\"platform_site_id\"))\n# reg_precondition(self, 'execute_instrument_site_lifecycle',\n# self.RR2.policy_fn_lcs_precondition(\"instrument_site_id\"))\n\n\n def override_clients(self, new_clients):\n \"\"\"\n Replaces the service clients with a new set of them... and makes sure they go to the right places\n \"\"\"\n self.RR2 = EnhancedResourceRegistryClient(new_clients.resource_registry)\n\n #shortcut names for the import sub-services\n if hasattr(new_clients, \"resource_registry\"):\n self.RR = new_clients.resource_registry\n \n if hasattr(new_clients, \"instrument_management\"):\n self.IMS = new_clients.instrument_management\n\n if hasattr(new_clients, \"data_process_management\"):\n self.PRMS = new_clients.data_process_management\n\n def _calc_geospatial_point_center(self, site):\n siteTypes = [RT.Site, RT.Subsite, RT.Observatory, RT.PlatformSite, RT.InstrumentSite]\n if site and site.type_ in siteTypes:\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n for constraint in site.constraint_list:\n if constraint.type_ == OT.GeospatialBounds:\n site.geospatial_point_center = GeoUtils.calc_geospatial_point_center(constraint)\n\n ##########################################################################\n #\n # CRUD OPS\n #\n ##########################################################################\n\n\n def create_marine_facility(self, org=None):\n \"\"\"Create an Org (domain of authority) that realizes a marine facility. This Org will have\n set up roles for a marine facility. Shared resources, such as a device can only be\n registered in one marine facility Org, and additionally in many virtual observatory Orgs. 
The\n marine facility operators will have more extensive permissions and will supercede virtual\n observatory commands\n\n @param org Org\n @retval org_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n log.debug(\"ObservatoryManagementService.create_marine_facility(): %s\", org)\n \n # create the org\n org.org_type = OrgTypeEnum.MARINE_FACILITY\n org_id = self.clients.org_management.create_org(org)\n\n #Instantiate initial set of User Roles for this marine facility\n instrument_operator_role = IonObject(RT.UserRole,\n governance_name=INSTRUMENT_OPERATOR_ROLE,\n name='Facility Operator', #previously Instrument Operator\n description='Operate and post events related to Facility Platforms and Instruments')\n self.clients.org_management.add_user_role(org_id, instrument_operator_role)\n observatory_operator_role = IonObject(RT.UserRole,\n governance_name=OBSERVATORY_OPERATOR_ROLE,\n name='Facility Manager', # previously Observatory Operator\n description='Change Facility configuration, post Site-related events')\n self.clients.org_management.add_user_role(org_id, observatory_operator_role)\n data_operator_role = IonObject(RT.UserRole,\n governance_name=DATA_OPERATOR_ROLE,\n name='Facility Data Operator', # previously Data Operator\n description='Manipulate and post events related to Facility Data products')\n self.clients.org_management.add_user_role(org_id, data_operator_role)\n \n return org_id\n\n def create_virtual_observatory(self, org=None):\n \"\"\"Create an Org (domain of authority) that realizes a virtual observatory. This Org will have\n set up roles for a virtual observatory. Shared resources, such as a device can only be\n registered in one marine facility Org, and additionally in many virtual observatory Orgs. The\n marine facility operators will have more extensive permissions and will supercede virtual\n observatory commands\n\n @param org Org\n @retval org_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n log.debug(\"ObservatoryManagementService.create_virtual_observatory(): %s\", org)\n\n # create the org\n org.org_type = OrgTypeEnum.VIRTUAL_OBSERVATORY\n org_id = self.clients.org_management.create_org(org)\n\n return org_id\n\n def create_observatory(self, observatory=None, org_id=\"\"):\n \"\"\"Create a Observatory resource. An observatory is coupled\n with one Org. 
The Org is created and associated as part of this call.\n\n @param observatory Observatory\n @retval observatory_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(observatory)\n\n # create the marine facility\n observatory_id = self.RR2.create(observatory, RT.Observatory)\n\n if org_id:\n self.assign_resource_to_observatory_org(observatory_id, org_id)\n\n return observatory_id\n\n def read_observatory(self, observatory_id=''):\n \"\"\"Read a Observatory resource\n\n @param observatory_id str\n @retval observatory Observatory\n @throws NotFound object with specified id does not exist\n \"\"\"\n return self.RR2.read(observatory_id, RT.Observatory)\n\n def update_observatory(self, observatory=None):\n \"\"\"Update a Observatory resource\n\n @param observatory Observatory\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(observatory)\n\n return self.RR2.update(observatory, RT.Observatory)\n\n def delete_observatory(self, observatory_id=''):\n \"\"\"Delete a Observatory resource\n\n @param observatory_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n return self.RR2.retire(observatory_id, RT.Observatory)\n\n def force_delete_observatory(self, observatory_id=''):\n return self.RR2.pluck_delete(observatory_id, RT.Observatory)\n\n\n\n def create_subsite(self, subsite=None, parent_id=''):\n \"\"\"Create a Subsite resource. A subsite is a frame of reference within an observatory. Its parent is\n either the observatory or another subsite.\n\n @param subsite Subsite\n @param parent_id str\n @retval subsite_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(subsite)\n\n subsite_id = self.RR2.create(subsite, RT.Subsite)\n\n if parent_id:\n self.assign_site_to_site(subsite_id, parent_id)\n\n return subsite_id\n\n def read_subsite(self, subsite_id=''):\n \"\"\"Read a Subsite resource\n\n @param subsite_id str\n @retval subsite Subsite\n @throws NotFound object with specified id does not exist\n \"\"\"\n return self.RR2.read(subsite_id, RT.Subsite)\n\n def update_subsite(self, subsite=None):\n \"\"\"Update a Subsite resource\n\n @param subsite Subsite\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(subsite)\n\n return self.RR2.update(subsite, RT.Subsite)\n\n def delete_subsite(self, subsite_id=''):\n \"\"\"Delete a subsite resource, removes assocations to parents\n\n @param subsite_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n self.RR2.retire(subsite_id, RT.Subsite)\n\n def force_delete_subsite(self, subsite_id=''):\n self.RR2.pluck_delete(subsite_id, RT.Subsite)\n\n\n\n def create_platform_site(self, platform_site=None, parent_id=''):\n \"\"\"Create a PlatformSite resource. A platform_site is a frame of reference within an observatory. 
Its parent is\n either the observatory or another platform_site.\n\n @param platform_site PlatformSite\n @param parent_id str\n @retval platform_site_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(platform_site)\n\n platform_site_id = self.RR2.create(platform_site, RT.PlatformSite)\n\n if parent_id:\n self.RR2.assign_site_to_one_site_with_has_site(platform_site_id, parent_id)\n\n return platform_site_id\n\n def read_platform_site(self, platform_site_id=''):\n \"\"\"Read a PlatformSite resource\n\n @param platform_site_id str\n @retval platform_site PlatformSite\n @throws NotFound object with specified id does not exist\n \"\"\"\n return self.RR2.read(platform_site_id, RT.PlatformSite)\n\n def update_platform_site(self, platform_site=None):\n \"\"\"Update a PlatformSite resource\n\n @param platform_site PlatformSite\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(platform_site)\n\n return self.RR2.update(platform_site, RT.PlatformSite)\n\n def delete_platform_site(self, platform_site_id=''):\n \"\"\"Delete a PlatformSite resource, removes associations to parents\n\n @param platform_site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n self.RR2.retire(platform_site_id, RT.PlatformSite)\n\n def force_delete_platform_site(self, platform_site_id=''):\n self.RR2.pluck_delete(platform_site_id, RT.PlatformSite)\n\n\n def create_instrument_site(self, instrument_site=None, parent_id=''):\n \"\"\"Create an InstrumentSite resource. An instrument_site is a frame of reference within an observatory.
Its parent is\n either the observatory or another instrument_site.\n\n @param instrument_site InstrumentSite\n @param parent_id str\n @retval instrument_site_id str\n @throws BadRequest if object does not have _id or _rev attribute\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(instrument_site)\n\n instrument_site_id = self.RR2.create(instrument_site, RT.InstrumentSite)\n\n if parent_id:\n self.RR2.assign_site_to_one_site_with_has_site(instrument_site_id, parent_id)\n\n return instrument_site_id\n\n def read_instrument_site(self, instrument_site_id=''):\n \"\"\"Read an InstrumentSite resource\n\n @param instrument_site_id str\n @retval instrument_site InstrumentSite\n @throws NotFound object with specified id does not exist\n \"\"\"\n return self.RR2.read(instrument_site_id, RT.InstrumentSite)\n\n def update_instrument_site(self, instrument_site=None):\n \"\"\"Update an InstrumentSite resource\n\n @param instrument_site InstrumentSite\n @throws NotFound object with specified id does not exist\n \"\"\"\n # if the geospatial_bounds is set then calculate the geospatial_point_center\n self._calc_geospatial_point_center(instrument_site)\n\n return self.RR2.update(instrument_site, RT.InstrumentSite)\n\n def delete_instrument_site(self, instrument_site_id=''):\n \"\"\"Delete an InstrumentSite resource, removes associations to parents\n\n @param instrument_site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n # todo: give InstrumentSite a lifecycle in COI so that we can remove the \"True\" argument here\n self.RR2.retire(instrument_site_id, RT.InstrumentSite)\n\n def force_delete_instrument_site(self, instrument_site_id=''):\n self.RR2.pluck_delete(instrument_site_id, RT.InstrumentSite)\n\n\n\n def create_deployment(self, deployment=None, site_id=\"\", device_id=\"\"):\n \"\"\"\n Create a Deployment resource.
Represents a (possibly open-ended) time interval\n grouping one or more resources within a given context, such as an instrument\n deployment on a platform at an observatory site.\n \"\"\"\n\n deployment_id = self.RR2.create(deployment, RT.Deployment)\n\n #Verify that site and device exist, add links if they do\n if site_id:\n site_obj = self.RR2.read(site_id)\n if site_obj:\n self.RR2.assign_deployment_to_site_with_has_deployment(deployment_id, site_id)\n\n if device_id:\n\n device_obj = self.RR2.read(device_id)\n if device_obj:\n self.RR2.assign_deployment_to_device_with_has_deployment(deployment_id, device_id)\n\n return deployment_id\n\n def update_deployment(self, deployment=None):\n # Overwrite Deployment object\n self.RR2.update(deployment, RT.Deployment)\n\n def read_deployment(self, deployment_id=''):\n deployment_obj = self.RR2.read(deployment_id, RT.Deployment)\n\n return deployment_obj\n\n def delete_deployment(self, deployment_id=''):\n \"\"\"\n Delete a Deployment resource\n \"\"\"\n\n self.RR2.retire(deployment_id, RT.Deployment)\n\n def force_delete_deployment(self, deployment_id=''):\n self.RR2.pluck_delete(deployment_id, RT.Deployment)\n\n\n ############################\n #\n # ASSOCIATIONS\n #\n ############################\n\n\n def assign_site_to_site(self, child_site_id='', parent_site_id=''):\n \"\"\"Connects a child site (any subtype) to a parent site (any subtype)\n\n @param child_site_id str\n @param parent_site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.assign_site_to_site_with_has_site(child_site_id, parent_site_id)\n\n\n def unassign_site_from_site(self, child_site_id='', parent_site_id=''):\n \"\"\"Disconnects a child site (any subtype) from a parent site (any subtype)\n\n @param child_site_id str\n @param parent_site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.unassign_site_from_site_with_has_site(child_site_id, parent_site_id)\n\n\n def assign_device_to_site(self, device_id='', site_id=''):\n \"\"\"Connects a device (any type) to a site (any subtype)\n\n @param device_id str\n @param site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.assign_device_to_site_with_has_device(device_id, site_id)\n\n def unassign_device_from_site(self, device_id='', site_id=''):\n \"\"\"Disconnects a device (any type) from a site (any subtype)\n\n @param device_id str\n @param site_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.unassign_device_from_site_with_has_device(device_id, site_id)\n\n\n def assign_device_to_network_parent(self, child_device_id='', parent_device_id=''):\n \"\"\"Connects a device (any type) to parent in the RSN network\n\n @param child_device_id str\n @param parent_device_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.assign_device_to_one_device_with_has_network_parent(parent_device_id, child_device_id)\n\n\n def unassign_device_from_network_parent(self, child_device_id='', parent_device_id=''):\n \"\"\"Disconnects a child device (any type) from parent in the RSN network\n\n @param child_device_id str\n @param parent_device_id str\n @throws NotFound object with specified id does not exist\n \"\"\"\n\n self.RR2.unassign_device_from_device_with_has_network_parent(parent_device_id, child_device_id)\n\n\n\n def assign_instrument_model_to_instrument_site(self, instrument_model_id='', instrument_site_id=''):\n 
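# NOTE (editorial): a minimal usage sketch; 'svc', 'model_id' and 'site_id' are assumed names, not defined in this module:\n # svc.assign_instrument_model_to_instrument_site(model_id, site_id)\n 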
self.RR2.assign_instrument_model_to_instrument_site_with_has_model(instrument_model_id, instrument_site_id)\n\n def unassign_instrument_model_from_instrument_site(self, instrument_model_id='', instrument_site_id=''):\n self.RR2.unassign_instrument_model_from_instrument_site_with_has_model(instrument_model_id, instrument_site_id)\n\n def assign_platform_model_to_platform_site(self, platform_model_id='', platform_site_id=''):\n self.RR2.assign_platform_model_to_platform_site_with_has_model(platform_model_id, platform_site_id)\n\n def unassign_platform_model_from_platform_site(self, platform_model_id='', platform_site_id=''):\n self.RR2.unassign_platform_model_from_platform_site_with_has_model(platform_model_id, platform_site_id)\n\n def assign_resource_to_observatory_org(self, resource_id='', org_id=''):\n if not org_id:\n raise BadRequest(\"Org id not given\")\n if not resource_id:\n raise BadRequest(\"Resource id not given\")\n\n #log.trace(\"assign_resource_to_observatory_org: org_id=%s, resource_id=%s \", org_id, resource_id)\n self.clients.org_management.share_resource(org_id, resource_id)\n\n def unassign_resource_from_observatory_org(self, resource_id='', org_id=''):\n if not org_id:\n raise BadRequest(\"Org id not given\")\n if not resource_id:\n raise BadRequest(\"Resource id not given\")\n\n self.clients.org_management.unshare_resource(org_id, resource_id)\n\n\n\n\n ##########################################################################\n #\n # DEPLOYMENTS\n #\n ##########################################################################\n\n\n def deploy_instrument_site(self, instrument_site_id='', deployment_id=''):\n # OBSOLETE - Move calls to assign/unassign\n self.RR2.assign_deployment_to_instrument_site_with_has_deployment(deployment_id, instrument_site_id)\n\n def undeploy_instrument_site(self, instrument_site_id='', deployment_id=''):\n # OBSOLETE - Move calls to assign/unassign\n self.RR2.unassign_deployment_from_instrument_site_with_has_deployment(deployment_id, instrument_site_id)\n\n def deploy_platform_site(self, platform_site_id='', deployment_id=''):\n # OBSOLETE - Move calls to assign/unassign\n self.RR2.assign_deployment_to_platform_site_with_has_deployment(deployment_id, platform_site_id)\n\n def undeploy_platform_site(self, platform_site_id='', deployment_id=''):\n # OBSOLETE - Move calls to assign/unassign\n self.RR2.unassign_deployment_from_platform_site_with_has_deployment(deployment_id, platform_site_id)\n\n def _get_deployment_assocs(self, deployment_id):\n res_ids, assocs = self.RR.find_subjects(predicate=PRED.hasDeployment, object=deployment_id, id_only=True)\n assoc_by_type = dict(Site=[], Device=[])\n for a in assocs:\n if a.st not in assoc_by_type:\n assoc_by_type[a.st] = []\n assoc_by_type[a.st].append(a)\n if a.st.endswith(\"Device\"):\n assoc_by_type[\"Device\"].append(a)\n if a.st.endswith(\"Site\"):\n assoc_by_type[\"Site\"].append(a)\n return assoc_by_type\n\n def assign_device_to_deployment(self, device_id='', deployment_id=''):\n device = self.RR.read(device_id)\n dep_assocs = self._get_deployment_assocs(deployment_id)\n if dep_assocs[\"Device\"]:\n raise BadRequest(\"Deployment %s - Cannot have more than 1 Device\" % deployment_id)\n if device.type_ == RT.InstrumentDevice:\n self.RR2.assign_deployment_to_instrument_device_with_has_deployment(deployment_id, device_id)\n if dep_assocs[\"Site\"] and dep_assocs[\"Site\"][0].st != RT.InstrumentSite:\n raise BadRequest(\"Deployment %s - Device %s (%s) incompatible with associated Site %s (%s)\"
% (\n deployment_id, device_id, device.type_, dep_assocs[\"Site\"][0].s, dep_assocs[\"Site\"][0].st))\n elif device.type_ == RT.PlatformDevice:\n self.RR2.assign_deployment_to_platform_device_with_has_deployment(deployment_id, device_id)\n if dep_assocs[\"Site\"] and dep_assocs[\"Site\"][0].st != RT.PlatformSite:\n raise BadRequest(\"Deployment %s - Device %s (%s) incompatible with associated Site %s (%s)\" % (\n deployment_id, device_id, device.type_, dep_assocs[\"Site\"][0].s, dep_assocs[\"Site\"][0].st))\n else:\n raise BadRequest(\"Illegal resource type to assign to Deployment: %s\" % device.type_)\n\n def unassign_device_from_deployment(self, device_id='', deployment_id=''):\n device = self.RR.read(device_id)\n if device.type_ == RT.InstrumentDevice:\n self.RR2.unassign_deployment_from_instrument_device_with_has_deployment(deployment_id, device_id)\n elif device.type_ == RT.PlatformDevice:\n self.RR2.unassign_deployment_from_platform_device_with_has_deployment(deployment_id, device_id)\n else:\n raise BadRequest(\"Illegal resource type to unassign from Deployment: %s\" % device.type_)\n\n def assign_site_to_deployment(self, site_id='', deployment_id=''):\n site = self.RR.read(site_id)\n dep_assocs = self._get_deployment_assocs(deployment_id)\n if dep_assocs[\"Site\"]:\n raise BadRequest(\"Deployment %s - Cannot have more than 1 Site\" % deployment_id)\n if site.type_ == RT.InstrumentSite:\n self.RR2.assign_deployment_to_instrument_site_with_has_deployment(deployment_id, site_id)\n if dep_assocs[\"Device\"] and dep_assocs[\"Device\"][0].st != RT.InstrumentDevice:\n raise BadRequest(\"Deployment %s - Site %s (%s) incompatible with associated Device %s (%s)\" % (\n deployment_id, site_id, site.type_, dep_assocs[\"Device\"][0].s, dep_assocs[\"Device\"][0].st))\n elif site.type_ == RT.PlatformSite:\n self.RR2.assign_deployment_to_platform_site_with_has_deployment(deployment_id, site_id)\n if dep_assocs[\"Device\"] and dep_assocs[\"Device\"][0].st != RT.PlatformDevice:\n raise BadRequest(\"Deployment %s - Site %s (%s) incompatible with associated Device %s (%s)\" % (\n deployment_id, site_id, site.type_, dep_assocs[\"Device\"][0].s, dep_assocs[\"Device\"][0].st))\n else:\n raise BadRequest(\"Illegal resource type to assign to Deployment: %s\" % site.type_)\n\n def unassign_site_from_deployment(self, site_id='', deployment_id=''):\n site = self.RR.read(site_id)\n if site.type_ == RT.InstrumentSite:\n self.RR2.unassign_deployment_from_instrument_site_with_has_deployment(deployment_id, site_id)\n elif site.type_ == RT.PlatformSite:\n self.RR2.unassign_deployment_from_platform_site_with_has_deployment(deployment_id, site_id)\n else:\n raise BadRequest(\"Illegal resource type to unassign from Deployment: %s\" % site.type_)\n\n\n\n def activate_deployment(self, deployment_id='', activate_subscriptions=False):\n \"\"\"\n Make the devices on this deployment the primary devices for the sites\n \"\"\"\n #Verify that the deployment exists\n depl_obj = self.RR2.read(deployment_id)\n log.debug(\"Activating deployment '%s' (%s)\", depl_obj.name, deployment_id)\n\n deployment_activator_factory = DeploymentActivatorFactory(self.clients)\n deployment_activator = deployment_activator_factory.create(depl_obj)\n deployment_activator.prepare()\n\n # process any removals\n for site_id, device_id in deployment_activator.hasdevice_associations_to_delete():\n log.info(\"Unassigning hasDevice; device '%s' from site '%s'\", device_id, site_id)\n self.unassign_device_from_site(device_id, site_id)\n\n # process the additions\n 
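# (each (site_id, device_id) pair below becomes a hasDevice association, making\n # that device the primary device for its site)\n 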
for site_id, device_id in deployment_activator.hasdevice_associations_to_create():\n log.info(\"Setting primary device '%s' for site '%s'\", device_id, site_id)\n self.assign_device_to_site(device_id, site_id)\n\n\n # self.RR.execute_lifecycle_transition(deployment_id, LCE.DEPLOY)\n\n\n def deactivate_deployment(self, deployment_id=''):\n \"\"\"Remove the primary device designation for the deployed devices at the sites\n\n @param deployment_id str\n @throws NotFound object with specified id does not exist\n @throws BadRequest if devices cannot be undeployed\n \"\"\"\n\n #Verify that the deployment exists\n deployment_obj = self.RR2.read(deployment_id)\n\n# if LCS.DEPLOYED != deployment_obj.lcstate:\n# raise BadRequest(\"This deployment is not active\")\n\n # get all associated components\n collector_factory = DeploymentResourceCollectorFactory(self.clients)\n resource_collector = collector_factory.create(deployment_obj)\n resource_collector.collect()\n\n # must only remove from sites that are not deployed under a different active deployment\n # must only remove devices that are not deployed under a different active deployment\n def filter_alternate_deployments(resource_list):\n # return the list of ids for devices or sites not connected to an alternate lcs.deployed deployment\n ret = []\n for r in resource_list:\n depls, _ = self.RR.find_objects(r, PRED.hasDeployment, RT.Deployment)\n keep = True\n for d in depls:\n if d._id != deployment_id and LCS.DEPLOYED == d.lcstate:\n keep = False\n if keep:\n ret.append(r)\n return ret\n\n device_ids = filter_alternate_deployments(resource_collector.collected_device_ids())\n site_ids = filter_alternate_deployments(resource_collector.collected_site_ids())\n\n # delete only associations where both site and device have passed the filter\n for s in site_ids:\n ds, _ = self.RR.find_objects(s, PRED.hasDevice, id_only=True)\n for d in ds:\n if d in device_ids:\n a = self.RR.get_association(s, PRED.hasDevice, d)\n self.RR.delete_association(a)\n#\n# # mark deployment as not deployed (developed seems appropriate)\n# self.RR.execute_lifecycle_transition(deployment_id, LCE.DEVELOPED)\n\n def prepare_deployment_support(self, deployment_id=''):\n extended_resource_handler = ExtendedResourceContainer(self)\n\n resource_data = extended_resource_handler.create_prepare_resource_support(deployment_id, OT.DeploymentPrepareSupport)\n\n #Fill out service request information for creating a deployment\n extended_resource_handler.set_service_requests(resource_data.create_request, 'observatory_management',\n 'create_deployment', { \"deployment\": \"$(deployment)\" })\n\n #Fill out service request information for updating a deployment\n extended_resource_handler.set_service_requests(resource_data.update_request, 'observatory_management',\n 'update_deployment', { \"deployment\": \"$(deployment)\" })\n\n #Fill out service request information for assigning an InstrumentDevice\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentDevice'].assign_request, 'observatory_management',\n 'assign_device_to_deployment', {\"device_id\": \"$(instrument_device_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for assigning a PlatformDevice\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformDevice'].assign_request, 'observatory_management',\n 'assign_device_to_deployment', {\"device_id\": \"$(platform_device_id)\",\n 
\"deployment_id\": deployment_id })\n\n #Fill out service request information for unassigning an InstrumentDevice\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentDevice'].unassign_request, 'observatory_management',\n 'unassign_device_from_deployment', {\"device_id\": \"$(instrument_device_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for unassigning a PlatformDevice\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformDevice'].unassign_request, 'observatory_management',\n 'unassign_device_from_deployment', {\"device_id\": \"$(platform_device_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for assigning an InstrumentSite\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentSite'].assign_request, 'observatory_management',\n 'assign_site_to_deployment', {\"site_id\": \"$(instrument_site_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for assigning a PlatformSite\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformSite'].assign_request, 'observatory_management',\n 'assign_site_to_deployment', {\"site_id\": \"$(platform_site_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for unassigning an InstrumentSite\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentSite'].unassign_request, 'observatory_management',\n 'unassign_site_from_deployment', {\"site_id\": \"$(instrument_site_id)\",\n \"deployment_id\": deployment_id })\n\n #Fill out service request information for unassigning a PlatformSite\n extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformSite'].unassign_request, 'observatory_management',\n 'unassign_site_from_deployment', {\"site_id\": \"$(platform_site_id)\",\n \"deployment_id\": deployment_id })\n\n return resource_data\n\n\n ##########################################################################\n #\n # FIND OPS\n #\n ##########################################################################\n\n\n\n def find_org_by_observatory(self, observatory_id=''):\n \"\"\"Return the Orgs that share the given Observatory resource\n \"\"\"\n orgs,_ = self.RR.find_subjects(RT.Org, PRED.hasResource, observatory_id, id_only=False)\n return orgs\n\n\n def find_related_frames_of_reference(self, input_resource_id='', output_resource_type_list=None):\n\n # use the related resources crawler\n finder = RelatedResourcesCrawler()\n\n # generate the partial function (cached association list)\n get_assns = finder.generate_related_resources_partial(self.RR, [PRED.hasSite])\n\n # run 2 searches allowing all site-based resource types: one down (subj-obj), one up (obj-subj)\n full_crawllist = [RT.InstrumentSite, RT.PlatformSite, RT.Subsite, RT.Observatory]\n search_down = get_assns({PRED.hasSite: (True, False)}, full_crawllist)\n search_up = get_assns({PRED.hasSite: (False, True)}, full_crawllist)\n\n # the searches return a list of association objects, so compile all the ids by extracting them\n retval_ids = set([])\n\n # we want only those IDs that are not the input resource id\n for a in search_down(input_resource_id, -1) + search_up(input_resource_id, -1):\n if a.o not in retval_ids and a.o != input_resource_id:\n retval_ids.add(a.o)\n if a.s not in retval_ids and a.s != input_resource_id:\n retval_ids.add(a.s)\n\n\n 
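# illustrative note: starting from an Observatory id, retval_ids now holds the ids of all\n # Subsites, PlatformSites and InstrumentSites reachable over hasSite in either direction\n 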
log.trace(\"converting retrieved ids to objects = %s\" % retval_ids)\n #initialize the dict\n retval = dict((restype, []) for restype in output_resource_type_list)\n\n #workaround for read_mult problem\n all_res = []\n if retval_ids: all_res = self.RR.read_mult(list(retval_ids))\n #all_res = self.RR.read_mult(retval_ids)\n\n # put resources in the slot based on their type\n for resource in all_res:\n typename = type(resource).__name__\n if typename in output_resource_type_list:\n retval[typename].append(resource)\n\n # display a count of how many resources we retrieved\n log.debug(\"got these resources: %s\", dict([(k, len(v)) for k, v in retval.iteritems()]))\n\n return retval\n\n\n def find_related_sites(self, parent_resource_id='', exclude_site_types=None, include_parents=False, id_only=False):\n if not parent_resource_id:\n raise BadRequest(\"Must provide a parent_resource_id\")\n exclude_site_types = exclude_site_types or []\n if not isinstance(exclude_site_types, list):\n raise BadRequest(\"exclude_site_types must be a list, is: %s\" % type(exclude_site_types))\n\n parent_resource = self.RR.read(parent_resource_id)\n\n org_id, site_id = None, None\n if parent_resource.type_ == RT.Org:\n org_id = parent_resource_id\n elif RT.Site in parent_resource._get_extends():\n site_id = parent_resource_id\n else:\n raise BadRequest(\"Illegal parent_resource_id type. Expected Org/Site, given: %s\" % parent_resource.type_)\n\n site_resources, site_children = self.outil.get_child_sites(site_id, org_id,\n exclude_types=exclude_site_types, include_parents=include_parents, id_only=id_only)\n\n return site_resources, site_children\n\n\n def get_sites_devices_status(self, parent_resource_ids=None, include_sites=False, include_devices=False, include_status=False):\n if not parent_resource_ids:\n raise BadRequest(\"Must provide a parent_resource_id\")\n\n result_dict = {}\n\n RR2 = EnhancedResourceRegistryClient(self.RR)\n outil = ObservatoryUtil(self, enhanced_rr=RR2)\n parent_resource_objs = self.RR.read_mult(parent_resource_ids)\n res_by_id = dict(zip(parent_resource_ids, parent_resource_objs))\n\n # Loop through all the provided site ids and create the result structure\n for parent_resource_id in parent_resource_ids:\n\n parent_resource = res_by_id[parent_resource_id]\n\n org_id, site_id = None, None\n if parent_resource.type_ == RT.Org:\n org_id = parent_resource_id\n elif RT.Site in parent_resource._get_extends():\n site_id = parent_resource_id\n\n site_result_dict = {}\n\n site_resources, site_children = outil.get_child_sites(site_id, org_id, include_parents=True, id_only=False)\n if include_sites:\n site_result_dict[\"site_resources\"] = site_resources\n site_result_dict[\"site_children\"] = site_children\n\n all_device_statuses = {}\n if include_devices or include_status:\n RR2.cache_predicate(PRED.hasSite)\n RR2.cache_predicate(PRED.hasDevice)\n all_device_statuses = self._get_master_status_table(RR2, site_children.keys())\n\n if include_status:\n #add code to grab the master status table to pass in to the get_status_roll_ups calc\n log.debug('get_sites_devices_status site master_status_table: %s ', all_device_statuses)\n site_result_dict[\"site_status\"] = all_device_statuses\n\n #create the aggregate_status for each device and site\n\n log.debug(\"calculate site aggregate status\")\n site_status = self._get_site_rollup_list(RR2, all_device_statuses, [s for s in site_children.keys()])\n site_status_dict = dict(zip(site_children.keys(), site_status))\n 
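# shape sketch (values assumed, for illustration): site_status_dict maps each child site id\n # to one rolled-up DeviceStatusType value, e.g. {'<site_id>': STATUS_OK, ...}\n 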
log.debug('get_sites_devices_status site_status_dict: %s ', site_status_dict)\n site_result_dict[\"site_aggregate_status\"] = site_status_dict\n\n if include_devices:\n log.debug(\"calculate device aggregate status\")\n inst_status = [self.agent_status_builder._crush_status_dict(all_device_statuses.get(k, {}))\n for k in all_device_statuses.keys()]\n device_agg_status_dict = dict(zip(all_device_statuses.keys(), inst_status))\n log.debug('get_sites_devices_status device_agg_status_dict: %s ', device_agg_status_dict)\n site_result_dict[\"device_aggregate_status\"] = device_agg_status_dict\n\n result_dict[parent_resource_id] = site_result_dict\n\n return result_dict\n\n def find_site_data_products(self, parent_resource_id='', include_sites=False, include_devices=False,\n include_data_products=False):\n if not parent_resource_id:\n raise BadRequest(\"Must provide a parent_resource_id\")\n\n res_dict = self.outil.get_site_data_products(parent_resource_id, include_sites=include_sites,\n include_devices=include_devices,\n include_data_products=include_data_products)\n\n return res_dict\n\n\n\n ############################\n #\n # EXTENDED RESOURCES\n #\n ############################\n\n # TODO: Make every incoming call to this one\n def get_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n site_extension = None\n\n # Make a case decision on what to do\n site_obj = self.RR2.read(site_id)\n site_type = site_obj._get_type()\n\n if site_type == RT.InstrumentSite:\n site_extension = self._get_instrument_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n elif site_type in (RT.Observatory, RT.Subsite):\n site_extension = self._get_platform_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n elif site_type == RT.PlatformSite:\n site_extension = self._get_platform_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n else:\n raise BadRequest(\"Unknown site type '%s' for site %s\" % (site_type, site_id))\n\n return site_extension\n\n # TODO: Redundant, remove operation and use get_site_extension\n def get_observatory_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n # TODO: Redundant, remove operation and use get_site_extension\n def get_platform_station_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n # TODO: Redundant, remove operation and use get_site_extension\n def get_platform_assembly_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n # TODO: Redundant, remove operation and use get_site_extension\n def get_platform_component_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n # TODO: Redundant, remove operation and use get_site_extension\n def get_instrument_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n\n def _get_site_device(self, site_id, device_relations):\n site_devices = [tup[1] for tup in device_relations.get(site_id, []) if tup[2] in (RT.InstrumentDevice, RT.PlatformDevice)]\n if
len(site_devices) > 1:\n log.error(\"Inconsistent: Site %s has multiple devices: %s\", site_id, site_devices)\n if not site_devices:\n return None\n return site_devices[0]\n\n def _get_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n \"\"\"Returns a site extension object containing common information, plus some helper objects\n\n @param site_id str\n @param ext_associations dict\n @param ext_exclude list\n @retval TBD\n @throws BadRequest A parameter is missing\n @throws NotFound An object with the specified observatory_id does not exist\n \"\"\"\n try:\n if not site_id:\n raise BadRequest(\"The site_id parameter is empty\")\n\n extended_resource_handler = ExtendedResourceContainer(self)\n\n extended_site = extended_resource_handler.create_extended_resource_container(\n extended_resource_type=OT.SiteExtension,\n resource_id=site_id,\n computed_resource_type=OT.SiteComputedAttributes,\n ext_associations=ext_associations,\n ext_exclude=ext_exclude,\n user_id=user_id)\n\n RR2 = EnhancedResourceRegistryClient(self.RR)\n outil = ObservatoryUtil(self, enhanced_rr=RR2)\n\n # Find all subsites and devices\n site_resources, site_children = outil.get_child_sites(parent_site_id=site_id, include_parents=False, id_only=False)\n site_ids = site_resources.keys() + [site_id] # IDs of this site and all child sites\n device_relations = outil.get_device_relations(site_ids)\n\n # Set parent immediate child sites\n parent_site_ids = [a.s for a in RR2.filter_cached_associations(PRED.hasSite, lambda a: a.p ==PRED.hasSite and a.o == site_id)]\n if parent_site_ids:\n extended_site.parent_site = RR2.read(parent_site_ids[0])\n else:\n extended_site.parent_site = None\n extended_site.sites = [site_resources[ch_id] for ch_id in site_children[site_id]] if site_children.get(site_id, None) is not None else []\n\n # Set all nested child devices, remove any dups\n instrument_device_ids = list( set( [tup[1] for (parent,dlst) in device_relations.iteritems() for tup in dlst if tup[2] == RT.InstrumentDevice] ) )\n platform_device_ids = list( set( [tup[1] for (parent,dlst) in device_relations.iteritems() for tup in dlst if tup[2] == RT.PlatformDevice] ) )\n\n device_ids = list(set(instrument_device_ids + platform_device_ids))\n device_objs = self.RR2.read_mult(device_ids)\n devices_by_id = dict(zip(device_ids, device_objs))\n\n extended_site.instrument_devices = [devices_by_id[did] for did in instrument_device_ids]\n extended_site.platform_devices = [devices_by_id[did] for did in platform_device_ids]\n\n # Set primary device at immediate child sites\n extended_site.sites_devices = []\n for ch_site in extended_site.sites:\n device_id = self._get_site_device(ch_site._id, device_relations)\n extended_site.sites_devices.append(devices_by_id.get(device_id, None))\n extended_site.portal_instruments = extended_site.sites_devices # ALIAS\n\n # Set deployments\n RR2.cache_predicate(PRED.hasDeployment)\n deployment_assocs = RR2.filter_cached_associations(PRED.hasDeployment, lambda a: a.s in site_ids)\n deployment_ids = [a.o for a in deployment_assocs]\n deployment_objs = RR2.read_mult(list(set(deployment_ids)))\n extended_site.deployments = deployment_objs\n\n # Set data products\n RR2.cache_predicate(PRED.hasSource)\n dataproduct_assocs = RR2.filter_cached_associations(PRED.hasSource, lambda a: a.o in site_ids)\n dataproduct_ids = [a.s for a in dataproduct_assocs]\n dataproduct_objs = RR2.read_mult(list(set(dataproduct_ids)))\n extended_site.data_products = dataproduct_objs\n\n 
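# at this point the extension holds parent/child sites, devices, deployments and data\n # products; the steps below resolve device models via cached hasModel associations\n 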
log.debug(\"Building list of model objs\")\n # Build a lookup for device models via hasModel predicates.\n # lookup is a 2d associative array of [subject type][subject id] -> object id\n RR2.cache_predicate(PRED.hasModel)\n lookup = {rt : {} for rt in [RT.InstrumentDevice, RT.PlatformDevice]}\n for a in RR2.filter_cached_associations(PRED.hasModel, lambda assn: assn.st in lookup):\n lookup[a.st][a.s] = a.o\n\n def retrieve_model_objs(rsrc_list, object_type):\n # rsrc_list is devices that need models looked up. object_type is the resource type (a device)\n # not all devices have models (represented as None), which kills read_mult. so, extract the models ids,\n # look up all the model ids, then create the proper output\n model_list = [lookup[object_type].get(r._id) for r in rsrc_list]\n model_uniq = list(set([m for m in model_list if m is not None]))\n model_objs = self.RR2.read_mult(model_uniq)\n model_dict = dict(zip(model_uniq, model_objs))\n return [model_dict.get(m) for m in model_list]\n\n extended_site.instrument_models = retrieve_model_objs(extended_site.instrument_devices, RT.InstrumentDevice)\n extended_site.platform_models = retrieve_model_objs(extended_site.platform_devices, RT.PlatformDevice)\n\n primary_device_id = self._get_site_device(site_id, device_relations)\n\n # Filtered subsites by type/alt type\n def fs(resource_type, filter_fn):\n both = lambda s: ((resource_type == s._get_type()) and filter_fn(s))\n return filter(both, site_resources.values())\n\n extended_site.platform_station_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == \"StationSite\")\n extended_site.platform_component_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == \"PlatformComponentSite\")\n extended_site.platform_assembly_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == \"PlatformAssemblySite\")\n extended_site.instrument_sites = fs(RT.InstrumentSite, lambda _: True)\n\n #from pyon.util.breakpoint import breakpoint; breakpoint(locals())\n\n context = dict(\n extended_site=extended_site,\n enhanced_RR=RR2,\n site_device_id=primary_device_id,\n site_resources=site_resources,\n site_children=site_children,\n device_relations=device_relations\n )\n return context\n except:\n log.error('_get_site_extension failed', exc_info=True)\n raise\n\n def _get_platform_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n \"\"\"Creates a SiteExtension and status for platforms and higher level sites\"\"\"\n log.debug(\"_get_platform_site_extension\")\n context = self._get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n extended_site, RR2, platform_device_id, site_resources, site_children, device_relations = \\\n context[\"extended_site\"], context[\"enhanced_RR\"], context[\"site_device_id\"], \\\n context[\"site_resources\"], context[\"site_children\"], context[\"device_relations\"]\n\n RR2.cache_predicate(PRED.hasDevice)\n\n # prepare to make a lot of rollups\n log.debug(\"Found these site children: %s\", site_children.keys())\n\n devices_for_status = set(site_children.keys())\n for dev in extended_site.sites_devices:\n if dev:\n devices_for_status.add(dev._id)\n\n all_device_statuses = self._get_master_status_table(RR2, devices_for_status)\n log.debug(\"Found all device statuses: %s\", all_device_statuses)\n\n # portal status rollup\n portal_status = [self.agent_status_builder._crush_status_dict(all_device_statuses.get(k._id, {})) if k else DeviceStatusType.STATUS_UNKNOWN for k in extended_site.portal_instruments]\n 
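# portal_status lines up index-for-index with portal_instruments; a site with no\n # primary device (None entry) rolls up as STATUS_UNKNOWN\n 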
extended_site.computed.portal_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=portal_status)\n\n log.debug(\"generating site status rollup\")\n site_status = self._get_site_rollup_list(RR2, all_device_statuses, [s._id for s in extended_site.sites])\n extended_site.computed.site_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=site_status)\n\n # create the list of station status from the overall status list\n subset_status = []\n for site in extended_site.platform_station_sites:\n if not extended_site.sites.count(site):\n log.error(\" Platform Site does not exist in the full list of sites. id: %s\", site._id)\n break\n idx = extended_site.sites.index( site )\n subset_status.append( site_status[idx] )\n extended_site.computed.station_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=subset_status)\n\n log.debug(\"generating instrument status rollup\") # (is easy)\n inst_status = [self.agent_status_builder._crush_status_dict(all_device_statuses.get(k._id, {}))\n for k in extended_site.instrument_devices]\n extended_site.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=inst_status)\n\n log.debug(\"generating platform status rollup\")\n plat_status = self._get_platform_rollup_list(RR2, all_device_statuses, [s._id for s in extended_site.platform_devices])\n extended_site.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=plat_status)\n\n log.debug(\"generating rollup of this site\")\n #site_aggregate = self._get_site_rollup_dict(RR2, all_device_statuses, site_id)\n\n #we know all the devices on this site, use previously located ids\n all_device_ids = set([s._id for s in extended_site.instrument_devices] + [s._id for s in extended_site.platform_devices])\n #create one list with all the relevant status dicts\n all_status = []\n for device_id in all_device_ids:\n all_status.append( all_device_statuses.get(device_id, STATUS_UNKNOWN) )\n\n log.debug('_get_platform_site_extension all_status: %s', all_status )\n if all_status:\n rollup_status = {}\n #for each status type extract a vector from the status set\n for stype, svalue in AggregateStatusType._str_map.iteritems():\n type_list = []\n for status in all_status:\n type_list.append(status.get(stype, DeviceStatusType.STATUS_UNKNOWN))\n #take the max value as the status of this type\n rollup_status[stype] = max(type_list)\n else:\n # no devices on this site\n rollup_status = STATUS_UNKNOWN\n log.debug('_get_platform_site_extension rollup_status: %s', rollup_status )\n\n self.agent_status_builder.set_status_computed_attributes(extended_site.computed,\n rollup_status,\n ComputedValueAvailability.PROVIDED)\n\n extended_site.deployment_info = describe_deployments(extended_site.deployments, self.clients,\n instruments=extended_site.instrument_devices,\n instrument_status=extended_site.computed.instrument_status.value)\n\n return extended_site\n\n def _get_instrument_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):\n \"\"\"Creates a SiteExtension and status for instruments\"\"\"\n context = self._get_site_extension(site_id, ext_associations, ext_exclude, user_id)\n extended_site, RR2, inst_device_id, site_resources, site_children, device_relations = \\\n context[\"extended_site\"], context[\"enhanced_RR\"], context[\"site_device_id\"], \\\n context[\"site_resources\"], context[\"site_children\"], context[\"device_relations\"]\n\n if inst_device_id:\n 
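# a primary device is deployed at this instrument site, so roll its aggregate\n # statuses directly into the computed attributes\n 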
log.debug(\"Reading status for device '%s'\", inst_device_id)\n self.agent_status_builder.add_device_rollup_statuses_to_computed_attributes(inst_device_id,\n extended_site.computed,\n None)\n else:\n log.debug(\"No device ID, so filling in ''Status unknown if device not present''\")\n all_unknown = dict([(k, DeviceStatusType.STATUS_UNKNOWN) for k in AggregateStatusType._str_map.keys()])\n self.agent_status_builder.set_status_computed_attributes(extended_site.computed,\n all_unknown,\n ComputedValueAvailability.PROVIDED)\n\n instrument_status_list = [self.agent_status_builder.get_aggregate_status_of_device(d._id)\n for d in extended_site.instrument_devices]\n\n def clv(value=None):\n return ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=value if value is not None else [])\n\n # there are no child sites, and therefore no child statuses\n extended_site.computed.platform_status = clv()\n extended_site.computed.site_status = clv()\n extended_site.computed.instrument_status = clv(instrument_status_list)\n\n extended_site.deployment_info = describe_deployments(extended_site.deployments, self.clients,\n instruments=extended_site.instrument_devices,\n instrument_status=extended_site.computed.instrument_status.value)\n\n # have portals but need to reduce to appropriate subset...\n extended_site.computed.portal_status = clv()\n\n return extended_site\n\n def get_deployment_extension(self, deployment_id='', ext_associations=None, ext_exclude=None, user_id=''):\n if not deployment_id:\n raise BadRequest(\"The deployment_id parameter is empty\")\n\n extended_resource_handler = ExtendedResourceContainer(self)\n extended_deployment = extended_resource_handler.create_extended_resource_container(\n extended_resource_type=OT.DeploymentExtension,\n resource_id=deployment_id,\n computed_resource_type=OT.DeploymentComputedAttributes,\n ext_associations=ext_associations,\n ext_exclude=ext_exclude,\n user_id=user_id)\n\n if not extended_deployment.device or not extended_deployment.site \\\n or not hasattr(extended_deployment.device, '_id') \\\n or not hasattr(extended_deployment.site, '_id'):\n return extended_deployment\n #raise Inconsistent('deployment %s should be associated with a device and a site' % deployment_id)\n\n log.info('have device: %r\\nand site: %r', extended_deployment.device.__dict__, extended_deployment.site.__dict__)\n RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)\n finder = RelatedResourcesCrawler()\n get_assns = finder.generate_related_resources_partial(RR2, [PRED.hasDevice])\n # search from PlatformDevice to subplatform or InstrumentDevice\n search_down = get_assns({PRED.hasDevice: (True, False)}, [RT.InstrumentDevice, RT.PlatformDevice])\n\n # collect ids of devices below deployment target\n platform_device_ids = set()\n instrument_device_ids = set()\n # make sure main device in deployment is in the list\n if extended_deployment.device.type_==RT.InstrumentDevice:\n instrument_device_ids.add(extended_deployment.device._id)\n else:\n platform_device_ids.add(extended_deployment.device._id)\n for a in search_down(extended_deployment.device._id, -1):\n if a.o != extended_deployment.device._id:\n if a.ot == RT.InstrumentDevice:\n instrument_device_ids.add(a.o)\n else: # a.ot == RT.PlatformDevice:\n platform_device_ids.add(a.o)\n\n # get sites (portals)\n extended_deployment.computed.portals = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=[extended_deployment.site])\n subsite_ids = set()\n device_by_site = { 
extended_deployment.site._id: extended_deployment.device._id }\n for did in platform_device_ids:\n related_sites = RR2.find_platform_site_ids_by_platform_device_using_has_device(did)\n for sid in related_sites:\n subsite_ids.add(sid)\n device_by_site[sid] = did\n for did in instrument_device_ids:\n related_sites = RR2.find_instrument_site_ids_by_instrument_device_using_has_device(did)\n for sid in related_sites:\n subsite_ids.add(sid)\n device_by_site[sid] = did\n\n # sort the objects into the lists to be displayed\n ids = list(platform_device_ids|instrument_device_ids|subsite_ids)\n device_by_id = { extended_deployment.device._id: extended_deployment.device }\n objs = self.RR.read_mult(ids)\n for obj in objs:\n if obj.type_==RT.InstrumentDevice:\n extended_deployment.instrument_devices.append(obj)\n elif obj.type_==RT.PlatformDevice:\n extended_deployment.platform_devices.append(obj)\n else: # InstrumentSite or PlatformSite\n extended_deployment.computed.portals.value.append(obj)\n\n # get associated models for all devices\n devices = list(platform_device_ids|instrument_device_ids)\n assocs = self.RR.find_associations(anyside=list(devices), id_only=False)\n ## WORKAROUND find_associations doesn't support anyside + predicate,\n # so must use anyside to find a list of values and filter for predicate later\n workaround = []\n for a in assocs:\n if a.p==PRED.hasModel:\n workaround.append(a)\n assocs = workaround\n ## end workaround\n\n model_id_by_device = { a.s: a.o for a in assocs }\n model_ids = set( [ a.o for a in assocs ])\n models = self.RR.read_mult( list(model_ids) )\n model_by_id = { o._id: o for o in models }\n\n extended_deployment.instrument_models = [ model_by_id[model_id_by_device[d._id]] for d in extended_deployment.instrument_devices ]\n extended_deployment.platform_models = [ model_by_id[model_id_by_device[d._id]] for d in extended_deployment.platform_devices ]\n extended_deployment.portal_instruments = [ device_by_id[device_by_site[p._id]]\n if p._id in device_by_site and device_by_site[p._id] in device_by_id\n else None\n for p in extended_deployment.computed.portals.value ]\n\n # TODO -- all status values\n #\n #status: !ComputedIntValue\n ## combined list of sites and their status\n ##@ResourceType=InstrumentSite,PlatformSite\n #portal_status: !ComputedListValue\n ## status of device lists\n #instrument_status: !ComputedListValue\n #platform_status: !ComputedListValue\n\n return extended_deployment\n\n\n\n\n #-----------------------------------------------\n # COMPUTED RESOURCES\n #-----------------------------------------------\n def get_marine_facility_extension(self, org_id='', ext_associations=None, ext_exclude=None, user_id=''):\n \"\"\"Returns an MarineFacilityOrgExtension object containing additional related information\n\n @param org_id str\n @param ext_associations dict\n @param ext_exclude list\n @retval observatory ObservatoryExtension\n @throws BadRequest A parameter is missing\n @throws NotFound An object with the specified observatory_id does not exist\n \"\"\"\n\n if not org_id:\n raise BadRequest(\"The org_id parameter is empty\")\n\n extended_resource_handler = ExtendedResourceContainer(self)\n\n extended_org = extended_resource_handler.create_extended_resource_container(\n extended_resource_type=OT.MarineFacilityOrgExtension,\n resource_id=org_id,\n computed_resource_type=OT.MarineFacilityOrgComputedAttributes,\n ext_associations=ext_associations,\n ext_exclude=ext_exclude,\n user_id=user_id,\n negotiation_status=NegotiationStatusEnum.OPEN)\n\n RR2 = 
EnhancedResourceRegistryClient(self.RR)\n RR2.cache_predicate(PRED.hasModel)\n RR2.cache_predicate(PRED.hasDevice)\n\n #Fill out service request information for requesting data products\n extended_org.data_products_request.service_name = 'resource_registry'\n extended_org.data_products_request.service_operation = 'find_objects'\n extended_org.data_products_request.request_parameters = {\n 'subject': org_id,\n 'predicate': 'hasResource',\n 'object_type': 'DataProduct',\n 'id_only': False,\n 'limit': 10,\n 'skip': 0\n }\n\n\n # clients.resource_registry may return us the container's resource_registry instance\n self._rr = self.clients.resource_registry\n\n # extended object contains list of member actors, so need to change to user info\n actors_list = extended_org.members\n user_list = []\n for actor in actors_list:\n log.debug(\"get_marine_facility_extension: actor: %s \", actor)\n user_info_objs, _ = self._rr.find_objects(subject=actor._id, predicate=PRED.hasInfo, object_type=RT.UserInfo, id_only=False)\n if user_info_objs:\n log.debug(\"get_marine_facility_extension: user_info_obj %s \", user_info_objs[0])\n user_list.append( user_info_objs[0] )\n\n extended_org.members = user_list\n\n\n #Convert Negotiations to OrgUserNegotiationRequest\n extended_org.open_requests = self._convert_negotiations_to_requests(extended_org, extended_org.open_requests)\n extended_org.closed_requests = self._convert_negotiations_to_requests(extended_org, extended_org.closed_requests)\n\n # lookup all hasModel predicates\n # lookup is a 2d associative array of [subject type][subject id] -> object id (model)\n lookup = dict([(rt, {}) for rt in [RT.InstrumentDevice, RT.PlatformDevice]])\n for a in RR2.filter_cached_associations(PRED.hasModel, lambda assn: assn.st in lookup):\n if a.st in lookup:\n lookup[a.st][a.s] = a.o\n\n def retrieve_model_objs(rsrc_list, object_type):\n # rsrc_list is devices that need models looked up. object_type is the resource type (a device)\n # not all devices have models (represented as None), which kills read_mult. 
so, extract the models ids,\n # look up all the model ids, then create the proper output\n model_list = [lookup[object_type].get(r._id) for r in rsrc_list]\n model_uniq = list(set([m for m in model_list if m is not None]))\n model_objs = self.clients.resource_registry.read_mult(model_uniq)\n model_dict = dict(zip(model_uniq, model_objs))\n return [model_dict.get(m) for m in model_list]\n\n extended_org.instrument_models = retrieve_model_objs(extended_org.instruments, RT.InstrumentDevice)\n extended_org.platform_models = retrieve_model_objs(extended_org.platforms, RT.PlatformDevice)\n\n log.debug(\"time to make the rollups\")\n _, site_children = self.outil.get_child_sites(org_id=org_id, id_only=False)\n all_device_statuses = self._get_master_status_table(RR2, site_children.keys())\n\n log.debug(\"site status rollup\")\n site_status = self._get_site_rollup_list(RR2, all_device_statuses, [s._id for s in extended_org.sites])\n extended_org.computed.site_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=site_status)\n\n log.debug(\"instrument status rollup\") # (is easy)\n inst_status = [self.agent_status_builder._crush_status_dict(all_device_statuses.get(k._id, {}))\n for k in extended_org.instruments]\n extended_org.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=inst_status)\n\n log.debug(\"platform status rollup\")\n plat_status = self._get_platform_rollup_list(RR2, all_device_statuses, [s._id for s in extended_org.platforms])\n extended_org.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED,\n value=plat_status)\n\n log.debug(\"rollup of this org includes all found devices\")\n org_aggregate = {}\n for k, v in AggregateStatusType._str_map.iteritems():\n aggtype_list = [a.get(k, DeviceStatusType.STATUS_UNKNOWN) for a in all_device_statuses.values()]\n org_aggregate[k] = self.agent_status_builder._crush_status_list(aggtype_list)\n\n self.agent_status_builder.set_status_computed_attributes(extended_org.computed,\n org_aggregate,\n ComputedValueAvailability.PROVIDED)\n\n # station_site is currently all PlatformSites, need to limit to those with alt_resource_type\n subset = []\n for site in extended_org.station_sites:\n if site.alt_resource_type=='StationSite':\n subset.append(site)\n extended_org.station_sites = subset\n station_status = self._get_site_rollup_list(RR2, all_device_statuses, [s._id for s in extended_org.station_sites])\n extended_org.computed.station_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=station_status)\n extended_org.deployment_info = describe_deployments(extended_org.deployments, self.clients, instruments=extended_org.instruments, instrument_status=extended_org.computed.instrument_status.value)\n\n return extended_org\n\n def _get_root_platforms(self, RR2, platform_device_list):\n # get all relevant assocation objects\n filter_fn = lambda a: a.o in platform_device_list\n\n # get child -> parent dict\n lookup = dict([(a.o, a.s) for a in RR2.filter_cached_associations(PRED.hasDevice, filter_fn)])\n\n # root platforms have no parent, or a parent that's not in our list\n return [r for r in platform_device_list if (r not in lookup or (lookup[r] not in platform_device_list))]\n\n # return a table of device statuses for all given device ids\n def _get_master_status_table(self, RR2, site_tree_ids):\n platformdevice_tree_ids = []\n for s in site_tree_ids:\n platformdevice_tree_ids += RR2.find_objects(s, PRED.hasDevice, 
RT.PlatformDevice, True)\n\n plat_roots = self._get_root_platforms(RR2, platformdevice_tree_ids)\n\n # build id -> aggstatus lookup table\n master_status_table = {}\n for plat_root_id in plat_roots:\n agg_status, _ = self.agent_status_builder.get_cumulative_status_dict(plat_root_id)\n if None is agg_status:\n log.warn(\"Can't get agg status for platform %s, ignoring\", plat_root_id)\n else:\n for k, v in agg_status.iteritems():\n master_status_table[k] = v\n\n return master_status_table\n\n\n # based on ALL the site ids in this tree, return a site rollup list corresponding to each site in the site_id_list\n def _get_site_rollup_list(self, RR2, master_status_table, site_id_list):\n\n # get rollup for each site\n master_status_rollup_list = []\n for s in site_id_list:\n #_, underlings = self.outil.get_child_sites(parent_site_id=s, id_only=True)\n master_status_rollup_list.append(self.agent_status_builder._crush_status_dict(\n self._get_site_rollup_dict(RR2, master_status_table, s)))\n\n return master_status_rollup_list\n\n # return a site rollup dict corresponding to a single site\n def _get_site_rollup_dict(self, RR2, master_status_table, site_id):\n\n attr1, underlings = self.outil.get_child_sites(parent_site_id=site_id, id_only=True)\n\n def collect_all_children(site_id, child_site_struct, child_list):\n #walk the tree of site children and put all site ids (all the way down the hierarchy) into one list\n children = child_site_struct.get(site_id, [])\n for child in children:\n child_list.append(child)\n #see if this child has children\n more_children = child_site_struct.get(child, [])\n if more_children:\n collect_all_children(child, child_site_struct, child_list)\n\n log.debug('collect_all_children child_list: %s', child_list)\n child_list = list( set(child_list ) )\n return child_list\n\n all_site_ids = [site_id]\n all_site_ids = collect_all_children(site_id, underlings, all_site_ids)\n\n site_aggregate = {}\n #all_site_ids = underlings.keys()\n all_device_ids = []\n for s in all_site_ids:\n all_device_ids += RR2.find_objects(s, PRED.hasDevice, RT.PlatformDevice, True)\n all_device_ids += RR2.find_objects(s, PRED.hasDevice, RT.InstrumentDevice, True)\n\n log.debug(\"Calculating cumulative rollup values for all_device_ids = %s\", all_device_ids)\n for k, v in AggregateStatusType._str_map.iteritems():\n aggtype_list = [master_status_table.get(d, {}).get(k, DeviceStatusType.STATUS_UNKNOWN) for d in all_device_ids]\n log.debug(\"aggtype_list for %s is %s\", v, zip(all_device_ids, aggtype_list))\n site_aggregate[k] = self.agent_status_builder._crush_status_list(aggtype_list)\n\n return site_aggregate\n\n\n\n def _get_platform_rollup_list(self, RR2, master_status_table, platform_id_list):\n finder = RelatedResourcesCrawler()\n get_assns = finder.generate_related_resources_partial(RR2, [PRED.hasDevice])\n full_crawllist = [RT.InstrumentDevice, RT.PlatformDevice]\n search_down = get_assns({PRED.hasDevice: (True, False)}, full_crawllist)\n\n\n # get rollup for each platform device\n master_status_rollup_list = []\n\n for p in platform_id_list:\n\n # the searches return a list of association objects, so compile all the ids by extracting them\n underlings = set([])\n # we want only those IDs that are not the input resource id\n for a in search_down(p, -1):\n underlings.add(a.o)\n underlings.add(p)\n\n master_status_rollup_list.append(self.agent_status_builder._crush_status_list(\n [self.agent_status_builder._crush_status_dict(master_status_table.get(k,
{})) for k in underlings]\n ))\n\n return master_status_rollup_list\n\n\n def _convert_negotiations_to_requests(self, extended_marine_facility=None, negotiations=None):\n assert isinstance(extended_marine_facility, MarineFacilityOrgExtension)\n assert isinstance(negotiations, list)\n\n #Get all associations for user info\n assoc_list = self.clients.resource_registry.find_associations(predicate=PRED.hasInfo, id_only=False)\n\n ret_list = []\n followup_list = defaultdict(list)\n\n for neg in negotiations:\n\n request = IonObject(OT.OrgUserNegotiationRequest, ts_updated=neg.ts_updated, negotiation_id=neg._id,\n negotiation_type=NegotiationTypeEnum._str_map[neg.negotiation_type],\n negotiation_status=NegotiationStatusEnum._str_map[neg.negotiation_status],\n originator=ProposalOriginatorEnum._str_map[neg.proposals[-1].originator],\n request_type=neg.proposals[-1].type_,\n description=neg.description, reason=neg.reason,\n org_id=neg.proposals[-1].provider)\n\n # since this is a proxy for the Negotiation object, simulate its id to help the UI deal with it\n request._id = neg._id\n\n actor_assoc = [ a for a in assoc_list if a.s == neg.proposals[-1].consumer ]\n if actor_assoc:\n member_assoc = [ m for m in extended_marine_facility.members if m._id == actor_assoc[0].o ]\n if member_assoc:\n request.user_id = member_assoc[0]._id\n request.name = member_assoc[0].name\n else:\n followup_list[actor_assoc[0].o].append(request)\n\n ret_list.append(request)\n\n # assign names/user_ids to any requests that weren't in the members list, likely enroll requests\n if len(followup_list):\n user_infos = self.clients.resource_registry.read_mult(followup_list.keys())\n udict = {}\n for u in user_infos:\n udict[u._id] = u\n\n for k, v in followup_list.iteritems():\n for request in v:\n request.user_id = k\n request.name = udict[k].name\n\n return ret_list\n","sub_path":"ion/services/sa/observatory/observatory_management_service.py","file_name":"observatory_management_service.py","file_ext":"py","file_size_in_byte":78495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"416706334","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.10-x86_64/egg/dhcpkit/utils.py\n# Compiled at: 2017-06-24 06:55:32\n# Size of source mod 2**32: 7885 bytes\n\"\"\"\nUtility functions\n\"\"\"\nimport codecs, re, idna\nfrom typing import Iterable, Tuple, Union\n\ndef camelcase_to_underscore(camelcase: str) -> str:\n \"\"\"\n Convert a name in CamelCase to non_camel_case\n\n :param camelcase: CamelCased string\n :return: non_camel_cased string\n \"\"\"\n s0 = camelcase.replace('-', '_')\n s1 = re.sub('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', s0)\n s2 = re.sub('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n s3 = s2.lower()\n return re.sub('_+', '_', s3)\n\n\ndef camelcase_to_dash(camelcase: str) -> str:\n \"\"\"\n Convert a name in CamelCase to non-camel-case\n\n :param camelcase: CamelCased string\n :return: non-camel-cased string\n \"\"\"\n return camelcase_to_underscore(camelcase).replace('_', '-')\n\n\ndef validate_domain_label(label: str):\n \"\"\"\n Check if a given string is a valid domain label\n\n :param label: The domain label\n \"\"\"\n try:\n idna.alabel(label)\n except idna.IDNAError as e:\n if e.args and 'A-label' in e.args[0]:\n raise ValueError('Invalid label') from None\n else:\n raise ValueError(e.args[0]) from None\n\n\ndef 
parse_domain_bytes(buffer: bytes, offset: int=0,\n length: int=None, allow_relative: bool=False) -> Tuple[(int, str)]:\n \"\"\"\n Extract a single domain name.\n\n :param buffer: The buffer to read data from\n :param offset: The offset in the buffer where to start reading\n :param length: The amount of data we are allowed to read from the buffer\n :param allow_relative: Allow domain names that do not end with a zero-length label\n :return: The number of bytes used from the buffer and the extracted domain name\n \"\"\"\n my_offset = 0\n max_offset = length or len(buffer) - offset\n current_labels = []\n while max_offset > my_offset:\n label_length = buffer[(offset + my_offset)]\n my_offset += 1\n if label_length == 0:\n domain_name_bytes = (b'.').join(current_labels) + b'.'\n domain_name = idna.decode(domain_name_bytes)\n if len(domain_name) > 254:\n raise ValueError('Domain too long')\n return (my_offset, domain_name)\n if label_length > 63:\n raise ValueError('Label too long')\n if my_offset + label_length > max_offset:\n raise ValueError('Invalid encoded domain name, exceeds available buffer')\n current_label_bytes = buffer[offset + my_offset:offset + my_offset + label_length]\n my_offset += label_length\n current_labels.append(current_label_bytes)\n\n if allow_relative:\n domain_name_bytes = (b'.').join(current_labels)\n domain_name = idna.decode(domain_name_bytes)\n if len(domain_name) > 253:\n raise ValueError('Domain too long')\n return (my_offset, domain_name)\n raise ValueError('Domain name must end with a 0-length label')\n\n\ndef parse_domain_list_bytes(buffer: bytes, offset: int=0,\n length: int=None) -> Tuple[(int, list)]:\n \"\"\"\n Extract a list of domain names.\n\n :param buffer: The buffer to read data from\n :param offset: The offset in the buffer where to start reading\n :param length: The amount of data we are allowed to read from the buffer\n :return: The number of bytes used from the buffer and the extracted domain names\n \"\"\"\n my_offset = 0\n max_offset = length or len(buffer) - offset\n domain_names = []\n while max_offset > my_offset:\n domain_name_len, domain_name = parse_domain_bytes(buffer, offset=offset + my_offset, length=max_offset - my_offset)\n domain_names.append(domain_name)\n my_offset += domain_name_len\n\n return (my_offset, domain_names)\n\n\ndef encode_domain(domain_name: str, allow_relative: bool=False) -> bytearray:\n \"\"\"\n Encode a single domain name as a sequence of bytes\n\n :param domain_name: The domain name\n :param allow_relative: Assume that domain names that don't end with a period are relative and encode them as such\n :return: The encoded domain name as bytes\n \"\"\"\n if not isinstance(domain_name, str):\n raise ValueError('Domain name must be a string')\n buffer = bytearray()\n if domain_name.endswith('.'):\n domain_name = domain_name.rstrip('.') + '.'\n try:\n domain_name = idna.encode(domain_name).decode('ascii')\n except idna.IDNAError as e:\n if e.args and 'A-label' in e.args[0]:\n raise ValueError('Invalid label') from None\n else:\n raise ValueError(e.args[0]) from None\n\n if allow_relative:\n if domain_name.endswith('.'):\n domain_name = domain_name.rstrip('.')\n end_with_zero = True\n else:\n end_with_zero = False\n else:\n domain_name = domain_name.rstrip('.')\n end_with_zero = True\n domain_name_parts = domain_name.split('.')\n for label in domain_name_parts:\n validate_domain_label(label)\n label_length = len(label)\n buffer.append(label_length)\n buffer.extend(label.encode('ascii'))\n\n if end_with_zero:\n 
buffer.append(0)\n return buffer\n\n\ndef encode_domain_list(domain_names: Iterable[str]) -> bytearray:\n \"\"\"\n Encode a list of domain names to a sequence of bytes\n\n :param domain_names: The list of domain names\n :return: The encoded domain names as bytes\n \"\"\"\n buffer = bytearray()\n for domain_name in domain_names:\n buffer.extend(encode_domain(domain_name))\n\n return buffer\n\n\ndef normalise_hex(hex_data: Union[(str, bytes)], include_colons: bool=False) -> str:\n \"\"\"\n Normalise a string containing hexadecimal data\n\n :param hex_data: Hexadecimal data, either with or without colon separators per byte\n :param include_colons: Whether to include colon separators per byte in the output\n :return: Hexadecimal data in lowercase without colon separators\n \"\"\"\n if isinstance(hex_data, bytes):\n hex_data = codecs.encode(hex_data, 'hex').decode('ascii')\n if hex_data == '':\n return hex_data\n if re.match('^[0-9A-Fa-f]{2}(:?[0-9A-Fa-f]{2})*$', hex_data):\n hex_data = hex_data.replace(':', '').lower()\n if include_colons:\n hex_data = ':'.join(re.findall('..', hex_data))\n return hex_data\n raise ValueError('Input data is not valid hex data')","sub_path":"pycfiles/dhcpkit-1.0.7-py3.4/utils.cpython-34.py","file_name":"utils.cpython-34.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"283434713","text":"import alg_prim\n\nvet_grafo = []\nvet_saida = []\n\n#w_file = open(\"dados_grafo.txt\", \"r\")\nw_file = open(\"teste.txt\", \"r\")\n\nw_lines = w_file.readlines()\n\nfor i in w_lines:\n vet_grafo.append(i.split())\n\nalg_prim.execute(vet_grafo, 1, vet_saida)\n\n#for saida in vet_saida:\n# print(saida)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"371777383","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n \n def push_at_start(self, data):\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n \n def push_at_end(self, data):\n new_node = Node(data)\n if self.head is None: \n self.head = new_node \n return\n last = self.head\n while last.next:\n last = last.next\n last.next = new_node\n \n def push_at_pos(self, data, val):\n new_node = Node(data)\n if self.head is None: \n self.head = new_node \n return\n temp = self.head\n while temp.next:\n if temp.data == val:\n break\n temp = temp.next\n new_node.next = temp.next\n temp.next = new_node\n\n def print_list(self):\n temp = self.head\n while temp:\n print(temp.data)\n temp = temp.next\n \n def print_middle(self):\n pass\n\nif __name__ == \"__main__\":\n llist = LinkedList()\n llist.push_at_start(1)\n llist.push_at_start(2)\n llist.push_at_start(3)\n llist.push_at_end(2)\n llist.push_at_end(3)\n llist.push_at_pos(0, 1)\n llist.print_list()","sub_path":"linked_list/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"478158693","text":"import logging\n\nclass RemoteCommandMeta( type ):\n\tdef __init__( cls, name, bases, dict ):\n\t\tsuper( RemoteCommandMeta, cls ).__init__( name, bases, dict )\n\t\tfullname = dict.get( 'name', None )\n\t\tif not fullname: return\n\t\tRemoteCommandRegistry.get().registerCommand( fullname, cls 
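encode_domain above writes DNS wire format: each label is emitted as a length byte followed by its bytes, and an absolute name is terminated by a zero-length label; parse_domain_bytes walks the same framing in reverse. A round-trip sketch restricted to plain ASCII labels (the real code routes labels through idna and also enforces the overall name-length limits):

def encode_ascii_domain(name):
    buf = bytearray()
    for label in name.rstrip('.').split('.'):
        assert 0 < len(label) <= 63, 'label length out of range'
        buf.append(len(label))            # length prefix
        buf.extend(label.encode('ascii'))
    buf.append(0)                         # zero-length root label => absolute name
    return bytes(buf)

def decode_ascii_domain(buf):
    labels, i = [], 0
    while buf[i]:                         # stop at the zero-length label
        n = buf[i]
        labels.append(buf[i + 1:i + 1 + n].decode('ascii'))
        i += 1 + n
    return '.'.join(labels) + '.'

wire = encode_ascii_domain('example.com.')
assert wire == b'\x07example\x03com\x00'
assert decode_ascii_domain(wire) == 'example.com.'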
)\n\n##----------------------------------------------------------------##\n\n##----------------------------------------------------------------##\nclass RemoteCommand( object, metaclass=RemoteCommandMeta ):\n\tdef run( self, argv ):\n\t\tpass\n\n# ##----------------------------------------------------------------##\n# def RemoteFunc( name ):\n# \tdef wrapperFunc( func ):\n# \t\tclass AnonymousCommand( RemoteCommand ):\n# \t\t\tdef run( argv ):\n# \t\t\t\treturn func( argv )\n# \t\tRemoteCommandRegistry.get().registerCommand( name, AnonymousCommand )\n# \treturn wrapperFunc\n\n##----------------------------------------------------------------##\nclass RemoteCommandRegistry( object ):\n\t_singleton = None\n\n\t@staticmethod\n\tdef get():\n\t\tif not RemoteCommandRegistry._singleton:\n\t\t\treturn RemoteCommandRegistry()\n\t\treturn RemoteCommandRegistry._singleton\n\n\tdef __init__( self ):\n\t\tRemoteCommandRegistry._singleton = self\n\t\tself.commands = {}\n\n\tdef registerCommand( self, name, cmdClas ):\n\t\tself.commands[ str(name) ] = cmdClas\n\n\tdef doCommand( self, argv, output ):\n\t\tif argv:\n\t\t\tstrArgv = [ str( v, encoding = 'utf-8' ) for v in argv ]\n\t\t\tcmdName = strArgv[ 0 ]\n\t\t\tclas = self.commands.get( cmdName, None )\n\t\t\tif clas:\n\t\t\t\tcmd = clas()\n\t\t\t\tif len( argv ) > 1 :\n\t\t\t\t\targs = strArgv[1:]\n\t\t\t\telse:\n\t\t\t\t\targs = []\n\t\t\t\ttry:\n\t\t\t\t\tresult = cmd.run( *args )\n\t\t\t\t\toutput.append( result )\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlogging.exception( e )\n\t\t\telse:\n\t\t\t\tlogging.warning( 'no remote command found:' + cmdName )\n\n\nRemoteCommandRegistry()\n\n","sub_path":"lib/gii/core/RemoteCommand.py","file_name":"RemoteCommand.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"237550816","text":"import os, csv\nfrom booksdb import *\nfrom sqlalchemy import create_engine, desc\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nengine = create_engine(\"postgresql://oupmdpbbloafkq:524dc2ab68ad1420bdb528f75b63e4c1f43af3d677d07e2dfe2e7ef3bda4c4eb@ec2-34-198-243-120.compute-1.amazonaws.com:5432/d6i86eicfinhqi\")\ndbscope = scoped_session(sessionmaker(bind=engine))\n\ndef main():\n Base.metadata.create_all(bind=engine)\n f = open(\"books.csv\")\n # print(f)\n reader = csv.reader(f)\n next(reader)\n for isbn, title, author, year in reader:\n book = Books(isbn=isbn, title=title, author=author, year=int(year))\n dbscope.add(book)\n dbscope.commit()\n dbscope.close()\n\nif __name__ == \"__main__\":\n main()","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"254602623","text":"# https://leetcode.com/problems/palindrome-partitioning/\n\nclass Solution(object):\n def partition(self, s):\n ret = []\n for i in range(1, len(s)+1):\n if s[:i] == s[i-1::-1]:\n for rest in self.partition(s[i:]):\n ret.append([s[:i]]+rest)\n if not ret:\n return [[]]\n return ret","sub_path":"leet/131.py","file_name":"131.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"603141710","text":"import sys\nimport argparse\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import early_stopping, ModelCheckpoint\nimport yaml\n\nfrom ArxivDataLoader import ArxivDataModule\nfrom ImdbDataLoader import 
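RemoteCommandMeta above is the classic self-registering metaclass: any subclass that declares a `name` attribute lands in the registry as a side effect of class creation. The same pattern in a self-contained sketch (hypothetical names, a module-level dict instead of the singleton):

registry = {}

class AutoRegister(type):
    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        fullname = namespace.get('name')
        if fullname:                      # abstract bases without a name are skipped
            registry[fullname] = cls

class Command(metaclass=AutoRegister):
    def run(self, *argv):
        pass

class Ping(Command):
    name = 'ping'
    def run(self, *argv):
        return 'pong'

assert registry['ping']().run() == 'pong'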
ImdbDataModule\nfrom HypeDataLoader import HypeDataModule\nfrom WOSDataLoader import WOSDataModule\nfrom PatentDataLoader import PatentDataModule\nfrom DocumentModel import DocumentModel\n\n\ndef main(config, gpus):\n patience = config['early_stop_patience']\n max_epochs = config['maximum_num_epochs']\n min_delta = config['early_stop_delta']\n log_dir = config['logging_dir']\n run_name = config['experiment_name']\n seed = config['seed']\n\n if config['batch_size'] > 8:\n assert config['batch_size'] in {16, 32}\n if config['dataset'] in {'small_hyper', 'hyperpartisan_news'}:\n if config['batch_size'] == 16:\n accumulate_num = 4\n else:\n accumulate_num = 8\n config['batch_size'] = 4\n else:\n if config['batch_size'] == 16:\n accumulate_num = 2\n else:\n accumulate_num = 4\n config['batch_size'] = 8\n else:\n accumulate_num = 1\n\n if config['dataset'] in {'small_hyper', 'hyperpartisan_news'}:\n data_module = HypeDataModule(config)\n elif config['dataset'] == 'web_of_science':\n data_module = WOSDataModule(config)\n elif config['dataset'] == 'imdb':\n data_module = ImdbDataModule(config)\n elif config['dataset'] == 'arxiv':\n data_module = ArxivDataModule(config)\n elif config['dataset'] == 'patent':\n data_module = PatentDataModule(config)\n config['counts'] = data_module.label_counts\n else:\n print(f\"### Unknown dataset: {config['dataset']} ###\")\n raise NotImplementedError\n pl.trainer.seed_everything(seed)\n model = DocumentModel(config)\n\n early_stop_callback = early_stopping.EarlyStopping(\n monitor='validation_f1',\n min_delta=min_delta,\n patience=patience,\n verbose=False,\n mode='max'\n )\n\n model_checkpoint_callback = ModelCheckpoint(\n monitor='validation_f1',\n dirpath='models/',\n filename=run_name,\n mode='max'\n )\n\n logger = pl.loggers.TensorBoardLogger(log_dir,\n default_hp_metric=True,\n name=run_name)\n logger.log_hyperparams(config)\n\n trainer = pl.Trainer(callbacks=[early_stop_callback,\n model_checkpoint_callback],\n logger=logger,\n enable_pl_optimizer=True,\n gradient_clip_val=0.25,\n accumulate_grad_batches=accumulate_num,\n max_epochs=max_epochs,\n log_every_n_steps=50,\n progress_bar_refresh_rate=0,\n gpus=[gpus] if gpus != None else None)\n # precision=16)\n trainer.fit(model, data_module)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train a model')\n parser.add_argument('--config', default=None)\n parser.add_argument('--gpus', default=None, type=int)\n args = parser.parse_args()\n\n try:\n config = yaml.load(open(args.config, 'r'), Loader=yaml.SafeLoader)\n main(config, args.gpus)\n except IndexError:\n print(f'Must pass in config file. 
Usage: python3 train_script.py config_file')\n except FileNotFoundError:\n print(f'Unable to find file {sys.argv[1]}')\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n","sub_path":"DocumentModel/train_script.py","file_name":"train_script.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"612384429","text":"import numpy as np\nimport pandas as pd\nimport time\nimport copy\n\nfrom .choice import get_choice\nfrom .mctreesearch import MCTreeSearch\n\n\nclass NetNode(object):\n def __init__(self, itemset_columns):\n self.parents = []\n self.children = []\n self.state = []\n self.index = itemset_columns\n self.Q = 0\n\n\n'''\ndef initNetNode(node):\n for i in range(len(node.index)):\n if node.index[i] == 0:\n tempitem = node.index\n tempitem[i] = 1\n child = NetNode(tempitem)\n node.children.append(child)\n child.parents.append(node)\n'''\n\n\ndef initLayer(layer):\n print(\"start initLayer\")\n start_time = time.time()\n # initialize the next layer\n layer_next = []\n layer_index = []\n for pnode in layer:\n # search the optional children nodes\n for k in range(len(pnode.index)):\n if pnode.index[k] == 0:\n item = copy.deepcopy(pnode.index)\n item[k] = 1\n if item in layer_index: # the node is found and add link to parent\n print(item)\n location = layer_index.index(item)\n layer_next[location].parents.append(pnode)\n pnode.children.append(layer_next[location])\n else: # create a new child node\n cnode = NetNode(item)\n cnode.parents.append(pnode)\n pnode.children.append(cnode)\n layer_next.append(cnode)\n layer_index.append(item)\n print(\"--- {} seconds ---\".format((time.time() - start_time)))\n return layer_next, layer_index\n\n\ndef is_pair(item1, item2):\n is_paired = False\n record = 0\n for i in range(len(item1)):\n if item1[i] != 0 and item2[i] != 0 and item1[i] != item2[i]:\n return is_paired\n elif item1[i] != 0 and item2[i] == 0:\n record += 1\n elif item1[i] == 0 and item2[i] != 0:\n record += 1\n if record == 2:\n is_paired = True\n return is_paired\n\n\ndef merge(item1, item2):\n item = copy.deepcopy(item1)\n for i in range(len(item1)):\n if item1[i] == 0 and item2[i] != 0:\n item[i] = item2[i]\n return item\n\n\ndef best_set_in_layer(Layer, U, L):\n max_score = 0\n best_set = []\n next_layer = True\n for i in range(1, len(Layer)):\n Layer[i] = sorted(Layer[i], key=lambda node: node.Q, reverse=True)\n if next_layer:\n if Layer[i][0].Q > max_score*L:\n max_score = Layer[i][0].Q\n best_set = Layer[i][0].state\n elif Layer[i][0].Q > max_score*U:\n max_score = Layer[i][0].Q\n best_set = Layer[i][0].state\n\n if len(Layer[i]) < 2 or Layer[i][0].Q == 0 or (Layer[i][0].Q - Layer[i][1].Q)/Layer[i][0].Q > 0.2:\n next_layer = False\n else:\n next_layer = True\n return best_set, max_score\n\n\ndef hotspot(forecast, real, M, PT):\n columns = forecast.columns[:-1]\n tempindex = [0] * len(columns)\n node = NetNode(tempindex)\n Layer = []\n Layer.append([node])\n Bset = []\n maxQ = 0\n\n # build the 1st layer\n layer1, layer1index = initLayer(Layer[0])\n print(len(layer1))\n for i in range(len(layer1)):\n choice = get_choice(forecast, layer1index[i])\n print(\"MCTS\")\n start_time = time.time()\n b_node = MCTreeSearch(forecast, real, choice, M, PT)\n print(\"--- {} seconds ---\".format((time.time() - start_time)))\n # print(\"--- Q score: %f\" % b_node.Q)\n # print(b_node.state)\n\n if b_node.Q > maxQ:\n Bset = copy.deepcopy(b_node.state)\n maxQ = b_node.Q\n print(\"--- max Q 
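The batch-size branching above trades memory for gradient-accumulation steps: hyperpartisan runs cap the micro-batch at 4, the other datasets at 8, and `accumulate_grad_batches` restores the requested effective batch. The invariant is simply micro_batch * accumulation_steps == requested_batch:

def split_batch(requested, micro_cap):
    # Shrink the per-step batch to the cap and accumulate the remainder.
    assert requested % micro_cap == 0, 'requested batch must divide evenly'
    return micro_cap, requested // micro_cap

assert split_batch(16, 4) == (4, 4)   # hyperpartisan, batch_size=16 in the script
assert split_batch(32, 4) == (4, 8)   # hyperpartisan, batch_size=32
assert split_batch(16, 8) == (8, 2)   # remaining datasets, batch_size=16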
score: %f\" % maxQ)\n print(Bset)\n layer1[i].state = b_node.state\n layer1[i].Q = b_node.Q\n Layer.append(layer1)\n\n # build layer l + 1\n for l in range(1, len(tempindex)):\n layer_l, _layer_index = initLayer(Layer[l])\n for tnode in layer_l:\n choice = []\n print(\"length of tnode.parent: %d\" % len(tnode.parents))\n for i in range(len(tnode.parents)):\n # print(\"tnode parent state:\")\n # print(tnode.parents[i].state)\n for j in range(i+1, len(tnode.parents)):\n for st1 in tnode.parents[i].state:\n for st2 in tnode.parents[j].state:\n if is_pair(st1, st2):\n stemp = merge(st1, st2)\n if stemp not in choice:\n choice.append(stemp)\n b_node = MCTreeSearch(forecast, real, choice, M, PT)\n print(\"Q score: %f\" % b_node.Q)\n # print(\"--- max Q score: %f\" % maxQ)\n # print('best set in this node:')\n print(b_node.state)\n if b_node.Q > maxQ*0.99:\n print(\"---------- best set changed ----------\")\n Bset = copy.deepcopy(b_node.state)\n maxQ = b_node.Q\n print(\"--- max Q score: %f\" % maxQ)\n print(Bset)\n tnode.state = b_node.state\n tnode.Q = b_node.Q\n Layer.append(layer_l)\n # Bset, maxQ = best_set_in_layer(Layer, 1.25, 0.99)\n return Bset, maxQ\n ","sub_path":"Code/hotspot/hotspot/hotspot.py","file_name":"hotspot.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403048442","text":"import sys, os\r\nDAFAPP_DIR = os.environ.get(\"DAFAPPSERVER_ROOTDIR\")\r\nsys.path.append(DAFAPP_DIR + 'ibank/accounting/script_modules')\r\nimport accountingapi\r\n\r\ndef CreateJournal(config, invoicenumber, invoicedate, branch_code, user_id_commit):\r\n journal = config.CreatePObject('Journal')\r\n datevalue = accountingapi.GetActiveAccDay(config).DateValue\r\n datevalue = config.ModDateTime.EncodeDate(datevalue[0],datevalue[1],datevalue[2])\r\n journal.journal_date = datevalue\r\n journal.description = 'Jurnal Invoice Purchasing %s' % (invoicenumber)\r\n journal.branch_code = branch_code\r\n journalapi = accountingapi.Journal(journal)\r\n journal_type = 'GL' # Jurnal umum\r\n journalapi.SetNewInstance(journal_type)\r\n\r\n journal.is_posted = 'F'\r\n journal.is_partlychecked = 'T'\r\n journal.userid_create = user_id_commit\r\n \r\n return journal\r\n\r\ndef CreateJournalItem(config, strDescription, journal, oAccountInstance, debit, credit, user_id_commit):\r\n item = config.CreatePObject('JournalItem')\r\n item.description = strDescription\r\n itemapi = accountingapi.JournalItem(item)\r\n itemapi.SetNewInstance(journal, oAccountInstance, debit, credit)\r\n item.JournalItemStatus = 'C'\r\n item.userid_create = user_id_commit\r\n item.userid_check = user_id_commit\r\n\r\ndef DAFScriptMain(config, parameter, returnpacket):\r\n # config: ISysConfig object\r\n # parameter: TPClassUIDataPacket\r\n # returnpacket: TPClassUIDataPacket (undefined structure)\r\n\r\n user_id_commit = parameter.FirstRecord.user_id_commit\r\n netamount = parameter.FirstRecord.netamount\r\n invoicenumber = parameter.FirstRecord.invoicenumber\r\n invoicedate = parameter.FirstRecord.invoicedate\r\n branch_code = parameter.FirstRecord.branch_code\r\n currency_code = parameter.FirstRecord.currency_code\r\n #rcv_acccode = parameter.FirstRecord.rcv_acccode\r\n pay_acccode = parameter.FirstRecord.pay_acccode\r\n\r\n oAccInstInvPurchase = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'invpurchase', config)\r\n oAccInstPay = accountingapi.GetAcountInstance(config, branch_code, currency_code, pay_acccode)\r\n\r\n 
config.BeginTransaction()\r\n try:\r\n journal = CreateJournal(config, invoicenumber, invoicedate, branch_code, user_id_commit)\r\n\r\n # journal item invoice sales\r\n strDescription = 'Nilai pembelian Invoice Purchasing '+ invoicenumber\r\n CreateJournalItem(config, strDescription, journal, oAccInstInvPurchase, netamount, 0.0, user_id_commit)\r\n\r\n # journal item payable\r\n strDescription = 'Hutang dari Invoice Purchasing '+ invoicenumber\r\n CreateJournalItem(config, strDescription, journal, oAccInstPay, 0.0, netamount, user_id_commit)\r\n\r\n #CheckDebitCreditBal(config, journal.journal_no)\r\n\r\n config.Commit()\r\n isSucceed = 1\r\n except:\r\n config.Rollback()\r\n isSucceed = 0\r\n raise\r\n \r\n returnpacket.CreateValues(['isSucceed',isSucceed])\r\n\r\n return 1\r\n","sub_path":"scripts/purchasing/invpurch journal.py","file_name":"invpurch journal.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243392546","text":"# This file contains the functions needed to format certain things. This will help with maintenance and with\n# compatability for potential future libraries\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport copy\nimport re\nimport pdb\n\ndef categorize_dataset(dataset, editdata=None):\n original_target = dataset['target']\n original_target_names = dataset['target_names']\n\n cartypes = []\n\n # Finds all types of vehicles (So long as they are named correctly)\n delimiters = \"_\", \".xlsx\", \"\\\\\"\n regexPattern = '|'.join(map(re.escape, delimiters))\n\n for name in original_target_names:\n tokens = re.split(regexPattern, name)\n cartype = tokens[1] # 1 Should be the position of the type of the vehicle in the string\n if cartype not in cartypes:\n cartypes.append(cartype)\n\n new_data = dataset['data'] if editdata is None else editdata\n new_target = []\n for val in original_target: # Remakes target and target_names for cartype, not filename\n name = original_target_names[val]\n for i, cartype in enumerate(cartypes):\n if name.find(cartype) != -1:\n new_target.append(i)\n break\n\n return {'data':new_data, 'target':np.array(new_target), 'target_names':np.array(cartypes),\n '__type__':dataset['__type__'], 'features':dataset['features']}\n\ndef load_scikit_dataset_withFeats(featuredata, feats):\n dataset = {}\n\n data = [] # 2D list of car feature values\n target = [] # list of ints that helps associate which rows in 'data' belong to which file (0, 1, 2, 3, etc)\n target_names = [] # ['carfilename1.xslx', 'carfilename2.xslx', etc]\n i = 0\n for file in sorted(featuredata):\n features = copy.deepcopy(featuredata[file])\n if i == 0: # If this is the first time, initialize lists\n for feature in feats:\n data.append(features[feature])\n else: # If not first time, concatenate to pre existing lists\n for row, feature in enumerate(feats):\n data[row].extend(features[feature])\n long_list_of_targets = [i] * len(features[feats[0]]) # IE: [1] * 5 feature samples = [1, 1, 1, 1, 1]\n target.extend(long_list_of_targets)\n target_names.append(file) # before: [[1, 2, 3, 4, 5, ...]\n i += 1 # transpose [1, 2, 3, 4, 5, ...]\n dataset['data'] = np.rot90(data, axes=(1, 0)) # ----> [1, 2, 3, 4, 5, ...]]\n dataset['target'] = np.array(target) #\n dataset['target_names'] = np.array(target_names) # after: [[1, 1, 1]\n dataset['__type__'] = ''\n dataset['features'] = feats\n # [3, 3, 3] <- This now matches data format in\n return dataset # [4, 4, 4] 
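The journal built above is standard double entry: the purchase account is debited and the payables account credited by the same netamount, so the journal always balances inside one transaction. A sketch of that balance invariant (hypothetical plain dicts in place of JournalItem):

def build_invoice_items(netamount):
    # Debit purchases, credit payables -- one leg each, equal amounts.
    items = [
        {'account': 'invpurchase', 'debit': netamount, 'credit': 0.0},
        {'account': 'payable',     'debit': 0.0,       'credit': netamount},
    ]
    assert sum(i['debit'] for i in items) == sum(i['credit'] for i in items)
    return items

assert len(build_invoice_items(150.0)) == 2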
sklearn.decomposition.PCA.fit(X)\n\n \n\ndef load_scikit_dataset(featuredata, raw_features, types):\n # loads car data into scikit-learn's dataset format that's commonly used with their tools\n #\n # 'type' = 'z', 'xy', or 'xyz'\n # 'raw_features' is a string that has the features (without prepended z/xy/xyz) separated by spaces\n # IE: \"stdev skewness p2p\"\n #\n # A hypothetical completed dataset dictionary will look like this:\n # KEYS\n # _____________________________________________________________________________________\n # | 'data' 'target' 'target_name' |\n # | stdev skew p2p 0 1 2 |\n # | 0 [ 1, 1, 1] 0 ['car1.xlsx', 'car2.xlsx', 'car3.xlsx'] |\n # | 1 [ 2, 9, 7] 0 |\n # I | 2 [ 3, 3, 5] 0 ^ |\n # N | ..... ... | |\n # D | 31 [ 4, -2, 9] 1 -> ('target' is index of 'target_name') |\n # E | 32 [ 5, 6, 1] 1 |\n # X | 33 [ 6, -15, 2] 1 |\n # | ..... ... |\n # | 61 [ 7, 2, 5] 2 |\n # | 62 [ 8, 0, 6] 2 |\n # | 63 [ 8, 0, 6] 2 |\n # |_____________________________________________________________________________________|\n\n raw_features = raw_features.split(' ')\n feats = []\n for type in types:\n feats.extend([type + feat for feat in raw_features]) # IE: if type='z', it does ('z' + 'stdev') to make 'zstdev'\n dataset = {} # dictionary that we'll return that contains 'data', 'target', and 'target_names'\n data = [] # 2D list of car feature values\n target = [] # list of ints that helps associate which rows in 'data' belong to which file (0, 1, 2, 3, etc)\n target_names = [] # ['carfilename1.xslx', 'carfilename2.xslx', etc]\n i = 0\n for file in sorted(featuredata):\n features = copy.deepcopy(featuredata[file])\n if i == 0: # If this is the first time, initialize lists\n for feature in feats:\n data.append(features[feature])\n else: # If not first time, concatenate to pre existing lists\n for row, feature in enumerate(feats):\n data[row].extend(features[feature])\n long_list_of_targets = [i] * len(features[feats[0]]) # IE: [1] * 5 feature samples = [1, 1, 1, 1, 1]\n target.extend(long_list_of_targets)\n target_names.append(file) # before: [[1, 2, 3, 4, 5, ...]\n i += 1 # transpose [1, 2, 3, 4, 5, ...]\n dataset['data'] = np.rot90(data, axes=(1, 0)) # ----> [1, 2, 3, 4, 5, ...]]\n dataset['target'] = np.array(target) #\n dataset['target_names'] = np.array(target_names) # after: [[1, 1, 1]\n dataset['__type__'] = ','.join(types) # transpose [2, 2, 2]\n dataset['features'] = feats\n # [3, 3, 3] <- This now matches data format in\n return dataset # [4, 4, 4] sklearn.decomposition.PCA.fit(X)\n\n\ndef format_matrix_data(featuredata):\n # - Formats data for ScatterPlot Matrices\n # - This block attempts to mimic the format of a Comma Separated Value file, without actually creating\n # a .csv file by instead using a dictionary full of columns. (pandas.DataFrame() accepts csv file, OR dict)\n # - Here there are three dictionaries containing information for their respective Scatterplots (z, xy, xyz)\n # and their format helps replicate this format:\n # https://github.com/mwaskom/seaborn-data/blob/master/iris.csv\n #\n # EX: (for hypothetical dictionary 'z{}')\n # z = {'Car':['FedEx Truck.csv', 'Batmobile.csv', ...], ('Cars' helps us color scatterplot later on)\n # 'zmin':[1.0, 2.3, ...],\n # 'zmax':[1.5, -4, ...],\n # ... 
}\n #\n # Translates to:\n #\n # | zmin | zmax | zmean | zmedian | zp2p | zrms | zp2prms | Car |\n # ----------------------------------------------------------------------------------------\n # run 1 | 1.0 | 1.5 | 5 | -23 | 5 | 3 | 34 | FedEx Truck |\n # ----------------------------------------------------------------------------------------\n # run 2 | # | # | # | # | # | # | # | FedEx Truck |\n # (down all the way until last run...)\n # ----------------------------------------------------------------------------------------\n # run 1 | # | # | # | # | # | # | # | Batmobile |\n # ----------------------------------------------------------------------------------------\n # run 2 | # | # | # | # | # | # | # | Batmobile |\n # (down all the way until last run...)\n # (and so forth)\n\n feats = 'min max mean median p2p rms p2prms'.split(' ') # list of features to be compared\n zs = ['z' + feat for feat in feats]\n xys = ['xy' + feat for feat in feats]\n xyzs = ['xyz' + feat for feat in feats]\n z = {}\n xy = {}\n xyz = {}\n for i in range(len(feats)): # initialize the features and their keys in the dictionaries for less clutter later on\n z[zs[i]] = []\n xy[xys[i]] = []\n xyz[xyzs[i]] = []\n z['Car'] = [] # initializes car column so that we can associate which run belongs to which car (for plot colors)\n xy['Car'] = []\n xyz['Car'] = []\n\n print('Generating Scatterplot Matrices... (might take a moment)')\n for file in sorted(featuredata):\n features = featuredata[file]\n for feature, data in features.items():\n if feature in z:\n z[feature].extend(data)\n if feature in xy:\n xy[feature].extend(data)\n if feature in xyz:\n xyz[feature].extend(data)\n\n num_runs = len(features[zs[0]]) # number of runs\n z['Car'].extend([file] * num_runs) # populates the Car column with (num_runs) amount of filenames\n xy['Car'].extend([file] * num_runs) # (See example up top)\n xyz['Car'].extend([file] * num_runs)\n\n df_z = pd.DataFrame(data=z, columns=z.keys())\n df_xy = pd.DataFrame(data=xy, columns=xy.keys())\n df_xyz = pd.DataFrame(data=xyz, columns=xyz.keys())\n\n df_z = df_z[zs + ['Car']] # Sorts the DataFrame to match the original order of features\n df_xy = df_xy[xys + ['Car']] # (Dictionaries are unordered structures)\n df_xyz = df_xyz[xyzs + ['Car']]\n\n return [df_z, df_xy, df_xyz]\n\n#randomizes dataset after feature extraction but prior to classifications/prediction\ndef randomize_data(dataset):\n data = []\n cartype = []\n randomdata = {}\n temp_data = dataset['data']\n temp_cartype = dataset['target']\n mylist = list(range(len(temp_data)))\n random.shuffle(mylist)\n\n for i in zip(mylist):\n data.append(temp_data[i])\n cartype.append(temp_cartype[i])\n\n randomdata['__type__'] = dataset['__type__']\n randomdata['data'] = np.array(data)\n randomdata['target'] = np.array(cartype)\n randomdata['target_names'] = dataset['target_names']\n randomdata['features'] = dataset['features']\n\n return randomdata\n","sub_path":"src/dataformatter.py","file_name":"dataformatter.py","file_ext":"py","file_size_in_byte":11040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175431232","text":"####################################\n#file_name: similarity.py #\n#author: Riccardo La Grassa #\n#data created: 16/11/2016 #\n#data last modified: #\n#Python interpreter: 3.5.2 #\n#mail: riccardo2468@gmail.com #\n####################################\n\n\n\nimport re\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport time\nimport math\nimport 
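Both loaders above accumulate `data` feature-major, one row per feature, extended file by file, and then rotate it once into the (n_samples, n_features) layout scikit-learn expects, exactly as the ASCII diagram sketches. In miniature:

import numpy as np

feature_major = [[1, 2, 3, 4],        # feature 'stdev' across four samples
                 [5, 6, 7, 8],        # feature 'skew'
                 [9, 10, 11, 12]]     # feature 'p2p'

X = np.rot90(feature_major, axes=(1, 0))   # same call as the loaders
assert X.shape == (4, 3)                   # rows are samples, columns are features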
tweepy\nimport fusion_with_standfordData\nimport datetime\nimport matplotlib.dates as mdates\nfrom click._compat import raw_input\n\nimport word_netV1 as wrd_net1\nimport word_netV2 as wrd_net2\nimport plotting_zone as plt_zone\nfrom nltk import FreqDist\n\n# it computes the similarity with vector model\n#input: doc weighted 1 doc weighted 2 and the doc 2 aligned(for compute the scalar product)\n#output:similarity of the vector model\ndef sim_vectorial(doc_w1,doc_align_2,doc_w2):\n scalar_product=0.0\n lenght_norm2=0.0\n lenght_norm1=0.0\n for i in zip(doc_w1,doc_align_2):\n scalar_product= scalar_product + i[0]*i[1]\n\n for i in doc_w1:\n lenght_norm1=lenght_norm1 + math.pow(i,2)\n\n for a in doc_w2:\n lenght_norm2 = lenght_norm2 + math.pow(a,2)\n\n\n if scalar_product == 0: return 0 #totally different\n else:\n return (scalar_product/((math.sqrt(lenght_norm1)) * (math.sqrt(lenght_norm2))))\n\n#it computes the weighting scheme (tf-idf) of all list\n# and it aligns the terms of the second list with the first list ( if it exist, otherwise it puts 0)\n# in this way, i don't consider another structure for the words set\n#input:list target X and Y\n#output: doc weighted 1, doc weighted 2, doc aligned 2 seeing the first\ndef tf_idf(list1,list2):\n doc_mix_=[]\n gold=[]\n temp=[]\n temp_repl=[]\n not_replicated=[]\n fusion_list=[]\n fusion_list.append(list1)\n fusion_list.append(list2) #lista1 U lista2\n #frequency for each term in doc j\n for i in range(0,len(fusion_list)):\n #set_clean_list=set(fusion_list[i]) #i create a set for each account twitter\n for j in (fusion_list[i]): # for each word of doc(set) count the occurrences inside the doc\n if not j in temp_repl: #i don't consider the same words\n temp_repl.append(j)\n #tf\n n_i=1 #of course\n fr_i=fusion_list[i].count(j)\n\n\n #idf\n if any(j in item for item in fusion_list[1-i]):\n n_i=n_i + 1\n\n w= ((1 + math.log10(fr_i))) * (math.log10((1+2 / n_i))) #tf-idf\n temp.append(w)\n\n doc_mix_.append([w for w in temp]) #first list -> doc weight 1 second list -> doc weight 2\n temp.clear()\n not_replicated.append([w for w in temp_repl])\n temp_repl.clear()\n\n\n\n #Finally, i won.. Fortunately i come from C\n for item in not_replicated[0]:\n if item in not_replicated[1]:\n gold.append(doc_mix_[1][not_replicated[1].index(item)])\n else:\n gold.append(0)\n return doc_mix_[0],gold,doc_mix_[1] #this is too importat, see an explanation!\n\n\n#The function computes the coefficient jaccard through the formula: |A inter B| / |AUB|->|A|+|B|-|A inter B|\n#input:nothing\n#output:jaccard coefficient\ndef jaccard_coefficient():\n #intersect=[w for w in listX_clean if w in listY_clean] #intersection #I don't use it because i don't considerer the word frequency\n intersect=set(listX_clean).intersection(set(listY_clean))\n jaccard=len(intersect)/(len(listX_clean)+len(listY_clean)-len(intersect))\n return jaccard\n\n\n\n#I clean the list X through a procedure mixed of grammarFile as a reference and expression regular for alienating the useless words\n#input:set tweet of target X\n#output: set Clean tweet of target X\ndef clean_text(original_list):\n list_splitted=[]\n list_clean=[]\n for i in original_list:\n list_splitted.append(i.split())\n\n #grammar file. 
i will erase all the useless words\n grammarlist=[]\n try:\n with open('data_set/grammarList', 'r') as f:\n grammarlist.append(f.read().splitlines())\n\n\n except IOError:\n print('file not found!')\n exit()\n\n\n for i in range(0, len(list_splitted)):\n\n\n for j in range(0, len(list_splitted[i])):\n\n if not re.search('https?|RT|[^A-Za-z]|amp|[ah|ha]+', list_splitted[i][j]):\n list_splitted[i][j]=re.sub('•|‘|\"|”|!|“|,|:|&|;|/|\\+|\\?|…|[.]+|-|–|—|→|\\(|\\)', '', list_splitted[i][j].lower()) #i clean the text from link replytweet and @tag\n if not (len(list_splitted[i][j]) < 4 ):\n if not (any(list_splitted[i][j].lower() in s for s in grammarlist)):\n list_clean.append(list_splitted[i][j].lower())\n\n f.close()\n return list_clean\n\n\n\n#it converts the target's name string into numerical id\n#input: string targets\n#return: names id if it exist! otherwise exit()\ndef get_user_information(source1, source2):\n while True:\n try:\n user1 = api.get_user(source1)\n user2 = api.get_user(source2)\n\n s1 = {'name': user1.name,\n 'screen_name': user1.screen_name,\n 'id': user1.id,\n 'friends_count': user1.friends_count,\n 'followers_count': user1.followers_count,\n 'followers_ids': user1.followers_ids()}\n\n s2 = {'name': user2.name,\n 'screen_name': user2.screen_name,\n 'id': user2.id,\n 'friends_count': user2.friends_count,\n 'followers_count': user2.followers_count,\n 'followers_ids': user2.followers_ids()}\n\n return s1['id'], s2['id']\n except tweepy.TweepError as e:\n if ('88' in e.reason):\n print('Too many requests, Wait: 15 min...\\n')\n time.sleep(60 * 15) #because i exceeded the requests\n else:\n print(e.reason)\n exit()\n\n\n\n#Final the summary contain the computational result#\n#input:nothing\n#output:file_name\ndef generate_summary_file():\n try:\n with open('data_set/Summary with '+str(len(list_tweetX))+' post with targets X: '+str(sys.argv[1])+' Y: '+str(sys.argv[2]), 'w') as f:\n f.write('')\n f.write('--Number of tweet postX and Number of tweet postY--\\n')\n f.write(str(len(list_tweetX))+'-'+str(len(list_tweetY))+'\\n\\n')\n f.write('--Comparison similarity--\\n')\n f.write('Jaccard: '+str(jaccard_value)+'\\n'+'Vector model: '+str(similarity_vectorial)+'\\n'+\n 'Vector Model with WordnetV2: ' + str(similarity_vectorial_w2)+'\\n')\n f.write('Time Vector model: '+str(time_vector_model)+'\\n'+'Time with WordnetV2: '+str(time_vector_model_wrdnetV2)+'\\n'+\n 'Time with Jaccard: '+str(time_jaccard)+'\\n')\n f.write('The top 40 most commons words of the targetX:\\n\\n'+str(most_common1)+'\\n\\n')\n f.write('The top 40 most commons words of the targetY:\\n\\n' + str(most_common2) + '\\n\\n')\n f.close()\n\n\n except IOError:\n print('Error to generate a summary file!')\n exit()\n\n\n\n#####################\n#Error Control main #\n#####################\nif(len(sys.argv) != 3):\n print('Error Missing parameters! 
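jaccard_coefficient above expands |A U B| as |A| + |B| - |A intersect B|; note, though, that the stored version takes the intersection over sets while summing the raw, duplicate-bearing list lengths in the denominator, which deflates the score when tokens repeat. A consistently set-based form:

def jaccard(tokens_a, tokens_b):
    a, b = set(tokens_a), set(tokens_b)
    inter = len(a & b)
    return inter / (len(a) + len(b) - inter)

assert jaccard(['mars', 'nasa'], ['mars', 'tesla']) == 1 / 3
assert jaccard(['mars'], ['mars', 'mars']) == 1.0   # duplicates no longer deflate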
You try to write --help \\n')\n exit()\n\nif(sys.argv[1]=='--help'):\n print('****'*10)\n print('namefile.py parameter\\n\\nparameters = sourceX sourceY \\'@\\' \\nfor example nomefile.py michiokaku russelcrowe')\n exit()\n\n\n# enter the corresponding information from your Twitter application ( these are my access credential):\nCONSUMER_KEY = 'NM4xHHBwm7fiBQjf0X4QfdN8X'\nCONSUMER_SECRET = 'u6pnGad8o11sNhLiY77voEbAUawHejgGiVgxKBwPFVKLyLyRbD'\nACCESS_KEY = '251060755-lUeE2kxqXjMfL5hLqzmo4EuyuWo4wYmqcihTEU0o'\nACCESS_SECRET = 'sUCSFY1PkPkVxVD5P96EixYzCjJnUapeXmzyJ8HQ1UW2s'\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify=True) #This is a standard of the server twitter, probably to prevent too many requests by bot\n\nsource1, source2 = get_user_information(str(sys.argv[1]), str(sys.argv[2])) #i get data target from shell\nlist_tweetX=[]\nlist_tweetY=[]\ntime_tweetX=[]\ntime_tweetY=[]\n\n#Get tweet post from target X and Y\nfor page in tweepy.Cursor(api.user_timeline, id=source1).pages(40):\n for item in page:\n time_tweetX.append(item.created_at)#i get the data creation of all post, i will use it after\n list_tweetX.append(item.text)\n\nfor page in tweepy.Cursor(api.user_timeline, id=source2).pages(40):\n for item in page:\n time_tweetY.append(item.created_at)\n list_tweetY.append(item.text)\n\n\nlistX_clean=clean_text(list_tweetX)\nlistY_clean=clean_text(list_tweetY)\n\n\nprint('Comparison of Similarity')\nstart = time.time()\njaccard_value=jaccard_coefficient()\ntime_jaccard=time.time()-start\nprint('Similarity Jaccard coefficient: ',jaccard_value,'Time: ',time_jaccard)\n\n\nstart = time.time()\ndoc_weight1,doc_weight2,not_replicated=tf_idf(listX_clean,listY_clean)\nsimilarity_vectorial=sim_vectorial(doc_weight1,doc_weight2,not_replicated)\ntime_vector_model=time.time()-start\nprint('Similarity vector model: ',similarity_vectorial,'Time: ',time_vector_model)\n\n\n''''\n#i call the python file (word_netV1.py) for the comparison between the model vector and the model vector with wordnet\nstart = time.time()\ndoc_weight1,doc_align_2,doc_weight2=wrd_net1.tf_idf(listX_clean,listY_clean)\nsimilarity_vectorial_w1=wrd_net1.sim_vectorial(doc_weight1,doc_align_2,doc_weight2)\ntime_vector_model_wrdnet1=time.time()-start\nprint('Similarity vector model with WordnetV1(old version) ',similarity_vectorial_w1,'Time:',time_vector_model_wrdnet1)\n'''\n\n#i call the python file (word_netV2.py) for a comparison between the model vector and the model vector with wordnetV2\nstart = time.time()\ndoc_weight1,doc_align_2,doc_weight2=fusion_with_standfordData.tf_idf(listX_clean, listY_clean)\nsimilarity_vectorial_w2=fusion_with_standfordData.sim_vectorial(doc_weight1, doc_align_2, doc_weight2)\ntime_vector_model_wrdnetV2=time.time()-start\nprint('Similarity vector model with WordnetV2 ',similarity_vectorial_w2,'Time:',time_vector_model_wrdnetV2)\n\n\nfdist1=FreqDist(listX_clean)\nfdist2=FreqDist(listY_clean)\nmost_common1=fdist1.most_common(40)\nmost_common2=fdist2.most_common(40)\n\n#I call the module for plotting. 
You see the plotting_zone.py\nplt_zone.analysis_frequency([w.date() for w in time_tweetX], [w.date() for w in time_tweetY], str(sys.argv[1]), str(sys.argv[2])) # i set the date without hours minutes and seconds\nplt_zone.analysis_word(most_common1, 'Words Frequency analysis of '+str(sys.argv[1]), 4)\nplt_zone.analysis_word(most_common2, 'Words Frequency analysis of '+str(sys.argv[2]), 5)\nplt.show()\n\n\n#if you want, you can create a summary file containing some results such as computation time, similarity of the various models\nreply = raw_input(\"Do you want to generate a text file index? Y/N: \")\nif (reply.lower() == 'y'): generate_summary_file()\n#else bye bye.. see you!\n\n","sub_path":"similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":11122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"633290262","text":"import urllib.request\nimport json\nimport sys\nimport os\nimport semver\nfrom retry import retry\nfrom github import Github, GithubException, InputGitAuthor\n\nHTTP_REQUEST_RETRIES = 3\nHTTP_REQUEST_DELAY_IN_SECONDS = 2\nHTTP_REQUEST_DELAY_MULTIPLIER = 2\npackageUser = os.environ[\"packageUser\"]\npackagePAT = os.environ[\"packagePAT\"]\npackageEmail = os.environ[\"packageEmail\"]\norganization = 'ballerina-platform'\n\ndef main():\n print(\"Checking Ballerina Distribution for stdlib version updates\")\n moduleList = getStdlibModules()\n repo = fetchBallerinaDistributionRepo()\n propertiesFile = fetchPropertiesFile(repo)\n currentVersions = getCurrentModuleVersions(propertiesFile)\n modifiedPropertiesFile, commitFlag, updatedModules = updatePropertiesFile(propertiesFile, moduleList, currentVersions)\n if commitFlag:\n commitChanges(modifiedPropertiesFile, repo, updatedModules)\n createPullRequest(repo)\n print(\"Updated gradle.properties file in Ballerina Distribution Successfully\")\n else:\n print(\"Stdlib versions in gradle.properties file are up to date\")\n\n# Get stdlib module details from stdlib_modules.json file\ndef getStdlibModules():\n try:\n with open('./release/resources/stdlib_modules.json') as f:\n moduleList = json.load(f)\n except:\n print('Failed to read stdlib_modules.json')\n sys.exit()\n\n return moduleList['modules']\n\n# Fetch ballerina-distribution repository with GitHub credentials\ndef fetchBallerinaDistributionRepo():\n github = Github(packagePAT)\n try:\n repo = github.get_repo(organization + \"/\" + 'ballerina-distribution')\n except:\n print(\"Error fetching repository ballerina-distribution\")\n\n return repo\n\n# Fetch the gradle.properties file from the ballerina-distribution repo\ndef fetchPropertiesFile(repo):\n try:\n branch = repo.get_branch(branch=\"automated-stdlib-version-update\")\n file = repo.get_contents(\"gradle.properties\", ref=\"automated-stdlib-version-update\")\n except GithubException:\n file = repo.get_contents(\"gradle.properties\")\n\n data = file.decoded_content.decode(\"utf-8\")\n\n return data\n\n# Get current versions of stdlib modules from gradle.properties file\ndef getCurrentModuleVersions(propertiesFile):\n currentVersions = {}\n\n for line in propertiesFile.splitlines():\n if 'stdlib' in line and 'Version=' in line:\n moduleName = line.split('=')[0]\n version = line.split('=')[1]\n currentVersions[moduleName] = version\n\n return currentVersions\n\n# Compare latest version with current version\n# Return 1 if latest version > current version\n# Return 0 if latest version = current version\n# Return -1 if latest version < current 
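sim_vectorial above is plain cosine similarity over the tf-idf weighted documents, with an early exit when the dot product is zero. The cosine step on its own:

import math

def cosine(u, v):
    dot = sum(a * b for a, b in zip(u, v))
    if dot == 0:
        return 0.0                        # orthogonal: totally different
    nu = math.sqrt(sum(a * a for a in u))
    nv = math.sqrt(sum(b * b for b in v))
    return dot / (nu * nv)

assert cosine([1.0, 0.0], [1.0, 0.0]) == 1.0
assert cosine([1.0, 0.0], [0.0, 2.0]) == 0.0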
version\ndef compareVersion(latestVersion, currentVersion):\n if semver.compare(latestVersion.split('-')[0], currentVersion.split('-')[0]) == 1:\n return latestVersion\n else:\n return currentVersion\n\n# Update stdlib module versions in the gradle.properties file with module details fetched from stdlib_modules.json\ndef updatePropertiesFile(data, modules, currentVersions):\n modifiedData = ''\n updatedModules = []\n currentLine = ''\n commitFlag = False\n\n lineList = data.splitlines()\n\n for line in lineList:\n if 'stdlib' in line.lower():\n currentLine = line\n break \n line += '\\n'\n modifiedData += line\n modifiedData = modifiedData[0:-1]\n\n level = 1\n for module in modules:\n if module['level'] == level:\n line = \"\\n# Stdlib Level \" + f\"{level:02d}\" + \"\\n\"\n modifiedData += line\n level += 1\n\n moduleName = module['name'].split('-')[-1]\n latestVersion = module['version']\n\n if moduleName == 'java.arrays':\n version = compareVersion(latestVersion, currentVersions['stdlibJarraysVersion'])\n line = \"stdlibJarraysVersion=\" + version + \"\\n\"\n elif moduleName == 'java.jdbc':\n version = compareVersion(latestVersion, currentVersions['stdlibJdbcVersion'])\n line = \"stdlibJdbcVersion=\" + version + \"\\n\"\n else:\n version = compareVersion(latestVersion, currentVersions['stdlib' + moduleName.capitalize() + 'Version'])\n line = \"stdlib\" + moduleName.capitalize() + \"Version=\" + version + \"\\n\"\n\n if line[0:-1] not in lineList:\n updatedModules.append(moduleName)\n modifiedData += line\n\n for line in lineList[lineList.index(currentLine):len(lineList)]:\n currentLine = line\n if 'stdlib' not in line.lower() and line != '':\n break\n\n modifiedData += \"\\n\"\n\n for line in lineList[lineList.index(currentLine):len(lineList)]:\n if 'stdlib' not in line.lower():\n line += \"\\n\"\n modifiedData += line\n\n # modifiedData = modifiedData[0:-1]\n if modifiedData != data:\n commitFlag = True\n\n return modifiedData, commitFlag, updatedModules\n\n# Commit changes made to the gradle.properties file\ndef commitChanges(data, repo, updatedModules):\n author = InputGitAuthor(packageUser, packageEmail)\n\n # If branch already exists checkout and commit else create new branch from master branch and commit\n try:\n source = repo.get_branch(branch=\"automated-stdlib-version-update\")\n except GithubException:\n try:\n source = repo.get_branch(\"main\")\n except GithubException:\n source = repo.get_branch(\"master\")\n\n repo.create_git_ref(ref=f\"refs/heads/automated-stdlib-version-update\", sha=source.commit.sha)\n\n contents = repo.get_contents(\"gradle.properties\", ref=\"automated-stdlib-version-update\")\n\n if len(updatedModules) > 0:\n commitMessage = \"Bump the version of stdlib module(s) - \"\n for updatedModule in updatedModules:\n commitMessage += updatedModule\n commitMessage += \" \"\n else:\n commitMessage = \"Update gradle.properties\"\n\n repo.update_file(contents.path, \n commitMessage, \n data, \n contents.sha, \n branch=\"automated-stdlib-version-update\", \n author=author)\n\n# Create a PR from the branch created\ndef createPullRequest(repo):\n pulls = repo.get_pulls(state='open', head=\"automated-stdlib-version-update\")\n\n PRExists = 0\n\n # Check if a PR already exists for the module\n for pull in pulls:\n if \"[Automated] Update Stdlib module versions\" in pull.title:\n PRExists = pull.number\n\n # If PR doesn't exists create a new PR\n if PRExists == 0:\n try:\n repo.create_pull(title=\"[Automated] Update Stdlib module versions\", \n body='$subject', \n 
head=\"automated-stdlib-version-update\", \n base=\"main\")\n except GithubException:\n repo.create_pull(title=\"[Automated] Update Stdlib module versions\", \n body='$subject', \n head=\"automated-stdlib-version-update\", \n base=\"master\")\n\nmain()\n","sub_path":"release/src/version_update/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75632915","text":"import sys\nimport torch\nimport torch.nn as nn\nfrom collections import defaultdict\nimport math\nfrom math import log\nimport numpy as np\n\nclass Scorer(object):\n '''def __init__(self, char_list, model_path, rnn_type, ninp, nhid, nlayers, device):\n char_list = list(char_list) + ['sil_start', 'sil_end']\n self.inv_vocab_map = dict([(i, c) for (i, c) in enumerate(char_list)])\n self.vocab_map = dict([(c, i) for (i, c) in enumerate(char_list)])\n self.device = device\n self.history = defaultdict(tuple)'''\n\n def __init__(self, languageModel, tokenizer, device):\n self.languageModel = languageModel\n self.tokenizer = tokenizer\n self.device = device\n self.languageModel.to(self.device)\n self.languageModel.eval()\n self.history = defaultdict(lambda: 0.0)\n\n def get_score(self, string):\n \n #print(string)\n tokenize_input = self.tokenizer.tokenize(string)\n #print (\"tokenize_input: \" + str(tokenize_input))\n\n \n while len(tokenize_input) < 2:\n tokenize_input.append('')\n\n\n tensor_input = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokenize_input[:-1])]).to(self.device)\n tensor_labels = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokenize_input[1:])]).to(self.device)\n \n outputs = self.languageModel(tensor_input, labels=tensor_labels)\n loss = outputs.loss\n if math.isnan(loss.item()) == True:\n loss = 0\n else:\n loss = loss.item()\n return -(len(tokenize_input) - 1) * loss, loss\n\n\n def get_score_fast(self, strings):\n '''strings = [''.join(x) for x in strings]\n scores = [self.get_score(string)[0] for string in strings]\n return scores'''\n strings = [''.join(x) for x in strings]\n #history_to_update = defaultdict(lambda: 0.0)\n scores = []\n #strings.sort()\n #print (\"strings: \" + str(strings))\n for string in strings:\n #print (string)\n if len(string) <= 2:\n score, hidden_state = self.get_score(string)\n scores.append(score)\n self.history[string] = score\n elif string in self.history:\n scores.append(self.history[string])\n else:\n res = self.get_score(string)[0]\n self.history[string] = res\n scores.append(res)\n\n\n return scores\n\n\n","sub_path":"scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395539213","text":"import plotly.plotly as py\nimport plotly.graph_objs as go\n\nx = [1, 2, 3, 4]\n\ntrace1 = {\n 'x': x,\n 'y': [1, 4, 9, 16],\n 'name': 'Trace1',\n 'type': 'bar'\n};\ntrace2 = {\n 'x': x,\n 'y': [6, -8, -4.5, 8],\n 'name': 'Trace2',\n 'type': 'bar'\n};\ntrace3 = {\n 'x': x,\n 'y': [-15, -3, 4.5, -8],\n 'name': 'Trace3',\n 'type': 'bar'\n }\n \ntrace4 = {\n 'x': x,\n 'y': [-1, 3, -3, -4],\n 'name': 'Trace4',\n 'type': 'bar'\n }\n \ndata = [trace1, trace2, trace3, trace4];\nlayout = {\n 'xaxis': {'title': 'X axis'},\n 'yaxis': {'title': 'Y axis'},\n 'barmode': 'relative',\n 'title': 'Relative Barmode'\n};\npy.iplot({'data': data, 'layout': layout}, 
filename='barmode-relative')\n","sub_path":"plotly/plotly15.py","file_name":"plotly15.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"224764535","text":"'''\n25. テンプレートの抽出\n記事中に含まれる「基礎情報」テンプレートのフィールド名と値を抽出し,辞書オブジェクトとして格納せよ.\n'''\n# Python 3.6.2 で実行\nimport re\nimport sys\nimport pprint\nfrom collections import OrderedDict\n\n\ndef extract_infobox():\n with open('wiki_UK.txt') as f:\n pattern = r'{{基礎情報[^|]+(?P.+?)}}\\n'\n match = re.search(pattern, f.read(), re.DOTALL)\n info_body = match.group('Infobox_body')\n subptn = r'^\\|(.+?)\\s*=\\s*(.+?)(?)\\n'\n reg = re.compile(subptn, re.MULTILINE | re.DOTALL)\n od = OrderedDict(reg.findall(info_body))\n return od\n\n\nif __name__ == '__main__':\n d = extract_infobox()\n pprint.pprint(d)\n\n\n''' NOTE\n* ウィキペディアの基礎情報_国\n-> https://ja.wikipedia.org/wiki/Template:基礎情報_国\n\n* 特殊文字(special characters)\n-> https://docs.python.org/ja/3/library/re.html#regular-expression-syntax\n * `^`:\n (キャレット) 文字列の先頭にマッチし、\n MULTILINE モードでは各改行の直後にもマッチします。\n\n* モジュールコンテンツ\n-> https://docs.python.org/ja/3/library/re.html?highlight=findall#module-contents\n * re.MULTILINE / re.M / (?m)\n 指定されていると、\n パターン文字 '^' は文字列の先頭で、および各行の先頭 (各改行の直後)\n パターン文字 '$' は文字列の末尾で、および各行の末尾 (各改行の直前) でマッチします。\n デフォルトでは、\n '^' は文字列の先頭でのみ、\n '$' は文字列の末尾および文字列の末尾の改行 (もしあれば) の直前でのみマッチします。\n * re.DOTALL / re.S / (?s)\n '.' 特殊文字を、改行を含むあらゆる文字にマッチさせます。\n'''\n","sub_path":"kiyuna/chapter03/knock25.py","file_name":"knock25.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517714719","text":"# Make sure pandas is loaded\r\nimport pandas as pd\r\nimport os\r\nimport pytz\r\nimport json\r\nfrom datetime import datetime,timezone\r\nsurveys_df = pd.read_csv('movies2.csv')\r\ndef csv_datatype(csv_file):\r\n column_name=list(csv_file.columns)\r\n # print(\"column name=>\",column_name)\r\n csv_file=pd.DataFrame(csv_file)\r\n for df in csv_file:\r\n if csv_file[df].dtype== 'object':\r\n csv_file[df] = csv_file[df].astype('str')\r\n\r\n mask1 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}-\\d{2}-\\d{4}').all()) #dd-mm-yyyy\r\n csv_file.loc[:, mask1] = csv_file.loc[:, mask1].apply(pd.to_datetime)\r\n mask2 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{4}-\\d{2}-\\d{2}').all()) #yyyy-mm-dd\r\n csv_file.loc[:, mask2] = csv_file.loc[:, mask2].apply(pd.to_datetime)\r\n mask3 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}/\\d{2}/\\d{4}').all()) #yyyy/mm/dd\r\n csv_file.loc[:, mask3] = csv_file.loc[:, mask3].apply(pd.to_datetime)\r\n mask4 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}/\\d{2}/\\d{4}').all()) #dd/mm/yyyy\r\n csv_file.loc[:, mask4] = csv_file.loc[:, mask4].apply(pd.to_datetime)\r\n mask5 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}-\\d{2}-\\d{4} \\d{2}\\:\\d{2}\\:\\d{2}').all()) #dd-mm-yyyy hh:mm:ss\r\n csv_file.loc[:, mask5] = csv_file.loc[:, mask5].apply(pd.to_datetime)\r\n mask6 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{4}-\\d{2}-\\d{2} \\d{2}\\:\\d{2}\\:\\d{2}').all()) #yyyy-mm-dd hh:mm:ss\r\n csv_file.loc[:, mask6] = csv_file.loc[:, mask6].apply(pd.to_datetime)\r\n mask7 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{4}/\\d{2}/\\d{2} \\d{2}\\:\\d{2}\\:\\d{2}').all()) #yyyy/mm/dd hh:mm:ss\r\n csv_file.loc[:, mask7] = csv_file.loc[:, mask7].apply(pd.to_datetime)\r\n mask8 = csv_file.astype(str).apply(lambda x 
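The knock25 field regex depends on a lookahead that stops each value at the next `|field =` line or at the end of the template body; the lookahead body appears truncated to `(?)` in the stored copy. A working variant under that reading, exercised on a two-field body:

import re
from collections import OrderedDict

body = '|略名 = イギリス\n|公用語 = 英語\n'
field_re = re.compile(r'^\|(.+?)\s*=\s*(.+?)(?=\n\||\n$)', re.MULTILINE | re.DOTALL)
fields = OrderedDict(field_re.findall(body))
assert fields['公用語'] == '英語'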
: x.str.match(r'\\d{2}-\\d{2}-\\d{4} \\d{2}\\:\\d{2}\\:\\d{2}').all()) #dd/mm/yyyy hh:mm:ss\r\n csv_file.loc[:, mask8] = csv_file.loc[:, mask8].apply(pd.to_datetime)\r\n mask9 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}\\:\\d{2}\\:\\d{2}').all()) #hh:mm:ss\r\n csv_file.loc[:, mask9] = csv_file.loc[:, mask9].apply(pd.to_datetime)\r\n mask10 = csv_file.astype(str).apply(lambda x : x.str.match(r'\\d{2}-\\d{2}-\\d{4} \\d{2}\\:\\d{2}\\:\\d{2}-\\d{2}').all()) #dd-mm-yyyy hh:mm:ss-tz\r\n csv_file.loc[:, mask10] = csv_file.loc[:, mask10].apply(pd.to_datetime)\r\n\r\n data_type=csv_file.dtypes\r\n data_type=pd.Series(data_type).tolist() #data type stored in list\r\n dict_csv={}\r\n tup_csv=()\r\n for i in range(len(data_type)):\r\n #print(data_type[i])\r\n if data_type[i]=='object':\r\n data_type[i]='TEXT'\r\n elif data_type[i]=='int64':\r\n data_type[i]='BIGINT'\r\n elif data_type[i]=='float64':\r\n data_type[i]='float'\r\n elif data_type[i]=='bool':\r\n data_type[i]='bool'\r\n # elif data_type[i]=='category':\r\n # data_type[i]='category'\r\n elif data_type[i]=='bool':\r\n data_type[i]='bool'\r\n # elif data_type[i]=='datetime64[ns, pytz.FixedOffset(-420)]':\r\n # data_type[i]='timestamptz'\r\n elif data_type[i] == 'datetime64[ns]':\r\n data_type[i] = 'timestamp'\r\n else:\r\n data_type[i] = 'timestamptz'\r\n dict_csv[column_name[i]]=data_type[i]\r\n tup_csv=tup_csv+(\"{a} {b}\".format(a=column_name[i],b=data_type[i]),)\r\n # print(\"dictionary=>\",dict_csv)\r\n # print(\"tuple=>\",tup_csv)\r\n return(dict_csv,tup_csv)\r\n\r\na,b=csv_datatype(surveys_df)\r\nprint(a,b)","sub_path":"standarize_date/csv_datatype_change.py","file_name":"csv_datatype_change.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"382673042","text":"# coding: utf8\n\nfrom tortue.main.scrapper.scrapper import Scrapper\n\ns = Scrapper()\n\n\ndef test_format_dict_to_model():\n # Given\n data = {'title': 'hello', 'text': 'world'}\n\n # When\n doc = s.transform_to_doc(data)\n\n # Then\n assert doc.title == 'hello'\n\n\ndef test_format_with_only_alphanumeric_chars():\n # Given\n data = {'title': 'hello', 'text': ' aBcDefghijklmnopqrstuvwxyz 0123456789 *-+!:/;,?\\n'}\n\n # When\n doc = s.transform_to_doc(data)\n\n # Then\n assert doc.text == 'abcdefghijklmnopqrstuvwxyz 0123456789'\n\n","sub_path":"tortue/test/scrapper_test/scrapper_test.py","file_name":"scrapper_test.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395026694","text":"\n# Dependencies\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport pymongo\nfrom splinter import Browser\nfrom flask import Flask, render_template, redirect\nimport pandas as pd\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef scrape_info():\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=False)\n\n url = 'https://mars.nasa.gov/news/'\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n\n news_title = soup.find_all('div', class_='content_title')[0].text\n news_p = soup.find_all('div', class_='rollover_description_inner')[0].text\n \n\n jpl_image_url = \"https://spaceimages-mars.com/\"\n browser.visit(jpl_image_url)\n\n html = browser.html\n soup = bs(html, \"html.parser\")\n image_url = soup.find_all('article')\n\n\n\n # Scrape Mars facts from 
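csv_datatype above probes each column with a date regex, converts the matching columns, and then maps pandas dtypes onto SQL type names. The probe-and-convert core for one pattern:

import pandas as pd

df = pd.DataFrame({'when': ['2021-01-02', '2021-03-04'], 'n': [1, 2]})

# True for columns whose every value looks like yyyy-mm-dd.
mask = df.astype(str).apply(lambda col: col.str.match(r'\d{4}-\d{2}-\d{2}').all())
for col in mask[mask].index:
    df[col] = pd.to_datetime(df[col])

assert str(df['when'].dtype) == 'datetime64[ns]'
assert str(df['n'].dtype) == 'int64'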
https://galaxyfacts-mars.com/\n url = 'https://galaxyfacts-mars.com/'\n db = pd.read_html(url)\n\n mars_db = db[0]\n mars_db = mars_db.rename(\n columns={0: \"Profile\", 1: \"Value\"}, errors=\"raise\")\n mars_db.set_index(\"Profile\", inplace=True)\n\n mars_facts=mars_db.to_html()\n\n url = 'https://marshemispheres.com/'\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n\n hemisphere_image_urls = []\n\n for i in range(0, 4):\n\n links_found = browser.links.find_by_partial_text('Hemisphere Enhanced')\n links_found[i].click()\n html = browser.html\n soup = bs(html, 'html.parser')\n\n title = soup.find('h2', class_='title').text\n\n img_url = soup.find('div', class_='downloads')\n img_url = img_url.find('a', target='_blank')['href']\n img_url = 'https://marshemispheres.com/' + img_url\n\n hemisphere_image_urls.append({\n 'Title': title,\n 'Image URL': img_url\n })\n\n browser.links.find_by_partial_text('Back').click()\n\n # End browser session\n browser.quit()\n data = {\n \"news_title\":news_title,\n \"news_p\":news_p,\n \"featured_image\":image_url,\n \"facts\":mars_facts,\n \"hemispheres\":hemisphere_image_urls\n }\n return data\nif __name__ == \"__main__\":\n print(scrape_info())\n","sub_path":"mission_mars/app/scrap_mars.py","file_name":"scrap_mars.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"139560595","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import MaxPool2D, Concatenate\nfrom .resnet import ResnetBase\nfrom .skeleton import EncoderDecoderModel\n\n\nclass AutoencoderSmall(EncoderDecoderModel, ResnetBase):\n\n def build_encoder(self):\n skipcons = []\n\n inp = tf.keras.Input(shape=self.shape, name='inp')\n \n x = inp\n skipcons += [x]\n\n x = self.conv_bn_act(x, 16, 8, 'in', strides=2)\n skipcons += [x]\n\n x = MaxPool2D(3,2, padding='SAME', name='pool')(x)\n x = self.encoder_down_block(x, 16, skipcons, 'block1', downscale=False)\n \n x = self.encoder_down_block(x, 32, skipcons, 'block2')\n x = self.encoder_down_block(x, 64, skipcons, 'block3')\n x = self.encoder_down_block(x, 128, skipcons, 'block4')\n\n encoder = tf.keras.Model(inputs=inp, outputs=x, name='encoder')\n\n return encoder, skipcons\n\n\n def encoder_down_block(self, x, channels, skipcons, name, downscale=True):\n n = 6\n for i in range(1,n+1):\n x = self.resblock_down(x, channels, f'{name}_{i}', downscale = downscale and (i==1))\n skipcons += [x]\n\n return x\n\n\n def build_decoder(self, skipcons):\n H,W,C = self.shape[0] // 2**5, self.shape[1] // 2**5, 128\n inp = tf.keras.Input(shape=(H,W,C), name='inp')\n \n x = self.spatial_flatten(inp, [H,W,C])\n x = self.dense_bn_act(x, H*W, 'spatial_mixing')\n x = self.spatial_deflate(x, [H,W,C])\n x = self.conv_bn_act(x, C, 1, 'channel_mixing')\n\n x = self.resblock_up(x, 64, f'block4_upconv', upscale=True)\n\n x = self.decoder_up_block(x, 64, skipcons[-2], 'block3')\n x = self.decoder_up_block(x, 32, skipcons[-3], 'block2')\n x = self.decoder_up_block(x, 16, skipcons[-4], 'block1')\n\n x = self.resblock_up(x, 16, f'inp_up', upscale=True)\n x = self.conv(x, 2, 8, f'out_conv', kernel_initializer='glorot_normal')\n \n decoder = tf.keras.Model(inputs=inp, outputs=x, name='decoder')\n\n 
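    # The decoder maps the (H/32, W/32, 128) latent back to a 2-channel map at
    # full input resolution: a dense spatial-mixing bottleneck, an initial
    # residual upsampling, three up-block stages, a final upscale, and an 8x8
    # output convolution. Note that the skip tensors are passed to
    # decoder_up_block here but are not used inside it.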
return decoder\n\n def decoder_up_block(self, x, channels, skipcon, name):\n n = 6\n for i in range(1,n+1):\n x = self.resblock_up(x, channels, f'{name}_{i}', upscale = (i==n))\n\n return x\n\n\n def build_model(self):\n self.encoder, skipcons = self.build_encoder()\n self.decoder = self.build_decoder(skipcons)\n\n img = tf.keras.Input(shape=self.shape, name='img')\n x = self.encoder(img)\n x = self.decoder(x)\n\n return tf.keras.Model(\n inputs=img,\n outputs=x,\n name=self.name\n )","sub_path":"python/phire/rplearn/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"426205691","text":"if __name__ == '__main__':\n from nursery import Nursery\nelse:\n from .nursery import Nursery\n\n\n\n\ncur_nursery = Nursery()\n\nMenu_list = [\n ['Add a nursery', cur_nursery.add_cat],\n ['Edit information about nursery', cur_nursery.change_cat],\n ['Print list of nurserys', cur_nursery.print_cats],\n ['Delete the nursery from list', cur_nursery.remove_the_cat],\n ['Clear list', cur_nursery.clear_cats_list],\n ['Do special function', cur_nursery.do_special_meow],\n ['Save the list in the file', cur_nursery.save_to_file],\n ['Download the list from the file', cur_nursery.load_from_file]\n\n ]\n\n\ndef menu():\n print(\"------------------------------\")\n for i, item in enumerate(Menu_list):\n print(\"{0:2}. {1}\".format(i, item[0]))\n print(\"------------------------------\")\n return int(input())\n\n\ndef main():\n try:\n while True:\n Menu_list[menu()][1]()\n except Exception as e:\n print(e, \"\\nFinally\")\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n","sub_path":"asm1905/st01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"420086833","text":"# =============================================================================\n# periscope-ps (unis)\n#\n# Copyright (c) 2012-2016, Trustees of Indiana University,\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms of the BSD\n# license. 
See the COPYING file for details.\n#\n# This software was created at the Indiana University Center for Research in\n# Extreme Scale Technologies (CREST).\n# =============================================================================\n#!/usr/bin/env python\n\"\"\"\nModels related tests\n\"\"\"\n\nimport copy\nfrom mock import MagicMock\nfrom periscope.models import ObjectDict\nfrom periscope.models import JSONSchemaModel\nfrom periscope.models import SchemasLoader\nfrom periscope.test.base import PeriscopeTestCase\n\n\nclass ObjectDictTest(PeriscopeTestCase):\n def test_init(self):\n # Arrange\n test_data = {\"key1\": \"value1\", \"key2\": \"value2\", \"$key3\": \"value3\"}\n # Act\n obj1 = ObjectDict()\n obj2 = ObjectDict(test_data)\n # Assert\n self.assertEqual(obj1, {})\n self.assertEqual(obj2, test_data)\n for key, value in test_data.items():\n self.assertTrue(hasattr(obj2, key))\n self.assertEqual(getattr(obj2, key), value)\n \n def test_add_property(self):\n # Arrange\n test_data = {\"key1\": \"value1\", \"key2\": \"value2\", \"$key3\": \"value3\"}\n obj = ObjectDict(test_data)\n # Act\n obj.key4 = \"value4\"\n obj[\"key5\"] = \"value5\"\n # Assert\n self.assertTrue(hasattr(obj, \"key4\"))\n self.assertEqual(getattr(obj, \"key4\"), \"value4\")\n self.assertTrue(hasattr(obj, \"key5\"))\n self.assertEqual(getattr(obj, \"key5\"), \"value5\")\n self.assertNotEqual(obj, test_data)\n for key, value in test_data.items():\n self.assertTrue(hasattr(obj, key))\n self.assertEqual(getattr(obj, key), value)\n \n def test_del_property(self):\n # Arrange\n test_data = {\"key1\": \"value1\", \"key2\": \"value2\", \"$key3\": \"value3\"}\n obj = ObjectDict(test_data)\n # Act\n del obj.key1\n del obj[\"key2\"]\n # Assert\n self.assertNotEqual(obj, test_data)\n self.assertFalse(hasattr(obj, \"key1\"))\n self.assertFalse(hasattr(obj, \"key2\"))\n self.assertFalse(\"key1\" in obj)\n self.assertFalse(\"key2\" in obj)\n self.assertTrue(hasattr(obj, \"$key3\"))\n self.assertEqual(getattr(obj, \"$key3\"), \"value3\")\n \n def test_convert_value(self):\n # Arrange\n test_data = {\"key1\": \"value1\",\n \"key2\": {\"k1\": \"v1\"},\n \"key3\": {\"k2\": {\"k3\": \"v3\"}}}\n obj = ObjectDict(test_data)\n # Act\n key1 = obj.key1\n key2 = obj[\"key2\"]\n key3 = obj.key3\n k1 = key2.k1\n k2 = key3[\"k2\"]\n k3 = k2.k3\n # Assert\n self.assertEqual(obj, test_data)\n self.assertTrue(isinstance(obj, ObjectDict))\n self.assertTrue(isinstance(key1, str))\n self.assertTrue(isinstance(key2, ObjectDict))\n self.assertTrue(isinstance(key3, ObjectDict))\n self.assertTrue(isinstance(k1, str))\n self.assertTrue(isinstance(k2, ObjectDict))\n self.assertTrue(isinstance(k3, str))\n \n def test_convert_value_nested(self):\n # Arrange\n test_data = {\"key1\": \"value1\",\n \"key2\": {\"k1\": \"v1\"},\n \"key3\": {\"k2\": {\"k3\": \"v3\"}},\n \"key4\": [{\"k4\": {\"k5\": \"v5\"}}]\n }\n expected_data = {\"key1\": \"value1\",\n \"key2\": {\"k1\": \"v1\"},\n \"key3\": {\"k2\": {\"k3\": \"v3\"}},\n \"key4\": [{\"k4\": {\"k5\": \"v5\"}}, {\"k6\": \"v6\"}],\n \"key5\": {\"k7\": [{\"k8\": \"v8\"}, {\"k9\": \"v9\"}]}\n }\n obj = ObjectDict(test_data)\n # Act\n \n key1 = obj.key1\n key2 = obj[\"key2\"]\n key3 = obj.key3\n key4 = obj.key4\n k1 = key2.k1\n k2 = key3[\"k2\"]\n k3 = k2.k3\n key4 = obj.key4\n key4.append({\"k6\": \"v6\"})\n obj.key5 = {\"k7\": [{\"k8\": \"v8\"}]}\n k7 = obj.key5.k7\n k7.append({\"k9\": \"v9\"})\n # Assert\n self.assertEqual(obj, expected_data)\n self.assertTrue(isinstance(obj, ObjectDict))\n 
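        # nested plain dicts come back as ObjectDict instances, while scalar
        # values keep their original types -- the assertions below check both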
self.assertTrue(isinstance(key1, str))\n self.assertTrue(isinstance(key2, ObjectDict))\n self.assertTrue(isinstance(key3, ObjectDict))\n self.assertTrue(isinstance(k1, str))\n self.assertTrue(isinstance(k2, ObjectDict))\n self.assertTrue(isinstance(k3, str))\n\n \n def test_to_mongo(self):\n # Arrange\n obj = ObjectDict()\n obj[\"$prop1\"] = \"value1\"\n obj[\"prop.2\"] = \"value2\"\n obj[\"prop.$3\"] = \"value3\"\n obj[\"prop4\"] = copy.deepcopy(obj)\n obj[\"prop5\"] = [{\"key.1\": \"v1\", \"$key2\": \"v2\"}]\n expected = {\n \"\\$prop1\": \"value1\",\n \"prop$DOT$2\": \"value2\", \n \"prop$DOT$$3\": \"value3\", \n \"prop4\": {\n \"\\$prop1\": \"value1\",\n \"prop$DOT$2\": \"value2\", \n \"prop$DOT$$3\": \"value3\", \n },\n \"prop5\": [\n {\n \"key$DOT$1\": \"v1\",\n \"\\$key2\": \"v2\"\n }\n ]\n }\n\n # Act\n mongo_dict = dict(obj._to_mongoiter())\n # Assert\n self.assertTrue(hasattr(obj, \"$prop1\"))\n self.assertTrue(hasattr(obj, \"prop.2\"))\n self.assertTrue(hasattr(obj, \"prop.$3\"))\n self.assertEqual(obj[\"$prop1\"], \"value1\")\n self.assertEqual(obj[\"prop.2\"], \"value2\")\n self.assertEqual(obj[\"prop.$3\"], \"value3\")\n self.assertEqual(mongo_dict, expected)\n \n def test_from_mongo(self):\n # Arrange\n data = {\n \"\\$prop1\": \"value1\",\n \"prop$DOT$2\": \"value2\", \n \"prop$DOT$$3\": \"value3\", \n \"prop4\": {\n \"\\$prop1\": \"value1\",\n \"prop$DOT$2\": \"value2\", \n \"prop$DOT$$3\": \"value3\", \n },\n \"prop5\": [\n {\n \"key$DOT$1\": \"v1\",\n \"\\$key2\": \"v2\"\n }\n ]\n }\n \n # Act\n obj = ObjectDict._from_mongo(data)\n # Assert\n self.assertTrue(isinstance(obj, ObjectDict))\n self.assertTrue(hasattr(obj, \"$prop1\"))\n self.assertTrue(hasattr(obj, \"prop.2\"))\n self.assertTrue(hasattr(obj, \"prop.$3\"))\n self.assertTrue(hasattr(obj, \"prop4\"))\n self.assertTrue(hasattr(obj, \"prop5\"))\n self.assertEqual(obj[\"$prop1\"], \"value1\")\n self.assertEqual(obj[\"prop.2\"], \"value2\")\n self.assertEqual(obj[\"prop.$3\"], \"value3\")\n self.assertEqual(obj[\"prop4\"], \\\n {\"$prop1\": \"value1\", \"prop.2\": \"value2\", \"prop.$3\": \"value3\"})\n self.assertEqual(obj[\"prop5\"], [{\"key.1\": \"v1\", \"$key2\": \"v2\"}])\n\n\nclass JSONSchemaModelTest(PeriscopeTestCase):\n def test_init(self):\n # Arrange\n schema = {\"name\": \"TestSchema\",\n \"description\": \"Unit testing schema\",\n \"additionalProperties\": False,\n \"properties\": {\n \"prop1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n },\n \"prop2\": {\n \"type\": \"integer\",\n \"description\": \"prop2 description\",\n },\n },\n \"patternPropeties\": {\n \"prop?\": {\n \"type\": \"string\",\n \"description\": \"pattern prop\",\n },\n },\n }\n # Act\n schemaModel = JSONSchemaModel.json_model_factory(\"schemaModel\", schema)\n # Assert\n self.assertTrue(issubclass(schemaModel, JSONSchemaModel))\n self.assertEqual(schemaModel.__doc__, schema[\"description\"])\n self.assertTrue(hasattr(schemaModel, \"prop1\"))\n self.assertTrue(hasattr(schemaModel, \"prop2\"))\n \n def test_set_value(self):\n # Arrange\n schema = {\"name\": \"TestSchema\",\n \"description\": \"Unit testing schema\",\n \"properties\": {\n \"prop1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n },\n \"prop2\": {\n \"type\": \"string\",\n \"description\": \"prop2 description\",\n },\n },\n \"patternProperties\": {\n \"prop?\": {\n \"type\": \"string\",\n \"description\": \"pattern prop\",\n },\n },\n }\n SchemaModel = JSONSchemaModel.json_model_factory(\"schemaModel\", schema)\n 
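        # json_model_factory builds a new model *class* from the schema; it
        # still has to be instantiated before properties can be set on it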
schemaModel = SchemaModel()\n # Act\n schemaModel.prop1 = \"value1\"\n schemaModel[\"prop2\"] = \"value2\"\n schemaModel.prop3 = \"value3\"\n schemaModel[\"prop4\"] = \"value4\"\n # Assert\n self.assertTrue(hasattr(schemaModel, \"prop1\"))\n self.assertTrue(hasattr(schemaModel, \"prop2\"))\n self.assertTrue(hasattr(schemaModel, \"prop3\"))\n self.assertTrue(hasattr(schemaModel, \"prop4\"))\n self.assertEqual(schemaModel.prop1, \"value1\")\n self.assertEqual(schemaModel.prop2, \"value2\")\n self.assertEqual(schemaModel.prop3, \"value3\")\n self.assertEqual(schemaModel.prop4, \"value4\")\n self.assertEqual(schemaModel[\"prop1\"], \"value1\")\n self.assertEqual(schemaModel[\"prop2\"], \"value2\")\n self.assertEqual(schemaModel[\"prop3\"], \"value3\")\n self.assertEqual(schemaModel[\"prop4\"], \"value4\")\n \n \n def test_json_model_factory(self):\n # Arrange\n schema = {\"name\": \"TestSchema\",\n \"description\": \"Unit testing schema\",\n \"properties\": {\n \"prop1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n },\n \"prop2\": {\n \"type\": \"string\",\n \"description\": \"prop2 description\",\n },\n },\n \"patternProperties\": {\n \"prop?\": {\n \"type\": \"string\",\n \"description\": \"pattern prop\",\n },\n },\n }\n schema2 = {\"name\": \"TestSchema2\",\n \"description\": \"Unit testing schema2\",\n \"properties\": {\n \"p1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n }\n }\n }\n \n # Act\n Schema = JSONSchemaModel.json_model_factory(\"Schema\", schema)\n Schema2 = JSONSchemaModel.json_model_factory(\"Schema2\", schema2,\n extends=Schema)\n schemaModel = Schema()\n schemaModel.prop1 = \"value1\"\n schemaModel[\"prop2\"] = \"value2\"\n schemaModel.prop3 = \"value3\"\n schemaModel[\"prop4\"] = \"value4\"\n schemaModel2 = Schema2()\n # Assert\n self.assertTrue(hasattr(schemaModel, \"prop1\"))\n self.assertTrue(hasattr(schemaModel, \"prop2\"))\n self.assertTrue(hasattr(schemaModel, \"prop3\"))\n self.assertTrue(hasattr(schemaModel, \"prop4\"))\n self.assertEqual(schemaModel.prop1, \"value1\")\n self.assertEqual(schemaModel.prop2, \"value2\")\n self.assertEqual(schemaModel.prop3, \"value3\")\n self.assertEqual(schemaModel.prop4, \"value4\")\n self.assertEqual(schemaModel[\"prop1\"], \"value1\")\n self.assertEqual(schemaModel[\"prop2\"], \"value2\")\n self.assertEqual(schemaModel[\"prop3\"], \"value3\")\n self.assertEqual(schemaModel[\"prop4\"], \"value4\")\n \n def test_nested_schemas(self):\n # Arrange\n schema = {\"name\": \"TestSchema\",\n \"description\": \"Unit testing schema\",\n \"properties\": {\n \"prop1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n },\n \"prop2\": {\n \"type\": [{\"$ref\": \"schema2\"}],\n \"description\": \"prop2 description\",\n },\n }\n }\n schema2 = {\"name\": \"TestSchema2\",\n \"description\": \"Unit testing schema2\",\n \"properties\": {\n \"p1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n }\n }\n }\n data = {\n \"prop1\": \"value1\",\n \"prop2\": {\n \"p1\": \"value2\",\n }\n }\n Schema = JSONSchemaModel.json_model_factory(\"Schema\", schema)\n Schema2 = JSONSchemaModel.json_model_factory(\"Schema2\", schema2,\n extends=Schema)\n loader_mock = SchemasLoader()\n loader_mock.get_class = MagicMock()\n loader_mock.get_class.return_value = Schema2\n \n # Act\n schemaModel = Schema(data, schemas_loader=loader_mock)\n # Assert\n self.assertTrue(hasattr(schemaModel, \"prop1\"))\n self.assertTrue(hasattr(schemaModel, \"prop2\"))\n 
self.assertEqual(schemaModel.prop1, data[\"prop1\"])\n self.assertEqual(schemaModel.prop2, data[\"prop2\"])\n self.assertEqual(schemaModel[\"prop1\"], data[\"prop1\"])\n self.assertEqual(schemaModel[\"prop2\"], data[\"prop2\"])\n self.assertEqual(type(schemaModel.prop2), Schema2)\n\n\n\nclass SchemasLoaderTest(PeriscopeTestCase):\n def test_init(self):\n # Arrange\n schema = {\n \"id\": \"http://schema1\",\n \"name\": \"TestSchema\",\n \"description\": \"Unit testing schema\",\n \"properties\": {\n \"prop1\": {\n \"type\": \"string\",\n \"description\": \"prop1 description\",\n },\n \"prop2\": {\n \"type\": \"string\",\n \"description\": \"prop2 description\",\n },\n },\n \"patternProperties\": {\n \"prop?\": {\n \"type\": \"string\",\n \"description\": \"pattern prop\",\n },\n },\n }\n cache = {schema[\"id\"]: schema}\n # Act\n obj = SchemasLoader(cache=cache)\n schema_get = obj.get(schema[\"id\"])\n # Assert\n self.assertTrue(isinstance(obj, SchemasLoader))\n self.assertEqual(schema_get, schema)\n\n","sub_path":"periscope/test/models_test.py","file_name":"models_test.py","file_ext":"py","file_size_in_byte":14399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154254049","text":"from django.conf.urls.defaults import *\nfrom django.views.generic.simple import direct_to_template\n\nurlpatterns = patterns('irontickets.views',\n url(r'^$', direct_to_template, {\n 'template': 'irontickets/base.html',\n 'extra_context': {\n 'title': 'Help',\n },\n }, name='help'),\n)\n","sub_path":"irontickets/urls/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"525856728","text":"##############################################################################\n#\n# Copyright (c) 2005 Zope Corporation and Contributors. All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"ZCatalog node adapters.\n\n$Id$\n\"\"\"\n\nfrom Products.GenericSetup.interfaces import INodeExporter\nfrom Products.GenericSetup.interfaces import INodeImporter\nfrom Products.GenericSetup.interfaces import PURGE\nfrom Products.GenericSetup.utils import NodeAdapterBase\nfrom Products.GenericSetup.utils import ObjectManagerHelpers\nfrom Products.GenericSetup.utils import PropertyManagerHelpers\n\nfrom Products.ZCatalog.interfaces import IZCatalog\n\n\nclass _extra:\n\n pass\n\n\nclass ZCatalogNodeAdapter(NodeAdapterBase, ObjectManagerHelpers,\n PropertyManagerHelpers):\n\n \"\"\"Node im- and exporter for ZCatalog.\n \"\"\"\n\n __used_for__ = IZCatalog\n\n def exportNode(self, doc):\n \"\"\"Export the object as a DOM node.\n \"\"\"\n self._doc = doc\n node = self._getObjectNode('object')\n node.appendChild(self._extractProperties())\n node.appendChild(self._extractObjects())\n node.appendChild(self._extractIndexes())\n node.appendChild(self._extractColumns())\n return node\n\n def importNode(self, node, mode=PURGE):\n \"\"\"Import the object from the DOM node.\n \"\"\"\n if mode == PURGE:\n self._purgeProperties()\n self._purgeObjects()\n self._purgeIndexes()\n self._purgeColumns()\n\n self._initProperties(node, mode)\n self._initObjects(node, mode)\n self._initIndexes(node, mode)\n self._initColumns(node, mode)\n\n def _extractIndexes(self):\n fragment = self._doc.createDocumentFragment()\n indexes = self.context.getIndexObjects()[:]\n indexes.sort(lambda x,y: cmp(x.getId(), y.getId()))\n for idx in indexes:\n exporter = INodeExporter(idx, None)\n if exporter is None:\n continue\n fragment.appendChild(exporter.exportNode(self._doc))\n return fragment\n\n def _purgeIndexes(self):\n for idx_id in self.context.indexes():\n self.context.delIndex(idx_id)\n\n def _initIndexes(self, node, mode):\n for child in node.childNodes:\n if child.nodeName != 'index':\n continue\n if child.hasAttribute('deprecated'):\n continue\n zcatalog = self.context\n\n idx_id = str(child.getAttribute('name'))\n if idx_id not in zcatalog.indexes():\n extra = _extra()\n for sub in child.childNodes:\n if sub.nodeName == 'extra':\n name = str(sub.getAttribute('name'))\n value = str(sub.getAttribute('value'))\n setattr(extra, name, value)\n extra = extra.__dict__ and extra or None\n\n meta_type = str(child.getAttribute('meta_type'))\n zcatalog.addIndex(idx_id, meta_type, extra)\n\n idx = zcatalog._catalog.getIndex(idx_id)\n INodeImporter(idx).importNode(child, mode)\n\n def _extractColumns(self):\n fragment = self._doc.createDocumentFragment()\n schema = self.context.schema()[:]\n schema.sort()\n for col in schema:\n child = self._doc.createElement('column')\n child.setAttribute('value', col)\n fragment.appendChild(child)\n return fragment\n\n def _purgeColumns(self):\n for col in self.context.schema()[:]:\n self.context.delColumn(col)\n\n def _initColumns(self, node, mode):\n for child in node.childNodes:\n if child.nodeName != 'column':\n continue\n col = str(child.getAttribute('value'))\n if col not in self.context.schema()[:]:\n 
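            # only add the column when it is not already in the catalog
            # schema, so re-importing the same profile stays idempotent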
self.context.addColumn(col)\n","sub_path":"CMF/branches/regebro-sitemanager/GenericSetup/ZCatalog/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"388628867","text":"import pandas as pd\nfrom framework.regressor import Regressor\nfrom framework.visualisator import Visualisator\nfrom framework.data_converter import DataConverter\n\n# Importing the dataset\ndataset = pd.read_csv('datasets/Social_Network_Ads.csv')\nXEl = dataset.iloc[:, [2,3]].values #always Matrix\nyEl = dataset.iloc[:, 4].values #always Vector\n\ndataConverter = DataConverter()\nregression = Regressor()\nvisualisation = Visualisator()\n\nXTrain, XTest, yTrain, yTest = dataConverter.splittingDataSetToTrainingSetAndTestSet(XEl, yEl, testSize = 0.25)\n\nscalling = dataConverter.featureScalling([XTrain, XTest], [])[0]\n\nscaller = scalling['x_scaller']\nXTrain = scalling['x_0']\nXTest = scalling['x_1']\n\nlogistics = regression.NaiveBayesModelCreator(XTrain, yTrain, valueToPredict = XTest)\nyPred = logistics[0]\nclassifier = logistics[1]\n\nconfusionMatrix = dataConverter.errorRateMatrix(yTest, yPred)\n\nvisualisation.visualiseColoredMap(XTrain, yTrain, ['red', 'green'], 'Naive Bayes (Training set)', 'Age', 'Estimated Salary', classifier = classifier)\nvisualisation.visualiseColoredMap(XTest, yTest, ['blue', 'gray'], 'Naive Bayes (Test set)', 'Age', 'Estimated Salary', classifier = classifier)\n\n\n","sub_path":"Helpers/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"374079148","text":"import torch\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom .vocab import Vocab\n\nfrom collections import Counter\nfrom collections import OrderedDict\nimport csv\nimport json\nimport math\nimport os\nimport random\nfrom six.moves import urllib\nimport zipfile\n\nclass Pipeline:\n\n def __call__(self, x, *args):\n if isinstance(x, list):\n return [self.__call__(tok, *args) for tok in x]\n return self.convert_token(x, *args)\n\n def convert_token(self, token, *args):\n return token\n\n\ndef get_tokenizer(tokenizer):\n if not isinstance(tokenizer, str):\n return tokenizer\n if tokenizer == 'spacy':\n try:\n import spacy\n spacy_en = spacy.load('en')\n return lambda s: [tok.text for tok in spacy_en.tokenize(s)]\n except ImportError:\n print('''Please install SpaCy and the SpaCy English tokenizer:\n $ conda install libgcc\n $ pip install spacy\n $ python -m spacy.en.download tokenizer''')\n raise\n except AttributeError:\n print('''Please install the SpaCy English tokenizer:\n $ python -m spacy.en.download tokenizer''')\n raise\n\n\nclass Field:\n\n def __init__(\n self, time_series=False, use_vocab=True, init_token=None,\n eos_token=None, fix_length=None, tensor_type=torch.LongTensor,\n before_numericalizing=Pipeline(), after_numericalizing=Pipeline(),\n tokenize=(lambda s: s.split())):\n self.time_series = time_series\n self.use_vocab = use_vocab\n self.fix_length = fix_length\n self.init_token = init_token\n self.eos_token = eos_token\n self.tokenize = get_tokenizer(tokenize)\n self.before_numericalizing = before_numericalizing\n self.after_numericalizing = after_numericalizing\n self.tensor_type = tensor_type\n\n def preprocess(self, x):\n if self.time_series and isinstance(x, str):\n x = self.tokenize(x)\n return x\n\n def pad(self, minibatch):\n 
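        # Pad every example in the minibatch to a common length: non-sequential
        # fields pass through unchanged; otherwise each example is optionally
        # wrapped with init_token/eos_token, truncated to fit fix_length (when
        # set), and right-padded with '<pad>' up to the longest example.
        # For instance, with init_token='<s>' and eos_token='</s>', inputs of
        # length 3 and 1 become
        # [['<s>','a','b','c','</s>'], ['<s>','d','</s>','<pad>','<pad>']].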
minibatch = list(minibatch)\n if not self.time_series:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded = []\n for x in minibatch:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n ['<pad>'] * max(0, max_len - len(x)))\n return padded\n\n def build_vocab(self, *args, lower=False, **kwargs):\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in\n arg.fields.items() if field is self]\n else:\n sources.append(arg)\n for data in sources:\n for x in data:\n if not self.time_series:\n x = [x]\n if lower:\n x = [token.lower() for token in x]\n counter.update(x)\n specials = list(OrderedDict.fromkeys(tok for tok in [\n '<pad>', self.init_token, self.eos_token] if tok is not None))\n self.vocab = Vocab(counter, specials=specials, lower=lower, **kwargs)\n\n def numericalize(self, arr, device=None, train=True):\n if self.use_vocab:\n arr = self.before_numericalizing(arr, self.vocab, train)\n if self.time_series:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n arr = self.after_numericalizing(arr, self.vocab, train)\n else:\n arr = self.after_numericalizing(arr, train)\n arr = self.tensor_type(arr)\n if self.time_series:\n arr.t_()\n if device == -1:\n if self.time_series:\n arr = arr.contiguous()\n else:\n with torch.cuda.device(device):\n arr = arr.cuda()\n return Variable(arr, volatile=not train)\n\n\nclass Example:\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, val in data.items():\n if key in fields:\n name, field = fields[key]\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(list(csv.reader([data]))[0], fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n return ex\n\n\nclass Dataset(torch.utils.data.Dataset):\n\n sort_key = None\n\n def __init__(self, examples, fields, filter_pred=None):\n\n if filter_pred is not None:\n examples = list(filter(filter_pred, examples))\n self.examples = examples\n\n if isinstance(fields, dict):\n fields = fields.values()\n self.fields = dict(fields)\n\n @classmethod\n def splits(cls, path, train=None, dev=None, test=None, **kwargs):\n train_data = None if train is None else cls(path + train, **kwargs)\n dev_data = None if dev is None else cls(path + dev, **kwargs)\n test_data = None if test is None else cls(path + test, **kwargs)\n return tuple(d for d in (train_data, dev_data, test_data)\n if d is not None)\n\n def __getitem__(self, i):\n return self.examples[i]\n\n def __len__(self):\n try:\n return len(self.examples)\n except TypeError:\n return 2**32\n\n def __iter__(self):\n yield from self.examples\n\n def __getattr__(self, attr):\n if attr in self.fields:\n for x in self.examples:\n yield getattr(x, attr)\n\n\nclass ZipDataset(Dataset):\n\n 
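    # Dataset variant described by the class attributes `url`, `filename` and
    # `dirname`: download_or_unzip fetches the archive and extracts it under
    # `root` only when the extracted directory is missing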
@classmethod\n def download_or_unzip(cls, root):\n path = os.path.join(root, cls.dirname)\n if not os.path.isdir(path):\n zpath = os.path.join(root, cls.filename)\n if not os.path.isfile(zpath):\n print('downloading')\n urllib.request.urlretrieve(cls.url, zpath)\n with zipfile.ZipFile(zpath, 'r') as zfile:\n print('extracting')\n zfile.extractall(root)\n return path\n\n\n\nclass TabularDataset(Dataset):\n\n def __init__(self, path, format, fields, **kwargs):\n\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromTSV, 'csv': Example.fromCSV}[format.lower()]\n\n with open(os.path.expanduser(path)) as f:\n examples = [make_example(line, fields) for line in f]\n\n super().__init__(examples, fields, **kwargs)\n\n\ndef batch(data, batch_size):\n minibatch = []\n for ex in data:\n minibatch.append(ex)\n if len(minibatch) == batch_size:\n yield minibatch\n minibatch = []\n if minibatch:\n yield minibatch\n\ndef shuffled(data):\n data = list(data)\n random.shuffle(data)\n return data\n\ndef pool(data, batch_size, key):\n for p in batch(data, batch_size * 100):\n yield from shuffled(batch(sorted(p, key=key), batch_size))\n\n\nclass Batch:\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n for (name, field) in dataset.fields.items():\n if field is not None:\n setattr(self, name, field.numericalize(\n field.pad(x.__dict__[name] for x in data),\n device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n\nclass Iterator:\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n train=True, repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n if self.shuffle:\n xs = [self.dataset[i] for i in torch.randperm(len(self.dataset))]\n elif self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n self.batches = batch(self.data(), self.batch_size)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for i, minibatch in enumerate(self.batches):\n if i == self.iterations % len(self):\n self.iterations += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\nclass BucketIterator(Iterator):\n\n def init_epoch(self):\n if self.repeat:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key)\n else:\n self.iterations = 0\n self.batches = 
batch(self.data(), self.batch_size)\n\n\nclass BPTTIterator(Iterator):\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super().__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ['<pad>'] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\ndef interleave_keys(a, b):\n def interleave(args):\n return ''.join([x for t in zip(*args) for x in t])\n return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)\n","sub_path":"torchtext/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"638649464","text":"from noshow import *\r\nconfig.BINO = True\r\nconfig.tadj = 0\r\n#config.svlp = 200.0\r\n\r\nconfig.vsearch = 1 #print each iteration\r\nconfig.samples = 1600\r\nconfig.elites = 80 \r\nconfig.smoother = 0.7\r\n\r\nfrom ORinstance3 import sina\r\n#example adopted from Talluri&van Ryzin (2004a)\r\n#Section 2.2.3.4, but merged the middle classes\r\n#sina = SimScena()\r\n#sina.C = 108.0\r\n#sina.m = 3\r\n#sina.beta = 0.2\r\n#sina.p = (0.1,0.1)\r\n#sina.f = (1050.,647.,350.)\r\n#sina.V = 1.2*sina.f[0]/(1.0 - sum(sina.p)/2.0)\r\n#mean demand: 17.3, 45.1, 73.6, 19.8\r\n#sina.U = (64.,120.,39.)\r\n#sina.L = (20., 30., 0.)\r\n#print \"Demand Factor:\", sina.demandFactor(), sina.V\r\n\r\nnexp = noshexp(sina, 0)\r\nprob = sina.makeProblem()\r\n\r\nutot = int(sum(sina.U))\r\nltot = int(sum(sina.L))\r\n\r\nsump = [k for k in range(ltot, utot+1)]\r\n#std = [max(1.0,(prob.U[i]-prob.L[i])/2.0) for i in range(0,prob.m+1)]\r\nstd = [prob.U[i] for i in range(0,prob.m+1)]\r\navg = [(prob.L[i]+prob.U[i])/2.0 for i in range(0,prob.m+1)]\r\nnexp.initMRAS(avg, std, 500, 10, 0.0)\r\nnexp.search(21, 0)\r\ngamma = nexp.gamma(nexp.best)\r\nrron = [nexp.Ron(s[0], s[1], nexp.best) for s in nexp.scen]\r\nroff = [s[2] for s in nexp.scen]\r\nns = len(nexp.scen)\r\ngrof = [roff[i]*gamma for i in range(ns)]\r\nplot(sump, roff, '-.', label='Offline Revenue')\r\nplot(sump, rron, label='Online Revenue')\r\nplot(sump, grof, ':', label='Guaranteed Revenue')\r\n\r\nstd = [prob.U[i] for i in range(0,prob.m+1)]\r\navg = [(prob.L[i]+prob.U[i])/2.0 for i in range(0,prob.m+1)]\r\nnexp.initMRAS(avg, std, 200, 10, 0.4 )\r\nnexp.search(21, 1)\r\nregret = 
nexp.regret(nexp.best)\r\n#rron = [nexp.Ron(s[0], s[1], nexp.best) for s in nexp.scen]\r\n#grof = [roff[i]-regret for i in range(ns)]\r\n#plot(rron, label='WAR online')\r\n#plot(grof, ':', label='WAR guarant')\r\n\r\nxlim(ltot, utot)\r\nlegend(loc=0)\r\nxlabel(\"Total number of requests in extreme profiles\")\r\nylabel(\"Revenue ($)\")\r\nsavefig(\"dist-cr-mras.eps\")\r\n\r\nfigure()\r\npent = [s[3][0] for s in nexp.scen]\r\npenf = [nexp.ff.g(pent[i]) for i in range(ns)]\r\npend = [nexp.ff.g(pent[i]+1)-penf[i] for i in range(ns)]\r\nplot(pent, 'o')\r\nfor j in range(1, nexp.m):\r\n pent = [pent[i] - s[3][j] for i,s \r\n\t\t\t\t\tin enumerate(nexp.scen)]\r\n plot(pent)\r\n\r\nxlabel(\"Offline # accepted for Extreme Inputs\")\r\n\r\nfigure()\r\nplot(penf, 'ro:')\r\nplot(pend)\r\nxlabel(\"Penalties for Extreme Inputs\")\r\n\r\nshow()\r\n","sub_path":"OR-CEconv.py","file_name":"OR-CEconv.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"525422507","text":"# python imports\nimport os\nfrom tensorflow import keras\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import models\nimport tensorflow.keras.callbacks as KC\nfrom tensorflow.keras.optimizers import Adam\nfrom inspect import getmembers, isclass\n\n# project imports\nfrom . import metrics_model as metrics\nfrom .brain_generator import BrainGenerator\n\n# third-party imports\nfrom ext.lab2im import utils\nfrom ext.lab2im import layers as l2i_layers\nfrom ext.neuron import layers as nrn_layers\nfrom ext.neuron import models as nrn_models\n\n\ndef training(labels_dir,\n model_dir,\n generation_labels=None,\n segmentation_labels=None,\n patch_dir=None,\n batchsize=1,\n n_channels=1,\n target_res=None,\n output_shape=None,\n generation_classes=None,\n prior_distributions='uniform',\n prior_means=None,\n prior_stds=None,\n use_specific_stats_for_channel=False,\n mix_prior_and_random=False,\n flipping=True,\n scaling_bounds=.15,\n rotation_bounds=15,\n shearing_bounds=.012,\n translation_bounds=False,\n nonlin_std=3.,\n nonlin_shape_factor=.04,\n randomise_res=True,\n max_res_iso=4.,\n max_res_aniso=8.,\n data_res=None,\n thickness=None,\n downsample=False,\n blur_range=1.03,\n bias_field_std=.5,\n bias_shape_factor=.025,\n n_levels=5,\n nb_conv_per_level=2,\n conv_size=3,\n unet_feat_count=24,\n feat_multiplier=2,\n activation='elu',\n lr=1e-4,\n lr_decay=0,\n wl2_epochs=5,\n dice_epochs=200,\n steps_per_epoch=1000,\n checkpoint=None):\n \"\"\"\n This function trains a Unet to segment MRI images with synthetic scans generated by sampling a GMM conditioned on\n label maps. We regroup the parameters in three categories: Generation, Architecture, Training.\n\n # IMPORTANT !!!\n # Each time we provide a parameter with separate values for each axis (e.g. with a numpy array or a sequence),\n # these values refer to the RAS axes.\n\n :param labels_dir: path of folder with all input label maps, or to a single label map (if only one training example)\n :param model_dir: path of a directory where the models will be saved during training.\n\n #---------------------------------------------- Generation parameters ----------------------------------------------\n # label maps parameters\n :param generation_labels: (optional) list of all possible label values in the input label maps.\n Default is None, where the label values are directly gotten from the provided label maps. 
If not None, must be a\n list, or a 1d numpy array, or the path to such an array, which should be organised as follows: background label\n first, then non-sided labels (e.g. CSF, brainstem, etc.), then all the structures of the same hemisphere (can be\n left or right), and finally all the corresponding contralateral structures (in the same order).\n :param segmentation_labels: (optional) list of the same length as generation_labels to indicate which values to use\n in the training label maps, i.e. all occurences of generation_labels[i] in the input label maps will be converted to\n output_labels[i] in the returned label maps. Examples:\n Set output_labels[i] to zero if you wish to erase the value generation_labels[i] from the returned label maps.\n Set output_labels[i]=generation_labels[i] if you wish to keep the value generation_labels[i] in the returned maps.\n Can be a list or a 1d numpy array, or the path to such an array. Default is output_labels = generation_labels.\n\n # output-related parameters\n :param batchsize: (optional) number of images to generate per mini-batch. Default is 1.\n :param n_channels: (optional) number of channels to be synthetised. Default is 1.\n :param target_res: (optional) target resolution of the generated images and corresponding label maps.\n If None, the outputs will have the same resolution as the input label maps.\n Can be a number (isotropic resolution), or the path to a 1d numpy array.\n :param output_shape: (optional) desired shape of the output image, obtained by randomly cropping the generated image\n Can be an integer (same size in all dimensions), a sequence, a 1d numpy array, or the path to a 1d numpy array.\n Default is None, where no cropping is performed.\n\n # GMM-sampling parameters\n :param generation_classes: (optional) Indices regrouping generation labels into classes of same intensity\n distribution. Regouped labels will thus share the same Gaussian when samling a new image. Should be the path to a 1d\n numpy array with the same length as generation_labels. and contain values between 0 and K-1, where K is the total\n number of classes. Default is all labels have different classes.\n Can be a list or a 1d numpy array, or the path to such an array.\n :param prior_distributions: (optional) type of distribution from which we sample the GMM parameters.\n Can either be 'uniform', or 'normal'. Default is 'uniform'.\n :param prior_means: (optional) hyperparameters controlling the prior distributions of the GMM means. Because\n these prior distributions are uniform or normal, they require by 2 hyperparameters. Can be a path to:\n 1) an array of shape (2, K), where K is the number of classes (K=len(generation_labels) if generation_classes is\n not given). The mean of the Gaussian distribution associated to class k in [0, ...K-1] is sampled at each mini-batch\n from U(prior_means[0,k], prior_means[1,k]) if prior_distributions is uniform, and from\n N(prior_means[0,k], prior_means[1,k]) if prior_distributions is normal.\n 2) an array of shape (2*n_mod, K), where each block of two rows is associated to hyperparameters derived\n from different modalities. 
In this case, if use_specific_stats_for_channel is False, we first randomly select a\n modality from the n_mod possibilities, and we sample the GMM means like in 2).\n If use_specific_stats_for_channel is True, each block of two rows correspond to a different channel\n (n_mod=n_channels), thus we select the corresponding block to each channel rather than randomly drawing it.\n Default is None, which corresponds all GMM means sampled from uniform distribution U(25, 225).\n :param prior_stds: (optional) same as prior_means but for the standard deviations of the GMM.\n Default is None, which corresponds to U(5, 25).\n :param use_specific_stats_for_channel: (optional) whether the i-th block of two rows in the prior arrays must be\n only used to generate the i-th channel. If True, n_mod should be equal to n_channels. Default is False.\n :param mix_prior_and_random: (optional) if prior_means is not None, enables to reset the priors to their default\n values for half of thes cases, and thus generate images of random contrast.\n\n # spatial deformation parameters\n :param flipping: (optional) whether to introduce right/left random flipping. Default is True.\n :param scaling_bounds: (optional) if apply_linear_trans is True, the scaling factor for each dimension is\n sampled from a uniform distribution of predefined bounds. Can either be:\n 1) a number, in which case the scaling factor is independently sampled from the uniform distribution of bounds\n (1-scaling_bounds, 1+scaling_bounds) for each dimension.\n 2) the path to a numpy array of shape (2, n_dims), in which case the scaling factor in dimension i is sampled from\n the uniform distribution of bounds (scaling_bounds[0, i], scaling_bounds[1, i]) for the i-th dimension.\n 3) False, in which case scaling is completely turned off.\n Default is scaling_bounds = 0.15 (case 1)\n :param rotation_bounds: (optional) same as scaling bounds but for the rotation angle, except that for case 1 the\n bounds are centred on 0 rather than 1, i.e. (0+rotation_bounds[i], 0-rotation_bounds[i]).\n Default is rotation_bounds = 15.\n :param shearing_bounds: (optional) same as scaling bounds. Default is shearing_bounds = 0.012.\n :param translation_bounds: (optional) same as scaling bounds. Default is translation_bounds = False, but we\n encourage using it when cropping is deactivated (i.e. when output_shape=None).\n :param nonlin_std: (optional) Standard deviation of the normal distribution from which we sample the first\n tensor for synthesising the deformation field. Set to 0 to completely deactivate elastic deformation.\n :param nonlin_shape_factor: (optional) Ratio between the size of the input label maps and the size of the sampled\n tensor for synthesising the elastic deformation field.\n\n # blurring/resampling parameters\n :param randomise_res: (optional) whether to mimic images that would have been 1) acquired at low resolution, and\n 2) resampled to high esolution. The low resolution is uniformly resampled at each minibatch from [1mm, 9mm].\n In that process, the images generated by sampling the GMM are: 1) blurred at the sampled LR, 2) downsampled at LR,\n and 3) resampled at target_resolution.\n :param max_res_iso: (optional) If randomise_res is True, this enables to control the upper bound of the uniform\n distribution from which we sample the random resolution U(min_res, max_res_iso), where min_res is the resolution of\n the input label maps. Must be a number, and default is 4. 
Set to None to deactivate it, but if randomise_res is\n True, at least one of max_res_iso or max_res_aniso must be given.\n :param max_res_aniso: If randomise_res is True, we this enables to downsample the input volumes to a random LR in\n only 1 (random) direction. This is done by randomly selecting a direction i in the range [0, n_dims-1], and sampling\n a value in the corresponding uniform distribution U(min_res[i], max_res_aniso[i]), where min_res is the resolution\n of the input label maps. Can be a number, a sequence, or a 1d numpy array. Set to None to deactivate it, but if\n randomise_res is True, at least one of max_res_iso or max_res_aniso must be given.\n :param data_res: (optional) specific acquisition resolution to mimic, as opposed to random resolution sampled when\n randomis_res is True. This triggers a blurring which mimics the acquisition resolution, but downsampling is optional\n (see param downsample). Default for data_res is None, where images are slighlty blurred. If the generated images are\n uni-modal, data_res can be a number (isotropic acquisition resolution), a sequence, a 1d numpy array, or the path\n to a 1d numy array. In the multi-modal case, it should be given as a umpy array (or a path) of size (n_mod, n_dims),\n where each row is the acquisition resolution of the corresponding channel.\n :param thickness: (optional) if data_res is provided, we can further specify the slice thickness of the low\n resolution images to mimic. Must be provided in the same format as data_res. Default thickness = data_res.\n :param downsample: (optional) whether to actually downsample the volume images to data_res after blurring.\n Default is False, except when thickness is provided, and thickness < data_res.\n :param blur_range: (optional) Randomise the standard deviation of the blurring kernels, (whether data_res is given\n or not). At each mini_batch, the standard deviation of the blurring kernels are multiplied by a coefficient sampled\n from a uniform distribution with bounds [1/blur_range, blur_range]. If None, no randomisation. Default is 1.15.\n\n # bias field parameters\n :param bias_field_std: (optional) If strictly positive, this triggers the corruption of images with a bias field.\n The bias field is obtained by sampling a first small tensor from a normal distribution, resizing it to\n full size, and rescaling it to positive values by taking the voxel-wise exponential. bias_field_std designates the\n std dev of the normal distribution from which we sample the first tensor.\n Set to 0 to completely deactivate biad field corruption.\n :param bias_shape_factor: (optional) If bias_field_std is not False, this designates the ratio between the size of\n the input label maps and the size of the first sampled tensor for synthesising the bias field.\n\n # ------------------------------------------ UNet architecture parameters ------------------------------------------\n :param n_levels: (optional) number of level for the Unet. Default is 5.\n :param nb_conv_per_level: (optional) number of convolutional layers per level. Default is 2.\n :param conv_size: (optional) size of the convolution kernels. Default is 2.\n :param unet_feat_count: (optional) number of feature for the first layr of the Unet. Default is 24.\n :param feat_multiplier: (optional) multiply the number of feature by this nummber at each new level. Default is 2.\n :param activation: (optional) activation function. 
Can be 'elu', 'relu'.\n\n # ----------------------------------------------- Training parameters ----------------------------------------------\n :param lr: (optional) learning rate for the training. Default is 1e-4\n :param lr_decay: (optional) learing rate decay. Default is 0, where no decay is applied.\n :param wl2_epochs: (optional) number of epohs for which the network (except the soft-max layer) is trained with L2\n norm loss function. Default is 5.\n :param dice_epochs: (optional) number of epochs with the soft Dice loss function. default is 200.\n :param steps_per_epoch: (optional) number of steps per epoch. Default is 1000. Since no online validation is\n possible, this is equivalent to the frequency at which the models are saved.\n :param checkpoint: (optional) path of an already saved model to load before starting the training.\n \"\"\"\n\n # check epochs\n assert (wl2_epochs > 0) | (dice_epochs > 0), \\\n 'either wl2_epochs or dice_epochs must be positive, had {0} and {1}'.format(wl2_epochs, dice_epochs)\n\n # get label lists\n generation_labels, n_neutral_labels = utils.get_list_labels(label_list=generation_labels,\n labels_dir=labels_dir,\n FS_sort=False)\n if segmentation_labels is not None:\n segmentation_labels, _ = utils.get_list_labels(label_list=segmentation_labels)\n else:\n segmentation_labels = generation_labels\n n_segmentation_labels = len(np.unique(segmentation_labels))\n\n # instantiate BrainGenerator object\n brain_generator = BrainGenerator(labels_dir=labels_dir,\n generation_labels=generation_labels,\n output_labels=segmentation_labels,\n patch_dir=patch_dir,\n n_neutral_labels=n_neutral_labels,\n batchsize=batchsize,\n n_channels=n_channels,\n target_res=target_res,\n output_shape=output_shape,\n output_div_by_n=2 ** n_levels,\n generation_classes=generation_classes,\n prior_distributions=prior_distributions,\n prior_means=prior_means,\n prior_stds=prior_stds,\n use_specific_stats_for_channel=use_specific_stats_for_channel,\n mix_prior_and_random=mix_prior_and_random,\n flipping=flipping,\n scaling_bounds=scaling_bounds,\n rotation_bounds=rotation_bounds,\n shearing_bounds=shearing_bounds,\n translation_bounds=translation_bounds,\n nonlin_std=nonlin_std,\n nonlin_shape_factor=nonlin_shape_factor,\n randomise_res=randomise_res,\n max_res_iso=max_res_iso,\n max_res_aniso=max_res_aniso,\n data_res=data_res,\n thickness=thickness,\n downsample=downsample,\n blur_range=blur_range,\n bias_field_std=bias_field_std,\n bias_shape_factor=bias_shape_factor)\n\n # generation model\n labels_to_image_model = brain_generator.labels_to_image_model\n unet_input_shape = brain_generator.model_output_shape\n\n # prepare the segmentation model\n unet_model = nrn_models.unet(nb_features=unet_feat_count,\n input_shape=unet_input_shape,\n nb_levels=n_levels,\n conv_size=conv_size,\n nb_labels=n_segmentation_labels,\n feat_mult=feat_multiplier,\n nb_conv_per_level=nb_conv_per_level,\n batch_norm=-1,\n activation=activation,\n input_model=labels_to_image_model)\n\n # input generator\n input_generator = utils.build_training_generator(brain_generator.model_inputs_generator, batchsize)\n\n # pre-training with weighted L2, input is fit to the softmax rather than the probabilities\n if wl2_epochs > 0:\n wl2_model = models.Model(unet_model.inputs, [unet_model.get_layer('unet_likelihood').output])\n wl2_model = metrics.metrics_model(wl2_model, segmentation_labels, 'wl2')\n train_model(wl2_model, input_generator, lr, lr_decay, wl2_epochs, steps_per_epoch, model_dir, 'wl2', checkpoint)\n 
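        # point `checkpoint` at the final wl2 model so that the Dice stage
        # below resumes from the pre-trained weights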
checkpoint = os.path.join(model_dir, 'wl2_%03d.h5' % wl2_epochs)\n\n # fine-tuning with dice metric\n dice_model = metrics.metrics_model(unet_model, segmentation_labels, 'dice')\n train_model(dice_model, input_generator, lr, lr_decay, dice_epochs, steps_per_epoch, model_dir, 'dice', checkpoint)\n\n\ndef train_model(model,\n generator,\n learning_rate,\n lr_decay,\n n_epochs,\n n_steps,\n model_dir,\n metric_type,\n path_checkpoint=None,\n reinitialise_momentum=False):\n\n # prepare model and log folders\n utils.mkdir(model_dir)\n log_dir = os.path.join(model_dir, 'logs')\n utils.mkdir(log_dir)\n\n # model saving callback\n save_file_name = os.path.join(model_dir, '%s_{epoch:03d}.h5' % metric_type)\n callbacks = [KC.ModelCheckpoint(save_file_name, verbose=1)]\n\n # TensorBoard callback\n if metric_type == 'dice':\n callbacks.append(KC.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=False))\n\n compile_model = True\n init_epoch = 0\n if path_checkpoint is not None:\n if metric_type in path_checkpoint:\n init_epoch = int(os.path.basename(path_checkpoint).split(metric_type)[1][1:-3])\n if (not reinitialise_momentum) & (metric_type in path_checkpoint):\n custom_l2i = {key: value for (key, value) in getmembers(l2i_layers, isclass) if key != 'Layer'}\n custom_nrn = {key: value for (key, value) in getmembers(nrn_layers, isclass) if key != 'Layer'}\n custom_objects = {**custom_l2i, **custom_nrn, 'tf': tf, 'keras': keras, 'loss': metrics.IdentityLoss().loss}\n model = models.load_model(path_checkpoint, custom_objects=custom_objects)\n compile_model = False\n else:\n model.load_weights(path_checkpoint, by_name=True)\n\n # compile\n if compile_model:\n model.compile(optimizer=Adam(lr=learning_rate, decay=lr_decay),\n loss=metrics.IdentityLoss().loss,\n loss_weights=[1.0])\n\n # fit\n model.fit_generator(generator,\n epochs=n_epochs,\n steps_per_epoch=n_steps,\n callbacks=callbacks,\n initial_epoch=init_epoch)\n","sub_path":"SynthSeg/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":20633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"120748770","text":"\"\"\"Write a program using list comprehension to print the Fibonacci Sequence in comma separated form with a\r\ngiven n input by console.\r\nExample:\r\nIf the following n is given as input to the program:\r\n7\r\nThen, the output of the program should be:\r\n0,1,1,2,3,5,8,13\r\nHint: The Fibonacci Sequence is computed based on the following formula:\r\nf(n)=0 if n=0\r\nf(n)=1 if n=1\r\nf(n)=f(n-1)+f(n-2) if n>1\r\n \"\"\"\r\ndef f(n):\r\n if n == 0: return 0\r\n elif n == 1: return 1\r\n else: return f(n-1)+f(n-2)\r\n\r\nn=int(input(\"Enter num for fib:- \"))\r\nvalues = [str(f(x)) for x in range(0, n+1)]\r\nprint(\",\".join(values))","sub_path":"python assignments/9fib.py","file_name":"9fib.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"34131411","text":"class Bot:\n \n # constructor\n def __init__(self, name):\n self.name = name \n self.age = 0\n self.energy = 10\n self.shield = 10\n\n # method \n def display_name(self):\n \n \n print(\"--------\")\n print(\"|\", self.name, \"|\")\n print(\"--------\") \n \n \n \n def display_age(self):\n print(\".-----.\")\n print(\"| .-. 
|\")\n print(\"| \",self.age,\" |\")\n print(\"| `-' |\") \n print(\"`-----'\")\n\n\n \n def display_energy(self):\n print(\"my energy is\", \"♦\"*self.energy)\n \n \n \n def display_shield(self):\n print(\"my shield level is\", \"♦\"* self.shield)\n\n #make some bots \n\nbeep = Bot(\"Beep\")\nbeep.display_name()\nbeep.display_age()\nbeep.display_energy()\nbeep.display_shield() \n\n\n\n ","sub_path":"2- guis/1-classes-and-objects/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"274357784","text":"import sys\n\nfrom telebot import TeleBot, types\n\nfrom asgiref.sync import async_to_sync, sync_to_async\nfrom channels.layers import get_channel_layer\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.safestring import mark_safe\nfrom django.urls import reverse\n\nfrom config.settings import (\n TELEGRAM_BOT_TOKEN,\n TELEGRAM_BOT_WEBHOOK_HOST,\n TELEGRAM_BOT_FILE_SIZE_LIMIT\n)\n\nfrom chat.models import Chat, Message\n\nfrom telegram_bot.models import User\nfrom telegram_bot.utils import ctime, get_default_image\n\nfrom google_drive_API.api import upload_to_google_drive\n\n\nWEBHOOK_PATH = f\"/{TELEGRAM_BOT_TOKEN}/\"\nWEBHOOK_URL = TELEGRAM_BOT_WEBHOOK_HOST + \"/telegram/webhook\" + WEBHOOK_PATH\n\n\nIGNORED_CONTENT_TYPES = [\n \"audio\",\n \"sticker\",\n \"video\",\n \"video_note\",\n \"voice\",\n \"location\",\n \"contact\"\n]\n\n\nBOT_PHRASES = {\n \"user_blocked\": \"❌ Вы заблокированы ❌\",\n \"user_unblocked\": \"✅ Вы разблокированы ✅\",\n \"file_size_exceeded\": \"⚠️ Превышен допустимый размер файла.\\nМаксимальный размер файла 2.5 MB\",\n \"available_content_types\": \"ℹ️ Бот поддерживает следующие типы сообщений:\\n\\n📝 Текст\\n\\n🖼 Фото (до 2.5 MB)\\n\\n📁 Файлы (до 2.5 MB)\",\n}\n\n\nbot = TeleBot(token=TELEGRAM_BOT_TOKEN)\n\n\nif \"--not-set-telegram-webhook\" not in sys.argv:\n bot.delete_webhook()\n bot.set_webhook(WEBHOOK_URL)\n\n sys.argv.append(\"--not-set-telegram-webhook\")\n\n\ndef download_file_from_telegram(file_id):\n metadata = bot.get_file(file_id)\n content = bot.download_file(metadata.file_path)\n\n return upload_to_google_drive(file_id, content)\n\n\ndef download_user_photo(user_id):\n response = bot.get_user_profile_photos(user_id)\n\n if response.total_count > 0:\n return download_file_from_telegram(response.photos[0][0].file_id)\n\n return None\n\n\n@async_to_sync\nasync def notify_staff_about_new_chat(chat):\n await get_channel_layer().group_send(\n \"chat\", {\n \"type\": \"notify.staff\",\n \"ucid\": chat.ucid,\n \"first_name\": chat.first_name,\n \"last_name\": chat.last_name,\n \"username\": chat.username,\n \"user_image\": chat.user.image,\n }\n )\n\n\ndef send_message(chat_id, text, *, reply_to_message_id=None):\n bot.send_chat_action(chat_id, action='typing')\n return bot.send_message(\n chat_id,\n text=mark_safe(text),\n reply_to_message_id=reply_to_message_id,\n parse_mode=\"HTML\"\n )\n\n\ndef send_text_message_to_client(chat, text, *, reply_to_message_id=None):\n message = send_message(\n chat.id, text, reply_to_message_id=reply_to_message_id)\n\n process_message(chat, message)\n\n\n@sync_to_async\ndef async_send_text_message_to_client(chat, text, *, reply_to_message_id=None):\n send_text_message_to_client(\n chat, text, reply_to_message_id=reply_to_message_id)\n\n\n@sync_to_async\ndef async_edit_bot_message_text(chat_id, message_id, text):\n bot.edit_message_text(chat_id=chat_id, message_id=message_id, 
text=text)\n\n\n@sync_to_async\ndef async_edit_bot_message_caption(chat_id, message_id, caption):\n bot.edit_message_caption(\n chat_id=chat_id, message_id=message_id, caption=caption)\n\n\ndef send_photo_to_client(chat, file, *, caption=None, reply_to_message_id=None):\n bot.send_chat_action(chat.id, action='upload_photo')\n message = bot.send_photo(\n chat.id,\n file,\n caption=mark_safe(caption),\n reply_to_message_id=reply_to_message_id,\n )\n\n process_message(chat, message)\n\n\ndef send_document_to_client(chat, file, *, caption=None, reply_to_message_id=None):\n bot.send_chat_action(chat.id, action='upload_document')\n message = bot.send_document(\n chat.id,\n file,\n caption=mark_safe(caption),\n reply_to_message_id=reply_to_message_id,\n )\n\n process_message(chat, message)\n\n\ndef send_welcome_message(message):\n first_name = message.chat.first_name or \"\"\n last_name = message.chat.last_name or \"\"\n username = message.chat.username or \"\"\n\n username = message.chat.username or first_name + \" \" + last_name\n\n send_message(message.chat.id, f\"Привет, {mark_safe(username)}👋\\n\")\n\n\ndef get_or_create_telegram_user(message):\n return User.objects.get_or_create(\n id=message.from_user.id,\n is_bot=message.from_user.is_bot,\n first_name=message.from_user.first_name,\n last_name=message.from_user.last_name,\n username=message.from_user.username,\n )\n\n\ndef get_or_create_telegram_chat(telegram_user, message):\n return Chat.objects.get_or_create(\n id=message.chat.id,\n first_name=message.chat.first_name,\n last_name=message.chat.last_name,\n username=message.chat.username,\n type=message.chat.type,\n user=telegram_user,\n is_archived=False,\n )\n\n\ndef get_telegram_user(message):\n telegram_user, is_new = get_or_create_telegram_user(message)\n\n if is_new:\n file_id = download_user_photo(message.from_user.id)\n\n if file_id:\n telegram_user.image = reverse(\n \"telegram_bot:user_photo\", args=[file_id]\n )\n\n else:\n # telegram_user.image = reverse(\n # \"static\", args=[get_default_image()]\n # )\n telegram_user.image = f\"/static/{get_default_image()}\"\n\n telegram_user.save()\n\n return telegram_user\n\n\ndef get_telegram_chat(user, message):\n chat, is_new = get_or_create_telegram_chat(user, message)\n\n if is_new and not user.is_blocked:\n send_welcome_message(message)\n notify_staff_about_new_chat(chat)\n\n return chat\n\n\n@ bot.message_handler(content_types=IGNORED_CONTENT_TYPES)\ndef process_ignored_content_types(message):\n send_message(message.chat.id, BOT_PHRASES[\"available_content_types\"])\n\n\ndef process_message(chat, message):\n photo = download_file_from_telegram(\n message.photo[-1].file_id) if message.photo else None\n document = download_file_from_telegram(\n message.document.file_id) if message.document else None\n\n file_name = message.document.file_name if message.document else None\n caption = message.caption if message.caption else None\n\n staff = chat.staff if message.from_user.is_bot else None\n user = chat.user if not message.from_user.is_bot else None\n\n if message.reply_to_message:\n try:\n reply_to_message = Message.objects.get(\n id=message.reply_to_message.message_id,\n chat=chat,\n )\n except ObjectDoesNotExist:\n reply_to_message = None\n else:\n reply_to_message = None\n\n return Message.objects.create(\n id=message.message_id,\n chat=chat,\n user=user,\n staff=staff,\n reply_to_message=reply_to_message,\n text=message.text,\n photo=photo,\n document=document,\n file_name=file_name,\n caption=caption,\n date=ctime(message.date),\n 
)\n\n\n@ bot.message_handler(content_types=[\"text\"])\ndef process_received_text_message(message):\n user = get_telegram_user(message)\n chat = get_telegram_chat(user, message)\n\n if user.is_blocked:\n send_message(chat.id, BOT_PHRASES[\"user_blocked\"])\n else:\n process_message(chat, message)\n\n\n@ bot.message_handler(commands=[\"start\"])\ndef process_received_start_command(message):\n process_received_text_message(message)\n\n\n@ bot.message_handler(content_types=[\"photo\"])\ndef process_received_photo_message(message):\n user = get_telegram_user(message)\n chat = get_telegram_chat(user, message)\n\n if user.is_blocked:\n send_message(chat.id, BOT_PHRASES[\"user_blocked\"])\n else:\n if message.photo[-1].file_size <= TELEGRAM_BOT_FILE_SIZE_LIMIT:\n process_message(chat, message)\n else:\n send_message(chat.id, BOT_PHRASES[\"file_size_exceeded\"])\n\n\n@ bot.message_handler(content_types=[\"document\"])\ndef process_received_document_message(message):\n user = get_telegram_user(message)\n chat = get_telegram_chat(user, message)\n\n if user.is_blocked:\n send_message(chat.id, BOT_PHRASES[\"user_blocked\"])\n else:\n if message.document.file_size <= TELEGRAM_BOT_FILE_SIZE_LIMIT:\n process_message(chat, message)\n else:\n send_message(chat.id, BOT_PHRASES[\"file_size_exceeded\"])\n\n\n@bot.edited_message_handler(content_types=[\"text\", \"photo\", \"document\"])\ndef process_edited_message(message):\n try:\n edited_message = Message.objects.get(id=message.message_id)\n except Message.DoesNotExist:\n pass\n else:\n if message.content_type == \"text\":\n edited_message.edited_text = message.text\n else:\n edited_message.edited_text = message.caption\n\n edited_message.is_edited = True\n edited_message.save(update_fields=[\"is_edited\", \"edited_text\"])\n\n\ndef delete_bot_message(chat_id, message_id):\n bot.delete_message(chat_id, message_id)\n\n\ndef edit_bot_message(chat_id, message_id):\n bot.edit_message(chat_id, message_id)\n\n\ndef debug_telegram_bot():\n bot.remove_webhook()\n bot.polling()\n\n\ndef process_telegram_event(update):\n update = types.Update.de_json(update)\n bot.process_new_updates([update])\n","sub_path":"telegram_bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":9378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"292884422","text":"import logging\nimport time\nfrom threading import Thread\nfrom typing import Dict, Set, Optional\n\n# import cv2\nfrom cv2loader import cv2\nimport numpy as np\n\nfrom server.analysis.compressor import Compressor\nfrom server.analysis.roi_maker import RoiMaker\nfrom server.analysis.splines import CatmulRomCurve\nfrom server.analysis.types import Point, Size\nfrom server.server import MjpegStream\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImageAnalyzer(Thread):\n _streams: Dict[str, Optional[MjpegStream]]\n _compressors: Dict[str, Compressor]\n\n def __init__(self):\n super().__init__()\n\n self.roi_size = Size(512, 256)\n self.camera_size = Size(1920 // 2, 1080 // 2)\n self.poi = [\n Point(0, 0),\n Point(self.camera_size.width - 1, 0),\n Point(self.camera_size.width - 1, self.camera_size.height - 1),\n Point(0, self.camera_size.height - 1),\n ]\n self._is_running = False\n self._camera_image = np.empty((1, 1, 3), dtype=np.uint8)\n\n self._output_images_names = {\"main\", \"roi\"}\n\n self._analysis_output_images = {\n name: np.empty((1, 1, 3), dtype=np.uint8)\n for name in self._output_images_names\n }\n self._compressors = {\n name: Compressor()\n for 
name in self._output_images_names\n        }\n        self._streams = {\n            name: None\n            for name in self._output_images_names\n        }\n        self._curves = [CatmulRomCurve.from_points([\n            Point(316, 45), Point(214, 71), Point(230, 108), Point(275, 132),\n            Point(334, 137), Point(397, 135), Point(486, 125), Point(625, 124),\n            Point(765, 122), Point(860, 126), Point(908, 128), Point(937, 127)\n        ])]\n\n        self._camera_fps = 10\n        rtsp_url = \"rtsp://admin:123@192.168.1.106:554/onvif1\"\n        gstreamer_pipeline = f\"rtspsrc location=\\\"{rtsp_url}\\\" ! rtph264depay ! h264parse ! avdec_h264 ! \"\n        gstreamer_pipeline += f\"autovideoconvert ! videorate ! video/x-raw,framerate={self._camera_fps}/1 ! appsink\"\n\n        logger.info(\"Starting camera capture\")\n        logger.info(f\"Using GStreamer pipeline: '{gstreamer_pipeline}'\")\n\n        self._camera = cv2.VideoCapture(\n            gstreamer_pipeline,\n            cv2.CAP_GSTREAMER\n        )\n\n    @property\n    def output_images_names(self) -> Set[str]:\n        return self._output_images_names\n\n    def assign_stream(self, name: str, stream: MjpegStream) -> None:\n        self._streams[name] = stream\n\n    def start(self):\n        self._is_running = True\n        for compressor in self._compressors.values():\n            compressor.start()\n        super().start()\n\n    def stop(self):\n        self._is_running = False\n        self.join()\n        for compressor in self._compressors.values():\n            compressor.stop()\n\n    def run(self) -> None:\n        debug = False\n\n        start_time = time.time()\n        log_fps_every = 10  # log FPS every 10 frames\n        frame_count = 0\n        while self._is_running:\n            result, frame = self._camera.read()\n            if result:\n                self._camera_image = cv2.resize(frame, self.camera_size.to_tuple())\n\n                self._analyze()\n                self._compress()\n                self._stream()\n\n                if debug:\n                    frame_count += 1\n                    if frame_count % log_fps_every == 0:\n                        time_delta = (time.time() - start_time)\n                        if time_delta == 0:\n                            true_fps = 0\n                        else:\n                            true_fps = log_fps_every / time_delta\n                        logger.info(f\"Camera FPS is {true_fps:.2f}\")\n                        start_time = time.time()\n            else:\n                logger.warning(f\"Skipped frame, result: {result}\")\n                time.sleep(1 / self._camera_fps)\n\n    def _analyze(self) -> None:\n        self._draw_curves()\n        self._analysis_output_images[\"main\"] = self._camera_image\n        self._make_roi()\n\n    def _make_roi(self):\n        roi_maker = RoiMaker(self._camera_image, self.roi_size)\n        self._analysis_output_images[\"roi\"] = roi_maker.roi(self.poi)\n\n    def _draw_curves(self):\n        curves_points = [curve.compute_range(0.0, 1.0, 50) for curve in self._curves]\n        for curve_points in curves_points:\n            polyline_points = np.array(\n                [[point.x, point.y] for point in curve_points],\n                dtype=np.int32\n            )\n            cv2.polylines(self._camera_image, [polyline_points], False, (0, 0, 255), 2)\n\n    def _compress(self) -> None:\n        for name, image in self._analysis_output_images.items():\n            stream = self._streams.get(name)\n            if stream is None or not stream.is_streaming:\n                continue\n            self._compressors[name].input_queue.put(image)\n\n    def _stream(self) -> None:\n        for name, compressor in self._compressors.items():\n            stream = self._streams.get(name)\n            if stream is None or not stream.is_streaming:\n                continue\n            stream_data = compressor.output_queue.get()\n            stream.raw_image_data = stream_data\n","sub_path":"server/analysis/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"371986810","text":"# Given the head of a sorted (ascending) linked list, delete every node whose value appears more than once, keeping only the values from the original list that are not repeated.\r\n#\r\n# Return the resulting linked list, still sorted in ascending order.\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n# Input: head = [1,2,3,3,4,4,5]\r\n# Output: [1,2,5]\r\n#\r\n#\r\n# Example 2:\r\n#\r\n#\r\n# Input: head = [1,1,1,2,3]\r\n# Output: [2,3]\r\n#\r\n#\r\n#\r\n#\r\n# Constraints:\r\n#\r\n#\r\n# The number of nodes in the list is in the range [0, 300]\r\n# -100 <= Node.val <= 100\r\n# The list is guaranteed to be sorted in ascending order\r\n#\r\n# Related Topics: Linked List, Two Pointers\r\n# 👍 660 👎 0\r\n\r\n\r\n# Definition for singly-linked list.\r\nclass ListNode:\r\n    def __init__(self, val=0, next=None):\r\n        self.val = val\r\n        self.next = next\r\n\r\n    def __repr__(self):\r\n        return f\"{self.val}\"\r\n\r\n# leetcode submit region begin(Prohibit modification and deletion)\r\n\r\nclass Solution:\r\n    def deleteDuplicates(self, head: ListNode) -> ListNode:\r\n        dummy = ListNode(0)  # create a dummy node\r\n        dummy.next = head  # point the dummy node at the head\r\n        pre = dummy  # the slow pointer starts at the dummy node\r\n        cur = head  # the fast pointer starts at the head node\r\n        while cur and cur.next:  # traverse the list\r\n            if cur.val == cur.next.val:  # a duplicated value was found\r\n                while cur.next and cur.val == cur.next.val:  # drop the duplicates until a different value appears\r\n                    cur.next = cur.next.next\r\n                pre.next = cur.next  # link pre to the first element that is not a duplicate\r\n            else:  # the current value is not duplicated\r\n                pre = cur  # advance the slow pointer to cur\r\n                cur = cur.next  # advance the fast pointer\r\n        return dummy.next\r\n\r\n# leetcode submit region end(Prohibit modification and deletion)\r\nh = [1, 1, 1, 2, 3]\r\nhead = ListNode(h[0])\r\ncur = head\r\nfor v in h[1:]:\r\n    cur.next = ListNode(v)\r\n    cur = cur.next\r\n\r\nsolution = Solution()\r\nres = solution.deleteDuplicates(head)\r\nwhile res:\r\n    print(res)\r\n    res = res.next\r\n","sub_path":"[82]删除排序链表中的重复元素2.py","file_name":"[82]删除排序链表中的重复元素2.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"545680034","text":" # Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template.loader import get_template\nfrom django.template import RequestContext\nfrom forms import *\nfrom releaserecord.models import *\nimport copy\nimport os\nimport shutil\nimport sys\nimport socket\nimport threading\nfrom threading import Thread\nimport datetime,calendar,time\nimport HTMLParser\nfrom collections import namedtuple\nfrom autotrend.models import *\nfrom django.db.models.lookups import *\nfrom productivity.views import autobugzilla\nfrom productivity.views import namemap;\n\n#########################################################\ndef compareVersion(version1, version2):\n    \"\"\"\n    :type version1: str\n    :type version2: str\n    :rtype: int\n    \"\"\"\n    if version1 == version2:\n        return 0\n    version1 = version1.split('.')\n    version2 = version2.split('.')\n    top1 = version1.pop(0)\n    top2 = version2.pop(0)\n    if top1 == '':\n        top1 = 0\n    if top2 == '':\n        top2 = 0\n    if int(top1) > int(top2):\n        return 1\n    elif int(top1) < int(top2):\n        return -1\n    else:\n        return compareVersion('.'.join(version1), '.'.join(version2))\n\n\t\ndef database(request):\n\ttpmgmt=Testplanmgmt.objects.all().exclude(Testplan_Type=\"Automation\")\n\ttemplate = get_template('database.html')\n\tif request.method == \"POST\":\n\t\tscript = request.POST.get('Script_Name')\n\t\ttestplan = request.POST.get('Testplan_Name')\n\t\ttestplano=Testplanmgmt.objects.filter(Testplan_Name=testplan)\n\n\t\n\tvariables = RequestContext(request,{\n\t\t'tpmgmt':tpmgmt,\n\t})\n\toutput = template.render(variables)\n\treturn HttpResponse(output)\n\ndef namedtuplefetchall(cursor):\n    \"Return all rows from a cursor as a namedtuple\"\n    desc = cursor.description\n    nt_result = namedtuple('Result', [col[0] for col in desc])\n    return [nt_result(*row) for row in cursor.fetchall()]\n\t\ndef dictfetchall(cursor):\n    
\"Return all rows from a cursor as a dict\"\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\t\n\ndef release(request):\n\tif request.method == \"POST\":\n\t\tpairs = zip(request.POST.getlist('id'),request.POST.getlist('Item'),request.POST.getlist('Result'),request.POST.getlist('Coverage'),request.POST.getlist('Comments'),request.POST.getlist('Tag'),request.POST.getlist('ReleaseNum'),request.POST.getlist('Bug'),request.POST.getlist('Owner'))\n\t\tfor id,Item,Result,Coverage,Comments,Tag,ReleaseNum,Bug,Owner in pairs:\n\t\t\texist=Feature.objects.filter(id=id)\n\t\t\tif exist: \n\t\t\t\texist.update(Result=Result,Coverage=Coverage,Comments=Comments,Tag=Tag,ReleaseNum=ReleaseNum,Bug=Bug,Owner=Owner)\n\t\t\telse:\n\t\t\t\tdata=Feature()\n\t\t\t\tdata.Item=Item,data.ReleaseNum=ReleaseNum,data.Coverage=Coverage,data.Comments=Comments,data.Tag=Tag,data.Result=Result,data.Bug=Bug,data.Owner=Owner\n\t\t\t\tif(data.ReleaseNum == \"\" or data.Item == \"\" or data.Owner == \"\"):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdata.save()\n\treleasefilter=request.GET.get('release')\n\treleasefilter=\"4.2.1.0\"\n\tweek=request.GET.get('week')\n\tsmoke_list =Smoke.objects.all().order_by(\"id\")\n\ttpmgmt=Testplanmgmt.objects.all().exclude(Testplan_Type=\"Manual\")\n\tbuilddict={}\n\tdicttestplan={}\n\tdicttrend={}\n\tbuildlist=[]\n\treleaselist=[]\n\treleasemile=Releasemgmts.objects.all().order_by('Release')\n\treleasedict={}\n\tfor rm in releasemile:\n\t\treleasedict[rm.Release]=rm\n\tsortedlist=sorted(releasedict.iteritems(),cmp=compareVersion,key=lambda d:d[0],reverse = False)\t\n\tcount_smoke=0\n\tfail_smoke=0\n\tresult_smoke=\"Pass\"\n\tmaxbuild=maxrelease=releaseno=instant=aos=\"\"\n\tsmokebuild=[]\n\ttp_list=[]\n\tweek=int(time.strftime(\"%W\"))+1\n\tdate = datetime.datetime.now()\n\tweekday=int(date.isoweekday())\n\tyear =str(datetime.date.today())[0:4]\n\tlastyear=int(year)-1\n\tmonth=str(datetime.date.today())[5:7] \n\tday=str(datetime.date.today())[8:10]\n\tslist=[]\n\t\"\"\"\n\t\tsl.Build 6.4.3.1-4.2.0.1_51742\n\t\t | | |\n\t\t AOS INSTANT Build Number\n\t\"\"\"\n\tfor sl in smoke_list:\n\t\trelease,subbuild=sl.Build.split('_')\n\t\taos,instant=release.split('-')\n\t\tif instant in builddict:\n\t\t\tif not subbuild in builddict[instant]:\n\t\t\t\tbuilddict[instant].append(subbuild)\n\t\telse:\n\t\t\tbuilddict[instant]=[subbuild]\n\t\tif subbuild==\"99999\":\n\t\t\tcontinue\n\t\telif maxbuild/overview')\nclass ProjectData(Resource):\n def get(self, project_uuid, path):\n \"\"\"\n Get a aggregated overview of project data:\n \"\"\"\n q = {\n \"aggs\": {\n \"by_userid\": {\n \"terms\": {\n \"field\": \"_user\"\n },\n \"aggs\": {\n \"overall\": {\n \"avg\": {\n \"field\": \"overall\",\n \"missing\": 0\n }\n },\n }\n }\n }\n }\n try:\n scroll = \"1m\"\n response = es.search(index=\"symptoms-index\", doc_type=\"project\", body=q, scroll=scroll)\n return jsonify(response[\"aggregations\"][\"by_userid\"][\"buckets\"][0])\n except:\n return jsonify([])\n","sub_path":"apis/v1/router_project.py","file_name":"router_project.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"652880868","text":"import datetime\nimport pathlib\n\nimport botocore.exceptions\nimport more_itertools as mi\nimport collections\nimport csv\nimport typing\n\nimport pytest\nimport requests\nimport sqlalchemy\nimport sqlalchemy.orm\n\nimport opshub\nimport 
opshub.fileshare\nfrom opshub import models\n\n\n@pytest.fixture(name='file_share', scope='session')\ndef _file_share(app):\n \"\"\"boto s3 client object\"\"\"\n s3 = opshub.fileshare.create_client(app)\n\n try:\n res = s3.list_buckets()\n assert res['ResponseMetadata']['HTTPStatusCode'] == 200\n except botocore.exceptions.BotoCoreError:\n pytest.fail('Unable to connect to S3 service (hint: try docker-compose up)')\n return\n buckets = [r['Name'] for r in res['Buckets']]\n\n bucket_name = app.config.get('S3_BUCKET_NAME')\n\n if bucket_name not in buckets:\n s3.create_bucket(ACL='public-read-write', Bucket=bucket_name)\n return s3\n\n\n@pytest.fixture(name='bucket_dir', scope='session')\ndef _bucket_dir(file_share, app):\n \"\"\"Local directory where s3 bucket objects are stored\"\"\"\n p = pathlib.Path(__file__).parent.parent.parent / 'data' / app.config.get('S3_BUCKET_NAME')\n if not p.exists():\n pytest.fail(f'Local directory for S3 service not found: {p}')\n return p\n\n\n@pytest.fixture(name='app', scope='session')\ndef _app():\n \"\"\"Flask app instance spun up in separate process. Runs on localhost:5000\"\"\"\n import multiprocessing\n app = opshub.create_app()\n\n def run():\n app.run('0.0.0.0')\n\n d = multiprocessing.Process(target=run)\n d.start()\n\n assert requests.get('http://localhost:5000/').status_code == 200\n yield app\n d.terminate()\n\n\n@pytest.fixture(name='db_session', scope='session')\ndef _db_session(app) -> sqlalchemy.orm.Session:\n \"\"\"\n Database session to use in tests.\n\n Note, DB schema is completely dropped and re-created at the beginning of\n each test session.\n \"\"\"\n e = sqlalchemy.create_engine(app.config.get('SQLALCHEMY_DB_URL'))\n models.Base.metadata.drop_all(e)\n models.Base.metadata.create_all(e)\n\n sess = sqlalchemy.orm.Session(bind=e)\n yield sess\n sess.close()\n\n\n@pytest.fixture(name='transactions', scope='session')\ndef _transactions(db_session):\n \"\"\"Load a sample of transactions for several gumball machines\"\"\"\n\n test_data_path = pathlib.Path(__file__).parent.parent / 'fixtures/transactions.txt'\n\n db_session.query(models.GumballTransaction).delete()\n db_session.commit()\n\n for i in range(6):\n db_session.add(models.GumballMachine(serial_no=f'GM00{i + 1}'))\n db_session.commit()\n\n def _load_all(\n transactions: typing.Iterable[typing.Tuple[str, str, str]]\n ) -> typing.Iterable[models.GumballTransaction]:\n trans_ids = collections.Counter()\n for sn, td, dc in transactions:\n trans_ids[sn] += 1\n try:\n yield models.GumballTransaction(\n trans_id=trans_ids[sn],\n serial_no=sn,\n trans_date=datetime.datetime.strptime(td, '%Y-%m-%dT%H:%M:%SZ'),\n dispense_count=int(dc))\n except ValueError as ve:\n raise ve\n\n with test_data_path.open() as f:\n reader = csv.DictReader(f, ('serial_no', 'transaction_date', 'dispense_count'), delimiter='|')\n rows = sorted(reader, key=lambda r: r['transaction_date'])\n for chunk in mi.chunked(_load_all((r.values() for r in rows)), 200):\n db_session.add_all(chunk)\n db_session.commit()\n","sub_path":"tests/functional/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"336630873","text":"\"\"\"\nSimple water flow example using ANUGA\nSubcritical flow over flat surface. 
Should produce steady state\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Import necessary modules\n#------------------------------------------------------------------------------\nimport sys\nimport anuga\nfrom anuga import Domain as Domain\nfrom anuga import myid, finalize, distribute\nfrom math import cos\nfrom numpy import zeros, ones, float\nfrom time import localtime, strftime, gmtime\n#from balanced_dev import *\n\n\n#-------------------------------------------------------------------------------\n# Copy scripts to time stamped output directory and capture screen\n# output to file\n#-------------------------------------------------------------------------------\ntime = strftime('%Y%m%d_%H%M%S',localtime())\n\n#output_dir = 'subcritical_'+time\noutput_dir = '.'\noutput_file = 'subcritical'\n\nargs = anuga.get_args()\nalg = args.alg\nverbose = args.verbose\n\n\n#------------------------------------------------------------------------------\n# Setup domain\n#------------------------------------------------------------------------------\ndx = 0.1\ndy = dx\nL = 25.\nW = 3*dx\n\nif myid == 0:\n    # structured mesh\n    points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.0, 0.0))\n    \n    #domain = anuga.Domain(points, vertices, boundary) \n    domain = Domain(points, vertices, boundary) \n    \n    domain.set_name(output_file) \n    domain.set_datadir(output_dir) \n    domain.set_flow_algorithm(alg)\n    \n    #------------------------------------------------------------------------------\n    # Setup initial conditions\n    #------------------------------------------------------------------------------\n\n    domain.set_quantity('elevation',0.0)\n    domain.set_quantity('friction', 0.0)\n\n    domain.set_quantity('stage', 2.)\n    domain.set_quantity('xmomentum', 0.0)\n    domain.set_quantity('ymomentum', 0.0)\nelse:\n    domain = None\n    \n#---------------------------\n# Create Parallel Domain\n#--------------------------- \ndomain = distribute(domain)\n\n#-----------------------------------------------------------------------------\n# Setup boundary conditions\n#------------------------------------------------------------------------------\n\nBr = anuga.Reflective_boundary(domain)      # Solid reflective wall\n#Bt = anuga.Transmissive_boundary(domain)    # Continue all values on boundary \nBd = anuga.Dirichlet_boundary([2., 4.42, 0.]) # Constant boundary values\n\n# Associate boundary tags with boundary objects\ndomain.set_boundary({'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br})\n\n\n\n\n#------------------------------------------------------------------------------\n# Produce a documentation of parameters\n#------------------------------------------------------------------------------\nif myid == 0:\n    parameter_file=open('parameters.tex', 'w')\n    parameter_file.write('\\\\begin{verbatim}\\n')\n    from pprint import pprint\n    pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)\n    parameter_file.write('\\\\end{verbatim}\\n')\n    parameter_file.close()\n\n#------------------------------------------------------------------------------\n# Evolve system through time\n#------------------------------------------------------------------------------\nfor t in domain.evolve(yieldstep = 1.0, finaltime = 50.):\n    #print(domain.timestepping_statistics(track_speeds=True))\n    if myid == 0 and verbose: print(domain.timestepping_statistics())\n\n\ndomain.sww_merge(delete_old=True)\n\n\nfinalize()\n","sub_path":"validation_tests/analytical_exact/subcritical_flat/numerical_subcritical.py","file_name":"numerical_subcritical.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"391674664","text":"\"\"\"Plotting Live Data and Analysis of Data\n\nuse with arduino code \"new3D\"\n\nfeatures\n- real-time plotting of up to 4 sensors\n- automatic fuzzy c-means clustering\n- lines between centroids\n- continuous plotting with cluster predictions\n\"\"\"\n\n\n\"\"\" A. INITIALIZATION AND SETUP \"\"\"\n\n\n\"\"\" 1. importing relevant libraries \"\"\"\nimport serial\nimport time\nimport datetime\nimport csv\nimport math\nimport os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport skfuzzy as fuzz\n\nos.path.abspath(\"C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\\\csvfiles\")\n\n\"\"\" 2. retrieving serial data \"\"\"\n\nser = serial.Serial(port = 'COM10', baudrate = 9600, timeout = None) #connected port\nprint (\"connected to: \" + ser.portstr)\n\n\"\"\" 3. number of sensors \"\"\"\n\nnum_sensors = 4\nnum_dim =3 \nver =0 \n\n\"\"\" 3. initializing starting variables and functions\"\"\"\n\n\"\"\"following function converts a 3D list to a 2D one\"\"\"\ndef threeDtotwoD(array): #array = 2D matrix (3 rows, N columns (number of samples))\n    new_list = [[],[]]\n    for i in range(len(array[0])):\n        a = float(array[0][i])\n        b = float(array[1][i])\n        c = float(array[2][i])    \n        f = (np.sqrt(3.0)/2)*(a-b)\n        g = (1/2)*(2*c-a-b)\n        r = np.sqrt(a**2 + b**2 + c**2)\n        s = np.sqrt(f**2 + g**2 + 1)\n        j = (r/s)*f\n        k = (r/s)*g\n        new_list[0].append(j)\n        new_list[1].append(k)\n    return new_list\n    \n\"\"\"following function converts a 3D list to another 3D one\"\"\"\ndef threeDconv(array): #array = 2D matrix (3 rows, N columns)\n    new_list = [[],[],[]]\n    for i in range(len(array[0])):\n        a = float(array[0][i])\n        b = float(array[1][i])\n        c = float(array[2][i])    \n        f = (np.sqrt(3.0)/2)*(a-b)\n        g = (1/2)*(2*c-a-b)\n        h = np.sqrt(a**2 + b**2 + c**2)\n        s = np.sqrt(f**2 + g**2 + 1)\n        x = (h/s)*f\n        y = (h/s)*g\n        new_list[0].append(x)\n        new_list[1].append(y)\n        new_list[2].append(h)\n    return new_list\n\n\"\"\"following function converts a 4D list into a 3D one\"\"\"\ndef fourDtothreeD(array): #array = 2D matrix (4 rows, N columns)\n    new_list = [[],[],[]]\n    for i in range(len(array[0])):\n        a = float(array[0][i])\n        b = float(array[1][i])\n        c = float(array[2][i])    \n        d = float(array[3][i])\n        x = a - (1/3)*(b+c+d)\n        y = (np.sqrt(2)/3)*(2*b-c-d)\n        z = (np.sqrt(2/3))*(c-d)\n        new_list[0].append(x)\n        new_list[1].append(y)\n        new_list[2].append(z)    \n    return new_list\n    \nline = []\nindex = 0\ncolors = ['g', 'm', 'b', 'r', 'c', 'y', 'k', 'Brown', 'ForestGreen']\nall_data_proj3D = [[],[],[]]\nall_data_proj2D = [[],[]]\ntime_recording = 120\n\nif num_sensors == 2:\n    all_data = [[\"logrms1\",\"logrms2\"]]\n    fig0 = plt.figure(0)\nelif num_sensors == 3:\n    all_data = [[\"logrms1\",\"logrms2\", \"logrms3\"]]\n    if num_dim == 3:\n        fig0 = plt.figure(0)\n        ax = plt.axes(projection = \"3d\")\n    elif num_dim == 2:\n        fig0 = plt.figure(0)\nelif num_sensors == 4:\n    all_data = [[\"logrms1\",\"logrms2\", \"logrms3\", \"logrms4\"]]\n    fig0 = plt.figure(0)\n    ax = plt.axes(projection = \"3d\")\n    ax.view_init(elev = 90, azim = 0)\n    ax.set_zlabel(\"z\")\n    ax.set_xlabel(\"x\")\n    ax.set_ylabel(\"y\")\n\nstart_time = 
time.time()\nend_time = time.time()\n\n\"\"\" B. INITIAL DATA COLLECTION AND ANALYSIS \"\"\"\n\n\"\"\" 1. data collection loop \"\"\"\n\nwhile end_time - start_time < time_recording: #time in seconds\n if index >= 20: #arbitrary choice\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n for c in ser.readline():\n if not (c == 13):\n line.append(chr(c))\n elif (c == 13):\n a = (\"\".join(str(x) for x in line))\n a = a.replace(\"\\n\", \",\")\n a = a.split(\",\")\n a = ([x for x in a if x])\n if num_sensors == 2:\n if ((len(a) == 2)):\n if (len(a[0])>=4 & len(a[1])>=4):\n x = float(a[0])\n y = float(a[1])\n all_data.append([x,y])\n plt.scatter(x,y,s=10, c = \"y\")\n plt.pause(0.00001)\n elif num_sensors == 3:\n if num_dim == 3:\n if ((len(a) == 3)):\n if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4):\n x = float(a[0])\n y = float(a[1])\n z = float(a[2])\n all_data.append([x,y,z])\n if ver == \"norm\":\n ax.scatter(x,y,z, s=10, c = \"y\")\n plt.pause(0.00001)\n else:\n converted_xyz = threeDconv([[x],[y],[z]])\n all_data_proj3D[0].append(converted_xyz[0][0])\n all_data_proj3D[1].append(converted_xyz[1][0])\n all_data_proj3D[2].append(converted_xyz[2][0])\n j = converted_xyz[0][0]\n k = converted_xyz[1][0]\n l = converted_xyz[2][0]\n ax.scatter(j,k,l,s = 10, c = \"y\")\n plt.pause(0.00001)\n elif num_dim == 2:\n all_data_proj = [[],[]]\n if ((len(a) == 3)):\n if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4):\n x = float(a[0])\n y = float(a[1])\n z = float(a[2])\n all_data.append([x,y,z])\n converted_xyz = threeDtotwoD([[x],[y],[z]])\n all_data_proj2D[0].append(converted_xyz[0][0])\n all_data_proj2D[1].append(converted_xyz[1][0])\n j = converted_xyz[0][0]\n k = converted_xyz[1][0]\n plt.scatter(j,k,s=10, c = \"y\")\n plt.pause(0.00001)\n \n elif num_sensors == 4:\n if ((len(a) == 4)):\n if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4 & len(a[3])>=4):\n x = float(a[0])\n y = float(a[1])\n z = float(a[2])\n w = float(a[3])\n #all_data.append([x,y,z,w])\n converted_xyzw = fourDtothreeD([[x],[y],[z],[w]])\n #all_data_proj3D[0].append(converted_xyzw[0][0])\n #all_data_proj3D[1].append(converted_xyzw[1][0])\n #all_data_proj3D[2].append(converted_xyzw[2][0])\n j = converted_xyzw[0][0]\n k = converted_xyzw[1][0]\n l = converted_xyzw[2][0]\n #ax.scatter(j,k,l,s = 10, c = \"y\")\n ax.scatter(j,k,l)\n plt.pause(0.00001)\n \n \n line = []\n index += 1\n end_time = time.time()\n \n\"\"\" 2. storing data in csv file \"\"\"\n# \n# num1 = datetime.datetime.now().date() \n# num2 = datetime.datetime.now().time() \n# num = num1.isoformat() + \"...\" + num2.isoformat()\n# num = (str(num).replace(\":\",\"-\"))\n# num = (str(num).replace(\"-\",\".\"))\n# \n# with open('C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\\\csvfiles\\\\test' + num + '.csv', 'w', newline='') as csvfile:\n# writer = csv.writer(csvfile, delimiter=' ',\n# quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# for i in all_data:\n# writer.writerow(i)\n# \n# print(\"file saved as: \" + 'C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\\\csvfiles\\\\test' + str(num) + '.csv')\n\n\"\"\" 3. running the fuzzy c-means algorithm \"\"\"\n\n# alldata = np.transpose(np.asarray(all_data[1:]))\n# \n# cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(\n# alldata, 5, 2, error=0.0005, maxiter=10000, init=None, seed=None) #second par is clusters\n# cluster_membership = np.argmax(u, axis=0)\n\n\"\"\" 4. 
plotting the points and centroids \"\"\"\n\n# if num_sensors == 2:\n# cntr2 = cntr\n# for j in range(5): #change depending on number clusters\n# for i in range(len(cluster_membership)):\n# if cluster_membership[i] == j:\n# plt.scatter(alldata[0][i], alldata[1][i], color = colors[j], s= 20, marker = \".\")\n# plt.scatter(cntr[j][0], cntr[j][1], color = colors[j], s= 50, marker = \"s\")\n# elif (num_sensors == 3):\n# if num_dim == 3:\n# if ver == \"norm\":\n# cntr2 = cntr\n# for j in range(5):\n# for i in range(len(cluster_membership)):\n# if cluster_membership[i] == j:\n# ax.scatter(alldata[0][i], alldata[1][i], alldata[2][i],c = colors[j], s = 50, marker = \".\")\n# ax.scatter(cntr[j][0], cntr[j][1], cntr[j][2], c = colors[j], marker = \"s\", s = 50)\n# else:\n# cntr2 = cntr = np.transpose(np.array(np.transpose(threeDconv(cntr))))\n# for j in range(5):\n# for i in range(len(cluster_membership)):\n# if cluster_membership[i] == j:\n# ax.scatter(all_data_proj3D[0][i], all_data_proj3D[1][i], all_data_proj3D[2][i],c = colors[j], s = 50, marker = \".\")\n# ax.scatter(cntr2[j][0], cntr2[j][1], cntr2[j][2], c = colors[j], marker = \"s\", s = 50) \n# elif num_dim == 2:\n# cntr2 = np.transpose(np.array(threeDtotwoD(np.transpose(cntr))))\n# for j in range(5): #change depending on number clusters\n# for i in range(len(cluster_membership)):\n# if cluster_membership[i] == j:\n# plt.scatter(all_data_proj2D[0][i], all_data_proj2D[1][i], c = colors[j], s= 20, marker = \".\")\n# plt.scatter(cntr2[j][0], cntr2[j][1], color = colors[j], s= 50, marker = \"s\")\n# \n# elif (num_sensors == 4):\n# cntr2 = np.transpose(np.array(fourDtothreeD(np.transpose(cntr))))\n# for j in range(5):\n# for i in range(len(cluster_membership)):\n# if cluster_membership[i] == j:\n# ax.scatter(all_data_proj3D[0][i], all_data_proj3D[1][i], all_data_proj3D[2][i],c = colors[j], s = 50, marker = \".\")\n# ax.scatter(cntr2[j][0], cntr2[j][1], cntr2[j][2], c = colors[j], marker = \"s\", s = 50) \n\n\"\"\" 5. adding line between centroids \"\"\"\n\n# if num_dim == 3:\n# for point in cntr2:\n# for point2 in cntr2:\n# ax.plot([point[0], point2[0]], [point[1], point2[1]], [point[2], point2[2]],\"-b\")\n# if num_dim == 2:\n# for point in cntr2:\n# for point2 in cntr2:\n# plt.plot([point[0], point2[0]], [point[1], point2[1]],\"-b\")\n \n\"\"\" 6. saving figure as a png file \"\"\"\n \n# plt.savefig('C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\\\csvfiles\\\\fig' + num + '.png')\n# print (\"figure saved as: \"+ 'C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\csvfiles\\\\fig' + num + '.png')\n\nplt.savefig('C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\\\csvfiles\\\\testlive.png')\nprint (\"figure saved as: \"+ 'C:\\\\Users\\\\Michael\\\\Documents\\\\GitHub\\\\EMG\\\\test\\csvfiles\\\\testlive.png')\n\n\"\"\" 7. resetting the plot with only centroids \"\"\"\n# \n# if num_dim == 2:\n# fig1 = plt.figure(1)\n# if num_dim == 3:\n# fig1 = plt.figure(1)\n# ax = plt.axes(projection = \"3d\")\n# \n# for j in range(5): #change value to match clusters\n# if num_dim == 2:\n# plt.scatter(cntr2[j][0], cntr2[j][1], c = colors[j], marker = \"s\", s = 50)\n# elif num_dim == 3:\n# ax.scatter(cntr2[j][0], cntr2[j][1], cntr2[j][2], c = colors[j], marker = \"s\", s = 50)\n \n\n\"\"\" C. 
CONTINUOUS REAL-TIME TRACKING \"\"\"\n\n# while True: \n# ser.reset_input_buffer()\n# ser.reset_output_buffer()\n# for c in ser.readline():\n# if not (c == 13):\n# line.append(chr(c))\n# elif (c == 13):\n# a = (\"\".join(str(x) for x in line))\n# a = a.replace(\"\\n\", \",\")\n# a = a.split(\",\")\n# a = ([x for x in a if x])\n# \n# if num_sensors == 2:\n# if ((len(a) == 2)):\n# if (len(a[0])>=4 & len(a[1])>=4):\n# x = float(a[0])\n# y = float(a[1])\n# \n# a_array = np.asarray([[x], [y]])\n# v = fuzz.cluster.cmeans_predict(a_array, cntr, 2, error = 0.0005, maxiter = 10000)\n# cluster_num = int(np.argmax(v[0], axis = 0))\n# #print(a)\n# plt.scatter(x,y, s=40, color = colors[cluster_num])\n# plt.pause(0.000001)\n# plt.clf()\n# for j in range(5): #change value to match clusters\n# plt.plot(cntr[j][0], cntr[j][1], colors[j]+\"s\")\n# elif num_sensors == 3:\n# if num_dim == 3:\n# if ((len(a) == 3)):\n# if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4):\n# x = float(a[0])\n# y = float(a[1])\n# z = float(a[2])\n# a_array = np.asarray([[x], [y], [z]])\n# v = fuzz.cluster.cmeans_predict(a_array, cntr, 2, error = 0.0005, maxiter = 10000)\n# cluster_num = int(np.argmax(v[0], axis = 0))\n# #print(a)\n# if ver == \"norm\":\n# ax.scatter(x,y,z, s=40, color = colors[cluster_num])\n# plt.pause(0.00001)\n# plt.clf()\n# for j in range(5): #change value to match clusters\n# ax.scatter(cntr[j][0], cntr[j][1], cntr[j][2], c = colors[j], marker = \"s\", s = 50)\n# else:\n# converted_xyz = fourDtothreeD([[x],[y],[z]])\n# j = converted_xyz[0][0]\n# k = converted_xyz[1][0]\n# l = converted_xyz[2][0]\n# ax.scatter(j,k,l,s = 10, c = colors[cluster_num])\n# plt.pause(0.00001)\n# plt.clf()\n# for j in range(5): #change value to match clusters\n# ax.scatter(cntr2[j][0], cntr2[j][1], cntr2[j][2], c = colors[j], marker = \"s\", s = 50)\n# elif num_dim == 2:\n# if ((len(a) == 3)):\n# if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4):\n# x = float(a[0])\n# y = float(a[1])\n# z = float(a[2])\n# a_array = np.asarray([[x], [y], [z]])\n# v = fuzz.cluster.cmeans_predict(a_array, cntr, 2, error = 0.0005, maxiter = 10000)\n# cluster_num = int(np.argmax(v[0], axis = 0))\n# converted_xyz = threeDtotwoD([[x],[y],[z]])\n# j = converted_xyz[0][0]\n# k = converted_xyz[1][0]\n# plt.scatter(j,k,s=10, c = colors[cluster_num])\n# plt.pause(0.00001)\n# plt.clf()\n# for j in range(5): #change value to match clusters\n# plt.plot(cntr2[j][0], cntr2[j][1], colors[j]+\"s\")\n# \n# \n# elif num_sensors == 4:\n# if ((len(a) == 4)):\n# if (len(a[0])>=4 & len(a[1])>=4 & len(a[2])>=4 & len(a[3])>=4):\n# x = float(a[0])\n# y = float(a[1])\n# z = float(a[2])\n# w = float(a[3])\n# a_array = np.asarray([[x], [y], [z],[w]])\n# v = fuzz.cluster.cmeans_predict(a_array, cntr, 2, error = 0.0005, maxiter = 10000)\n# cluster_num = int(np.argmax(v[0], axis = 0))\n# converted_xyzw = fourDtothreeD([[x],[y],[z],[w]])\n# j = converted_xyzw[0][0]\n# k = converted_xyzw[1][0]\n# l = converted_xyzw[2][0]\n# ax.scatter(j,k,l,s = 10, c = colors[cluster_num])\n# plt.pause(0.00001)\n# plt.clf()\n# #for j in range(5): #change value to match clusters\n# # ax.scatter(cntr2[j][0], cntr2[j][1], cntr2[j][2], c = colors[j], marker = \"s\", s = 50)\n# \n# \n# line = []\n\n","sub_path":"test/livedata/livedata4sensors.py","file_name":"livedata4sensors.py","file_ext":"py","file_size_in_byte":17163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358973057","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is 
a temporary script file.\r\n\"\"\"\r\n########### Loading Libraries ###############################################\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport scipy.stats as stats\r\nfrom datetime import datetime as date\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nfrom geopy.distance import geodesic\r\nfrom geopy.distance import great_circle\r\nfrom scipy.stats import chi2_contingency\r\nimport statsmodels.api as sm\r\nfrom statsmodels.formula.api import ols\r\nfrom patsy import dmatrices\r\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn import metrics\r\nfrom sklearn.linear_model import LinearRegression as lm \r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom xgboost import XGBRegressor\r\nimport xgboost as xgb\r\nfrom sklearn.externals import joblib\r\n\r\n############## Set Working Directory #####################################\r\n\r\nos.chdir('C:\\\\Users\\\\kyvenkat\\\\Desktop\\\\W0376 Backup\\\\Documents Backup\\\\Python Files')\r\nos.getcwd()\r\n\r\n################## Importing Data #######################################\r\ncabfare = pd.read_csv(\"C:\\\\Users\\\\kyvenkat\\\\Desktop\\\\W0376 Backup\\\\Datasets\\\\cabfare.csv\")\r\ncabfare.head(5)\r\ncabfare.dtypes\r\n\r\n########### Converting Required Datatypes #############################\r\ncabfare['fare_amount'] = pd.to_numeric(cabfare['fare_amount'],errors = 'coerce')\r\ncabfare['passenger_count'] = pd.to_numeric(cabfare['passenger_count'],errors = 'coerce') \r\ncabfare['pickup_datetime']=pd.to_datetime(cabfare['pickup_datetime'],errors = 'coerce')\r\ncabfare[\"pickup_datetime\"] = pd.to_datetime(cabfare[\"pickup_datetime\"],format= \"%Y-%m-%d %H:%M:%S UTC\")\r\n\r\n############## Removing General Outliers ####################################\r\ncabfare = cabfare.drop(cabfare[cabfare[\"fare_amount\"]<=0].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"passenger_count\"]<1].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"passenger_count\"]>6].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_longitude\"]==0].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_latitude\"]==0].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_longitude\"]==0].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_latitude\"]==0].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_longitude\"]>180].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_longitude\"]< -180].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_longitude\"]>180].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_longitude\"]< -180].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_latitude\"]>90].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"pickup_latitude\"]< -90].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_latitude\"]>90].index, axis=0)\r\ncabfare = cabfare.drop(cabfare[cabfare[\"dropoff_latitude\"]< -90].index, axis=0)\r\n\r\n############ Missing Values Analysis 
#########################################\r\nmissing_val = pd.DataFrame(cabfare.isnull().sum())\r\ncabfare[cabfare['pickup_datetime'].isnull()].index\r\nmissing_val = missing_val.rename(columns = {'index':'variables',0:'Missing Values'})\r\nmissing_val_percentage = (missing_val['Missing Values']/len(cabfare))*100\r\nmissing_val.insert(1,\"Percentage\",missing_val_percentage)\r\nmissing_val =missing_val.sort_values('Missing Values', ascending = False)\r\n\r\n############### Missing Value Analysis ###############################################\r\n# 1. For fare_amount\r\n# Actual Value = 7\r\n# Mean Value = 15.1164\r\n# Median Value = 8.5\r\n\r\n# Choosing random value \r\n#cabfare['fare_amount'].loc[1000]\r\n#Replaing selected value with NA and Impute with mean\r\n#cabfare['fare_amount'].loc[1000] = np.nan \r\n#cabfare['fare_amount'].fillna(cabfare['fare_amount'].mean()).loc[1000]\r\n#cabfare['fare_amount'].fillna(cabfare['fare_amount'].mean(), inplace=True)\r\n\r\n#Replacing select random value with NA and Impute with median\r\n#cabfare['fare_amount'].loc[1000] = np.nan \r\n#cabfare['fare_amount'].fillna(cabfare['fare_amount'].median()).loc[1000]\r\n\r\ncabfare['fare_amount'].fillna(cabfare['fare_amount'].median(), inplace=True)\r\n\r\n# 2. For passenger_count\r\n# Actual Value = 1\r\n# Mode Value = 1\r\n# Median Value = 1\r\n# Choosing random value \r\n\r\n#cabfare['passenger_count'].loc[1000]\r\n#Replaing selected value with NA and Impute with mean\r\n#cabfare['passenger_count'].loc[1000] = np.nan \r\n#cabfare['passenger_count'].fillna(cabfare['passenger_count'].mode()[0]).loc[1000]\r\n\r\n#cabfare['passenger_count'].fillna(cabfare['passenger_count'].mode()[0], inplace=True)\r\n\r\n#Replacing select random value with NA and Impute with median\r\n#cabfare['passenger_count'].loc[1000] = np.nan \r\n#cabfare['passenger_count'].fillna(cabfare['passenger_count'].median()).loc[1000]\r\n\r\ncabfare['passenger_count'].fillna(cabfare['passenger_count'].median(),inplace=True)\r\n\r\n####### Removing date_time index which is not in the the standard date& time format\r\ncabfare = cabfare.drop(cabfare[cabfare['pickup_datetime'].isnull()].index, axis=0)\r\n\r\n################## Reset Index Number ##############################################\r\ncabfare = cabfare.reset_index(drop=True)\r\n\r\n############## Converting Passenger_count as factors ########################\r\n\r\ncabfare['passenger_count']=cabfare['passenger_count'].round().astype('object').astype('category')\r\n\r\n####### Adding more variables by splitting datetime variable ################\r\ncabfare['pick_year'] = cabfare['pickup_datetime'].apply(lambda row: row.year) \r\ncabfare['pick_month']= cabfare['pickup_datetime'].apply(lambda row: row.month) \r\ncabfare['pickup_hours']= cabfare['pickup_datetime'].apply(lambda row: row.hour) \r\ncabfare['pickup_day']= cabfare['pickup_datetime'].apply(lambda row: row.dayofweek)\r\n\r\n########## Converting longitude & latitude into distance #################################\r\n\r\ncabfare['distance']=cabfare.apply(lambda x: great_circle((x['pickup_latitude'],x['pickup_longitude']), (x['dropoff_latitude'], x['dropoff_longitude'])).m, axis=1)\r\n\r\n############## Remove where distance is zero #######################\r\ncabfare = cabfare.drop(cabfare[cabfare['distance']==0].index, axis=0)\r\n\r\n########## Dropping unwanted variables ##########################################\r\n\r\ncabfare = 
cabfare.drop(['pickup_datetime','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'], axis = 1)\r\n\r\n################### Converting year,month, day & hour into factors and levels###################\r\n# 1. defining internal function and apply where ever it is required\r\n\r\ndef time(x):\r\n ''' for sessions in a day using hour column '''\r\n if (x >=7) and (x <= 12):\r\n return 'Morning'\r\n elif (x >=19) and (x <=24 ):\r\n return 'Late Night'\r\n elif (x >= 12) and (x <= 18):\r\n return'After Noon'\r\n elif (x >=0) and (x <= 6) :\r\n return 'Early Morning'\r\n \r\ncabfare['pickup_hours'] = cabfare['pickup_hours'].apply(time)\r\ncabfare['pickup_hours'] = cabfare['pickup_hours'].astype('category')\r\ncabfare[\"pickup_hours\"] = cabfare[\"pickup_hours\"].cat.codes\r\n#################### Separating numeric & categorical variabless ############################################\r\n\r\ncabtrain_cat_var = ['passenger_count','pickup_day','pick_year','pick_month','pickup_hours']\r\n\r\ncabtrain_num_var = ['fare_amount','distance']\r\n\r\ncabfare[cabtrain_cat_var]=cabfare[cabtrain_cat_var].apply(lambda x: x.astype('category') )\r\n\r\ncabfare['pickup_day'] = cabfare['pickup_day'].cat.codes\r\ncabfare['pick_year'] = cabfare['pick_year'].cat.codes\r\ncabfare['pick_month'] = cabfare['pick_month'].cat.codes\r\n\r\n#################### Outlier Analysis ##################################################\r\n\r\n \r\ndef outlier_calculation(x):\r\n ''' calculating outlier index and replacing them with NA '''\r\n #Extract quartiles\r\n q75, q25 = np.percentile(cabfare[x], [75 ,25])\r\n print(q75,q25)\r\n #Calculate IQR\r\n iqr = q75 - q25\r\n #Calculate inner and outer fence\r\n minimum = q25 - (iqr*1.5)\r\n maximum = q75 + (iqr*1.5)\r\n print(minimum,maximum)\r\n #Replace with NA\r\n cabfare.loc[cabfare[x] < minimum,x] = np.nan\r\n cabfare.loc[cabfare[x] > maximum,x] = np.nan \r\n\r\n# Finding the outliers for fare_amount & distance\r\noutlier_calculation('fare_amount')\r\noutlier_calculation('distance')\r\n\r\n# Imputing with missing values using median\r\ncabfare['fare_amount'].fillna(cabfare['fare_amount'].median(), inplace=True) \r\ncabfare['distance'].fillna(cabfare['distance'].median(), inplace=True) \r\n \r\n# Again converting to categorical variables \r\ncabfare[cabtrain_cat_var]=cabfare[cabtrain_cat_var].apply(lambda x: x.astype('category') )\r\n\r\n############# Feature Selection ################################\r\n\r\n# Correlation analysis for numeric variables\r\nsns.heatmap(cabfare[cabtrain_num_var].corr(), square=True, cmap='RdYlGn',linewidths=0.5,linecolor='w',annot=True)\r\nplt.title('Correlation matrix ')\r\n \r\n# ANOVA for Categorical Variables and find significant variables \r\nmodel = ols('fare_amount ~ C(passenger_count)+C(pick_year)+C(pick_month)+C(pickup_hours)+C(pickup_day)',data=cabfare).fit() \r\nanova_table = sm.stats.anova_lm(model) \r\nanova_table\r\n\r\n############# Dropping Insignificant Variable ##############\r\ncabfare = cabfare.drop(['pickup_day'], axis = 1)\r\n\r\n#################### Normalization ########################################\r\nsns.distplot(cabfare['distance'],bins=100)\r\n\r\ncabfare['distance'] = (cabfare['distance'] - min(cabfare['distance']))/(max(cabfare['distance']) - min(cabfare['distance']))\r\n\r\n################ Setting train & test data #################################\r\n \r\nx = cabfare.drop('fare_amount',axis=1).values\r\ny = 
cabfare['fare_amount'].values\r\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)\r\n\r\nprint(cabfare.shape, x_train.shape, x_test.shape,y_train.shape,y_test.shape)\r\n\r\n###################### Multiple Linear Regression ###################################\r\n\r\nlm_model=lm().fit(x_train,y_train)\r\npredictions=lm_model.predict(x_test)\r\n\r\nexpected_values = x_test\r\npredicted_values = predictions\r\n\r\nrms = np.sqrt(mean_squared_error(y_test, predictions))\r\n## rms value =2.62\r\n\r\n####################### Decision Tree ###########################################\r\ntree = DecisionTreeRegressor()\r\ntree.fit(x_train,y_train)\r\npredictions = tree.predict(x_test)\r\n\r\nrms = np.sqrt(mean_squared_error(y_test, predictions))\r\n\r\n#rms value = 3.49\r\n\r\n##################### Random Forest ###########################################\r\n\r\nforest = RandomForestRegressor()\r\nforest.fit(x_train,y_train)\r\npredictions = forest.predict(x_test)\r\n\r\nrms = np.sqrt(mean_squared_error(y_test, predictions))\r\n\r\n# rms value =2.707\r\n\r\n############### XGBoost Method ##############################################\r\nXgb = XGBRegressor()\r\nXgb.fit(x_train,y_train)\r\npredictions = Xgb.predict(x_test)\r\n\r\nrms = np.sqrt(mean_squared_error(y_test, predictions))\r\n\r\n# rms value = 2.473\r\n\r\n############## Loading Test file ##############################################\r\n\r\ncabfare_test = pd.read_csv(\"C:\\\\Users\\\\Venkatesh K\\\\Downloads\\\\test\\\\test.csv\")\r\n\r\n\r\ncabfare_test['passenger_count'] = pd.to_numeric(cabfare_test['passenger_count'],errors = 'coerce') \r\ncabfare_test['pickup_datetime']=pd.to_datetime(cabfare_test['pickup_datetime'],errors = 'coerce')\r\ncabfare_test[\"pickup_datetime\"] = pd.to_datetime(cabfare_test[\"pickup_datetime\"],format= \"%Y-%m-%d %H:%M:%S UTC\")\r\n\r\ncabfare_test['distance']=cabfare_test.apply(lambda x: great_circle((x['pickup_latitude'],x['pickup_longitude']), (x['dropoff_latitude'], x['dropoff_longitude'])).m, axis=1)\r\n\r\ncabfare_test['pick_year'] = cabfare_test['pickup_datetime'].apply(lambda row: row.year) \r\ncabfare_test['pick_month']= cabfare_test['pickup_datetime'].apply(lambda row: row.month) \r\ncabfare_test['pickup_hours']= cabfare_test['pickup_datetime'].apply(lambda row: row.hour) \r\ncabfare_test['pickup_day']= cabfare_test['pickup_datetime'].apply(lambda row: row.dayofweek)\r\n\r\ncabfare_test = cabfare_test.drop(['pickup_datetime','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'], axis = 1)\r\n\r\ncabtest_cat_var = ['pick_year','pick_month','pickup_hours','pickup_day']\r\ncabtest_num_var = ['distance']\r\n\r\ncabfare_test['pickup_hours'] = cabfare_test['pickup_hours'].apply(time)\r\ncabfare_test['pickup_hours'] = cabfare_test['pickup_hours'].astype('category')\r\ncabfare_test[\"pickup_hours\"] = cabfare_test[\"pickup_hours\"].cat.codes\r\n#################### Separating numeric & categorical variabless ############################################\r\n\r\ncabfare_test[cabtest_cat_var]=cabfare_test[cabtest_cat_var].apply(lambda x: x.astype('category') )\r\n\r\ncabfare_test['pickup_day'] = cabfare_test['pickup_day'].cat.codes\r\ncabfare_test['pick_year'] = cabfare_test['pick_year'].cat.codes\r\ncabfare_test['pick_month'] = cabfare_test['pick_month'].cat.codes\r\n\r\ncabfare_test[cabtest_cat_var]=cabfare_test[cabtest_cat_var].apply(lambda x: x.astype('category') )\r\n\r\n\r\ncabfare_test = cabfare_test.drop(['pickup_day'],axis=1)\r\n\r\n\r\n\r\n################ Finalize 
Model #################################\r\n \r\n\r\nXgb = XGBRegressor()\r\nXgb.fit(x,y)\r\npredictions = Xgb.predict(cabfare_test.values)\r\n\r\n\r\na=pd.read_csv(\"C:\\\\Users\\\\kyvenkat\\\\Desktop\\\\W0376 Backup\\\\Datasets\\\\cabfare_test.csv\")\r\ncabfare_test_pickup_datetime=a['pickup_datetime']\r\n\r\npred_results_wrt_date = pd.DataFrame({\"pickup_datetime\":cabfare_test_pickup_datetime,\"fare_amount\" : predictions})\r\n\r\njoblib.dump(Xgb, 'cab_fare_xgboost_model.pkl') \r\n\r\n\r\npred_results_wrt_date.to_csv(r'C:\\Users\\kyvenkat\\Desktop\\W0376 Backup\\Documents Backup\\pred_results_wrt_date_python.csv')\r\n\r\n","sub_path":"Cabfare_Prediction_Final_Python.py","file_name":"Cabfare_Prediction_Final_Python.py","file_ext":"py","file_size_in_byte":14045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"342471829","text":"import sys\nimport copy\nfrom itertools import permutations\n\nclass Opcode():\n def __init__(self, full_opcode):\n self.full_opcode = \"{:05d}\".format(int(full_opcode))\n self.op_code = int(self.full_opcode[-2:])\n self.modes = list(map(int, self.full_opcode[:3]))\n\n def getOC(self):\n return self.op_code\n \n def getModes(self):\n return self.modes\n\nclass intMachine():\n def __init__(self, program, stdin=[]):\n self.program = copy.copy(program) + [0 for i in range(9000000)]\n self.p = 0\n self.ter = set([1,2,7,8])\n self.bin = set([5,6])\n self.stdin = copy.copy(stdin)\n self.p_in = 0\n self.stdout = []\n self.halt = False\n self.WAIT = -1\n self.offset = 0\n\n def isHalted(self):\n return self.halt\n\n def run(self):\n while True:\n foc = self.readOC()\n if foc.getOC() == 99:\n self.halt = True\n self.p -= 2\n return self.stdout\n\n a,b,c = foc.getModes()\n\n if foc.getOC() in self.ter:\n op1 = self.readNext(c)\n op2 = self.readNext(b)\n if(a == 0): a+=1\n op3 = self.readNext(a, w=1)\n self.writeTo(op3, self.calc2(foc.getOC(), op1, op2))\n elif foc.getOC() in self.bin:\n op1 = self.readNext(c)\n op2 = self.readNext(b)\n self.p = self.calc(foc.getOC(), op1, op2)\n else:\n if(self.io(foc.getOC(),c) == self.WAIT):\n return self.stdout\n \n def appendStdin(self, data):\n self.stdin += [data]\n \n def re_run(self):\n #self.p -= 2\n return self.run()\n\n def readOC(self):\n raw = self.readNext(1)\n return Opcode(raw)\n \n def readNext(self, mode=1, w=0):\n r = self.program[self.p]\n self.p += 1\n if mode == 0 and w==1: return r\n if mode == 0: r = self.program[r]\n elif mode == 2:\n r = self.offset+int(r)\n if w==0: r = self.program[r]\n if int(r) == 2343:\n a=1\n return int(r)\n \n def calc(self, mode, a, p):\n if (mode == 5): return p if a!=0 else self.p\n elif(mode == 6): return p if a==0 else self.p\n else: \n print(\"Algo ha ido mal\", file = sys.stderr)\n \n\n\n def calc2(self, mode, a, b):\n if(mode == 1): return a+b\n elif(mode == 2): return a*b\n elif(mode == 7): return 1 if a 1:\n msg_suffix = '{0} policies'.format(len(self.active_policies))\n utils.log(\"Initialized policy engine with {0}.\".format(msg_suffix))\n else:\n utils.log(\"No active policies found.\")\n\n def run_policy_enforcement(self, name, version, dependencies, api_list, owner):\n if self.active_policies:\n immutable_api_list = []\n for api in api_list:\n immutable_api_list.append(API(api['name'], api['version']))\n app = Application(name, version, dependencies, immutable_api_list, owner)\n errors = []\n for policy in self.active_policies:\n policy.evaluate(app, errors)\n if errors:\n return False, '|'.join(errors)\n return True, 
None\n\n def add_policy(self, name, content):\n regex = re.compile(\"^[a-zA-Z0-9_]+$\")\n if not regex.match(name):\n return False, 'Invalid policy name: Only letters, digits and underscores are allowed'\n file_path = os.path.join(self.policy_store_dir, name + '.py')\n if os.path.exists(file_path):\n return False, 'Policy {0} already exists'.format(name)\n file_handle = open(file_path, 'w')\n file_handle.write(content)\n file_handle.flush()\n file_handle.close()\n try:\n new_policy = Policy(file_path)\n self.active_policies.append(new_policy)\n return True, None\n except Exception as ex:\n os.remove(file_path)\n return False, 'Error while parsing policy: {0}'.format(ex.message)\n\n def remove_policy(self, name):\n file_path = os.path.join(self.policy_store_dir, name + '.py')\n if os.path.exists(file_path):\n os.remove(file_path)\n for p in self.active_policies:\n if p.name == name:\n self.active_policies.remove(p)\n break\n return True\n else:\n return False\n","sub_path":"Eager/policy/policy_engine.py","file_name":"policy_engine.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"478955345","text":"import numpy as np\nimport os\nimport cv2\nimport datetime\n\ncap = cv2.VideoCapture(0)\nwhile(True):\n ret, frame = cap.read()\n ir_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n assert ir_image.shape == (512, 640), ir_image.shape\n cv2.imshow('ir_image',ir_image)\n\n now = datetime.datetime.now()\n print('now:%02d:%02d:%02d'%(now.hour, now.minute, now.second))\n if now.second == 0 or now.second ==1:\n fn = 'ir_record/ir_%02d_%02d.png'%(now.hour, now.minute)\n if not os.path.exists(fn):\n print(fn)\n cv2.imwrite(fn, ir_image)\n\n key = cv2.waitKey(1000)\n if key == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"FLIR/boson.py","file_name":"boson.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"442089720","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------\n# @ File : cfgs.py\n# @ Description: \n# @ Author : Alex Chung\n# @ Contact : yonganzhong@outlook.com\n# @ License : Copyright (c) 2017-2018\n# @ Time : 2020/9/29 下午1:44\n# @ Software : PyCharm\n#-------------------------------------------------------\n\n\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\n\n# ------------------------------------------------\nVERSION = 'Image_Caption_20200921'\nNET_NAME = 'image_caption'\n\n\n#------------------------------GPU config----------------------------------\n# device_name = tf.test.gpu_device_name()\n# if device_name != '/device:GPU:0':\n# raise SystemError('GPU device not found')\n# print('Found GPU at: {}'.format(device_name))\n# print(tf.test.is_gpu_available())\n# ------------get gpu and cpu list------------------\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\n# cpus = tf.config.experimental.list_physical_devices(device_type='CPU')\n# print(gpus)\n# print(cpus)\n\n# ------------------set visible of current program-------------------\n# method 1 Terminal input\n# $ export CUDA_VISIBLE_DEVICES = 2, 3\n# method 1\n# os.environ['CUDA_VISIBLE_DEVICES'] = \"2,3\"\n# method 2\ntf.config.experimental.set_visible_devices(devices=gpus[0], device_type='GPU')\n# ----------------------set gpu memory allocation-------------------------\n# method 1: set 
memory size dynamic growth\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n# method 2: set allocate static memory size\n# tf.config.experimental.set_virtual_device_configuration(\n# device=gpus[0],\n# logical_devices = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)]\n# )\n\n# ---------------------------------------- System_config----------------------------\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nprint (20*\"++--\")\nprint (ROOT_PATH)\nGPU_GROUP = \"4\"\nSHOW_TRAIN_INFO_INTE = 100\nSMRY_ITER = 100\nSAVE_WEIGHTS_ITER = 5\n\nSUMMARY_PATH = ROOT_PATH + '/outputs/summary'\nINFERENCE_SAVE_PATH = ROOT_PATH + '/outputs/inference_results'\nTEST_SAVE_PATH = ROOT_PATH + '/outputs/test_results'\nINFERENCE_IMAGE_PATH = ROOT_PATH + '/outputs/inference_image'\n# INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'\n# IMAGE_FEATURE_PATH = ROOT_PATH + '/data/image_feature'\nIMAGE_SAVE_PATH = ROOT_PATH + '/outputs/generate_image'\n\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'outputs/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/outputs/evaluate_result'\n\nWORD_INDEX = ROOT_PATH + '/outputs/word_index.pickle'\nSEQ_MAX_LENGTH = ROOT_PATH + '/outputs/seq_max_length.pickle'\n\n#----------------------Data---------------------\nDATASET_PATH = '/media/alex/AC6A2BDB6A2BA0D6/alex_dataset/COCO_2017'\n\n#------------------------network config--------------------------------\nZ_DIM = 100\nBATCH_SIZE = 128\n# SEQUENCE_LENGTH = 100 # the number in singe time dimension of a single sequence of input data\n\n\n# NUM_UNITS = [128, 64, 32]\n#-------------------------train config-------------------------------\nEMBEDDING_TRANSFER = False\nGENERATOR_LEARNING_RATE = 1e-4\nDISCRIMINATOR_LEARNING_RATE = 4e-4\nNUM_EPOCH = 50\nDROPOUT_RATE = 0.3\n\n# data\nSPLIT_RATIO = 0.2","sub_path":"libs/configs/cfgs.py","file_name":"cfgs.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"104700521","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('lessons', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Course',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=256)),\n ('slug', models.SlugField(unique=True, max_length=128)),\n ('access_code', models.CharField(max_length=8, null=True, blank=True)),\n ],\n options={\n 'permissions': (('access', 'Access'), ('instructor', 'Instructor'), ('manage', 'Manager')),\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='CourseLesson',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('course', models.ForeignKey(related_name='crs_lessons', to='courses.Course')),\n ('lesson', models.ForeignKey(related_name='crs_courses', to='lessons.Lesson')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='CoursePermission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('role', models.CharField(default=b'student', max_length=48, choices=[(b'manager', b'Manager'), (b'instructor', 
b'Instructor'), (b'student', b'Student')])),\n ('course', models.ForeignKey(related_name='course_permissions', to='courses.Course')),\n ('user', models.ForeignKey(related_name='user_course_permissions', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"courses/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"414793965","text":"#ErlenmeyerFlasks-WasilewiczD-MingM\n#SoftDev1 pd6\n#K #10: Jinja Tuning\n#2018-09-23\n#imports Flask and the ability to render templates\nfrom flask import Flask, render_template\n#for reading files\nimport csv, random\nfrom csv import reader\n#creates instance of Flask\napp = Flask(__name__)\n\n\n#creates dictionary (empty)\nOCCLIST = {}\n#creates dictionary with only occupation name\nrandlist = {}\n#takes in file, returns random occupation with weighted probability\ndef randomOcc(filename):\n #opens file and reads it\n try:\n file = open(filename, \"r\")\n #deals with file not found\n except:\n print(\"file not found\")\n return 0\n #reads file\n red = file.read()\n #split by lines, excluding title and empty line at bottom\n lines = red.split(\"\\n\")[1:-2]\n read = csv.reader(lines)\n #iterates, adds key and weight to OCCLIST\n for r in read:\n #randlist takes only percentage, to be used in randomNum\n temp = float(r[1]) * 10\n randlist[r[0]] = int(temp)\n #takes occupation percentage and helpful link\n value = [float(r[1]), r[2]]\n OCCLIST[r[0]] = value\n #to be polite\n file.close()\n return OCCLIST\n\ndef randomNum(lists):\n #random number in range 0-998, because values are multiplied by 10\n randy = random.randint(0, 998)\n #counter keeps track of what percentage we're up to\n counter = 0\n #returns key based with weighted probability\n for key in lists.items():\n #compares random number to % we're up to;\n #if current percentage is greater than randy, return key\n counter+= float(key[1])\n if counter >= randy:\n return key[0]\n #if not, add counter to current percentage\n","sub_path":"10_occupy_flask_st/util/ErlenmeyerFlasks.py","file_name":"ErlenmeyerFlasks.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"594788246","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 7 13:56:52 2019\n\n@author: cbegeman\n\"\"\"\n\nimport sys\nsys.path.append('/home/ac.cbegeman/e3sm-cryo-analysis-scripts/great_circle_calculator')\n#sys.path.append('/global/homes/c/cbegeman/e3sm-cryo-analysis-scripts/great_circle_calculator')\nimport os\nimport csv\nimport netCDF4\nimport cartopy\nimport pyproj\n#import LatLon\nimport great_circle_calculator.great_circle_calculator as great_circle\n#import datetime\nimport numpy as np\nimport matplotlib as pltlib\nfrom matplotlib import ticker,rc\nimport matplotlib.pyplot as plt\nimport scipy.signal\nimport scipy.interpolate as interp\nimport matplotlib.cm as cmx\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LogNorm,Normalize\nfrom matplotlib.colors import SymLogNorm\nimport cmocean\nfrom math import pi,sqrt,nan,atan,cos,floor,ceil\nimport pandas\nfrom extract_depths import zmidfrommesh\nfrom plot_config import *\nfrom region_config import *\n\ndef pick_point(lat=-9999,lon=-9999,\n run='ISMF',placename = '',\n vartype='velocity',transect_name='',plot_region = 'frisEAcoast',\n 
plot_map=False, overwrite=False, savepath=savepath):\n \n fmesh = netCDF4.Dataset(meshpath[runname.index(run)])\n \n if lat == -9999:\n lat = region_coordbounds[region_name.index(placename)][1,1]\n lon = region_coordbounds[region_name.index(placename)][0,0]\n \n # import variables from file\n latCell = fmesh.variables['latCell'][:]\n lonCell = fmesh.variables['lonCell'][:]\n xCell = fmesh.variables['xCell'][:]\n yCell = fmesh.variables['yCell'][:]\n if vartype== 'velocity':\n latpt = fmesh.variables['latEdge'][:]\n lonpt = fmesh.variables['lonEdge'][:]\n #idxpt = fmesh.variables['indexToEdgeID'][:]\n xpt = fmesh.variables['xEdge'][:]\n ypt = fmesh.variables['yEdge'][:]\n \n # the outcome of all these options is a list of x,y points with varying spacing and number \n geod = pyproj.Geod(ellps='WGS84')\n dlat = 0.3 # at 30km resolution, distance between cells in deg\n dlon = 0.98\n \n if placename == '':\n location_name = (str(int(abs(lat))) + 'S' + \n str(int(abs(lon-360))) + 'W')\n else:\n location_name = placename\n \n candidate_bool = ( (latCell > (lat-dlat)*deg2rad ) & \n (latCell < (lat+dlat)*deg2rad ) &\n (lonCell > (lon-dlon)*deg2rad ) & \n (lonCell < (lon+dlon)*deg2rad ) )\n candidate_idx = np.asarray(candidate_bool.nonzero(),dtype=int)[0,:]\n _,_,distance_from_point = geod.inv(\n [lon for j in candidate_idx],\n [lat for j in candidate_idx],\n lonCell[candidate_idx]/deg2rad,\n latCell[candidate_idx]/deg2rad)\n cellidx = int(candidate_idx[np.argmin(distance_from_point)])\n\n if plot_map:\n fig = plt.figure()\n idx = pick_from_region(region=plot_region,run=run)\n zmax = np.multiply(-1,fmesh.variables['bottomDepth'][idx])\n zice = fmesh.variables['landIceDraft'][0,idx]\n \n cntr1 = plt.tricontourf(yCell[idx].flatten(), xCell[idx].flatten(),\n zmax.flatten(), 20, cmap=\"viridis\")\n #plt.plot(yCell[idx], xCell[idx], 'o', color = 'white',\n # markersize = 4, fillstyle = 'none')#, alpha = 0.5)\n plt.plot(yCell[cellidx], xCell[cellidx], 'o', color = 'red',\n markersize = 4)\n #cntr = plt.tricontour(yCell[idx].flatten(), xCell[idx].flatten(),\n # zice.flatten(), [-10], colors = 'k')\n #if varlim:\n # plt.clim([varmin[vartitle.index('z')], varmax[vartitle.index('z')]])\n plt.axis('equal')\n cbar = plt.colorbar(cntr1)\n cbar.set_label('Depth (m)')\n plot_filename = 'bathy_' + location_name\n print(plot_filename)\n plt.savefig(savepath + plot_filename + '.png',dpi=set_dpi)\n plt.close()\n\n return cellidx,location_name\n\n# PICK_TRANSECT\n# option choose transect by 'coord' or 'contour'\n# if 'contour' need to provide latitude limits throgh latS and lonW\n# if 'coord' only need to provide contour endpoints\n# contourvar variable to choose contour from\n# contourval value of contour\n# vartype 'scalar' or 'velocity\n#\n# Outputs:\n# cellidx\n# edgeidx\n# dist: distance between each cell center and the cell center of the first cell\n# angle: mean orientation of transect from left to right\ndef pick_transect(option='coord', lat=[-76, -76], lon=[360-32, 360-32], run='CGM-UIB',\n vartype='velocity',transect_name='',scope_name = 'frisEAcoast',\n overwrite=False, savepath=savepath,append=False):\n \n fmesh = netCDF4.Dataset(meshpath[runname.index(run)])\n\n # import variables from file\n latcell = fmesh.variables['latCell'][:]\n loncell = fmesh.variables['lonCell'][:]\n xcell = fmesh.variables['xCell'][:]\n ycell = fmesh.variables['yCell'][:]\n zbottom = np.multiply(-1,fmesh.variables['bottomDepth'][:])\n #if vartype == 'scalar':\n #latpt = fmesh.variables['latCell'][:]\n #lonpt = 
fmesh.variables['lonCell'][:]\n if vartype== 'velocity':\n latpt = fmesh.variables['latEdge'][:]\n lonpt = fmesh.variables['lonEdge'][:]\n #idxpt = fmesh.variables['indexToEdgeID'][:]\n xpt = fmesh.variables['xEdge'][:]\n ypt = fmesh.variables['yEdge'][:]\n \n # the outcome of all these options is a list of x,y points with varying spacing and number \n if option == 'coord':\n geod = pyproj.Geod(ellps='WGS84')\n dlat = 0.3 # at 30km resolution, distance between cells in deg\n dlon = 0.98\n \n if transect_name != '':\n #TODO edit using region_name,region_xybounds, region_coordbounds\n lat = [region_coordbounds[region_name.index(transect_name),1,0],\n region_coordbounds[region_name.index(transect_name),1,1]]\n lon = [region_coordbounds[region_name.index(transect_name),0,0],\n region_coordbounds[region_name.index(transect_name),0,1]]\n zmax = region_zbounds[region_name.index(transect_name),1]\n zmin = region_zbounds[region_name.index(transect_name),0]\n else:\n transect_name = (str(int(abs(lat[0]))) + 'S' + \n str(int(abs(lon[0]-360))) + 'W-' + \n str(int(abs(lat[1]))) + 'S' + \n str(int(abs(lon[1]-360))) + 'W' )\n zmax = 0.\n zmin = -9999.\n \n p1,p2 = (lon[0]-360,lat[0]), (lon[1]-360,lat[1])\n frac_along_route = 0.05\n lat_interp = np.zeros((int(1/frac_along_route)))\n lon_interp = np.zeros((int(1/frac_along_route)))\n transect_idx = np.zeros((int(1/frac_along_route)),dtype=int)\n \n for i,frac in enumerate(np.arange(0,1,frac_along_route)):\n lon_interp[i],lat_interp[i] = great_circle.intermediate_point(p1,p2,frac)\n if lon_interp[i] < 0:\n lon_interp[i] += 360\n logical_trans = ( (latcell > (lat_interp[i]-dlat)*deg2rad ) & \n (latcell < (lat_interp[i]+dlat)*deg2rad ) &\n (loncell > (lon_interp[i]-dlon)*deg2rad) & \n (loncell < (lon_interp[i]+dlon)*deg2rad) \n ) \n candidate_idx = np.asarray(logical_trans.nonzero(),dtype=int)[0,:]\n distance_from_point = np.zeros((np.shape(candidate_idx)))\n _,_,distance_from_point = geod.inv(\n [lon_interp[i] for j in candidate_idx],\n [lat_interp[i] for j in candidate_idx],\n loncell[candidate_idx]/deg2rad,\n latcell[candidate_idx]/deg2rad)\n transect_idx[i] = int(candidate_idx[np.argmin(distance_from_point)])\n _,temp_idx = np.unique(transect_idx,return_index=True)\n cellidx = transect_idx[np.sort(temp_idx)]\n zbool = (zbottom[cellidx] < zmax) & (zbottom[cellidx] > zmin) \n cellidx = cellidx[zbool]\n edgeidx = []\n dist = np.sqrt( np.square(fmesh.variables['yCell'][cellidx] - \n fmesh.variables['yCell'][cellidx[0]]) + \n np.square(fmesh.variables['xCell'][cellidx] - \n fmesh.variables['xCell'][cellidx[0]]) )\n #elif select_cell == 'connecting':\n # # NOT FUNCTIONAL\n \n elif option== 'zcontour':\n print('option ',option,' is not yet enabled') \n \n elif option == 'by_index':\n datestr = '{0:04d}-{1:02d}'.format(98,1)\n #filename = ('{0}/mpaso.hist.am.timeSeriesStatsMonthly.'\n # .format(runpath[runname.index(run)]) \n # + datestr + '-01.nc')\n #f = netCDF4.Dataset(filename, 'r')\n \n if transect_name == 'trough_shelf':\n cellidx = np.subtract(cells_trough_shelf_lat,1)\n elif transect_name == 'trough_ice':\n cellidx = np.subtract(cells_trough_ice_lat,1)\n else:\n print('transect name not matched')\n \n #uCell = f.variables['timeMonthly_avg_velocityZonal'][0,idx,:]\n #vCell = f.variables['timeMonthly_avg_velocityMeridional'][0,idx,:]\n\n x_transect = fmesh.variables['xCell'][cellidx]\n y_transect = fmesh.variables['yCell'][cellidx]\n edgesOnCell = np.subtract(fmesh.variables['edgesOnCell'][cellidx,:],1)\n verticesOnCell = 
np.subtract(fmesh.variables['verticesOnCell'][cellidx,:],1)\n verticesOnEdge = np.zeros((len(cellidx),7,2))\n for i in range(len(cellidx)):\n for j in range(7):\n verticesOnEdge[i,j,:] = (\n fmesh.variables['verticesOnEdge'][edgesOnCell[i,j],:])\n \n # Select edges based on their orientation with respect to the transect line\n edgeidx = [] \n x0 = xcell[cellidx[0]]\n y0 = ycell[cellidx[0]]\n x1 = xcell[cellidx[-1]]\n y1 = ycell[cellidx[-1]]\n m = (y1 - y0)/(x1 - x0)\n b = (x1*y0 - x0*y1)/(x1 - x0)\n angle = atan(1/m)\n if vartype == 'velocity':\n #transect_angle = angle/deg2rad \n #if transect_angle>=180:\n # transect_angle += -360\n #elif transect_angle<-180:\n # transect_angle += 360\n #if transect_angle < 0:\n # transect_angle += 180\n #print('transect_angle = ',transect_angle)\n #print('x0,y0 = ',x0,y0)\n #print('x1,y1 = ',x1,y1)\n idx = [] \n dxy = 1e3\n #dxy = 5e3\n for i,celli in enumerate(cellidx):\n for j,edge in enumerate(edgesOnCell[i,:]):\n angleEdge = fmesh.variables['angleEdge'][edge]#edgesOnCell[i,j]]\n ye = fmesh.variables['yEdge'][edge]\n xe = fmesh.variables['xEdge'][edge]\n if abs(x0-x1) > (y0-y1):#only restrictions on y\n ym = m*xe + b\n xlim = xe#xcell[celli]\n ylim = ym - dxy\n #print('xe,ym = ',xe,ym)\n else:#only restrictions on x\n xm = (ye - b)/m\n xlim = xm - dxy\n ylim = ye#ycell[celli]\n #print('xm,ye = ',xm,ye)\n #if i == 1:\n # print('xcell,ycell = ',xcell[celli],ycell[celli])\n # print('xe,ye = ',xe,ye)\n # print('xlim,ylim = ',xlim,ylim)\n if ( (xe <= xlim) and (ye <= ylim) \n #and abs(xe-xcell[celli])>1e3 and abs(ye-ycell[celli])>1e3\n ):\n #edge not in edgesOnCell[i+1,:]) ):\n edgeidx.append(edgesOnCell[i,j])\n idx.append(celli)\n #edge_angle = angleEdge/deg2rad\n #if edge_angle>=180:\n # edge_angle += -360\n #elif edge_angle<-180:\n # edge_angle += 360\n #if edge_angle < 0:\n # edge_angle += 180\n #dangle = edge_angle-transect_angle\n #print('edge_angle = ',edge_angle)\n #print('dangle = ',dangle)\n cellidx = idx\n if vartype == 'velocity':\n dist = np.sqrt( np.square(fmesh.variables['yEdge'][edgeidx] - \n fmesh.variables['yEdge'][edgeidx[0]]) + \n np.square(fmesh.variables['xEdge'][edgeidx] - \n fmesh.variables['xEdge'][edgeidx[0]]) )\n else:\n dist = np.sqrt( np.square(fmesh.variables['yCell'][cellidx] - \n fmesh.variables['yCell'][cellidx[0]]) + \n np.square(fmesh.variables['xCell'][cellidx] - \n fmesh.variables['xCell'][cellidx[0]]) )\n \n # TODO replace above method with that below\n # include all edges whose vertices have y-coordinates above or on a line \n # connecting neighboring cell centers\n# for i,cell in enumerate(cellidx):\n# if cell == cellidx[-1]:\n# m = ( (y_transect[i] - y_transect[i-1]) /\n# (x_transect[i] - x_transect[i-1]) )\n# b = ( (x_transect[i] * y_transect[i-1] - \n# x_transect[i-1] * y_transect[i]) /\n# (x_transect[i] - x_transect[i-1]) )\n# else:\n# m = ( (y_transect[i+1] - y_transect[i]) /\n# (x_transect[i+1] - x_transect[i]) )\n# b = ( (x_transect[i+1] * y_transect[i] - \n# x_transect[i] * y_transect[i+1]) /\n# (x_transect[i+1] - x_transect[i]) )\n# for j,edge in enumerate(edgesOnCell[i,:]):\n# x2 = fmesh.variables['xVertex'][verticesOnEdge[i,j,:]\n# y2 = fmesh.variables['yVertex'][verticesOnEdge[i,j,:]\n# if ( ( y2[0] >= m*x2[0] + b) and \n# ( y2[1] >= m*x2[1] + b) and\n# ( x2[1] >= m*x2[1] + b) and\n# ( edge not in edgesOnCell[i+1,:]) ):\n# edgeidx.append(edge)\n \n\n # print('idxcell',str(i),':',str(ycell[i]),',',str(xcell[i]))\n # shared_edges = []\n # shared_verts = []\n # xEdge = 
fmesh.variables['xEdge'][edgesOnCell[i,:]]))\n # edge1 = np.argmin(xEdge)\n # fmesh.variables[]verticesOnCell[i,:] = (\n # vert1 = verticesOnCell[i,:]\n # vert2 = verticesOnCell[i+1,:]\n # edges1 = edgesOnCell[i,:]\n # print('xedge:',str(fmesh.variables['xEdge'][edges1-1]))\n # edges2 = fmesh.variables['edgesOnCell'][i+1,:]\n # for j in vert1:\n # if j in vert2:\n # np.append(shared_verts,j)\n # print('len(shared_edges) idx=',str(i),'=',str(len(shared_edges))) \n # for j in edges1:\n # if j in edges2:\n # np.append(shared_edges,j)\n # print('len(shared_verts) idx=',str(i),'=',str(len(shared_verts))) \n #for i in idx:\n # print('idxcell',str(i),':',str(ycell[i]),',',str(xcell[i]))\n # ii = fmesh.variables['indexToCellID'][:].index(i)\n # for j,jj in enumerate(fmesh.variables['edgesOnCell'][ii,:]):\n # print('edge ',str(j),':',str(fmesh.variables['yEdge'][jj]),',',str(fmesh.variables['xEdge'][jj]))\n # plt.plot(fmesh.variables['yEdge'][jj],\n # fmesh.variables['xEdge'][jj],'r.',markersize=ms)\n # k = fmesh.variables['cellsOnCell'][i,j]\n # print('cell ',str(k),':',str(fmesh.variables['yCell'][k]),',',str(fmesh.variables['xEdge'][k]))\n #if (edges[1] in idx) and (edges[10] in idx):\n # print('found duplicate')\n # idx[idx.index(edges[10])] = nan\n #if (edges[6] in idx) and (edges[5] in idx):\n # print('found duplicate')\n # idx[idx.index(edges[5])] = nan\n \n #idx1 = np.argsort(fmesh.variables['yEdge'][edgeidx])\n #edgeidx = edgeidx[np.argsort(fmesh.variables['xEdge'][edgeidx])]\n #dist = np.sqrt( np.square(fmesh.variables['yEdge'][edgeidx] - fmesh.variables['yEdge'][edgeidx[0]]) + \n # np.square(fmesh.variables['xEdge'][edgeidx] - fmesh.variables['xEdge'][edgeidx[0]]) )\n \n # show profile line across cells\n #if not os.path.exists(savepath + 'bathy_' + placename + '.png'):\n # fig1 = plt.figure()\n # plt.plot(yCell[logical_N], xCell[logical_N], 'k.')\n # plt.plot(yCell[logical_trans], xCell[logical_trans], 'r.')\n # plt.axis('equal')\n # plt.savefig('grid_' + placename + '_' + datestr + '.png',dpi=set_dpi)\n # plt.close()\n # \n # fig = plt.figure()\n # cntr1 = plt.tricontourf(yCell[logical_N].flatten(), xCell[logical_N].flatten(), \n # zmax[logical_N].flatten(), 20, cmap=\"viridis\")\n # plt.plot(yCell[logical_N], xCell[logical_N], 'o', color = 'white', \n # markersize = 4, fillstyle = 'none')#, alpha = 0.5)\n # cntr = plt.tricontour(yCell[logical_N].flatten(), xCell[logical_N].flatten(), \n # zice[logical_N].flatten(), [-10], colors = 'k')\n # plt.plot(yCell[idxsort_trans], xCell[idxsort_trans], 'k-')\n # plt.axis('equal')\n # cbar = plt.colorbar(cntr1)\n # cbar.set_label('Depth (m)') \n # plt.savefig(savepath + 'bathy_' + placename + '.png',dpi=set_dpi)\n # plt.close()\n \n \n \n # show profile line across cells\n if (not os.path.exists(savepath + 'bathy_' + transect_name + '.png')) or overwrite:\n # northern limit for subplots\n ms=1 \n #xcell = fmesh.variables['xCell'][:]\n #ycell = fmesh.variables['yCell'][:]\n #latcell = fmesh.variables['latCell'][:]\n idx_scope = pick_from_region(region=scope_name, run=run, plot_map=False)\n zmax_scope = np.multiply(-1.,fmesh.variables['bottomDepth'][idx_scope])\n icemask_scope = fmesh.variables['landIceMask'][0,idx_scope]\n #zice_scope = fmesh.variables['landIceDraft'][idx_scope]\n ycell_scope = ycell[idx_scope]\n xcell_scope = xcell[idx_scope]\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n cntr1 = plt.scatter(ycell_scope,xcell_scope,\n s=loc_ptsize[region_name.index(scope_name)], c=zmax_scope)\n #cntr1 = 
plt.tricontourf(ycell[idx_scope],xcell[idx_scope],zmax, 20, cmap=\"viridis\")\n #plt.plot(ycell[idx_scope],xcell[idx_scope], 'o', color = 'white', \n # markersize = ms, fillstyle = 'none', alpha = 0.5)\n #cntr = plt.tricontour(ycell_scope,xcell_scope,\n # zice, [-10], colors = 'k',linewidth=lw1)\n plt.plot(ycell_scope[icemask_scope==1], xcell_scope[icemask_scope==1],\n 'o', color = 'white', \n markersize = 5*ms, fillstyle = 'none')\n plt.plot(ycell[cellidx], xcell[cellidx],\n 'o', color = 'black', \n markersize = ms, fillstyle = 'none')\n #plt.plot([y0,y1],[x0,x1], 'k-')\n\n cNorm = Normalize(vmin=-1*pi, vmax=1*pi)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap='jet')\n #for i in range(len(idx)):\n # for j in range(6):\n # colorVal = scalarMap.to_rgba(fmesh.variables['angleEdge'][edgesOnCell[i,j]])\n # sc = plt.scatter(fmesh.variables['yEdge'][edgesOnCell[i,j]],\n # fmesh.variables['xEdge'][edgesOnCell[i,j]],s=ms/2,c=colorVal)\n for k in edgeidx:\n colorVal = scalarMap.to_rgba(fmesh.variables['angleEdge'][edgesOnCell[i,j]])\n sc = plt.plot([fmesh.variables['yVertex'][fmesh.variables['verticesOnEdge'][k,0]-1],\n fmesh.variables['yVertex'][fmesh.variables['verticesOnEdge'][k,1]-1]],\n [fmesh.variables['xVertex'][fmesh.variables['verticesOnEdge'][k,0]-1],\n fmesh.variables['xVertex'][fmesh.variables['verticesOnEdge'][k,1]-1]],\n 'b-',linewidth=lw1)#marker='None',linestyle='-','k')\n #print(fmesh.variables['yVertex'][verticesOnEdge[i,j,1]])\n #print(fmesh.variables['xVertex'][verticesOnEdge[i,j,1]])\n #xv = [fmesh.variables['yVertex'][verticesOnEdge[i,j,0]],\n # fmesh.variables['yVertex'][verticesOnEdge[i,j,1]]]\n #yv = [fmesh.variables['xVertex'][verticesOnEdge[i,j,0]],\n # fmesh.variables['xVertex'][verticesOnEdge[i,j,1]]]\n #plt.plot(xv,yv,'b-',linewidth=0.5)\n #plt.plot(ypt[idx], xpt[idx], 'k.',markersize=ms)\n # for i,j in enumerate(idx):\n # plt.plot([yv1[i],yv2[i]],[xv1[i],xv2[i]], 'k-')\n #ax.set_xlabel('y (m)')\n #ax.set_ylabel('x (m)')\n plt.axis('equal')\n plt.ylim([region_xybounds[region_name.index(scope_name)][0,0],\n region_xybounds[region_name.index(scope_name)][0,1]])\n plt.xlim([region_xybounds[region_name.index(scope_name)][1,0],\n region_xybounds[region_name.index(scope_name)][1,1]])\n cbar = plt.colorbar(cntr1)\n #cbar = plt.colorbar(sc)\n cbar.set_label(r'Depth (m)') \n print('save ','bathy_' + transect_name)\n plt.savefig(savepath + 'bathy_' + transect_name)\n plt.close()\n \n return cellidx, edgeidx, dist, angle\n\ndef pick_from_region(region='frisEAcoast',run = 'ISMF',\n land_ice_mask = False, plot_map=False,\n overwrite=False, savepath=savepath):\n \n fmesh = netCDF4.Dataset(meshpath[runname.index(run)])\n\n # import variables from file\n latCell = fmesh.variables['latCell'][:]\n lonCell = fmesh.variables['lonCell'][:]\n xCell = fmesh.variables['xCell'][:]\n yCell = fmesh.variables['yCell'][:]\n zmax = np.multiply(-1,fmesh.variables['bottomDepth'][:])\n idx_bool = ((xCell < region_xybounds [region_name.index(region),0,1]) & \n (xCell > region_xybounds [region_name.index(region),0,0]) & \n (yCell < region_xybounds [region_name.index(region),1,1]) & \n (yCell > region_xybounds [region_name.index(region),1,0]) & \n (latCell < region_coordbounds[region_name.index(region),1,1]*deg2rad) & \n (latCell > region_coordbounds[region_name.index(region),1,0]*deg2rad) & \n (lonCell < region_coordbounds[region_name.index(region),0,1]*deg2rad) & \n (lonCell > region_coordbounds[region_name.index(region),0,0]*deg2rad) &\n (zmax < region_zbounds [region_name.index(region),1]) & \n 
(zmax > region_zbounds [region_name.index(region),0]) \n )\n #idx_bool = np.ones_like(xCell, dtype=bool)\n #print(f'before region masking: {len(np.asarray(idx_bool.nonzero(),dtype=int)[0,:])}')\n #idx_bool = (idx_bool & \n # (xCell < region_xybounds[region_name.index(region),0,1]) & \n # (xCell > region_xybounds[region_name.index(region),0,0]) & \n # (yCell < region_xybounds[region_name.index(region),1,1]) & \n # (yCell > region_xybounds[region_name.index(region),1,0]))\n #print(f'after xy masking: {len(np.asarray(idx_bool.nonzero(),dtype=int)[0,:])}')\n #idx_bool = (idx_bool & \n # (latCell < region_coordbounds[region_name.index(region),1,1]*deg2rad) & \n # (latCell > region_coordbounds[region_name.index(region),1,0]*deg2rad))\n #idx_bool = (idx_bool & \n # (lonCell < region_coordbounds[region_name.index(region),0,1]*deg2rad) & \n # (lonCell > region_coordbounds[region_name.index(region),0,0]*deg2rad))\n #print(f'after coord masking: {len(np.asarray(idx_bool.nonzero(),dtype=int)[0,:])}')\n #idx_bool = (idx_bool & \n # (zmax < region_zbounds[region_name.index(region),1]) & \n # (zmax > region_zbounds[region_name.index(region),0]))\n #print(f'after z masking: {len(np.asarray(idx_bool.nonzero(),dtype=int)[0,:])}')\n print(f'after region masking: {len(np.asarray(idx_bool.nonzero(),dtype=int)[0,:])}')\n if land_ice_mask:\n landIceMask = fmesh.variables['landIceMask'][:]\n landIceDraft = fmesh.variables['landIceDraft'][:]\n idx_bool = (idx_bool * (landIceMask == 1))\n cellidx = np.asarray(idx_bool.nonzero(),dtype=int)[0,:]\n print(f'after land ice masking: {len(cellidx)}')\n print(zmax[cellidx])\n if land_ice_mask:\n print(landIceDraft[cellidx])\n print(latCell[cellidx])\n print(lonCell[cellidx])\n return cellidx\n","sub_path":"pick_from_mesh.py","file_name":"pick_from_mesh.py","file_ext":"py","file_size_in_byte":25125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245257602","text":"from typing import List\n\n\nclass Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n power_set = []\n\n nums.sort()\n\n self.backtrack(power_set, [], nums, 0)\n return power_set\n\n def backtrack(\n self, power_set: List[int], set: List[int], nums: List[int], start: int\n ) -> List[int]:\n sortable = set[::]\n sortable.sort()\n\n if sortable not in power_set:\n power_set.append(sortable)\n\n for i in range(start, len(nums)):\n if i > start and nums[i] == nums[i - 1]:\n continue\n set.append(nums[i])\n self.backtrack(power_set, set, nums, i + 1)\n set.pop()\n\n\nm = Solution()\n\n# print(\"sol: \", m.subsetsWithDup([1, 2, 2]))\nprint(\"sol: \", m.subsetsWithDup([0]))\n","sub_path":"python/problems/subsets-II/accepted.py","file_name":"accepted.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7702410","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[80]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport calendar\nfrom datetime import timedelta\nimport datetime as dt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n# In[30]:\n\n\nget_ipython().system('pip3 install geopy')\n\n\n# In[20]:\n\n\ntrain = pd.read_csv(\"./iiitb2019nyctaxifare/train.csv/train.csv\", nrows = 400000)\n\n\n# In[21]:\n\n\ntrain.head()\n\n\n# 
In[22]:\n\n\ntrain.info()\n\n\n# In[23]:\n\n\ntrain['pickup_datetime']=pd.to_datetime(train['pickup_datetime'],format='%Y-%m-%d %H:%M:%S UTC')\ntrain.head()\n\n\n# In[24]:\n\n\nfrom datetime import datetime\nimport calendar\nfrom datetime import timedelta\nimport datetime as dt\n\n\n# In[25]:\n\n\ntrain['pickup_date']= train['pickup_datetime'].dt.date\ntrain['pickup_day']=train['pickup_datetime'].apply(lambda x:x.day)\ntrain['pickup_hour']=train['pickup_datetime'].apply(lambda x:x.hour)\ntrain['pickup_day_of_week']=train['pickup_datetime'].apply(lambda x:calendar.day_name[x.weekday()])\ntrain['pickup_month']=train['pickup_datetime'].apply(lambda x:x.month)\ntrain['pickup_year']=train['pickup_datetime'].apply(lambda x:x.year)\n\n\n# In[56]:\n\n\ntrain.info()\n\n\n# In[26]:\n\n\n# removing outliers in latitude and longitude\nmisplaced_locations = 0\nmisplaced_locations_index = []\n\nfor i, val in enumerate(zip(train.pickup_latitude,train.dropoff_latitude,train.pickup_longitude,train.dropoff_longitude)):\n \n #print(val)\n #break\n \n lat1,lat2,lon1,lon2 = val\n #co_ords1 = (lat1, lon1)\n #co_ords2 = (lat2, lon2)\n \n if lat1 < 40.5 or lat1 > 41.8 or lat2 < 40.5 or lat2 > 41.8 or lon1 < -74.5 or lon1 > -72.8 or lon2 < -74.5 or lon2 > -72.8:\n misplaced_locations += 1\n misplaced_locations_index.append(i)\n \n \nprint(misplaced_locations)\n\n\n# In[27]:\n\n\ntrain = train.drop(misplaced_locations_index)\n\n\n# In[28]:\n\n\ntrain = train.dropna()\n\n\n# In[31]:\n\n\nimport geopy.distance\nfor val in zip(train.pickup_latitude,train.dropoff_latitude,train.pickup_longitude,train.dropoff_longitude):\n \n #print(val)\n #break\n \n lat1,lat2,lon1,lon2 = val\n co_ords1 = (lat1, lon1)\n co_ords2 = (lat2, lon2)\n \n train['distance'] = geopy.distance.distance(co_ords1, co_ords2).km\n\n\n# In[33]:\n\n\ntrain.head(100)\n\n\n# In[34]:\n\n\ntrain = train.drop(train[train['passenger_count'] <= 0].index.tolist())\n\n\n# In[35]:\n\n\ntrain = train.drop(train[train['fare_amount'] <= 0].index.tolist())\n\n\n# In[36]:\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[37]:\n\n\ncity_long_border = (-74.03, -73.75)\ncity_lat_border = (40.63, 40.85)\n\ntrain.plot(kind='scatter', x='dropoff_longitude', y='dropoff_latitude',\n color='red', \n s=.02, alpha=.6)\nplt.title(\"Dropoffs\")\n\nplt.ylim(city_lat_border)\nplt.xlim(city_long_border)\n\n\n# In[38]:\n\n\ntrain.plot(kind='scatter', x='pickup_longitude', y='pickup_latitude',\n color='blue', \n s=.02, alpha=.6)\nplt.title(\"Pickups\")\n\nplt.ylim(city_lat_border)\nplt.xlim(city_long_border)\n\n\n# In[39]:\n\n\n#calculate trip distance in miles\ndef distance(lat1, lat2, lon1,lon2):\n p = 0.017453292519943295 # Pi/180\n a = 0.5 - np.cos((lat2 - lat1) * p)/2 + np.cos(lat1 * p) * np.cos(lat2 * p) * (1 - np.cos((lon2 - lon1) * p)) / 2\n return 0.6213712 * 12742 * np.arcsin(np.sqrt(a))\n\n\n# In[40]:\n\n\ntrain['trip_distance']=train.apply(lambda row:distance(row['pickup_latitude'],row['dropoff_latitude'],row['pickup_longitude'],row['dropoff_longitude']),axis=1)\n\n\n# In[41]:\n\n\ntrain.head()\n\n\n# In[42]:\n\n\nplt.scatter(x=train['trip_distance'],y=train['fare_amount'])\nplt.xlabel(\"Trip Distance\")\nplt.ylabel(\"Fare Amount\")\nplt.title(\"Trip Distance vs Fare Amount\")\n\n\n# In[43]:\n\n\ntrips_year=train.groupby(['pickup_year'])['key'].count().reset_index().rename(columns={'key':'Num_Trips'})\ntrips_year.head()\nsns.barplot(x='pickup_year',y='Num_Trips',data=trips_year)\n\n\n# 
In[44]:\n\n\ntrips_year_fareamount=train.groupby(['pickup_year'])['fare_amount'].mean().reset_index().rename(columns={'fare_amount':'avg_fare_amount'})\n\n\n# In[45]:\n\n\nsns.barplot(x='pickup_year',y='avg_fare_amount',data=trips_year_fareamount).set_title(\"Avg Fare Amount over Years\")\n\n\n# In[46]:\n\n\ndef groupandplot(data,groupby_key,value,aggregate='mean'):\n plt.figure(figsize=(16,10))\n agg_data=data.groupby([groupby_key])[value].agg(aggregate).reset_index().rename(columns={value:aggregate+'_'+value})\n plt.subplot(1,2,1)\n count_data=train.groupby([groupby_key])['key'].count().reset_index().rename(columns={'key':'Num_Trips'})\n sns.barplot(x=groupby_key,y='Num_Trips',data=count_data).set_title(\"Number of Trips vs \"+groupby_key)\n \n plt.subplot(1,2,2)\n sns.barplot(x=groupby_key,y=aggregate+'_'+value,data=agg_data).set_title(aggregate+'_'+value+\" vs \"+groupby_key)\n\n\n# In[47]:\n\n\ngroupandplot(train,'pickup_month','fare_amount')\n\n\n# In[48]:\n\n\ngroupandplot(train,'pickup_day_of_week','fare_amount')\n\n\n# In[49]:\n\n\ngroupandplot(train,'pickup_hour','fare_amount')\n\n\n# In[50]:\n\n\n# Let us encode day of the week to numbers\ndef encodeDays(day_of_week):\n day_dict={'Sunday':0,'Monday':1,'Tuesday':2,'Wednesday':3,'Thursday':4,'Friday':5,'Saturday':6}\n return day_dict[day_of_week]\n\n\n# In[51]:\n\n\ntrain['pickup_day_of_week']=train['pickup_day_of_week'].apply(lambda x:encodeDays(x))\n\n\n# In[52]:\n\n\ngroupandplot(train,'passenger_count','fare_amount')\n\n\n# In[53]:\n\n\ngroupandplot(train,'passenger_count','fare_amount')\n\n\n# In[54]:\n\n\ntrain.to_csv(\"train_cleaned.csv\",index=False)\n\n\n# In[58]:\n\n\ntrain.shape\n\n\n# In[60]:\n\n\ntrain.info(0)\n\n\n# In[63]:\n\n\ntrain = train.drop(columns=['key','pickup_datetime','distance','pickup_date'])\n\n\n# In[64]:\n\n\ntrain.info()\n\n\n# In[70]:\n\n\ndef processDataForModelling(data,target,is_train=True,split=0.3):\n data_1=data\n # One hot Encoding\n data_1=pd.get_dummies(data_1)\n if is_train==True:\n X=data_1.drop([target],axis=1)\n y=data_1[target]\n X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=split,random_state=123)\n \n print(\"Shape of Training Features\",X_train.shape)\n print(\"Shape of Validation Features \",X_test.shape)\n \n return X_train, X_test, y_train, y_test\n else:\n print (\"Shape of Test Data\",data_1.shape)\n return data_1\n\n\n# In[73]:\n\n\nX_train, X_test, y_train, y_test=processDataForModelling(train,'fare_amount',is_train=True,split=0.2)\n\n\n# In[74]:\n\n\navg_fare=round(np.mean(y_train),2)\navg_fare\n\n\n# In[77]:\n\n\n# Baseline Model\nbaseline_pred=np.repeat(avg_fare,y_test.shape[0])\nbaseline_rmse=np.sqrt(mean_squared_error(baseline_pred, y_test))\nprint(\"Basline RMSE of Validation data :\",baseline_rmse)\n\n\n# In[78]:\n\n\n# Linear Regression Model\nlm = LinearRegression()\nlm.fit(X_train,y_train)\ny_pred=np.round(lm.predict(X_test),2)\nlm_rmse=np.sqrt(mean_squared_error(y_pred, y_test))\nprint(\"RMSE for Linear Regression is \",lm_rmse)\n\n\n# In[81]:\n\n\n# Random Forest Model\nrf = RandomForestRegressor(n_estimators = 100, random_state = 883,n_jobs=-1)\nrf.fit(X_train,y_train)\n\n\n# In[82]:\n\n\nrf_pred= rf.predict(X_test)\nrf_rmse=np.sqrt(mean_squared_error(rf_pred, y_test))\nprint(\"RMSE for Random Forest is \",rf_rmse)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Project/ML/Data Visualization.py","file_name":"Data 
Visualization.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"608664057","text":"import requests\nfrom db import dbskiut_con\nfrom config.urls import _CAS_URL\nimport json\n\nfrom weezevent.weezevent_api import WeezeventAPI\n\nclass Tombola():\n \"\"\"\n Tombola methods\n \"\"\"\n def buy_tombola(self, user, ticket1, ticket5, ticket10, service):\n \"\"\"\n Do the transaction on weezevent side\n in order to validate the buy\n \"\"\"\n api = WeezeventAPI()\n\n tickets = []\n\n if int(ticket1) > 0 : tickets.append(['15122', ticket1])\n if int(ticket5) > 0 : tickets.append(['15121', ticket5])\n if int(ticket10) > 0 : tickets.append(['15123', ticket10])\n\n response = api.create_transaction(tickets, user.get_email(), service)\n\n con = dbskiut_con()\n con.begin()\n with con:\n try:\n cur = con.cursor()\n sql = \"INSERT INTO `tombola_2020` (`id_transaction`, `login_user`, `status`, `ticket1`, `ticket5`, `ticket10`) VALUES (%s, %s, %s, %s, %s, %s)\"\n cur.execute(sql, (response['tra_id'], user.get_login(), 'W', int(ticket1), int(ticket5), int(ticket10)))\n con.commit()\n except Exception as e:\n raise e\n finally:\n cur.close()\n\n return response\n\n #Call the transaction from weezevent and update its status if it has changed\n def update_transaction_status(self, transaction):\n api = WeezeventAPI()\n\n response = api.get_transaction_info(transaction)\n\n if response['status'] != 'W':\n con = dbskiut_con()\n con.begin()\n try:\n cur = con.cursor()\n sql = \"UPDATE `tombola_2020` SET `status`=%s WHERE `id_transaction`=%s\"\n cur.execute(sql, (response['status'], transaction))\n con.commit()\n except Exception as e:\n raise e\n finally:\n cur.close()\n\n #Get user stats for tombola\n def get_user_stats(self, user):\n response = {}\n\n con = dbskiut_con()\n con.begin()\n try:\n cur = con.cursor()\n sql = \"SELECT SUM(`ticket1`) AS `ticket1`, SUM(`ticket5`) AS `ticket5`, SUM(`ticket10`) AS `ticket10` FROM `tombola_2020` WHERE `login_user`=%s AND `status`=%s\"\n cur.execute(sql, (user.get_login(), 'V'))\n con.commit()\n response = cur.fetchone()\n\n for k, v in response.items():\n if v is None:\n response[k] = 0\n\n response = {\n 'ticket1': int(response['ticket1']),\n 'ticket5': int(response['ticket5']),\n 'ticket10': int(response['ticket10']),\n 'poids': int(self.get_poids(user))\n }\n except Exception as e:\n raise e\n finally:\n cur.close()\n return json.dumps(response)\n\n def validate_tombola(self, user):\n con = dbskiut_con()\n response = None\n con.begin()\n try:\n cur = con.cursor()\n sql = \"SELECT `id_transaction` FROM `tombola_2020` WHERE `login_user`=%s ORDER BY `id` DESC\"\n cur.execute(sql, user.get_login())\n con.commit()\n transaction = cur.fetchone()\n except Exception as e:\n raise e\n finally:\n cur.close()\n\n if transaction.get('id_transaction'):\n #Update the transaction status in the bdd\n self.update_transaction_status(transaction['id_transaction'])\n\n return self.get_user_stats(user)\n\n # Update each transaction status which have not been handled\n def check_transaction_routine(self):\n con = dbskiut_con()\n con.begin()\n try:\n cur = con.cursor()\n sql = \"SELECT `id_transaction` FROM `tombola_2020` WHERE `status`=%s\"\n cur.execute(sql, 'W')\n con.commit()\n transactions = cur.fetchall()\n except Exception as e:\n raise e\n finally:\n cur.close()\n\n for transaction in transactions:\n if transaction.get('id_transaction'):\n 
self.update_transaction_status(transaction['id_transaction'])\n\n def get_poids(self, user):\n con = dbskiut_con()\n con.begin()\n try:\n cur = con.cursor()\n sql = \"SELECT SUM(`ticket1` + 5*`ticket5` + 10*`ticket10`) as value FROM tombola_2020 WHERE `status`=%s\"\n cur.execute(sql, 'V')\n con.commit()\n total_tickets = cur.fetchone()\n sql = \"SELECT SUM(`ticket1` + 5*`ticket5` + 10*`ticket10`) as value FROM tombola_2020 WHERE `login_user`=%s AND `status`=%s\"\n cur.execute(sql, (user.get_login(), 'V'))\n con.commit()\n user_tickets = cur.fetchone()\n except Exception as e:\n raise e\n finally:\n cur.close()\n\n if user_tickets is None:\n user_tickets = 0\n\n if total_tickets is None:\n total_tickets = 0\n\n return int(user_tickets['value'])/total_tickets['value']\n","sub_path":"python/lib/tombola/tombola.py","file_name":"tombola.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526873189","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\n#\n# Copyright 2018 Daniel Perron\n#\n# Base on Mario Gomez MFRC522-Python\n#\n# This file use part of MFRC522-Python\n# MFRC522-Python is a simple Python implementation for\n# the MFRC522 NFC Card Reader for the Raspberry Pi.\n#\n# MFRC522-Python is free software:\n# you can redistribute it and/or modify\n# it under the terms of\n# the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# MFRC522-Python is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the\n# GNU Lesser General Public License along with MFRC522-Python.\n# If not, see .\n#\n\nimport RPi.GPIO as GPIO\nimport mfrc522\nimport signal \nimport time \nimport requests\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nclass mem ():\n def __init__(self, token, money):\n self.token = token\n self.money= money\n\nclass Cashier:\n def __init__(self, price_sheet):\n # google sheet inform\n self.auth_json_path = 'key.json'\n self.gss_scopes = ['https://spreadsheets.google.com/feeds']\n\n self.credentials = ServiceAccountCredentials.from_json_keyfile_name(self.auth_json_path,self.gss_scopes)\n self.gss_client = gspread.authorize(self.credentials)\n\n self.spreadsheet_key = '1J_Ewl_8pN9WMkNEL1Evb8mC-yTMYnO26lgW10GJWWL4' \n\n self.sheet = self.gss_client.open_by_key(self.spreadsheet_key).sheet1\n\n # Hook the SIGINT\n signal.signal(signal.SIGINT, self.end_read)\n\n # Create an object of the class MFRC522\n self.MIFAREReader = MFRC522.MFRC522()\n\n self.member = {}\n self.price_sheet = price_sheet\n self.init_dollar = 1000\n\n\n # function to read uid an conver it to a string\n def uidToString(self, uid):\n mystring = \"\"\n for i in uid:\n mystring = format(i, '02X') + mystring\n return mystring\n\n\n # Capture SIGINT for cleanup when the script is aborted\n def end_read(self, signal, frame):\n global continue_reading\n print(\"Ctrl+C captured, ending read.\")\n GPIO.cleanup()\n\n \n\n def lineNotifyMessage(self, token, msg):\n headers = { \"Authorization\": \"Bearer \" + token,\"Content-Type\" : \"application/x-www-form-urlencoded\" }\n payload = {'message': msg}\n r = requests.post(\"https://notify-api.line.me/api/notify\", 
headers = headers, params = payload)\n return r.status_code \n\n\n def checkout(self, cart):\n # This loop keeps checking for chips.\n # If one is near it will get the UID and authenticate\n while True:\n\n # Scan for cards\n (status, TagType) = self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)\n\n # If a card is found\n if status == self.MIFAREReader.MI_OK:\n print (\"Card detected\")\n\n # Get the UID of the card\n (status, uid) = self.MIFAREReader.MFRC522_SelectTagSN()\n # If we have the UID, continue\n if status == self.MIFAREReader.MI_OK:\n if self.uidToString(uid) in self.member:\n temp_price = 0\n string = \"\"\n for item, num in cart:\n if item == self.item1:\n temp_price += num * self.price_sheet[item]\n string += str(num) + \" \" + item + \"\\n\"\n\n if self.member[self.uidToString(uid)].money >= temp_price:\n self.member[self.uidToString(uid)].money -= temp_price\n string = \"you buy:\\n\" + string + \"total: \" + str(temp_price) + \" dollars\\nmoney left: \" + str(self.member[self.uidToString(uid)].money)\n self.lineNotifyMessage(self.member[self.uidToString(uid)].token, string)\n else:\n string = \"money is not enough!!!\\nyou buy:\\n\" + string + \"total: \" + str(temp_price) + \" dollars\\nbut your money left: \" + str(self.member[self.uidToString(uid)].money)\n self.lineNotifyMessage(self.member[self.uidToString(uid)].token, string)\n return\n else:\n for element in self.sheet.get_all_values():\n if self.uidToString(uid) in element[1]:\n self.member[self.uidToString(uid)] = mem(element[2], self.init_dollar)\n temp_price = 0\n string = \"\"\n for item in cart:\n num = cart[item]\n temp_price += num * self.price_sheet[item]\n string += str(num) + \" \" + item + \"\\n\"\n\n if self.member[self.uidToString(uid)].money >= temp_price:\n self.member[self.uidToString(uid)].money -= temp_price\n string = \"you buy:\\n\" + string + \"total: \" + str(temp_price) + \" dollars\\nmoney left: \" + str(self.member[self.uidToString(uid)].money)\n self.lineNotifyMessage(self.member[self.uidToString(uid)].token, string)\n else:\n string = \"money is not enough!!!\\nyou buy:\\n\" + string + \"total: \" + str(temp_price) + \" dollars\\nbut your money left: \" + str(self.member[self.uidToString(uid)].money)\n self.lineNotifyMessage(self.member[self.uidToString(uid)].token, string)\n return\n \n print(\"you are not member, please scan the QRCode to register, your ID is: \" + str(self.uidToString(uid)))\n return\n else:\n print(\"Authentication error\")\n\n# p_sheet = {\"冷氣卡\": 100, \"餅乾\": 10}\n# a = Cashier(p_sheet)\n# dic = {\"冷氣卡\": 1, \"餅乾\": 1}\n# a.checkout(dic)","sub_path":"project/Cashier.py","file_name":"Cashier.py","file_ext":"py","file_size_in_byte":6474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"233630577","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = \"auctions\"\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"create-listing\", views.create_listing, name=\"create_listing\"),\n path(\"submit-listing\", views.submit_listing, name=\"submit_listing\"),\n path(\"listing/\", views.listing, name=\"listing\"),\n path(\"bidding-submit\", views.bidding_submit, name=\"bidding_submit\"),\n path(\"watchlist-listing\", views.watchlist_listing, name=\"watchlist_listing\"),\n path(\"watchlist-submit\", views.watchlist_submit, name=\"watchlist_submit\"),\n path(\"status-listing\", views.status_listing, name=\"status_listing\"),\n path(\"comment-listing\", views.comment_listing, name=\"comment_listing\"),\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"categories/\", views.category_listing, name=\"category_listing\")\n]\n","sub_path":"learning/cs50-web/web50/project_2/commerce/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"292999332","text":"# Open the file\nout_file = open('vocabulary.txt', 'w')\n\nwhile True:\n voca_eng = input(\"Enter an English word: \")\n if voca_eng == 'q':\n break\n voca_kor = input(\"Enter the Korean meaning: \")\n\n if voca_kor == 'q':\n break\n # Write to the file\n out_file.write(\"%s: %s\\n\" % (voca_eng, voca_kor))\n\n# Close the file\nout_file.close()\n","sub_path":"j_z/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"178083469","text":"import unittest\nimport concurrent.futures\nfrom bs4 import BeautifulSoup\nfrom indeed.indeed_scraper import job_in_nogo\nfrom indeed.indeed_scraper import download_pages_soup\nfrom indeed.indeed_scraper import get_page_count\n\n'''\nTo run the test call from the jobscraper folder\n$python -m unittest tests/test_scraper.py -v\n\nTo run coverage test run\n$coverage run -m unittest tests/test_scraper.py -v\n$coverage report --omit=*/lib/*\n'''\n\nclass TestScraper(unittest.TestCase):\n\n def test_nogo_jobtitle(self):\n\n job_title = 'Head of Marketing'\n nogo_list = 'marketing'\n result = job_in_nogo(job_title,nogo_list)\n self.assertTrue(result)\n\n def test_website_scrape(self):\n \n website = 'https://de.indeed.com/jobs?q=werkstudent&l=Berlin'\n with concurrent.futures.ThreadPoolExecutor() as executor:\n # submits the function to be executed and return a future object\n output = [executor.submit(download_pages_soup,website,page) for page in range(10)] \n self.assertEqual(len(output),10,f\"The length is actually {len(output)}\")\n\n def test_get_page_count(self):\n source_text = open(\"tests/example_soup.txt\",\"r\")\n soup = BeautifulSoup(source_text, 'lxml')\n source_text.close()\n number = get_page_count(soup)\n self.assertEqual(number,142)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_scraper.py","file_name":"test_scraper.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"434272081","text":"import time\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom scipy.misc import imread\nfrom alexnet import AlexNet\n\n\ndef fully_connected(input_layer, size):\n \"\"\"\n Performs a single fully 
connected layer pass, e.g. returns `input * weights + bias`.\n \"\"\"\n weights = tf.get_variable(\n 'weights',\n shape=[input_layer.get_shape().as_list()[-1], size],\n initializer=tf.contrib.layers.xavier_initializer()\n )\n biases = tf.get_variable(\n 'biases',\n shape=[size],\n initializer=tf.constant_initializer(0.0)\n )\n return tf.matmul(input_layer, weights) + biases\n\n\ndef fully_connected_relu(input_layer, size):\n return tf.nn.relu(fully_connected(input_layer, size))\n\nsign_names = pd.read_csv('signnames.csv')\nnb_classes = 43\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 3))\nresized = tf.image.resize_images(x, (227, 227))\n\n# NOTE: By setting `feature_extract` to `True` we return\n# the second to last layer.\nfc7 = AlexNet(resized, feature_extract=True)\n\nwith tf.variable_scope('fc8'):\n fc8 = fully_connected_relu(fc7, nb_classes)\n\nprobs = tf.nn.softmax(fc8)\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# Read Images\nim1 = imread(\"construction.jpg\").astype(np.float32)\nim1 = im1 - np.mean(im1)\n\nim2 = imread(\"stop.jpg\").astype(np.float32)\nim2 = im2 - np.mean(im2)\n\n# Run Inference\nt = time.time()\noutput = sess.run(probs, feed_dict={x: [im1, im2]})\n\n# Print Output\nfor input_im_ind in range(output.shape[0]):\n inds = np.argsort(output)[input_im_ind, :]\n print(\"Image\", input_im_ind)\n for i in range(5):\n print(\"%s: %.3f\" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))\n print()\n\nprint(\"Time: %.3f seconds\" % (time.time() - t))\n","sub_path":"feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257944996","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 23:51:06 2019\n\n\n\"\"\"\nfrom keras.applications import VGG16\n\nconv_base = VGG16(weights='imagenet',\n include_top = False,\n input_shape = (150,150,3))\n\nconv_base.summary()\n\nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\nbase_dir = r'C:\\Users\\VAMS_2\\Dropbox\\ML\\Deep_Learning_With_Python\\Dogs_vs_cats\\Dogs_vs_cats_small'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation_dir')\ntest_dir = os.path.join(base_dir, 'test')\n\ndatagen = ImageDataGenerator(rescale = 1./255)\nbatch_size = 20\n\ndef extract_features(directory, sample_count):\n features = np.zeros(shape = (sample_count, 4, 4, 512))\n labels = np.zeros(shape = (sample_count))\n generator = datagen.flow_from_directory(\n directory,\n target_size = (150, 150),\n batch_size = batch_size,\n class_mode = 'binary')\n i = 0\n for inputs_batch, labels_batch in generator:\n features_batch = conv_base.predict(inputs_batch)\n features[i*batch_size:(i+1)*batch_size] = features_batch\n labels[i*batch_size:(i+1)*batch_size] = labels_batch\n i += 1\n if i*batch_size >= sample_count:\n break\n return features, labels\n\ntrain_features, train_labels = extract_features(train_dir, 2000)\nvalidation_features, validation_labels = extract_features(validation_dir, 1000)\ntest_features, test_labels = extract_features(test_dir, 1000)\n\ntrain_features = train_features.reshape((2000,4*4*512))\nvalidation_features = validation_features.reshape((1000,4*4*512))\ntest_features = test_features.reshape((1000,4*4*512))\n\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nmodel = 
models.Sequential()\nmodel.add(layers.Dense(256, activation = 'relu', input_dim = 4*4*512))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(1, activation = 'sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=2e-5),\n loss = 'binary_crossentropy',\n metrics=['acc'])\n\nhistory = model.fit(train_features, train_labels,\n epochs = 30,\n batch_size = 20,\n validation_data = (validation_features, validation_labels))\n\n\n\n\n\n\n\n\n\n\n","sub_path":"5_3_Pretrained_Net.py","file_name":"5_3_Pretrained_Net.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"543213189","text":"# -*- encoding:utf8 -*-\n# coding:utf8\n\nimport os\nimport cx_Oracle\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n[userName, password, hostIP, dbName, tablePrefix] = ['reader', 'reader', '172.16.50.232:1521', 'dfcf', 'wind']\nfund_db = cx_Oracle.connect(user=userName, password=password, dsn=hostIP + '/' + dbName)\n\n# Investment research platform database\n[userNamepif, passwordpif, hostIPpif, dbNamepif] = ['pra_info', 'pra_info', '172.16.126.23:1521', 'pra']\n# [userNamepif, passwordpif, hostIPpif, dbNamepif] = ['pif', 'pif', '172.16.125.151', 'pif']\ntry:\n fund_dbpra = cx_Oracle.connect(user=userNamepif, password=passwordpif, dsn=hostIPpif + '/' + dbNamepif)\n cu_pra = fund_dbpra.cursor()\nexcept cx_Oracle.DatabaseError as e:\n print('Database connection failed')\n\npath = os.path.dirname(os.path.realpath(__file__)) + '/'\n\n\n\n\ndef sum_manager_percent(date=None):\n \"\"\"\n\n :param date: quarterly report date\n :return: percentile rank of the fund company's total number of fund managers\n \"\"\"\n\n # If date is not given, default to the current date\n if not date:\n date = datetime.datetime.today()\n date = date.strftime('%Y%m%d')\n else:\n date = str(date)\n\n print(date)\n\n\n # Fetch the label for each fund company's total number of fund managers\n # No parameters required; the output columns are fund company, company inception date, and total number of fund managers\n sql = '''\n SELECT\n e.OB_OBJECT_NAME_1018,e.F35_1018,COUNT(DISTINCT e.F2_1272)\n FROM\n (SELECT\n c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272,MAX(d.F4_1272)\n FROM\n (SELECT\n a.F1_1099,b.OB_OBJECT_NAME_1018,b.F35_1018,a.F12_1099\n FROM\n (SELECT\n F12_1099,F1_1099\n FROM wind.TB_OBJECT_1099) a\n JOIN\n (SELECT\n F34_1018,OB_OBJECT_NAME_1018,F35_1018\n FROM wind.TB_OBJECT_1018\n ORDER BY F35_1018 )b\n ON a.F12_1099 = b.F34_1018)c\n JOIN\n (SELECT\n F1_1272,F2_1272,F6_1272,F3_1272,F4_1272\n FROM wind.TB_OBJECT_1272\n WHERE F3_1272 IS NOT NULL AND F4_1272 IS NULL)d\n ON c.F1_1099 = d.F1_1272\n GROUP BY c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272)e\n GROUP BY e.OB_OBJECT_NAME_1018,e.F35_1018\n \n '''\n print(sql)\n\n cu = fund_db.cursor()\n sum_manager = pd.DataFrame(cu.execute(sql).fetchall(), columns=['company', 'start_up_date', 'sum_manager'])\n # Sort\n sum_manager.sort_values(\"sum_manager\", axis=0, ascending=False, inplace=True)\n # Compute the percentile\n # sum_manager[\"percentile\"] = np.percentile(range(sum_manager.shape[0]), list(range(sum_manager.shape[0])).reverse())\n\n print(sum_manager)\n\n return sum_manager\n\n\ndef mean_manager_date_end_percent(date=None):\n \"\"\"\n\n :param date: quarterly report date\n :return: percentile rank of the average years of fund management per manager\n \"\"\"\n # No parameters required for this query\n # Select the fund company name, company inception date, manager name, and the start date of the first fund the manager ran at this company\n sql = '''\n SELECT\n b.OB_OBJECT_NAME_1018,b.F35_1018,c.F2_1272,MIN(c.F3_1272)\n FROM\n (SELECT\n F12_1099,F1_1099\n FROM wind.TB_OBJECT_1099) a\n JOIN\n (SELECT\n F34_1018,OB_OBJECT_NAME_1018,F35_1018\n FROM wind.TB_OBJECT_1018\n ORDER BY F35_1018 )b\n ON a.F12_1099 = b.F34_1018\n JOIN\n (SELECT\n F1_1272,F2_1272,F3_1272\n FROM wind.TB_OBJECT_1272\n WHERE F3_1272 IS NOT 
NULL)c\n ON a.F1_1099 = c.F1_1272\n GROUP BY b.OB_OBJECT_NAME_1018,b.F35_1018,c.F2_1272\n\n '''\n\n cu = fund_db.cursor()\n manage_date_start = pd.DataFrame(cu.execute(sql).fetchall(),\n columns=['company', 'start_up_date', 'manager', 'start_date'])\n\n # No parameters required for this query\n # Select the fund company name, company inception date, manager name, and the end date of the last fund the manager currently runs at this company (None means the fund is still running)\n sql = '''\n SELECT\n b.OB_OBJECT_NAME_1018,b.F35_1018,c.F2_1272,MAX(c.F4_1272)\n FROM\n (SELECT\n F12_1099,F1_1099\n FROM wind.TB_OBJECT_1099) a\n JOIN\n (SELECT\n F34_1018,OB_OBJECT_NAME_1018,F35_1018\n FROM wind.TB_OBJECT_1018\n ORDER BY F35_1018 )b\n ON a.F12_1099 = b.F34_1018\n JOIN\n (SELECT\n F1_1272,F2_1272,F3_1272,F4_1272\n FROM wind.TB_OBJECT_1272\n WHERE F3_1272 IS NOT NULL)c\n ON a.F1_1099 = c.F1_1272\n GROUP BY b.OB_OBJECT_NAME_1018,b.F35_1018,c.F2_1272\n\n '''\n\n cu = fund_db.cursor()\n manage_date_end = pd.DataFrame(cu.execute(sql).fetchall(),\n columns=['company', 'start_up_date', 'manager', 'end_date'])\n\n now = []\n ### today is the date on which the labels are updated\n today = '20190514'\n for i in manage_date_end['end_date']:\n if i == None:\n i = today\n now.append(i)\n manage_date_end['end_date'] = pd.DataFrame(now)\n\n manage_date = pd.merge(manage_date_start, manage_date_end, on=['company', 'start_up_date', 'manager'])\n\n # Convert to datetime and subtract to get the days in between (i.e. each manager's tenure)\n start = pd.to_datetime(manage_date['start_date'])\n end = pd.to_datetime(manage_date['end_date'])\n distance = pd.DataFrame(end - start)\n distance.columns = ['distance']\n day = []\n for i in distance['distance']:\n i = i.days\n i = round(i / 365, 2)\n day.append(i)\n manage_date['distance'] = day\n manage_date.to_excel(path + r'基金公司基金经理管理年数标签.xlsx')\n res_manage_date = manage_date\n ################################################# 1. Label: average years of management per fund manager\n # Compute each company's average manager tenure (regardless of whether the manager still works there)\n mean_manage_years = manage_date.groupby(by=['company', 'start_up_date'])['distance'].mean()\n mean_manage_years = pd.DataFrame(mean_manage_years)\n mean_manage_years.columns = ['mean_manage_years']\n # Compute the percentile rank\n mean_manage_years = mean_manage_years.sort_values(\"mean_manage_years\", axis=1, ascending=False, inplace=True)\n mean_manage_years[\"percentile\"] = np.percentile(mean_manage_years[\"mean_manage_years\"],\n mean_manage_years[\"mean_manage_years\"])\n return mean_manage_years\n\n\n# Only lag = 3 or 1 needs to be set\ndef off_managers_per(lag=3, date=None):\n \"\"\"\n\n :param lag: number of recent years, e.g. 3 for the last three years\n :param date: quarterly report date\n :return:\n \"\"\"\n # If date is not given, default to the current date\n if not date:\n date = datetime.datetime.today()\n date = date.strftime('%Y%m%d')\n else:\n date = str(date)\n\n print(date)\n\n #\n now = pd.to_datetime(date).date()\n\n # Convert to the date three years earlier\n date_years_before = (now - relativedelta(years=lag)).strftime('%Y%m%d')\n\n # Fetch data on the number of fund managers who left within the last X years\n sql = '''\n SELECT\n e.OB_OBJECT_NAME_1018,e.F35_1018,COUNT(DISTINCT e.F2_1272)\n FROM\n (SELECT\n c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272,MAX(d.F4_1272)\n FROM\n (SELECT\n a.F1_1099,b.OB_OBJECT_NAME_1018,b.F35_1018,a.F12_1099\n FROM\n (SELECT\n F12_1099,F1_1099\n FROM wind.TB_OBJECT_1099) a\n JOIN\n (SELECT \n F34_1018,OB_OBJECT_NAME_1018,F35_1018\n FROM wind.TB_OBJECT_1018\n ORDER BY F35_1018 )b\n ON a.F12_1099 = b.F34_1018)c\n JOIN\n (SELECT\n F1_1272,F2_1272,F6_1272,F3_1272,F4_1272\n FROM wind.TB_OBJECT_1272\n WHERE F3_1272 IS NOT NULL)d\n ON c.F1_1099 = d.F1_1272\n GROUP BY c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272\n HAVING MAX(d.F4_1272) >= '%(date_years_before)s' AND MIN(d.F4_1272) IS NOT NULL)e\n GROUP BY e.OB_OBJECT_NAME_1018,e.F35_1018\n\n ''' % {'date_years_before': date_years_before}\n\n cu = 
fund_db.cursor()\n    off_managers = pd.DataFrame(cu.execute(sql).fetchall(), columns=['company', 'start_up_date', 'off_managers'])\n\n    # 取出基金公司基金经理总人数标签\n    ##无需设置参数,输出结果分别为基金公司、基金公司成立日、基金公司基金经理总人数\n    sql = '''\n    SELECT\n    e.OB_OBJECT_NAME_1018,e.F35_1018,COUNT(DISTINCT e.F2_1272)\n    FROM\n    (SELECT\n    c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272,MAX(d.F4_1272)\n    FROM\n    (SELECT\n    a.F1_1099,b.OB_OBJECT_NAME_1018,b.F35_1018,a.F12_1099\n    FROM\n    (SELECT\n    F12_1099,F1_1099\n    FROM wind.TB_OBJECT_1099) a\n    JOIN\n    (SELECT \n    F34_1018,OB_OBJECT_NAME_1018,F35_1018\n    FROM wind.TB_OBJECT_1018\n    ORDER BY F35_1018 )b\n    ON a.F12_1099 = b.F34_1018)c\n    JOIN\n    (SELECT\n    F1_1272,F2_1272,F6_1272,F3_1272,F4_1272\n    FROM wind.TB_OBJECT_1272\n    WHERE F3_1272 IS NOT NULL)d\n    ON c.F1_1099 = d.F1_1272\n    GROUP BY c.OB_OBJECT_NAME_1018,c.F35_1018,d.F2_1272\n    HAVING (MAX(d.F4_1272) >= '%(date_years_before)s' OR MIN(d.F4_1272) IS NULL) AND MIN(d.F3_1272) < '%(date_years_before)s' )e\n    GROUP BY e.OB_OBJECT_NAME_1018,e.F35_1018\n\n    ''' % {'date_years_before': date_years_before}\n\n    cu = fund_db.cursor()\n    sum_manager = pd.DataFrame(cu.execute(sql).fetchall(), columns=['company', 'start_up_date', 'sum_manager'])\n\n    off_managers = pd.merge(off_managers, sum_manager, on=['company', 'start_up_date'])\n    # use the merged column so numerator and denominator stay row-aligned\n    off_managers['percent'] = off_managers['off_managers'] / off_managers['sum_manager']\n\n    # 计算基金经理近三年离职的百分位排名\n    off_managers.sort_values(\"percent\", axis=0, ascending=False, inplace=True)\n    print(off_managers)\n    return off_managers\n\n\nif __name__ == \"__main__\":\n\n    off_managers_per(date=\"20180101\")\n\n","sub_path":"other/cj_project/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":10468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"186280647","text":"#!/home/wli/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nTitle: Extract_Normal_and_Tumor_Patches_From_Tumor_Slides\n=========================================================\nCreated: 10-31-2019\nPython-Version: 3.5, 3.6\n\nDescription:\n------------\nThis module is used to extract normal and tumor image patches\nfrom tumor WSI images.\n\nInputs:\n*******\n\n    slide_path_tumor = '/raida/wjc/CAMELYON16/training/tumor'\n    mask_dir = '/raidb/wli/Final_Results/Display/train_masking'\n    anno_dir = '/raida/wjc/CAMELYON16/training/lesion_annotations'\n\nOutput:\n*******\n    destination_folder_normal = '/raidb/wli/tumor_slide_normal_256_test'\n    destination_folder_tumor = '/raidb/wli/tumor_slide_tumor_256_test'\n    destination_folder_tumor_mask = '/raidb/wli/tumor_slide_tumor_256_mask_test'\n\nRequest:\n--------\nThis module requires the library module: patch_extractor\n\nNote:\n-----\nMask image patches will be saved for quality control only. These patches\nwill not be used at all for GoogleNet training.\n\nMask image patches are generated as binary images. But when some pixels\nare \"0\"s, and some pixels are \"1\"s, the \"1\"s will be saved as \"255\"s.\n\nSome tumor slides cannot yield 1000 tumor patches because\nthe tumor region is too small. The program needs to stop at these WSIs\nand be restarted manually from the next WSI.\n\n\"\"\"\nimport os.path as osp\nimport openslide\n# scipy.misc.imsave is deprecated! imsave is deprecated in SciPy 1.0.0,\n# and will be removed in 1.2.0. 
Use imageio.imwrite instead.\n# from scipy.misc import imsave as saveim\n# before importing HDFStore, make sure 'tables' is installed by pip3 install tables\nimport Patch_Extractor as PE\n\n\nif __name__ == \"__main__\":\n\n    slide_path_tumor = '/raida/wjc/CAMELYON16/training/tumor'\n    mask_dir = '/raidb/wli/Final_Results/Display/train_masking'\n    anno_dir = '/raida/wjc/CAMELYON16/training/lesion_annotations'\n\n    # The following destination_folders need to be changed to\n    # the directories for the results\n    destination_folder_normal = '/raidb/wli/testing_1219/normal_patches_from_tumor_slides'\n    destination_folder_tumor = '/raidb/wli/testing_1219/tumor_patches_from_tumor_slide'\n    destination_folder_tumor_mask = '/raidb/wli/testing_1219/tumor_patches_from_tumor_slides_mask'\n\n    tumor_slide_paths = PE.slides_for_patch_extraction(slide_path_tumor, 'tif')\n    # print(tumor_slide_paths)\n    crop_size = [256, 256]\n    slide_path_for_extraction = tumor_slide_paths\n    # while loop is used here because 2-3 slides have a very small tumor region. It is\n    # impossible to extract 1000 patches. The loop needs to be stopped manually and moved\n    # to the next slide.\n    i = 0\n    while i < len(slide_path_for_extraction):\n\n        single_slide_for_patch_extraction = slide_path_for_extraction[i]\n\n        des_folder_normal_patches = PE.create_folder(\n            single_slide_for_patch_extraction, destination_folder_normal)\n        des_folder_tumor_patches = PE.create_folder(\n            single_slide_for_patch_extraction, destination_folder_tumor)\n        des_folder_tumor_patches_mask = PE.create_folder(\n            single_slide_for_patch_extraction, destination_folder_tumor_mask)\n        # sampletotal = pd.DataFrame([])single_slide_for_patch_extraction\n        slide = openslide.open_slide(single_slide_for_patch_extraction)\n        thresh = PE.tissue_patch_threshold(slide)\n        bbox_tumor_region = PE.bbox_generation_tumor(\n            single_slide_for_patch_extraction, anno_dir)\n\n        bbox_tissue = PE.bbox_generation_tissue(slide)\n        print(bbox_tissue)\n        mask_path = osp.join(mask_dir, osp.basename(\n            single_slide_for_patch_extraction).replace('.tif', '_mask.tif'))\n        ground_truth = openslide.open_slide(str(mask_path))\n        # normal patches will be extracted first\n        PE.extract_normal_patches_from_tumor_slide(\n            slide, ground_truth, crop_size, thresh, bbox_tissue, des_folder_normal_patches, single_slide_for_patch_extraction)\n        PE.extract_tumor_patches_from_tumor_slide(slide, ground_truth, crop_size, thresh, bbox_tumor_region,\n                                                  des_folder_tumor_patches, des_folder_tumor_patches_mask, single_slide_for_patch_extraction)\n        i += 1\n","sub_path":"dldp/patch_extract/Extract_Normal_and_Tumor_Patches_From_Tumor_Slides.py","file_name":"Extract_Normal_and_Tumor_Patches_From_Tumor_Slides.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"377298884","text":"import os\nfrom glob import glob\nfrom train_data.utils.model_pred import SEIRD\n# from train_data.utils.json_data import json_data\nimport traceback\nimport datetime\nimport pandas as pd\n\n\ndef get_days(start_date, end_date):\n    sy, sm, sd = start_date.split('-')\n    ey, em, ed = end_date.split('-')\n    today = datetime.date(int(sy), int(sm), int(sd))\n    someday = datetime.date(int(ey), int(em), int(ed))\n    diff = someday - today\n    days = diff.days\n    return days\n\n\ndef model_predict(end_date, file_path, N, place):\n    end_date = end_date\n    df = pd.read_csv(file_path, header=0)\n    print(end_date)\n    if len(df) > 0:\n        start_date = df['Date'].iloc[0]\n        start_date = start_date.replace(\" \", 
'')\n        days = get_days(start_date, end_date)\n        print(days)\n        if days > 18:\n            print(\"raakhal\")\n            model = SEIRD(file_path, N, place, str(end_date))\n            output = model.final_run()\n            return output\n        else:\n            output = {\"message\": \"end_date referenced before the start date of cases in this region, make sure there \"\n                                 \"are at least 18 days from start_date \" + str(start_date) + \" for training, provide a \"\n                                                                                             \"later end_date \"}\n            return output\n\n    else:\n        output = {\"message\": \"Data not found for this region\"}\n        return output\n\n\ndef resultant_data_india(folder_path, place, end_date, json_data):\n    try:\n        if place.lower() == \"india\":\n            N = 1314000000\n            file_path = folder_path + \"Total.csv\"\n            output = model_predict(end_date, file_path, N, place.lower())\n            return output\n        # folder_path = folder_path + \"India/\"\n        for folder in os.listdir(folder_path):\n            full_path = os.path.join(folder_path, folder)\n            for files in glob(full_path + '/*.csv'):\n                # file_name = os.path.splitext(os.path.basename(files))[0]\n                base = os.path.basename(files)\n                district = base[:-4]\n                parent = os.path.dirname(files)\n                state = os.path.basename(parent)\n                place = place.lower()\n                state1 = state.replace(\"-\", \" \")\n                state2 = state.lower()\n                if district == \"total\" and place == state2:\n                    print(state2, \"----entered_state\")\n                    N = json_data[state1][\"totalPopulation\"]\n                    path = str(files)\n                    # model = SEIRD(path, N, str(state), str(end_date))\n                    # output = model.final_run()\n                    output = model_predict(end_date, path, N, str(state))\n                    return output\n                if district.lower() == place:\n                    print(place, \"----entered_district\")\n                    state1 = state1\n                    field_data = json_data[state1]\n                    for i in range(len(field_data[\"districts\"])):\n                        dict_place = json_data[state1][\"districts\"][i][\"districtName\"]\n                        dict_place1 = dict_place.lower()\n                        if place == dict_place1:\n                            path = str(files)\n                            # print(files, state1, dict_place, dict_place1)\n                            N = json_data[state1][\"districts\"][i][\"population\"]\n                            # model = SEIRD(path, N, str(state), str(end_date))\n                            # output = model.final_run()\n                            output = model_predict(end_date, path, N, str(state))\n                            return output\n    except Exception as e:\n        print(traceback.format_exc())\n        output = {\"message\": \"Exception occurred in resultant_data.py \" + str(e)}\n        return output\n\n\ndef resultant_data_us(folder_path, place, end_date, json_data):\n    try:\n        if place.lower() == \"us\":\n            N = 329340000\n            file_path = folder_path + \"Total.csv\"\n            output = model_predict(end_date, file_path, N, place.lower())\n            return output\n        for files in glob(folder_path + '/*.csv'):\n            base = os.path.basename(files)\n            district = base[:-4]\n            place = place.lower()\n            district1 = district.replace(\"-\", \" \")\n            district2 = district.lower()\n            # print(district2,district,district1, place)\n            if district2 == place:\n                print(district2, files, place)\n                for i in range(len(json_data)):\n                    state = json_data[i]['State']\n                    print(state, district1)\n                    if state == district1:\n                        print(state, district, place)\n                        path = str(files)\n                        # state1 = state.lower()\n                        # state2 = state.replace(\" \", \"-\")\n                        N = json_data[i]['Pop']\n                        print(N)\n                        output = model_predict(end_date, path, N, str(state))\n                        return output\n    except Exception as e:\n        print(traceback.format_exc())\n        output = {\"message\": \"Exception occurred in resultant_data_us \" + str(e)}\n        return output\n\n\ndef resultant_data_russia(folder_path, place, end_date, json_data):\n    try:\n        if place.lower() == \"russia\":\n            N = 176748590\n            file_path = folder_path + \"Total.csv\"\n            output = model_predict(end_date, file_path, N, place.lower())\n            
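# country-level request: the model runs on the aggregated Total.csv and we return early\n            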
return output\n # N = 329340000\n # file_path = folder_path + \"Total.csv\"\n # output = model_predict(end_date, file_path, N, place.lower())\n # return output\n for files in glob(folder_path + '/*.csv'):\n base = os.path.basename(files)\n district = base[:-4]\n place = place.lower()\n district1 = district.replace(\"-\", \" \")\n district2 = district.lower()\n district3 = district1.lower()\n if district2 == place:\n print(district2, place, files)\n for i in range(len(json_data)):\n state = json_data[i]['Region_eng']\n state = state.lower()\n print(state,district3)\n if state == district3:\n N = json_data[i]['Population']\n N = int(N)\n path = str(files)\n print(end_date, path, N, str(state))\n output = model_predict(end_date, path, N, str(state))\n return output\n except Exception as e:\n traceback.format_exc()\n output = {\"message\": \"Exception occurred in resultant_data_russia \" + str(e)}\n return output\n","sub_path":"Forecast Tool/train_data/utils/resultant_data.py","file_name":"resultant_data.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"305517370","text":"mydict = { # Создать словарь\n \"city\": \"Москва\",\n \"temperature\": \"20\"\n}\nprint(mydict[\"city\"]) # Вывести значение city\n# Уменьшить значение temperature на 5\nnew_temperatur = mydict[\"temperature\"] # Для хранения промежуточного значения потребовалась переменная\nnew_temperatur = int(new_temperatur) - 5 # Вычисление новой темпаратуры с преобразованием к ЧИСЛУ \nmydict[\"temperature\"] = str(new_temperatur) # Присваивание новой температуры с преобразование к СТРОКЕ\nprint(mydict) # Вывод нового словоря\nprint(mydict.get(\"country\")) # Проверка есть ли в словаре ключ\nprint(mydict.get(\"country\", \"Россия\")) # Вывести значение по умолчанию\nmydict[\"date\"] = '27.05.2017' # Добавить дату \nprint(len(mydict)) # Вывести длину\n","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"359949317","text":"from flask import Flask\n# 导入扩展\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n# 指定连接的数据库,数据库必须要手动创建\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:mysql@localhost/python20'\n# 动态追踪修改的配置,如果设置为true会消耗一定内存\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\"\"\"\n目标:学习sqlalchemy实现数据的增删改查\n实现:一对多的关系,\n角色Role(管理员和普通用户):一方;\n用户User(多个���户数据):多方;\n\n流程:\n1、导入sqlalchemy类\n2、配置数据库的连接\n3、实例化sqlalchemy对象;\n4、定义模型类\n\n\"\"\"\n# 创建sqlalchemy对象\ndb = SQLAlchemy(app)\n\n# 定义模型:Role角色,一方\nclass Role(db.Model):\n # 如果不定义,默认创建同类名的表名role\n # 建议手动指定表名:1、根据公司名、项目简写;it_roles;2、tb_roles;\n # 表名一把都使用复数;\n __tablename__ = 'roles'\n # 定义主键,如果不指定主键,表无法创建\n id = db.Column(db.Integer,primary_key=True)\n # 定义角色名称,不能有重复值\n name = db.Column(db.String(32),unique=True)\n # 在一对多的关系映射中,一方定义关系,多方定义外键\n # 第一个参数:为另外一方的类名,第二个参数表示反向引用(相当于给User模型类添加一个属性)\n # relationship定义的关系引用,在数据库中没有实体。\n us = db.relationship('User',backref='role')\n # 作用:us可以实现一对多的查询,role可以实现多对一的查询。\n\n # 定义方法,实现查询结果显示为可读字符串。\n def __repr__(self):\n return 'role:%s' % self.name\n\n# 定义模型:User用户,多方\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer,primary_key=True)\n name = db.Column(db.String(32))\n email = db.Column(db.String(32))\n pswd = db.Column(db.String(32))\n role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))\n\n def __repr__(self):\n return 'role:%s' % 
self.name\n\n@app.route('/')\ndef index():\n return 'hello world'\n\n\"\"\"\n查询语句:\n1、过滤查询使用查询执行器\n>>> User.query.filter()\n\n2、filter查询不加入参数,默认查询所有数据,返回列表。\n>>> User.query.filter().all()\n[role:wang, role:zhang, role:chen, role:zhou]\n3、返回数据列表中的第一个\n>>> User.query.filter().first()\nrole:wang\n4、filter查询和filter_by查询区别:\nfilter查询必须使用模型类的类名,可以使用更丰富的运算符。\n>>> User.query.filter(User.id==2).all()\n[role:zhang]\nfilter_by查询只能使用字段名,且使用等值操作。相当于过滤查询中的精确查询。\n>>> User.query.filter_by(id=2).all()\n[role:zhang]\n5、filter查询使用多个条件\n>>> User.query.filter(User.name!='wang',User.email.endswith('163.com')).all()\n[role:zhou]\n\n6、逻辑运算符or_,需要导入才能使用,from sqlalchemy import or_\n>>> User.query.filter(or_(User.name!='wang',User.email.endswith('163.com'))).all()\n[role:wang, role:zhang, role:chen, role:zhou]\n\n7、查询排序,desc表示倒序排序,asc表示升序排序\n>>> User.query.filter().order_by(User.id.desc()).all()\n[role:zhou, role:chen, role:zhang, role:wang]\n8、查询分页:paginate分页第一个参数表示页数,第二个参数表示每页条目数,第三个参数表示分页异常不报错\npaginate返回的查询结果为paginate分页对象,\n>>> User.query.filter().paginate(1,2,False)\n\nitems表示分页后的数据,page表示页数,pages表示总页数\n>>> paginate = User.query.filter().paginate(1,2,False)\n>>> paginate.items\n[role:wang, role:zhang]\n>>> paginate.page\n1\n>>> paginate.pages\n2\n9、修改数据,需要手动提交\n>>> User.query.filter(User.name=='zhang').update({'name':'li'})\n1\n>>> User.query.filter().all()\n[role:wang, role:li, role:chen, role:zhou]\n>>> db.session.commit()\n\n\n\"\"\"\n\n\n\n\nif __name__ == '__main__':\n # 删除表\n db.drop_all()\n # 创建表\n db.create_all()\n # 模拟添加数据\n ro1 = Role(name='admin')\n ro2 = Role(name='user')\n # 添加数据给数据库会话对象。add_all添加多条数据对象,add添加一条\n db.session.add_all([ro1, ro2])\n # 提交数据到数据库\n db.session.commit()\n us1 = User(name='wang', email='wang@163.com', pswd='123456', role_id=ro1.id)\n us2 = User(name='zhang', email='zhang@189.com', pswd='201512', role_id=ro2.id)\n us3 = User(name='chen', email='chen@126.com', pswd='987654', role_id=ro2.id)\n us4 = User(name='zhou', email='zhou@163.com', pswd='456789', role_id=ro1.id)\n db.session.add_all([us1, us2, us3, us4])\n db.session.commit()\n\n\n app.run(debug=True)","sub_path":"Flask_Day04/demo1_sqlalchemy.py","file_name":"demo1_sqlalchemy.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"619043544","text":"class IntegerDemo :\n value = int(input(\"value: \"))\n p = int(input(\"p: \"))\n\n @classmethod\n def set_value(cls):\n cls.value = value\n cls.p = p\n\n def add(cls):\n return cls.value + cls.p\n\ni = IntegerDemo()\nprint(i.add())\n","sub_path":"python/exercise0902.py","file_name":"exercise0902.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"545680755","text":"import matplotlib.pyplot as plot\nimport numpy as np\nimport sys\nfrom scipy import signal\n\nfile ='test'\nm=0.3\ntype='ask'\nfreq=10\nfreqs=2\n\nif (len(sys.argv)>1):\n file=str(sys.argv[1])\n\nif (len(sys.argv)>2):\n type=(sys.argv[2])\n\nif (len(sys.argv)>3):\n freq=int(sys.argv[3])\n\n\nFs = 150.0; # sampling rate\nTs = 1.0/Fs; # sampling interval\n\nt = np.arange(0,2,Ts)\n\nif (type=='fsk'):\n\tbit_arr = np.array([-5,-5,5,5,-5,5,5,-5,-5,5]) \n\tsamples_per_bit = 2*Fs/bit_arr.size \n\tdd = np.repeat(bit_arr, samples_per_bit)\n\ty= np.sin(2 * np.pi * (freq + dd) * t)\nelif (type=='psk'):\n\tbit_arr = np.array([0,0,90,90,0,90,90,0,0,90])\n\tsamples_per_bit = 2*Fs/bit_arr.size \n\tdd = np.repeat(bit_arr, 
samples_per_bit)\n\ty= np.sin(2 * np.pi * (freq) * t+(np.pi*dd/180))\nelse:\n\t#bit_arr = np.array([1, 0, 1, 1, 0])\n\t#bit_arr = np.array([1,0,1,0,1,0,1,0,1,0])\n\t# precisa colocar 10 valores na array\n\tbit_arr = np.array([1,0,1,0,1,0,0,0,0,0])\n\tsamples_per_bit = 2*Fs/bit_arr.size \n\tdd = np.repeat(bit_arr, samples_per_bit)\n\ty= dd*np.sin(2 * np.pi * freq * t)\n\nport = np.sin(2 * np.pi * freq * t);\n\nn = len(y) # length of the signal\nk = np.arange(n)\nT = n/Fs\nfrq = k/T # two sides frequency range\nfrq = frq[range(int(n/2))] # one side frequency range\nY = np.fft.fft(y)/n # fft computing and normalization\nY = Y[range(int(n/2))]\n\nfig,myplot = plot.subplots(2, 1)\nmyplot[0].plot(t,y,'k')\nmyplot[0].set_xlabel('t')\nmyplot[0].set_ylabel('Amplitude')\n\n#plot.gca().axis('off')\n\nmyplot[1].plot(frq,abs(Y),'r') # plotting the spectrum\nmyplot[1].set_xlabel('Freq (Hz)')\nmyplot[1].set_ylabel('|Y(freq)|')\n\n##myplot[2].plot(t,port,'k') # plotting the spectrum\n##myplot[2].set_xlabel('t')\n##myplot[2].set_ylabel('Amplitude')\n\n#plot.gca().axis('off')\n#plot.savefig(file)\n#plot.show()\n","sub_path":"Lista_M2/ASK.py","file_name":"ASK.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"105369036","text":"import tensorflow as tf\r\nimport prettytensor as pt\r\nfrom prettytensor.tutorial import data_utils\r\n\r\ntf.app.flags.DEFINE_string('save_path', None, 'Where to save the model checkpoints.')\r\nFLAGS = tf.app.flags.FLAGS\r\n\r\nBATCH_SIZE = 50\r\nEPOCH_SIZE = 60000 // BATCH_SIZE\r\nTEST_SIZE = 10000 // BATCH_SIZE\r\n\r\nimage_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])\r\nlabels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])\r\n\r\ntf.app.flags.DEFINE_string('model', 'full','Choose one of the models, either full or conv')\r\nFLAGS = tf.app.flags.FLAGS\r\ndef multilayer_fully_connected(images, labels):\r\n images = pt.wrap(images)\r\n with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):\r\n \treturn (images.flatten().fully_connected(100).fully_connected(100).softmax_classifier(10, labels))\r\n\r\n\r\ndef lenet5(images, labels):\r\n images = pt.wrap(images)\r\n with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):\r\n \treturn (images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2).flatten().fully_connected(500).softmax_classifier(10, labels))\r\n\r\n\r\ndef main(_=None):\r\n image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])\r\n labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])\r\n\r\nif FLAGS.model == 'full':\r\n result = multilayer_fully_connected(image_placeholder, labels_placeholder)\r\nelif FLAGS.model == 'conv':\r\n \tresult = lenet5(image_placeholder, labels_placeholder)\r\nelse:\r\n raise ValueError\\\r\n ('model must be full or conv: %s' % FLAGS.model)\r\n\r\naccuracy = result.softmax.evaluate_classifier(labels_placeholder,phase=pt.Phase.test)\r\n\r\ntrain_images, train_labels = data_utils.mnist(training=True)\r\ntest_images, test_labels = data_utils.mnist(training=False)\r\noptimizer = tf.train.GradientDescentOptimizer(0.01)\r\ntrain_op = pt.apply_optimizer(optimizer,losses=[result.loss])\r\nrunner = pt.train.Runner(save_path=FLAGS.save_path)\r\n\r\n\r\nwith tf.Session():\r\n for epoch in range(10):\r\n train_images, train_labels = \\\r\n data_utils.permute_data\\\r\n ((train_images, train_labels))\r\n\r\n runner.train_model(train_op,result.\\\r\n loss,EPOCH_SIZE,\\\r\n 
feed_vars=(image_placeholder,\\\r\n labels_placeholder),\\\r\n feed_data=pt.train.\\\r\n feed_numpy(BATCH_SIZE,\\\r\n train_images,\\\r\n train_labels),\\\r\n print_every=100)\r\n classification_accuracy = runner.evaluate_model\\\r\n (accuracy,\\\r\n TEST_SIZE,\\\r\n feed_vars=(image_placeholder,\\\r\n labels_placeholder),\\\r\n feed_data=pt.train.\\\r\n feed_numpy(BATCH_SIZE,\\\r\n test_images,\\\r\n test_labels))\r\n\r\n print('epoch' , epoch + 1)\r\n print('accuracy', classification_accuracy )\r\n \r\nif __name__ == '__main__':\r\n tf.app.run()\r\n","sub_path":"Chapter08/Python 3.5/pretty_tensor_digit_1.py","file_name":"pretty_tensor_digit_1.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"626682436","text":"# Copyright 2019 Regents of the University of Minnesota.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom datetime import timedelta\nfrom typing import Optional, Dict, Any, Tuple\n\nimport grpc\nimport math\n\nfrom mtap import _discovery, _structs\nfrom mtap.events import EventsClient, Event\nfrom mtap.api.v1 import processing_pb2_grpc, processing_pb2\nfrom mtap.processing.base import EventProcessor, TimerStats, Processor\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProcessingComponent(ABC):\n component_id = None # str: The component_id of the component in a pipeline\n descriptor = None # ComponentDescriptor: The ComponentDescriptor used to create the component.\n\n @abstractmethod\n def call_process(self, event_id: str,\n params: Optional[Dict[str, Any]]) -> Tuple[Dict, Dict, Dict]:\n \"\"\"Calls a processor.\n\n Parameters\n ----------\n event_id: str\n The event to process.\n params: Dict\n The processor parameters.\n\n Returns\n -------\n tuple of dict, dict, dict\n A tuple of the processing result dictionary, the processor times dictionary, and the\n created indices dictionary.\n\n \"\"\"\n ...\n\n def close(self):\n ...\n\n\nclass ProcessorRunner(ProcessingComponent):\n def __init__(self, proc: EventProcessor, client: EventsClient, identifier: Optional[str] = None,\n params: Optional[Dict[str, Any]] = None):\n self.processor = proc\n self.client = client\n self.component_id = identifier\n self.processed = 0\n self.failure_count = 0\n self.params = params or {}\n\n def call_process(self, event_id, params):\n self.processed += 1\n p = dict(self.params)\n if params is not None:\n p.update(params)\n with Processor.enter_context() as c, \\\n Event(event_id=event_id, client=self.client) as event:\n try:\n with Processor.started_stopwatch('process_method') as stopwatch:\n stopwatch.start()\n result = self.processor.process(event, p)\n return result, c.times, event.created_indices\n except Exception as e:\n self.failure_count += 1\n logger.error('Processor \"%s\" failed while processing event with id: %s',\n self.component_id, event_id)\n logger.error(e)\n raise 
e\n\n def close(self):\n self.processor.close()\n\n\nclass RemoteRunner(ProcessingComponent):\n def __init__(self, config, processor_id, component_id, address=None, params=None):\n self._processor_id = processor_id\n self.component_id = component_id\n self._address = address\n self._params = params\n self.processed = 0\n self.failure_count = 0\n self.params = params\n address = self._address\n if address is None:\n discovery = _discovery.Discovery(config)\n address = discovery.discover_processor_service(processor_id, 'v1')\n self._channel = grpc.insecure_channel(address)\n self._stub = processing_pb2_grpc.ProcessorStub(self._channel)\n\n def call_process(self, event_id, params):\n self.processed += 1\n p = dict(self.params or {})\n if params is not None:\n p.update(params)\n\n with EventProcessor.enter_context() as context:\n try:\n request = processing_pb2.ProcessRequest(processor_id=self._processor_id,\n event_id=event_id)\n _structs.copy_dict_to_struct(p, request.params, [p])\n with Processor.started_stopwatch('remote_call'):\n response = self._stub.Process(request)\n r = {}\n _structs.copy_struct_to_dict(response.result, r)\n\n timing_info = response.timing_info\n for k, v in timing_info.items():\n context.add_time(k, v.ToTimedelta())\n\n created_indices = {}\n for created_index in response.created_indices:\n try:\n doc_created_indices = created_indices[created_index.document_name]\n except KeyError:\n doc_created_indices = []\n created_indices[created_index.document_name] = doc_created_indices\n doc_created_indices.append(created_index.index_name)\n\n return r, context.times, created_indices\n except Exception as e:\n self.failure_count += 1\n logger.error('Processor \"%s\" failed while processing event with id: %s',\n self.component_id, event_id)\n logger.error(e)\n raise e\n\n def close(self):\n self._channel.close()\n\n\nclass TimerStatsAggregator:\n def __init__(self):\n self._count = 0\n self._min = timedelta.max\n self._max = timedelta.min\n self._mean = 0.0\n self._sse = 0.0\n self._sum = timedelta(seconds=0)\n\n def add_time(self, time):\n if time < self._min:\n self._min = time\n if time > self._max:\n self._max = time\n\n self._count += 1\n self._sum += time\n time = time.total_seconds()\n delta = time - self._mean\n self._mean += delta / self._count\n delta2 = time - self._mean\n self._sse += delta * delta2\n\n def finalize(self):\n mean = timedelta(seconds=self._mean)\n variance = self._sse / self._count\n std = math.sqrt(variance)\n std = timedelta(seconds=std)\n return TimerStats(mean=mean, std=std, max=self._max, min=self._min, sum=self._sum)\n\n\nclass ProcessingTimesCollector:\n def __init__(self):\n self._executor = ThreadPoolExecutor(max_workers=1)\n self._times_map = {}\n\n def _add_times(self, times):\n for k, v in times.items():\n try:\n agg = self._times_map[k]\n except KeyError:\n agg = TimerStatsAggregator()\n self._times_map[k] = agg\n agg.add_time(v)\n\n def add_times(self, times):\n self._executor.submit(self._add_times, times)\n\n def _get_aggregates(self, prefix):\n return {identifier: stats.finalize()\n for identifier, stats in self._times_map.items() if identifier.startswith(prefix)}\n\n def get_aggregates(self,\n identifier=None) -> Dict[str, TimerStats]:\n future = self._executor.submit(self._get_aggregates, identifier or '')\n return future.result()\n\n def close(self):\n 
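# block until queued timing submissions drain, then release the worker thread\n        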
self._executor.shutdown(wait=True)\n","sub_path":"python/mtap/processing/_runners.py","file_name":"_runners.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"148137635","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.set_xscale('log')\nax.set_xlabel('ka')\nax.set_ylabel('SPL gain (dB)')\n#ax.set_xticks([10,100,1000,10000])\n#ax.set_xticklabels(['10','100','1k','10k'])\nax.grid(which=\"minor\",ls=\":\")\nax.grid(which=\"major\",ls=\"-\")\nax.set_title('Circular radiator')\n\ndata = np.loadtxt(\"out.disc\")\n\na = 2.56\nf = data[:,0]*343.0/(2*np.pi*a*0.0254)\nspl = data[:,1]-11\nax.plot(f,spl,linewidth=1)\n\na = 2.4\nf = data[:,0]*343.0/(2*np.pi*a*0.0254)\nspl = data[:,1]-11\nax.plot(f,spl,'--')\n\ndata = np.loadtxt(\"net.frd\",comments='*')\nf = data[:,0]\nspl = data[:,1]\nax.plot(f,spl,linewidth=1)\n\n\nax.set_xlim([1e2,2e4])\nax.set_ylim([-30,-10])\n\nplt.show()\n\n","sub_path":"theory/disc/src/plot_disc.py","file_name":"plot_disc.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"37845157","text":"# -*- coding: utf-8 -*- \n'''\n# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.\n# \n# This file was generated and any changes will be overwritten.\n'''\n\nfrom __future__ import unicode_literals\nfrom ..model.compliance_status import ComplianceStatus\nfrom datetime import datetime\nfrom ..one_drive_object_base import OneDriveObjectBase\n\n\nclass DeviceComplianceUserStatus(OneDriveObjectBase):\n\n    def __init__(self, prop_dict={}):\n        self._prop_dict = prop_dict\n\n    @property\n    def user_display_name(self):\n        \"\"\"\n        Gets and sets the userDisplayName\n        \n        Returns:\n            str:\n                The userDisplayName\n        \"\"\"\n        if \"userDisplayName\" in self._prop_dict:\n            return self._prop_dict[\"userDisplayName\"]\n        else:\n            return None\n\n    @user_display_name.setter\n    def user_display_name(self, val):\n        self._prop_dict[\"userDisplayName\"] = val\n\n    @property\n    def devices_count(self):\n        \"\"\"\n        Gets and sets the devicesCount\n        \n        Returns:\n            int:\n                The devicesCount\n        \"\"\"\n        if \"devicesCount\" in self._prop_dict:\n            return self._prop_dict[\"devicesCount\"]\n        else:\n            return None\n\n    @devices_count.setter\n    def devices_count(self, val):\n        self._prop_dict[\"devicesCount\"] = val\n\n    @property\n    def status(self):\n        \"\"\"\n        Gets and sets the status\n        \n        Returns: \n            :class:`ComplianceStatus`:\n                The status\n        \"\"\"\n        if \"status\" in self._prop_dict:\n            if isinstance(self._prop_dict[\"status\"], OneDriveObjectBase):\n                return self._prop_dict[\"status\"]\n            else :\n                self._prop_dict[\"status\"] = ComplianceStatus(self._prop_dict[\"status\"])\n                return self._prop_dict[\"status\"]\n\n        return None\n\n    @status.setter\n    def status(self, val):\n        self._prop_dict[\"status\"] = val\n\n    @property\n    def last_reported_date_time(self):\n        \"\"\"\n        Gets and sets the lastReportedDateTime\n        \n        Returns:\n            datetime:\n                The lastReportedDateTime\n        \"\"\"\n        if \"lastReportedDateTime\" in self._prop_dict:\n            return datetime.strptime(self._prop_dict[\"lastReportedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n        else:\n            return None\n\n    @last_reported_date_time.setter\n    def last_reported_date_time(self, val):\n        self._prop_dict[\"lastReportedDateTime\"] = val.isoformat()+\"Z\"\n\n    
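# maps the snake_case accessor onto the camelCase \"userPrincipalName\" key in the backing prop_dict\n    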
@property\n def user_principal_name(self):\n \"\"\"\n Gets and sets the userPrincipalName\n \n Returns:\n str:\n The userPrincipalName\n \"\"\"\n if \"userPrincipalName\" in self._prop_dict:\n return self._prop_dict[\"userPrincipalName\"]\n else:\n return None\n\n @user_principal_name.setter\n def user_principal_name(self, val):\n self._prop_dict[\"userPrincipalName\"] = val\n\n","sub_path":"models/device_compliance_user_status.py","file_name":"device_compliance_user_status.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"248777173","text":"#!/usr/bin/env python\n\ndef check_win(a,b,c,d):\n\tx=0\n\to=0\n\tt=0\n\tif a=='X':\n\t\tx+=1\n\telif a=='O':\n\t\to+=1\n\telif a=='T':\n\t\tt+=1\n\tif b=='X':\n\t\tx+=1\n\telif b=='O':\n\t\to+=1\n\telif b=='T':\n\t\tt+=1\n\tif c=='X':\n\t\tx+=1\n\telif c=='O':\n\t\to+=1\n\telif c=='T':\n\t\tt+=1\n\tif d=='X':\n\t\tx+=1\n\telif d=='O':\n\t\to+=1\n\telif d=='T':\n\t\tt+=1\n\tif (x==3 and t==1) or (x==4):\n\t\treturn 'X'\n\tif (o==3 and t==1) or(o==4):\n\t\treturn 'O' \n\treturn 'N'\n\t\n\t\t\n\t\t\nfilein=open('input_l','r')\nfileout=open('result','w')\nt=filein.readline()\nt=int(t)\nfor i in range(1,t+1):\n\tgame=[]\n\tfor j in range(0,4):\n\t\ttemp=filein.readline()\n\t\trow=[]\n\t\tfor k in range(0,4):\n\t\t\trow.append(temp[k])\n\t\tgame.append(row)\n\t#####input ends\n\t##### X O D N\n\tstatus='S'\n\tfor j in range(0,4):\n\t\ttemp=check_win(game[j][0],game[j][1],game[j][2],game[j][3])\n\t\tif temp=='X' or temp=='O':\n\t\t\tstatus=temp\n\t\t\tbreak\t\n\t\ttemp=check_win(game[0][j],game[1][j],game[2][j],game[3][j])\n\t\tif temp=='X' or temp=='O':\n\t\t\tstatus=temp\n\t\t\tbreak\t\n\tif status=='X':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"X won\\n\")\n\t\tfilein.readline()\n\t\tcontinue\n\telif status=='O':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"O won\\n\")\n\t\tfilein.readline()\n\t\tcontinue\n\tstatus=check_win(game[0][0],game[1][1],game[2][2],game[3][3])\n\tif status=='X':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"X won\\n\")\n\t\tfilein.readline()\t\n\t\tcontinue\n\telif status=='O':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"O won\\n\")\n\t\tfilein.readline()\t\n\t\tcontinue\n\tstatus=check_win(game[0][3],game[1][2],game[2][1],game[3][0])\n\tif status=='X':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"X won\\n\")\n\t\tfilein.readline()\t\n\t\tcontinue\n\telif status=='O':\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"O won\\n\")\n\t\tfilein.readline()\n\t\tcontinue\n\t##check whether there is .\n\tN=False\n\tfor j in range(0,4):\n\t\tfor k in range(0,4):\n\t\t\tif game[j][k]=='.':\n\t\t\t\tN=True\n\t\t\t\tbreak\n\t\tif N==True:\n\t\t\tbreak\n\tif N==True:\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"Game has not completed\\n\")\n\telse:\n\t\tfileout.write(\"Case #\"+str(i)+\": \"+\"Draw\\n\")\n\n\tfilein.readline()\nfilein.close()\nfileout.close()\n\t\n","sub_path":"solutions_2453486_1/Python/NealFun/tictac.py","file_name":"tictac.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"613654350","text":"from starlette.applications import Starlette\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.responses import JSONResponse\n\n\napp = Starlette()\n\n\n@app.route('/')\nclass PingResource(HTTPEndpoint):\n \n async def get(self, request):\n return JSONResponse(dict(ping=\"pong\"))\n\n\n@app.route('/hey/{person}')\nclass 
HelloResource(HTTPEndpoint):\n\n async def get(self, request):\n person = request.path_params['person']\n print(\"MY GUY?\")\n return JSONResponse(dict(whuddup=person), status_code=200)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"35308854","text":"# -*- coding:utf8 -*-\n\ntry:\n # noinspection PyUnresolvedReferences\n from typing import List\nexcept ImportError:\n pass\n\n\nfrom .node_meta import NodeMeta\nfrom ..standardize import StandardizedData, TermVal, StdTermVal\nfrom ..keypath import kp_push\nfrom ..prune import CanBeTagged\nfrom ..utils import same_type\n\n\n__all__ = ['DataNode', 'EntityNode', 'GroupNode', 'AddableNode', 'build_data_tree']\n\n\ndef look_by_path(node, path):\n assert isinstance(path, list)\n\n sub = None\n\n if node is None:\n sub = None\n elif not path:\n sub = node\n else:\n idx = path[0]\n\n if isinstance(node, GroupNode) and isinstance(idx, (str, unicode)):\n for ch in node.children:\n if ch.key == idx:\n sub = ch\n break\n sub = look_by_path(sub, path[1:])\n\n elif isinstance(node, AddableNode) and isinstance(idx, int):\n if idx < len(node.children):\n sub = look_by_path(node.children[idx], path[1:])\n\n return sub\n\n\nclass DataNode(CanBeTagged):\n keypath = None\n position = None\n key = None\n value = None\n children = []\n\n def __init__(self, keypath, position):\n self.keypath = keypath\n self.position = position\n self.meta = NodeMeta() # type: NodeMeta\n\n def __eq__(self, other):\n return same_type(self, other) and self.keypath == other.keypath and self.position == other.position\n\n def from_branch(self, ns):\n if self.meta.namespace == ns:\n return True\n elif isinstance(self.meta.namespace, set):\n return ns in self.meta.namespace\n return False\n\n\nclass EntityNode(DataNode):\n u\"\"\"非 容器节点的 实体节点\"\"\"\n\n\nclass GroupNode(EntityNode):\n u\"\"\"组节点\"\"\"\n\n def __init__(self, children, keypath=None, position=None):\n # type: ( List[AddableNode] ) -> None\n super(GroupNode, self).__init__(keypath, position)\n self.children = sorted(children, key=self.addable_node_key) # type: List[AddableNode]\n\n @staticmethod\n def addable_node_key(addable_node):\n return addable_node.key\n\n def __repr__(self):\n return 'GroupNode(keypath={}, position={}, children=[{}])'.format(repr(self.keypath),\n repr(self.position),\n ','.join(map(repr, self.children)))\n\n def __eq__(self, other):\n return super(GroupNode, self).__eq__(other) and self.children == other.children\n\n\nclass AddableNode(DataNode):\n u\"\"\"\n 用来容纳 可多次添加的组 或 可多次添加的末端值 的容器节点\n\n 为了实现简单,即使节点不可多次添加,也把它当作唯一的元素放在容器节点中\n \"\"\"\n\n def __init__(self, key, children, keypath=None, position=None):\n # type: ( str, List[EntityNode] ) -> None\n super(AddableNode, self).__init__(keypath, position)\n self.key = key\n self.children = children # type: List[EntityNode]\n\n def __repr__(self):\n return 'AddableNode(keypath={}, position={}, key={}, children=[{}])'.format(repr(self.keypath),\n repr(self.position),\n repr(self.key),\n ','.join(map(repr, self.children)))\n\n def __eq__(self, other):\n return super(AddableNode, self).__eq__(other) and self.key == other.key and self.children == other.children\n\n\nclass LeafNode(EntityNode):\n u\"\"\"末端值节点\"\"\"\n\n def __init__(self, value, keypath=None, position=None, namespace=None, ns_key=None):\n super(LeafNode, self).__init__(keypath, position)\n self.value = value\n self.meta.namespace = namespace\n 
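# keep the standardized-namespace key so the leaf can be traced back to its source field\n        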
self.meta.ns_key = ns_key\n\n def __repr__(self):\n if isinstance(self.value, unicode):\n v = 'u' + repr(self.value.encode('utf-8'))\n else:\n v = repr(str(self.value))\n return 'LeafNode(keypath={}, position={}, value={})'.format(repr(self.keypath), repr(self.position), v)\n\n def __eq__(self, other):\n return super(LeafNode, self).__eq__(other) and self.value == other.value\n\n\ndef _build_data_tree(raw_data):\n u\"\"\"从 StandardizedData.data 到 DataTree\"\"\"\n if isinstance(raw_data, dict):\n return GroupNode(children=[\n AddableNode(key=k, children=[_build_data_tree(v) for v in lst])\n for k, lst in remove_special_dct_keys(raw_data).iteritems()])\n elif isinstance(raw_data, StdTermVal):\n return LeafNode(raw_data.value, namespace=raw_data.namespace, ns_key=raw_data.key)\n elif isinstance(raw_data, TermVal):\n return LeafNode(raw_data.value)\n elif isinstance(raw_data, list):\n raise TypeError('cant build data tree with list as root')\n else:\n return LeafNode(raw_data)\n\n\ndef _setup_keypath_position(data_tree, keypath='', position=''):\n u\"\"\"为 DataTree 按照节点位置添加 keypath 和 position\"\"\"\n assert isinstance(data_tree, DataNode)\n\n data_tree.keypath = keypath\n data_tree.position = position\n\n if isinstance(data_tree, GroupNode):\n for data_node in data_tree.children:\n _setup_keypath_position(data_node, kp_push(keypath, data_node.key), position)\n\n elif isinstance(data_tree, AddableNode):\n for i, data_node in enumerate(data_tree.children):\n _setup_keypath_position(data_node, keypath, kp_push(position, str(i)))\n\n\ndef build_data_tree(standardized_data):\n assert isinstance(standardized_data, StandardizedData)\n d = _build_data_tree(standardized_data.data)\n _setup_keypath_position(d)\n return d\n\n\ndef remove_special_dct_keys(dct):\n # 因为历史原因,qcremark 为可能出现在正确答案中的\n dct = {k: v for k, v in dct.iteritems() if k.lower() != 'qcremark'}\n\n # 所有双下划线开头认为是额外信息\n dct = {k: v for k, v in dct.iteritems() if not k.startswith('__')}\n\n return dct\n","sub_path":"cie_eval/data_tree/data_tree.py","file_name":"data_tree.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175204507","text":"from rest_framework import serializers\n\nfrom bluebottle.statistics.models import Statistic\n\n\nclass StatisticSerializer(serializers.ModelSerializer):\n\n value = serializers.CharField(source='calculated_value')\n\n class Meta:\n model = Statistic\n fields = ('id', 'title', 'type', 'value', 'language')\n","sub_path":"bluebottle/statistics/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"90221149","text":"from sqlalchemy_serializer import SerializerMixin\nfrom sqlalchemy import Column, Integer, UnicodeText, DateTime, Boolean, func\n\nfrom src import db\n\n\nclass Product(db.Model, SerializerMixin):\n\n __tablename__ = 'products'\n __schema_only__ = ('product_id', 'product_description', 'product_price', 'number_of_product'\n , 'created_datetime', '-del_flg')\n product_id = Column(Integer, primary_key=True, autoincrement=True)\n product_description = Column(UnicodeText, nullable=False)\n product_price = Column(Integer, default=0)\n number_of_product = Column(Integer, default=0)\n created_datetime = Column(DateTime, default=func.now(), index=True)\n del_flg = Column(Boolean, default=False)\n\n def __init__(self, product_description, number_of_product, product_price):\n 
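# id, created_datetime and del_flg are filled in by the column defaults\n        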
self.product_description = product_description\n self.number_of_product = number_of_product\n self.product_price = product_price\n\n # For extend features in the future\n","sub_path":"src/models/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"434063981","text":"\n## @project > Solutions For Sphere Online Judge Assignments\n## @authors > DeeQuation\n## @version > 01.00.00\n## @release > 17.10.15\n## @licence > MIT\n\nimport re as regex;\n\nfor _ in range(0, int(input())):\n\n m = regex.match(\"(\\d+) (\\d+)\", input());\n\n for h in range(0, int(m.group(1))):\n\n line = \"\";\n\n for w in range(0, int(m.group(2))):\n\n line = line + (\"*\" if ((h == 0) or (h == int(m.group(1))- 1) or (w == 0) or (w == int(m.group(2))- 1)) else \".\");\n\n print (line);\n","sub_path":"ID_012177.py","file_name":"ID_012177.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"13629150","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport mysql.connector, random\n\nconn = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\", database=\"test\")\ncursor = conn.cursor(buffered=True)\n\ndef initTables(cursor):\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS groupe\n (\n id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\n nom VARCHAR(100)\n );\n \"\"\")\n\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS utilisateur\n (\n id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\n nom VARCHAR(100),\n prenom VARCHAR(100),\n email VARCHAR(255),\n date_naissance DATE,\n pays VARCHAR(255),\n ville VARCHAR(255),\n code_postal VARCHAR(5),\n telephone VARCHAR(30)\n );\n \"\"\")\n\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS utilisateur_groupe (\n id int(11) PRIMARY KEY AUTO_INCREMENT, \n utilisateur_id int NOT NULL, \n groupe_id int NOT NULL, \n FOREIGN KEY(utilisateur_id) REFERENCES utilisateur(id), \n FOREIGN KEY(groupe_id) REFERENCES groupe(id)\n );\n \"\"\")\n\n cursor.execute(\"\"\"SET FOREIGN_KEY_CHECKS=0\"\"\")\n cursor.execute(\"\"\"TRUNCATE TABLE groupe\"\"\")\n cursor.execute(\"\"\"TRUNCATE TABLE utilisateur\"\"\")\n cursor.execute(\"\"\"TRUNCATE TABLE utilisateur_groupe\"\"\")\n cursor.execute(\"\"\"SET FOREIGN_KEY_CHECKS=1\"\"\")\n return cursor\n\ncursor = initTables(cursor)\ngroupeNames = [\"Les fous\", \"Les maîtres de la chèvre\", \"Les chats\", \"les chiens\", \"Batman\", \"Superman\", \"Green Lanterne\", \"Flash\"]\n\nfor name in groupeNames:\n list = (name,)\n cursor.execute(\"\"\"INSERT INTO groupe (nom) VALUES (%s)\"\"\", list)\n\nconn.commit()\n\nnames = [\"Adam\", \"Alex\", \"Alexandre\", \"Alexis\", \"Anthony\", \"Antoine\", \"Benjamin\", \"Cédric\"]\nfirstnames = [\"Laurence\", \"Laurie\", \"Léa\", \"Léanne\", \"Maélie\", \"Maéva\", \"Maika\"]\nemails = [\"laurence@email.fr\", \"laurie@email.fr\", \"lea@email.fr\", \"leanne@email.fr\", \"maelie@email.fr\", \"maeva@email.fr\", \"maika@email.fr\"]\npays = [\"France\", \"USA\", \"Canada\"]\ncitys = [\"Paris\", \"Tokyo\", \"New York\", \"Berlin\"]\npostalCodes = [75001, 75002, 75003, 65464, 65465, 88888]\n\ni = 0\nwhile i != 100:\n i = i + 1\n user = (random.choice(names),\n random.choice(firstnames),\n random.choice(emails),\n \"2010-01-01\",\n random.choice(pays),\n random.choice(citys),\n random.choice(postalCodes),\n random.randint(1111111111, 9999999999)\n )\n\n cursor.execute(\"\"\"INSERT INTO 
utilisateur (nom, prenom, email, date_naissance, pays, ville, code_postal, telephone) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\", user)\n for looping in range(1, random.randint(1, 5)):\n us_gr = [i, looping]\n cursor.execute(\"\"\"INSERT INTO utilisateur_groupe (utilisateur_id, groupe_id) VALUES (%s, %s)\"\"\", us_gr)\n\nconn.commit()\nconn.close()","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"137084537","text":"from django import forms\nfrom django.forms import formset_factory\nfrom .models import Site, Sitemap, Vulnerability, VULN_CATEGORIES\n\nclass SiteForm(forms.ModelForm):\n site_name = forms.CharField(label=\"Site Name\", widget=forms.TextInput(\n attrs={\n 'class':'form-control',\n 'placeholder':'Site Name'\n }\n ))\n site_location = forms.URLField(label=\"Site URL\", required=False, widget=forms.TextInput(\n attrs={\n 'class':'form-control',\n 'placeholder':'Site URL'\n }\n ))\n is_active = forms.BooleanField(label=\"Site Active\", required=False)\n\n class Meta:\n model = Site\n fields = ('site_name', 'site_location', 'is_active')\n\nSiteFormset = formset_factory(SiteForm, extra=1)\n\nclass VulnerabilityForm(forms.ModelForm):\n vulnerability = forms.CharField(label=\"Vulnerability\", widget=forms.TextInput(\n attrs = {\n 'class':'form-control',\n 'placeholder':'Bug title'\n }\n ))\n category = forms.ChoiceField(label=\"Severity\", choices=VULN_CATEGORIES, required=False, widget=forms.Select(\n attrs={\n 'class':'form-control',\n }\n ))\n request = forms.CharField(label=\"Request/Comments\", max_length=999, required=False, widget=forms.Textarea(\n attrs={\n 'class':'form-control',\n 'width':'50%',\n 'height': '60px',\n }\n ))\n is_reported = forms.BooleanField(label=\"Is Reported?\", required=False, widget=forms.CheckboxInput(\n attrs={\n 'class':'form-control',\n }\n ))\n is_fixed = forms.BooleanField(label=\"Is Fixed?\", required=False, widget=forms.CheckboxInput(\n attrs={\n 'class':'form-control',\n }\n ))\n\n class Meta:\n model = Vulnerability\n fields = ('vulnerability', 'category', 'request', 'is_reported', 'is_fixed')\n","sub_path":"sitemap/mapper/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"384133744","text":"# 读文件\n# file = \"Test/a_example.txt\"\nfile = \"Test/b_read_on.txt\"\n# file = \"Test/c_incunabula.txt\"\n# file = \"Test/d_tough_choices.txt\"\n# file = \"Test/e_so_many_books.txt\"\n# file = \"Test/f_libraries_of_the_world.txt\"\n\nf = open(file, \"r\")\n\nbooks, libraries, days = tuple(int(i) for i in f.readline().rstrip().split(\" \"))\n\n# library_data = tuple(([], set()) for i in range(libraries))\n\nlibrary_data = {i: [] for i in range(libraries)}\n\nlibrary_books = {i: set() for i in range(libraries)}\n\nbook_scores = tuple(int(i) for i in f.readline().rstrip().split(\" \"))\n\n# 扫图书馆\nfor i in range(libraries):\n for j in f.readline().rstrip().split(\" \"):\n library_data[i].append(int(j))\n for j in f.readline().rstrip().split(\" \"):\n library_books[i].add(int(j))\nf.close()\n\n# 操作\n\nalready_sign_up = set()\n\nlibrary_score_queue = [i for i in range(libraries)]\n\n# library_score_queue = sorted(library_score_queue, key=lambda x: -sum([book_scores[i] for i in library_books[x]]))\n# library_score_queue = sorted(library_score_queue, key=lambda x: -sum([book_scores[i] for i in 
library_books[x]]) *\n#                                                                   (library_data[x][2] / library_data[x][1]))\n\n\n# 可以除天数\n# print(library_score_queue)\n\nresult = [[libraries]] + [[] for i in range(libraries * 2)]\n\n# for i in range(libraries):\ni = 0\nwhile i < result[0][0]:\n    library_score_queue = sorted(library_score_queue, key=lambda x: -sum(\n        [book_scores[i] for i in library_books[x] if i not in already_sign_up]) * (library_data[x][2] / (\n            library_data[x][1] ** 2)))\n\n    tmp = library_books[library_score_queue[0]] - already_sign_up\n    if len(tmp) == 0:\n        result[0][0] = result[0][0] - 1\n        result.pop()\n        continue\n    result[(i + 1) * 2 - 1].append(library_score_queue[0])  # 图书馆编号\n    # result[(i + 1) * 2 - 1].append(len(library_books[library_score_queue[i]]))  # 书数量\n    # result[(i + 1) * 2] = list(library_data[library_score_queue[i]])\n    #\n    result[(i + 1) * 2 - 1].append(len(tmp))\n    result[(i + 1) * 2] = list(tmp)\n\n    # 去重\n    already_sign_up = already_sign_up | tmp\n    # print(\"already_sign_up: \" + str(already_sign_up))\n\n    print(i)\n    i += 1\n    library_score_queue.pop(0)\n\n''' for j in range(libraries):\n        if j != library_score_queue[i]:\n            # for item in already_sign_up:\n            # print(item)\n            # library_data[j][1].discard(item)\n            library_books[j] -= already_sign_up\n'''\n# 写文件\nif __name__ == '__main__':\n    print(library_data)\n    print(result)\n    output = result\n\n    f = open(file.replace(\"txt\", \"out\"), \"w\")\n    for i in output:\n        f.write(\" \".join([str(j) for j in i]))\n        f.write(\"\\n\")\n\n    f.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"85349351","text":"# Clean Code Assignment\n# Group Members: Nobin, Subin, Kiran, Bala, Mizba, Ann\n# Run python3 park.py\n\n\ndef getLotInfo(parkingLot):\n    print(\"\"\"\n    1. Show Parking Lot\n    2. Search with Color\n    3. Search with Registration number\n    \"\"\")\n    inputInfoChoice = input(\"Enter your choice: \")\n\n    menu = {\n        '1': lambda: showParkingLot(parkingLot),\n        '2': lambda: getCarWithColor(parkingLot),\n        '3': lambda: getSlotWithRegNo(parkingLot),\n    }\n\n    menuDrive = menu.get(inputInfoChoice, lambda: print('Invalid Choice'))\n    menuDrive()\n\n\ndef getCarWithColor(parkingLot):\n    inputColor = input(\"Enter color to be searched : \")\n    carsWithColor = []\n    for slotNo, slot in enumerate(parkingLot):\n        for regNo, carColor in slot.items():\n            if(carColor == inputColor):\n                details = str(slotNo+1) + \"\\t\\t\" + regNo + \"\\t\\t \" + carColor\n                carsWithColor.append(details)\n    if carsWithColor == []:\n        print(\"Car with\", inputColor, \"color not found!\")\n    else:\n        print(\"Slot No.\\tRegistration No.\\t Color\")\n        print(*carsWithColor, sep=\"\\n\")\n\n\ndef getSlotWithRegNo(parkingLot):\n    inputRegno = input(\"Enter Registration number to be searched : \")\n    for slotNo, slot in enumerate(parkingLot):\n        if inputRegno in slot.keys():\n            print(\"Allocated Slot Number: \", slotNo+1)\n            break\n    else:\n        # for-else: runs only when no slot matched the registration number\n        print(\"Car with Registration number not found!\")\n\n\ndef vacateSlot(parkingLot):\n    numberOfSlots = len(parkingLot)\n    while(True):\n        try:\n            inputSlot = int(input('Enter the slot: '))\n        except ValueError:\n            print(\"Enter an Integer!\")\n        else:\n            if inputSlot < 1 or inputSlot > numberOfSlots:\n                print(\"Not a valid slot!\")\n            else:\n                break\n\n    parkingLot[inputSlot-1].clear()\n    print(\"Slot number\", inputSlot, \"is free\")\n\n\ndef allotSlot(parkingLot):\n    if {} in parkingLot:\n        print(\"\"\"Format for details: Registration No. 
Color\n eg: KL-01-DZ-4659 Red\n \"\"\")\n inputDetails = input('Enter the details of the car: ')\n temp = inputDetails.split()\n if len(temp) == 2:\n regNo, carColor = temp\n for slotNo, slot in enumerate(parkingLot):\n if regNo in slot.keys():\n print(\"Car Registration value is not unique!\")\n break\n elif slot == {}:\n slot[regNo] = carColor\n print(\"Allocated Slot number: \", slotNo+1)\n break\n else:\n print(\"Input format is Incorrect!\")\n else:\n print(\"No Slots available!\")\n\n\ndef showParkingLot(parkingLot):\n lot = []\n for slotNo, slot in enumerate(parkingLot):\n for regNo, carColor in slot.items():\n details = str(slotNo+1) + \"\\t\\t\" + regNo + \"\\t\\t \" + carColor\n lot.append(details)\n if lot == []:\n print(\"Empty Lot\")\n else:\n print(\"Slot No.\\tRegistration No.\\t Color\")\n print(*lot, sep=\"\\n\")\n\n\ndef main():\n while(True):\n try:\n numberOfSlots = int(input('Enter the number of slots: '))\n except ValueError:\n print(\"Enter an Integer!\")\n else:\n if numberOfSlots < 1:\n print(\"Enter atleast 1 slot!\")\n else:\n break\n\n parkingLot = [{} for i in range(0, numberOfSlots)]\n\n menu = {\n '1': lambda: allotSlot(parkingLot),\n '2': lambda: vacateSlot(parkingLot),\n '3': lambda: getLotInfo(parkingLot),\n }\n\n while(True):\n print(\"\"\"\n 1. New Car Entry\n 2. Vacate Parking Lot\n 3. Get Lot Information\n 4. Exit Program\n \"\"\")\n\n inputChoice = input(\"Enter your choice: \")\n if inputChoice == '4':\n exit(0)\n\n menuDrive = menu.get(inputChoice, lambda: print('Invalid Choice'))\n menuDrive()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"park.py","file_name":"park.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"230561330","text":"import os\nimport time\nfrom werkzeug import secure_filename\nfrom config import *\nfrom egroup import EGroup\nimport subprocess\n\ndef _check_dir(dir):\n return (os.path.isdir(dir) and os.path.exists(dir))\n\ndef _check_file(file):\n return (os.path.isfile(file) and os.path.exists(file))\n \ndef _allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n    \ndef _load_properties(filepath, sep = '=', comment_char = '#'):\n    \"\"\"\n    Read the file passed as parameter as a properties file.\n    \"\"\"\n    props = {}\n    with open(filepath, \"rt\") as f:\n        for line in f:\n            l = line.strip()\n            if l and not l.startswith(comment_char):\n                key_value = l.split(sep)\n                key = key_value[0].strip()\n                value = sep.join(key_value[1:]).strip().strip('\"') \n                props[key] = value \n    return props\n    \ndef _bool(str):\n    return str.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\ndef _check_key(d, v):\n    return v in d and d[v] is not None\n    \nclass Loader:\n    \n    def __init__(self, det, db, auth):\n        \n        # Checking directories\n        self.data_dir = os.path.join(DBSPOOL_SPOOL, det + \"/\" + db)\n        if not _check_dir(self.data_dir): raise Exception(404, \"Detector (%s) and database (%s) not supported?\" % (det, db))\n        self.state_dir = os.path.join(DBSPOOL_STATE, det + \"/\" + db)\n        if not _check_dir(self.state_dir): raise Exception(500, \"Server configuration issue (state directory not found)?\")\n        self.logs_dir = os.path.join(DBSPOOL_LOGS, det + \"/\" + db)\n        if not _check_dir(self.logs_dir): raise Exception(500, \"Server configuration issue (logs directory not found)?\")\n        \n        # Authentication\n        props_file = os.path.join(PROP_DIR, det + '_' + db + '.properties')\n        if not _check_file(props_file): raise Exception(500, \"Server configuration error (properties file not found)?\")\n        self.authenticate(_load_properties(props_file), auth)\n        \n    def authenticate(self, props, auth):\n        \n        if 'user_check_egroup' in props and props['user_check_egroup'] is not None:\n            \n            if not _check_key(props, 'user_check_egroup_username'):\n                raise Exception(500, \"Server configuration issue (no user_check_egroup_username defined)?\")\n            if not _check_key(props, 'user_check_egroup_password'):\n                raise Exception(500, \"Server configuration issue (no user_check_egroup_password defined)?\")\n            \n            members = EGroup(props['user_check_egroup_username'], props['user_check_egroup_password']).members(props['user_check_egroup'])\n            if not auth.username.upper() in map(str.upper, members):\n                raise Exception(401, \"User (%s) not authorized by egroup (%s)?\" % (auth.username, props['user_check_egroup']))\n        \n        if 'user_check_os' in props and _bool(props['user_check_os']):\n            if 0 != subprocess.call([\"id\", \"-u\", auth.username], stdout = subprocess.PIPE):\n                raise Exception(401, \"User (%s) not authorized by os?\" % auth.username)\n        \n    def load(self, request):\n        \n        # Checking file\n        file = request.files['file']\n        if file is None: raise Exception(400, \"File not provided?\")\n        if not _allowed_file(file.filename): raise Exception(406, \"File type not allowed?\")\n        \n        # Setup file names\n        filename = secure_filename(file.filename)\n        data_file = os.path.join(self.data_dir, filename)\n        state_file = os.path.join(self.state_dir, filename)\n        logs_file = os.path.join(self.logs_dir, filename)\n\n        # Remove state file (if exists)\n        if os.path.exists(state_file):\n            os.remove(state_file)\n\n        # Copy data file\n        file.save(data_file)\n\n        # Wait for state file to appear\n        while not os.path.exists(state_file):\n            time.sleep(1)\n        \n        # Read log\n        self.log = open(logs_file, 'r').read()\n        \n        # Return status\n        return int(open(state_file, 
'r').readline())\n","sub_path":"src/main/python/web/app/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"107556367","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Steps for the black_box features.\n\nThese steps use the AM APIs to initiate transfers and validate the\ncontents of their AIPs without relying on user interface interactions.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nimport os\n\nfrom behave import given, when, then, use_step_matcher\nfrom lxml import etree\nimport metsrw\n\nfrom features.steps import utils\n\n\n# map the event types as written in the feature file\n# to what AM outputs in the METS\nPREMIS_EVENT_TYPES = {\n \"file format identification\": \"format identification\",\n \"ingestion\": \"ingestion\",\n \"message digest calculation\": \"message digest calculation\",\n \"reingestion\": \"reingestion\",\n \"validation\": \"validation\",\n \"virus scanning\": \"virus check\",\n}\n\n\ndef format_original_files_error(transfer):\n return 'The {} file does not contain any \"original\" files in its fileSec'.format(\n transfer[\"aip_mets_location\"]\n )\n\n\ndef format_no_files_error(transfer):\n return \"The {} file does not contain any files in its fileSec\".format(\n transfer[\"aip_mets_location\"]\n )\n\n\n@given('a \"{transfer_type}\" transfer type located in \"{sample_transfer_path}\"')\ndef step_impl(context, transfer_type, sample_transfer_path):\n transfer = utils.create_sample_transfer(\n context.api_clients_config, sample_transfer_path, transfer_type=transfer_type\n )\n context.current_transfer = transfer\n\n\n@given(\n 'a \"{transfer_type}\" transfer type located in \"{sample_transfer_path}\" has been reingested'\n)\ndef step_impl(context, transfer_type, sample_transfer_path):\n context.execute_steps(\n 'Given a \"{}\" transfer type located in \"{}\"\\n'.format(\n transfer_type, sample_transfer_path\n )\n )\n reingest = utils.create_reingest(\n context.api_clients_config, context.current_transfer\n )\n context.current_transfer.update(reingest)\n\n\n@when(\"the AIP is downloaded\")\ndef step_impl(context):\n utils.is_valid_download(context.current_transfer[\"aip_mets_location\"])\n\n\n@when(\"the transfer is approved\")\ndef step_impl(context):\n utils.assert_jobs_completed_successfully(\n context.api_clients_config,\n context.current_transfer[\"transfer_uuid\"],\n job_microservice=\"Approve transfer\",\n )\n\n\n@when(\"the transfer compliance is verified\")\ndef step_impl(context):\n utils.assert_jobs_completed_successfully(\n context.api_clients_config,\n context.current_transfer[\"transfer_uuid\"],\n job_microservice=\"Verify transfer compliance\",\n )\n\n\n@when(\"the reingest has been processed\")\ndef step_impl(context):\n utils.is_valid_download(context.current_transfer[\"aip_mets_location\"])\n utils.is_valid_download(context.current_transfer[\"reingest_aip_mets_location\"])\n\n\n@then(\"the AIP METS can be accessed and parsed by mets-reader-writer\")\ndef step_impl(context):\n mets = metsrw.METSDocument.fromfile(context.current_transfer[\"aip_mets_location\"])\n error = (\n \"METS read successfully by metsrw but does not contain an \"\n \"objects directory structure\"\n )\n assert mets.get_file(type=\"Directory\", label=\"objects\") is not None, error\n\n\n@then(\"the AIP contains all files that were present in the transfer\")\ndef step_impl(context):\n \"\"\"Compare METS file entries 
with transfer contents.\n\n For each 'original' file entry assert that its path exists in the\n transfer directory.\n \"\"\"\n mets = metsrw.METSDocument.fromfile(context.current_transfer[\"aip_mets_location\"])\n # cache each query to the SS browse endpoint by directory name\n cached_directories = {}\n # look for an 'objects' directory in the transfer directory\n contains_objects_dir = False\n objects_dir = os.path.join(context.current_transfer[\"transfer_path\"], \"objects\")\n objects_dir_browse_result = utils.browse_default_ts_location(\n context.api_clients_config, objects_dir\n )\n if objects_dir_browse_result:\n contains_objects_dir = True\n cached_directories[objects_dir] = objects_dir_browse_result\n # get the paths (before sanitization) of each 'original' file\n original_file_paths = [\n utils.get_path_before_sanitization(fsentry, contains_objects_dir)\n for fsentry in mets.all_files()\n if fsentry.use == \"original\"\n ]\n assert original_file_paths, format_original_files_error(context.current_transfer)\n # verify each path has an entry in the transfer directory\n for file_path in original_file_paths:\n file_dir = os.path.join(\n context.current_transfer[\"transfer_path\"], os.path.dirname(file_path)\n )\n file_name = os.path.basename(file_path)\n if file_dir not in cached_directories:\n file_dir_browse_result = utils.browse_default_ts_location(\n context.api_clients_config, file_dir\n )\n cached_directories[file_dir] = file_dir_browse_result\n assert file_name in cached_directories[file_dir][\"entries\"]\n\n\n@then(\"the AIP contains a file called README.html in the data directory\")\ndef step_impl(context):\n readme_file = utils.get_aip_file_location(\n context.current_transfer[\"extracted_aip_dir\"],\n os.path.join(\"data\", \"README.html\"),\n )\n utils.is_valid_download(readme_file)\n\n\n@then(\"the AIP contains a file called METS.xml in the data directory\")\ndef step_impl(context):\n mets_file = utils.get_aip_mets_location(\n context.current_transfer[\"extracted_aip_dir\"],\n context.current_transfer[\"sip_uuid\"],\n )\n utils.is_valid_download(mets_file)\n\n\n@then(\"the AIP conforms to expected content and structure\")\ndef step_impl(context):\n expected_directories = [\n \"objects\",\n \"logs\",\n os.path.join(\"objects\", \"submissionDocumentation\"),\n ]\n for directory in expected_directories:\n assert os.path.isdir(\n os.path.join(\n context.current_transfer[\"extracted_aip_dir\"], \"data\", directory\n )\n )\n\n\n@then(\n \"the fileSec of the AIP METS will record every file in the objects \"\n \"and metadata directories of the AIP\"\n)\ndef step_impl(context):\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n filesec_files = utils.get_filesec_files(tree, nsmap=context.mets_nsmap)\n assert filesec_files, format_no_files_error(context.current_transfer)\n for filesec_file in filesec_files:\n flocat = filesec_file.find(\"mets:FLocat\", namespaces=context.mets_nsmap)\n href = flocat.attrib[\"{http://www.w3.org/1999/xlink}href\"]\n assert os.path.exists(\n os.path.join(context.current_transfer[\"extracted_aip_dir\"], \"data\", href)\n )\n\n\n@then(\n \"the physical structMap of the AIP METS accurately reflects \"\n \"the physical layout of the AIP\"\n)\ndef step_impl(context):\n root_path = os.path.join(context.current_transfer[\"extracted_aip_dir\"], \"data\")\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n structmap = tree.find(\n 'mets:structMap[@TYPE=\"physical\"]', namespaces=context.mets_nsmap\n )\n transfer_dir = 
structmap.find(\n 'mets:div[@LABEL=\"{}-{}\"][@TYPE=\"Directory\"]'.format(\n context.current_transfer[\"transfer_name\"],\n context.current_transfer[\"sip_uuid\"],\n ),\n namespaces=context.mets_nsmap,\n )\n error = (\n 'The {} file does not contain any \"Directory\" entries in its physical '\n \"structMap\".format(context.current_transfer[\"aip_mets_location\"])\n )\n assert len(transfer_dir), error\n for item in transfer_dir:\n utils.assert_structmap_item_path_exists(item, root_path)\n\n\n@then(\"every object in the AIP has been assigned a UUID in the AIP METS\")\ndef step_impl(context):\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n filesec_files = utils.get_filesec_files(tree, nsmap=context.mets_nsmap)\n assert filesec_files, format_no_files_error(context.current_transfer)\n for filesec_file in filesec_files:\n # remove the 'file-' prefix from the UUID of the file\n file_uuid = filesec_file.attrib[\"ID\"].split(\"file-\")[-1]\n amdsec_id = filesec_file.attrib[\"ADMID\"]\n amdsec = tree.find(\n 'mets:amdSec[@ID=\"{}\"]'.format(amdsec_id), namespaces=context.mets_nsmap\n )\n object_uuid = amdsec.xpath(\n \"mets:techMD/mets:mdWrap/mets:xmlData/premis3:object/\"\n 'premis3:objectIdentifier/premis3:objectIdentifierType[text()=\"UUID\"]/'\n \"../premis3:objectIdentifierValue\",\n namespaces=context.mets_nsmap,\n )[0].text\n assert object_uuid == file_uuid\n\n\n@then(\"every object in the objects and metadata directories has an amdSec\")\ndef step_impl(context):\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n filesec_files = utils.get_filesec_files(tree, nsmap=context.mets_nsmap)\n assert filesec_files, format_no_files_error(context.current_transfer)\n for filesec_file in filesec_files:\n amdsec_id = filesec_file.attrib[\"ADMID\"]\n amdsec = tree.find(\n 'mets:amdSec[@ID=\"{}\"]'.format(amdsec_id), namespaces=context.mets_nsmap\n )\n assert amdsec is not None\n\n\n@then(\n \"every PREMIS event recorded in the AIP METS records the logged-in \"\n \"user, the organization and the software as PREMIS agents\"\n)\ndef step_impl(context):\n expected_agent_types = set(\n [\"Archivematica user pk\", \"repository code\", \"preservation system\"]\n )\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n premis_events = tree.findall(\n 'mets:amdSec/mets:digiprovMD/mets:mdWrap[@MDTYPE=\"PREMIS:EVENT\"]/'\n \"mets:xmlData/premis3:event\",\n namespaces=context.mets_nsmap,\n )\n error = \"The {} file does not contain any PREMIS events\".format(\n context.current_transfer[\"aip_mets_location\"]\n )\n assert premis_events, error\n for event in premis_events:\n event_agents = event.findall(\n \"premis3:linkingAgentIdentifier\", namespaces=context.mets_nsmap\n )\n event_agent_types = set(\n [\n event_agent.findtext(\n \"premis3:linkingAgentIdentifierType\", namespaces=context.mets_nsmap\n )\n for event_agent in event_agents\n ]\n )\n assert event_agent_types == expected_agent_types\n\n\n@then(\"the AIP can be successfully stored\")\ndef step_impl(context):\n context.execute_steps(\n \"Then the AIP METS can be accessed and parsed by mets-reader-writer\\n\"\n )\n\n\nuse_step_matcher(\"re\")\n\n\n@then(\"there is a.? 
(?P<event_type>.*) event for each original object in the AIP METS\")\ndef step_impl(context, event_type):\n    mets_path = context.current_transfer[\"aip_mets_location\"]\n    if event_type == \"reingestion\":\n        mets_path = context.current_transfer[\"reingest_aip_mets_location\"]\n    mets = metsrw.METSDocument.fromfile(mets_path)\n    original_files = [\n        fsentry for fsentry in mets.all_files() if fsentry.use == \"original\"\n    ]\n    assert original_files, format_original_files_error(context.current_transfer)\n    for fsentry in original_files:\n        events = utils.get_premis_events_by_type(\n            fsentry, PREMIS_EVENT_TYPES[event_type]\n        )\n        error = \"Expected one {} event in the METS for file {}\".format(\n            event_type, fsentry.path\n        )\n        assert len(events) == 1, error\n\n\nuse_step_matcher(\"parse\")\n\n\n@then(\n    \"there are {expected_files_count:d} original objects in the AIP METS with\"\n    \" a {event_type} event\"\n)\ndef step_impl(context, expected_files_count, event_type):\n    if not expected_files_count:\n        return\n    mets_path = context.current_transfer[\"aip_mets_location\"]\n    mets = metsrw.METSDocument.fromfile(mets_path)\n    original_files = [\n        fsentry for fsentry in mets.all_files() if fsentry.use == \"original\"\n    ]\n    assert original_files, format_original_files_error(context.current_transfer)\n    files_with_event_type = []\n    for fsentry in original_files:\n        if utils.get_premis_events_by_type(fsentry, PREMIS_EVENT_TYPES[event_type]):\n            files_with_event_type.append(fsentry)\n    error = (\n        \"In the {mets} file only the following files had {event_type} events\"\n        \" when {expected} were expected to have: {files}\".format(\n            mets=context.current_transfer[\"aip_mets_location\"],\n            event_type=event_type,\n            expected=expected_files_count,\n            files=\", \".join([entry.path for entry in files_with_event_type]),\n        )\n    )\n    assert len(files_with_event_type) == expected_files_count, error\n\n\n@then(\"there is a current and a superseded techMD for each original object\")\ndef step_impl(context):\n    mets = metsrw.METSDocument.fromfile(\n        context.current_transfer[\"reingest_aip_mets_location\"]\n    )\n    original_files = [\n        fsentry for fsentry in mets.all_files() if fsentry.use == \"original\"\n    ]\n    assert original_files, format_original_files_error(context.current_transfer)\n    for fsentry in original_files:\n        techmds = mets.tree.findall(\n            'mets:amdSec[@ID=\"{}\"]/mets:techMD'.format(fsentry.admids[0]),\n            namespaces=context.mets_nsmap,\n        )\n        techmds_status = sorted([techmd.attrib[\"STATUS\"] for techmd in techmds])\n        error = (\n            \"Expected two techMD elements (current and superseded) for\"\n            \" file {}.
Got {} instead\".format(fsentry.path, techmds_status)\n )\n assert techmds_status == [\"current\", \"superseded\"], error\n\n\n@then(\"there is a sourceMD containing a BagIt mdWrap in the AIP METS\")\ndef step_impl(context):\n utils.assert_source_md_in_bagit_mets(\n etree.parse(context.current_transfer[\"aip_mets_location\"]), context.mets_nsmap\n )\n\n\n@then(\"there is a sourceMD containing a BagIt mdWrap in the reingested AIP METS\")\ndef step_impl(context):\n utils.assert_source_md_in_bagit_mets(\n etree.parse(context.current_transfer[\"reingest_aip_mets_location\"]),\n context.mets_nsmap,\n )\n\n\n@then(\"there is a fileSec for deleted files for objects that were re-normalized\")\ndef step_impl(context):\n # get files that were deleted after reingest\n reingest_mets = metsrw.METSDocument.fromfile(\n context.current_transfer[\"reingest_aip_mets_location\"]\n )\n deleted_files = utils.get_filesec_files(\n reingest_mets.tree, use=\"deleted\", nsmap=context.mets_nsmap\n )\n # the GROUPID represents the UUID of the deleted file before being reingested\n # remove the \"Group-\" prefix to get its initial UUID\n deleted_file_uuids = [\n deleted_file.attrib[\"GROUPID\"][6:] for deleted_file in deleted_files\n ]\n # go through each normalized file in the original METS (before reingest)\n # and verify that its file_uuid is included in the deleted file uuids\n initial_mets = metsrw.METSDocument.fromfile(\n context.current_transfer[\"aip_mets_location\"]\n )\n original_files = [\n fsentry for fsentry in initial_mets.all_files() if fsentry.use == \"original\"\n ]\n assert original_files, format_original_files_error(context.current_transfer)\n for fsentry in original_files:\n if utils.get_premis_events_by_type(fsentry, \"normalization\"):\n error = \"Expected normalized file {} to be deleted after reingest\".format(\n fsentry.path\n )\n assert fsentry.file_uuid in deleted_file_uuids, error\n\n\n@then('the \"{job_name}\" job completes successfully')\ndef step_impl(context, job_name):\n default_valid_exit_codes = (0,)\n valid_exit_codes_by_job_name = {\n \"Determine if transfer still contains packages\": (0, 1)\n }\n valid_exit_codes = valid_exit_codes_by_job_name.get(\n job_name, default_valid_exit_codes\n )\n utils.assert_jobs_completed_successfully(\n context.api_clients_config,\n context.current_transfer[\"transfer_uuid\"],\n job_name=job_name,\n valid_exit_codes=valid_exit_codes,\n )\n\n\n@then('the \"{job_name}\" job fails')\ndef step_impl(context, job_name):\n utils.assert_jobs_fail(\n context.api_clients_config,\n context.current_transfer[\"transfer_uuid\"],\n job_name=job_name,\n )\n\n\n@then('the \"{microservice_name}\" microservice is executed')\ndef step_impl(context, microservice_name):\n utils.assert_microservice_executes(\n context.api_clients_config,\n context.current_transfer[\"transfer_uuid\"],\n microservice_name,\n )\n\n\n@then(\"the METS file contains a dmdSec with DDI metadata\")\ndef step_impl(context):\n tree = etree.parse(context.current_transfer[\"aip_mets_location\"])\n structmap = tree.find(\n 'mets:structMap[@TYPE=\"physical\"]', namespaces=context.mets_nsmap\n )\n transfer_dir = structmap.find(\n 'mets:div[@LABEL=\"{}-{}\"][@TYPE=\"Directory\"]'.format(\n context.current_transfer[\"transfer_name\"],\n context.current_transfer[\"sip_uuid\"],\n ),\n namespaces=context.mets_nsmap,\n )\n error = (\n 'The {} file does not contain any \"Directory\" entries in its physical '\n \"structMap\".format(context.current_transfer[\"aip_mets_location\"])\n )\n assert len(transfer_dir), 
error\n objects_dir = transfer_dir.find(\n 'mets:div[@LABEL=\"objects\"]', namespaces=context.mets_nsmap\n )\n error = (\n 'The {} file does not contain an \"objects\" directory entry in its physical '\n \"structMap\".format(context.current_transfer[\"aip_mets_location\"])\n )\n assert len(objects_dir), error\n dmdsec_ids = objects_dir.attrib[\"DMDID\"].strip().split(\" \")\n dmdsecs_contain_ddi_metadata = False\n namespaces = context.mets_nsmap.copy()\n namespaces[\"ddi\"] = \"http://www.icpsr.umich.edu/DDI\"\n for dmdsec_id in dmdsec_ids:\n ddi_codebook = tree.find(\n 'mets:dmdSec[@ID=\"{}\"]/mets:mdWrap/mets:xmlData/ddi:codebook'.format(\n dmdsec_id\n ),\n namespaces=namespaces,\n )\n if ddi_codebook is not None:\n dmdsecs_contain_ddi_metadata = True\n error = (\n \"The {} file does not contain any ddi metadata in any of the dmdSec of \"\n \"the objects directory ({}) of the physical structMap\".format(\n context.current_transfer[\"aip_mets_location\"], dmdsec_ids\n )\n )\n assert dmdsecs_contain_ddi_metadata, error\n","sub_path":"features/steps/black_box_steps.py","file_name":"black_box_steps.py","file_ext":"py","file_size_in_byte":18369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"125245189","text":"\"\"\"Monitors the UART port on a Raspberry Pi 3 for Spektrum serial packets\n\nAssumes the packets follow the Remote Receiver format\nForwards the packets on the TX pin of the serial port, so you can pass the\npackets on the the flight control board\n\"\"\"\n\n#did this work\n\nimport serial\nimport time\nimport sys\nimport datetime\nimport string\nimport threading\n\nclass Reader:\n\n def __init__(self, name, log):\n\n self.name = name\n self.values = [0,0,0,0,0,0,0]\n self.read = True\n self.stream = []\n self.log = log\n self._lock = threading.Lock()\n self.MASK_CH_ID = 0b11111100 # 0x7800\n self.SHIFT_CH_ID = 2\n self.MASK_SERVO_POS_HIGH = 0b00000011 # 0x07FF\n self.ser = serial.Serial(\n port=\"/dev/serial0\", baudrate=115200,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE)\n\n N_CHAN = 13\n self.servo_position = [0 for i in range(N_CHAN)]\n\n def align_serial(self, ser):\n \"\"\"Aligns the serial stream with the incoming Spektrum packets\n\n Spektrum Remote Receivers (AKA Spektrum Satellite) communicate serially\n in 16 byte packets at 125000 bits per second (bps)(aka baud) but are\n compatible with the standard 115200bps rate. We don't control the output\n transmission timing of the Spektrum receiver unit and so might start\n reading from the serial port in the middle of a packet transmission.\n To align the reading from the serial port with the packet transmission,\n we use the timing between packets to detect the interval between packets\n\n Packets are communicated every 11ms. At 115200 bps, a bit is read in\n approximately 8.69us, so a 16 byte (128 bit)\n packet will take around 1.11ms to be communicated, leaving a gap of about\n 9.89ms between packets. We align our serial port reading with the protocol\n by detecting this gap between reads.\n\n Note that we do not use the packet header contents because\n 1) They are product dependent. Specifically, \"internal\" Spektrum\n receivers indicate the system protocol in the second byte of the header\n but \"external\" receivers do not. Further, different products are\n use different protocols and indicate this using the\n system protocol byte.\n 2) Other bytes in the packet may take on the same value as the header\n contents. 
No bit patterns of a byte are reserved, so any byte in the\n data payload of the packet could match the values of the header bytes.\n\n Inputs\n ------\n ser: serial.Serial instance\n serial port to read from\n \"\"\"\n data = None\n # read in the first byte, might be a long delay in case the transmitter is\n # off when the program begins\n ser.read(1)\n dt = 0\n # wait for the next long delay between reads\n dt_threshold = 0.010 # pick some threshold between 8.69us and 9.89ms\n while dt < dt_threshold:\n start = time.time()\n ser.read()\n dt = time.time()-start\n # consume the rest of the packet\n ser.read(15)\n # should be aligned with protocol now\n\n def parse_channel_data(self, data):\n \"\"\"Parse a channel's 2 bytes of data in a remote receiver packet\n\n Inputs\n ------\n data: 2 byte long string (currently only supporting Python 2)\n Bytes within the remote receiver packet representing a channel's data\n\n Outputs\n -------\n channel_id, channel_data\n \"\"\"\n ch_id = ((data[0]) & self.MASK_CH_ID) >> self.SHIFT_CH_ID\n ch_data = (\n (((data[0]) & self.MASK_SERVO_POS_HIGH) << 8) | (data[1]))\n #ch_data = 988 + (ch_data >> 1)\n #print(ch_id)\n #print(ch_data)\n return ch_id, ch_data\n\n def convert(self, positions, rawdata):\n newdata = rawdata\n return newdata\n\n\n def ReaderThread(self, name):\n self.align_serial(self.ser)\n print('I am reading now and saving values to Values')\n data = None\n data_buf = None\n try:\n while self.read:\n ### TODO: this is where we will be repeating until we have enough\n ### bites to read a signal and decode it\n with self._lock:\n data_buf = self.ser.read(16)\n #print(data_buf)\n data = data_buf[2:]\n #print(data)\n for i in range(7):\n #print(data[2*i:2*i+2])\n ch_id, s_pos = self.parse_channel_data(data[2*i:2*i+2])\n if ch_id > 6:\n ch_id = 6\n \t #print(\"ch_id: \" + str(ch_id) + \" pos: \" + str(s_pos))\n self.servo_position[ch_id] = s_pos\n \t # servo_chanel[i] = ch_id\n self.log.write(\"%4d, %4d, %4d, %4d, %4d, %4d\\n\"%tuple(\n self.servo_position[:6]))\n self.values = self.servo_position[:7]\n #datawrite = convert(servo_position, data_buf)\n self.ser.write(data_buf)\n except Exception as e:\n self.ser.close()\n print(e)\n\n #stream here\n #time.sleep(1)\n\n def SaveValues(self, listToSave):\n self.stream = listToSave\n\n def GetValues(self):\n ### TODO: this is where we need to read the controller values.\n return self.values\n\n def run(self):\n\n dataThread = threading.Thread( target = self.ReaderThread, args = (\"Reader Thread\", ))\n dataThread.start()\n\n return dataThread\n\n def Stop(self):\n self.read = False\n\ndef main():\n print(\"AUX1____Roll____Pitch____Yaw____AUX2____Throttle\")\n data = None\n #servo_chanel = [0 for i in range(N_CHAN)]\n MyDateTime = datetime.datetime.now()\n #date = MyDateTime.isoformat()\n #date = date.translate(string.maketrans(\"\",\"\"),\":.-\")\n #logfile = open(\"Reciever\" + date + \".csv\",\"w+\")\n #logfile.write(\"AUX1____Roll____Pitch____Yaw____AUX2____Throttle\\n\")\n reader = Reader(\"reader\", None)\n try:\n reader.align_serial(reader.ser)\n while True:\n data_buf = reader.ser.read(16)\n print(data_buf)\n data = data_buf[2:]\n print(data)\n for i in range(7):\n print(data[2*i:2*i+2])\n ch_id, s_pos = reader.parse_channel_data(data[2*i:2*i+2])\n \t #print(\"ch_id: \" + str(ch_id) + \" pos: \" + str(s_pos))\n reader.servo_position[ch_id] = s_pos\n # servo_chanel[i] = ch_id\n sys.stdout.write(\n \"%4d\t%4d\t%4d\t%4d\t%4d\t%4d\t%4d\t%4d\\r\"%tuple(\n reader.servo_position[:8]))\n\n 
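            # Flush explicitly: the status line above ends in \"\\r\" rather than\n            # \"\\n\", so line-buffered stdout would not repaint it on its own; a\n            # fresh 16-byte frame arrives roughly every 11 ms.\n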
sys.stdout.flush()\n\n            reader.ser.write(data_buf)\n    except(KeyboardInterrupt, SystemExit):\n        reader.ser.close()\n        # logfile.close()  # logfile is only created in the commented-out code above\n    except(Exception) as ex:\n        print(ex)\n        reader.ser.close()\n        # logfile.close()  # logfile is only created in the commented-out code above\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"ReadWriteComands/ReadWrite.py","file_name":"ReadWrite.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"406600089","text":"import numpy as np\nimport pandas as pd\nimport random as rd\nimport matplotlib.pyplot as plt\n\nimport sys\nimport warnings\nimport Plotting \nimport multiprocessing\nfrom draw import *\nfrom cond import *\nfrom saveFile import *\nfrom generator import *\nfrom joblib import Parallel, delayed\n\n\n\n'''\nGenerate a Watch Vector for stopping conditions\nRun Parallel Processes to determine number of steps until compromised state\n'''\ndef main(M, N, iterations):\n    \n    initN = N\n    \n    #This list will determine how long the simulation will run. \n    Watch = watch(size)\n    print(\"Watch Vector:\", Watch)\n\n    #Parallel processing based on the number of cores\n    num_steps = []\n    num_cores = multiprocessing.cpu_count()\n    num_steps = Parallel(n_jobs=num_cores)(delayed(parallel_section)(initN, Watch) for i in range(iterations))\n\n    '''\n    for i in range(0,iterations):\n        value = parallel_section(initN,Watch)\n        num_steps.append(value)\n    '''\n    return num_steps, Watch\n    \n\n'''Parallelization Of Computing steps for Compromised Network'''\ndef parallel_section(initN, Watch):\n    \n    N = initN\n    count = 0\n    # Checks to see if the nodes in watch are compromised or not. Returns a boolean.\n    while(not isCompromised(Watch, N)):\n        p = CalculateProb(M,N)\n        for j in range(len(p)):\n            if(N[j] != 1):\n                N[j] = p[j]\n        N = randomDraw(N)\n        count = count + 1\n    return count\n\n'''Watches for Compromised State'''\ndef isCompromised(Watch , N):\n    watch_ones = np.where(np.array(Watch) == 1)\n    N_ones = np.where(np.array(N) == 1)\n    return set(watch_ones[0]).issubset(set(N_ones[0]))\n\n\n'''Initialization and Running'''\nif __name__ == '__main__':\n\n    warnings.simplefilter(\"ignore\")\n    warnings.filterwarnings(\"ignore\")\n\n    print(\"Running Simulation \\n\")\n    size = int(input(\"Input Matrix Size for n x n: \"))\n    iterations = int(input(\"Input the number of iterations you'd like to run: \"))\n\n    # Generate M matrix and N vector\n    f = \"MS10PrivData\"\n    M = CVCMatrix(size, f)\n    N = gen_states(size)\n    print(M)\n    print(N)\n    \n    num_steps, Watch = main(M, N, iterations)\n\n    print(\"Number of Steps Taken: \", num_steps)\n    \n\n    Plotting.plotMatrix(M, Watch)\n    Plotting.barChart(num_steps)\n    save(num_steps, f)\n\n    print(\"\\n\")\n    print(\"Simulation Complete\")\n\n\n\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"366904946","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\nSmart Spring Festival couplet API\r\nThis code is for learning purposes only; do not use it for anything else!\r\n\"\"\"\r\n\r\n__author__='@LMR'\r\n\r\nimport requests\r\nimport base64\r\nimport hashlib\r\nimport re\r\n\r\ndef get_couplet(inp,filepath):\r\n    \"\"\"Tencent couplet AI: http://couplet.ronghuiad.com/\r\ninp: input text\r\nfilepath: path of the output couplet PNG image\r\n\"\"\"\r\n    url=\"http://couplet.ronghuiad.com/api/index.php/api/imageText\"\r\n    try:\r\n        r=requests.post(url,data={'word':inp,'sort':1,'type':2})\r\n        pngUrl=r.json()[\"data\"][\"pngUrl\"]\r\n        load=requests.get(pngUrl,stream=True)\r\n
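        # Stream the couplet PNG to disk in 1 KiB chunks so the whole image\r\n        # never has to be held in memory.\r\n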
        with open('%s'%(filepath), \"wb\") as f:\r\n            for chunk in load.iter_content(chunk_size=1024):\r\n                f.write(chunk)\r\n        return True\r\n    except:\r\n        return False\r\n\r\ndef get_couplet2(inp):\r\n    \"\"\"Baidu couplet AI: https://chunlian.news.cntv.cn/\r\ninp: input text\r\nreturns the couplet as JSON: the upper line, the lower line and the streamer\r\n\"\"\"\r\n    url=\"https://couplet.3g.163.com/couplet2019/api/generate\"\r\n    try:\r\n        # changing the value of index yields additional candidate couplets\r\n        data={'type':2,'query':inp,'index':1}\r\n        r=requests.post(url,data=data)\r\n        res=r.json()['data']\r\n        return [res['upper'],res['lower'],res['streamer']]\r\n    except:\r\n        return False\r\n\r\ndef analysis_pic(picpath):\r\n    \"\"\"Baidu face AI: https://chunlian.news.cntv.cn/\r\nExtracts keywords from a face photo for couplet composition\r\npicpath: path of the image\r\nReturns face information as JSON: gender, age, glasses (whether glasses are worn),\r\nexpression (smile score), beauty (beauty score),\r\nkeyword (the keyword derived from the face)\r\n\"\"\"\r\n    url=\"https://couplet.3g.163.com/couplet2019/api/photo/upload\"\r\n    try:\r\n        # base64-encode the image\r\n        with open(picpath, 'rb') as f:\r\n            base64_data = 'data:image/jpeg;base64,'+base64.b64encode(f.read()).decode()\r\n        # compute the MD5 digest\r\n        md5 = hashlib.md5()\r\n        md5.update(base64_data.encode())\r\n        # wrap the payload\r\n        data={'base64Data':base64_data,'k':md5.hexdigest()}\r\n        # the response is slightly malformed: decoding the full body fails (it embeds\r\n        # the image base64), so only the first 500 bytes are taken before decoding\r\n        r=requests.post(url,data=data).content[:500].decode(\"utf-8\")\r\n        # extract the fields with regular expressions\r\n        gender=re.findall(r'\"gender\":([^,]+),',r)[0]\r\n        age=re.findall(r'\"age\":([^,]+),',r)[0]\r\n        glasses=re.findall(r'\"glasses\":([^,]+),',r)[0]\r\n        expression=re.findall(r'\"expression\":([^,]+),',r)[0]\r\n        beauty=re.findall(r'\"beauty\":([^,]+),',r)[0]\r\n        keyword=re.findall(r'\"keyword\":\"([^\"]+)\"',r)[0]\r\n        return {'gender':gender,'age':age,'glasses':glasses,'expression':expression,'beauty':beauty,'keyword':keyword}\r\n    except:\r\n        return False\r\n","sub_path":"API/Couplet.py","file_name":"Couplet.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"561299867","text":"import datetime\nimport time\n\noffset = {\n    \"1st\": 0,\n    \"2nd\": 7,\n    \"3rd\": 14,\n    \"4th\": 21,\n    \"5th\": 28\n}\n\n\nclass MeetupDayException(Exception):\n    pass\n\n\ndef meetup(year, month, week, day_of_week):\n    try:\n        dt = datetime.date(year, month, 1)\n        weekday = time.strptime(day_of_week, \"%A\").tm_wday\n\n        if week == 'last':\n            wrapped_month = (month % 12) + 1\n            wrapped_year = year + month // 12\n            last_day = datetime.date(\n                wrapped_year, wrapped_month, 1) - datetime.timedelta(1)\n            day = last_day.day\n        else:\n            day = 1\n\n        while True:\n            if datetime.date(year, month, day).weekday() == weekday:\n                if week == 'teenth':\n                    if day < 13:\n                        day += 1\n                    else:\n                        break\n                elif week == 'last':\n                    break\n                else:\n                    day += offset[week]\n                    break\n            else:\n                if week == 'last':\n                    day -= 1\n                else:\n                    day += 1\n        return datetime.date(year, month, day)\n    except:\n        raise MeetupDayException(\"Date not valid\")\n","sub_path":"Basics/Exercism/Python/meetup/meetup.py","file_name":"meetup.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"228569134","text":"import hyperion\nimport time\nimport colorsys\nimport random\n\n# Initialize the led data\nledData = bytearray()\nfor i in range(hyperion.ledCount):\n\tledData += bytearray((0,0,0))\n\nsleepTime = 0.001\n\n# Start the write data loop\nwhile not hyperion.abort():\n\thyperion.setColor(ledData)\n\tfor i in range(hyperion.ledCount):\n\t\tif random.randrange(10) == 1:\n\t\t\thue = random.random()\n\t\t\tsat = 1.0\n\t\t\tval = random.random()\n\t\t\trgb = colorsys.hsv_to_rgb(hue, sat, val)\n\t\t\tledData[i*3  ] =
int(255*rgb[0])\n\t\t\tledData[i*3+1] = int(255*rgb[1])\n\t\t\tledData[i*3+2] = int(255*rgb[2])\n\ttime.sleep(sleepTime)\n","sub_path":"effects/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541033618","text":"\nfrom .atomic_states import np\n\nEPS = 1e-5\nclass gauss_mode:\n\n \"\"\"\n Define a solution of 'paraxial' wave equation - Gauss mode TEM_{00}\n \"\"\"\n\n def __init__(self, waist, omega):\n\n self.waist = waist\n self.omega = omega\n self.z0 = np.pi * self.waist ** 2 * self.omega / (2*np.pi) # Rayleigh length\n\n c = 1\n k = self.omega / c\n\n if self.waist >= 5.*self.omega:\n print('Using gaussian approximation...')\n self.forward_amplitude = \\\n lambda r, z: np.sqrt(2 / np.pi) / self.evo_waist(z) * np.exp(-1 * r ** 2 / self.evo_waist(z) ** 2) \\\n * np.exp(+1j * k * (z + r**2*self.curvature2(z)) - 1j * self.gouy_phase(z))\n\n self.backward_amplitude = \\\n lambda r, z: np.sqrt(2 / np.pi) / self.evo_waist(z) * np.exp(-1 * r ** 2 / self.evo_waist(z) ** 2) \\\n * np.exp(-1j * k * (z + r**2*self.curvature2(z)) + 1j * self.gouy_phase(z))\n else:\n self.forward_amplitude = lambda r,z: self.exact_mode(r,z)\n self.backward_amplitude = lambda r,z: np.conj(self.exact_mode(r,z))\n \"\"\"\n Complex waist of beam as a function of position:\n \"\"\"\n\n def evo_waist(self, z):\n #return self.waist\n #Future\n #curvature radius\n return self.waist * np.sqrt(1+(z/self.z0)**2)\n\n \"\"\"\n Complex multiplicator (for small waists):\n \"\"\"\n def complex_waist(self,z):\n return 1 - 0*(4 / self.waist ** 2 / self.omega ** 2) * 1/(1-1j*z/self.z0)\n\n def curvature2(self, z):\n #return 0\n #Future\n return 1 / (2 * (z+10e-4) * (1+(z/self.z0)**2))\n\n def gouy_phase(self, z):\n #return 0\n #Future\n return np.arctan(z/self.z0)\n\n def norm(self,z):\n return 1\n\n def exact_mode(self, r, z):\n\n from scipy.integrate import quad\n from scipy.special import j0,j1\n\n self.ez = np.empty_like(z, dtype=np.complex64)\n\n res = np.empty_like(z, dtype=np.complex64)\n for i in range(len(z)):\n re = lambda x: x*np.exp(-self.waist ** 2 * x ** 2 / 4) * np.cos(\n np.sqrt(self.omega ** 2 - x ** 2) * z[i]) * j0(x * r[i])\n im = lambda x: x*np.exp(-self.waist ** 2 * x ** 2 / 4) * np.sin(\n np.sqrt(self.omega ** 2 - x ** 2) * z[i]) * j0(x * r[i])\n eva = lambda x: x*np.exp(-self.waist ** 2 * x ** 2 / 4 -\n np.sqrt(x ** 2 - self.omega ** 2) * abs( z[i] )) * j0(x * r[i])\n\n rez = lambda x: x*np.exp(-self.waist ** 2 * x ** 2 / 4) * np.cos(\n np.sqrt(self.omega ** 2 - x ** 2) * z[i]) * j1(x * r[i]) * x/np.sqrt(self.omega**2 - x**2)\n imz = lambda x: x*np.exp(-self.waist ** 2 * x ** 2 / 4) * np.sin(\n np.sqrt(self.omega ** 2 - x ** 2) * z[i]) * j1(x * r[i]) * x/np.sqrt(self.omega**2 - x**2)\n\n norm_prop = lambda x: x*self.waist ** 2 / 2 * (1 + self.omega**2 / (self.omega ** 2 - x**2)) \\\n * np.exp(-self.waist**2 * x**2 / 2)\n\n norm_eva = lambda x: x*self.waist ** 2 / 2 * (1 + self.omega**2 / (self.omega ** 2 - x**2)) \\\n * np.exp(-self.waist**2 * x**2 / 2) \\\n * (np.exp(-2*np.sqrt(x**2 - self.omega**2)*abs(z[i]))-1)\n\n\n\n om = (1-EPS)*self.omega\n op = (1+EPS)*self.omega\n norm = quad(norm_prop, 0, om)[0] + \\\n 0*quad(norm_prop, op, np.inf)[0] + \\\n 0*quad(norm_eva, op, np.inf)[0]\n\n res[i] = (quad(re, 0, self.omega)[0] +\n 1j*quad(im, 0, self.omega)[0] +\n 0*quad(eva, self.omega, np.inf)[0])*\\\n self.waist / np.sqrt(2*np.pi) / np.sqrt(norm)\n\n self.ez[i] = -1j*(quad(rez, 0, 
self.omega)[0] +\n                      1j*quad(imz, 0, self.omega)[0] +\n                      0*quad(eva, self.omega, np.inf)[0])*\\\n                     self.waist / np.sqrt(2*np.pi) / np.sqrt(norm)\n\n        return res\n\nif __name__ == '__main__':\n\n    from matplotlib import pyplot as plt\n    om = 1\n    w = 0.1*np.pi\n\n    m = gauss_mode(w, om)\n    rho = np.linspace(-10,10, 100)\n    amps = abs(m.exact_mode(rho, 0*np.ones_like(rho)))**2\n    amsg = abs(m.forward_amplitude(rho, 0*np.ones_like(rho)))**2\n    amsz = abs(m.ez)**2\n\n    ampsR = abs(m.exact_mode(rho, m.z0*np.ones_like(rho)))**2\n    amsgR = abs(m.forward_amplitude(rho, m.z0*np.ones_like(rho)))**2\n    amszR = abs(m.ez) ** 2\n    plt.plot(rho, amps, 'b-')\n    plt.plot(rho, amsg, 'r-')\n    plt.plot(rho, amsz, 'g-')\n\n    plt.plot(rho, ampsR, 'b--')\n    plt.plot(rho, amsgR, 'r--')\n    plt.plot(rho, amszR, 'g--')\n    plt.show()\n\n    #print(m.exact_mode(np.zeros_like(x),x))\n\n\n\n\n\n\n","sub_path":"novelfss/gmode.py","file_name":"gmode.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"220140621","text":"from cgp_core import CgpCore\nimport dis\n\nimport ctypes\nimport sys\nimport array\nimport types \n\nclass CgpCircuit(CgpCore):\n    ARRAY_TYPECODE = 'L'\n\n    ## The constructor.\n    # @param self The object pointer. \n    # @param params Dictionary with CGP params. \n    #        (rows, cols, lback, functions, i/o node ports, i/o data)\n    def __init__(self, params, population):\n        # Copy params.\n        if params is not None:\n            self.__dict__ = params.copy()\n\n        # Set the derived params \n        self.area = self.cols * self.rows\n        self.popsize = population\n        self.functionCnt = len(self.functionSet) - 1 # Last idx, because of rand_int(a,b)\n        self.graphInputCnt = len(self.dataInput)\n        self.graphOutputCnt = len(self.dataOutput)\n        self.maxFitness = 2**self.graphInputCnt * self.graphOutputCnt\n        self.bestFitness = 42\n        CgpCircuit.run = CgpCore.runAscending # Type of selecting parent. 
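\n        # Sanity check on the sizes above (illustrative numbers): a circuit\n        # with 4 input bits and 1 output bit is scored over 2**4 = 16\n        # truth-table rows, so maxFitness = 2**4 * 1 = 16, i.e. one point per\n        # correct output bit.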
\n \n \n self.arrayTypecode = CgpCircuit.ARRAY_TYPECODE\n\n self.trainingVectors = 1\n if sys.maxsize > 2**32:\n if self.graphInputCnt > 6:\n self.trainingVectors = int((2**self.graphInputCnt)/64)\n else: \n if self.graphInputCnt > 5:\n self.trainingVectors = int((2**self.graphInputCnt)/32)\n self.__convertData()\n self._initOutputBuff()\n self._initUsedNodes(population)\n self.__initLookUp()\n self.__initByteCodes(population)\n if sys.maxsize > 2**32:\n CgpCircuit.bitMax = 2**64 \n self.mask = 2**64-1\n else:\n CgpCircuit.bitMax = 2**32\n self.mask = 2**32-1\n\n def myExec(self, i ):\n #exec (self.code[i], None, {\"outputBuff\" : self.outputBuff})\n exec (self.code[i])\n\n # Calculate Fitness\n def evaluatePop(self):\n # Init values of evaluation\n for i in range(0, self.popsize):\n if i == self.parent: continue\n self.popFitness[i] = 0 \n ctypes.memset(self.ptrUsedNodes[i], 0, self.bufferUsedNodes)\n self.popUsedNodes[i] = self._usedNodes(self.pop[i], self.usedNodes[i])\n #self.code[i] = self._getByteCodePostfix(self.pop[i], self.usedNodes[i], self.arrByteCodes)\n # Using build-in function: compile()\n #self.code[i] = compile(self.__getCode(self.pop[i], self.usedNodes[i]), \"\", \"exec\")\n # Using byte code JIT compilation\n #self.code[i] = self.__getByteCode(self.pop[i], self.usedNodes[i], self.arrByteCodes)\n vektor = 0 \n ptr = self.ptrDataInput\n #for i in self.dataInput: print(hex(i))\n #print()\n for j in range(0, self.trainingVectors):\n ctypes.memmove(self.ptrOutputBuff, ptr, self.buffOffsetSize)\n ptr += self.buffOffsetSize\n for i in range(0, self.popsize):\n if i == self.parent: continue\n #self.myExec(i)\n\n self.evalFitness(self.pop[i], self.outputBuff, self.usedNodes[i])\n idx = self.lastGeneIdx\n for k in range(0, self.graphOutputCnt):\n # Zero count\n # Int -> string -> zero_cnt\n #tmp = bin((self.outputBuff[self.pop[i][idx]] | self.bitMax)^ self.dataOutput[k+vektor])\n #self.popFitness[i] += (tmp.count('0')) - 1 # becouse it is in format 0b0001 and i want to del first zero. 
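\n                    # Either scoring variant (string zero-count above, look-up\n                    # table below) works the same way: OR-ing with bitMax sets a\n                    # sentinel bit above the data word so leading zeros survive,\n                    # XOR with the expected word marks mismatches as 1-bits, and\n                    # every remaining 0-bit is a correctly computed output bit.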
\n # Look-up table\n #self.popFitness[i] += CgpCircuit.zeroCount((self.outputBuff[self.pop[i][idx]] | self.bitMax)^ self.dataOutput[k+vektor])\n idx += 1\n vektor += self.graphOutputCnt\n for i in range(0, self.popsize):\n if i == self.parent or self.popFitness[i] != self.maxFitness:\n continue\n #self.popFitness[i] += self.area - self.popUsedNodes[i]\n\n\n ## Converts data in more usefull format\n def __convertData(self):\n if sys.maxsize > 2**32:\n architecture = 64\n mask = 2**64 - 1\n else:\n architecture = 32\n mask = 2**32 - 1\n input = list(self.dataInput) # Temp variable\n output = list(self.dataOutput) # Temp variable\n\n # Init array\n size = self.trainingVectors * self.graphInputCnt\n self.dataInput = array.array(self.arrayTypecode, [0]*size)\n size = self.trainingVectors * self.graphOutputCnt\n self.dataOutput = array.array(self.arrayTypecode, [0]*size)\n \n # Fill up input array\n idx = 0 \n for i in range(0, self.trainingVectors):\n for j in range(0, self.graphInputCnt):\n self.dataInput[idx] = input[j] & mask\n input[j] = input[j] >> architecture\n idx += 1 \n\n # Fill up output array\n idx = 0 # reset idx, using another array || list\n for i in range(0, self.trainingVectors):\n for j in range(0, self.graphOutputCnt):\n self.dataOutput[idx] = output[j] & mask\n output[j] = output[j] >> architecture\n idx += 1 \n\n ############################################################################\n # EVAL FITNESS\n ############################################################################\n def __getCode(self, chrom, usedNodes):\n a_idx = -3 # Index prvniho operandu\n b_idx = -2 # Index druheho operandu\n f_idx = -1 # Index Operace\n # Strings solution \n result = \"\"\n for j in range(self.graphInputCnt, self.area+self.graphInputCnt):\n a_idx += 3\n b_idx += 3\n f_idx += 3\n if usedNodes[j] == 0: \n continue\n result += \"a=self.outputBuff[\"+str(chrom[a_idx])+\"]\\n\"\n result += \"b=self.outputBuff[\"+str(chrom[b_idx])+\"]\\n\"\n result += \"self.outputBuff[\"+str(j)+\"]=\"+ self.functionSetStr[chrom[f_idx]] +\"\\n\"\n return result\n\n\n def __initByteCodes(self, population):\n arr = []\n for i in range(0, self.graphInputCnt + self.cols * self.rows):\n arr.append(i)\n self.mask_idx = i + 1\n if sys.maxsize > 2*32:\n arr.append(2**64-1)\n else:\n arr.append(2**32-1)\n self.none_idx = i + 2\n arr.append(None)\n\n self.co_consts = tuple(arr)\n #print(self.co_consts)\n #print(self.co_constsk\n self.code = [0] * population\n self.arrByteCodes = [0] * population\n #self.co_names = ('self', 'graphInputCnt', 'out_idx', 'outputBuff', 'a', 'b', 'bitOnes')\n self.co_names = ('outputBuff',)\n #initCode = [101, 0, 0, \n #106, 1, 0, \n #90, 2, 0]\n initCode = []\n # # PythonBytecode STACK\n nodeCode = [100, self.mask_idx, 0, # LOAD CONST\n 101, 0, 0, # LOAD ATTR, outputbuff [outputbuff]\n 100, 0, 0, # LOAD CONST, 0 [const, ouputbuff]\n 25, # BIN_SUBSCR [a]\n 101, 0, 0, # LOAD ATTR, outputbuff [outputbuff, a]\n 100, 0, 0, # LOAD CONST, 0 [const, outputbuff, a]\n 25, # BIN_SUBSCR [b, a]\n 9, # NOP [operation]\n 9, # NOP [operation]\n 9, # NOP [operation]\n 101, 0, 0, # LOAD ATTR, outputbuff [outputbuff, self, result]\n 100, 0, 0, # LOAD CONST, index [index, outputbuff, self, result]\n 60 # STORE_SUBCR, val []\n ]\n exitCode = [100, self.none_idx, 0, # LOAD NONE\n 83] # Return shit\n #self.co_lnotab = bytes([9,0] + [35,0,10,0]*self.rows*self.cols)\n self.co_lnotab = bytes()\n\n\n\n self.arrByteCodes = array.array('B', initCode + nodeCode*(self.cols*self.rows) + exitCode) \n self.stacksize = 
self.area # EDIT IF NEED!!! \n \n #print(self.co_consts)\n\n def __getByteCode(self, chrom, used, code):\n # zkopirovat prvnich osm bajtu ty zustanou nemenne \n # Checknout dokumentaci jestli existuje neco jako NO operation!\n i = 0 # Index zactku bytekodu\n a_idx = -3 # Index prvniho operandu\n b_idx = -2 # Index druheho operandu\n f_idx = -1 # Index Operace\n idx = 0 # Index chromozomu \n for j in range(self.graphInputCnt, self.area+self.graphInputCnt):\n a_idx += 3\n b_idx += 3\n f_idx += 3\n idx += 1\n if 0 == used[j]: # Byl tento uzel pouzitej? \n continue \n code[i ] = 100\n code[i+1 ] = self.mask_idx\n code[i+3 ] = 101\n code[i+7 ] = chrom[a_idx]\n code[i+14] = chrom[b_idx]\n code[i+17] = self.functionBC1[chrom[f_idx]]\n code[i+18] = self.functionBC2[chrom[f_idx]]\n code[i+19] = self.functionBC3[chrom[f_idx]]\n code[i+24] = j \n i += 27\n code[i] = 100 # RETURN VALUE\n code[i+1] = self.none_idx # RETURN VALUE\n code[i+2] = 0 # RETURN VALUE\n code[i+3] = 83 # RETURN VALUE\n #exitCode = [100, self.none_idx, 0, # LOAD NONE\n return types.CodeType(0, \n 0, # Py2 asi smazat..\n 0, 20, 64,\n code.tobytes(), self.co_consts, self.co_names, tuple(),\n \"\", \"\", 1, self.co_lnotab)\n\n\n\n\n # Simulate circuit\n def evalFitness(self, chrom, out, usedNodes):\n idx = 0 # Index pointer in chromosome\n j = self.graphInputCnt\n # For each node set its output\n for i in range(self.graphInputCnt, self.area+self.graphInputCnt):\n if usedNodes[i] == 0: \n idx += 3 \n j += 1\n continue\n a = out[chrom[idx]]\n idx += 1\n b = out[chrom[idx]]\n idx += 1\n out[j] = self.functionSet[chrom[idx]](a, b)\n j += 1\n idx += 1\n\n\n\n\n def __initLookUp(self):\n global lookUpBitTable \n lookUpBitTable = array.array(self.arrayTypecode, [0]*256)\n for i in range(0, 256):\n cnt = 0\n zi = 0xffff - i\n for j in range(0, 8):\n cnt += zi & 1\n zi = zi >> 1\n lookUpBitTable[i] = cnt\n\n # Select zeroCount\n if sys.maxsize > 2**32:\n CgpCircuit.zeroCount = CgpCircuit.zeroCount64bit\n else:\n CgpCircuit.zeroCount = CgpCircuit.zeroCount32bit\n\n def zeroCount32bit(val):\n return lookUpBitTable[val & 0xff] + \\\n lookUpBitTable[val >> 8 & 0xff] + \\\n lookUpBitTable[val >> 16 & 0xff] + \\\n lookUpBitTable[val >> 24 & 0xff]\n\n def zeroCount64bit(val):\n return lookUpBitTable[val & 0xff] + \\\n lookUpBitTable[val >> 8 & 0xff] + \\\n lookUpBitTable[val >> 16 & 0xff] + \\\n lookUpBitTable[val >> 24 & 0xff] + \\\n lookUpBitTable[val >> 32 & 0xff] + \\\n lookUpBitTable[val >> 40 & 0xff] + \\\n lookUpBitTable[val >> 48 & 0xff] + \\\n lookUpBitTable[val >> 56 & 0xff]\n\n\n def __getBc(self, chrom, nodeidx, usedNodes, code):\n global code_idx\n idx = (nodeidx - self.graphInputCnt) * 3\n # Mame promennou ulozenu? 
jedna se o vstup?\n if usedNodes[nodeidx] == -1 or nodeidx < self.graphInputCnt:\n code[code_idx] = 101\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = nodeidx\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 25\n code_idx += 1 \n return\n #return [101, 0, 0, 100, nodeidx, 0, 25] # Uloz hajzla\n function = chrom[idx + 2]\n arita = self.functionArity[function]\n if arita == 0:\n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = self.functionBC4[function]\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n return\n #return [100, self.functionBC4[function], 0] # ve 4ce je ulozena konstanta\n # zpracuj a uloz do pole.\n if usedNodes[nodeidx] == 1:\n # IDENTITA\n if function == 0: # Identity\n self.__getBc(chrom, chrom[idx], usedNodes, code)\n return\n \n # dve arity\n elif arita == 2:\n if self.functionBC2[function] == 24:\n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = self.mask_idx\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n self.__getBc(chrom, chrom[idx], usedNodes, code)\n self.__getBc(chrom, chrom[idx+1], usedNodes, code)\n code[code_idx] = self.functionBC1[function]\n code_idx += 1 \n code[code_idx] = 24\n code_idx += 1 \n return\n\n self.__getBc(chrom, chrom[idx], usedNodes, code)\n self.__getBc(chrom, chrom[idx+1], usedNodes, code)\n code[code_idx] = self.functionBC1[function]\n code_idx += 1 \n return\n\n elif arita == 1:\n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = self.mask_idx\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n self.__getBc(chrom, chrom[idx], usedNodes, code)\n code[code_idx] = 24\n code_idx += 1 \n return \n\n else: # usedNodes[idx] == 1\n usedNodes[nodeidx] = -1 # SAVE INDEX\n # Identity\n if function == 0:\n self.__getBc(chrom, chrom[idx], usedNodes, code) \n code[code_idx] = 4\n code_idx += 1 \n code[code_idx] = 101\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = nodeidx \n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 60\n code_idx += 1 \n return\n elif arita == 2:\n #return self.__getBc(chrom, chrom[idx], usedNodes) + self.__getBc(chrom, chrom[idx+1], usedNodes) + self.functionBC1[chrom[idx+2]] + [4, 101, 0, 0, 100, idx, 0, 60] \n if self.functionBC2[function] == 24:\n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = self.mask_idx\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n self.__getBc(chrom, chrom[idx], usedNodes, code)\n self.__getBc(chrom, chrom[idx+1], usedNodes, code)\n code[code_idx] = self.functionBC1[function]\n code_idx += 1 \n code[code_idx] = 24\n code_idx += 1 \n else:\n self.__getBc(chrom, chrom[idx], usedNodes, code)\n self.__getBc(chrom, chrom[idx+1], usedNodes, code)\n code[code_idx] = self.functionBC1[function]\n code_idx += 1 \n code[code_idx] = 4\n code_idx += 1 \n code[code_idx] = 101\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = nodeidx \n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 60\n code_idx += 1 \n return\n #return self.__getBc(chrom, chrom[idx], usedNodes) + self.__getBc(chrom, chrom[idx+1], usedNodes) + [self.functionBC1[chrom[idx+2]]] + [4, 101, 0, 0, 100, nodeidx, 0, 60] \n elif arita == 1:\n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = self.mask_idx\n 
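\n            # opcode 100 (LOAD_CONST) with argument mask_idx pushes the\n            # word-width mask; the BINARY_AND (24) emitted after the operand\n            # below truncates the unary result back to machine width.\n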
code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n self.__getBc(chrom, chrom[idx], usedNodes, code)\n code[code_idx] = 24\n code_idx += 1 \n code[code_idx] = 4\n code_idx += 1 \n code[code_idx] = 101\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 100\n code_idx += 1 \n code[code_idx] = nodeidx \n code_idx += 1 \n code[code_idx] = 0\n code_idx += 1 \n code[code_idx] = 60\n code_idx += 1 \n return\n # LOAD FUNCTION CALCULATIONS EXEC FUNCTION 1 param 0 (magic) keywords SAVE VARIABLE\n #return [100, sinidx, 0] + self.__getBc(chrom, chrom[idx_a], usedNodes) + [131, 1, 0, 4, 101, 0, 0, 100, nodeidx, 0, 60] \n \n ## Transfer chromosome into postfix notation! \n # Returns used nodes. \n # I somhow love this function \n # GENE LENGTH MUST BE 3 \n # INPUT PORTS MUST BE 2\n # OUTPUT PORTS MUST BE 1 \n # IF NOT u have to replace numbers (CONSTANTS) in this piece of script.\n def _getByteCodePostfix(self, chrom, usedNodes, code):\n cnt = 0\n # Load last index\n global code_idx\n code_idx = 0\n idx = self.lastGeneIdx\n for i in range(self.graphOutputCnt):\n if usedNodes[chrom[idx]] == -1: \n idx +=1\n continue\n self.__getBc(chrom, chrom[idx], usedNodes, code)\n # STACK = [result] LOAD ouputBuff\n code[code_idx] = 101\n code_idx+=1\n code[code_idx] = 0\n code_idx+=1\n code[code_idx] = 0\n code_idx+=1\n # STACK = [result, outputBuff] LOAD const \n code[code_idx] = 100\n code_idx+=1\n code[code_idx] = chrom[idx]\n code_idx+=1\n code[code_idx] = 0\n code_idx+=1\n #STACK = [result, outputBuff, const] STORE result in outputbuff[const]\n code[code_idx] = 60\n code_idx+=1\n idx+=1\n #STACK = [] LOAD NONE\n code[code_idx] = 100\n code_idx +=1\n code[code_idx] = self.none_idx\n code_idx +=1\n code[code_idx] = 0\n code_idx +=1\n #STACK = [None] RETURN None\n code[code_idx] = 83\n code_idx +=1\n return types.CodeType(0, \n 0, # Py2 asi smazat..\n 0, self.stacksize, 64,\n code.tobytes(), self.co_consts, self.co_names, tuple(),\n \"\", \"\", 1, self.co_lnotab)\n #dis.dis(c)\n\n def resultEquation(self, chrom=None):\n if chrom is None: chrom = self.pop[self.parent]\n result = \"\"\n for i in range(self.graphOutputCnt):\n result += \"out_\"+ str(i) + \"=\" + self.__getEq(chrom, chrom[i + self.area * (self.nodeInputPorts + self.nodeOutputPorts)])+ \"\\n\"\n return result\n \n def __getEq(self, chrom, nodeidx):\n geneLen = self.nodeInputPorts + self.nodeOutputPorts\n if nodeidx < self.graphInputCnt:\n return \"in_\" + str(nodeidx)\n else:\n nodeidx -= self.graphInputCnt\n node_in1 = chrom[nodeidx*geneLen]\n node_in2 = chrom[nodeidx*geneLen +1]\n function = chrom[nodeidx * geneLen + 2]\n if self.functionArity[function] == 0:\n return self.functionTableOp[function]\n elif self.functionArity[function] == 1:\n return self.functionTableOp[function] + \"(\"+ self.__getEq(chrom, node_in1)+\")\"\n elif self.functionArity[function] == 2:\n return \"(\"+ self.__getEq(chrom, node_in1) +self.functionTableOp[function] + self.__getEq(chrom, node_in2)+\")\"\n","sub_path":"Bachelor's Thesis/src/cython/v0.1/cgp_circuit.py","file_name":"cgp_circuit.py","file_ext":"py","file_size_in_byte":21417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226208962","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 5/27/21 5:27 PM\n@Author : Justin Jiang\n@Email : jw_jiang@pku.edu.com\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n if not nums:\n return 
[]\n        record = dict()\n        for idx, num in enumerate(nums):\n            if target - num not in record:\n                record[num] = idx\n            else:\n                return [record.get(target - num), idx]\n\n\nif __name__ == '__main__':\n    test = Solution()\n    print(test.twoSum(nums=[2, 7, 11, 15], target=9))\n    print(test.twoSum(nums=[3, 2, 4], target=6))\n    print(test.twoSum(nums=[3, 3], target=6))\n","sub_path":"20201029/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"614113739","text":"num_class = 10\n\ndataset = dict(\n    type='CIFAR10',\n    train_root='dataset/cifar10',\n    test_root='dataset/cifar10',\n    num_workers=16,\n    batchsize=128,\n    num_class=num_class)\n\noptimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, alpha_wd=0)\nlr_config = dict(policy='step', milestones=[100, 150])\ntotal_epochs = 200\n\npretrained = None\n\nuse_pws = False\nnorm_cfg = dict(type='BN', requires_grad=True)\nconv_cfg = dict(type='conv')\nneck_norm_cfg = None\nzero_init_residual = True\nif use_pws:\n    norm_cfg = dict(type='GN', num_groups=1, requires_grad=True)\n    conv_cfg = dict(type='pws', gamma=1e-3, equiv=False, initalpha=False, mode=\"fan_out\")\n    neck_norm_cfg = dict(type='GN', num_groups=1, requires_grad=True)\n    zero_init_residual = False\n\nbackbone=dict(\n    type='ResNet_Cifar',\n    depth=34,\n    num_stages=4,\n    out_indices=(3, ),\n    style='pytorch',\n    norm_cfg=norm_cfg,\n    conv_cfg=conv_cfg,\n    zero_init_residual=zero_init_residual)\n\nneck=dict(\n    type='ReluNeck',\n    in_channels=512,\n    frozen_state=False,\n    norm_cfg=neck_norm_cfg\n)\n \nhead=dict(\n    type='LinearClsHead',\n    num_classes=num_class,\n    in_channels=512,\n    topk=(1, 5),\n)\n\nknn=dict( \n    l2norm=True,\n    topk_percent=0.2\n)\n\nlogger = dict(interval=100)\nsaver = dict(interval=20)\n","sub_path":"configs/cifar/bn.py","file_name":"bn.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"592893547","text":"#!/usr/bin/python\n#coding=utf-8\n\n\"\"\"\nProducer-consumer pattern\n\nProducers: 5 production lines in total; after producing one bun, each rests a random 1-5 seconds\nConsumers: 4 consumption lines in total; after consuming one bun, each rests a random 1-5 seconds\n\"\"\"\nimport random, time\nfrom queue import Queue\nimport multiprocessing as mp\nfrom collections import namedtuple\n\n\n# the bun object\nBread = namedtuple('Bread', ['name'])\n\nclass Basket(object):\n\n    def __init__(self, size=5):\n        self._size = size   # capacity of the container\n        self._basket = Queue()  # the container\n        self._counter = 1\n\n    def go_sleep(self):\n        sleep_time = random.randint(1, 5)\n\n        print('*********resting %d--active lines: %d' % (sleep_time, len(mp.active_children())))\n        time.sleep(sleep_time)\n\n    @property\n    def is_full(self):\n        \"\"\" Is the basket full? \"\"\"\n        return self._basket.qsize() >= self._size\n\n    @property\n    def is_empty(self):\n        \"\"\" Is the basket empty? \"\"\"\n        return self._basket.qsize() == 0\n\n    def push(self):\n        \"\"\" Add a bun \"\"\"\n        # name of the current process\n        process_name = mp.current_process().name\n        name = \"Bread_%03d\" % self._counter\n        self._counter += 1\n        bread = Bread(name=name)\n        self._basket.put(bread)\n        print(\">> %s produced bun %s, %03d buns in total\" % (process_name, name, self._basket.qsize()))\n\n    def pop(self):\n        \"\"\" Consume a bun \"\"\"\n        # name of the current process\n        process_name = mp.current_process().name\n        if not self.is_empty:\n            bread = self._basket.get()\n            print(\"<< %s consumed bun %s, %03d buns in total\" % (process_name, bread.name, self._basket.qsize()))\n        else:\n            print(\"------------------no buns left------------------\")\n\n\n\nclass Producer(mp.Process):\n    \"\"\" Producer \"\"\"\n\n    def __init__(self, basket, condition, name=None):
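\n        # One Condition object is shared by all producer and consumer\n        # processes; every basket operation is serialised through it.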
\n        super(Producer, self).__init__(name=name)\n        self._basket = basket\n        self._condition = condition\n\n    def run(self):\n        \"\"\" The producer keeps producing buns \"\"\"\n        while True:\n            try:\n                # acquire the lock\n                self._condition.acquire()\n                if self._basket.is_full:\n                    print(\"------------full--------------\")\n                    self._condition.notify_all()\n                    self._condition.wait()\n                else:\n                    # start producing\n                    self._basket.push()\n                    self._basket.go_sleep()\n            finally:\n                # release the lock\n                self._condition.release()\n\n\nclass Consumer(mp.Process):\n    \"\"\" Consumer \"\"\"\n    def __init__(self, basket, condition, name=None):\n        super(Consumer, self).__init__(name=name)\n        self._basket = basket\n        self._condition = condition\n\n    def run(self):\n        \"\"\" The consumer keeps consuming buns \"\"\"\n        while True:\n            try:\n                # acquire the lock\n                self._condition.acquire()\n                if self._basket.is_empty:\n                    print(\"------------empty--------------\")\n                    self._condition.notify_all()\n                    self._condition.wait()\n                else:\n                    # start consuming\n                    self._basket.pop()\n                    self._basket.go_sleep()\n            finally:\n                # release the lock\n                self._condition.release()\n\ndef test():\n    basket = Basket(5)\n    condition = mp.Condition()\n    for i in range(5):\n        name = 'P_%d' % i\n        print('starting producer line>>>%s' % name)\n        p = Producer(basket, condition, name=name)\n        p.start()\n\n    print('--------taking a short break before consumption starts')\n    basket.go_sleep()\n    print('---------start consuming----------')\n    for i in range(4):\n        name = 'C_%d' % i\n        print('starting consumer line>>>%s' % name)\n        c = Consumer(basket, condition, name=name)\n        c.start()\n\nif __name__ == '__main__':\n    test()\n","sub_path":"mthread/pc/use_process_oop.py","file_name":"use_process_oop.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"412322813","text":"import serial\nfrom datetime import datetime\nimport os.path\nimport math\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error \nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Flatten,BatchNormalization\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom scipy import stats\nimport tensorflow as tf\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.utils import to_categorical\nimport warnings # current version of seaborn generates a bunch of warnings that we'll ignore\nwarnings.filterwarnings(\"ignore\")\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\nimport os, glob\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import metrics\nfrom sklearn.decomposition import PCA\n\nser = serial.Serial('/dev/ttyACM0',115200)\ndata = []\ntry:\n\tloaded_model = pickle.load(open(\"milestone2.pickle.dat\", \"rb\"))\n\t# sc = pickle.load(open(\"sc.pickle.dat\", \"rb\"))\n\tpca = pickle.load(open(\"pca.pickle.dat\", \"rb\"))\nexcept pickle.UnpicklingError:\n\tpass\n\n# df = pd.DataFrame()\n# columns = [str(i) for i in range(320)]\n# columns.append(\"target\")\n# for filename in glob.glob('data/*.csv'):\n# \t\tdf_temp = pd.read_csv(filename, names=columns)\n# \t\tdf =df.append(df_temp)\n# static = np.asarray(df, dtype=np.float32)\n# static = static[static[:,-1]==0]\n\n# static = np.average(static, axis = 0)\n# static = static[:-1]\n\nbuff = []\ndef gb_predict(test_data):\n\tprediction =int(loaded_model.predict(test_data))\n\tbuff.append(prediction)\n\tif len(buff) > 20:\n\t\tbuff.pop(0)\n\t\tresult = most_frequent(buff)\n\t\tif result == 0:\n\t\t\tprint(\"No pressure\")\n\t\tif result ==
1:\n\t\t\tprint(\"Straight\")\n\t\tif result == 2:\n\t\t\tprint(\"Leaning forward\")\n\t\tif result == 3:\n\t\t\tprint(\"Leaning backward\")\n\t\tif result == 4:\n\t\t\tprint(\"Leaning left\")\n\t\tif result == 5:\n\t\t\tprint(\"Leaning right\")\n\t\t# print(result)\n\t# return result\n\ndef most_frequent(List): \n return max(set(List), key = List.count) \n\ndef dataprocess(data):\n\tdata = data.reshape(1,-1)\n\t# sc_result = sc.transform(data.reshape(1,-1))\n\tpca_result = pca.transform(data) \n\treturn pca_result\n\ndef static_collect():\n\tstatic_data = []\n\tfor _ in range(10):\n\t\ttry:\n\t\t\tread_serial = ser.readline().decode('utf-8').strip(',\\r\\n')\n\t\texcept UnicodeDecodeError:\n\t\t\tcontinue\n\t\t# data = np.array(read_serial, dtype = np.float32)/600\n\t\tcount = len(read_serial.split(\",\"))\n\t\tif count != 256:\n\t\t\tcontinue\n\t\telse:\n\t\t\tdata = read_serial.split(',')\n\t\t\t# print(data)\n\t\t\tdata = np.asarray(data, dtype= np.float32)\n\t\tstatic_data.append(data)\n\tstatic_data = np.asarray(static_data, dtype=np.float32)\n\tstatic_mean = np.amax(static_data, axis = 0)\n\treturn static_mean\n\t\n\n# def read_data():\n# \ttry:\n# \t\tread_serial = ser.readline().decode('utf-8').strip(',\\r\\n')\n# \texcept UnicodeDecodeError:\n# \t\tcontinue\n# \t# data = np.array(read_serial, dtype = np.float32)/600\n# \tcount = len(read_serial.split(\",\"))\n# \tif count != 320:\n# \t\tcontinue\n# \telse:\n# \t\tdata = read_serial.split(',')\n# \t\t# print(data)\n# \t\tdata = np.asarray(data, dtype= np.float32)\n# \treturn data\n\ndef collecting():\n\t# static = static_collect()\n\twhile (True):\n\t\ttry:\n\t\t\tread_serial = ser.readline().decode('utf-8').strip(',\\r\\n')\n\t\texcept UnicodeDecodeError:\n\t\t\tcontinue\n\t\t# data = np.array(read_serial, dtype = np.float32)/600\n\t\tcount = len(read_serial.split(\",\"))\n\t\tif count != 256:\n\t\t\tprint(count)\n\t\t\tcontinue\n\t\telse:\n\t\t\tdata = read_serial.split(',')\n\t\t\t# print(data)\n\t\t\tdata = np.asarray(data, dtype= np.float32)\n\t\t\t\n\t\t\t# print(data)\n\t\t\t# data = static - data\n\t\t\t# data = np.divide(data, static)\n\t\t\tprocessed = dataprocess(data)\n\t\t\t# if np.average(processed)/np.average(static) > 0.1:\n\t\t\t\t# pressure = True\n\t\t\t# processed = processed.reshape(1,-1)\n\t\t\tgb_predict(processed)\n\t\t\t# else:\n\t\t\t\t# pressure = False\n\t\t\t\t# print(\"No Pressure\")\n\t\t\t# print(processed)\n\t\t\t# data = np.asarray(data, dtype = np.float32)\n\t\t\t\n\t\t\t# dataFile.write(read_serial)\n\t\t\t# print(read_serial)\n\ncollecting()\n\t","sub_path":"rasp/sitwell_testing_v3.py","file_name":"sitwell_testing_v3.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"629939035","text":"\"\"\"\n pip3 install clarifai --upgrade\n pip3 install Pillow\n\n Most of the code is copied from : https://github.com/Clarifai/clarifai-python\n\n\"\"\"\n\nfrom clarifai.rest import ClarifaiApp\nfrom clarifai.rest import Image as ClImage\n\nfrom os.path import expanduser\nfrom PIL import Image\n\n\nhome = expanduser(\"~\")\nkeyfile = home+\"/.ClarifaiKey.txt\"\nMyKey=None\nf = open(keyfile,\"r\")\nMyKey = f.readline()\nMyKey = MyKey.strip()\n#print(MyKey)\n\napp = ClarifaiApp(api_key=MyKey)\n\nmodel = app.models.get('general-v1.3')\n\n#MyFile='./index.jpeg'\nMyFile='./image3.jpg'\n\nextension = \"*.jpg\"\n\n#response = model.predict_by_url(url='https://samples.clarifai.com/metro-north.jpg')\n\nimport glob\n\n# 
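The sitwell script above smooths noisy classifier output by buffering the last 20 predictions and reporting the most frequent label. A self-contained sketch of that sliding-window majority vote; `deque(maxlen=...)` replaces the manual `pop(0)`, and the data is made up:

```python
from collections import Counter, deque

def smooth(stream, window=20):
    buf = deque(maxlen=window)  # drops the oldest label automatically
    for label in stream:
        buf.append(label)
        if len(buf) == window:
            yield Counter(buf).most_common(1)[0][0]

print(list(smooth([1, 1, 2, 1, 1] * 4, window=5)))  # mostly 1s despite the noise
```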
file-output.py\nf = open('helloworld.txt','a')\n\n\nfor filename in glob.iglob('/home/tredea/Dropbox/Grabber-live/*.jpg'):\n\n checkfile = '%s' % filename\n print('%s' % checkfile)\n\n response = model.predict_by_filename(checkfile)\n\n\n\n concepts = response['outputs'][0]['data']['concepts']\n for concept in concepts:\n # print(concept['name'], concept['value'])\n if concept['name'] == 'fish':\n print(\"Det er ein fisk i %s sansynligheit %d%%\" % (checkfile, (concept['value'])*100) )\n f.write(\"\\n\" + \"Det er ein fisk i %s sansynligheit %d%%\" % (checkfile, (concept['value'])*100) )\n\nf.close()\n\n#Image.open(MyFile).show()\n\n","sub_path":"Clarifai/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"325928567","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the JSON lines output module.\"\"\"\n\nimport io\nimport json\nimport os\nimport sys\nimport unittest\n\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.path import factory as path_spec_factory\n\nfrom plaso.lib import definitions\nfrom plaso.output import json_line\n\nfrom tests import test_lib as shared_test_lib\nfrom tests.containers import test_lib as containers_test_lib\nfrom tests.output import test_lib\n\n\nclass JSONLinesOutputTest(test_lib.OutputModuleTestCase):\n \"\"\"Tests for the JSON lines output module.\"\"\"\n\n # pylint: disable=protected-access\n\n _OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(\n os.path.sep, os.path.join('cases', 'image.dd')))\n\n _TEST_EVENTS = [\n {'_parser_chain': 'test',\n 'data_type': 'test:event',\n 'hostname': 'ubuntu',\n 'path_spec': path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,\n location='/var/log/syslog.1', parent=_OS_PATH_SPEC),\n 'text': (\n 'Reporter PID: |8442| (pam_unix(cron:session): session\\n '\n 'closed for user root)'),\n 'timestamp': '2012-06-27 18:17:01',\n 'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,\n 'username': 'root'}]\n\n def testWriteFieldValues(self):\n \"\"\"Tests the _WriteFieldValues function.\"\"\"\n test_file_object = io.StringIO()\n\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetTestFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = json_line.JSONLineOutputModule()\n output_module._file_object = test_file_object\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n\n # TODO: add test for event_tag.\n field_values = output_module._GetFieldValues(\n output_mediator, event, event_data, event_data_stream, None)\n\n output_module._WriteFieldValues(output_mediator, field_values)\n\n expected_timestamp = shared_test_lib.CopyTimestampFromString(\n '2012-06-27 18:17:01')\n\n if sys.platform.startswith('win'):\n # The dict comparison is very picky on Windows hence we\n # have to make sure the drive letter is in the same case.\n expected_os_location = os.path.abspath('\\\\{0:s}'.format(\n os.path.join('cases', 'image.dd')))\n else:\n expected_os_location = '{0:s}{1:s}'.format(\n os.path.sep, os.path.join('cases', 'image.dd'))\n\n expected_json_dict = {\n '__container_type__': 'event',\n '__type__': 'AttributeContainer',\n 'date_time': {\n '__class_name__': 'PosixTimeInMicroseconds',\n 
'__type__': 'DateTimeValues',\n 'timestamp': 1340821021000000,\n },\n 'data_type': 'test:event',\n 'display_name': 'TSK:/var/log/syslog.1',\n 'filename': '/var/log/syslog.1',\n 'hostname': 'ubuntu',\n 'inode': '15',\n 'message': (\n 'Reporter PID: |8442| (pam_unix(cron:session): '\n 'session closed for user root)'),\n 'parser': 'test',\n 'pathspec': {\n '__type__': 'PathSpec',\n 'type_indicator': 'TSK',\n 'location': '/var/log/syslog.1',\n 'inode': 15,\n 'parent': {\n '__type__': 'PathSpec',\n 'type_indicator': 'OS',\n 'location': expected_os_location,\n }\n },\n 'text': (\n 'Reporter PID: |8442| (pam_unix(cron:session): '\n 'session\\n closed for user root)'),\n 'timestamp': expected_timestamp,\n 'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,\n 'username': 'root',\n }\n event_body = test_file_object.getvalue()\n\n # We need to compare dicts since we cannot determine the order\n # of values in the string.\n json_dict = json.loads(event_body)\n self.assertEqual(json_dict, expected_json_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/output/json_line.py","file_name":"json_line.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"222253673","text":"\nimport gandi\nimport protournoi\nimport tools\nfrom team import *\n\ndef createTeamEmails():\n for t in teams:\n userEmails = teamUserEmails(t)\n gandi.setForward(teamToBase(t), \"\\n\".join(userEmails))\n\ndef printAllTeamAliases():\n print(\",\".join([ teamToEmail(t) for t in teams ]))\n \n\ndef createProtournoiUsers():\n for t in teams:\n name = teamToProTournoi(t)\n email = teamToEmail(t)\n passwd = teamToPasswd(t)\n protournoi.createNewUser(name, email, passwd)\n\ndef feedProtournoiInvitedTeams():\n for t in teams:\n people = u\"{}: {}\".format(t, u\" / \".join(teamUserAliases(t)))\n print(people)\n protournoi.invite(people)\n protournoi.submitInvites()\n\ndef printTeams():\n for t in teams:\n base = teamToBase(t)\n email = teamToEmail(t)\n people = [ u\" - '{}' <{}>\".format(*u) for u in teamUsers[t] ]\n print(email)\n print(\"\\n\".join(people))\n\ndef sendInitialMailToTeams(reallySend = False):\n for t in teams:\n base = teamToBase(t)\n email = teamToEmail(t)\n people = [ u\" - '{}' <{}>\".format(*u) for u in teamUsers[t] ]\n subject = cfg.prefix(t)+\"Welcome!\"\n msg = [\n \"Welcome to the petanque tournament!\",\n \"\",\n \"You are \"+base+\" and you have an email alias \"+teamToEmail(t)+\" that corresponds to your team, composed of:\",\n ] + people + [\n \"\",\n # \"In addition to its email alias, your team also has an account on http://www.protournoi.fr\",\n # \" user: \"+teamToProTournoi(t),\n # \" pass: \"+teamToPasswd(t)\n \"\"\n ]\n users = [email]\n if reallySend:\n tools.mail(email, users, subject, msg)\n else:\n print(\"\\n\".join(msg))\n\n\n\n# reload(data) ; reload(team) ; reload(do) ; do.printTeams()\n","sub_path":"do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"386572781","text":"#!/usr/bin/env python\n#*****************************************************************************#\n#**\n#** CASAS Zigbee Agent\n#** ZigbeeAgent.py\n#** DeviceType_c4_cardaccess_inhome_WPS10A.py\n#**\n#** Allan R Drassal, 2012-06-09\n#** Brian L. 
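The json_line test above parses the emitted event back with json.loads and compares dicts rather than strings, since JSON key order is not guaranteed. The trick in isolation:

```python
import json

a = '{"x": 1, "y": 2}'
b = '{"y": 2, "x": 1}'
assert a != b                           # string comparison is order-sensitive
assert json.loads(a) == json.loads(b)   # dict comparison is not
```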
Thomas, 2014\n#**\n#** Represents a physical Zigbee device that may contain components\n#** or actuators inside\n#**\n#** This contains DeviceType specific information for:\n#** CardAccess Wireless Smart Pressure Sensor Package\n#** c4:cardaccess_inhome:WPS10A\n#**\n#** Tools by the Center for Advanced Studies in Adaptive Systems at\n#** the School of Electrical Engineering and Computer Science at\n#** Washington State University\n#** \n#** Copyright Washington State University, 2014\n#** Copyright Brian L. Thomas, 2014\n#** \n#** All rights reserved\n#** Modification, distribution, and sale of this work is prohibited without\n#** permission from Washington State University\n#**\n#** If this code is used for public research, any resulting publications need\n#** to cite work done by Brian L. Thomas at the Center for Advanced Study of \n#** Adaptive Systems (CASAS) at Washington State University.\n#** \n#** Contact: Brian L. Thomas (brian.thomas@email.wsu.edu)\n#** Contact: Diane J. Cook (cook@eecs.wsu.edu)\n#*****************************************************************************#\n\nfrom zigbee.DeviceType import DeviceType\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_contact_int\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_pressure\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_temp_local\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_battery\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_batteryp\nfrom zigbee.ComponentType_c4_cardaccess_inhome_WPS10A import ComponentType_c4_cardaccess_inhome_WPS10A_radio\n\"\"\"\nc4:cardaccess_inhome:WPS10A\nCardAccess Wireless Smart Pressure Sensor Package\n\"\"\"\nclass DeviceType_c4_cardaccess_inhome_WPS10A(DeviceType):\n def __init__(self,deviceExtAddr,deviceConfigured,deviceEnabled,\n deviceName,zigbeeComponent_handle,casasNetwork_handle,\n zigbeeQueue,persistentDataServer_handle):\n super(DeviceType_c4_cardaccess_inhome_WPS10A, self).__init__(\n deviceExtAddr,deviceConfigured,deviceEnabled,\n deviceName,zigbeeComponent_handle,casasNetwork_handle,\n zigbeeQueue,persistentDataServer_handle)\n #configuration specific to this device\n self.deviceTypeShort = \"Pressure\"\n self.deviceType = \"c4:cardaccess_inhome:WPS10A\"\n self.deviceType_fixed = self.deviceType.replace(\":\",\"_\")\n self.deviceType_fixed = self.deviceType_fixed.replace(\"-\",\"_\")\n self.deviceFFD = False #battery powered device\n\n self.createComponent(\"contact_int\")\n self.createComponent(\"pressure\")\n self.createComponent(\"temp_local\")\n self.createComponent(\"battery\")\n self.createComponent(\"batteryp\")\n self.createComponent(\"radio\")\n\n #configuration specific to the device and not a component\n dictParameters = dict()\n dictParameters[\"checkin_interval\"] = \"10\"\n self.configPriority = 5\n return\n\n def _dataIn(self,dataIn):\n dataIn = dataIn.split(\" \")\n #report internal contact component\n componentValue = dataIn[3][1:2]\n componentValue = (bin(int(componentValue, 16))[2:]).zfill(8)\n componentValue = componentValue[4:8]\n\n if componentValue[0:1] == \"0\" and int(dataIn[3][5:7],16)/2-40 == -40:\n # Device 'glitched' on us and these are invalid values so don't publish.\n return\n\n if componentValue[1:2] == \"1\":\n self.dictComponent[self.deviceType + 
\"_contact_int\"].dataIn(\"OPEN\")\n else:\n self.dictComponent[self.deviceType + \"_contact_int\"].dataIn(\"CLOSE\")\n\n #report pressure\n if componentValue[0:1] == \"1\":\n self.dictComponent[self.deviceType + \"_pressure\"].dataIn(\"VACANT\")\n else:\n self.dictComponent[self.deviceType + \"_pressure\"].dataIn(\"OCCUPIED\")\n\n #report temp_local\n componentValue = float(int(dataIn[3][5:7],16))/2-40\n if (componentValue != -40):\n self.dictComponent[self.deviceType + \"_temp_local\"].dataIn(componentValue)\n\n #report battery\n componentValue = int(dataIn[3][2:4],16)/74.35\n self.dictComponent[self.deviceType + \"_battery\"].dataIn(\"%s\" % str(int(componentValue * 1000)))\n\n #report battery\n componentValue = int(dataIn[3][2:4],16)*100/255\n self.dictComponent[self.deviceType + \"_batteryp\"].dataIn(\"%s\" % str(int(componentValue)))\n return\n\n def _createComponent(self,componentType,componentType_fixed,componentEnabled,\n componentName):\n self.dictComponent[componentType] = \\\n eval(\"ComponentType_\" + componentType_fixed + \"(self.deviceExtAddr,\\\n componentEnabled,\\\n self.deviceName,\\\n componentName,\\\n self.zigbeeComponent_handle,\\\n self.casasNetwork_handle,\\\n self.zigbeeQueue,\\\n self.persistentDataServer_handle,\\\n self)\")\n return\n","sub_path":"DataCap/CASAS/core/ZigbeeAgent/zigbee/DeviceType_c4_cardaccess_inhome_WPS10A.py","file_name":"DeviceType_c4_cardaccess_inhome_WPS10A.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"529577913","text":"from random import randint\r\n\r\nimport pyglet\r\nfrom pyglet.window import key\r\n\r\nfrom pack import resources, classes, Enemy, Throwing\r\nfrom pack.classes import PhysicalObject\r\n\r\n\r\nplayer_pos = (0, 0)\r\nclass Player(PhysicalObject):\r\n \r\n def __init__(self, *args, **kwargs):\r\n super(Player, self).__init__(img=resources.player_image, *args, **kwargs)\r\n self.keys = [] \r\n self.box = pyglet.sprite.Sprite(img=resources.opening_image, *args, **kwargs)\r\n self.hp = 100\r\n self.x = resources.window_dimensions[0] * 4 / 5\r\n self.y = resources.window_dimensions[1] / 4 \r\n self.default_x = self.x\r\n self.default_y = self.y\r\n \r\n self.keyboard = key.KeyStateHandler()\r\n self.box.visible = False\r\n self.box.scale = resources.window_dimensions[0] / 10 / self.box.width\r\n self.scale = resources.window_dimensions[0] / 10 / self.width\r\n self.label = classes.Text(*args, **kwargs)\r\n self.score = classes.Text(*args, **kwargs)\r\n self.score_value = 0\r\n \r\n def move_text(self):\r\n self.label.text = \"Hp : %d\" % (self.hp) \r\n self.label.x = self.x\r\n self.label.y = self.y + self.height // 2 + 10 \r\n self.label.font_size = 10\r\n self.label.newdraw(self.label)\r\n \r\n def score_handler(self): \r\n self.score.text = \"Score : %d\" % (self.score_value) \r\n self.score.x = resources.window_dimensions[0] * 2 / 15\r\n self.score.y = resources.window_dimensions[1] * 14 / 15\r\n self.score.font_size = 15\r\n self.score.newdraw(self.score)\r\n\r\n def update(self, dt):\r\n \r\n PhysicalObject.update(PhysicalObject)\r\n self.check_bounds()\r\n if self.keyboard[key.LEFT]:\r\n self.x -= 500 * dt\r\n elif self.keyboard[key.RIGHT]:\r\n self.x += 500 * dt\r\n if self.keyboard[key.UP]:\r\n self.y += 500 * dt\r\n elif self.keyboard[key.DOWN]:\r\n self.y -= 500 * dt\r\n \r\n if self.keyboard[key.SPACE] and classes.flags[\"Spell_loaded\"]:\r\n self.fire()\r\n classes.flags[\"Spell_loaded\"] = False\r\n 
pyglet.clock.schedule_once(self.reload_spell, 1)\r\n \r\n if classes.flags[\"Goblin_dead\"]:\r\n self.score_increase()\r\n classes.flags[\"Goblin_dead\"] = False\r\n self.move_text()\r\n self.score_handler()\r\n self.box.x = self.x\r\n self.box.y = self.y\r\n if classes.flags[\"Player_dead\"]:\r\n self.death()\r\n player_pos = (self.x, self.y) \r\n \r\n def score_increase(self): \r\n self.score_value += randint(75, 150)\r\n print(\"called\")\r\n def death(self):\r\n self.label.text = \"GAME OVER!!!\"\r\n self.label.x = resources.window_dimensions[0] / 2\r\n self.label.y = resources.window_dimensions[1] / 2\r\n self.label.font_size = 50\r\n self.label.newdraw(self.label)\r\n \r\n self.score_value = -9999\r\n self.score_handler()\r\n \r\n def reload_spell(self, dt): \r\n classes.flags[\"Spell_loaded\"] = True\r\n \r\n def handle_colision_with(self, other_object):\r\n if isinstance(other_object, classes.Interaction):\r\n self.x = self.default_x\r\n self.y = self.default_y\r\n if isinstance(other_object, Enemy.Goblin):\r\n other_object.damaged()\r\n self.x = self.default_x\r\n self.y = self.default_y\r\n \r\n def fire(self):\r\n spell = Throwing.Spell(x=self.x - self.width // 2 + 10, y=self.y, batch=self.entry, group = resources.player_level)\r\n spell.visible = True\r\n self.new_objects.append(spell)\r\n \r\n def damaged(self):\r\n self.hp -= randint(0, 10)\r\n if self.hp <= 0:\r\n classes.flags[\"Player_dead\"] = True\r\n","sub_path":"No map/pack/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"141096427","text":"import sys\nimport time\nimport os\nimport subprocess\nfrom termcolor import colored\nfrom operator import add, itemgetter\nfrom time import time\n\nos.environ[\"PYSPARK_PYTHON\"]=\"python3\"\nos.environ[\"PYSPARK_DRIVER_PYTHON\"]=\"python3\"\nDIRS = ['machine_events','task_events'] \nHOST_PATH = 'gs://clusterdata-2011-2/'\nverbose = False\n\ndef passarg(v):\n global verbose\n verbose = v\n\ndef timing(f):\n def wrap(*args):\n time1 = time()\n ret = f(*args)\n time2 = time()\n print('{:s} took {:.3f}ms = {:.3f}s\\n'.format(f.__name__, (time2-time1)*1000.0, (time2-time1)))\n return ret\n return wrap\n\ndef findcol(firstLine, name):\n return firstLine.index(name) if name in firstLine else -1\n\ndef txt_to_data(f):\n with open(f,'r') as infile:\n return eval(infile.read()) \n\ndef fetch_from_host(host_path, amt = 1, destination = './data/'):\n files = subprocess.check_output('gsutil ls '+host_path, shell=True).splitlines()\n for i in range(amt):\n d = str(files[i])[len(host_path)+2:-1]\n os.system('gsutil cp '+host_path+d+' '+destination)\n os.system('gunzip '+destination+d)\n\n@timing\ndef distribution(firstline, entries, ID, val, combine_p_key = None):\n '''Finds the distribution of the variable 'val', by unique value of 'ID'''\n kv = entries.map(lambda x: (x[findcol(firstline,ID)],x[findcol(firstline,val)])) if combine_p_key==None else \\\n entries.map(lambda x: (x[findcol(firstline,ID)]+x[findcol(firstline, combine_p_key)],x[findcol(firstline,val)]))\n distrib = kv.reduceByKey(lambda x,y: (x)).map(lambda x: (x[1],1)).reduceByKey(add)\n result = sorted(distrib.collectAsMap().items(), key = lambda x : x[1],reverse=True)\n if verbose: print(colored(\"RDD distribution of \"+val+\"\\n\",\"cyan\"),*('\\t'+val+': '+str(x[0])+' Count: '+str(x[1])+'\\n' for x in 
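The Spark util record above wraps functions in a @timing decorator that reports wall-clock duration; a compact, self-contained version of the same pattern with example usage:

```python
import time

def timing(f):
    def wrap(*args):
        t0 = time.time()
        result = f(*args)
        print('%s took %.3f s' % (f.__name__, time.time() - t0))
        return result
    return wrap

@timing
def slow_sum(n):
    return sum(range(n))

slow_sum(10**6)
```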
result))\n","sub_path":"labSpark2/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"418901894","text":"'''\nClean the data\n'''\nimport re\nimport pandas as pd\nimport numpy as np\nimport googlemaps\n\nFAMILY_SUPPORT = pd.read_csv(r\"family_support.csv\")\nHEALTH_SERVICE = pd.read_excel(r\"health_service.xlsx\")\nCOMMUNITY = pd.read_csv(r\"community.csv\")\nPARK = pd.read_csv(r\"park.csv\")\nWARMING_CENTER = pd.read_csv(r'warming_center.csv')\nCOOLING_CENTER = pd.read_csv(r'cooling_center.csv')\nSENIOR_CENTER = pd.read_csv(r'senior_center.csv')\nCONDOM = pd.read_csv(r'condom.csv')\nHEALTH_CLINIC = pd.read_csv(r'health_clinic.csv')\n\nCOL_LIST = [\"facility_name\", \"address\", \"community_area\",\n \"phone_number\", \"zipcode\", \"operation_time\",\n \"longitude\", \"latitude\", \"x_coordinate\",\n \"y_coordinate\", \"service_type\", \"notes\"]\n\nRENAME_DICT = {'ADDITIONAL NOTES': 'notes', 'ADDRESS': 'address',\n 'Address': 'address', 'Clinic Type': 'service_type',\n 'Community Area': 'community_area',\n 'Community Area (#)': 'community_area',\n 'Division': 'service_type',\n 'FACILITY_N': 'facilities',\n 'FQHC, Look-alike, or Neither; Special Notes': 'notes',\n 'Facility': 'facility_name',\n 'HOURS OF OPERATION': 'operation_time',\n 'Hours of Operation': 'operation_time', 'LOCATION': 'location',\n 'Location':'location', 'Latitude': 'latitude',\n 'Longitude': 'longitude', 'PARK': 'facility_name',\n 'PHONE': 'phone_number', 'Phone': 'phone_number',\n 'Phone 1': 'phone_number', 'Phone Number': 'phone_number',\n 'Program Model': 'notes', 'SITE': 'facility_name',\n 'Site Name': 'facility_name', 'Street Address': 'address',\n 'X Coordinate': 'x_coordinate', 'X_COORD': 'longitude',\n 'Y Coordinate': 'y_coordinate', 'Y_COORD': 'latitude',\n 'ZIP': 'zipcode', 'ZIP Code': 'zipcode'}\n\n\n\ndef extract_lon_and_lat(df, col_name):\n '''\n Extract longitude and latitude of a data frame from a certain column\n Inputs:\n df: a data frame\n col_name: the name of the column that contains longitude and\n latitude information\n Outputs:\n add two new columns of longitude and latitude information\n '''\n df['latitude'] = df.apply(lambda x: re.search((r'(\\d\\d\\.[0-9]+)(\\,)'),\\\n x[col_name]).group(1)\\\n if not (x[col_name]\\\n is np.nan) else np.nan,\\\n axis=1)\n df['longitude'] = df.apply(lambda x: re.search((r'(\\, )(-\\d\\d\\.[0-9]+)'),\\\n x[col_name]).group(2)\\\n if not (x[col_name]\\\n is np.nan) else np.nan,\\\n axis=1)\n return df\n\n\ndef get_zipcode_address(df, col_name):\n '''\n Extract zip code and address of the data frame from a certain column\n Inputs:\n df: a data frame\n col_name: the name of the column that contains zipcode and address\n information\n Outputs:\n add two new columns of zipcode and address information\n '''\n if \"zipcode\" not in df.columns:\n df['zipcode'] = df.apply(lambda x: re.search(\n (r\"([\\w\\s\\.]+)([0-9]{5})(\\n)\"), x[col_name]).group(2), axis=1)\n df['address'] = df.apply(lambda x: re.search(\n (r\"([\\w\\s\\.]+)([0-9]{5})(\\n)\"), x[col_name]).group(1), axis=1)\n\n return df\n\n\ndef select_columns(df):\n '''\n The function is used to select the target columns, and add empty target\n columns which does not exist in the given data frame.\n Input:\n df: the filtered dataframe\n Returns:\n target df\n '''\n for col in df.columns:\n if col not in COL_LIST:\n df = df.drop(columns=[col])\n for target_col in COL_LIST:\n if target_col not 
in df.columns:\n df[target_col] = np.nan\n\n return df[COL_LIST]\n\n\ndef fill_in_facility(df):\n '''\n This function is used to find the collection of facilities in a park\n Input:\n df: the filtered dataframe\n Returns:\n df with collection of facilities (string in \"notes\" column)\n '''\n df[\"number\"] = 1\n fac_table = df.pivot_table(index=[\"facility_name\"],\\\n columns=[\"facilities\"], values=\"number\")\n fac_table[\"notes\"] = \"\"\n fac_table[\"facilities\"] = fac_table.index\n for fac_col in fac_table.columns[:-2]:\n fac_table.loc[fac_table[fac_col].notnull(), \"notes\"] +=\\\n (fac_col + \"/ \")\n\n return fac_table[[\"facilities\", \"notes\"]]\n\n\ndef get_coordiance(df):\n '''\n Get the latitude and longitude from an address through Google Map API\n Inputs:\n df: a data frame\n Ouputs:\n add two columns to the data frame and return the new data frame\n '''\n gmaps_key = googlemaps.Client(key=\\\n 'Your API KEY HERE')\n df['geocode_result'] = df.apply(lambda x: \\\n gmaps_key.geocode(x['address']), axis=1)\n df['latitude'] = df.apply(lambda x:\\\n x['geocode_result'][0]['geometry']['location']['lat']\\\n if x['geocode_result'] else None, axis=1)\n df['longitude'] = df.apply(lambda x:\\\n x['geocode_result'][0]['geometry']['location']['lng']\\\n if x['geocode_result'] else None, axis=1)\n df_cleaned = df.drop(columns=['geocode_result'])\n return df_cleaned\n\n\n# For warming center\nWARMING_CENTER['facility_name'] = WARMING_CENTER['SITE TYPE'] + \" (\" + \\\n WARMING_CENTER['SITE NAME'] + \")\"\nWARMING_CENTER = WARMING_CENTER.rename(RENAME_DICT, axis='columns')\nWARMING_CENTER = extract_lon_and_lat(WARMING_CENTER, 'location')\nWARMING_CENTER = select_columns(WARMING_CENTER)\nWARMING_CENTER['service_type'] = 'warming center'\n\n# For cooling center\nCOOLING_CENTER['facility_name'] = COOLING_CENTER['SITE TYPE'] + \" (\" + \\\n COOLING_CENTER['SITE NAME'] + \")\"\nCOOLING_CENTER = COOLING_CENTER.rename(RENAME_DICT, axis='columns')\nCOOLING_CENTER = extract_lon_and_lat(COOLING_CENTER, 'location')\nCOOLING_CENTER = select_columns(COOLING_CENTER)\nCOOLING_CENTER['service_type'] = 'cooling center'\n\n# For senior center\nSENIOR_CENTER['facility_name'] = SENIOR_CENTER['PROGRAM'] + \" (\" + \\\n SENIOR_CENTER['SITE NAME'] + \")\"\nSENIOR_CENTER = SENIOR_CENTER.rename(RENAME_DICT, axis='columns')\nSENIOR_CENTER = extract_lon_and_lat(SENIOR_CENTER, 'location')\nSENIOR_CENTER = select_columns(SENIOR_CENTER)\nSENIOR_CENTER['service_type'] = 'senior center'\n\n# For condom distribution site\nCONDOM['facility_name'] = CONDOM['Name'] + \" (\" + CONDOM['Venue Type'] + \")\"\nCONDOM = CONDOM.rename(RENAME_DICT, axis=\"columns\")\nCONDOM = extract_lon_and_lat(CONDOM, 'location')\nCONDOM = select_columns(CONDOM)\nCONDOM['service_type'] = 'condom distribution site'\n\n# For health clinic\nHEALTH_CLINIC = HEALTH_CLINIC.rename(RENAME_DICT, axis=1)\nHEALTH_CLINIC = select_columns(HEALTH_CLINIC)\nHEALTH_CLINIC[\"operation_time\"] = HEALTH_CLINIC.apply(lambda x: re.sub(\\\n r\"\\¨C\", \"-\", x[\"operation_time\"]) if not (x[\"operation_time\"]\\\n is np.nan) else np.nan, axis=1)\n\n# For family support\nFAMILY_SUPPORT = FAMILY_SUPPORT.rename(RENAME_DICT, axis=\"columns\")\nFAMILY_SUPPORT = select_columns(FAMILY_SUPPORT)\nFAMILY_SUPPORT[\"service_type\"] = FAMILY_SUPPORT.apply(lambda x:\\\n \"family support (\" + x[\"service_type\"] + \")\", axis=1)\nFAMILY_SUPPORT = FAMILY_SUPPORT[FAMILY_SUPPORT[\"zipcode\"].notnull()]\nFAMILY_SUPPORT[\"zipcode\"] = FAMILY_SUPPORT.apply(lambda x:\\\n 
int(x[\"zipcode\"]), axis=1)\n\n# For health service\nHEALTH_SERVICE = HEALTH_SERVICE.rename(RENAME_DICT, axis=\"columns\")\nHEALTH_SERVICE = extract_lon_and_lat(HEALTH_SERVICE, \"address\")\nHEALTH_SERVICE = get_zipcode_address(HEALTH_SERVICE, \"address\")\nHEALTH_SERVICE = select_columns(HEALTH_SERVICE)\nHEALTH_SERVICE[\"service_type\"] = \"health service\"\n\n# For community service\nCOMMUNITY = COMMUNITY.rename(RENAME_DICT, axis=\"columns\")\nCOMMUNITY = extract_lon_and_lat(COMMUNITY, \"location\")\nCOMMUNITY[\"service_type\"] = \"community service\"\nCOMMUNITY = select_columns(COMMUNITY)\n\n# For park\nPARK = PARK.rename(RENAME_DICT, axis=\"columns\")\nFAC_TABLE = fill_in_facility(PARK).reset_index()\nFAC_TABLE[\"longitude\"] = 0\nFAC_TABLE[\"latitude\"] = 0\nfor fac_name in list(PARK[\"facility_name\"].unique()):\n lon_lat = PARK.loc[PARK[\"facility_name\"] == fac_name,\\\n [\"longitude\", \"latitude\"]].iloc[[0]]\n FAC_TABLE.loc[FAC_TABLE[\"facility_name\"] == fac_name, \"longitude\"]\\\n = lon_lat[\"longitude\"].values[0]\n FAC_TABLE.loc[FAC_TABLE[\"facility_name\"] == fac_name, \"latitude\"]\\\n = lon_lat[\"latitude\"].values[0]\n\nFAC_TABLE[\"service_type\"] = \"park\"\nPARK = select_columns(FAC_TABLE)\nFOOD_PANTRY = pd.read_csv(\"food_pantry.csv\")\nFOOD_PANTRY = get_coordiance(FOOD_PANTRY)\nFOOD_PANTRY = select_columns(FOOD_PANTRY)\nSHELTER = pd.read_csv(\"shelter.csv\")\nSHELTER = get_coordiance(SHELTER)\nSHELTER = select_columns(SHELTER)\n\nFULL_DATA = pd.concat([FAMILY_SUPPORT, HEALTH_SERVICE, COMMUNITY,\\\n PARK, WARMING_CENTER, COOLING_CENTER,\\\n SENIOR_CENTER, CONDOM, HEALTH_CLINIC,\\\n FOOD_PANTRY, SHELTER], join=\"inner\").reset_index(\\\n drop=True)\nFULL_DATA[\"service_type\"] = FULL_DATA[\"service_type\"].apply(\\\n lambda x: x.lower())\nFULL_DATA.to_csv(\"full_data.csv\")\n","sub_path":"raw_data/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":9831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"68041890","text":"class Solution(object):\n def newInteger(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n real_n = n\n m = [0] * 10\n for k in xrange(10):\n m[k] = real_n / (9 * (10**k))\n #(real_n,k,m[k]).p()\n if m[k] > 0:\n real_n += m[k] * (10 ** k)\n else:\n break\n return real_n","sub_path":"python/leetcode/hard/remove_9.py","file_name":"remove_9.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"424534487","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('front', '0004_volunteer_education'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='volunteer',\n name='occupation',\n field=models.CharField(default='blank', max_length=32, verbose_name='occupation'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='volunteer',\n name='organization',\n field=models.CharField(default='blank', max_length=32, verbose_name='organization'),\n preserve_default=False,\n ),\n ]\n","sub_path":"front/migrations/0005_auto_20150414_0829.py","file_name":"0005_auto_20150414_0829.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"512015981","text":"# crutial import for backend to run py itself\nimport os, 
sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom server import db\nfrom backend.user_db import *\nfrom backend.auth import *\nimport random\n\nfrom .progression_bar import printProgressBar\nimport time\n\ndef load_reviews(m):\n rev = EateryReviews()\n printProgressBar(0, m, prefix = 'Progress:', suffix = '', length = 50)\n for eid in range(m):\n for voucher in Voucher.query.filter_by(eatery_id=eid, if_booked=True, if_used=True).all():\n add_comment = random.randint(0,1)\n if add_comment == 0:\n for i in range(random.randint(2,5)):\n rating, comment = rev.get_review()\n diner_id = voucher.diner_id\n voucher_id = voucher.id\n review = Review(diner_id, voucher_id, comment, rating)\n db.session.add(review)\n db.session.commit()\n printProgressBar(eid + 1, m, prefix = 'Loading Reviews:', suffix = '', length = 50)\n pass\n\nclass EateryReviews:\n def __init__(self) -> None:\n self.words = \"Sweet Escape is Sydney's most loved home base for espresso and discussions. \\\n We offer a delicious variety of coffee from Sweet Escape made by our professionally trained \\\n baristas, from your classic coffee to our house specialty. You can complete your coffee with \\\n oen of our sweet treats made by our very own baker We welcome you to sit back, unwind and \\\n appreciate the lovely sights of the city while our best gourmet expert sets you up a scrumptious \\\n dinner utilizing the best and freshest ingredients. The adaptable menu flaunts some imaginative \\\n food, for example, salt and pepper squid on a delicate, Thai-roused plate of mixed greens; \\\n harissa angle soup (with the harissa glue served in a little glass); lemon simmered chicken \\\n on dark pepper gnocchi; and a most heavenly cinnamon. The eatery utilizes neighborhood create \\\n for fish and venison dishes flourish. Tastes great! Eating something delicious right now? Use \\\n this expression to say so. I’m so glad I ordered this pizza—it tastes great! Really good! \\\n Here’s something else you could say instead of delicious. Have you tried the chocolate cake? \\\n It’s really good! Wow, [this food] is amazing! If something tastes better than you expected, \\\n you could use the word wow to express your surprise. If you say something tastes amazing, you’re \\\n saying it tastes even better than great or really good. Wow, this pasta salad is amazing! \\\n Yummy This is an informal way of saying something tastes good. If you find something to be \\\n delicious, you could simply say “Yummy!” or you could expand it into a sentence. This cheesecake \\\n is really yummy. I’m going for another slice. 
Flavorful This is a great adjective for describing \\\n food that’s full of flavor or that has a delicious quality in its taste and smell\".split(' ')\n \n def get_review(self):\n words = self.words\n # generate a random review lenght\n review_length = random.randint(20, 40)\n # generate a random review rating\n rating = random.randint(1,5)\n\n word_idx = random.sample(range(0, len(words)-1), review_length)\n res = ''\n for i in word_idx:\n res += words[i]\n res += ' '\n \n return rating, res\n\n\n\n ","sub_path":"load_data/load_reviews.py","file_name":"load_reviews.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"585267052","text":"#!/usr/bin/env python3\n\nimport math\nimport re\nfrom collections import Counter\nfrom itertools import chain\nfrom multiprocessing import Pool\nfrom optparse import OptionParser\nfrom typing import Dict, List, Set, Tuple\n\n\ndef get_arg_parser():\n parser = OptionParser()\n parser.add_option(\n \"--train-file\",\n dest=\"train_file\",\n help=\"Raw text as training data.\",\n metavar=\"FILE\",\n default=None,\n )\n parser.add_option(\n \"--vocab-size\",\n type=\"int\",\n dest=\"vocab_size\",\n help=\"Vocabulary Size.\",\n default=20000,\n )\n parser.add_option(\n \"--train-out\",\n dest=\"train_output_file\",\n help=\"BPE tokenized train file.\",\n metavar=\"FILE\",\n default=None,\n )\n parser.add_option(\n \"--num-cpus\",\n type=\"int\",\n dest=\"num_cpus\",\n help=\"Number of cpus for multi-processing.\",\n default=3,\n )\n return parser\n\n\nclass BPE(object):\n \"\"\"\n Reimplementation of BPE from https://fburl.com/r69o1rpr (Algorithm 1).\n \"\"\"\n\n def __init__(self):\n self.vocab: Dict[str, int] = Counter()\n self.eow_symbol = \"_EOW\" # End of word symbol.\n\n # This data structure holds current segmentation of training data. This\n # is useful for faster parallel computation during training.\n # str is the current segmentation, int is the frequency.\n self.current_train_data: List[Tuple[str, int]] = []\n\n # This value will change after building the vocabulary. We use this value\n # for greedy segmentation where we start by looking at the longest possible\n # character sequence wrt max_bpe_len.\n self.max_bpe_len = 1\n\n def _init_vocab(self, txt_path: str):\n data_freq: Dict[str, int] = Counter()\n with open(txt_path, \"r\", encoding=\"utf-8\") as input_stream:\n for line in input_stream:\n for word in line.strip().split():\n # Here, we allow the EOW symbol to be one independent BPE\n # token. It can potentially attach to previous letters\n # depending on the frequencies of data. 
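The get_review method above fabricates review text by sampling distinct positions from a fixed word list and a random rating. The core sampling idea in a few lines (the word list is made up):

```python
import random

WORDS = "sweet escape coffee barista delicious yummy flavorful".split()

def fake_review(min_len=3, max_len=6):
    k = random.randint(min_len, max_len)  # random review length
    return " ".join(WORDS[i] for i in random.sample(range(len(WORDS)), k))

print(fake_review())
```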
If it is attached,\n                # that is a clear indicator of a suffix.\n                data_freq[\" \".join(list(word) + [self.eow_symbol])] += 1\n\n        self.current_train_data: List[Tuple[str, int]] = []\n        for segmentation, freq in data_freq.items():\n            self.current_train_data.append((segmentation, freq))\n\n    def _best_candidate_substep(self, start_end_indices: Tuple[int, int]):\n        \"\"\"\n        Args:\n            start and end index for part of self.current_train_data to search for.\n        \"\"\"\n        start_index, end_index = start_end_indices[0], start_end_indices[1]\n        assert start_index <= end_index\n\n        candidates = Counter()\n        for (seg, freq) in self.current_train_data[start_index:end_index]:\n            symbols = seg.split()\n            for i in range(len(symbols) - 1):\n                candidates[(symbols[i], symbols[i + 1])] += freq\n        return candidates\n\n    def get_best_candidate(self, num_cpus: int):\n        \"\"\"\n        Calculates frequencies for new candidates from the current vocabulary,\n        and returns the candidate with the highest frequency.\n        \"\"\"\n        with Pool(processes=num_cpus) as pool:\n            data_chunk_size = max(1, math.ceil(len(self.current_train_data) / num_cpus))\n            indices = [\n                (\n                    i * data_chunk_size,\n                    min(data_chunk_size * (i + 1), len(self.current_train_data)),\n                )\n                for i in range(num_cpus)\n            ]\n            results = pool.map(self._best_candidate_substep, indices)\n            candidates = sum(results, Counter())\n        return max(candidates, key=candidates.get) if len(candidates) > 0 else None\n\n    @staticmethod\n    def get_merge_pattern(candidate_str):\n        return re.compile(r\"(?<!\\S)\" + re.escape(candidate_str) + r\"(?!\\S)\")\n\n    def merge_substep(\n        self, merge_candidate: Tuple[str, str], start_end_index: Tuple[int, int]\n    ) -> Tuple[List[Tuple[str, int]], Set]:\n        \"\"\"\n        Returns BPE types in the current substep.\n        \"\"\"\n        candidate_str = \" \".join(merge_candidate)\n        candidate_replacement = \"\".join(merge_candidate)\n        pattern = BPE.get_merge_pattern(candidate_str)\n        offset, stop_index = start_end_index\n        assert offset < stop_index\n\n        new_bpe_entries = set()\n        new_data: List[Tuple[str, int]] = [None] * (stop_index - offset)\n        for i in range(offset, stop_index):\n            vocab_entry, freq = self.current_train_data[i]\n            new_entry = vocab_entry\n            if candidate_str in vocab_entry:\n                # Regex is usually slow. We just apply it on words that have the\n                # potential of replacement.\n                new_entry = pattern.sub(candidate_replacement, vocab_entry)\n\n            new_data[i - offset] = (new_entry, freq)\n            for entry in new_entry.split():\n                new_bpe_entries.add(entry)\n        return (new_data, new_bpe_entries)\n\n    def merge_candidate_into_vocab(\n        self, candidate: Tuple[str, str], num_cpus: int\n    ) -> int:\n        \"\"\"\n        Returns the vocabulary size (number of BPE types).\n        Args:\n            candidate: a pair of strings to be merged in all entries.\n        \"\"\"\n        with Pool(processes=num_cpus) as pool:\n            data_chunk_size = max(1, math.ceil(len(self.current_train_data) / num_cpus))\n            candidate_str_list = [\n                (\n                    (candidate[0], candidate[1]),\n                    (\n                        i * data_chunk_size,\n                        min(data_chunk_size * (i + 1), len(self.current_train_data)),\n                    ),\n                )\n                for i in range(num_cpus)\n            ]\n\n            results = pool.starmap(self.merge_substep, candidate_str_list)\n\n            # Each first element in results is a List[Tuple[str, int]]. 
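The get_merge_pattern regex above (reconstructed here, since HTML stripping had eaten the span between the angle brackets) uses a lookbehind/lookahead pair so a candidate bigram is only merged when it stands alone between whitespace boundaries. For example:

```python
import re

pattern = re.compile(r"(?<!\S)" + re.escape("a b") + r"(?!\S)")
print(pattern.sub("ab", "a b c a bz"))  # -> "ab c a bz"; the glued pair survives
```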
By using\n # the * operation with chain we concatenate the lists in order to\n # reconstruct the training data.\n self.current_train_data = list(chain(*[result[0] for result in results]))\n bpe_types_union = set.union(*[result[1] for result in results])\n return len(bpe_types_union)\n\n def build_vocab(self, txt_path: str, vocab_size: int, num_cpus: int) -> int:\n \"\"\"\n After building the vocab, sends the current number of bpe types.\n\n Args:\n txt_path: Raw text file.\n vocab_size: The maximum number of vocabulary items we need to have.\n \"\"\"\n self._init_vocab(txt_path=txt_path)\n step = 0\n while True:\n merge_candidate = self.get_best_candidate(num_cpus=num_cpus)\n if merge_candidate is not None:\n cur_v_size = self.merge_candidate_into_vocab(\n candidate=merge_candidate, num_cpus=num_cpus\n )\n if cur_v_size >= vocab_size:\n break\n else:\n # No more merges possible\n break\n step += 1\n if step % 100 == 0:\n print(\"BPE merging step\", step, \"current vocabulary size\", cur_v_size)\n\n # Now we get rid of the current vocab that is based on the corpus (not\n # memory-efficient). We now only keep the final bpe tokens.\n self.vocab: Dict[str, int] = Counter()\n self.max_bpe_len = 1\n for (vocab_entry, freq) in self.current_train_data:\n for bpe_token in vocab_entry.split():\n self.vocab[bpe_token] += freq\n self.max_bpe_len = max(self.max_bpe_len, len(bpe_token))\n\n print(\"BPE vocab built with size\", len(self.vocab))\n return len(self.vocab)\n\n def segment_word(self, word: str) -> List[str]:\n \"\"\"\n The current segmentation is greedy based on picking the longest possible\n character sequences first. The original work picks based on the most\n frequent character sequence.\n \"\"\"\n word_chars = list(word) + [self.eow_symbol]\n start_idx, end_idx = 0, min(len(word_chars), self.max_bpe_len)\n subwords = []\n while start_idx < len(word_chars):\n subword = \"\".join(word_chars[start_idx:end_idx])\n if subword in self.vocab or end_idx - start_idx == 1:\n subwords.append(subword)\n start_idx = end_idx\n end_idx = min(len(word_chars), start_idx + self.max_bpe_len)\n else:\n end_idx -= 1\n return subwords\n\n def segment_txt(self, input_path: str, output_path: str):\n segmentation_cache = {}\n with open(output_path, \"w\", encoding=\"utf-8\") as writer:\n with open(input_path, \"r\", encoding=\"utf-8\") as input_file:\n for line in input_file:\n output_bpe_tokens = []\n for word in line.strip().split():\n if word not in segmentation_cache:\n segmentation_cache[word] = self.segment_word(word)\n output_bpe_tokens += segmentation_cache[word]\n writer.write(\" \".join(output_bpe_tokens))\n writer.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n arg_parser = get_arg_parser()\n options, args = arg_parser.parse_args()\n bpe_model = BPE()\n bpe_model.build_vocab(\n txt_path=options.train_file,\n vocab_size=options.vocab_size,\n num_cpus=options.num_cpus,\n )\n bpe_model.segment_txt(\n input_path=options.train_file, output_path=options.train_output_file\n )\n","sub_path":"pytorch_translate/research/unsupervised_morphology/bpe.py","file_name":"bpe.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"347030480","text":"# Description: This script handles all the data grabbing / formatting for driver_data.py \n\nimport data_ui_helper\nimport requests\nimport time\nfrom datetime import datetime\n\n# Verify that the ticker is valid and grab/print some daily trade info for the underlying.\ndef 
background_info(ticker, api_key):\n price_response = requests.get('https://sandbox.tradier.com/v1/markets/quotes',\n params={'symbols': ticker},\n headers={'Authorization': api_key, 'Accept': 'application/json'}\n )\n if (price_response.status_code == 401): # only need to check here, key cannot be changed after this call.\n print(\"Invalid API Key. Terminating Program.\"); exit()\n price_json = price_response.json()\n try:\n quote = price_json['quotes']['quote']\n except KeyError:\n print(\"Could not find data for symbol: (\" + ticker + \"). Terminating program.\"); exit()\n\n print_quote_info(quote) # print information about the daily trading range.\n return quote['description'] # return the full name of the company for plots and whatnot\n\n# Print background info about the company / daily trading range.\ndef print_quote_info(quote):\n data_ui_helper.print_sleep(1)\n print(\"You have selected \" + quote['description'] + \" (\" + quote['symbol'] + \").\")\n print(\"The Daily Price Range [low/high] is: $ [\" + str(quote['low']) + \" / \" + str(quote['high']) + \"]\")\n print(\"The Last Trade Price was: $\" + str(quote['last']) + \" and Today's Volume is: \" + '{:,.0f}'.format(quote['volume']))\n if (float(quote['change_percentage']) >= 0):\n print(\"The Stock Price is UP +\" + str(quote['change_percentage']) + \"% on the day.\")\n else:\n print(\"The Stock Price is DOWN \" + str(quote['change_percentage']) + \"% on the day.\")\n return\n\n# Does the user want to look at call options or put options.\ndef option_type(symbol):\n data_ui_helper.print_sleep(1)\n input_str = input(\"Type C for Calls or P for Puts: \").upper(); check_sentinel(input_str)\n if (input_str == \"C\"):\n print(\"Selected Call Options for \" + symbol)\n elif (input_str == \"P\"):\n print(\"Selected Put Options for \" + symbol)\n else:\n print(\"Invalid option type input. Terminating program.\"); exit()\n \n return input_str\n\n# Download and print a list of all available expiry dates for options for the symbol\ndef get_expiry_dates(ticker, api_key):\n dates_response = requests.get('https://sandbox.tradier.com/v1/markets/options/expirations?',\n params={'symbol': ticker},\n headers={'Authorization': api_key, 'Accept': 'application/json'}\n )\n dates_json = dates_response.json()\n dates_list = dates_json['expirations']['date']\n \n if (len(dates_list)):\n print(dates_list)\n else:\n print(\"No options available for symbol: \" + ticker + \". Terminating Program.\"); exit()\n \n data_ui_helper.print_sleep(1)\n return dates_list\n\n# Download and print a list of all available strikes for the expiry date.\ndef get_strike_list(ticker, expiry, api_key):\n strike_list_response = requests.get('https://sandbox.tradier.com/v1/markets/options/strikes?',\n params={'symbol': ticker, 'expiration': expiry},\n headers={'Authorization': api_key, 'Accept': 'application/json'}\n )\n strikes_json = strike_list_response.json()\n strikeList = strikes_json['strikes']['strike']\n print(\"List of available strike prices: \")\n print(strikeList)\n \n return strikeList\n\n# Prompt the user for the earliest date in which they want to get data for, then determine whether to retrieve /history/ or /timesales/ data.\ndef get_start_date(history_limit):\n start_date = input(\"Input a start date for the data range (YYYY-mm-dd): \"); check_sentinel(start_date)\n try:\n start_datenum = datetime.strptime(start_date, \"%Y-%m-%d\")\n except ValueError:\n print(\"Invalid date format. 
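The get_start_date routine above compares epoch seconds to decide between the daily /history/ and intraday /timesales/ endpoints. The same cutoff test written with datetime arithmetic, assuming the 35-day default limit:

```python
from datetime import datetime, timedelta

def use_history_endpoint(start_date, history_limit=35):
    start = datetime.strptime(start_date, "%Y-%m-%d")
    return datetime.now() - start > timedelta(days=history_limit)

print(use_history_endpoint("2020-01-01"))  # True once the date is >35 days back
```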
Terminating Program.\"); exit()\n\n start_date_seconds = time.mktime(start_datenum.timetuple())\n current_time_seconds = time.mktime(datetime.now().timetuple()) #seconds since the input date\n\n should_use_history_endpoint = False\n if (current_time_seconds - start_date_seconds > history_limit*24*60*60):\n should_use_history_endpoint = True\n\n return start_date, should_use_history_endpoint\n\n# Get a timeseries of all the trade data.\ndef get_trade_data(option_symbol, start_date, binning, should_use_history_endpoint, api_key):\n if(should_use_history_endpoint):\n trade_data_response = requests.get('https://sandbox.tradier.com/v1/markets/history?',\n params={'symbol': option_symbol, 'start': start_date},\n headers={'Authorization': api_key, 'Accept': 'application/json'}\n )\n trade_data_json = trade_data_response.json()\n return(trade_data_json['history']['day'])\n else:\n trade_data_response = requests.get('https://sandbox.tradier.com/v1/markets/timesales?',\n params={'symbol': option_symbol, 'start': start_date, 'interval':(str(int(binning))+\"min\")},\n headers={'Authorization': api_key, 'Accept': 'application/json'}\n )\n trade_data_json = trade_data_response.json()\n return (trade_data_json['series']['data'])\n\n\n# Allow the user to modify the settings for the program at runtime.\ndef modify_settings(settings):\n data_ui_helper.print_sleep(3)\n print(\"The following runtime settings of this program can be modified.\")\n print(settings)\n \n status = \"\"\n while (status.lower() != \"done\"):\n print(\"Type 'done' to return to program execution.\"); print(\"*\"); time.sleep(0.05)\n status = input(\"Which setting would you like to change: \"); check_sentinel(status)\n \n # Boolean settings just get flipped if the user wants to modify them.\n if (status.lower() == \"darkmode\"):\n settings['darkMode'] = not settings['darkMode'];\n if (status.lower() == \"watermark\"):\n settings['watermark'] = not settings['watermark'];\n if (status.lower() == \"grid\"):\n settings['grid'] = not settings['grid'];\n if (status.lower() == \"shouldprintdata\"):\n settings['shouldPrintData'] = not settings['shouldPrintData'];\n \n # Non-boolean settings modifications require additional inputs.\n if (status.lower() == \"binning\"):\n new_bin = input(\"Please input your desired binning (1/5/15 min): \"); check_sentinel(new_bin)\n if (int(new_bin) == 1 or int(new_bin) == 5 or int(new_bin) == 15):\n settings['binning'] = int(new_bin)\n else:\n print(\"Invalid input. Binning remains unmodified.\")\n \n if (status.lower() == \"historylimit\"):\n new_lim = input(\"Please input your desired day limit to transition to daily data (<35): \"); check_sentinel(new_lim)\n try:\n settings['historyLimit'] = int(new_lim)\n except ValueError:\n print(\"Invalid input. 
History limit remains unmodified.\") \n \n if (status.lower() == \"branding\"):\n new_brand = input(\"Please input your desired branding: \"); check_sentinel(new_brand)\n settings['branding'] = new_brand\n \n \n if (status.lower() == \"api_key\"):\n new_key = input(\"Please input an API_KEY (ie: Bearer xx...xx): \"); check_sentinel(new_key)\n settings['API_KEY'] = new_key\n \n \n data_ui_helper.print_sleep(3)\n print(\"The runtime settings are now currently:\")\n print(settings)\n \n data_ui_helper.print_sleep(3)\n return settings\n \n# Check all user inputs for \"exit\" to see if they want to terminate the program\ndef check_sentinel(input):\n if (input.lower() == \"exit\"):\n print(\"User Requested Program Termination.\")\n exit()\n","sub_path":"hist_options_pricing/data_grab.py","file_name":"data_grab.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"640929539","text":"\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication\nfrom PyQt5.QtGui import QFont, QPixmap\nfrom PyQt5.QtCore import Qt\n\n\nclass RecordingWidget(QWidget):\n\n def __init__(self, x=100, y=100, w=600, h=450):\n super().__init__()\n\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.CDSize = w/2.5\n self.textFontSize = 80\n self.textYShift = 20\n print('RecordingWidget x {}, y {}, w {}, h {}'.format(x, y, w, h))\n\n self.initUI()\n\n def initUI(self):\n self.setGeometry(self.x, self.y, self.w, self.h) \n\n self.lblStreamWindow = QLabel(self)\n self.lblStreamWindow.resize(self.w, self.h)\n\n # should set a larger init boundary for lblCountDown\n # and lblText for correctly calculate metrics\n self.lblCountDown = QLabel(' ',self)\n self.lblText = QLabel(' ',self)\n self.lblCountDown.setFont(QFont(\"Roman times\", self.CDSize, QFont.Bold))\n self.lblCountDown.setStyleSheet('color:rgb(218,20,20,170)')\n self.lblText.setFont(QFont(\"Roman times\", self.textFontSize, QFont.Bold))\n self.lblText.setStyleSheet('color:rgb(27,83,125,250)')\n\n self._updateLblText()\n self._updateLblCountDown()\n\n\n def setStreamView(self, pixmap, isKeepAspect=True):\n pix = QPixmap(pixmap)\n\n # resize the input pixmap\n if isKeepAspect:\n self.livemap = pix.scaled(self.w, self.h, Qt.KeepAspectRatio)\n else:\n self.livemap = pix.scaled(self.w, self.h)\n\n self.lblStreamWindow.setPixmap(self.livemap)\n\n def _getIntrinsicSize(self, label):\n width = label.fontMetrics().boundingRect(label.text()).width()\n height = label.fontMetrics().boundingRect(label.text()).height()\n return width, height\n\n def setText(self, text=''):\n self.lblText.setText(text)\n self._updateLblText()\n \n def setCountDownNumber(self, num=''):\n self.lblCountDown.setText(num)\n self._updateLblCountDown()\n\n def _updateLblText(self):\n # set lblText to horizontally canter of the main window\n w_text, h_text = self._getIntrinsicSize(self.lblText)\n self.lblText.move((self.w - w_text)/2, self.textYShift) \n\n def _updateLblCountDown(self):\n # set lblCountDown to center of the main window\n w_cd, h_cd = self._getIntrinsicSize(self.lblCountDown)\n self.lblCountDown.move((self.w - w_cd)/2, (self.h - h_cd)/2)\n\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n rw = RecordingWidget(0, 0, 800, 600)\n rw.show()\n \n rw.setText('OOOOOOOO')\n rw.setCountDownNumber('10')\n rw.setStreamView('./assets/IIS_logo.png', False)\n 
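The setStreamView method above relies on Qt.KeepAspectRatio to fit a pixmap inside the widget. The underlying fit-inside-a-box math, shown on its own for reference:

```python
def keep_aspect(src_w, src_h, box_w, box_h):
    scale = min(box_w / src_w, box_h / src_h)  # shrink to the tighter dimension
    return int(src_w * scale), int(src_h * scale)

print(keep_aspect(1920, 1080, 800, 600))  # (800, 450)
```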
sys.exit(app.exec_())\n","sub_path":"record_widget.py","file_name":"record_widget.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"192276118","text":"import torch\n\n\ndef idx2onehot(idx, n):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n assert torch.max(idx).item() < n\n if idx.dim() == 1:\n idx = idx.unsqueeze(1)\n\n onehot = torch.zeros(idx.size(0), n)\n onehot = onehot.to(device)\n idx = idx.to(device)\n onehot.scatter_(1, idx, 1)\n\n return onehot\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"650306895","text":"import json\nfrom nltk.tokenize import word_tokenize\nimport nltk\nimport numpy as np\nnltk.download('punkt')\n\nwith open(\"./word2id.json\", \"r\") as f:\n word2id = json.load(f)\n\nwith open(\"../tqa_train_val_test/train/tqa_v1_train.json\", \"r\") as f:\n train = json.load(f)\n\ndef preprocess(s):\n s = s.lower()\n s = word_tokenize(s)\n return s\n\ntrain_data=[]\n\nfor Train in train:\n Target=Train[\"questions\"][\"nonDiagramQuestions\"]\n for num in Target.keys():\n train_d={}\n answer_choice={}\n for answer,text in Target[num][\"answerChoices\"].items():\n target_a= preprocess(text[\"processedText\"])\n answer_choice[answer] = [word2id.get(w, 0) for w in target_a]\n train_d[\"answerChoices\"]=answer_choice\n\n target_ca = Target[num][\"correctAnswer\"][\"processedText\"]\n train_d[\"correctAnswer\"]=target_ca\n\n target_q = preprocess(Target[num][\"beingAsked\"][\"processedText\"])\n train_d[\"question\"] = [word2id.get(w, 0) for w in target_q]\n train_data.append(train_d)\n\nprint(train_data)\nwith open('./preprocessed_train.json', 'w') as f:\n json.dump(train_data, f, ensure_ascii=False)\n","sub_path":"natural-language-processing/data/train_data.py","file_name":"train_data.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"365827171","text":"from django.shortcuts import render\nfrom django.views import generic\n\nfrom .models import Post, Comment, Category\n\nclass IndexView(generic.ListView):\n template_name = 'myblog/templates/myblog/index.html'\n model = Post\n def get_queryset(self):\n return Post.objects.order_by('-pub_date')[:5]\n\nclass PostDetailView(generic.DetailView):\n model = Post\n## def get_context_data(self):\n## context = super(PostDetail,self).get_context_data()\n## context.update({\"comment_list\": self.get_object().comment_set.all()})\n##\n## return context\n## \n## \n## template_name = 'myblog/templates/myblog/index.html'\n## model = Post\n\nclass CategoryDetailView(generic.DetailView):\n model = Category\n \n","sub_path":"myblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286859405","text":"from dataclasses import dataclass, field\n\nfrom flask import session\n\nfrom dietr.database import database\nfrom dietr.models.allergy import Allergy\nfrom dietr.models.ingredient import Ingredient\n\n\n@dataclass\nclass Roommate:\n id: int\n handle: int\n user_id: int\n first_name: str\n middle_name: str\n last_name: str\n allergies: list = field(default_factory=list, init=False)\n preferences: list = field(default_factory=list, init=False)\n\n @property\n 
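A usage sketch for the idx2onehot helper above: scatter_ writes a 1 at each row's class index. CPU tensors are used here for portability:

```python
import torch

idx = torch.tensor([0, 2, 1])
onehot = torch.zeros(idx.size(0), 3).scatter_(1, idx.unsqueeze(1), 1)
print(onehot)  # rows: [1,0,0], [0,0,1], [0,1,0]
```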
def name(self):\n if self.middle_name:\n return f'{self.first_name} {self.middle_name} {self.last_name}'\n\n else:\n return f'{self.first_name} {self.last_name}'\n\n\nclass RoommateModel:\n def add_roommate(self, user_id, first_name, middle_name, last_name):\n \"\"\"Add a roommate to the database.\"\"\"\n handle = self.get_handle(user_id)\n\n if not handle:\n handle = 1\n\n query = '''INSERT INTO roommates (handle, user_id, first_name,\n middle_name, last_name)\n VALUES (%s, %s, %s, %s, %s)'''\n\n database.commit(query, (handle, user_id, first_name, middle_name,\n last_name))\n\n def delete_roommate(self, id):\n \"\"\"Delete a roomate from the database.\"\"\"\n query = '''DELETE FROM roommates\n WHERE id = %s'''\n\n database.commit(query, id)\n\n def get_allergies(self, id):\n \"\"\"Get all allergies for a roommate from the database and return a list\n of instances of the allergy class.\n \"\"\"\n query = '''SELECT allergies.id, allergies.name\n FROM allergies, roommates_allergies AS ra\n WHERE ra.roommate_id = %s\n AND ra.allergy_id = allergies.id'''\n\n allergies = database.fetch_all(query, id)\n\n # Convert the list of dicts to a list of allergy objects\n return [Allergy(**allergy) for allergy in allergies]\n\n def get_handle(self, user_id):\n \"\"\"Get the highest roommate id for an user. This is used to genereate a\n handle for a roommate.\n \"\"\"\n query = '''SELECT MAX(handle) + 1 AS handle\n FROM roommates\n WHERE user_id = %s'''\n\n # Return the\n return database.fetch(query, user_id)['handle']\n\n def get_preferences(self, id):\n \"\"\"Get all preferences for a roommate from the database and return a\n listof instances of the ingredient class.\n \"\"\"\n query = '''SELECT ingredients.id, ingredients.name\n FROM ingredients, roommates_preferences AS rp\n WHERE rp.roommate_id = %s\n AND rp.ingredient_id = ingredients.id'''\n\n preferences = database.fetch_all(query, id)\n\n # Convert the list of dicts to a list of ingredient objects\n return [Ingredient(**ingredient) for ingredient in preferences]\n\n def get_roommate(self, user_id, handle):\n \"\"\"Get a roommate from the database and return an instance of the\n roommate class.\n \"\"\"\n query = '''SELECT id, handle,\n user_id,\n first_name, middle_name, last_name\n FROM roommates\n WHERE handle = %s\n AND user_id = %s'''\n\n # Convert dict to a roommate object\n return Roommate(**database.fetch(query, (handle, user_id)))\n\n def get_roommates(self, user_id):\n \"\"\"Get all roommates for a user from the database and return a list of\n instances of the roommate class.\n \"\"\"\n query = '''SELECT id, handle,\n user_id,\n first_name, middle_name, last_name\n FROM roommates\n WHERE user_id = %s\n ORDER BY handle'''\n\n roommates = database.fetch_all(query, user_id)\n\n # Convert the list of dicts to a list of roommate objects\n return [Roommate(**roommate) for roommate in roommates]\n\n def set_roommate(self, id, first_name, middle_name, last_name):\n \"\"\"Set the name of a roommate.\"\"\"\n query = '''UPDATE roommates\n SET first_name = %s,\n middle_name = %s,\n last_name = %s\n WHERE id = %s'''\n\n database.commit(query, (first_name, middle_name, last_name, id))\n","sub_path":"dietr/models/roommate.py","file_name":"roommate.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"348335723","text":"INFINITY = 1000\ndef search(startState):\n score,move = MiniMax(startState,None,True)\n pos, piece = move\n print(\"You will get a score of 
\" + str(score) + \" if you put an \" + piece + \" in the position \" + str(pos+1))\n\ndef MiniMax(s, e, isMax):\n s1 = updateState(s, e)\n if isWin(s1):\n return (score(s1, not(isMax)), \"Done\")\n if isTie(s1):\n return (0, \"Done\")\n if isMax:\n highest = -100\n for e1 in getMoves(s1):\n tmpScore, tmpMove = MiniMax(s1, e1, not(isMax))\n if tmpScore > highest:\n highest = tmpScore\n move = e1\n return (highest,move)\n else:\n lowest = 100\n for e1 in getMoves(s1):\n tmpScore, tmpMove = MiniMax(s1, e1, not(isMax))\n if tmpScore < lowest:\n lowest = tmpScore\n move = e1\n return (lowest,move)\n\ndef getMoves(state):\n moves = []\n for i in range(9):\n if state[i] == '-':\n moves.append((i,'x'))\n moves.append((i,'o'))\n return moves\n\ndef updateState(s, e):\n if e == None:\n return s\n pos,piece = e\n return s[:pos] + piece + s[pos+1:]\n\ndef isWin(state):\n #row\n for i in range(0,9,3):\n if state[i] == 'x' and state[i] == state[i+1] and state[i] == state[i+2]:\n return True\n elif state[i] == 'o' and state[i] == state[i+1] and state[i] == state[i+2]:\n return True\n else:\n return False\n #col\n for i in range(0,3):\n if state[i] == 'x' and state[i] == state[i+3] and state[i] == state[i+6]:\n return True\n elif state[i] == 'o' and state[i] == state[i+3] and state[i] == state[i+6]:\n return True\n\n #diagonal tL to lR\n if state[0] == 'x' and state[0] == state[4] and state[0] == state[8]:\n return True\n elif state[0] == 'o' and state[0] == state[4] and state[0] == state[8]:\n return True\n else:\n return False\n\n #diagonal tR to lL\n if state[2] == 'x' and state[2] == state[4] and state[2] == state[6]:\n return True\n elif state[2] == 'o' and state[2] == state[4] and state[2] == state[6]:\n return True\n else:\n return False\n\ndef isTie(state):\n for item in state:\n if item == '-':\n return False\n return True\n\ndef score(state, isMax):\n dashes = 0\n for i in range(9):\n if state[i] == '-':\n dashes += 1\n dashes += 1\n if isMax:\n return dashes\n else:\n return -1 * dashes\n","sub_path":"ai/HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226922437","text":"from datetime import date\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import User\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\n def validate_birth_date(self, value):\n if value:\n if value > date.today():\n raise serializers.ValidationError(\n 'Birth date cannot be in the future'\n )\n age = relativedelta(date.today(), value).years\n if age < 18:\n raise serializers.ValidationError(\n 'User should be 18 years or older'\n )\n return value\n\n class Meta:\n model = get_user_model()\n fields = [\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'gender',\n 'birth_date',\n 'url',\n 'profile_pic',\n ]\n","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"420994811","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n此模块功能:filter模块(寻找素数)\n详细说明:\n见注释\n'''\n\n__author__ = 'HouBin'\n\n#构造一个从3开始的奇数序列(需要注意这是一个生成器,且为无限序列)\ndef _odd_iter():\n oddNum = 1\n while True:\n oddNum = oddNum + 2\n yield oddNum\n\n#定义一个筛选函数(注意返回的是一个匿名函数)\ndef _not_divisible(nTemp):\n return lambda x: x % nTemp > 
 return lambda x: x % nTemp > 0\n\n# a generator that keeps yielding the next prime\ndef primes():\n # yield the first prime, 2\n yield 2\n # build the initial odd sequence\n it = _odd_iter()\n while True:\n # take the first number from the pending sequence\n n = next(it)\n # that first number is guaranteed to be prime\n yield n\n # sieve the rest of the sequence with it (note that _not_divisible(n) is still a function)\n it = filter(_not_divisible(n), it)\n\nif __name__ == '__main__':\n for prime in primes():\n if prime < 1000:\n print(prime)\n else:\n break\n","sub_path":"11-Functional/PrimeNumbers.py","file_name":"PrimeNumbers.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"572029040","text":"\"\"\"temp CLI\n\nUsage:\n code.py -c <col> [-s=<sep>] [-o <output>] <input>\n code.py -c <col> [-s=<sep>] [-o <output>]\n\nOptions:\n -h --help Show this screen.\n -v --version Show version.\n -o --output output file\n -c --col input columns by \"<col>\"\n -s --sep=STR sep for file [default: \\t ]\n\"\"\"\nimport sys, os\nfrom docopt import docopt\nfrom signal import signal, SIGPIPE, SIG_DFL\nimport pandas as pd\nsignal(SIGPIPE, SIG_DFL)\narg = docopt(__doc__, version='template code 1.0')\n\ndef drop_dup(df,cols):\n cols_l = [ i for i in cols]\n df = df.drop_duplicates(cols_l)\n return df\n\nif __name__ == '__main__':\n # parser = argparse.ArgumentParser(description='Hsub parser')\n # parser.add_argument('input', help='input file: can come from a pipe, or be passed as Hsub input_file' ,nargs='?')\n # args = parser.parse_args()\n #\n print(arg)\n if arg[\"<input>\"] is not None:\n input_file = arg[\"<input>\"]\n else:\n input_file = '/dev/stdin'\n\n if arg[\"--output\"]:\n output=arg['<output>']\n else:\n output=sys.stdout\n print(input_file)\n\n print(r\"{}\".format(arg['--sep']))\n pd.read_csv(input_file,sep=arg['--sep']).to_csv(output,sep=arg['--sep'])\n\n\n\n\n\nsys.stdout.flush()\nsys.stdout.close()\nsys.stderr.flush()\nsys.stderr.close()","sub_path":"tools/linux_tools/DropDuplicates/Duplicates.py","file_name":"Duplicates.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"513914397","text":"import os, time, datetime\nimport logging\nimport asyncio\nfrom functools import partial\n\nfrom .hipchat import hipchat_msg\n\ndef setup_default_log(default_logger_name,log_folder,level=logging.DEBUG):\n # this will affect any logging calls\n logging.basicConfig(level=level)\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n logfile = os.path.join(log_folder, '%s_%s_hub.log' % (default_logger_name,time.strftime(\"%Y%m%d\",datetime.datetime.now().timetuple())))\n fh = logging.FileHandler(logfile)\n fh.setFormatter(logging.Formatter('%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s',datefmt=\"%H:%M:%S\"))\n fh.name = \"logfile\"\n logger = logging.getLogger(default_logger_name)\n logger.setLevel(level)\n if not fh.name in [h.name for h in logger.handlers]:\n logger.addHandler(fh)\n return logger\n\n\ndef get_logger(logger_name,log_folder,handlers=[\"console\",\"file\"],timestamp=\"%Y%m%d\"):\n # this will affect any logging calls\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n if timestamp:\n logfile = os.path.join(log_folder, '%s_%s.log' % (logger_name,time.strftime(timestamp,datetime.datetime.now().timetuple())))\n else:\n logfile = os.path.join(log_folder, '%s.log' % logger_name)\n fmt = logging.Formatter('%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s', datefmt=\"%H:%M:%S\")\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n if \"file\" in handlers:\n fh = logging.FileHandler(logfile)\n
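 # handler registration below is keyed on fh.name, so repeated get_logger calls with\n # the same logger_name do not stack duplicate file handlers\n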
 fh.setFormatter(fmt)\n fh.name = \"logfile\"\n if not fh.name in [h.name for h in logger.handlers]:\n logger.addHandler(fh)\n #if \"hipchat\" in handlers:\n # nh = HipchatHandler(config.HIPCHAT_CONFIG)\n # nh.setFormatter(fmt)\n # nh.name = \"hipchat\"\n # if not nh.name in [h.name for h in logger.handlers]:\n # logger.addHandler(nh)\n return logger\n\n\nclass HipchatHandler(logging.StreamHandler):\n\n colors = {logging.DEBUG : \"gray\",\n logging.INFO : \"green\",\n logging.WARNING : \"yellow\",\n logging.ERROR : \"red\",\n logging.CRITICAL : \"purple\"}\n\n def __init__(self,conf={}):\n super(HipchatHandler,self).__init__()\n pass\n\n def emit(self,record):\n @asyncio.coroutine\n def aioemit():\n fut = yield from loop.run_in_executor(None,partial(\n hipchat_msg,msg,color=color))\n return fut\n if record.__dict__.get(\"notify\"):\n loop = asyncio.get_event_loop()\n msg = self.format(record)\n color = self.__class__.colors.get(record.levelno,\"gray\")\n fut = aioemit()\n asyncio.ensure_future(fut)\n\n","sub_path":"biothings/utils/loggers.py","file_name":"loggers.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"294681278","text":"from PIL import Image\ndef concatImage(name, number, width, height):\n\tnew_im = Image.new('RGBA', (width*number,height))\n\tzeros = \"000\"\n\tfor i in xrange(1,number+1):\n\t\tim = Image.open(name+zeros+str(i)+\".png\")\n\t\tnew_im.paste(im, ((i-1)*width,0))\n\t\tif i == 9:\n\t\t\tzeros = '00'\n\tnew_im.save(name+'.png')\n# renamed from concatImage: this second definition used to shadow the one above\ndef concatImagePair(name1, name2, endname, width, height):\n\tnew_im = Image.new('RGBA', (width,height*2))\n\tim = Image.open(name1+\".png\")\n\tnew_im.paste(im, (0,0))\n\tim = Image.open(name2+\".png\")\n\tnew_im.paste(im, (0,height))\n\tnew_im.save(endname+'.png')\ndef concatImageDiffSizes(name1, name2, endname, width, height1, height2):\n\tnew_im = Image.new('RGBA', (width,height1+height2))\n\tim = Image.open(name1+\".png\")\n\tnew_im.paste(im, (0,0))\n\tim = Image.open(name2+\".png\")\n\tnew_im.paste(im, (0,height1))\n\tnew_im.save(endname+'.png')\n#concatImage('human_swordsman', 55, 110, 70)\n#concatImage('goblin_swordsman', 55, 110, 70)\n#concatImage('shootenemy', 5, 35, 15)\n#concatImage('shootplayer', 5, 35, 15)\n#concatImagePair('goblin_archer', 'human_archer', 'sprite_archer', 3920, 50)\n#concatImagePair('goblin_mage', 'human_mage', 'sprite_mage', 930, 34)\n#concatImagePair('goblin_sheild', 'human_sheild', 'sprite_sheild', 6050, 70)\n#concatImagePair('shootplayer', 'shootenemy', 'sprite_shoot', 175, 15)\n#concatImagePair('shootplayeraoe', 'shootenemyaoe', 'sprite_shootaoe', 60, 60)\nconcatImageDiffSizes('sprite_sheild', 'sprite_enemies', 'sprite_enemies', 6050, 140, 168)","sub_path":"app/build/intermediates/res/debug/drawable/concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"371554802","text":"\n#under construction...\n\nfrom math import floor\n\ndef max_heapify(list,i):\n\t# children of node i in a 0-indexed heap are 2*i+1 and 2*i+2\n\tleft_child=2*i+1\n\tright_child=2*i+2\n\n\tif (left_child<len(list)) and (list[left_child] > list[i]):\n\t\tlargest=left_child\n\telse:\n\t\tlargest=i\n\n\tif (right_child<len(list)) and (list[right_child] > list[largest]):\n\t\tlargest=right_child\n\tif largest != i:\n\t\tlist[i],list[largest]=list[largest],list[i]\n\t\tmax_heapify(list,largest)\n\n\tprint(list)\n\t\t\ndef build_heap(list):\n\ti=floor(len(list)/2-1)\n\t# heapify every internal node, from the last parent back up to the root\n\twhile i >= 0:\n
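\t\t# e.g. with the 8-element demo list at the bottom of this file, i takes the values 3, 2, 1, 0\n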
i>0:\n\t\tprint(i)\n\t\tmax_heapify(list,i)\n\t\ti-=1\n\n\n\nlist=[3,0,6,1,5,2,4,7]\nbuid_heap(list)\nprint(list)","sub_path":"Sorting/Heap Sort.py","file_name":"Heap Sort.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"548630165","text":"from datetime import datetime, timedelta\n\nfrom django.contrib.auth.hashers import make_password, check_password\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom users.models import UserModel\nfrom utils.functions import random_ticket\n\n\n\ndef register(request):\n if request.method == 'GET':\n return render(request, 'web/register.html')\n if request.method == 'POST':\n username = request.POST.get('user_name')\n pwd = request.POST.get('pwd')\n cpwd = request.POST.get('cpwd')\n email = request.POST.get('email')\n user = UserModel.objects.filter(username=username)\n if not all([username, pwd, cpwd, email]):\n error = '参数不能为空'\n return render(request, 'web/register.html', {'error': error})\n if user:\n error = '用户名已存在'\n return render(request, 'web/register.html', {'error': error})\n u_email = UserModel.objects.filter(email=email)\n if u_email:\n error = '邮箱已存在'\n return render(request, 'web/register.html', {'error': error})\n if pwd != cpwd:\n error = '两次密码输入不一致'\n return render(request, 'web/register.html', {'error': error})\n UserModel.objects.create(username=username, password=make_password(pwd), email=email)\n return HttpResponseRedirect(reverse('users:login'))\n\n\ndef login(request):\n if request.method == 'GET':\n return render(request, 'web/login.html')\n if request.method == 'POST':\n username = request.POST.get('username')\n pwd = request.POST.get('pwd')\n if not all([username, pwd]):\n error = '参数不能为空'\n return render(request, 'web/login.html', {'error': error})\n user = UserModel.objects.filter(username=username).first()\n if not user:\n error = '用户不存在'\n return render(request, 'web/login.html', {'error': error})\n if not check_password(pwd, user.password):\n error = '密码输入错误'\n return render(request, 'web/login.html', {'error': error})\n\n res = HttpResponseRedirect(reverse('web:index'))\n ticket = random_ticket()\n out_time = datetime.now() + timedelta(days=1)\n res.set_cookie('ticket', ticket, expires=out_time)\n\n UserModel.objects.filter(username=username).update(ticket=ticket, out_time=out_time)\n return res\n\n\ndef logout(request):\n if request.method == 'GET':\n user = request.user\n user.ticket = ''\n user.save()\n res = HttpResponseRedirect(reverse('web:index'))\n res.delete_cookie('ticket')\n return res","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"440375625","text":"# encoding: utf-8\n\nimport json\nimport os\nimport sys\nimport time\n\nfrom menthod.config import call_interface, is_call\nfrom menthod.request import request\n\n\ndef get_data(case_file):\n with open(sys.path[0] + '/cases/%s.json' % case_file, encoding='UTF-8') as json_file:\n data = json.load(json_file)\n json_file.close()\n return data\n\n\n# 经执行结果存入日志文件,路径 /logs/cases文件名\ndef get_log(case_file, model, result, request):\n # 日志格式\n log = {\n \"model\": model,\n \"result\": result,\n \"request\": request\n }\n # 创建日志文件\n path = sys.path[0] + '/logs/%s' % case_file\n if not os.path.exists(path):\n os.mkdir(path)\n # 写入日志\n f = open(path + '/%s' % 
time.strftime('%Y%m%d', time.localtime()), 'a')\n f.write(str(log) + '\\n')\n f.close()\n\n# 钉钉报警机制\ndef call_dingding(model, result, url, result_data):\n if is_call == 0:\n return\n header = {\"Content-Type\": \"application/json ;charset=utf-8 \"}\n content = {\n \"model\": model,\n \"time\": time.strftime(\"%Y%m%d %H%M\", time.localtime()),\n \"result\": result,\n \"url\": url,\n \"request\": result_data\n }\n call_data = {\n \"msgtype\": \"text\",\n \"text\": {\n \"content\": content\n },\n \"at\": {\n \"isAtAll\": \"false\"\n }\n }\n request_data = request(\"post\", call_interface, header, call_data)\n print(request_data)\n","sub_path":"auto_inter/handler/handle_file.py","file_name":"handle_file.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"199902838","text":"import logging\nimport requests\nimport simplejson\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass SMSManager(object):\n \"\"\"\n The Manager for SMS\n \"\"\"\n def _get_url(self):\n if not hasattr(settings, 'CLICKATELL_API_ENDPOINT'):\n raise ValueError(_(\n 'Clickatell authorization url not set in settings, please specify a url as \"CLICKATELL_API_ENDPOINT\"'\n ))\n return settings.CLICKATELL_API_ENDPOINT\n\n def _get_token(self):\n if not hasattr(settings, 'CLICKATELL_TOKEN'):\n raise ValueError(_(\n 'Clickatell authorization token not set in settings, please specify a token as \"CLICKATELL_TOKEN\"'\n ))\n return settings.CLICKATELL_TOKEN\n\n def _get_headers(self):\n token = self._get_token()\n return {\"Authorization\": \"bearer %s\" % token, 'X-Version': 1, 'Content-Type': 'application/json'}\n\n def send(self, sms):\n \"\"\"\n Sends the SMS through REST API of external service.\n\n :param sms: The SMS message as string\n :return: : Dictionary containing the http response data\n :rtype: dict\n \"\"\"\n data = simplejson.dumps({'text': sms.message, 'to': sms.receivers})\n url = self._get_url()\n headers = self._get_headers()\n response = requests.post(url, data=data, headers=headers)\n ret = response.json()\n logger.info(\"SMS send, data: %s, response: %s\" %(sms.message, str(ret)))\n return ret\n\n\nclass SMS(object):\n \"\"\"\n An sms object represents a particular sms to be sent.\n \"\"\"\n objects = SMSManager()\n\n def __init__(self, message=str(), receivers=list()):\n self.receivers = receivers\n self.message = message\n\n def send(self):\n \"\"\"\n Sends the sms using the SMSManager\n \"\"\"\n if not self.receivers:\n raise ValueError(_('Need to assign at least one sender'))\n if not self.message:\n raise ValueError(_('Message cannot be blank'))\n return self.objects.send(self)\n","sub_path":"epad/sms/models/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"162389350","text":"import math\n\nimport cv2\nimport dlib\nimport face_recognition\nimport numpy as np\nfrom PIL import Image\n\nimport Detector\n\n\ndef change_size(image, ratio):\n h = image.shape[0]\n w = image.shape[1]\n dh = int(ratio * h)\n dw = int(ratio * w)\n image = cv2.resize(image, (dw, dh))\n return image\n\n\n# 计算灰度直方图\ndef get_gray_hist(image):\n h, w, c = image.shape\n gray_hist = np.zeros(256, np.uint32)\n for i in range(h):\n for j in range(w):\n for k in range(c):\n gray_hist[image[i][j][k]] += 1\n return gray_hist\n\n\n# 
Histogram normalization (linear min-max rescale to [0, 255])\ndef regularize(image):\n i_max = np.max(image)\n i_min = np.min(image)\n o_min, o_max = 0, 255\n a = float(o_max - o_min) / (i_max - i_min)\n b = o_min - a * i_min\n image = a * image + b\n image = image.astype(np.uint8)\n return image\n\n\n# Histogram equalization\ndef equalize(image):\n h, w, c = image.shape\n gray_hist = get_gray_hist(image)\n cal_gray_hist = np.zeros(256, np.uint32)\n cal_gray_hist[0] = gray_hist[0]\n for i in range(1, 256):\n cal_gray_hist[i] = cal_gray_hist[i-1] + gray_hist[i]\n output = np.zeros(256, np.uint8)\n param = 256.0 / (h * w)\n for i in range(256):\n j = param * float(cal_gray_hist[i]) - 1\n if j > 0:\n output[i] = math.floor(j)\n else:\n output[i] = 0\n equal_hist = np.zeros(image.shape, np.uint8)\n for i in range(h):\n for j in range(w):\n for k in range(c):\n equal_hist[i][j][k] = output[image[i][j][k]]\n return equal_hist\n\n\ndef remove_background(img, col):\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('data/shape_predictor_68_face_landmarks.dat')\n h, w = img.shape[:2]\n faces = detector(img, 0)\n mask = np.zeros(img.shape[:2], np.uint8)\n bg = np.zeros((1, 65), np.float64)\n fg = np.zeros((1, 65), np.float64)\n if len(faces) > 0:\n for k, d in enumerate(faces):\n left = max(int((3 * d.left() - d.right()) / 2), 1)\n top = max(int((3 * d.top() - d.bottom()) / 2) - 60, 1)\n right = min(int((3 * d.right() - d.left()) / 2), w)\n bottom = min(int((3 * d.bottom() - d.top()) / 2) + 60, h)\n rect = (left, top, right, bottom)\n else:\n exit(0)\n # in the returned mask, definite background pixels are 0, definite foreground 1, probable background 2, probable foreground 3\n cv2.grabCut(img, mask, rect, bg, fg, 10, cv2.GC_INIT_WITH_RECT)\n # set mask values of 0 or 2 (i.e. background) to 0, everything else to 1\n mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\n # background colors (blue / red / white)\n bg_color = [(225, 166, 23), (0, 0, 255), (255, 255, 255)]\n # multiplying zeroes out the background pixels and keeps the foreground\n img = img * mask[:, :, np.newaxis]\n # erosion\n img = cv2.erode(img, None, iterations=1)\n # dilation\n img = cv2.dilate(img, None, iterations=1)\n for i in range(h): # height\n for j in range(w):\n if max(img[i, j]) == 0:\n img[i, j] = bg_color[col]\n img = img[rect[1]:rect[3], rect[0]:rect[2]]\n img = cv2.resize(img, (164, 233))\n cv2.imwrite(\"images/tmp/certificate.png\", img)\n\n\ndef paste_pic(img, col):\n remove_background(img, col)\n img = Image.open(\"images/tmp/certificate.png\")\n background = Image.open(\"images/certificate.png\")\n background.paste(img, (534, 196))\n background.save(\"result/certificate.png\", quality=95)\n\n\ndef face_rec(img, face_characters, face_names):\n face_locations = face_recognition.face_locations(img) # face locations of everyone in the frame\n img_characters = face_recognition.face_encodings(img, face_locations) # face feature encodings\n pic_face_names = [] # names of everyone recognized in the frame\n for img_character in img_characters: # compare against the known-face database\n # a tolerance up to 0.5 counts as a match; start tight and widen so the closest match wins\n tol = 0.05\n flag = 0\n while tol <= 0.5:\n res = face_recognition.compare_faces(face_characters, img_character, tolerance=tol)\n # a match exists, so record its index\n if True in res:\n index = res.index(True)\n name = face_names[index]\n pic_face_names.append(name)\n flag = 1\n break\n tol += 0.02\n if flag == 0:\n pic_face_names.append(\"unknown\")\n # draw the detected faces on the image\n for (top, right, bottom, left), name in zip(face_locations, pic_face_names):\n cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2) # face bounding box\n # add the name label\n cv2.rectangle(img, (left, bottom - 30), (right, bottom), (0, 0, 255), cv2.FILLED)\n cv2.putText(img, name, (left + 5, bottom - 5), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)\n # save the image and display it in real time\n h = img.shape[0]\n w = 
img.shape[1]\n ratio1 = 500 / h\n ratio2 = 800 / w\n img = change_size(img, min(ratio1, ratio2))\n cv2.imwrite(\"images/tmp/face_rec.jpg\", img)\n if len(img_characters) == 0:\n return 0\n return 1\n\n\ndef mask_rec(img):\n img_nose, noses = Detector.nose_detection(img) # 鼻子检测\n if noses == 1: # 检测到鼻子说明未戴口罩\n cv2.putText(img_nose, \"NO MASK\", (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 0, 255), 1) # 图片上写字\n cv2.imwrite('images/tmp/mask_rec.jpg', img_nose)\n if noses == 0: # 未检测到鼻子则进行眼睛检测\n img_eye, eyes = Detector.eye_detection(img) # 进行眼睛检测,返回检测之后的图形以及标志位\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # 将图片转化成HSV格式\n h, s, v = cv2.split(hsv) #\n # h_min = cv2.getTrackbarPos(\"h_min\", 'skin') # 获取bar\n # h_max = cv2.getTrackbarPos(\"h_max\", 'skin')\n # if h_min > h_max:\n # h_max = h_min\n thresh = cv2.inRange(h, 0, 15) # 提取人体肤色区域\n if len(eyes) > 1: # 判断是否检测到两个眼睛,其中eyes[0]为左眼坐标\n # 确定口罩区域\n # 左眼的begin为口罩begin\n mask_x_begin = min(eyes[0][0], eyes[1][0])\n # 右眼begin+右眼宽度为口罩end\n mask_x_end = max(eyes[0][0], eyes[1][0]) + \\\n eyes[list([eyes[0][0], eyes[1][0]]).index(max(list([eyes[0][0], eyes[1][0]])))][2]\n # 越界处理\n if mask_x_end > img_eye.shape[0]:\n mask_x_end = img_eye.shape[0]\n # 眼睛高度为口罩begin\n mask_y_begin = max(eyes[0][1] + eyes[0][3], eyes[1][1] + eyes[1][3]) + 20\n # 越界处理\n if mask_y_begin > img_eye.shape[1]:\n mask_y_begin = img_eye.shape[1]\n mask_y_end = max(eyes[0][1] + 3 * eyes[0][3], eyes[1][1] + 3 * eyes[1][3]) + 20\n if mask_y_end > img_eye.shape[1]:\n mask_y_end = img_eye.shape[1]\n cv2.rectangle(img_eye, (mask_x_begin, mask_y_begin), (mask_x_end, mask_y_end), (255, 0, 0), 2)\n mask_scale = 0\n face_scale = 0\n # 遍历二值图,为0则total_mask_pixel+1,否则total_face_pixel+1\n for i in range(mask_x_begin, mask_x_end):\n for j in range(mask_y_begin, mask_y_end):\n if thresh[i, j] == 0:\n mask_scale += 1\n else:\n face_scale += 1\n if mask_scale > face_scale:\n cv2.putText(img_eye, \"HAVE MASK\", (mask_x_begin, mask_y_begin - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 0, 255), 1)\n if mask_scale < face_scale:\n cv2.putText(img_eye, \"NO MASK\", (mask_x_begin, mask_y_begin - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 0, 255), 1)\n cv2.imwrite(\"images/tmp/mask_rec.jpg\", img_eye)\n","sub_path":"PicUtils.py","file_name":"PicUtils.py","file_ext":"py","file_size_in_byte":8003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"471353907","text":"\r\n# Atom.py contains the Atom class\r\n# Written by Chris Lockhart in Python3\r\n\r\nimport numpy as np\r\nimport LLTK.structure\r\n\r\n# Permissible elements\r\nelement_list=['C','H','N','O','P','S']\r\n\r\n# Default atom radii by element\r\n# Follows VMD and takes radii from Bondi (1964) JPC 68\r\n# H is from Taylor (1996) JPC 100\r\n# NA is from CHARMM27 Rmin/2 parameter for SOD\r\nradius_default={'C': 1.7,\r\n 'H': 1., # 1.2 from Bondi\r\n 'N': 1.55,\r\n 'O': 1.52,\r\n 'P': 1.8,\r\n 'S': 1.8,\r\n 'NA': 1.36} \r\n\r\n# Mass defaults, see above\r\nmass_default={'C': 12.0107,\r\n 'H': 1.00794,\r\n 'N': 14.0067,\r\n 'O': 15.9994,\r\n 'P': 30.973761,\r\n 'S': 32.065,\r\n 'NA': 22.98977}\r\n\r\n# This Atom class defines an atom within a Structure\r\nclass Atom: \r\n # Initialize\r\n def __init__(self):\r\n self.atomid=None # This is the true ID from atom order in PDB/PSF file\r\n self.atomname=None\r\n self.atomtype=None\r\n self.resname=None\r\n self.chain='X'\r\n self.resid=None\r\n self.x=0.\r\n self.y=0.\r\n self.z=0.\r\n self.occupancy=0.\r\n self.beta=0.\r\n self.segname=None\r\n 
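 # left as None here; verify() later backfills radius and mass from the per-element\r\n # defaults above when they are still unset\r\n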
self.charge=None\r\n self.element='X'\r\n self.radius=None\r\n self.mass=None\r\n \r\n # Check that two atoms are the same\r\n # This check excludes coordinate, occupancy, and beta values\r\n def check(self,atom):\r\n # Type check\r\n if not isinstance(atom,LLTK.structure.Atom):\r\n raise TypeError('atom must be of Atom class')\r\n\r\n # Perform check\r\n if (self.atomid != atom.atomid or\r\n self.atomname != atom.atomname or\r\n self.atomtype != atom.atomtype or\r\n self.resname != atom.resname or\r\n self.chain != atom.chain or\r\n self.resid != atom.resid or\r\n self.segname != atom.segname or \r\n self.element != atom.element or\r\n not np.isclose(self.charge,atom.charge) or\r\n not np.isclose(self.radius,atom.radius) or\r\n not np.isclose(self.mass,atom.mass)):\r\n raise ValueError('atoms do not match')\r\n\r\n # Copy to new Atom object\r\n def copy(self):\r\n # New Atom object\r\n atom=Atom()\r\n \r\n # Copy\r\n atom.atomid=self.atomid\r\n atom.atomname=self.atomname\r\n atom.atomtype=self.atomtype\r\n atom.resname=self.resname\r\n atom.chain=self.chain\r\n atom.resid=self.resid\r\n atom.x=self.x\r\n atom.y=self.y\r\n atom.z=self.z\r\n atom.occupancy=self.occupancy\r\n atom.beta=self.beta\r\n atom.segname=self.segname\r\n atom.element=self.element\r\n atom.charge=self.charge\r\n atom.radius=self.radius\r\n atom.mass=self.mass\r\n \r\n # Return\r\n return atom\r\n\r\n # Copies from an atom object to instance\r\n def copy_from(self,atom):\r\n # Type check\r\n if not isinstance(atom,Atom):\r\n print('Error: atom must be Atom object.')\r\n print(atom)\r\n sys.exit(2)\r\n \r\n # Copy\r\n self.atomid=atom.atomid\r\n self.atomname=atom.atomname\r\n self.atomtype=atom.atomtype\r\n self.resname=atom.resname\r\n self.chain=atom.chain\r\n self.resid=atom.resid\r\n self.x=atom.x\r\n self.y=atom.y\r\n self.z=atom.z\r\n self.occupancy=atom.occupancy\r\n self.beta=atom.beta\r\n self.segname=atom.segname\r\n self.element=atom.element\r\n self.charge=atom.charge\r\n self.radius=atom.radius\r\n self.mass=atom.mass\r\n \r\n # Output atom information\r\n def display(self):\r\n print('Atomid: '+str(self.atomid))\r\n print('Atomname: '+str(self.atomname))\r\n print('Atomtype: '+str(self.atomtype))\r\n print('Resname: '+str(self.resname))\r\n print('Chain: '+str(self.chain))\r\n print('Resid: '+str(self.resid))\r\n print('x: '+str(self.x))\r\n print('y: '+str(self.y))\r\n print('z: '+str(self.z))\r\n print('Occupancy: '+str(self.occupancy))\r\n print('Beta: '+str(self.beta))\r\n print('Segname: '+str(self.segname))\r\n print('Charge: '+str(self.charge))\r\n print('Element: '+str(self.element))\r\n print('Radius: '+str(self.radius))\r\n print('Mass: '+str(self.mass))\r\n \r\n # Return coordinates\r\n def get_coord(self):\r\n return (self.x,self.y,self.z)\r\n \r\n # Move atom by offset\r\n def move_by(self,offset=np.zeros(3)):\r\n # Type check\r\n offset=np.array(offset,dtype=float).reshape(3)\r\n \r\n # Apply offset to atom\r\n self.x=self.x+offset[0]\r\n self.y=self.y+offset[1]\r\n self.z=self.z+offset[2]\r\n \r\n # Update coordinates\r\n def update_coord(self,coord):\r\n # Type check\r\n coord=np.array(coord,dtype=float)\r\n \r\n # Size check\r\n if coord.size != 3: raise AttributeError('must have 3 dimensions')\r\n \r\n # Update\r\n self.x=coord[0]\r\n self.y=coord[1]\r\n self.z=coord[2]\r\n\r\n # Verify Atom\r\n def verify(self): \r\n # Handle element\r\n if self.element is None:\r\n # See if element with two characters works\r\n self.element=''.join([i for i in self.atomname[:2]\r\n if not 
i.isdigit()]).upper()\r\n\r\n # If element does not work, further reduce it\r\n if not self.element in element_list: self.element=self.element[0]\r\n\r\n # If element is still not in permissible list, throw error\r\n if not self.element in element_list:\r\n print('Error: element unknown.')\r\n raise ValueError('element '+self.element+' is unknown')\r\n\r\n # Handle radius\r\n if self.radius is None:\r\n if self.element in radius_default.keys():\r\n self.radius=radius_default[self.element]\r\n else:\r\n print('Error: element radius is unknown.')\r\n raise ValueError('element '+self.element+' has unknown radius')\r\n\r\n # Handle mass\r\n if self.mass is None:\r\n if self.element in mass_default.keys():\r\n self.mass=mass_default[self.element]\r\n else:\r\n print('Error: element mass is unknown.')\r\n raise ValueError('element '+self.element+' has unknown mass')\r\n \r\n # Check atom information\r\n if (not isinstance(self.atomid,int) or\r\n not isinstance(self.atomname,str) or \r\n not isinstance(self.atomtype,str) or \r\n not isinstance(self.resname,str) or\r\n not isinstance(self.chain,str) or\r\n not isinstance(self.resid,int) or\r\n not isinstance(self.x,float) or\r\n not isinstance(self.y,float) or\r\n not isinstance(self.z,float) or\r\n not isinstance(self.occupancy,float) or\r\n not isinstance(self.beta,float) or\r\n not isinstance(self.segname,str) or\r\n not isinstance(self.charge,float) or\r\n not isinstance(self.element,str) or\r\n not isinstance(self.radius,float) or\r\n not isinstance(self.mass,float)):\r\n self.display()\r\n raise ValueError('atom cannot be verified')\r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"src/LLTK/structure/Atom.py","file_name":"Atom.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"440105047","text":"import csv\nimport os\nfrom datetime import datetime\nimport bisect\nimport re\n\n\n# How to use it:\n#\n# Example (1):\n# Takes all the rows in which respects the regular expression '.*(/search)' in the 'REQUEST_URI' value\n# alldata = {}\n# alldata['ITEM'] = buildData(['.*(/search)'], ['REQUEST_URI'], csvpath)\n#\n# Example (2):\n# Takes all the rows in which respects the regular expression '.*(\\/corpus\\/ra|\\/corpus\\/br)' in the 'HTTP_REFERER' value, AND\n# respects the regular expression '.*(\\/browse)' in the 'REQUEST_URI' value\n# alldata = {}\n# data_moves['ITEM'] = buildData(['.*(\\/corpus\\/ra|\\/corpus\\/br)','.*(\\/browse)'], ['HTTP_REFERER','REQUEST_URI'], csvpath)\n\ndef scanLog(csvlogpath, txt_to_search, txt_field, printit = False):\n with open(csvlogpath) as csvfile:\n print('SCANNING ' + csvlogpath)\n reader = csv.DictReader(csvfile)\n count = 0\n data = []\n\n for row in reader:\n is_ok = True\n for i in range(len(txt_field)):\n\n pattern = re.compile(txt_to_search[i])\n if pattern.match(row[txt_field[i]]):\n is_ok = True\n else:\n is_ok = False\n break\n\n if is_ok:\n count += 1\n\n my_val = {}\n for i in range(len(txt_field)):\n my_val[txt_field[i]] = row[txt_field[i]]\n #data.append(my_val)\n data.append(row)\n\n return {'count': count, 'value': data}\n\n\ndef buildData(txt_list, txt_field_list, csvpath):\n data = {'x':[],'y':[],'value':[]}\n for filename in os.listdir(csvpath):\n if filename.endswith(\".csv\"):\n csvlogpath = csvpath+filename\n scanner = scanLog(csvlogpath,txt_list, txt_field_list)\n date = filename.replace(\"oc-\", \"\").replace(\".csv\", \"\")\n\n date_val = datetime.strptime(date, '%Y-%m')\n 
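 # bisect.insort keeps data['x'] in date order; ord_index below places the matching\n # count and rows at the same position in data['y'] and data['value']\n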
 bisect.insort(data['x'],date_val)\n ord_index = data['x'].index(date_val)\n data['y'].insert(ord_index,scanner['count'])\n data['value'].insert(ord_index,scanner['value'])\n else:\n continue\n\n return data\n\ndef observe(data,key, key_value = None):\n ##MY TEST SESSION PART-1\n myindex = {}\n\n def funindex(key,x):\n if key not in myindex:\n myindex[key] = {'value':[], 'count': 0}\n\n myindex[key]['value'].append(x)\n myindex[key]['count'] += 1\n\n for x in data:\n if key_value == None:\n funindex(x[key],x)\n else:\n funindex(x[key],x[key_value])\n\n return myindex\n\ndef sort_dict(var_dict, key, key_sort, reverse_opt = True):\n return sorted(var_dict.items(), reverse=reverse_opt, key=lambda x: x[key_sort])\n\n\n\ndef genCSV(path,data,g_label,x_label,y_label):\n # Generate the .CSV file\n FILE_TO_EDIT = path\n file_res = open(FILE_TO_EDIT,'w')\n file_res.write(g_label+','+x_label+','+y_label+'\\n')\n file_res.close()\n\n\n file_res = open(FILE_TO_EDIT,'a')\n for key in data:\n for i in range(0,len(data[key]['x'])):\n file_res.write(key+','+data[key]['x'][i]+','+str(data[key]['y'][i])+'\\n')\n file_res.close()\n","sub_path":"paper/coci_iswc2019/figshare/script/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"426455558","text":"import cv2 as cv\nimport numpy as np\nimport math\n\nroi = cv.imread('lenna.png')\n\nangle = 10.0 #degree\n\nangle = -angle * (math.pi/180) # radian\n\nnormal_rotation = np.zeros((roi.shape[0],roi.shape[1],3),dtype=np.uint8)\nnormal_interpolation = np.zeros((roi.shape[0],roi.shape[1],3),dtype=np.uint8)\nbilinear_interpolation = np.zeros((roi.shape[0],roi.shape[1],3),dtype=np.uint8)\n\nfor i in range(roi.shape[0]):\n for j in range(roi.shape[1]):\n for k in range(3):\n cy = roi.shape[0] // 2\n cx = roi.shape[1] // 2\n y = i - cy\n x = j - cx\n newY = y * math.cos(angle) - x * math.sin(angle) + cy\n newX = y * math.sin(angle) + x * math.cos(angle) + cx\n newY = round(newY)\n newX = round(newX)\n if newX >=0 and newX < roi.shape[1] and newY >=0 and newY < roi.shape[0]:\n normal_rotation.itemset(newY,newX,k,roi.item(i,j,k))\n\nfor i in range(roi.shape[0]):\n for j in range(roi.shape[1]):\n for k in range(3):\n cy = roi.shape[0] // 2\n cx = roi.shape[1] // 2\n y = i - cy\n x = j - cx\n newY = y * math.cos(angle) + x * math.sin(angle) + cy\n newX = y * -math.sin(angle) + x * math.cos(angle) + cx\n newY = round(newY)\n newX = round(newX)\n if newX >=0 and newX < roi.shape[1] and newY >=0 and newY < roi.shape[0]:\n normal_interpolation.itemset(i,j,k,roi.item(newY,newX,k))\n\nfor i in range(roi.shape[0]):\n for j in range(roi.shape[1]):\n for k in range(3):\n cy = roi.shape[0] // 2\n cx = roi.shape[1] // 2\n y = i - cy\n x = j - cx\n newY = y * math.cos(angle) + x * math.sin(angle) + cy\n newX = y * -math.sin(angle) + x * math.cos(angle) + cx\n alpha = newX - math.floor(newX) # fractional distance from the integer grid point along the x axis\n beta = newY - math.floor(newY) # fractional distance from the integer grid point along the y axis\n # use floor (not round) for the base pixel so alpha/beta weight the correct neighbours\n newX = math.floor(newX)\n newY = math.floor(newY)\n if newX >=0 and newX < roi.shape[1]-1 and newY >=0 and newY < roi.shape[0]-1:\n f1 = (1-alpha)*roi.item(newY,newX,k)+alpha*roi.item(newY,newX+1,k)\n f2 = (1-alpha)*roi.item(newY+1,newX,k)+alpha*roi.item(newY+1,newX+1,k)\n f3 = (1-beta)*f1+beta*f2\n 
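 # bilinear blend: f1 and f2 interpolate between horizontal neighbours (weights 1-alpha and alpha),\n # and f3 then mixes those two rows along y with weight beta\n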
bilinear_interpolation.itemset(i,j,k,int(f3))\n\ncv.imshow('origin',roi)\ncv.imshow('rotation_normal',normal_rotation)\ncv.imshow('interpolation_normal',normal_interpolation)\ncv.imshow('interpolation_bilinear',bilinear_interpolation)\ncv.waitKey(0)\ncv.destroyAllWindows()\n# practice, practice, practice\n","sub_path":"image_rotation.py","file_name":"image_rotation.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"636006346","text":"import sys\n\ns = input()\n\ntry:\n int(s)\nexcept ValueError:\n print(\"this number is incorrect\")\n sys.exit()\n\ns = int(s)\nif s%4 == 0 and s%100 != 0:\n print(\"Yes\")\nelif s%4==0 and s%400==0 and s%100==0:\n print(\"Yes\")\nelse:\n print(\"No\")\n","sub_path":"third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"514674584","text":"from __future__ import absolute_import, unicode_literals\n\nfrom celery.decorators import task\nfrom celery.utils.log import get_task_logger\n\nfrom django.template.loader import get_template, render_to_string\nfrom django.core.mail import send_mail\nfrom django.core.mail import send_mass_mail\nfrom django.core import management\n\nfrom fanfics.models import Fanfic\nfrom chapters.models import Chapter\nfrom comments.models import Comment\nfrom accounts.models import FollowStories, FollowUser\n\nlogger = get_task_logger(__name__)\n\n@task()\ndef fanfic_created(fanfic_id):\n \"\"\"\n Task to send an e-mail notification when a fanfic is successfully created.\n \"\"\"\n logger.info('******** CALLING ASYNC TASK WITH CELERY **********')\n \n fanfic = Fanfic.objects.get(id=fanfic_id)\n subject = 'Fanfiction id# {} - {}'.format(fanfic.id, fanfic.title)\n template = get_template('mail/fanfic_created_notification.txt')\n context = {'fanfic': fanfic}\n msg_text = template.render(context)\n msg_html = render_to_string('mail/fanfic_created_notification.html', context)\n # message = 'Cher {},\\n\\nVotre fanfiction {} a été créer avec succès. Son identifiant est le numéro #{}.'.format(fanfic.author.username, fanfic.title, fanfic.id)\n mail_sent = None\n\n if fanfic.status == 'publié':\n mail_sent = send_mail(subject, msg_text, \"no-reply@fanfiction.com\", [fanfic.author.email], html_message=msg_html)\n \n return mail_sent\n\n\n@task()\ndef chapter_created(chapter_id):\n chapter = Chapter.objects.get(id=chapter_id)\n subject = 'Fanfiction : nouveau chapitre publié sur {}'.format(chapter.fanfic.title)\n template = get_template('mail/chapter_created_notification.txt')\n context = {'chapter': chapter}\n msg_text = template.render(context)\n msg_html = render_to_string('mail/chapter_created_notification.html', context)\n mail_sent = None\n \n if chapter.status == 'publié':\n mail_sent = send_mail(subject, msg_text, \"no-reply@fanfiction.com\", [chapter.fanfic.author.email], html_message=msg_html)\n\n return mail_sent\n\n@task()\ndef user_email_reminder():\n\ttry:\n\t\t\"\"\"\n\t\tsend a reminder e-mail to users who have not logged in for two weeks\n\t\t\"\"\"\n\t\tmanagement.call_command(\"email_reminder\", verbosity=0)\n\texcept:\n\t\tprint(\"error\")\n\t\t\n\t\t\n@task()\ndef deactivate_inactive_user():\n\ttry:\n\t\t\"\"\"\n\t\tdeactivate users who have not logged in for a year\n\t\t\"\"\"\n\t\tmanagement.call_command(\"deactivateuser\", \"1year\", verbosity=0)\n\texcept:\n\t\tprint(\"error\")\n","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"579680431","text":"import json\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import Http404, JsonResponse, HttpResponseRedirect\nfrom django.core.exceptions import ValidationError\n# from django.contrib import messages\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import Post, PostPhoto, Tag, Category, Document, Article, Message, Contact\nfrom .models import Registry, Menu\nfrom .models import Staff\nfrom .forms import PostForm, ArticleForm, DocumentForm\nfrom .forms import SendMessageForm, SubscribeForm, AskQuestionForm, SearchRegistryForm\nfrom .adapters import MessageModelAdapter\nfrom .message_tracker import MessageTracker\nfrom .utilites import UrlMaker\nfrom .registry_import import Importer, data_url\n\n# Create your views here.\n\ndef index(request):\n \"\"\"this is mainpage view with forms handler and adapter to messages\"\"\"\n # TODO: eventually render the forms on the main page\n title = 'Головной аттестационный центр Восточно-Сибирского региона'\n # the tracker must exist before the POST branch below uses it\n tracker = MessageTracker()\n if request.method == 'POST':\n request_to_dict = dict(zip(request.POST.keys(), request.POST.values()))\n form_select = {\n 'send_message_button': SendMessageForm,\n 'subscribe_button': SubscribeForm,\n 'ask_question': AskQuestionForm,\n }\n for key in form_select.keys():\n if key in request_to_dict:\n print('got you!', key)\n form_class = form_select[key]\n form = form_class(request_to_dict)\n if form.is_valid():\n\n # saving form data to messages (need to be cleaned in future)\n adapted_data = MessageModelAdapter(request_to_dict)\n adapted_data.save_to_message()\n print('adapted data saved to database')\n tracker.check_messages()\n tracker.notify_observers()\n else:\n raise ValidationError('form not valid')\n\n # docs = Document.objects.filter(\n # publish_on_main_page=True).order_by('-created_date')[:3]\n\n # main_page_news = Post.objects.filter(\n # publish_on_main_page=True).order_by('-published_date')[:7]\n\n # posts with their photos\n # posts = {}\n # for post in main_page_news:\n # posts[post] = PostPhoto.objects.filter(post__pk=post.pk).first()\n\n # fetch ALL the objects from the DB\n # posts = Post.objects.all()[:3]\n posts = Post.objects.filter(publish_on_main_page=True)[:7]\n publications = []\n for post in posts:\n try:\n publications.append({'post': post, 'photo': PostPhoto.objects.get(post=post).image.url })\n except PostPhoto.DoesNotExist:\n publications.append({'post': post, 'photo': 'https://place-hold.it/500x300'})\n print('PUBLICATIONS', publications)\n # main_page_articles = Article.objects.filter(\n # publish_on_main_page=True).order_by('-published_date')[:3]\n\n # print(request.resolver_match)\n # print(request.resolver_match.url_name)\n\n content = {\n 'title': title,\n 'publications': publications\n # 'docs': docs,\n # 'articles': main_page_articles,\n # 'send_message_form': SendMessageForm(),\n # 'subscribe_form': SubscribeForm(),\n # 'ask_question_form': AskQuestionForm()\n }\n\n return 
render(request, 'mainapp/index.html', content)\n\ndef reestr(request):\n title = 'Реестр'\n\n content = {\n 'title': title\n }\n return render(request, 'mainapp/reestr.html', content)\n\ndef doc(request):\n # documents= Document.objects.all()\n\n gac_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='ССР3ГАЦ'))\n csp_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='ССР3ЦСП'))\n acsm_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='АЦСМ46'))\n acso_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='АЦСО82'))\n acst_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='АЦСТ90'))\n cok_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name='COK12'))\n\n\n content={\n \"title\": \"doc\",\n \"ssr_3gac_documents\": gac_documents,\n \"ssr_3csp_documents\": csp_documents,\n \"acsm_46_documents\": acsm_documents,\n \"acso_82_documents\": acso_documents,\n \"acst_90_documents\": acst_documents,\n \"cok_12_documents\": cok_documents,\n\n }\n return render(request, 'mainapp/doc.html', content)\n\n\n# def details_news(request, pk=None):\n# post = Post.objects.get(pk=pk)\n# content= {\n# 'title': 'Детальный просмотр',\n# 'post': post\n# }\n# return render(request, 'mainapp/details_news.html', content)\n\n\ndef partners(request):\n return render(request, 'mainapp/partners.html')\n\n\ndef page_details(request, pk=None):\n post = get_object_or_404(Post, pk=pk)\n content = {\n 'title': 'Детальный просмотр',\n 'post': post,\n }\n return render(request, 'mainapp/page_details.html', content)\n\ndef cok(request):\n spks_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name=\"НПА СПКС\")\n ).order_by('-created_date')\n spks_example_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name=\"Образцы документов СПКС\")\n )\n content = {\n 'title': 'cok_documets',\n 'spks_documents': spks_documents,\n 'spks_example_documents': spks_example_documents\n }\n return render(request, 'mainapp/cok.html', content)\n\ndef profstandarti(request):\n return render(request, 'mainapp/profstandarti.html')\ndef contacts(request):\n return render(request, 'mainapp/contacts.html')\ndef all_news(request):\n content = {\n 'title': 'All news',\n 'news': Post.objects.all().order_by('-published_date')[:9]\n }\n return render(request, 'mainapp/all_news.html', content)\n\ndef political(request):\n political_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name=\"НПА СПКС\")\n ).order_by('-created_date')\n political_example_documents = Document.objects.filter(\n tags__in=Tag.objects.filter(name=\"Образцы документов СПКС\")\n )\n content = {\n 'title': 'political_documets',\n 'political_documents': political_documents,\n 'political_example_documents': political_example_documents\n }\n return render(request, 'mainapp/political.html', content)\n\ndef details_news(request, pk=None):\n\n return_link = HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n post = get_object_or_404(Post, pk=pk)\n related_posts = Post.objects.filter(publish_on_news_page=True).exclude(pk=pk)[:3]\n attached_images = PostPhoto.objects.filter(post__pk=pk)\n attached_documents = Document.objects.filter(post__pk=pk)\n post_content = {\n 'post': post,\n 'related_posts': related_posts,\n 'images': attached_images,\n 'documents': attached_documents,\n }\n\n return render(request, 'mainapp/details_news.html', 
post_content)","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"211794039","text":"from requests import *\nfrom os import environ, walk, path, listdir\nfrom hashlib import md5\nfrom yaml import load, warnings\nwarnings({'YAMLLoadWarning': False})\nyuquetoken = environ.get(\"YUQUE_TOKEN\");\nif yuquetoken == None:\n print(\"未定义yuque_token\")\n exit(1)\n# load Configurations\nconfig = {};\nwith open(\"_config.yml\", \"r\") as f:\n config = load(f.read())\n f.close();\nprint(\"配置读取成功\")\n\ndef loadfiledir(root):\n fileq = []\n for root, dirs, files in walk(root):\n for file in files: \n fileq.append(path.join(root,file))\n return fileq\n\ndef loadfiles():\n articles = [];\n n = 0\n for file in loadfiledir(config['postPath']):\n with open(file,\"r\",encoding = 'UTF-8',errors='ignore') as f:\n fileqwq=f.read()\n articles.append({\n \"title\": file.split('\\\\')[-1].split('.')[0],\n \"data\": fileqwq,\n \"slug\": md5(fileqwq.encode('utf8')).hexdigest()[0:5],\n \"id\": str(n)\n });\n f.close();\n n = n+1\n return articles\n\ndef uploadfiles(filename,file,slug):\n return post(config[\"baseurl\"]+\"/repos/\"+config[\"login\"]+\"/\"+config[\"repo\"]+\"/docs\",data={\n \"title\": filename,\n \"slug\": slug,\n \"public\": 1,\n \"format\": \"markdown\",\n \"body\": file\n },headers = {\n \"X-Auth-Token\": yuquetoken,\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4435.0 Safari/537.36 Edg/91.0.825.0\"\n })\n\nprint(\"开始读取文件\")\n\nfor i in loadfiles():\n # print()\n res = uploadfiles(i[\"title\"],i[\"data\"],i[\"slug\"])\n if res.status_code == 200:\n print(\"上传文件\"+i[\"title\"]+\"成功\")\n else:\n print(\"错误:\"+ str(res.json()))","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"548912734","text":"# -*- coding: utf-8 -*-\n\nimport cPickle\n\ndef build_dict():\n \n src = '..\\\\..\\\\data\\\\borders\\\\bordpnts.asc'\n dst = '..\\\\input\\\\state_state_border_dict.bin' \n \n border_dict = {}\n\n f = open(src, 'r')\n\n for header in f:\n \n break\n \n for line in f:\n \n line_v = line.split(',')\n border_num = int(line_v[0])\n \n if border_num not in border_dict:\n \n border_dict[border_num] = [[-float(line_v[4])], [float(line_v[3])]]\n \n else:\n \n border_dict[border_num][0].append(-float(line_v[4]))\n border_dict[border_num][1].append(float(line_v[3]))\n \n f.close()\n \n f = open(dst, 'wb')\n cPickle.dump(border_dict, f)\n f.close()\n \n return None","sub_path":"build/code/state_state_border_dictionary.py","file_name":"state_state_border_dictionary.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"490481576","text":"import random\nimport sys\n\nimport battlecode as bc\n\nfrom modules.CarboniteModule import CarboniteModule\nfrom modules.CombatModule import CombatModule\nfrom modules.GameModule import GameModule\nfrom modules.PathingModule import PathingModule\nfrom modules.ResearchModule import ResearchModule\nfrom units.FactoryService import FactoryService\nfrom units.HealerService import HealerService\nfrom units.RangerService import RangerService\nfrom units.RocketService import RocketService\nfrom units.WorkerService import WorkerService\n\n# init 
system\nprint(\"[INFO]: Player Start...\")\nrandom.seed()\n\n# game control\ngc = bc.GameController()\n\n# game state modules\ngame_module = GameModule(bc, gc)\npathing_module = PathingModule(bc, gc)\nresearch_module = ResearchModule(bc, gc)\ncarbonite_module = CarboniteModule(bc, gc, pathing_module)\ncombat_module = CombatModule(bc, gc, game_module)\n\n# unit control services\nworker_service = WorkerService(bc, gc, game_module, pathing_module, carbonite_module, combat_module)\nranger_service = RangerService(bc, gc, game_module, pathing_module, combat_module)\nhealer_service = HealerService(bc, gc, game_module, pathing_module, combat_module)\nfactory_service = FactoryService(bc, gc, game_module)\nrocket_service = RocketService(bc, gc, game_module, pathing_module)\n\n# Queue initial research\nresearch_module.base_strat()\n\nprint(\"[INFO]: Init Successful, Game Starting...\")\n\nwhile True:\n if gc.round() % 10 == 0:\n print('pyround:', gc.round(), 'time left:', gc.get_time_left_ms(), 'ms', ' karbonite', gc.karbonite())\n\n try:\n\n # refresh modules\n if gc.get_time_left_ms() > 100:\n carbonite_module.new_turn()\n combat_module.new_turn()\n game_module.new_turn()\n pathing_module.new_turn()\n research_module.new_turn()\n\n # main unit logic\n for unit in gc.my_units():\n if not unit.location.is_in_garrison():\n\n if unit.unit_type == bc.UnitType.Worker:\n worker_service.move(unit, factory_service.get_factories())\n\n if unit.unit_type == bc.UnitType.Factory:\n factory_service.move(unit)\n\n if unit.unit_type == bc.UnitType.Rocket:\n rocket_service.move(unit)\n\n if unit.unit_type == bc.UnitType.Ranger:\n ranger_service.move(unit)\n\n if unit.unit_type == bc.UnitType.Healer:\n healer_service.move(unit)\n\n except Exception as e:\n print('ErrorALL:', e)\n # use this to show where the error was\n\n # flush all logs & end turn\n sys.stdout.flush()\n sys.stderr.flush()\n gc.next_turn()\n","sub_path":"BattleCode/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"147767818","text":"print(\"\"\"\n15. 
Leia um matriz com 20 numeros inteiros, Escreva os elementos do matriz eliminando elementos repetidos.\n\"\"\")\n\n# matriz = list()\n# for contador in range(20):\n# matriz.append(int(input(f'Insira {contador + 1}º número: ')))\n\nvetor = [19, 62, 50, 1, 63, 1, 8, 17, 89, 94, 24, 63, 22, 43, 22, 75, 99, 39, 44, 22]\nvetor = list(set(vetor))\n\nprint(*vetor)\n","sub_path":"Seção_07/parte_1/Exercício_15.py","file_name":"Exercício_15.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"514674584","text":"from __future__ import absolute_import, unicode_literals\n\nfrom celery.decorators import task\nfrom celery.utils.log import get_task_logger\n\nfrom django.template.loader import get_template, render_to_string\nfrom django.core.mail import send_mail\nfrom django.core.mail import send_mass_mail\nfrom django.core import management\n\nfrom fanfics.models import Fanfic\nfrom chapters.models import Chapter\nfrom comments.models import Comment\nfrom accounts.models import FollowStories, FollowUser\n\nlogger = get_task_logger(__name__)\n\n@task()\ndef fanfic_created(fanfic_id):\n \"\"\"\n Task to send an e-mail notification when a fanfic is successfully created.\n \"\"\"\n logger.info('******** CALLING ASYNC TASK WITH CELERY **********')\n \n fanfic = Fanfic.objects.get(id=fanfic_id)\n subject = 'Fanfiction id# {} - {}'.format(fanfic.id, fanfic.title)\n template = get_template('mail/fanfic_created_notification.txt')\n context = {'fanfic': fanfic}\n msg_text = template.render(context)\n msg_html = render_to_string('mail/fanfic_created_notification.html', context)\n # message = 'Cher {},\\n\\nVotre fanfiction {} a été créer avec succès. Son identifiant est le numéro #{}.'.format(fanfic.author.username, fanfic.title, fanfic.id)\n mail_sent = None\n\n if fanfic.status == 'publié':\n mail_sent = send_mail(subject, msg_text, \"no-reply@fanfiction.com\", [fanfic.author.email], html_message=msg_html)\n \n return mail_sent\n\n\n@task()\ndef chapter_created(chapter_id):\n chapter = Chapter.objects.get(id=chapter_id)\n subject = 'Fanfiction : nouveau chapitre publié sur {}'.format(chapter.fanfic.title)\n template = get_template('mail/chapter_created_notification.txt')\n context = {'chapter': chapter}\n msg_text = template.render(context)\n msg_html = render_to_string('mail/chapter_created_notification.html', context)\n mail_sent = None\n \n if chapter.status == 'publié':\n mail_sent = send_mail(subject, msg_text, \"no-reply@fanfiction.com\", [chapter.fanfic.author.email], html_message=msg_html)\n\n return mail_sent\n\n@task()\ndef user_email_reminder():\n\ttry:\n\t\t\"\"\"\n\t\tenvoie un email aux users ne s'étant pas connecté depuis 2 semaines\n\t\t\"\"\"\n\t\tmanagement.call_command(\"email_reminder\", verbosity=0)\n\texcept:\n\t\tprint(\"error\")\n\t\t\n\t\t\n@task()\ndef deactivate_inactive_user():\n\ttry:\n\t\t\"\"\"\n\t\tdesactive les users non connectés pendant 1 an\n\t\t\"\"\"\n\t\tmanagement.call_command(\"deactivateuser\", \"1year\", verbosity=0)\n\texcept:\n\t\tprint(\"error\")\n","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611554100","text":"import Survey\r\n\r\nMIN_AGE = 10\r\nMAX_AGE = 100\r\nSURVEY_VEGAN = 0\r\nSURVEY_VEGATERIAN = 1\r\nSURVEY_OMNIVORE = 2\r\nID_INDEX = 0\r\nEATING_HABITS = {\"Vegan\": SURVEY_VEGAN,\r\n \"Omnivore\": SURVEY_OMNIVORE,\r\n 
\"Vegetarian\": SURVEY_VEGATERIAN}\r\nGENDER_DICT = {\r\n \"Man\": True,\r\n \"Woman\": False\r\n}\r\n\r\n# Checks to see if the grades fit the filters\r\n# grades: the sub list of grades, of a single voter\r\ndef grades_ok(grades):\r\n for grade in grades:\r\n if 1 <= int(grade) <= 10:\r\n continue\r\n return False\r\n return True\r\n\r\n# Creates a person dictionary from a single voter line\r\n# personal_data: the input of a single voter for the survey\r\ndef format_person_line(personal_data):\r\n person = {\r\n \"id\": int(personal_data[0]),\r\n \"age\": int(personal_data[2]),\r\n \"gender\": GENDER_DICT[personal_data[3]],\r\n \"eating_habits\": EATING_HABITS[personal_data[1]],\r\n \"scores\": Survey.SurveyCreateIntAr(5)\r\n }\r\n for i in range(0, 5):\r\n Survey.SurveySetIntArIdxVal(\r\n person[\"scores\"], i, int(personal_data[4+i]))\r\n return person\r\n\r\n# Filters a survey and prints to screen the corrected answers:\r\n# old_survey_path: The path to the unfiltered survey\r\ndef correct_myfile(old_survey_path):\r\n survey_file = open(old_survey_path, mode='r')\r\n lines_formatted = [(line.split(), line.strip(\"\\n\"))\r\n for line in survey_file]\r\n lines_filtered = [(data, line) for data, line in lines_formatted\r\n if len(data[ID_INDEX]) == 8\r\n and MIN_AGE <= int(data[2]) <= MAX_AGE\r\n and grades_ok(data[4:])]\r\n cleaned = {}\r\n for (data, line) in lines_filtered:\r\n cleaned[data[ID_INDEX]] = line\r\n sorted_cleaned = list(cleaned.items())\r\n sorted_cleaned.sort()\r\n sorted_cleaned = [line for key, line in sorted_cleaned]\r\n for line in sorted_cleaned:\r\n print(line)\r\n survey_file.close()\r\n\r\n# Returns a new Survey item with the data of a new survey file:\r\n# survey_path: The path to the survey\r\ndef scan_survey(survey_path):\r\n survey = Survey.SurveyCreateSurvey()\r\n survey_file = open(survey_path, mode='r')\r\n people = [format_person_line(line.split())\r\n for line in survey_file]\r\n for person in people:\r\n Survey.SurveyAddPerson(\r\n survey, person[\"id\"], person[\"age\"],\r\n person[\"gender\"], person[\"eating_habits\"],\r\n person[\"scores\"])\r\n Survey.SurveyDestoryIntAr(person[\"scores\"])\r\n survey_file.close()\r\n return survey\r\n\r\n\r\n# Prints a python list containing the number of votes for each rating of a group according to the arguments\r\n# s: the data of the Survey object\r\n# choc_type: the number of the chocolate (between 0 and 4)\r\n# gender: the gender of the group (string of \"Man\" or \"Woman\"\r\n# min_age: the minimum age of the group (a number)\r\n# max_age: the maximum age of the group (a number)\r\n# eating_habits: the eating habits of the group (string of \"Omnivore\", \"Vegan\" or \"Vegetarian\")\r\ndef print_info(s, choc_type, gender, min_age, max_age, eating_habits):\r\n hist = Survey.SurveyQuerySurvey(\r\n s, choc_type, GENDER_DICT[gender], min_age, max_age, EATING_HABITS[eating_habits])\r\n res = [Survey.SurveyGetIntArIdxVal(hist, index) for index in range(0, 10)]\r\n print(res)\r\n Survey.SurveyDestoryIntAr(hist)\r\n\r\n\r\n# Clears a Survey object data\r\n# s: the data of the Survey object\r\ndef clear_survey(s):\r\n Survey.SurveyDestroySurvey(s)\r\n","sub_path":"pythonFuncs.py","file_name":"pythonFuncs.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"16050176","text":"#!/usr/bin/python\n\nimport subprocess\nimport time\nimport re\nimport curses\nimport signal\nimport sys\n\n# constants for offset\nWIN_OFFSET_Y = 
3\nWIN_OFFSET_X = 2\n\nOFFSET_TOTAL = 10\nOFFSET_USED = 25\nOFFSET_CACHED = 45\nOFFSET_BUFFERS = 60\nOFFSET_ACTIVE = 75\n\n# domain ID -> metrics mapping\n# metrics: { 'total': int, 'active': int, 'free': int, 'cached': int,\n# 'buffers': int }\ndomains = {}\n\nwin = None\ndef init_screen():\n \"\"\" init curses and display the header \"\"\"\n global win\n win = curses.initscr()\n curses.start_color()\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.curs_set(0)\n \n win.clear()\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X, \"Domain\", curses.A_BOLD | curses.color_pair(1))\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X + OFFSET_TOTAL, \"Total\", curses.A_BOLD | curses.color_pair(1))\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X + OFFSET_USED, \"Used (%)\", curses.A_BOLD | curses.color_pair(1))\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X + OFFSET_CACHED, \"Cached\", curses.A_BOLD | curses.color_pair(1))\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X + OFFSET_BUFFERS, \"Buffers\", curses.A_BOLD | curses.color_pair(1))\n win.addstr(WIN_OFFSET_Y-1, WIN_OFFSET_X + OFFSET_ACTIVE, \"Active\", curses.A_BOLD | curses.color_pair(1))\n \n win.refresh()\n\ndef update(domid):\n \"\"\" update the domain's (represented by `domid`) metrics \"\"\"\n global win\n if domid not in domains:\n return\n \n win.move(WIN_OFFSET_Y + 1 + domid, WIN_OFFSET_X)\n win.clrtoeol()\n \n pct = (domains[domid]['total'] - domains[domid]['free'])*100/(domains[domid]['total'])\n \n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X,\n \"dom%d\" % domid, 15)\n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X + OFFSET_TOTAL,\n \"%d kB\" % domains[domid]['total'], 15)\n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X + OFFSET_USED,\n \"%d kB (%d%%)\" % (domains[domid]['total'] - domains[domid]['free'],pct) , 20)\n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X + OFFSET_CACHED,\n \"%d kB\" % domains[domid]['cached'], 15)\n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X + OFFSET_BUFFERS,\n \"%d kB\" % domains[domid]['buffers'], 15)\n win.addnstr(WIN_OFFSET_Y+1+domid, WIN_OFFSET_X + OFFSET_ACTIVE,\n \"%d kB\" % domains[domid]['active'], 15)\n win.refresh()\n\ndef destroy_screen():\n curses.echo()\n curses.endwin()\n\ndef sig_handle(signal, frame):\n \"\"\" signal handler that exists \"\"\"\n destroy_screen()\n sys.exit(0)\n \n\ninit_screen()\nsignal.signal(signal.SIGINT, sig_handle)\n\n\n# use xenstore-watch to listen to update events.\n# events are reported in xenstore-watch's stdout as they happen.\n# xenstore-watch always outputs a garbage first line, so ignore it.\ncommand = \"xenstore-watch /local/domain\"\np = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\np.stdout.readline()\n\nwhile True:\n s = p.stdout.readline()\n matches = re.match(\"^\\/local\\/domain\\/([0-9]+)\\/memory\\/([a-zA-Z0-9]+)$\", s)\n if matches != None:\n # we have a memory-related metric report!\n (domid, metric) = matches.groups()\n domid = int(domid)\n \n if domid not in domains:\n domains[domid] = {}\n if metric not in domains[domid]:\n domains[domid][metric] = 0\n \n # update metric\n domains[domid][metric] = int(subprocess.check_output([\"xenstore-read\", s.strip()]))\n \n # display the domain's stats if we have enough metrics\n if len(domains[domid]) == 5:\n update(domid)\n","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"298334445","text":"# -*- coding: utf-8 -*-\nfrom __future__ import 
unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.TextField(default='', max_length=30)),\n ('phone', models.CharField(default='', max_length=11)),\n ('remark', models.TextField(default='', max_length=50)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Department',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.TextField(max_length=30)),\n ('phone', models.CharField(default='000000', max_length=11)),\n ('address', models.TextField(default='', max_length=30)),\n ('zip', models.IntegerField(default=210000)),\n ('remark', models.TextField(default='', max_length=50)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.TextField(max_length=30)),\n ('sex', models.CharField(max_length=2)),\n ('birthday', models.DateTimeField(verbose_name='生日', default='1960-01-01 01:00')),\n ('degree', models.CharField(default='', max_length=8)),\n ('phone', models.CharField(default='', max_length=11)),\n ('address', models.TextField(default='', max_length=30)),\n ('zip', models.IntegerField(default=210000)),\n ('remark', models.TextField(default='', max_length=50)),\n ('department_id', models.ForeignKey(to='business.Department')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='EmployeeType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('desc', models.CharField(max_length=10)),\n ('zip', models.IntegerField()),\n ('bonus', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TableType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('type', models.CharField(max_length=10)),\n ('remark', models.TextField(default='', max_length=50)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='employee',\n name='employee_type_id',\n field=models.ForeignKey(to='business.EmployeeType'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='department',\n name='employee_id',\n field=models.ForeignKey(to='business.Employee'),\n preserve_default=True,\n ),\n ]\n","sub_path":"App/mysite/business/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"439223090","text":"\"\"\"https://www.codechef.com/APRIL20B/problems/CARSELL\"\"\"\n\nT = int(input())\n\nfor i in range(T):\n n = input()\n lst = list(map(int, input().split()))\n profit = 0\n lst = sorted(lst)\n for each in range(len(lst)):\n # ind = lst.index(max(lst))\n temp = each*(-1) + lst.pop()\n if temp < 0:\n continue\n else:\n profit += temp\n 
print(profit%1000000007)\n","sub_path":"python/codechef/Compete/April_Challenge_2020_Division_2/2.see_all_cars.py","file_name":"2.see_all_cars.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"636006346","text":"import sys\n\ns = input()\n\ntry:\n int(s)\nexcept:\n print(\"this number is uncorrect\")\n sys.exit()\n\ns = int(s)\nif s%4== 0 and s%100 != 0:\n print(\"Yes\")\nelif s%4==0 and s%400==0 and s%100==0:\n print(\"Yes\")\nelse:\n print(\"No\")\n","sub_path":"third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"264858259","text":"import json\nimport boto3\nimport os\nimport base64\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\ndynamodb = boto3.resource('dynamodb')\n\n\n\n\ndef crear(event, context):\n\n data = json.loads(event['body'])\n \n i = data['image']\n\n #imagen_decodificada = base64.b64decode(i)\n\n s3 = boto3.resource('s3')\n\n s3.Bucket('imagenescrud').put_object(Key='kjk.png', Body=i,ContentType='image/png')\n\n\n table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])\n\n body = {\n \"id\" : data['id'],\n \"nombre\": data['nombre'],\n \"clave\" : data['clave']\n }\n\n table.put_item(Item=body)\n\n #s3.upload_fileobj(imagen, 'imagencrud', 'mykey')\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": {\n 'Content-Type': 'application/json', \n 'Access-Control-Allow-Origin': '*' \n }\n }\n\n logger.info('got event{}'.format(event))\n logger.error('something went wrong')\n\n return response\n\ndef modificar(event, context):\n \n data = json.loads(event['body'])\n table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])\n\n body = {\n \"id\" : data['id'],\n \"nombre\": data['nombre'],\n \"clave\" : data['clave']\n }\n\n #table.put_item(Item=body)\n\n table.update_item(\n Key={\n 'id': data['id']\n },\n UpdateExpression='SET nombre = :val1 , clave = :val2',\n ExpressionAttributeValues={\n ':val1': data['nombre'],\n ':val2': data['clave']\n }\n )\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": {\n 'Content-Type': 'application/json', \n 'Access-Control-Allow-Origin': '*' \n }\n }\n\n return response\n\ndef eliminar(event, context):\n \n data = json.loads(event['body'])\n table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])\n\n body = {\n \"id\" : data['id']\n }\n\n table.delete_item(\n Key={\n 'id': data['id']\n }\n )\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body),\n \"headers\": {\n 'Content-Type': 'application/json', \n 'Access-Control-Allow-Origin': '*' \n }\n }\n\n return response\n\n \n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"60120740","text":"from pkg_resources import resource_filename\nimport os\nfrom astropy.io import fits\nfrom shapely.geometry import Polygon\nfrom astropy.coordinates import SkyCoord\nimport sys\nimport astrophot.projects.bass\nimport numpy as np\n\npath=resource_filename(astrophot.projects.bass.__name__,'')\ntilefile=os.path.join(path,'bass-tiles.fits')\ndef get_tiles(ra=180.0,dec=40,width=(1.0,1.0),script_name=None,exptime=100,filter='g'):\n\t\"\"\"\n\tGet tile positions within a box. If script_name is provided, \n\ta corresponding observing script would be generated. 
\n\tra: \n\tdec: box center\n\twidth: box width\n\tscript_name: name of the observing script\n\n\toutput: \n\treturn a list of 2-ele truple or list: [i,j]\n\ti is the TID? and j is the index\n\n\t\"\"\"\n\ttiles=fits.getdata(tilefile,1)\n\tnt=len(tiles)\n\trat=np.hstack([tiles['DRA1'],tiles['DRA2'],tiles['DRA3']])\n\tdect=np.hstack([tiles['DDEC1'],tiles['DDEC2'],tiles['DDEC3']])\n\tidt=np.hstack([tiles['DID1'],tiles['DID2'],tiles['DID3']])\n\tntiles=len(idt)\n \n\tratwd=1.08/np.cos(np.deg2rad(dect))/2\n\tdectwd=1.03/2\n\trawd=width[0]/np.cos(np.deg2rad(dec))/2\n\tdecwd=width[1]/2\n\n\tps=Polygon([(ra-rawd,dec-decwd),(ra-rawd,dec+decwd),\n\t\t(ra+rawd,dec+decwd),(ra+rawd,dec-decwd)])\n\n\tindex=[]\n\tif script_name is not None:\n\t\tf=open(script_name,mode='w')\n\tfor i in range(ntiles):\n\t\tira=rat[i];idec=dect[i];w1=ratwd[i];w2=dectwd\n\t\tpc=Polygon([(ira-w1,idec-w2),(ira-w1,idec+w2),\n\t\t\t(ira+w1,idec+w2),(ira+w1,idec-w2)])\n\t\tif ps.intersects(pc):\n\t\t\tif script_name is not None:\n\t\t\t\tc=SkyCoord(ira,idec,unit='deg')\n\t\t\t\trastr=c.ra.to_string(sep=\"\",precision=2,unit='hour',pad=True)\n\t\t\t\tdecstr=c.dec.to_string(sep=\"\",precision=1,unit='deg',pad=True,alwayssign=True)\n\n\t\t\t\tscript='obs {0:6.1f} '.format(exptime)+' object '+idt[i]+' 1 '+filter+' '+rastr+' '+decstr+' 2000.0\\n'\n\t\t\t\tf.write(script)\n\n\t\t\tindex.append([i//nt+1,i%nt])\n\tif script_name is not None:\n\t\tf.close()\n\treturn index\n\t\t\t\ndef main():\n\targs=sys.argv\n\tra=float(args[1])\n\tdec=float(args[2])\n\twidth=float(args[3])\n\tscriptname=args[4]\n\texptime=float(args[5])\n\tfilter=args[6] \n\tindex=get_tiles(ra=ra,dec=dec,width=(width,width),script_name=scriptname,exptime=exptime,filter=filter)\n\tprint(index)\n\nif __name__ == '__main__':\n\tmain()\n\n\t\t\n\t\n\n\n\n\n\t\n","sub_path":"astrophot/projects/bass/basstiles.py","file_name":"basstiles.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"527411408","text":"# Copyright (C) 2020 Apple Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom webkitcorepy import run, decorators\nfrom webkitscmpy.local import Scm\n\n\nclass Git(Scm):\n    executable = '/usr/bin/git'\n\n    @classmethod\n    def is_checkout(cls, path):\n        return run([cls.executable, 'rev-parse', '--show-toplevel'], cwd=path, capture_output=True).returncode == 0\n\n    def __init__(self, path):\n        super(Git, self).__init__(path)\n        if not self.root_path:\n            raise OSError('Provided path {} is not a git repository'.format(path))\n\n    @property\n    @decorators.Memoize()\n    def root_path(self):\n        result = run([self.executable, 'rev-parse', '--show-toplevel'], cwd=self.path, capture_output=True, encoding='utf-8')\n        if result.returncode:\n            return None\n        return result.stdout.rstrip()\n\n    @property\n    def branch(self):\n        status = run([self.executable, 'status'], cwd=self.root_path, capture_output=True, encoding='utf-8')\n        if status.returncode:\n            raise self.Exception('Failed to run `git status` for {}'.format(self.root_path))\n        if status.stdout.splitlines()[0].startswith('HEAD detached at'):\n            return None\n\n        result = run([self.executable, 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=self.root_path, capture_output=True, encoding='utf-8')\n        if result.returncode:\n            raise self.Exception('Failed to retrieve branch for {}'.format(self.root_path))\n        return result.stdout.rstrip()\n\n    def remote(self, name=None):\n        result = run([self.executable, 'remote', 'get-url', name or 'origin'], cwd=self.root_path, capture_output=True, encoding='utf-8')\n        if result.returncode:\n            raise self.Exception('Failed to retrieve remote for {}'.format(self.root_path))\n        return result.stdout.rstrip()\n","sub_path":"Tools/Scripts/libraries/webkitscmpy/webkitscmpy/local/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"312383955","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.python.framework.ops import Tensor\n\nclass SimpleAttention(tf.keras.models.Model):\n    \"\"\"\n    A simple attention that is not multi-head, written to explain how attention works\n    \"\"\"\n\n    def __init__(self, depth: int, *args, **kwargs):\n        \"\"\"\n        Constructor\n        :param depth: dimension of the hidden layer and the output\n        \"\"\"\n\n        super().__init__(*args, **kwargs)\n        self.depth = depth\n\n        self.q_dense_layer = tf.keras.layers.Dense(depth, use_bias=False, name='q_dense_layer')\n        self.k_dense_layer = tf.keras.layers.Dense(depth, use_bias=False, name='k_dense_layer')\n        self.v_dense_layer = tf.keras.layers.Dense(depth, use_bias=False, name='v_dense_layer')\n        self.output_dense_layer = tf.keras.layers.Dense(depth, use_bias=False, name='output_dense_layer')\n\n    def call(self, input: tf.Tensor, memory: tf.Tensor) -> tf.Tensor:\n        \"\"\"\n        Run the model\n        :param input: the query tensor\n        :param memory: the memory tensor that provides information to the query\n        \"\"\"\n        q = self.q_dense_layer(input) #[batch_size, q_length, depth]\n        k = self.k_dense_layer(memory) #[batch_size, m_length, depth]\n        v = self.v_dense_layer(memory)\n\n        #the dot product of q and k here computes a measure of relevance between query and key
\n        logit = tf.matmul(q, k, transpose_b=True) #[batch_size, q_length, k_length]\n\n        #normalize with softmax\n        attention_weight = tf.nn.softmax(logit, name='attention_weight')\n\n        #pull information from value according to the weights\n        attention_output = tf.matmul(attention_weight, v) #[batch_size, q_length, depth]\n        return self.output_dense_layer(attention_output)\n\n\n\n","sub_path":"transformer/simpleattention.py","file_name":"simpleattention.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"644271","text":"from datetime import datetime\nimport os\nimport time\nimport shutil\nfrom rogerthat.config.config import Config\nfrom rogerthat.utils.file_writing import append_to_file, append_to_file_blocking\n\n\nclass logger_cls:\n    def __init__(self):\n        self._log_dir = os.path.join(Config.project_root, \"logs\")\n        self._log_file = None\n\n    @property\n    def log_file(self):\n        if self._log_file:\n            return os.path.join(self._log_dir, self._log_file)\n        return None\n\n    def set_file(self, log_file):\n        self._log_file = f\"{Config.app_name}-{log_file}.log\"\n\n    async def log(self, string):\n        timeNow = datetime.now()\n        string = f\"[{timeNow}] {string}\"\n        print(string)\n        if self.log_file:\n            await append_to_file(string, self.log_file)\n        return True\n\n    def logb(self, string):\n        timeNow = datetime.now()\n        string = f\"[{timeNow}] {string}\"\n        print(string)\n        if self.log_file:\n            append_to_file_blocking(string, self.log_file)\n        return True\n\n    def cycle(self):\n        if self.log_file:\n            fn = self.log_file.split(\".log\")[0]\n            if os.path.exists(self.log_file):\n                shutil.move(f\"{fn}.log\", f\"{fn}.{int(time.time())}.log\")\n\n\nlogger = logger_cls()\n","sub_path":"rogerthat/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"507697214","text":"import os\nfrom random import randint as get\n\ndef gen():\n\tf = open(\"input.txt\", \"w\")\n\tn = get(2, 10)\n\tm = n - 1\n\tq = get(1, n)\n\tprint(n, m, q, file = f)\n\tfor i in range(2, n + 1):\n\t\tprint(i, get(1, i - 1), file = f)\n\ta = []\n\tfor i in range(q):\n\t\tx = get(1, n)\n\t\twhile x in a:\n\t\t\tx = get(1, n)\n\t\ta.append(x)\n\t\tprint(x, file = f)\n\n\ndef main():\n\tos.system(\"make c && make smart_c && make gen\")\n\tfor te in range(10**6):\n\t\t#gen()\n\t\tos.system(\"./gen > input.txt\")\n\t\tos.system(\"./c < input.txt > output.txt\")\n\t\tans = open(\"output.txt\", \"r\").read()\n\n\t\tos.system(\"./smart_c < input.txt > output.txt\")\n\t\tout = open(\"output.txt\", \"r\").read()\n\n\t\tif (ans != out):\n\t\t\tprint(\"WA\", te)\n\t\t\tprint(\"input = \")\n\t\t\tprint(open(\"input.txt\", \"r\").read())\n\t\t\tprint(\"out = \")\n\t\t\tprint(out)\n\t\t\tprint(\"ans = \")\n\t\t\tprint(ans)\n\t\t\tbreak\n\t\tprint(\"OK\", te)\n\nmain()","sub_path":"2020/ЗШОП/Тур1/C/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"182658205","text":"from expression_views import *\n\n\nfrom typing import Iterable\nfrom itertools import combinations\nfrom addict import Dict\n\nfrom littletools.list_and_dict_type import L, T\nimport cytoolz\n\nimport logging\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\nfrom littletools.nested_list_tools import recursive_map, curry, flatten_reduce, flatten\n\n\nimport collections\n\nclass eD
(collections.OrderedDict, forward_mapping_neo4j_view):\n    ''' expression dictionary\n\n    '''\n    def __init__ (self, *args, **kwargs):\n        super(eD, self).__init__(*args, **kwargs)\n\n\n    def __str__(self):\n        if 'text' in self:\n            return \"text={text} (id={id} s_id={s_id} i_s={i_s}) \".format(\n                id=str(self['id']),\n                s_id=str(self['s_id']),\n                i_s=str(self['i_s']),\n                text=' '.join(self['text']))\n        else:\n            return super().__str__()\n\n    def __hash__(self):\n        if 's_id' in self and 'i_s' in self:\n            return hash(self['s_id']+str(self['i_s']))\n        else:\n            return hash(str(self))\n\n    def set_property(self, prop, val):\n        self.__setattr__(prop, val)\n        return self\n\n\nclass eT (T, iterable_neo4j_view):\n    ''' expression tuple\n\n    '''\n    def __new__(self, t, **kwargs):\n        return super(eT, self).__new__(self, t, **kwargs)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def __str__(self):\n        return \"(\" +', '.join([str(e) for e in self]) + ')'\n\n    def __hash__(self):\n        return id(self)\n\n    def unique (self):\n        seen = set()\n        res = eT(tuple([seen.add(hash(obj)) or obj for obj in self if hash(obj) not in seen]), **self.__dict__)\n        return res\n\n    def set_property(self, prop, val):\n        self.__setattr__(prop, val)\n        return self\n\n\nclass eL (L, iterable_neo4j_view):\n    ''' expression list\n\n    '''\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def __str__(self):\n        return \"[\" +'\\n, '.join([str(e) for e in self]) + ']'\n\n    def __hash__(self):\n        return id(self)\n\n    def unique (self):\n        seen = set()\n        try:\n            res = eL([seen.add(hash(obj)) or obj for obj in self if hash(obj) not in seen], **self.__dict__)\n        except:\n            raise\n        return res\n\n    def set_property(self, prop, val):\n        self.__setattr__(prop, val)\n        return self\n\n\ndef ltd_ify (nltd, d=0, node_type=['NLP'], stack_types=[], d_max = 6):\n    ''' Parse all list, tuple, dict types in nested expressions to these special list, tuple, dicts defined here\n\n    Example\n    =======\n\n    >>> ltd_ify([{'side': {'side': 1, 'p_id': [91, 50, 47]}}, {'side': {'side': 0, 'p_id': [15, 0, 4]}}, {'side': {'side': 5, 'p_id': [136, 140]}}])\n    [{'side': {'side': 1, 'p_id': [91, 50, 47]}}, {'side': {'side': 0, 'p_id': [15, 0, 4]}}, {'side': {'side': 5, 'p_id': [136, 140]}}]\n\n    :param nltd: nested lists, tuples, dicts within each other\n    :return: typed nltd\n\n    '''\n    rest = []\n    if len(stack_types)>1:\n        stack_type, *rest = stack_types\n        stack_type = [stack_type]\n    else:\n        stack_type=stack_types\n\n    if d>d_max:\n        return nltd\n\n    if isinstance(nltd, tuple) or isinstance(nltd, list):\n        if isinstance(nltd, tuple):\n            if rest and isinstance(rest[0], tuple):\n                tup, *rest_of_rest = rest\n                res = eT(ltd_ify(x, d=d + 1, node_type=node_type, stack_types= [st] + rest_of_rest, d_max=d_max) if isinstance(x, Iterable) else x for x, st in zip(nltd, tup))\n            else:\n                res = eT(ltd_ify(x, d=d + 1, node_type=node_type, stack_types=rest, d_max=d_max) if isinstance(x, Iterable) else x for x in nltd)\n\n        if isinstance(nltd, list):\n            if rest and isinstance(rest[0], tuple):\n                try:\n                    tup, *rest_of_rest = rest\n                    res = eL(ltd_ify(x, d=d + 1, node_type=node_type, stack_types=[st] + rest_of_rest, d_max=d_max) if isinstance(x, Iterable) else x\n                         for x, st in zip(nltd, tup))\n                except:\n                    raise\n            else:\n                res = eL(\n                    ltd_ify(x, d=d + 1, node_type=node_type, stack_types=rest, d_max=d_max) if isinstance(x, Iterable) else x for x\n                    in nltd)\n\n        res.set_property('node_type', node_type + stack_type)\n        return res\n\n    elif isinstance(nltd, dict):
\n        res = (eD if not isinstance(nltd, eD) else type(nltd)) ({k:ltd_ify(x, d=d + 1, node_type=node_type, stack_types=rest, d_max=d_max) if isinstance(x, Iterable) else x for k, x in nltd.items()})\n        if (hasattr(nltd, 'set_property')):\n            res.set_property('node_type', node_type + stack_type)\n        return res\n\n    #elif isinstance(nltd, eD):\n    #    res = nltd\n    #    res.set_property('node_type', node_type + stack_type)\n    #    return res\n\n    return nltd\n\n\ndef apply_fun_to_attribute_of_ex(ex, fun, *args, attribute=None, out_attribute=None, other_criterium=None, reduce=False, **kwargs):\n    ''' Take one value of the ex. It's 'currying' of the dict, forwarding the rest.\n    It's good for projections on the dicts.\n\n    Example\n    =======\n\n    >>> apply_fun_to_attribute_of_ex({'side': 1, 'p_id': [91, 50, 47]},\n    ...      fun=str,\n    ...      attribute='p_id')\n    {'side': 1, 'p_id': '[91, 50, 47]'}\n\n\n    :param expression: predicate dict\n    :param reduce: if the result should replace the whole dict\n    :param **kwargs: parameters, like a function to be used with the ex\n    :return: the same like the input, but the ex is limited to the attribute\n\n    '''\n    if isinstance(ex, dict) and \\\n            ((not (isinstance(attribute, list) or isinstance(attribute, tuple)) and attribute in ex)\n             or (isinstance(attribute, list) and any(a in ex for a in attribute))\n             or (isinstance(attribute, tuple) and all(a in ex for a in attribute))):\n        if reduce:\n            return fun(ex[attribute], *args, **kwargs)\n        else:\n            if isinstance(attribute, list):\n                which = [a for a in attribute if a in ex]\n                if out_attribute:\n                    where = {a:out_attribute[i] for i, a in enumerate(attribute) if a in ex}\n                for w in which:\n                    if out_attribute:\n                        ex.update({where[w]: fun(ex[w], *args, **kwargs)})\n                    else:\n                        ex.update({w: fun(ex[w], *args, **kwargs)})\n\n            elif isinstance(attribute, tuple):\n                if all(a in ex for a in attribute):\n                    which_concrete = tuple([ex[a] for a in attribute if a in ex]) # some lists\n                    if out_attribute:\n                        where = {a: out_attribute[i] for i, a in enumerate(attribute) if a in ex}\n                        w = tuple((where[a] for a in attribute))\n                        ex.update({w: fun(which_concrete, *args, **kwargs)})\n                    else:\n                        ex.update({attribute: fun(which_concrete, *args, **kwargs)})\n            else:\n                ex.update({attribute: fun (ex[attribute], *args, **kwargs)})\n    if other_criterium:\n        return fun(ex, *args, **kwargs)\n    return ex\n\n\ndef apply_fun_to_nested(fun=None, attribute=None, out_attribute=None, other_criterium=None, data=None, reduce=False):\n    ''' This function runs through a nested structure of eD's, eL's and eT's, and allows to apply a function in the\n        depth of this tree. It works like working in a forest, climbing on all the trees and doing there something\n        if the property named by `attribute` applies.\n\n        It's useful when it's complicated or annoying to write the looping yourself. It works recursively.\n\n    :param fun: function, that gets one argument (you can curry the function, if you have multiple arguments there)\n    :param attribute: the attribute in the tree\n    :param data: the nesting eD's, eL's and eT's\n    :param reduce: if the attribute should be kept there or is just replaced by the return value of the function\n    :return: changed nested structure\n\n    '''\n    if not fun or not (attribute or other_criterium) or not data:\n        raise ValueError (\"All parameters must be given. {args}\".format(args={'fun':fun, 'attribute':attribute, 'data': bool(data)}))
\n    return ltd_ify(\n        recursive_map(\n            curry(\n                apply_fun_to_attribute_of_ex,\n                attribute=attribute,\n                out_attribute=out_attribute,\n                fun=fun,\n                reduce=reduce,\n                other_criterium=other_criterium),\n            data,\n            other_criterium=other_criterium))\n\n\n\nclass PredMom(eD, pred_neo4j_view):\n    def __init__ (self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.node_type = ['PREDICATE']\n\n\n    def __str__(self):\n        if 'text' in self and 'part_predications' in self:\n            return \"{text} ~ {predicates}\".format(\n                text = ' '.join(self['text']),\n                predicates = str (self['part_predications']))\n        elif 'text' in self:\n            return ' '.join(self['text'])\n        else:\n            return super().__str__()\n\n\nclass Pred(pred_neo4j_view, eD):\n    def __init__ (self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.node_type = ['PREDICATE']\n\n\nclass Argu (argu_neo4j_view, eD):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.node_type = ['ARGUMENT']\n\n    def __str__(self):\n        return \"{a} > sc={sc}, as={ac}\".format(\n            sc=self['subj_score'],\n            ac=self['aspe_score'],\n            a=super().__str__())\n","sub_path":"hardcore_annotated_expression.py","file_name":"hardcore_annotated_expression.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"506314019","text":"from currency.forms import BankForm, ContactUsForm, RateForm\nfrom currency.models import Banks, ContactUs, Rate # noqa\nfrom currency.tasks import send_email_contactus\nfrom currency.filters import RateFilter\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import (CreateView, DeleteView, DetailView,\n                                  ListView, UpdateView, )\nfrom django_filters.views import FilterView\n\n\nclass BanksListView(ListView):\n    template_name = 'bank_list.html'\n    queryset = Banks.objects.all()\n\n\nclass BankDetailView(DetailView):\n    template_name = 'bank_details.html'\n    queryset = Banks.objects.all()\n\n\nclass ContactUsListView(ListView):\n    template_name = 'contactus_list.html'\n    queryset = ContactUs.objects.all()\n\n\nclass ContactusDetailView(DetailView):\n    template_name = 'contactus_details.html'\n    queryset = ContactUs.objects.all()\n\n\nclass ContactUsCreateView(CreateView):\n    template_name = 'contactus_create.html'\n    model = ContactUs\n    form_class = ContactUsForm\n\n    success_url = reverse_lazy('currency:contactus-list')\n\n    def form_valid(self, form):\n        data = form.cleaned_data\n        body = f'''\n        From: {data['email_from']}\n        Topic: {data['subject']}\n        \\n\n        Message: {data['message']}\n        '''\n        send_email_contactus.delay(body)\n\n        return super().form_valid(form)\n\n\nclass ContactUsUpdateView(UpdateView):\n    queryset = ContactUs.objects.all()\n    template_name = 'contactus_update.html'\n    success_url = reverse_lazy('currency:contactus-list')\n    model = ContactUs\n    form_class = ContactUsForm\n\n\nclass ContactDeleteView(DeleteView):\n    template_name = 'contact_confirm_delete.html'\n    queryset = ContactUs.objects.all()\n    success_url = reverse_lazy('currency:contactus-list')\n\n\nclass BankCreateView(CreateView):\n    template_name = 'bank_create.html'\n    model = Banks\n    form_class = BankForm\n\n    success_url = reverse_lazy('currency:banks')\n\n\nclass BankUpdateView(UpdateView):\n    queryset = Banks.objects.all()\n    template_name = 'bank_update.html'\n    success_url = reverse_lazy('currency:banks')\n    model = Banks\n    form_class = 
BankForm\n\n\nclass BankDeleteView(DeleteView):\n template_name = 'bank_confirm_delete.html'\n queryset = Banks.objects.all()\n success_url = reverse_lazy('currency:banks')\n\n\nclass RateListView(FilterView):\n template_name = 'rate_list.html'\n queryset = Rate.objects.all().select_related('bank')\n paginate_by = 25\n filterset_class = RateFilter\n\n\nclass RateUpdateView(UpdateView):\n queryset = Rate.objects.all()\n template_name = 'rate_update.html'\n success_url = reverse_lazy('currency:rate-list')\n model = Rate\n form_class = RateForm\n\n\nclass RateDetailView(DetailView):\n template_name = 'rate_details.html'\n queryset = Rate.objects.all()\n\n\nclass RateDeleteView(DeleteView):\n template_name = 'rate_confirm_delete.html'\n queryset = Rate.objects.all()\n success_url = reverse_lazy('currency:rate-list')\n\n\nclass RateCreateView(CreateView):\n template_name = 'rate_create.html'\n model = Rate\n form_class = RateForm\n\n success_url = reverse_lazy('currency:rate-list')\n\n\n# class RateDeleteView(UserPassesTestMixin, DeleteView):\n# template_name = 'rate_confirm_delete.html'\n# queryset = Rate.objects.all()\n# success_url = reverse_lazy('currency:rate-list')\n#\n# def superuser_validity_check(self):\n# self.queryset = self.get_object()\n# return self.request.user.is_superuser\n\ndef index(request):\n return render(request, 'index.html')\n","sub_path":"app/currency/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252612741","text":"import unittest\nimport pytest\nimport numpy as np\nimport os\n\nimport pycqed as pq\nfrom pytest import approx\n\nimport pycqed.analysis.analysis_toolbox as a_tools\nfrom pycqed.measurement import measurement_control\n\nimport pycqed.instrument_drivers.virtual_instruments.virtual_SignalHound as sh\nimport pycqed.instrument_drivers.virtual_instruments.virtual_MW_source as vmw\n\nimport pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQuantumController as UHF\nimport pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 as HDAWG\nfrom pycqed.instrument_drivers.physical_instruments.QuTech_Duplexer import Dummy_Duplexer\n#from pycqed.instrument_drivers.meta_instrument.qubit_objects.QuDev_transmon import QuDev_transmon\n#from pycqed.instrument_drivers.meta_instrument.qubit_objects.Tektronix_driven_transmon import Tektronix_driven_transmon\n#from pycqed.instrument_drivers.meta_instrument.qubit_objects.CC_transmon import CBox_v3_driven_transmon, QWG_driven_transmon\nfrom pycqed.instrument_drivers.physical_instruments.QuTech_CCL import dummy_CCL, CCL\nfrom pycqed.instrument_drivers.physical_instruments.QuTech_QCC import dummy_QCC, QCC\nfrom pycqed.instrument_drivers.physical_instruments.QuTechCC import QuTechCC\nfrom pycqed.instrument_drivers.physical_instruments.Transport import DummyTransport\n\nfrom pycqed.instrument_drivers.meta_instrument.LutMans.ro_lutman import UHFQC_RO_LutMan\nfrom pycqed.instrument_drivers.meta_instrument import device_object_CCL as do\nfrom pycqed.instrument_drivers.meta_instrument.LutMans import mw_lutman as mwl\nimport pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon as ct\n#from pycqed.measurement.waveform_control_CC import waveform as wf\n\nfrom qcodes import station\n\n\nfrom pycqed.measurement.detector_functions import Multi_Detector_UHF, \\\n UHFQC_input_average_detector, UHFQC_integrated_average_detector, \\\n 
UHFQC_integration_logging_det\n\ntry:\n import openql\n openql_import_fail = False\nexcept:\n openql_import_fail = True\n\n\nclass Test_Device_obj(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n \"\"\"\n This sets up a mock setup using a CCL to control multiple qubits\n \"\"\"\n self.station = station.Station()\n\n self.MW1 = vmw.VirtualMWsource('MW1')\n self.MW2 = vmw.VirtualMWsource('MW2')\n self.MW3 = vmw.VirtualMWsource('MW3')\n self.SH = sh.virtual_SignalHound_USB_SA124B('SH')\n self.UHFQC_0 = UHF.UHFQC(name='UHFQC_0', server='emulator',\n device='dev2109', interface='1GbE')\n\n self.UHFQC_1 = UHF.UHFQC(name='UHFQC_1', server='emulator',\n device='dev2110', interface='1GbE')\n\n self.UHFQC_2 = UHF.UHFQC(name='UHFQC_2', server='emulator',\n device='dev2111', interface='1GbE')\n\n self.CCL = dummy_CCL('CCL')\n self.QCC = dummy_QCC('QCC')\n self.CC = QuTechCC('CC', DummyTransport())\n self.VSM = Dummy_Duplexer('VSM')\n\n self.MC = measurement_control.MeasurementControl(\n 'MC', live_plot_enabled=False, verbose=False)\n self.MC.station = self.station\n self.station.add_component(self.MC)\n\n # Required to set it to the testing datadir\n test_datadir = os.path.join(pq.__path__[0], 'tests', 'test_output')\n self.MC.datadir(test_datadir)\n a_tools.datadir = self.MC.datadir()\n\n self.AWG_mw_0 = HDAWG.ZI_HDAWG8(\n name='AWG_mw_0', server='emulator', num_codewords=32,\n device='dev8026', interface='1GbE')\n\n self.AWG_mw_1 = HDAWG.ZI_HDAWG8(\n name='AWG_mw_1', server='emulator', num_codewords=32,\n device='dev8027', interface='1GbE')\n self.AWG_flux_0 = HDAWG.ZI_HDAWG8(\n name='AWG_flux_0', server='emulator', num_codewords=32,\n device='dev8028', interface='1GbE')\n\n self.AWG8_VSM_MW_LutMan = mwl.AWG8_VSM_MW_LutMan('MW_LutMan_VSM')\n self.AWG8_VSM_MW_LutMan.AWG(self.AWG_mw_0.name)\n self.AWG8_VSM_MW_LutMan.channel_GI(1)\n self.AWG8_VSM_MW_LutMan.channel_GQ(2)\n self.AWG8_VSM_MW_LutMan.channel_DI(3)\n self.AWG8_VSM_MW_LutMan.channel_DQ(4)\n self.AWG8_VSM_MW_LutMan.mw_modulation(100e6)\n self.AWG8_VSM_MW_LutMan.sampling_rate(2.4e9)\n\n self.ro_lutman_0 = UHFQC_RO_LutMan(\n 'ro_lutman_0', feedline_number=0, feedline_map='S17', num_res=9)\n self.ro_lutman_0.AWG(self.UHFQC_0.name)\n\n self.ro_lutman_1 = UHFQC_RO_LutMan(\n 'ro_lutman_1', feedline_number=1, feedline_map='S17', num_res=9)\n self.ro_lutman_1.AWG(self.UHFQC_1.name)\n\n self.ro_lutman_2 = UHFQC_RO_LutMan(\n 'ro_lutman_2', feedline_number=2, feedline_map='S17', num_res=9)\n self.ro_lutman_2.AWG(self.UHFQC_2.name)\n\n # Assign instruments\n qubits = []\n for q_idx in range(17):\n q = ct.CCLight_Transmon('q{}'.format(q_idx))\n qubits.append(q)\n\n q.instr_LutMan_MW(self.AWG8_VSM_MW_LutMan.name)\n q.instr_LO_ro(self.MW1.name)\n q.instr_LO_mw(self.MW2.name)\n q.instr_spec_source(self.MW3.name)\n\n if q_idx in [13, 16]:\n q.instr_acquisition(self.UHFQC_0.name)\n q.instr_LutMan_RO(self.ro_lutman_0.name)\n elif q_idx in [1, 4, 5, 7, 8, 10, 11, 14, 15]:\n q.instr_acquisition(self.UHFQC_1.name)\n q.instr_LutMan_RO(self.ro_lutman_1.name)\n elif q_idx in [0, 2, 3, 6, 9, 12]:\n q.instr_acquisition(self.UHFQC_2.name)\n q.instr_LutMan_RO(self.ro_lutman_2.name)\n\n q.instr_VSM(self.VSM.name)\n q.instr_CC(self.CCL.name)\n q.instr_MC(self.MC.name)\n\n q.instr_SH(self.SH.name)\n\n config_fn = os.path.join(\n pq.__path__[0], 'tests', 'test_cfg_CCL.json')\n q.cfg_openql_platform_fn(config_fn)\n\n # Setting some \"random\" initial parameters\n q.ro_freq(5.43e9+q_idx*50e6)\n q.ro_freq_mod(200e6)\n\n q.freq_qubit(4.56e9+q_idx*50e6)\n 
q.freq_max(4.62e9+q_idx*50e6)\n\n q.mw_freq_mod(-100e6)\n q.mw_awg_ch(1)\n q.cfg_qubit_nr(q_idx)\n # q.mw_vsm_delay(15)\n q.mw_mixer_offs_GI(.1)\n q.mw_mixer_offs_GQ(.2)\n q.mw_mixer_offs_DI(.3)\n q.mw_mixer_offs_DQ(.4)\n\n # Set up the device object and set required params\n self.device = do.DeviceCCL('device')\n self.device.qubits([q.name for q in qubits])\n self.device.instr_CC(self.CCL.name)\n self.device.instr_AWG_mw_0(self.AWG_mw_0.name)\n self.device.instr_AWG_mw_1(self.AWG_mw_1.name)\n self.device.instr_AWG_flux_0(self.AWG_flux_0.name)\n\n self.device.ro_lo_freq(6e9)\n\n def test_get_dio_map(self):\n self.device.instr_CC(self.CCL.name)\n dio_map = self.device.dio_map()\n expected_dio_map = {'ro_0': 1,\n 'ro_1': 2,\n 'flux_0': 3,\n 'mw_0': 4,\n 'mw_1': 5}\n assert dio_map == expected_dio_map\n\n self.device.instr_CC(self.QCC.name)\n dio_map = self.device.dio_map()\n expected_dio_map = {'ro_0': 1,\n 'ro_1': 2,\n 'ro_2': 3,\n 'mw_0': 4,\n 'mw_1': 5,\n 'flux_0': 6,\n 'flux_1': 7,\n 'flux_2': 8,\n 'mw_2': 9,\n 'mw_3': 10,\n 'mw_4': 11\n }\n assert dio_map == expected_dio_map\n\n def test_get_dio_map_CC(self):\n self.device.instr_CC(self.CC.name)\n dio_map = self.device.dio_map()\n expected_dio_map = {'ro_0': 0,\n 'ro_1': 1,\n 'ro_2': 2,\n 'mw_0': 3,\n 'mw_1': 4,\n 'flux_0': 6,\n 'flux_1': 7,\n 'flux_2': 8}\n\n assert dio_map == expected_dio_map\n\n def test_prepare_timing_CCL(self):\n self.device.instr_CC(self.CCL.name)\n self.device.tim_ro_latency_0(200e-9)\n self.device.tim_ro_latency_1(180e-9)\n self.device.tim_flux_latency_0(-40e-9)\n self.device.tim_mw_latency_0(20e-9)\n self.device.tim_mw_latency_1(0e-9)\n\n self.device.prepare_timing()\n\n # DIO timing map for CCL:\n # dio1: ro_latency_0\n # dio2: ro_latency_1\n # dio3: flux_latency_0\n # dio4: mw_latency_0\n # dio5: mw_latency_1\n\n assert(self.CCL.dio1_out_delay() == 12)\n assert(self.CCL.dio2_out_delay() == 11)\n assert(self.CCL.dio3_out_delay() == 0)\n assert(self.CCL.dio4_out_delay() == 3)\n assert(self.CCL.dio5_out_delay() == 2)\n\n def test_prepare_timing_QCC(self):\n self.device.instr_CC(self.QCC.name)\n self.device.tim_ro_latency_0(200e-9)\n self.device.tim_ro_latency_1(180e-9)\n self.device.tim_flux_latency_0(-40e-9)\n self.device.tim_flux_latency_1(100e-9)\n self.device.tim_mw_latency_0(20e-9)\n self.device.tim_mw_latency_1(0e-9)\n\n self.device.prepare_timing()\n\n assert(self.QCC.dio1_out_delay() == 12)\n assert(self.QCC.dio2_out_delay() == 11)\n assert(self.QCC.dio4_out_delay() == 3)\n assert(self.QCC.dio5_out_delay() == 2)\n assert(self.QCC.dio6_out_delay() == 0)\n assert(self.QCC.dio7_out_delay() == 7)\n\n def test_prepare_timing_QCC_fine(self):\n self.device.instr_CC(self.QCC.name)\n self.device.tim_ro_latency_0(200e-9)\n self.device.tim_ro_latency_1(180e-9)\n self.device.tim_flux_latency_0(-36e-9)\n self.device.tim_flux_latency_1(100e-9)\n self.device.tim_mw_latency_0(23e-9)\n self.device.tim_mw_latency_1(0e-9)\n\n self.device.prepare_timing()\n\n assert(self.QCC.dio1_out_delay() == 12)\n assert(self.QCC.dio2_out_delay() == 11)\n assert(self.QCC.dio4_out_delay() == 3)\n assert(self.QCC.dio5_out_delay() == 2)\n assert(self.QCC.dio6_out_delay() == 0)\n assert(self.QCC.dio7_out_delay() == 7)\n\n assert(self.AWG_flux_0.sigouts_0_delay() == approx(4e-9))\n assert(self.AWG_flux_0.sigouts_7_delay() == approx(4e-9))\n\n assert(self.AWG_mw_0.sigouts_7_delay() == approx(3e-9))\n assert(self.AWG_mw_0.sigouts_7_delay() == approx(3e-9))\n\n assert(self.AWG_mw_1.sigouts_7_delay() == approx(0))\n 
assert(self.AWG_mw_1.sigouts_7_delay() == approx(0))\n\n    def test_prepare_timing_CC(self):\n        self.device.instr_CC(self.CC.name)\n        self.device.tim_ro_latency_0(200e-9)\n        self.device.tim_ro_latency_1(180e-9)\n        self.device.tim_flux_latency_0(-40e-9)\n        self.device.tim_flux_latency_1(100e-9)\n        self.device.tim_mw_latency_0(20e-9)\n        self.device.tim_mw_latency_1(0e-9)\n\n        self.device.prepare_timing()\n\n        assert(self.CC.dio0_out_delay() == 12)\n        assert(self.CC.dio1_out_delay() == 11)\n        assert(self.CC.dio3_out_delay() == 3)\n        assert(self.CC.dio4_out_delay() == 2)\n        assert(self.CC.dio6_out_delay() == 0)\n        assert(self.CC.dio7_out_delay() == 7)\n\n    def test_prepare_readout_lo_freqs_config(self):\n        # Test that the modulation frequencies of all qubits\n        # are set correctly.\n        self.device.ro_acq_weight_type('optimal')\n        qubits = self.device.qubits()\n\n        self.device.ro_lo_freq(6e9)\n        self.device.prepare_readout(qubits=qubits)\n\n        # MW1 is specified as the readout LO source\n        assert self.MW1.frequency() == 6e9\n        for qname in qubits:\n            q = self.device.find_instrument(qname)\n            assert 6e9 + q.ro_freq_mod() == q.ro_freq()\n\n        self.device.ro_lo_freq(5.8e9)\n        self.device.prepare_readout(qubits=qubits)\n\n        # MW1 is specified as the readout LO source\n        assert self.MW1.frequency() == 5.8e9\n        for qname in qubits:\n            q = self.device.find_instrument(qname)\n            assert 5.8e9 + q.ro_freq_mod() == q.ro_freq()\n\n        q = self.device.find_instrument('q5')\n        q.instr_LO_ro(self.MW3.name)\n        with pytest.raises(ValueError):\n            self.device.prepare_readout(qubits=qubits)\n        q.instr_LO_ro(self.MW1.name)\n\n    def test_prepare_readout_assign_weights(self):\n        self.device.ro_lo_freq(6e9)\n\n        self.device.ro_acq_weight_type('optimal')\n        qubits = self.device.qubits()\n\n        q13 = self.device.find_instrument('q13')\n        q13.ro_acq_weight_func_I(np.ones(128))\n        q13.ro_acq_weight_func_Q(np.ones(128)*.5)\n\n\n        self.device.prepare_readout(qubits=qubits)\n        exp_ch_map = {\n            'UHFQC_0': {'q13': 0, 'q16': 1},\n            'UHFQC_1': {'q1': 0, 'q4': 1, 'q5': 2, 'q7': 3, 'q8': 4,\n                        'q10': 5, 'q11': 6, 'q14': 7, 'q15': 8},\n            'UHFQC_2': {'q0': 0, 'q2': 1, 'q3': 2, 'q6': 3, 'q9': 4, 'q12': 5}}\n        assert exp_ch_map == self.device._acq_ch_map\n\n        qb = self.device.find_instrument('q12')\n        assert qb.ro_acq_weight_chI() == 5\n        assert qb.ro_acq_weight_chQ() == 6\n\n    def test_prepare_readout_assign_weights_order_matters(self):\n        # Test that the order of the channels is as in the order iterated over\n        qubits = ['q2', 'q3', 'q0']\n        self.device.ro_acq_weight_type('optimal')\n        self.device.prepare_readout(qubits=qubits)\n        exp_ch_map = {\n            'UHFQC_2': {'q0': 2, 'q2': 0, 'q3': 1}}\n        assert exp_ch_map == self.device._acq_ch_map\n        qb = self.device.find_instrument('q3')\n        assert qb.ro_acq_weight_chI() == 1\n        assert qb.ro_acq_weight_chQ() == 2\n\n    def test_prepare_readout_assign_weights_IQ_counts_double(self):\n        qubits = ['q2', 'q3', 'q0', 'q13', 'q16']\n        self.device.ro_acq_weight_type('SSB')\n        self.device.prepare_readout(qubits=qubits)\n        exp_ch_map = {\n            'UHFQC_0': {'q13': 0, 'q16': 2},\n            'UHFQC_2': {'q0': 4, 'q2': 0, 'q3': 2}}\n        assert exp_ch_map == self.device._acq_ch_map\n        qb = self.device.find_instrument('q16')\n        assert qb.ro_acq_weight_chI() == 2\n        assert qb.ro_acq_weight_chQ() == 3\n\n    def test_prepare_readout_assign_weights_too_many_raises(self):\n        qubits = self.device.qubits()\n        self.device.ro_acq_weight_type('SSB')\n        with pytest.raises(ValueError):\n            self.device.prepare_readout(qubits=qubits)\n\n    def test_prepare_readout_resets_UHF(self):\n        uhf = self.device.find_instrument('UHFQC_2')\n\n        
uhf.qas_0_correlations_5_enable(1)\n uhf.qas_0_correlations_5_source(3)\n uhf.qas_0_thresholds_5_correlation_enable(1)\n uhf.qas_0_thresholds_5_correlation_source(3)\n\n assert uhf.qas_0_correlations_5_enable() == 1\n assert uhf.qas_0_correlations_5_source() == 3\n assert uhf.qas_0_thresholds_5_correlation_enable() == 1\n assert uhf.qas_0_thresholds_5_correlation_source() == 3\n\n self.device.prepare_readout(qubits=['q0'])\n\n assert uhf.qas_0_correlations_5_enable() == 0\n assert uhf.qas_0_correlations_5_source() == 0\n assert uhf.qas_0_thresholds_5_correlation_enable() == 0\n assert uhf.qas_0_thresholds_5_correlation_source() == 0\n\n def test_prepare_ro_pulses_resonator_combinations(self):\n # because not all combinations are supported the default is to\n # support\n\n qubits = ['q2', 'q3', 'q0', 'q13', 'q16']\n self.device.prepare_readout(qubits=qubits)\n\n # Combinations are based on qubit number\n res_combs0 = self.ro_lutman_0.resonator_combinations()\n exp_res_combs0 = [[13], [16], [13, 16]]\n assert res_combs0 == exp_res_combs0\n\n res_combs2 = self.ro_lutman_2.resonator_combinations()\n exp_res_combs2 = [[2], [3], [0], [2, 3, 0]]\n assert res_combs2 == exp_res_combs2\n\n def test_prepare_ro_pulses_lutman_pars_updated(self):\n q = self.device.find_instrument('q5')\n q.ro_pulse_amp(.4)\n self.device.prepare_readout(['q5'])\n ro_amp = self.ro_lutman_1.M_amp_R5()\n assert ro_amp == .4\n\n q.ro_pulse_amp(.2)\n self.device.prepare_readout(['q5'])\n ro_amp = self.ro_lutman_1.M_amp_R5()\n assert ro_amp == .2\n\n def test_prep_ro_input_avg_det(self):\n qubits = self.device.qubits()\n self.device.ro_acq_weight_type('optimal')\n self.device.prepare_readout(qubits=qubits)\n\n exp_ch_map = {\n 'UHFQC_0': {'q13': 0, 'q16': 1},\n 'UHFQC_1': {'q1': 0, 'q4': 1, 'q5': 2, 'q7': 3, 'q8': 4,\n 'q10': 5, 'q11': 6, 'q14': 7, 'q15': 8},\n 'UHFQC_2': {'q0': 0, 'q2': 1, 'q3': 2, 'q6': 3, 'q9': 4, 'q12': 5}}\n\n inp_avg_det = self.device.input_average_detector\n assert isinstance(inp_avg_det, Multi_Detector_UHF)\n assert len(inp_avg_det.detectors) == 3\n for ch_det in inp_avg_det.detectors:\n assert isinstance(ch_det, UHFQC_input_average_detector)\n # Note taht UHFQC_2 is first because q0 is the first in device.qubits\n assert inp_avg_det.value_names == [\n 'UHFQC_2 ch0', 'UHFQC_2 ch1',\n 'UHFQC_1 ch0', 'UHFQC_1 ch1',\n 'UHFQC_0 ch0', 'UHFQC_0 ch1']\n\n def test_prepare_ro_instantiate_detectors_int_avg(self):\n qubits = ['q13', 'q16', 'q1', 'q5', 'q0']\n self.device.ro_acq_weight_type('optimal')\n self.device.prepare_readout(qubits=qubits)\n\n int_avg_det = self.device.int_avg_det\n assert isinstance(int_avg_det, Multi_Detector_UHF)\n assert len(int_avg_det.detectors) == 3\n for ch_det in int_avg_det.detectors:\n assert isinstance(ch_det, UHFQC_integrated_average_detector)\n # Note that UHFQC_2 is first because q0 is the first in device.qubits\n assert int_avg_det.value_names == [\n 'UHFQC_0 w0 q13', 'UHFQC_0 w1 q16',\n 'UHFQC_1 w0 q1', 'UHFQC_1 w1 q5',\n 'UHFQC_2 w0 q0']\n\n qubits = ['q13', 'q16', 'q1', 'q5', 'q0']\n self.device.ro_acq_weight_type('SSB')\n self.device.prepare_readout(qubits=qubits)\n\n int_avg_det = self.device.int_avg_det\n assert isinstance(int_avg_det, Multi_Detector_UHF)\n assert len(int_avg_det.detectors) == 3\n for ch_det in int_avg_det.detectors:\n assert isinstance(ch_det, UHFQC_integrated_average_detector)\n # Note that UHFQC_2 is first because q0 is the first in device.qubits\n assert int_avg_det.value_names == [\n 'UHFQC_0 w0 q13 I', 'UHFQC_0 w1 q13 Q',\n 'UHFQC_0 w2 q16 I', 
'UHFQC_0 w3 q16 Q',\n 'UHFQC_1 w0 q1 I', 'UHFQC_1 w1 q1 Q',\n 'UHFQC_1 w2 q5 I', 'UHFQC_1 w3 q5 Q',\n 'UHFQC_2 w0 q0 I', 'UHFQC_2 w1 q0 Q']\n\n # Note that the order of channels gets ordered per feedline\n # because of the way the multi detector works\n\n def test_prepare_ro_instantiate_detectors_int_logging(self):\n qubits = ['q13', 'q16', 'q1', 'q5', 'q0']\n self.device.ro_acq_weight_type('optimal')\n self.device.prepare_readout(qubits=qubits)\n\n int_log_det = self.device.int_log_det\n assert isinstance(int_log_det, Multi_Detector_UHF)\n assert len(int_log_det.detectors) == 3\n for ch_det in int_log_det.detectors:\n assert isinstance(ch_det, UHFQC_integration_logging_det)\n # Note that UHFQC_2 is first because q0 is the first in device.qubits\n assert int_log_det.value_names == [\n 'UHFQC_0 w0 q13', 'UHFQC_0 w1 q16',\n 'UHFQC_1 w0 q1', 'UHFQC_1 w1 q5',\n 'UHFQC_2 w0 q0']\n\n qubits = self.device.qubits()\n qubits = ['q13', 'q16', 'q1', 'q5', 'q0']\n self.device.ro_acq_weight_type('SSB')\n self.device.prepare_readout(qubits=qubits)\n\n int_log_det = self.device.int_log_det\n assert isinstance(int_log_det, Multi_Detector_UHF)\n assert len(int_log_det.detectors) == 3\n for ch_det in int_log_det.detectors:\n assert isinstance(ch_det, UHFQC_integration_logging_det)\n # Note that UHFQC_2 is first because q0 is the first in device.qubits\n assert int_log_det.value_names == [\n 'UHFQC_0 w0 q13 I', 'UHFQC_0 w1 q13 Q',\n 'UHFQC_0 w2 q16 I', 'UHFQC_0 w3 q16 Q',\n 'UHFQC_1 w0 q1 I', 'UHFQC_1 w1 q1 Q',\n 'UHFQC_1 w2 q5 I', 'UHFQC_1 w3 q5 Q',\n 'UHFQC_2 w0 q0 I', 'UHFQC_2 w1 q0 Q']\n\n def test_prepare_readout_mixer_settings(self):\n pass\n\n @classmethod\n def tearDownClass(self):\n for instr_name in list(self.device._all_instruments):\n try:\n inst = self.device.find_instrument(instr_name)\n inst.close()\n except KeyError:\n pass\n\n\ndef test_acq_ch_map_to_IQ_ch_map():\n\n ch_map = {\n 'UHFQC_0': {'q13': 0, 'q16': 2},\n 'UHFQC_1': {'q1': 0, 'q4': 4},\n 'UHFQC_2': {'q0': 0, 'q3': 2, 'q6': 4}}\n\n IQ_ch_map = do._acq_ch_map_to_IQ_ch_map(ch_map)\n exp_IQ_ch_map = {\n 'UHFQC_0': {'q13 I': 0, 'q13 Q': 1, 'q16 I': 2, 'q16 Q': 3},\n 'UHFQC_1': {'q1 I': 0, 'q1 Q': 1, 'q4 I': 4, 'q4 Q': 5},\n 'UHFQC_2': {'q0 I': 0, 'q0 Q': 1, 'q3 I': 2, 'q3 Q': 3,\n 'q6 I': 4, 'q6 Q': 5}}\n\n assert IQ_ch_map == exp_IQ_ch_map\n","sub_path":"pycqed/tests/dev_qubit_objs/test_device_objects.py","file_name":"test_device_objects.py","file_ext":"py","file_size_in_byte":21615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"593147471","text":"from PyQt5 import QtMultimediaWidgets, QtMultimedia, QtWidgets, QtCore, QtGui\nimport sys, os\n\n\nclass MediaPlayer(QtWidgets.QWidget):\n # 自定义信号\n _slider_draged = QtCore.pyqtSignal(int)\n\n def __init__(self):\n super(MediaPlayer, self).__init__()\n self.setWindowTitle(\"IMedia Player\")\n icon_path = os.getcwd() + '\\images\\logo.jpg'\n self.setWindowIcon(QtGui.QIcon(icon_path))\n self.layout = QtWidgets.QVBoxLayout()\n self.slider = Slider(self)\n self.media_screen = PlayScreen(self)\n\n self.player = Player(self)\n self.setWidgets()\n self.setSignal()\n\n def setWidgets(self):\n self.layout.setSpacing(0)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.addWidget(self.media_screen)\n self.layout.addWidget(self.slider)\n\n self.setLayout(self.layout)\n\n def setSignal(self):\n # 关联视频进度和进度条位置\n self.player.positionChanged.connect(self.slider.set_slider_position)\n 
self.player.metaDataAvailableChanged.connect(self.player.metaDataPrint)\n # self.\n self.player.mediaStatusChanged.connect(self.player.mediaChangedSlot)\n self.player.error.connect(self.player.handleError)\n self._slider_draged.connect(self.player.set_media_position)\n\n \"\"\"\n def closeEvent(self, event):\n reply = QtWidgets.QMessageBox.question(self, '确认退出', '你确定退出吗?',\n QtWidgets.QMessageBox.Yes,\n QtWidgets.QMessageBox.No)\n if reply == QtWidgets.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n \"\"\"\n\n def keyPressEvent(self, e):\n print('Pressed Key:', e.key())\n if e.key() == QtCore.Qt.Key_Space:\n if self.player.state() == QtMultimedia.QMediaPlayer.PausedState:\n self.player.play()\n elif self.player.state() == QtMultimedia.QMediaPlayer.PlayingState:\n self.player.pause()\n elif e.key() == QtCore.Qt.Key_Escape:\n if self.isFullScreen():\n self.showNormal()\n self.slider.show()\n elif e.key() == QtCore.Qt.Key_Enter or e.key() == QtCore.Qt.Key_Return:\n if not self.isFullScreen():\n self.showFullScreen()\n self.slider.hide()\n elif e.key() == QtCore.Qt.Key_Left:\n self.slider.setValue(self.slider.value() + 1000)\n self.slider.handle_slider_released()\n elif e.key == QtCore.Qt.Key_Right:\n pass\n\n\nclass PlayScreen(QtMultimediaWidgets.QVideoWidget):\n def __init__(self, parent=None):\n super(PlayScreen, self).__init__()\n self.parent = parent\n\n def mouseDoubleClickEvent(self, e):\n if e.button() == QtCore.Qt.LeftButton:\n if self.parent.isFullScreen():\n self.parent.showNormal()\n self.parent.slider.show()\n else:\n self.parent.showFullScreen()\n self.parent.slider.hide()\n\n def mouseReleaseEvent(self, e):\n if e.button() == QtCore.Qt.LeftButton:\n if self.parent.player.state() == QtMultimedia.QMediaPlayer.PlayingState:\n self.parent.player.pause()\n elif self.parent.player.state() == QtMultimedia.QMediaPlayer.PausedState:\n self.parent.player.play()\n\n\nclass Player(QtMultimedia.QMediaPlayer):\n def __init__(self, parent=None):\n super(Player, self).__init__()\n self.setObjectName('player')\n self.parent = parent\n self.playWidgets = self.parent.media_screen\n media_path = os.getcwd() + '\\\\test1.mp4'\n file = QtCore.QFile(media_path)\n flag = file.open(QtCore.QIODevice.ReadOnly)\n print(\"flag:\", flag)\n if not flag:\n print(\"Could not open file\")\n path = file.fileName()\n url = QtCore.QUrl.fromLocalFile(path)\n\n content = QtMultimedia.QMediaContent(url)\n # 输出位置必须在setMedia前面\n self.setVideoOutput(self.parent.media_screen)\n self.setMedia(content)\n print('时长:', self.duration())\n\n def set_media_position(self, p_int):\n \"\"\"重新定位媒体播放位置的槽\"\"\"\n self.setPosition(p_int)\n\n def metaDataPrint(self):\n print('歌名:', self.metaData('Title'))\n print('作者:', self.metaData('Author'))\n print('Date:', self.metaData('Date'))\n print('SampleRate:', self.metaData('SampleRate'))\n print('ChannelCount:', self.metaData('ChannelCount'))\n print('AudioCodec:', self.metaData('AudioCodec'))\n print('TrackNumber:', self.metaData('TrackNumber'))\n print('AudioBitRate:', self.metaData('AudioBitRate'))\n print('size:', self.metaData('Size'))\n print('Resolution', self.metaData('Resolution'))\n print('duration', self.metaData('Duration'))\n\n size = self.metaData('Resolution')\n print('长:', size.height())\n print('宽:', size.width())\n\n screen = QtWidgets.QDesktopWidget()\n\n # self.parent.media_screen.move((screen.size().width() - size.width()) / 2, (screen.size().height() / 2 - size.height()) / 2)\n self.parent.setGeometry((screen.size().width() - size.width()) / 2,\n 
(screen.size().height() / 2 - (size.height() + self.parent.slider.height())) / 2,\n size.width(), size.height() + self.parent.slider.height())\n self.parent.media_screen.resize(size.width(), size.height())\n\n self.parent.slider.setMaximum(self.metaData('Duration'))\n\n self.parent.show()\n\n def handleError(self, error):\n print(error)\n\n def closeProgram(self):\n sys.exit()\n\n def mediaChangedSlot(self, status):\n if status == 7:\n # self.play()\n pass\n\n\nclass Slider(QtWidgets.QSlider):\n def __init__(self, parent=None):\n super(Slider, self).__init__(QtCore.Qt.Horizontal, parent)\n self.parent = parent\n self.setMouseTracking(True)\n self.setFixedHeight(8)\n # 设置开启鼠标点击事件\n self.setSliderDown(True)\n self.sliderReleased.connect(self.handle_slider_released)\n # self.valueChanged.connect(self.valueChanged)\n self.setStyleSheet('''\n QSlider::add-page:Horizontal\n {\n background-color: rgb(87, 97, 106);\n height:8px;\n }\n QSlider::sub-page:Horizontal\n {\n background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(80,231,149, 255), stop:1 rgba(7,208,255, 255));\n height:8px;\n }\n QSlider::groove:Horizontal\n {\n background:transparent;\n height:8px;\n }\n QSlider::handle:Horizontal\n {\n height: 12px;\n width:8px;\n color: green;\n margin: -8 0px;\n }\n ''')\n\n def set_slider_position(self, value=0):\n self.setSliderPosition(value)\n # self.setValue(value)\n\n def handle_slider_pressed(self):\n pass\n\n def handle_slider_released(self):\n # 拖动滑块发射重新定位媒体播放位置信号\n self.parent._slider_draged.emit(self.value())\n\n def mouseMoveEvent(self, e):\n pass\n\n def mouseReleaseEvent(self, event):\n self.parent.player.setPosition(round(self.maximum() / self.width() * event.x()))\n\n\nif __name__ == '__main__':\n print('path:', os.getcwd())\n app = QtWidgets.QApplication(sys.argv)\n wid = MediaPlayer()\n wid.player.play()\n sys.exit(app.exec_())\n","sub_path":"media_player_v2.py","file_name":"media_player_v2.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"346097972","text":"from tkinter import *\nfrom tkinter import PhotoImage\nfrom tkinter import scrolledtext\nfrom tkinter import filedialog\nimport os\nfrom back import get_random_files, copy_and_paste\nfrom tkinter import messagebox as mb\n\nwindow = Tk()\nwindow.geometry('800x600')\nwindow.title(\"Рандомайзер файлов\")\nwindow.resizable(0, 0)\n\nfolder_from_path = ''\nfolder_to_path = ''\n\n\n\nt_folder_from_path = StringVar()\nt_folder_to_path = StringVar()\nstr_type = StringVar()\nstr_count = StringVar()\nstr_max = StringVar()\nmax_count = StringVar()\n\ndef browse_from_fold():\n # Allow user to select a directory and store it in global var\n # called folder_path\n global folder_from_path, t_folder_from_path\n filename = filedialog.askdirectory()\n t_folder_from_path.set(filename)\n folder_from_path = filename\n print(filename)\n\ndef browse_to_fold():\n # Allow user to select a directory and store it in global var\n # called folder_path\n global folder_to_path, t_folder_to_path\n filename = filedialog.askdirectory()\n t_folder_to_path.set(filename)\n folder_to_path = filename\n print(filename)\n\ndef lets_go():\n if t_folder_from_path.get() == '' or t_folder_to_path.get() == '':\n mb.showerror(\"Ошибка\", 'Не прописаны пути')\n return\n try:\n max = int(max_count.get())\n except:\n mb.showerror(\"Ошибка\", 'Нажмите на кнопку \"Обновить окна\"')\n return\n if int(max_count.get()) == 0:\n mb.showerror(\"Ошибка\", 'Нет 
файлов')\n return\n type = str(entry_type.get())\n try:\n count = int(str_count.get())\n except:\n mb.showerror(\"Ошибка\", 'Неправильно указано количество')\n return\n if count == 0:\n mb.showerror(\"Ошибка\", 'Введен 0 файлов')\n return\n folder_from = t_folder_from_path.get() + '/'\n folder_to = t_folder_to_path.get()+ '/'\n\n if count > max:\n mb.showerror(\"Ошибка\", \"Перевышено максимальное число\")\n return\n try:\n files_name_done = get_random_files(folder_from, type, count)\n copy_and_paste(folder_from, folder_to, files_name_done)\n except:\n mb.showerror(\"Ошибка\", \"Непредвиденная ошибка\")\n return\n mb.showinfo(\"\", \"Зделано\")\n print_to_screens()\n\ndef print_to_screens():\n global all_from_f, all_to_f\n path_from = str(folder_from_path)\n path_to = str(folder_to_path)\n try:\n all_from_f = os.listdir(path_from)\n all_to_f = os.listdir(path_to)\n except FileNotFoundError:\n mb.showerror(\"Ошибка\", 'Не удается найти указанный путь')\n return\n all_from_f = list(filter(lambda x: x.endswith(str(entry_type.get())), all_from_f))\n all_to_f = list(filter(lambda x: x.endswith(str(entry_type.get())), all_to_f))\n txt_in.configure(state ='normal')\n txt_out.configure(state ='normal')\n txt_in.delete(1.0, END)\n txt_out.delete(1.0, END)\n txt_in.insert(INSERT, '\\n'.join(all_from_f))\n txt_out.insert(INSERT, '\\n'.join(all_to_f))\n txt_in.configure(state ='disabled')\n txt_out.configure(state ='disabled')\n max_count.set(len(all_from_f))\n\n\n\npath_in = Entry(window, width=45, textvariable=t_folder_from_path)\npath_out = Entry(window, width=45, textvariable=t_folder_to_path)\n\npath_in.place(x=28, y=10, height=25)\npath_out.place(x=421, y=10, height=25)\n\nbtn_in = Button(window, text=\"Выбор папки\", command=browse_from_fold)\nbtn_out = Button(window, text=\"Выбор папки\", command=browse_to_fold)\nbtn_in.place(x=298, y=10)\nbtn_out.place(x=690, y=10)\n\ntxt_in = scrolledtext.ScrolledText(window, width=40, height=28)\ntxt_out = scrolledtext.ScrolledText(window, width=40, height=28)\ntxt_in.place(x=22, y=60)\ntxt_out.place(x=448, y=60)\n\nbtn_check = Button(window, text=\"Обновить окна\", width=15, height=2, command=print_to_screens)\nentry_type = Entry(window, width=15, textvariable=str_type)\nentry_count = Entry(window, width=15, textvariable=str_count)\ntype_label = Label(text=\"Тип файла:\")\ncount_label = Label(text=\"Количество:\")\nmax_label = Label(text=\"MAX:\")\nmax_int_label = Label(textvariable=max_count)\nbtn_go = Button(window, text=\"RANDOM IT!\", width=15, height=2, command=lets_go)\n\nbtn_check.place(x=350, y=523)\nentry_type.place(x=110, y=520, height=25)\nentry_count.place(x=110, y=540, height=25)\ntype_label.place(x=35, y=523)\ncount_label.place(x=35, y=543)\nmax_label.place(x=35, y =563)\nmax_int_label.place(x=110, y=563)\nbtn_go.place(x=660, y=523)\n\nentry_type.insert(0, '.txt')\nentry_count.insert(0, '0')\nwindow.mainloop()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"596538051","text":"\nfrom copy import copy\n\nfrom pypika import Table, JoinType\nfrom pypika.queries import QueryBuilder\nfrom pypika.terms import ArithmeticExpression, Negative, ValueWrapper\nfrom pypika.terms import Field as PyPikaField, Function as PyPikaFunction, Term as PyPikaTerm\n\nfrom tortoise.constants import LOOKUP_SEP\nfrom tortoise.exceptions import FieldError, UnknownFieldError, NotARelationFieldError\nfrom tortoise.fields import 
Field, RelationField, JSONField\nfrom tortoise.fields.relational import JoinData\nfrom typing import Dict, List, Optional, Tuple, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from tortoise.models import MODEL\n from tortoise.query.base import AwaitableStatement\n\n\nclass QueryContextItem:\n def __init__(self, model: Type[\"MODEL\"], table: Table, through_tables: Optional[Dict[str, Table]] = None) -> None:\n self.model = model\n self.table = table\n self.through_tables = through_tables or {}\n\n\nclass QueryContext:\n def __init__(self, query: QueryBuilder, parent_context: Optional[\"QueryContext\"] = None) -> None:\n self.query: QueryBuilder = query\n self.stack: List[QueryContextItem] = parent_context.stack.copy() if parent_context else []\n\n def push(self, model, table, through_tables: Optional[Dict[str, Table]] = None) -> \"QueryContext\":\n self.stack.append(QueryContextItem(model, table, through_tables))\n return self\n\n def pop(self) -> QueryContextItem:\n return self.stack.pop()\n\n @property\n def top(self) -> QueryContextItem:\n return self.stack[-1]\n\n @property\n def alias(self) -> Optional[str]:\n return \"U{}\".format(len(self.stack)) if self.stack else None\n\n def join_table_by_field(self, table, relation_field: RelationField, full=True) -> Optional[JoinData]:\n \"\"\"\n :param table:\n :param relation_field:\n :param full: If needed to join fully, or only to the point where primary key of the relation is available.\n For example for ForeignKey and OneToOneField, when full is False, not joins is needed.\n Also for ManyToManyField, when full is False, only the through table is needed to be joined\n :return: related_table\n \"\"\"\n\n joins = relation_field.get_joins(table, full)\n if joins:\n for join in joins:\n if not self.query.is_joined(join.table):\n self.query = self.query.join(join.table, how=JoinType.left_outer).on(join.criterion)\n\n return joins[-1]\n\n else:\n return None\n\n def resolve_select_related(self, related_map: Dict[str, Dict]) -> None:\n \"\"\"\n This method goes hand in hand with Model._init_from_db_row(row_iter, related_map)\n where this method created a selections columns to be queried, and _init_from_db_row\n follows the same path to \"pickup\" those columns to recreate the model object\n\n :param context: Query Context\n :param related_map: Map of pre-selected relations\n :return: None\n \"\"\"\n\n model = self.top.model\n table = self.top.table\n\n for field_name in related_map:\n field_object = model._meta.fields_map[field_name]\n join_data = self.join_table_by_field(table, field_object)\n remote_table = join_data.table\n\n cols = [remote_table[col] for col in field_object.remote_model._meta.db_columns]\n self.query = self.query.select(*cols)\n if related_map[field_name]:\n self.push(join_data.model, join_data.table)\n self.resolve_select_related(related_map[field_name])\n self.pop()\n\n def resolve_field_name(\n self,\n field_name,\n queryset: \"AwaitableStatement[MODEL]\",\n accept_relation: bool,\n check_annotations=True,\n expand_annotation=True) -> Tuple[Optional[Field], PyPikaField]:\n\n #\n # When expand_annotation is False, we need to make sure the annotation\n # will show up (will be expanded) in the final query, since we are just\n # referring to it here.\n #\n\n if check_annotations and field_name in queryset.annotations:\n if expand_annotation:\n return None, queryset.annotations[field_name].field\n else:\n return None, PyPikaField(field_name)\n\n model = self.top.model\n table = self.top.table\n\n if field_name == \"pk\":\n 
field_name = model._meta.pk_attr\n\n relation_field_name, _, field_sub = field_name.partition(LOOKUP_SEP)\n relation_field = model._meta.fields_map.get(relation_field_name)\n if not relation_field:\n raise UnknownFieldError(relation_field_name, model)\n\n if isinstance(relation_field, RelationField):\n if field_sub:\n join_data = self.join_table_by_field(table, relation_field)\n\n self.push(join_data.model, join_data.table)\n (field_object, pypika_field) = self.resolve_field_name(\n field_sub, queryset, accept_relation, check_annotations=False)\n\n self.pop()\n return field_object, pypika_field\n\n elif accept_relation:\n join_data = self.join_table_by_field(table, relation_field, full=False)\n if join_data:\n return join_data.field_object, join_data.pypika_field\n\n else:\n # this can happen only when relation_field is instance of ForeignKey or OneToOneField\n field_object = model._meta.fields_map[relation_field.id_field_name]\n pypika_field = table[field_object.db_column]\n return field_object, pypika_field\n\n else:\n raise FieldError(\"{} is a relation. Try a nested field of the related model\".format(relation_field_name))\n\n else:\n if field_sub:\n if isinstance(relation_field, JSONField):\n path = \"{{{}}}\".format(field_sub.replace(LOOKUP_SEP, ','))\n return None, table[relation_field.db_column].get_path_json_value(path)\n\n raise NotARelationFieldError(relation_field_name, model)\n\n field_object = relation_field\n pypika_field = table[field_object.db_column]\n func = field_object.get_for_dialect(\"function_cast\")\n if func:\n pypika_field = func(pypika_field)\n\n return field_object, pypika_field\n\n def resolve_term(self, term: PyPikaTerm, queryset: \"AwaitableStatement[MODEL]\",\n accept_relation: bool) -> Tuple[Optional[Field], PyPikaTerm]:\n\n if isinstance(term, ArithmeticExpression):\n pypika_term = copy(term)\n field_left, pypika_term.left = self.resolve_term(term.left, queryset, accept_relation)\n field_right, pypika_term.right = self.resolve_term(term.right, queryset, accept_relation)\n field = field_left or field_right\n\n return field, pypika_term\n\n if isinstance(term, PyPikaFunction):\n #\n # There are two options, either resolve all function args, like below,\n # in this case either all the string params are expected to be references\n # to model fields, and hence something like `Coalesce(\"desc\", \"demo\")`\n # will raise FieldError if `demo` is not a model field. 
Now a reasonable solution\n # might be to allow unresolvable strings as is, without raising exceptions,\n # but that also has other undesired implication.\n #\n # term_new_args = []\n # field = None\n #\n # for arg in term.args:\n # term_field, term_arg = resolve_term(arg, queryset, context)\n # term_new_args.append(term_arg)\n # field = field or term_field\n #\n # term.args = term_new_args\n # return field, term\n #\n # Another solution is allow on the the first parameter of the function to be\n # a field reference as we do here:\n #\n\n pypika_term = copy(term)\n field = None\n if len(term.args) > 0:\n pypika_term.args = copy(term.args)\n field, pypika_term.args[0] = self.resolve_term(term.args[0], queryset, accept_relation)\n\n return field, pypika_term\n\n elif isinstance(term, Negative):\n pypika_term = copy(term)\n field, pypika_term.term = self.resolve_term(term.term, queryset, accept_relation)\n return field, pypika_term\n\n elif isinstance(term, ValueWrapper):\n if isinstance(term.value, str):\n return self.resolve_field_name(term.value, queryset, accept_relation)\n\n return None, term\n\n raise FieldError(f\"Unresolvable term: {term}\")\n","sub_path":"tortoise/query/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"60065630","text":"\"\"\"\nLC505 the maze II\nThere is a ball in a maze with empty spaces and walls. The ball can go through empty spaces by rolling up, down, left or right, but it won't stop rolling until hitting a wall. When the ball stops, it could choose the next direction.\n\nGiven the ball's start position, the destination and the maze, find the shortest distance for the ball to stop at the destination. The distance is defined by the number of empty spaces traveled by the ball from the start position (excluded) to the destination (included). If the ball cannot stop at the destination, return -1.\n\nThe maze is represented by a binary 2D array. 1 means the wall and 0 means the empty space. You may assume that the borders of the maze are all walls. 
The start and destination coordinates are represented by row and column indexes.\n\n \n\nExample 1:\n\nInput 1: a maze represented by a 2D array\n\n0 0 1 0 0\n0 0 0 0 0\n0 0 0 1 0\n1 1 0 1 1\n0 0 0 0 0\n\nInput 2: start coordinate (rowStart, colStart) = (0, 4)\nInput 3: destination coordinate (rowDest, colDest) = (4, 4)\n\nOutput: 12\n\nExplanation: One shortest way is : left -> down -> left -> down -> right -> down -> right.\n             The total distance is 1 + 1 + 3 + 1 + 2 + 2 + 2 = 12.\n\"\"\"\nimport heapq\nfrom typing import List\n\n\n# Dijkstra's algorithm\n# Runtime: 304 ms, faster than 98.25% of Python3 online submissions for The Maze II.\n# Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for The Maze II.\nclass Solution:\n    def shortestDistance(self, maze: List[List[int]], start: List[int], destination: List[int]) -> int:\n        dist_mat = [[float('inf')] * len(maze[0]) for _ in range(len(maze))]\n        dist_mat[start[0]][start[1]] = 0\n        self.dijkstra(maze, start, destination, dist_mat)\n        return dist_mat[destination[0]][destination[1]] if dist_mat[destination[0]][destination[1]] != float('inf') else -1\n    \n    def dijkstra(self, maze, start, destination, dist_mat):\n        pq = []\n        heapq.heappush(pq, (0, start[0], start[1]))\n        while pq:\n            dist, i, j = heapq.heappop(pq)\n            for d_x, d_y in [(-1,0),(1,0),(0,-1),(0,1)]:\n                # roll the ball in this direction until it hits a wall, counting traveled cells\n                new_i, new_j, count = i, j, 0\n                while 0 <= new_i+d_x < len(maze) and 0 <= new_j+d_y < len(maze[0]) and maze[new_i+d_x][new_j+d_y] == 0:\n                    new_i += d_x\n                    new_j += d_y\n                    count += 1\n                new_dist = dist + count\n                if dist_mat[new_i][new_j] > new_dist:\n                    dist_mat[new_i][new_j] = new_dist\n                    if new_i == destination[0] and new_j == destination[1]:\n                        return dist_mat[new_i][new_j]\n                    if dist_mat[destination[0]][destination[1]] <= new_dist:\n                        continue\n                    heapq.heappush(pq, (new_dist, new_i, new_j))","sub_path":"Widen/LC505_the_maze_II.py","file_name":"LC505_the_maze_II.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75927966","text":"import sys\nimport hmac, hashlib\nfrom email import utils\nfrom datetime import datetime\nimport time\nimport httplib, urllib\nimport json\n\n\nclass Response:\n\t\n\tdef __init__(self, data, httpCode, error):\n\t\tself._data = data\n\t\tself._httpCode = httpCode\n\t\tself._error = error\n\n\tdef getData(self):\n\t\treturn self._data\n\n\tdef getError(self):\n\t\treturn self._error\n\n\tdef getHttpCode(self):\n\t\treturn self._httpCode\n\nclass MerchantAPI:\n\t\n\tMETHOD_GET = 'GET'\n\tMETHOD_POST = 'POST'\n\tMETHOD_PUT = 'PUT'\n\tMETHOD_DELETE = 'DELETE'\n\n\tSTATUS_OPENED = 'opened'\n\tSTATUS_CANCELED = 'canceled'\n\tSTATUS_REJECTED = 'rejected'\n\tSTATUS_CONFIRMED = 'confirmed'\n\tSTATUS_ANNULED = 'annuled'\n\tSTATUS_INVALID = 'invalid'\n\tSTATUS_FAKED = 'faked'\n \n\t## \n\t# @param $host Wikimart merchant API host\n\t# @param $appID Access identifier\n\t# @param $appSecret Secret key\n\tdef __init__(self, host, appID, appSecret):\n\t\tself.host = host\n\t\tself.accessId = appID\n\t\tself.appSecret = appSecret\n\n\t\n\tdef _api(self, uri, method, body=None):\n\t\tif not isinstance(uri, str): \n\t\t\traise ValueError('Argument \\'uri\\' must be string')\n\n\t\tif not isinstance(method, str):\n\t\t\traise ValueError('Argument \\'method\\' must be string')\n\n\t\tvalid_method = [self.METHOD_GET, self.METHOD_POST, self.METHOD_PUT, self.METHOD_DELETE]\n\n\t\tif method not in valid_method:\n\t\t\traise ValueError('Valid values for argument \\'method\\' is: %s' % \", \".join(valid_method))\n\n\t\tif body is not None and not isinstance(body, str):\n\t\t\traise ValueError('Argument \\'body\\' must be string')\n\n\t\tdate = datetime.now()\n\t\tdtuple = 
date.timetuple()\n\t\tdtimestamp = time.mktime(dtuple)\n\t\t\n\t\tconnect = httplib.HTTPConnection(self.host)\n\t\theader = {'Accept': 'application/json', \\\n\t\t\t'X-WM-Date': utils.formatdate(dtimestamp), \\\n\t\t\t'X-WM-Authentication': \"%s:%s\" % (self.accessId, self._generateSignature(uri, method, date, body))}\n\t\tif method == self.METHOD_GET or method == self.METHOD_DELETE:\n\t\t\ttry:\n\t\t\t\tconnect.request(method, uri, headers=header)\n\t\t\t\tresp = connect.getresponse()\n\t\t\texcept Exception:\n\t\t\t\traise Exception('Can\\'t get response')\n\t\telif method == self.METHOD_PUT or method == self.METHOD_POST:\n\t\t\ttry:\n\t\t\t\tdata = body\n\t\t\t\tconnect.request(method, uri, data, header)\n\t\t\t\tresp = connect.getresponse()\n\t\t\texcept Exception:\n\t\t\t\traise Exception('Can\\'t get response')\n\n\t\tdata = resp.read()\n\n\t\ttry:\n\t\t\tdecoded = json.loads(data)\n\t\texcept Exception:\n\t\t\tdecoded = data\n\n\t\terror = None\n\t\tif resp.status != 200:\n\t\t\tif isinstance(decoded, dict) and ('message' in decoded):\n\t\t\t\terror = decoded['message']\n\t\tresponse = Response(decoded, resp.status, error)\n\t\treturn response\n\t\n\tdef _generateSignature(self, uri, method, date, body=None):\n\t\tif isinstance(date, datetime):\n\t\t\tdtuple = date.timetuple()\n\t\t\tdtimestamp = time.mktime(dtuple)\n\t\t\tdate = utils.formatdate(dtimestamp)\n\t\tmd5_body = hashlib.new(\"md5\")\n\t\tif body is None: body = \"\"\n\t\tmd5_body.update(body)\n\t\tstr_to_hash = method + \"\\n\" \\\n\t\t\t\t\t + md5_body.hexdigest() + \"\\n\" \\\n\t\t\t\t\t + \"%s\" % date + \"\\n\" \\\n\t\t\t\t\t + uri\n\t\t# sign the canonical string with the application secret as the HMAC key\n\t\treturn hmac.new(self.appSecret, str_to_hash).hexdigest()\n\t\n\t## Retrieve information about an order\n\t# @param \torderID\tOrder identifier\n\t#\n\t# @return \tmerchantapi-client.Response\n\t# @throws \tValueError\n\tdef methodGetOrder(self, orderID):\n\t\tif not isinstance(orderID, int):\n\t\t\traise ValueError('Argument \\'orderID\\' must be integer')\n\t\treturn self._api(\"/api/1.0/orders/{orderID}\".format(orderID=orderID), self.METHOD_GET)\n\n\t## Retrieve the list of orders\n\t# @param count Number of orders returned per \"page\"\n\t# @param page Page number (starting from 1)\n\t# @param status Status filter. 
Allowed values: opened, canceled, rejected, confirmed,\n\t# annuled, invalid, faked\n\t# @param transitionDateFrom Start of the order status transition time range\n\t# @param transitionDateTo End of the order status transition time range\n\t# @param transitionStatus \tStatus the order transitioned into (same allowed values as status)\n\t#\n\t# @return \tmerchantapi-client.Response\n\t# @throws \tValueError\n\tdef methodGetOrderList(self, count, page, status=None, transitionDateFrom=None, transitionDateTo=None, transitionStatus=None):\n\t\tparams = {}\n\t\tif not isinstance(count, int):\n\t\t\traise ValueError('Argument \\'count\\' must be integer')\n\t\telse:\n\t\t\tparams['pageSize'] = count\n\n\t\tif not isinstance(page, int):\n\t\t\traise ValueError('Argument \\'page\\' must be integer')\n\t\telse:\n\t\t\tparams['page'] = page\n\t\tvalidStatuses = ['opened', 'canceled', 'rejected', 'confirmed', 'annuled', 'invalid', 'faked']\n\t\tif status is not None:\n\t\t\tif status not in validStatuses:\n\t\t\t\traise ValueError('Valid values for argument \\'status\\' is: '+ ', '.join(validStatuses))\n\t\t\telse:\n\t\t\t\tparams['status'] = status\n\n\t\tif transitionDateFrom is not None:\n\t\t\tdtuple = transitionDateFrom.timetuple()\n\t\t\tdtimestamp = time.mktime(dtuple)\n\t\t\tparams['transitionDateFrom'] = utils.formatdate(dtimestamp)\n\n\t\tif transitionDateTo is not None:\n\t\t\tdtuple = transitionDateTo.timetuple()\n\t\t\tdtimestamp = time.mktime(dtuple)\n\t\t\tparams['transitionDateTo'] = utils.formatdate(dtimestamp)\n\t\tif transitionStatus is not None:\n\t\t\tif transitionStatus not in validStatuses:\n\t\t\t\traise ValueError('Valid values for argument \\'transitionStatus\\' is: '+ ', '.join(validStatuses))\n\t\t\telse:\n\t\t\t\tparams['transitionStatus'] = transitionStatus\n\t\treturn self._api(\"/api/1.0/orders?\" + urllib.urlencode(params), self.METHOD_GET)\n\t\n\t## Retrieve the list of reasons for changing an order status\n\t# @param \torderID Order identifier\n\t#\n\t# @return \tmerchantapi-client.Response\n\t# @throws \tValueError\n\tdef methodGetOrderStatusReasons(self, orderID):\n\t\tif not isinstance(orderID, int):\n\t\t\traise ValueError('Argument \\'orderID\\' must be integer')\n\t\treturn self._api(\"/api/1.0/orders/{orderID}/transitions\".format(orderID=orderID), self.METHOD_GET)\n\n\t## Change an order's status\n\t# @param \torderID Order identifier\n\t# @param \tstatus \t Status to set\n\t# @param \treasonID Identifier of the reason for the status change\n\t# @param \tcomment Comment accompanying the status change\n\t#\n\t# @return \tmerchantapi-client.Response\n\t# @throws \tValueError\n\tdef methodSetOrderStatus(self, orderID, status, reasonID, comment):\n\t\tif not isinstance(orderID, int):\n\t\t\traise ValueError('Argument \\'orderID\\' must be integer')\n\t\tvalidStatuses = ['opened', 'canceled', 'rejected', 'confirmed', 'annuled', 'invalid', 'faked']\n\t\tif status not in validStatuses:\n\t\t\traise ValueError('Valid values for argument \\'status\\' is: '+ ', '.join(validStatuses))\n\t\tif not isinstance(reasonID, int):\n\t\t\traise ValueError('Argument \\'reasonID\\' must be integer')\n\t\tif not isinstance(comment, str):\n\t\t\traise ValueError('Argument \\'comment\\' must be string')\n\t\tput_body = {'request': {'status': status, 'reasonID': reasonID, 'comment': comment}}\n\n\t\treturn self._api('/api/1.0/orders/{orderID}/transitions'.format(orderID=orderID), self.METHOD_PUT, json.dumps(put_body))\n\t\n\t## Retrieve the history of an order's status changes\n\t# @param \torderID Order identifier\n\t#\n\t# @return 
\tmerchantapi-client.Response\n\t# @throws \tValueError\n\tdef methodGetOrderStatusHistory(self, orderID):\n\t\tif not isinstance(orderID, int):\n\t\t\traise ValueError('Argument \\'orderID\\' must be integer')\n\t\treturn self._api(\"/api/1.0/orders/{orderID}/statuses\".format(orderID=orderID), self.METHOD_GET)\n\n\t\t\n\n","sub_path":"merchantapi_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"613584932","text":"'''\r\n\r\nTest Delete VIP before Attach its EIP to any VM\r\n\r\nTest step:\r\n    1. Create a VM\r\n    2. Create an EIP without any VM.\r\n    3. Check the EIP connectivity\r\n    4. Check EIP\r\n    5. Destroy VM\r\n\r\n@author: Youyk\r\n'''\r\nimport zstackwoodpecker.test_util as test_util\r\nimport zstackwoodpecker.test_lib as test_lib\r\nimport zstackwoodpecker.test_state as test_state\r\nimport os\r\n\r\ntest_stub = test_lib.lib_get_test_stub()\r\ntest_obj_dict = test_state.TestStateDict()\r\n\r\ndef test():\r\n    test_util.test_dsc('Create test vm with EIP and check.')\r\n    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))\r\n    test_obj_dict.add_vm(vm)\r\n    vm.check()\r\n\r\n    vm_nic = vm.vm.vmNics[0]\r\n    vm_nic_uuid = vm_nic.uuid\r\n    pri_l3_uuid = vm_nic.l3NetworkUuid\r\n    vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]\r\n    vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)\r\n    l3_uuid = vr_pub_nic.l3NetworkUuid\r\n    vip = test_stub.create_vip('delete_vip_before_attach_eip_test', l3_uuid)\r\n    test_obj_dict.add_vip(vip)\r\n\r\n    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid)\r\n    vip.attach_eip(eip)\r\n    vip.check()\r\n\r\n    vip.delete()\r\n    test_obj_dict.rm_vip(vip)\r\n    vm.destroy()\r\n    test_obj_dict.rm_vm(vm)\r\n    test_util.test_pass('Delete VIP before Attach EIP to Any VM Success')\r\n\r\n#Will be called only if exception happens in test().\r\ndef error_cleanup():\r\n    global test_obj_dict\r\n    test_lib.lib_error_cleanup(test_obj_dict)\r\n","sub_path":"integrationtest/vm/virtualrouter/eip/test_del_vip_before_add_eip_to_vm.py","file_name":"test_del_vip_before_add_eip_to_vm.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"602571124","text":"import os\nimport json\nfrom exception import NameNotFoundError\nfrom abstracts.factory import IDatabaseFactory, IDatabase  # assumption: IDatabase is exported alongside IDatabaseFactory\nfrom database import ConnectConfig, OracleDatabase, PostgresDatabase\n\n\nclass DatabaseFactory(IDatabaseFactory):\n\n    def __init__(self, config_filename=\"\"):\n        self.__config_filename = config_filename\n        self.__config = None\n        self.__init_config()\n\n    def __init_config(self):\n        with open(self.__config_filename, \"r\", encoding=\"utf-8\") as f:\n            self.__config = json.load(f)\n\n    def get_database(self, db_type=\"ORACLE\", db_name=\"\") -> IDatabase:\n        try:\n            if not self.__config:\n                return None\n\n            conn_config = ConnectConfig(**self.__config[db_type][db_name])\n            db = OracleDatabase(conn_config) if db_type == \"ORACLE\" else PostgresDatabase(conn_config)\n\n            return db\n\n        except KeyError as e:\n            raise NameNotFoundError(e)\n\n        except Exception as e:\n            raise e","sub_path":"factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"49912303","text":"import os,sys,random,time\r\n\r\nclass Question:\r\n    def __init__(self,q,r):\r\n        self.Q = q\r\n        self.R = 
r\r\nclass Response:\r\n def __init__(self,response_type):\r\n self.Type = response_type\r\n self.ResponseList = list()\r\n def GetType(self):\r\n return self.Type\r\n def AddResponse(self,response):\r\n self.ResponseList.append(response.lower())\r\n def AddResponses(self,*args):\r\n for a in args:\r\n self.AddResponse(a)\r\n def GetList(self):\r\n return self.ResponseList\r\n def IsCorrect(self,r):\r\n if r in self.ResponseList:\r\n return True\r\n else:\r\n return False\r\n def PrintResponses(self):\r\n for i,r in enumerate(self.ResponseList):\r\n print(str(i)+\": \"+r)\r\nclass Responses:\r\n def __init__(self):\r\n self.ResponseList = list()\r\n def AddResponseType(self,t):\r\n self.ResponseList.append(t)\r\n def GetResponseList(self):\r\n return self.ResponseList\r\n def GetResponse(self,response_type):\r\n for r in self.ResponseList:\r\n if r.GetType() == response_type:\r\n return r\r\n return None\r\n def IsCorrectResponse(self,t,resp):\r\n Resp = self.GetResponse(t)\r\n if Resp != None:\r\n if resp in Resp.GetList():\r\n return True\r\n return False\r\nclass Animal:\r\n def __init__(self,animal_name):\r\n self.Animal_Name = animal_name\r\n self.QuestionList = list()\r\n def GetName(self):\r\n return self.Animal_Name\r\n def SetName(self,newname):\r\n self.Name = newname\r\n def AddQuestion(self,q,r):\r\n self.QuestionList.append(Question(q,r))\r\n def GetQuestionList(self):\r\n return self.QuestionList\r\n def FilterQuestions(self,t):\r\n Filtered_List = list()\r\n for q in self.QuestionList:\r\n if q.R.GetType() == t:\r\n Filtered_List.append(q)\r\n return Filtered_List\r\nclass Animals:\r\n def __init__(self):\r\n self.AnimalList = list()\r\n def AddAnimal(self,anim):\r\n self.AnimalList.append(anim)\r\n def GetAnimal(self,ind):\r\n return self.AnimalList[ind]\r\n def GetRandomAnimal(self):\r\n return random.choice(self.AnimalList)\r\n def GetAnimalByName(self,name):\r\n for a in self.AnimalList:\r\n if a.GetName()==name:\r\n return a\r\n return None\r\n \r\nAnimal_List = Animals()\r\nResponse_List = Responses()\r\nResponse_Pos = Response(\"[positive]\")\r\nResponse_Pos.AddResponses(\"yes\",\"positive\",\"pos\",\"yar\",\"ya\",\"y\")\r\nResponse_Neg = Response(\"[negative]\")\r\nResponse_Neg.AddResponses(\"no\",\"negative\",\"neg\",\"nein\",\"n\",\"na\")\r\nResponse_List.AddResponseType(Response_Pos)\r\nResponse_List.AddResponseType(Response_Neg)\r\nAnimalElephant = Animal(\"elephant\")\r\nAnimalElephant.AddQuestion(\"does it have a trunk\",Response_Pos)\r\nAnimalElephant.AddQuestion(\"does it purr\",Response_Neg)\r\nAnimalCat = Animal(\"cat\")\r\nAnimalCat.AddQuestion(\"does it have a trunk\",Response_Neg)\r\nAnimalCat.AddQuestion(\"does it purr\",Response_Pos)\r\nAnimalDog = Animal(\"dog\")\r\nAnimalDog.AddQuestion(\"does it bark\",Response_Pos)\r\nAnimalDog.AddQuestion(\"does it have a trunk\",Response_Neg)\r\nAnimalDog.AddQuestion(\"does it purr\",Response_Neg)\r\nAnimal_List.AddAnimal(AnimalElephant)\r\nAnimal_List.AddAnimal(AnimalCat)\r\nAnimal_List.AddAnimal(AnimalDog)\r\n\r\nLastAnimal = None\r\nRandomAnimal = Animal_List.GetRandomAnimal()\r\nwhile True:\r\n while RandomAnimal == LastAnimal:\r\n RandomAnimal = Animal_List.GetRandomAnimal()\r\n print(\"Think of an animal.\")\r\n print(\"Is it %s?\" % RandomAnimal.GetName())\r\n Main_Answer = input(\"> \")\r\n if Response_List.IsCorrectResponse(\"[positive]\",Main_Answer.lower()):\r\n print(\"Well...\\nGood for you\")\r\n elif Response_List.IsCorrectResponse(\"[negative]\",Main_Answer.lower()):\r\n print(\"What is it?\")\r\n 
AnimalResp = input(\"> \")\r\n if AnimalResp.lower() == RandomAnimal.GetName():\r\n print(\"Do you not understand the concept of \\\"is it\\\"\")\r\n NewAnimal = Animal_List.GetAnimalByName(AnimalResp.lower())\r\n if NewAnimal != None:\r\n print(\"Oh really? Well then...\")\r\n QL = NewAnimal.GetQuestionList()\r\n for q in QL:\r\n A = input(q.Q+\"?\")\r\n if q.R.IsCorrect(A):\r\n print(\"Well done...\")\r\n else:\r\n print(\"You're an idiot\")\r\n break\r\n else:\r\n print(\"Well\")\r\n time.sleep(1)\r\n LastAnimal = RandomAnimal\r\n","sub_path":"unsorted_guff/I CAN WORKING.py","file_name":"I CAN WORKING.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"64554504","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nDefinition of class OdeSolverTester to test the functionality of OdeSolver class\n@author: kashifrabbani@cs.aau.dk\n@author: imranh@cs.aau.dk\n\"\"\"\n\nimport sys\nsys.path.append('../')\nimport os\nimport time\nfrom core import *\n\nsys.path.append('../')\ndirectory = \"../result/case2/\"\n\n# Create a directory for this case in case it does not exist\nif not os.path.exists(directory):\n os.makedirs(directory)\n\n# Initializing objects for file handler and plotter classes\nfile_handler = ode_filehander.OdeFileHandler(directory)\nplotter = ode_plotter.OdePlotter(\"Case 2\")\n\nprint(\"Initiated Case # 2 ...\\n\")\n\nsol = ode_solver.OdeSolver(10, 8 / 3, 16)\nn = 10000\nt = 0.02\nprint(\"N: \" + str(n) + \", Time Interval: \" + str(t) + \", and x , y, z = 1\")\nsol.set_num_of_steps(n)\nsol.set_time_interval(t)\n\nprint(\"Simulating...\")\nst = time.time()\nresult = sol.simulator(1, 1, 1)\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 1000.0))\n\nprint(\"Writing...\")\nst = time.time()\nfile_handler.ode_solver_writer(sol.to_string(), \"ode_solver.txt\")\nfile_handler.ode_data_writer(result, \"ode_result.txt\")\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 1000.0))\n\nprint(\"Plotting 3D...\")\nst = time.time()\nplot = plotter.three_d_plot(result)\nplot_color = plotter.three_d_plot_color(result)\nfile_handler.ode_plot_figure_saver(plot, \"plot_3d.png\")\nfile_handler.ode_plot_figure_saver(plot_color, \"plot_3d_color.png\")\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 1000.0))\n\nprint(\"Plotting 2D - xy...\")\nst = time.time()\nplot = plotter.two_d_plot(result, \"xy\")\nplot_color = plotter.two_d_plot_color(result, \"xy\")\nfile_handler.ode_plot_figure_saver(plot, \"plot_2d_xy.png\")\nfile_handler.ode_plot_figure_saver(plot_color, \"plot_2d_xy_color.png\")\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 1000.0))\n\nprint(\"Plotting 2D - xz...\")\nst = time.time()\nplot = plotter.two_d_plot(result, \"xz\")\nplot_color = plotter.two_d_plot_color(result, \"xz\")\nfile_handler.ode_plot_figure_saver(plot, \"plot_2d_xz.png\")\nfile_handler.ode_plot_figure_saver(plot_color, \"plot_2d_xz_color.png\")\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 1000.0))\n\nprint(\"Plotting 2D - yz...\")\nst = time.time()\nplot = plotter.two_d_plot(result, \"yz\")\nplot_color = plotter.two_d_plot_color(result, \"yz\")\nfile_handler.ode_plot_figure_saver(plot, \"plot_2d_yz.png\")\nfile_handler.ode_plot_figure_saver(plot_color, \"plot_2d_yz_color.png\")\net = time.time()\nprint(\"Time elapsed: %0.3f ms\" % ((et - st) * 
1000.0))\n","sub_path":"test_cases/case2.py","file_name":"case2.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"15631460","text":"import sqlite3\nfrom datetime import datetime, timedelta\nimport numpy\nimport collections\n\ndef run():\n span = 5\n conn = sqlite3.connect('data/db/vanguard.db')\n c = conn.cursor()\n\n symbols = ['SPY', 'VCR', 'VDC', 'VDE', 'VFH', 'VHT', 'VIS', 'VGT', 'VAW', 'VNQ', 'VOX', 'VPU']\n sql_cmd = 'CREATE TABLE weekly_ratio (record_at datetime'\n for symbol in symbols:\n sql_cmd += ', {} real'.format(symbol)\n sql_cmd += ')'\n c.execute(sql_cmd)\n\n base_prices = numpy.array([0.0] * len(symbols))\n count = 0\n weekly_prices = collections.OrderedDict()\n guard_date = datetime.strptime('2005-01-07', '%Y-%m-%d')\n for row in c.execute(\"select * from prices where record_at > '2004-12-31' order by record_at\"):\n row_date = datetime.strptime(row[0], '%Y-%m-%d')\n base_prices += numpy.array(row[1:])\n count += 1\n if row_date > guard_date:\n weekly_prices[guard_date] = base_prices / count\n guard_date += timedelta(days=7)\n base_prices = numpy.array([0.0] * len(symbols))\n count = 0\n\n base_date, base_prices = weekly_prices.popitem(last=False)\n ratio = numpy.array([0.0] * len(symbols))\n\n for key, value in weekly_prices.items():\n ratio = value / base_prices\n sql_cmd = \"INSERT INTO weekly_ratio VALUES ('{}'\".format(key)\n for item in ratio:\n sql_cmd += \", '{}'\".format(item)\n sql_cmd += ')'\n print(sql_cmd)\n c.execute(sql_cmd)\n base_prices = value\n\n conn.commit()\n conn.close()\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"tables/weekly_ratio.py","file_name":"weekly_ratio.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"551629397","text":"import midi\nfrom utils import map_note, get_note_events\nfrom constants import *\n\nclass YaaAAASSSSchema:\n \"\"\" This schema decode is intended to be a generalized encoder/decoder from bytearrays to MIDI. While currently functional it can be \n extended in the future with additional constructors and encoders to handle arbitrary schemas. \"\"\"\n initial_note = None\n mode = None\n sequence = None\n content_notes = []\n\n def __init__(self, initial_note, mode, sequence, content_notes):\n self.initial_note = initial_note\n self.mode = mode\n self.sequence = sequence\n self.content_notes = content_notes\n\n def get_bytearray(self, scale):\n \"\"\" Returns a byte string from the schema class, ordered according to the conventions defined in constants.py \"\"\"\n li = []\n\n header = (self.initial_note << 5) | (self.mode << 2) | self.sequence\n li.append(header)\n\n for byte_index in xrange((len(self.content_notes) // 2) + (len(self.content_notes) % 2)):\n byte = (self.content_notes[2*byte_index].modal_position(scale) << 5) | (self.content_notes[2*byte_index].is_half_note << 4)\n if len(self.content_notes) > (2*byte_index + 1):\n byte |= ((self.content_notes[2*byte_index + 1].modal_position(scale) << 1) | (self.content_notes[2*byte_index + 1].is_half_note))\n\n li.append(byte)\n\n return bytearray(li)\n\n def get_midi_pattern(self):\n \"\"\" Returns a MIDI pattern according to this object's schema. See constants.py for sequences. 
\"\"\"\n intro_events = []\n content_events = []\n terminal_events = []\n\n # insert intro pad sequence from seq_index\n intro_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, INTRO_SEQUENCE[self.sequence][0]), False), 0))\n intro_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, INTRO_SEQUENCE[self.sequence][1]), True), 0))\n intro_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, INTRO_SEQUENCE[self.sequence][2]), True), 0))\n intro_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, INTRO_SEQUENCE[self.sequence][3]), False), 0))\n\n # populate content\n for note in self.content_notes:\n content_events.append(get_note_events(note, 0))\n\n # insert terminal pad sequence from seq_index\n terminal_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, TERMINAL_SEQUENCE[self.sequence][0]), False), 0))\n terminal_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, TERMINAL_SEQUENCE[self.sequence][1]), True), 0))\n terminal_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, TERMINAL_SEQUENCE[self.sequence][2]), True), 0))\n terminal_events.append(get_note_events(Note(map_note(self.initial_note, self.mode, TERMINAL_SEQUENCE[self.sequence][3]), False), 0))\n\n # create midi file\n pattern = midi.Pattern()\n track = midi.Track()\n pattern.append(track)\n\n note_events = intro_events + content_events + terminal_events\n\n # append everything in note_events\n for on, off in note_events:\n track.append(on)\n track.append(off)\n\n track.append(midi.EndOfTrackEvent(tick=0))\n return pattern\n\nclass Note:\n \"\"\" This class describes a musical note. It comprises an absolute pitch (in semi-tones) and a length data. \"\"\"\n pitch = None\n is_half_note = None\n\n def __init__(self, pitch, is_half_note):\n self.pitch = pitch\n self.is_half_note = is_half_note\n\n def get_displacement(self, initial_note):\n \"\"\" Returns the relative displacement of the note according to a key \"\"\"\n return self.pitch - initial_note.pitch\n\n def modal_position(self, scale):\n \"\"\" Returns the position in a modal sequence (scale) of the note. 
\"\"\"\n mode_position = False\n for index, note in enumerate(scale):\n if (self.pitch % 12) == (note.pitch % 12):\n mode_position = index + ((self.pitch - note.pitch) // 12)*7\n return mode_position\n","sub_path":"src/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"48226863","text":"import os\nimport sys\nimport vlc\nimport time\nimport urllib.parse\n\n#########################################\n# Speaker() text to speech function with beep\n# Requires a speaker setup on RPI\n# uses READSPEAK service\n# new function 'beep' for user feedback\nclass Speaker:\n def __init__(self):\n self.AUDIO_ON = True\n self.ON = True\n self.READSPEAK_URL = 'https://tts.readspeaker.com/a/speak?'\n self.READSPEAK_TOKEN = READSPEAKER_API_KEY\n vlcInstance = vlc.Instance()\n self.player = vlcInstance.media_player_new()\n if not os.path.isfile('./barcode_beep.mp3'):\n print(\"MISSING BEEP FILE: ./barcode_beep.mp3\")\n\n def is_on(self):\n return self.ON\n\n def turn_on(self):\n self.ON = True\n\n def turn_off(self):\n self.ON = False\n\n def say(self, mtext):\n # Call the API and put results into a new Item object\n if not self.AUDIO_ON:\n return\n try:\n params = {'key': self.READSPEAK_TOKEN,\n 'lang': 'en_us',\n 'voice': 'Sophie',\n 'text': mtext}\n self.player.set_mrl(self.READSPEAK_URL + urllib.parse.urlencode(params))\n self.player.play()\n except OSError as e:\n print(\"ERROR: text_to_voice(): Speaker.say(): \", e)\n except:\n print(\"ERROR: text_to_voice(): Unknown Error\", sys.exc_info()[0])\n return\n\n def beep(self, times=1):\n self.player.set_mrl('./barcode_beep.mp3')\n for i in range(0, times):\n self.player.play()\n time.sleep(.2)\n\n\n\n\n##################################\n# (old) Speaker text-to-speech using Google pico service\n\nimport subprocess\n\nclass Speaker_pico:\n def __init__(self):\n if not os.path.exists('/usr/bin/aplay'):\n print(\"WARNING: aplay not installed - no voice audio output\")\n self.ON = False\n elif not os.path.exists('/usr/bin/pico2wave'):\n print(\"WARNING: pico2wave not installed - no voice audio output\")\n self.ON = False\n else:\n self.ON = True\n vlcInstance = vlc.Instance()\n self.player = vlcInstance.media_player_new()\n if not os.path.isfile('./barcode_beep.mp3'):\n print(\"MISSING BEEP FILE: ./barcode_beep.mp3\")\n\n\n # status, result = subprocess.getstatusoutput(\"pico2wave\")\n # status, result = subprocess.getstatusoutput(\"aplay\")\n\n def is_on(self):\n return self.ON\n\n def turn_on(self):\n self.ON = True\n\n def turn_off(self):\n self.ON = False\n\n def say(self, input_text):\n if not self.ON:\n return\n try:\n # subprocess.Popen([\"pico2wave\", \"-w\", \"fridge.wav\", text_in,\"&&\", \"aplay\", \"fridge.wav\"])\n print(\"Running pico2wave.\")\n subprocess.run([\"pico2wave\", \"-w\", \"fridge.wav\", input_text])\n except OSError as e:\n print(\"ERROR: text_to_voice(): Speaker.say(): \", e)\n try:\n subprocess.Popen([\"aplay\", \"fridge.wav\"])\n except OSError as e:\n print(\"ERROR:text_to_voice(): Install aplay\", e)\n\n def beep(self, times=1):\n self.player.set_mrl('./barcode_beep.mp3')\n for i in range(0, times):\n self.player.play()\n time.sleep(.5)\n\n","sub_path":"text_to_voice.py","file_name":"text_to_voice.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"111509610","text":"# Copyright (c) 2016 Niklas 
Rosenstein\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\nProvides a parser for checksums from a string. Checksums must be formatted\nas `algo:checksum`` where ``algo`` is the name of a hashing algorithm supported\nby the :mod:`hashlib` module.\n\"\"\"\n\nimport hashlib\nfrom nr.utils.types.recordclass import recordclass\n\nclass Checksum(recordclass):\n\n __slots__ = 'algo digest'.split()\n\n def __str__(self):\n return '{0}:{1}'.format(self.algo, self.digest.decode('ascii'))\n\n def _set_algo(self, value):\n object.__setattr__(self, \"algo\", value.lower())\n\n @staticmethod\n def parse(value):\n \"\"\"\n Parses a ``algo:Checksum`` formatted string. The ``algo`` part must\n be a hashing algorithm supported by the :mod:`hashlib` module.\n\n :param value: An ascii string or bytes object.\n :return: :class:`Checksum`\n :raise ValueError:\n\n - If the value is not in ``algo:checksum`` format, or\n - hashing algorithm is unknown, or\n - the checksum value has an invalid length\n \"\"\"\n\n if isinstance(value, str):\n value = value.encode('ascii')\n parts = value.split(b':')\n if len(parts) != 2:\n raise ValueError(\"no algorithm specified: {0!r}\".format(value))\n\n algo, binsum = parts[0].decode('ascii').lower(), parts[1]\n hasher = hashlib.new(algo)\n required_length = hasher.digest_size * 2\n if len(binsum) != required_length:\n raise ValueError(\"invalid checksum length for algorithm {0!r},\"\n \"expected {1} got {2} bytes\".format(algo, len(binsum), required_length))\n\n return Checksum(algo, binsum)\n","sub_path":"tpml/checksum.py","file_name":"checksum.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"485165146","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/snafulib/connectors/cron.py\n# Compiled at: 2018-07-01 01:56:00\n# Size of source mod 2**32: 754 bytes\nimport threading, time, os, configparser\ngcb = None\n\ndef initinternal(function, configpath):\n global gcb\n connectconfig = None\n if not configpath:\n configpath = 'snafu.ini'\n if not function:\n function = 'snafu'\n if os.path.isfile(configpath):\n config = configparser.ConfigParser()\n config.read(configpath)\n if function in config and 'connector.cron' in config[function]:\n connectconfig = int(config[function]['connector.cron'])\n if connectconfig:\n 
while True:\n time.sleep(connectconfig)\n response = gcb(function, event='{}')\n\n\ndef init(cb, function=None, configpath=None):\n global gcb\n gcb = cb\n t = threading.Thread(target=initinternal, daemon=True, args=(function, configpath))\n t.start()","sub_path":"pycfiles/snafu-0.0.3-py3.5/cron.cpython-35.py","file_name":"cron.cpython-35.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"283934462","text":"#!/usr/bin/python\n#\n# Resize a gro file by isotropically resizing its box vectors and translating the center of mass of each molecule\n# The input can either be a new total volume or a new box vector\n# NOTE: THE VECTOR MUST BE ENCASED IN QUOTATION MARKS WHEN ENTERED \n#\n# Copyright Michael R. Shirts, University of Virginia, 2014\n#\n\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\n\n#=============================================================================================\n# RUN THE ALGORITHM TO CREATE THE NEW BOX VECTOR\n#=============================================================================================\ndef changeBoxVector(fname=\"pre_EQ.gro\", molecule='benzene', nam=12, volume=0.0, boxvect='0 0 0 0 0 0 0 0 0',\n outfile='None'):\n # Ensure that all inputs are correct\n checkInputs(fname, molecule, volume, boxvect)\n\n if outfile == 'None':\n outfile = fname\n\n # Generate arary of old atom locations\n old_xyz_position = generateAtomLocations(fname,nam)\n\n infile = open(fname, 'r')\n lines = filter(None, (line.rstrip() for line in infile))\n infile.close()\n\n # Grab the old crystal basis and generate the new crystal basis\n old_Basis = vectorToBasis(lines[len(lines) - 1].split())\n if volume > 0.0:\n initial_volume = float(np.linalg.det(old_Basis))\n scaling_param = (final_volume / initial_volume) ** (1.0 / 3.0)\n new_Basis = scaling_param * old_Basis\n else:\n new_Basis = vectorToBasis(boxvect.split())\n newboxvect = basisToVector(new_Basis)\n\n # Create the transformation operator between basis sets\n transformation_Basis = np.transpose(np.dot(new_Basis, np.linalg.inv(old_Basis)))\n\n # Generate the new atom locations\n new_xyz_position = generateNewAtomLocations(old_xyz_position, transformation_Basis, nam)\n\n # Output the gro file\n newboxstring = [str(j) for j in newboxvect]\n outputGroFile(outfile, new_xyz_position, newboxstring, fname)\n \n\n#=============================================================================================\n# ENSURE THAT ALL INPUTS ARE CORRECT\n#=============================================================================================\ndef checkInputs(fname, molecule, volume, boxvect):\n moleculeList = ['benzene', 'acetac', 'formam', 'imazol', 'glycin', 'hxacan']\n if volume <= 0.0 and boxvect == '':\n print(\"Invalid final volume: \" + str(volume))\n sys.exit()\n if molecule not in moleculeList:\n print(\"Invalid molecule: \" + molecule)\n print(\"Please enter a valid molecule name\")\n sys.exit()\n if fname == \"\":\n print(\"Please enter a gro file name\")\n sys.exit()\n\n\n#=============================================================================================\n# DETERMINE THE NUMBER OF ATOMS PER MOLECULE\n#=============================================================================================\ndef grabNAM(molecule):\n if molecule == \"benzene\":\n return 12\n elif molecule == \"acetac\":\n return 9\n elif molecule == \"formam\":\n return 6\n elif molecule == \"imazol\":\n return 9\n else:\n 
return -1\n\n#=============================================================================================\n# GENERATE ARRAY OF OLD ATOM LOCATIONS\n#=============================================================================================\ndef generateAtomLocations(fname, nam):\n # Read in input files\n infile = open(fname, 'r')\n lines = filter(None, (line.rstrip() for line in infile))\n infile.close()\n print(\"loading \" + fname)\n \n # Read in the total number of atoms in the system (should be the only item on the second line of the .gro file)\n if len(lines[1].split()) == 1:\n # na is the number of total atoms\n na = int(lines[1].split()[0])\n # nm is the number of total molecules\n nm = na / (nam)\n else:\n sys.exit('Unexpected .gro file format')\n \n # Read in atom coordinate data (starts on 3rd line of the .gro file)\n # acoordA and acoordB are the atom coordinates for polymorphs A and B\n line = 2\n mol = 0\n old_xyz_position = np.zeros((nm, 3, nam)) # Original xyz positions of all the atoms\n \n while mol < nm:\n acounter = 0\n while acounter < nam:\n old_xyz_position[mol, 0, acounter] = float(lines[line].split()[3])\n old_xyz_position[mol, 1, acounter] = float(lines[line].split()[4])\n old_xyz_position[mol, 2, acounter] = float(lines[line].split()[5])\n line += 1\n acounter += 1\n mol += 1\n return old_xyz_position\n\n#=============================================================================================\n# CONVERT A BOX VECTOR INTO A BASIS MATRIX\n#=============================================================================================\ndef vectorToBasis(vector):\n basis = np.zeros([3, 3], float) # Matrix to transform internal crystal coordinates into xyz coordinates\n for i,num in enumerate(vector):\n if i == 0:\n basis[0, 0] = float(num)\n elif i == 1:\n basis[1, 1] = float(num)\n elif i == 2:\n basis[2, 2] = float(num)\n elif i == 3:\n basis[0, 1] = float(num)\n elif i == 4:\n basis[0, 2] = float(num)\n elif i == 5:\n basis[1, 0] = float(num)\n elif i == 6:\n basis[1, 2] = float(num)\n elif i == 7:\n basis[2, 0] = float(num)\n elif i == 8:\n basis[2, 1] = float(num)\n return basis\n\n#=============================================================================================\n# CONVERT A BASIS MATRIX INTO A BOX VECTOR\n#=============================================================================================\ndef basisToVector(basis):\n vector = np.array([basis[0, 0], basis[1, 1], basis[2, 2], basis[0, 1], basis[0, 2], basis[1, 0], basis[1, 2],\n basis[2, 0], basis[2, 1]])\n if all(j == 0 for j in vector[3:9]):\n vector = np.array(vector[0:3])\n return vector\n\n#=============================================================================================\n# GENERATE THE NEW ATOM LOCATIONS\n#=============================================================================================\ndef generateNewAtomLocations(old_xyz_position, transformation_basis, nam):\n # Calculate the centroid of each molecule\n w = np.ones((nam, 1)) * (1.0 / nam)\n nm = len(old_xyz_position[:, 0, 0])\n centroid_xyz = np.zeros((nm, 3, 1))\n for mol in range(nm):\n centroid_xyz[mol, :, :] = np.dot(old_xyz_position[mol,: , :], w)\n\n mol = 0\n new_xyz_position = old_xyz_position.copy()\n while mol < nm:\n new_xyz_position[mol, :, :] = (old_xyz_position[mol, :, :] - centroid_xyz[mol, :, :]) + \\\n np.dot(transformation_basis, centroid_xyz[mol, :, :])\n mol += 1\n\n return 
new_xyz_position\n\n\n#=============================================================================================\n# OUTPUT THE NEW GRO FILE\n#=============================================================================================\ndef outputGroFile(outname, new_xyz_position, newboxvect, fname):\n # Read in the original file\n infile = open(fname, 'r')\n lines = filter(None, (line.rstrip() for line in infile))\n infile.close() \n\n # Write output .gro file\n # tacount is the total atom count\n outfile = open(outname, 'w')\n outfile.write(lines[0])\n outfile.write('\\n')\n outfile.write(lines[1])\n outfile.write('\\n')\n tacount = 0\n for mol in range(len(new_xyz_position[:, 0, 0])):\n for atom in range(len(new_xyz_position[0, 0, :])):\n # mcount is the current molecule tracker\n mcount = mol + 1\n tacount += 1\n x = round(new_xyz_position[mol, 0, atom], 8)\n y = round(new_xyz_position[mol, 1, atom], 8)\n z = round(new_xyz_position[mol, 2, atom], 8)\n if x < 0.0:\n xstr = ' ' + \"%.8f\" % x\n elif x == 0.0:\n xstr = ' 0.00000000'\n else:\n xstr = ' ' + \"%.8f\" % x\n\n if y < 0.0:\n ystr = ' ' + \"%.8f\" % y\n elif y == 0.0:\n ystr = ' 0.00000000'\n else:\n ystr = ' ' + \"%.8f\" % y\n\n if z < 0.0:\n zstr = ' ' + \"%.8f\" % z\n elif z == 0.0:\n zstr = ' 0.00000000'\n else:\n zstr = ' ' + \"%.8f\" % z\n\n line = str(mcount).rjust(5) + mol_name + lines[tacount + 1].split()[1].rjust(7) + str(tacount).rjust(5) + \\\n xstr + ystr + zstr + '\\n'\n outfile.write(line)\n outfile.write(\" \" + ' '.join(newboxvect))\n outfile.write('\\n') # make sure the file ends on a newline\n outfile.close()\n\ndef run_resize_gro_individual(molecule, fname, final_volume, boxvect='0 0 0 0 0 0 0 0 0'):\n #=============================================================================================\n # DETERMINE THE MOLECULE FROM THE INPUT FILES\n #=============================================================================================\n if molecule == 'benzene':\n nam = 12\n elif molecule == 'glycin':\n nam = 10\n elif molecule == 'acetac':\n nam = 8\n elif molecule == 'hxacan':\n nam = 20\n elif molecule == 'formam':\n nam = 6\n elif molecule == 'imazol':\n nam = 9\n elif molecule == 'cafine':\n nam = 24\n elif molecule == 'zzzvye':\n nam = 15\n elif molecule == 'dohfem':\n nam = 14\n elif molecule == 'bismev':\n nam = 20\n elif molecule == 'cbmzpn':\n nam = 30\n elif molecule == 'pyrzin':\n nam = 14\n elif molecule == 'kobfud':\n nam = 15\n else:\n print(\"Unrecognized molecule: $molecule\")\n sys.exit()\n\n changeBoxVector(fname=fname, molecule=molecule, nam=nam, volume=final_volume, boxvect=boxvect)\n\nif __name__ == '__main__':\n #=============================================================================================\n # INPUT VARIABLES\n #=============================================================================================\n #Gro file to be resized\n # polymorph B'~ polymorph B\n # mol_name is the name of the molecule in the .gro files\n mol_name = 'BNZ'\n # nam is the number of atoms per molecule\n nam = int(12)\n # nmu is the number of molecules per unit cell\n nmu = 4\n\n #=============================================================================================\n # OUTPUT VARIABLES\n #=============================================================================================\n # This script produces a gro file with the resized dimensions and molecules\n parser = OptionParser()\n parser.add_option('-V', '--volume', dest='volume', help='Final Volume',\n default='-1.0') # 
Final volume in cubic angstroms\n parser.add_option('-f', '--gro', dest='grofile', help='Gromacs File') #.gro file to be resized\n parser.add_option('-M', '--molecule', dest='molname', help='Name of the molecule in the gro file',\n default='benzene')\n# parser.add_option('-n', '--atoms', dest='numatoms', help='number of atoms in each molecule', default='12')\n# parser.add_option('-u', '--unit', dest='nummol', help='number of molecules in unit cell',default='4')\n parser.add_option('-v', '--vector', dest='vector', help='New box vector', default='0 0 0 0 0 0 0 0 0')\n (options, args) = parser.parse_args()\n final_volume = float(options.volume)\n molecule = options.molname\n# num_atoms = int(options.numatoms)\n# num_mol = int(options.nummol)\n boxvect = options.vector\n fname = options.grofile\n run_resize_gro_individual(molecule, fname, final_volume, boxvect)\n\n\n\n","sub_path":"PSCP/setup-scripts/resize_gro_individual.py","file_name":"resize_gro_individual.py","file_ext":"py","file_size_in_byte":11984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413807669","text":"from pyalgotrade import strategy\nfrom pyalgotrade.oanda import barfeed\n\n\nclass MyStrategy(strategy.BacktestingStrategy):\n def __init__(self, feed, instrument):\n strategy.BacktestingStrategy.__init__(self, feed)\n self.__instrument = instrument\n\n def onBars(self, bars):\n bar = bars[self.__instrument]\n self.info(bar.getClose())\n self.info(bar.getVolume())\n self.info(bar.getOpen())\n self.info(bar.getHigh())\n self.info(bar.getLow())\n\n# Load the yahoo feed from the CSV file\nfeed = barfeed.Feed()\nfeed.addBarsFromCSV('GBP_USD', 'data/oanda.csv')\n\n# Evaluate the strategy with the feed's bars.\nmyStrategy = MyStrategy(feed, \"GBP_USD\")\nmyStrategy.run()\n","sub_path":"samples/oanda_tutorial.py","file_name":"oanda_tutorial.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"583596055","text":"#! 
/usr/bin/python\n\nfrom os import popen\n \ndef wcl(INF): # INPUT FILENAME MUST BE STRING\n\n wclCom= 'wc -l ' + str(INF)\n \n wclFile= popen(wclCom)\n \n l= wclFile.readline()\n \n NumLines= l.split()[0]\n\n return int(NumLines)","sub_path":"Modules/Superannuated/LinesInFile.py","file_name":"LinesInFile.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"17436192","text":"from autogoal.grammar import ContinuousValue, DiscreteValue, CategoricalValue\nfrom autogoal.kb import Seq, AlgorithmBase, VectorDiscrete\nfrom cropsPlanningV2 import AlgorithmStructure, GameStructure\n\n\nclass EpsilonGreedy(AlgorithmBase):\n\n def __init__(\n self,\n epsilon: ContinuousValue(0.0, 1),\n ):\n self._epsilon = epsilon\n\n def run(self, algorithm: AlgorithmStructure) -> Seq[VectorDiscrete]:\n algorithm.planning(self._epsilon)\n\n return algorithm.maxRoad()\n\n\n","sub_path":"autogoal/experimental/cropsplanning/epsilonGreedy.py","file_name":"epsilonGreedy.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201841026","text":"from inspect import signature\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport gpflow as gpf\n\nfrom .utils import df_transform\n\nLikelihood = gpf.likelihoods.Likelihood\npositive = gpf.utilities.positive\n\ndef Fmu_Fvar_to_list(function):\n @wraps(function)\n def wrapper(self, Fmu, Fvar, *args, **kwargs):\n Fmu, Fvar = [\n tf.unstack(tf.expand_dims(F, axis=-2), axis=-1)\n for F in [Fmu, Fvar]\n ]\n result = function(self, Fmu, Fvar, *args, **kwargs)\n if type(result) in (list, tuple):\n return [tf.expand_dims(r, axis=-1) for r in result]\n else:\n return tf.expand_dims(result, axis=-1)\n return wrapper\n\nclass MultiLatent(Likelihood):\n\n def __init__(self, num_latent,**kwargs):\n super().__init__(**kwargs)\n self.num_latent = num_latent\n\n @Fmu_Fvar_to_list\n def predict_mean_and_var(self, Fmu, Fvar):\n Ymu, Yvar = super().predict_mean_and_var(Fmu, Fvar)\n return Ymu, Yvar\n \n @Fmu_Fvar_to_list\n def predict_density(self, Fmu, Fvar, Y):\n return super().predict_density(Fmu, Fvar, Y)\n \n @Fmu_Fvar_to_list\n def variational_expectations(self, Fmu, Fvar, Y):\n return super().variational_expectations(Fmu, Fvar, Y)\n\nclass ConditionalDistribution(Likelihood):\n \n def __init__(self, conditional_distribution, **kwargs):\n super().__init__(**kwargs)\n self.conditional_distribution = conditional_distribution\n\n def log_prob(self, *F, Y):\n return self.conditional_distribution(*F).log_prob(Y)\n\n def conditional_mean(self, *F):\n return self.conditional_distribution(*F).mean()\n\n def conditional_variance(self, *F):\n return self.conditional_distribution(*F).variance()\n\nclass HeterocedasticGaussian(MultiLatent, ConditionalDistribution):\n\n def __init__(self, transform=positive()):\n self.transform = transform\n def conditional_distribution(f, g):\n return tfp.distributions.Normal(f, transform(g))\n num_latent = 2\n super().__init__(\n conditional_distribution=conditional_distribution, \n num_latent=num_latent\n )\n\nclass HeterocedasticStudentT(MultiLatent, ConditionalDistribution):\n \n def __init__(\n self, \n df=6.0, \n scale_transform=positive(), \n df_transform=df_transform()\n ):\n if df is not None:\n self.scale_transform = scale_transform\n self.df = gpf.Parameter(df, transform=df_transform)\n def 
conditional_distribution(f, g):\n return tfp.distributions.StudentT(\n loc=f,\n scale=self.scale_transform.forward(g),\n df=self.df\n )\n num_latent = 2\n else:\n self.scale_transform = scale_transform\n self.df_transform = df_transform\n def conditional_distribution(f, g, h):\n return tfp.distributions.StudentT(\n loc=f,\n scale=self.scale_transform.forward(g),\n df=self.df_transform.forward(h)\n )\n num_latent = 3\n\n super().__init__(\n conditional_distribution=conditional_distribution, \n num_latent=num_latent\n )\n","sub_path":"gpflow_custom/likelihoods.py","file_name":"likelihoods.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"574235865","text":"import math\n\n\nl, u, r, d = (0, -1), (-1, 0), (0, 1), (1, 0)\npath_dict = {\n \"r\": r, \"l\": l, \"u\": u, \"d\": d\n}\n\n\ndef find_character_index(room, char):\n ch_list = []\n for ix, row in enumerate(room):\n for iy, i in enumerate(row):\n if char != \"E\":\n if i == char:\n ch_list.append((ix, iy))\n else:\n if i == char or i == \"O\":\n ch_list.append((ix, iy))\n return ch_list\n\n\ndef revert_back(x, y, last_position, path_dict):\n path = path_dict[last_position]\n px, py = path[0], path[1]\n x, y = x - px, y - py\n return x, y\n\n\n# write a function to calculate 2D distance between 2 co-ordinates\n# sqrt((x2-x1)**2 + (y2-y1)**2)\ndef find_distance(brynjolf, exit):\n distance = math.sqrt(sum([math.pow(a - b, 2) for a, b in zip(brynjolf, exit)]))\n return distance\n\n\ndef short_distance(brynjolf, exit):\n all_possibilities = []\n for direction in path_dict.keys():\n result = (direction, find_distance(tuple(sum(x) for x in zip(brynjolf, path_dict[direction])), exit))\n all_possibilities.append(result)\n which_direction = min(all_possibilities, key=lambda x: x[1])\n return which_direction\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614996852","text":"from random import choice\n\nitems = []\n\ndef show_items(): #1. Vi tri, 2. ten item\n # 1. Rau muống luộc cao cấp\n # 2. Bánh bao chiên bình dân\n pass\n\n\ndef item_after_combat():\n new_item = generate_item()\n print(\"Một\", new_item[\"NAME\"], \"vừa rơi ra\")\n while True:\n print(\"1. Xem\")\n print(\"2. Nhặt\")\n print(\"3. 
Bỏ qua\")\n option = input(\">>>\")\n if option == \"1\":\n show_item(new_item)\n elif option == \"2\":\n add_item(new_item)\n print(\"Bạn đã nhặt\", new_item[\"NAME\"], \"vào hòm đồ\")\n count_items()\n break\n elif option == \"3\":\n print(\"Bạn đã bỏ qua món đồ\")\n break\n\ndef add_item(item):\n items.append(item)\n\n\ndef count_items():\n count = len(items) # len ~ length\n print(\"Bạn có\", count, \"đồ trong hòm\")\n\n\nfood_types = [\n \"Bánh bao\",\n \"Cơm\",\n \"Mỳ tôm\",\n \"Trứng\",\n \"Rau muống\",\n]\n\ncook_types = [\n \"hấp\",\n \"chiên\",\n \"luộc\"\n]\n\nfood_levels = [\n \"bình dân\",\n \"cao cấp\",\n \"xa xỉ\"\n]\n\n\ndef generate_item_name():\n ft = choice(food_types)\n ct = choice(cook_types)\n fl = choice(food_levels)\n item_name = ft + \" \" + ct + \" \" + fl\n return item_name\n\n\ndef generate_item():\n name = generate_item_name()\n item = {\n \"NAME\": name,\n \"AGI\": 3,\n \"HP\": -1,\n \"DEF\": 2,\n \"STR\": 2,\n }\n return item\n\n\ndef show_item(game_item):\n print(\"* \" * 15)\n\n for key, value in game_item.items():\n print(\"*\", key, \":\", value)\n\n print(\"* \" * 15)\n\n#\n#\n# steel_gauntlet = {\n# \"NAME\": \"STEEL GAUNLET\",\n# \"HP\": 10,\n# \"AGI\": 5,\n# \"LUCK\": 1,\n# }\n#\n# bronze_shield = {\n# \"NAME\": \"BRONZE SHIElD\",\n# \"HP\": 5,\n# \"AGI\": 1,\n# }\n#\n# golden_stick = {\n# \"NAME\": \"GOLDEN STICK\",\n# \"AGI\": 15,\n# \"HP\": 20,\n# \"STR\": 100,\n# }\n#\n# inventory = [steel_gaunlet, bronze_shield, golden_stick]\n# for item in inventory:\n# show_item(item)","sub_path":"venv/the_inventer.py","file_name":"the_inventer.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"137141919","text":"#*******************************************************************************\n#\n# Copyright (c) 2017-2019 David Briant\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#*******************************************************************************\n\n\nimport numpy as np\n\ndef AsSteps(xs, ys, align='center', width=None):\n #xMin, xMax = min(xs), max(xs)\n if width is None:\n width = np.diff(xs).min()\n points = []\n lastx = np.nan\n lasty = np.nan\n for x, y in zip(xs, ys):\n if (x - lastx) > 1e-5:\n points.append((lastx, 0))\n points.append((x, 0))\n if not np.isnan(lasty):\n points.append((x, lasty))\n points.append((x, y))\n points.append((x + width, y))\n lastx = x + width\n lasty = y\n points.append((lastx, lasty))\n pxs, pys = zip(*points)\n if align == 'center':\n pxs = np.array(pxs) - width / 2.0\n elif align == 'right':\n pxs = np.array(pxs) - width\n return pxs, np.array(pys)\n","sub_path":"statbooks/mpl.py","file_name":"mpl.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"293167469","text":"######## Webcam Object Detection Using Tensorflow-trained Classifier #########\n#\n# Author: Evan Juras\n# Date: 1/20/18\n# Description: \n# This 
program uses a TensorFlow-trained classifier to perform object detection.\n# It loads the classifier and uses it to perform object detection on a webcam feed.\n# It draws boxes and scores around the objects of interest in each frame from\n# the webcam.\n\n## Some of the code is copied from Google's example at\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\n## and some is copied from Dat Tran's example at\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\n\n## but I changed it to make it more understandable to me.\n\n\n# Import packages\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport feeder_delimiter\n\n#Grabscreen stuff\nfrom grabscreen import grab_screen\nimport pyautogui\n\n#excel report stuff\nimport xlsxwriter as excel\n\npoints = False\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilities\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'inference_graph'\n\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\n\n# Number of classes the object detector can identify\nNUM_CLASSES = 2\n\n#Grabscreen size\nCAP_WIDTH = 1280\nCAP_HEIGHT = 720\n\n#feeder color\nFEEDER_COLOR = (66, 244, 98)\n\n\n## Load the label map.\n# Label maps map indices to category names, so that when our convolution\n# network predicts `5`, we know that this corresponds to `king`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n    od_graph_def = tf.GraphDef()\n    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n        serialized_graph = fid.read()\n        od_graph_def.ParseFromString(serialized_graph)\n        tf.import_graph_def(od_graph_def, name='')\n\n    sess = tf.Session(graph=detection_graph)\n\n\n# Define input and output tensors (i.e. 
data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n\ndef detect_eating_pig_(frame,boxes,scores,feeders,draw_bbox=True):\n eating_list = []\n for i, f in enumerate(feeders):\n eating_list.append(False)\n frame = draw_feeder_area(frame,feeders[i],FEEDER_COLOR,i+1)\n boxes_image = np.zeros(frame.shape, np.uint8)\n boxes_image = cv2.cvtColor(boxes_image, cv2.COLOR_BGR2GRAY)\n for j, b in enumerate(boxes[0]):\n if scores[0][j] >= 0.5:\n height = np.size(frame, 0)\n width = np.size(frame, 1)\n left, right, top, bottom = int(boxes[0][j][1]*width), int(boxes[0][j][3]*width), int(boxes[0][j][0]*height), int(boxes[0][j][2]*height)\n pig_rect = [(left, top), (left, bottom), (right, bottom),(right, top)]\n intersect_value = _calculate_intersection(frame,feeders[i],pig_rect)\n if(draw_bbox):\n cv2.rectangle(frame,(left, top),(right, bottom),(0,255,0),2)\n \n if(intersect_value>3000):\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame,\"Comendo!\",(left,top), font, 1,(0,255,0),2,cv2.LINE_AA)\n eating_list[i] = True\n #boxes_image = cv2.bitwise_or(boxes_image,box_image)\n \n return frame, eating_list\n\ndef _calculate_intersection(frame,feeder_poly,pig_rect):\n feeder_image = np.zeros(frame.shape, np.uint8)\n cv2.fillPoly(feeder_image, np.array([feeder_poly]), (255,255,255))\n feeder_image = cv2.cvtColor(feeder_image, cv2.COLOR_BGR2GRAY)\n \n box_image = np.zeros(frame.shape, np.uint8)\n cv2.fillPoly(box_image, np.array([pig_rect]), (255,255,255))\n box_image = cv2.cvtColor(box_image, cv2.COLOR_BGR2GRAY)\n \n and_image = cv2.bitwise_and(feeder_image,box_image)\n return cv2.countNonZero(and_image)\n \n \ndef draw_feeder_area(frame,feeder,color,n_feeder= None):\n ALPHA = 0.2\n canvasBlack = np.ones(frame.shape, np.uint8)\n # of a filled polygon\n cv2.fillPoly(canvasBlack, np.array([feeder]), color)\n frame = cv2.addWeighted(canvasBlack,ALPHA,frame, 1-ALPHA,10)\n if(n_feeder is not None):\n cX,cY = find_center_of_polygon(canvasBlack)\n cv2.putText(frame,str(n_feeder),(cX - 20, cY - 20),cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n \n return frame\n\n\ndef _update_eating_time_counters_list(eating_list,eating_time_counters_list,FPS):\n for i in range(len(eating_list)):\n if(eating_list[i]):\n eating_time_counters_list[i]+=1/FPS\n \ndef generate_excel_report(eating_time_counters_list):\n workbook = excel.Workbook('Relatorio_experimento_suinos.xlsx')\n worksheet = workbook.add_worksheet()\n worksheet.write(0,0,'Comedouro #')\n worksheet.write(0,1,'Tempo estimado de alimentação (s)')\n for i in range(len(eating_time_counters_list)):\n worksheet.write(i+1,0,i+1)\n worksheet.write(i+1,1,round(eating_time_counters_list[i],2))\n workbook.close()\n\ndef find_center_of_polygon(image):\n #cv2.imshow(\"mask\",image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 
5), 0)\n thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n #cv2.imshow(\"mask\",thresh)\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[1]\n # loop over the contours\n for c in cnts:\n # compute the center of the contour\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n return cX, cY \n\ndef start(feeders,videoPath, debug = True,start_sec = None,end_sec = None, write_on_video = False):\n eating_time_counters_list = [0 for i in range (len(feeders))]\n if(not debug):\n #video output\n videoFile = cv2.VideoCapture(videoPath)\n \n totalFrames = int(videoFile.get(cv2.CAP_PROP_FRAME_COUNT))\n FPS = videoFile.get(cv2.CAP_PROP_FPS)\n \n if(start_sec is not None):\n start_frame = FPS*start_sec\n if(start_frame<=totalFrames):\n videoFile.set(cv2.CAP_PROP_POS_FRAMES,start_frame)\n \n if(write_on_video):\n frame_width = int(videoFile.get(3))\n frame_height = int(videoFile.get(4))\n #video_output = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\n video_output = cv2.VideoWriter('outpy2.mp4',cv2.VideoWriter_fourcc(*'XVID'), FPS, (frame_width,frame_height))\n\n \n while(debug or videoFile.isOpened()):\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. a single-column array, where each item in the column has the pixel RGB value\n #ret, frame = video.read()\n \n \n if(debug):\n #Acquire frame from screen\n size = pyautogui.size()\n left,top,x2,y2, = int((size[0]/2)-(CAP_WIDTH/2)),int(size[1]/2-CAP_HEIGHT/2),int(size[0]/2+CAP_WIDTH/2),int(size[1]/2+CAP_HEIGHT/2)\n screen = grab_screen(region=(left,top,x2,y2)) \n frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)\n else:\n ret,frame = videoFile.read()\n #end of grabscreen\n mframe = frame.copy()\n \n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visulaize the results')\n '''vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.85)'''\n\n mframe, eating_list = detect_eating_pig_(mframe,boxes,scores,feeders)\n if(not debug):\n _update_eating_time_counters_list(eating_list,eating_time_counters_list,FPS)\n \n if(not debug and write_on_video):\n video_output.write(mframe)\n \n\n cv2.imshow('Pig detector', mframe)\n # All the results have been drawn on the frame, so it's time to display it.\n #cv2.imshow('Pig detector', frame)\n\n # Press 'q' to quit\n if cv2.waitKey(1) == ord('q'):\n break\n\n if((not debug) and (end_sec is not None) and (videoFile.get(cv2.CAP_PROP_POS_FRAMES)>= FPS*end_sec)):\n #print(\"VIDEO FILE RELEASED!\")\n videoFile.release()\n \n #print(eating_time_counters_list)\n generate_excel_report(eating_time_counters_list)\n \n # Clean up\n #video.release()\n cv2.destroyAllWindows()\n if(not debug and write_on_video):\n video_output.release()\n\n\n\n \n\nif __name__ == \"__main__\":\n feeders = []\n videoPath = input(\"Digite o nome do arquivo de video:\\n\")\n n_feeders = int(input(\"Digite o número de comedouros:\\n\"))\n for i in range(n_feeders):\n print(\"Desenhe o comedouro 
{}\".format(i+1))\n feeders.append(feeder_delimiter.get_feeder_points(videoPath,i+1))\n start_time = 270#int(input(\"Tempo inicial:\\n\"))\n end_time = 285#int(input(\"Tempo final:\\n\"))\n write_on_video = True\n #print(\"Pontos do comedouro: %s\\n\" % feeders)\n start(feeders, videoPath, False,start_time,end_time,write_on_video)\n \n\n\n","sub_path":"models/research/object_detection/stable/object_detection/eating_pig_detector - Copia.py","file_name":"eating_pig_detector - Copia.py","file_ext":"py","file_size_in_byte":11159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3588820","text":"#-*- coding: utf-8 -*-\n\nfrom flask import Flask, send_file, abort, url_for, request, render_template\nimport traceback\nimport tempfile\nfrom ph.layers.domain.utils.words import get_words_stat_from_url\nimport math\nfrom ph.layers.infrastructure.utils.morph import get_morph\nimport ph.layers.domain.utils.stats_diff as stats_diff\nfrom ph.layers.infrastructure.backend.mongodb import mongodbsession, MATERIALS_COLL, TAXONOMIES_COLL, get_tax_by_id, get_doc_by_id\nfrom ph.layers.infrastructure.utils.numered_dict import stat_sorted_list\n\napp = Flask(__name__)\napp.debug = True\n\napp.jinja_env.filters['stat_sorted_list'] = stat_sorted_list\n\ndef sublist(l):\n return l[:25]\n\napp.jinja_env.filters['sublist'] = sublist\n\n@app.route(\"//init/\")\ndef form_init(action):\n if not (action in (\"stats_comparsion\", \"stats_distance\")): abort(501)\n return render_template(\"form_init.html\", action=action)\n\n@app.route(\"/stats_comparsion/\", methods=[\"post\"])\ndef stats_comparsion():\n urls = request.form.getlist(\"url\")\n urls = filter(lambda x: x != \"\", urls)\n if len(urls) < 1 : abort(501)\n morph = get_morph()\n stats = map(lambda url : get_words_stat_from_url(url, case_sensitive=False, morph=morph, freq=True, normalized_mode=True, sorted_list=False, more_then_one=True, more_then_bigram=True), urls)\n diff = stats_diff.stats_comparsion(stats)\n return render_template(\"stats_comparison.html\", urls=urls, diff=diff_to_sorted_list(diff))\n\n@app.route(\"/stats_distance/\", methods=[\"post\"])\ndef stats_distance():\n urls = request.form.getlist(\"url\")\n method = request.form.get(\"method\", \"euclidean_distance\")\n r = request.form.get(\"r\", 2)\n p = request.form.get(\"p\", 2)\n r = int(r)\n p = int(p)\n urls = filter(lambda x: x != \"\", urls)\n if len(urls) != 2 : abort(501)\n morph = get_morph()\n stats = map(lambda url : get_words_stat_from_url(url, case_sensitive=False, morph=morph, freq=True, normalized_mode=True, sorted_list=False, more_then_one=True, more_then_bigram=True), urls)\n diff = stats_diff.stats_distance(*stats, method=method)\n return render_template(\"stats_distance.html\", urls=urls, diff=diff, method=method, r=r, p=p)\n\ndef diff_to_sorted_list(diff):\n sorted_list = []\n for k, value in diff.items() :\n t = (k, value.get(\"values\"), value.get(\"average\"))\n sorted_list.append(t)\n return sorted(sorted_list, key=lambda tup: tup[2], reverse=True)\n\n@app.route(\"/summary/\")\ndef summary():\n return \"Materials stats: \" + str(MATERIALS_COLL.find({}).count())\n\n@app.route(\"/taxonomies/\")\ndef taxonomies():\n PAGE_SIZE = 25\n offset = int(request.args.get(\"offset\", 0))\n\n taxonomies = TAXONOMIES_COLL.find({})[offset:offset + PAGE_SIZE]\n taxonomies_count = taxonomies.count()\n \n return render_template(\"taxonomies.html\", taxonomies=taxonomies, taxonomies_count=taxonomies_count, offset=offset, 
PAGE_SIZE=PAGE_SIZE)\n\n@app.route(\"/materials/\")\ndef materials():\n PAGE_SIZE = 25\n offset = int(request.args.get(\"offset\", 0))\n\n materials = MATERIALS_COLL.find({})[offset:offset + PAGE_SIZE]\n materials_count = materials.count()\n \n return render_template(\"materials.html\", materials=materials, materials_count=materials_count, offset=offset, PAGE_SIZE=PAGE_SIZE)\n\n@app.route(\"/stats/taxonomy//\")\n@app.route(\"/stats/taxonomy/\", defaults={'id': None})\ndef taxonomy(id):\n if id is None : id = request.args.get(\"id\", None)\n freq = request.args.get(\"freq\", \"0\") == \"1\"\n tax = get_tax_by_id(id)\n \n if tax is not None :\n if freq :\n tax = get_terms_freq(tax)\n return render_template(\"taxonomy.html\", tax=tax, freq=freq)\n else:\n return \"Taxonomy %s does not exist\" % str(id)\n \n@app.route(\"/stats/material//\")\n@app.route(\"/stats/material/\", defaults={'id': None})\ndef material(id):\n if id is None : id = request.args.get(\"id\", None)\n freq = request.args.get(\"freq\", \"0\") == \"1\"\n material = get_doc_by_id(id)\n \n if material is not None :\n terms_count = float(material.get(\"terms_count\", 0))\n terms = material.get(\"terms\")\n \n if freq and terms_count > 0:\n for k, v in terms.items() :\n terms[k] = v / terms_count\n material[\"terms\"] = stat_sorted_list(terms)\n return render_template(\"material.html\", material=material, freq=freq)\n else:\n return \"Material %s does not exist\" % str(id)\n \ndef get_terms_freq(node):\n terms = node.get('terms')\n terms_count = float(node.get(\"terms_count\"))\n children = node.get(\"children\")\n if terms is not None and terms_count != 0:\n for k, v in terms.items() :\n terms[k] = v / terms_count\n if children :\n node['children'] = map(get_terms_freq, children)\n return node\n \nif __name__ == \"__main__\":\n import sys\n app.run(host=\"0.0.0.0\", port=int(sys.argv[1]))\n","sub_path":"layers/presentation/http/stats/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"24242740","text":"import json\nfrom asyncio import sleep\nfrom collections import defaultdict\n\nfrom aiotg import TgBot\n\nfrom secrets import token, botan\nfrom atlantis.translations import en, ru\n\n\nbot = TgBot(token, botan_token=botan)\nusers = defaultdict(dict)\n\n\n@bot.command(r'/start')\nasync def start(chat, match):\n users[chat.id] = Atlantis(chat)\n print('{} joined or started anew, total {}'.format(chat.sender, len(users)))\n await chat.send_text('Choose language / Выберите язык:\\n\\n/en English\\n/ru Русский')\n await users[chat.id].flush()\n\n\n@bot.command(r'/(en|ru)')\nasync def set_locale(chat, match):\n if not users[chat.id]:\n return (await start(chat, match))\n\n locale = match.group(1)\n print('{} changed locale to {}'.format(chat.sender, locale))\n if locale == 'en':\n users[chat.id].locale = en\n elif locale == 'ru':\n users[chat.id].locale = ru\n\n users[chat.id].goto('Atl_Start')\n await users[chat.id].flush()\n\n\n@bot.command(r'/(fast|slow)')\nasync def set_speed(chat, match):\n speed = match.group(1)\n print('{} changed speed to {}'.format(chat.sender, speed))\n if speed == 'fast':\n users[chat.id].fast = True\n else:\n users[chat.id].fast = False\n\n\n@bot.command(r'.*')\nasync def choose(chat, match):\n if not users[chat.id]:\n return (await start(chat, match))\n\n choice = match.group(0)\n print('{} chose {}'.format(chat.sender, choice))\n users[chat.id].choose(choice)\n await 
users[chat.id].flush()\n\n\nclass Atlantis:\n\n 'Bot-bound Atlantis story.'\n\n def __init__(self, bot):\n self.state = {}\n self.options = []\n self.choices = []\n self.messages = []\n self.bot = bot\n self.locale = en\n self.fast = False\n\n def say(self, text):\n 'Enqueue a message.'\n lines = [x for x in self.locale[text].split('\\n\\n') if x.strip()]\n self.messages.extend(lines)\n\n async def typing(self, message):\n 'Delay based on message length.'\n delay = min(max(1.5, len(message) / 50), 4.0)\n if not self.fast:\n await self.bot.send_chat_action(action='typing')\n await sleep(delay)\n\n async def flush(self):\n 'Send all pending messages and a reply keyboard.'\n queue = list(self.messages)\n self.messages = []\n keyboard = {\n 'keyboard': [[choice] for choice in self.choices],\n 'resize_keyboard': True,\n }\n\n while len(queue) > 1:\n message = queue.pop(0)\n await self.typing(message)\n await self.bot.send_text(message)\n\n if queue:\n message = queue.pop(0)\n await self.typing(message)\n await self.bot.send_text(message, reply_markup=json.dumps(keyboard))\n\n def choose(self, choice):\n 'Advance the story based on a choice.'\n try:\n option = self.options[self.choices.index(choice)]\n except ValueError:\n print('invalid choice \"{}\" by {}'.format(choice, self.bot.sender))\n else:\n for s in option.get('set', ()):\n self.state[s] = True\n\n self.goto(option['next'])\n\n def goto(self, jump):\n 'Jump to specific part of the story.'\n\n if jump == 'Atl_Start':\n self.say('Ln0016.0.text.FAREWELLATLANTISAStoryOf')\n self.state = {}\n self.options = [\n {'next': 'Atl_Begin', 'text': 'Ln0037.0.option.Begin', 'short': 'Ln0037.0.short.Begin'},\n {'next': 'Atl_Credits', 'text': 'TermDlg.Common.Credits', 'short': 'TermDlg.Common.Credits2'},\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'TermDlg.Common.ExitBrackets'},\n ]\n\n if jump == 'Atl_Credits':\n self.say('Ln0044.0.text.WrittenByLilithDedicatedTo')\n self.options = [\n {'next': 'Atl_Begin', 'text': 'Ln0037.0.option.Begin', 'short': 'Ln0037.0.short.Begin'},\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln0053.0.short.Quit'},\n ]\n\n if jump == 'Atl_Begin':\n self.say('Ln0058.0.text.ChooseYourCharacterClass')\n self.options = [\n {'set': ['ClassPoet'], 'next': 'Atl_Poet', 'text': 'Ln0063.0.option.Poet'},\n {'set': ['ClassPhysician'], 'next': 'Atl_Physician', 'text': 'Ln0064.0.option.Physician'},\n {'set': ['ClassFarmer'], 'next': 'Atl_Farmer', 'text': 'Ln0065.0.option.Farmer'},\n {'set': ['ClassScientist'], 'next': 'Atl_Scientist', 'text': 'Ln0066.0.option.Scientist'},\n {'set': ['ClassMagician'], 'next': 'Atl_Magician', 'text': 'Ln0067.0.option.Magician'},\n ]\n\n # POET\n\n if jump == 'Atl_Poet':\n self.say('Ln0074.0.text.YouAreSittingUponA')\n self.options = [\n {'next': 'Atl_PoetWork', 'text': 'Ln0079.0.option.WorkOnPoetry'},\n {'next': 'Atl_PoetObserve', 'text': 'Ln0080.0.option.ObserveTheChildren'},\n {'next': 'Atl_PoetPlay', 'text': 'Ln0081.0.option.PlayWithTheChildren'},\n ]\n\n if jump == 'Atl_PoetWork':\n self.say('Ln0086.0.text.YouCloseYourEyesFocusing')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_PoetObserve':\n self.say('Ln0098.0.text.YouObserveThePlayingChildren')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_PoetPlay':\n self.say('Ln0112.0.text.YouGoDownToThe')\n self.options = [\n {'next': 
'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n # PHYSICIAN\n\n if jump == 'Atl_Physician':\n self.say('Ln0128.0.text.YoureInTheGreatHall')\n self.options = [\n {'next': 'Atl_Approach', 'text': 'Ln0135.0.option.Approach'},\n ]\n\n if jump == 'Atl_Approach':\n self.say('Ln0140.0.text.AsYouApproachYouSee')\n self.options = [\n {'next': 'Atl_Sit', 'text': 'Ln0147.0.option.SitWithTheMan'},\n {'next': 'Atl_Offer', 'text': 'Ln0148.0.option.OfferHimSleepSnakePoison'},\n ]\n\n if jump == 'Atl_Sit':\n self.say('Ln0153.0.text.YouSitForTheMan')\n self.options = [\n {'next': 'Atl_Pray', 'text': 'Ln0162.0.option.SayAPrayer'},\n {'next': 'Atl_Pray', 'text': 'Ln0163.0.option.CloseHisEyes'},\n ]\n\n if jump == 'Atl_Pray':\n self.say('Ln0168.0.text.YouGetUpAndApproach')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_Offer':\n self.say('Ln0180.0.text.TheManConsidersThisFor')\n self.options = [\n {'next': 'Atl_Pray', 'text': 'Ln0162.0.option.SayAPrayer'},\n {'next': 'Atl_Pray', 'text': 'Ln0163.0.option.CloseHisEyes'},\n ]\n\n # FARMER\n\n if jump == 'Atl_Farmer':\n self.say('Ln0197.0.text.YoureOutsideOnYourFarm')\n self.options = [\n {'next': 'Atl_Dig', 'text': 'Ln0202.0.option.DigItUp'},\n {'next': 'Atl_LeaveIt', 'text': 'Ln0203.0.option.LeaveItAlone'},\n ]\n\n if jump == 'Atl_Dig':\n self.say('Ln0208.0.text.ItsNotJustAStone')\n self.options = [\n {'next': 'Atl_KeepDigging', 'text': 'Ln0213.0.option.KeepDigging'},\n ]\n\n if jump == 'Atl_KeepDigging':\n self.say('Ln0218.0.text.NoYouBeginToRealize')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_LeaveIt':\n self.say('Ln0234.0.text.YouLeaveItButIt')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0093.0.option.TalkToTheMessenger'},\n ]\n\n # SCIENTIST\n\n if jump == 'Atl_Scientist':\n self.say('Ln0252.0.text.YoureInTheTempleOf')\n self.options = [\n {'next': 'Atl_Experiment', 'text': 'Ln0257.0.option.StartTheExperiment'},\n ]\n\n if jump == 'Atl_Experiment':\n self.say('Ln0262.0.text.TheGearsOfTheMachine')\n self.options = [\n {'next': 'Atl_ApproachMachine', 'text': 'Ln0269.0.option.ApproachTheMachine'},\n {'next': 'Atl_ShutDown', 'text': 'Ln0270.0.option.ShutItDown'},\n {'next': 'Atl_KeepRunning', 'text': 'Ln0271.0.option.KeepItRunning'},\n ]\n\n if jump == 'Atl_ApproachMachine':\n self.say('Ln0276.0.text.YouStepCloserToThe')\n self.options = [\n {'next': 'Atl_Investigate', 'text': 'Ln0283.0.option.Investigate'},\n ]\n\n if jump == 'Atl_Investigate':\n self.say('Ln0288.0.text.ItsACityYouCan')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_ShutDown':\n self.say('Ln0302.0.text.NoThatsWrongTheLight')\n self.options = [\n {'next': 'Atl_ApproachMachine', 'text': 'Ln0307.0.option.ApproachTheLight'},\n ]\n\n if jump == 'Atl_KeepRunning':\n self.say('Ln0312.0.text.TheLightGrowsStrongerAnd')\n self.options = [\n {'next': 'Atl_ApproachMachine', 'text': 'Ln0307.0.option.ApproachTheLight'},\n ]\n\n # MAGICIAN\n\n if jump == 'Atl_Magician':\n self.say('Ln0323.0.text.YouAreInYourTower')\n self.options = [\n {'next': 'Atl_SeekAnswers', 'text': 'Ln0330.0.option.SeekAnswers'},\n {'next': 'Atl_LetItGo', 'text': 'Ln0331.0.option.LetItGo'},\n ]\n\n if jump == 'Atl_SeekAnswers':\n self.say('Ln0336.0.text.YouGoDownTheStairs')\n self.options = [\n {'next': 'Atl_TryTheSpell', 'text': 'Ln0341.0.option.TryTheSpell'},\n {'next': 
'Atl_LetItGo', 'text': 'Ln0331.0.option.LetItGo'},\n ]\n\n if jump == 'Atl_TryTheSpell':\n self.say('Ln0347.0.text.YouReturnToTheTop')\n self.options = [\n {'next': 'Atl_LookCloser', 'text': 'Ln0352.0.option.LookCloser'},\n ]\n\n if jump == 'Atl_LookCloser':\n self.say('Ln0357.0.text.ItsACityACity')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},\n ]\n\n if jump == 'Atl_LetItGo':\n self.say('Ln0371.0.text.YouDecideToLetIt')\n self.options = [\n {'next': 'Atl_Messenger', 'text': 'Ln0297.0.option.TalkToTheMessenger'},\n ]\n\n # MESSENGER\n\n if jump == 'Atl_Messenger':\n self.say('Ln0387.0.text.TheMessengerIsIndeedA')\n self.options = [\n {'set': ['AtlantisDelay1'], 'next': 'Atl_AskAbout', 'text': 'Ln0396.0.option.AskWhatThisIsAbout'},\n {'next': 'Atl_City', 'text': 'Ln0397.0.option.FollowTheMessenger'},\n ]\n\n if jump == 'Atl_AskAbout':\n self.say('Ln0402.0.text.IAmSorryTheMessenger')\n self.options = [\n {'next': 'Atl_City', 'text': 'Ln0397.0.option.FollowTheMessenger'},\n ]\n\n if jump == 'Atl_City':\n self.say('Ln0412.0.text.TheMessengerTakesYouTo')\n self.options = [\n {'next': 'Atl_Throne', 'text': 'Ln0419.0.option.EnterTheThroneRoom'},\n ]\n\n # MEETING THE KING\n\n if jump == 'Atl_Throne':\n self.say('Ln0426.0.text.YouEnterTheThroneRoom')\n self.options = [\n {'set': ['AtlantisDelay2'], 'next': 'Atl_Mosaic', 'text': 'Ln0431.0.option.ExamineTheMosaic'},\n {'next': 'Atl_LookKing', 'text': 'Ln0432.0.option.LookForTheKing'},\n ]\n\n if jump == 'Atl_LookKing':\n self.say('Ln0437.0.text.InAllThisSplendourThe')\n self.options = [\n {'next': 'Atl_Bow', 'text': 'Ln0444.0.option.BowBeforeTheKing'},\n ]\n\n if jump == 'Atl_Mosaic':\n self.say('Ln0449.0.text.TheSheerAmountOfWork')\n self.options = [\n {'next': 'Atl_Bow', 'text': 'Ln0444.0.option.BowBeforeTheKing'},\n ]\n\n if jump == 'Atl_Bow':\n if 'ClassFarmer' in self.state:\n self.say('Ln0461.0.text.NoDoNotBowMy')\n\n if 'ClassMagician' in self.state:\n self.say('Ln0473.0.text.NoDoNotBowMy')\n\n if 'ClassPhysician' in self.state:\n self.say('Ln0485.0.text.NoDoNotBowMy')\n\n if 'ClassPoet' in self.state:\n self.say('Ln0497.0.text.NoDoNotBowMy')\n\n if 'ClassScientist' in self.state:\n self.say('Ln0509.0.text.NoDoNotBowMy')\n\n self.options = [\n {'next': 'Atl_Scroll', 'text': 'Ln0468.0.option.ReadScroll'},\n ]\n\n if jump == 'Atl_Scroll':\n if 'ClassFarmer' in self.state:\n self.say('Ln0521.0.text.YouReadTheScrollWith')\n\n if 'ClassMagician' in self.state:\n self.say('Ln0533.0.text.YouReadTheScrollWith')\n\n if 'ClassPhysician' in self.state:\n self.say('Ln0545.0.text.YouReadTheScrollWith')\n\n if 'ClassPoet' in self.state:\n self.say('Ln0557.0.text.YouReadTheScrollWith')\n\n if 'ClassScientist' in self.state:\n self.say('Ln0569.0.text.YouReadTheScrollWith')\n\n self.options = [\n {'next': 'Atl_Confirm', 'text': 'Ln0528.0.option.ConfirmTheTruth'},\n ]\n\n if jump == 'Atl_Confirm':\n self.say('Ln0581.0.text.TheKingSighsIWas')\n self.options = [\n {'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},\n {'next': 'Atl_Despair', 'text': 'Ln0589.0.option.SayThereIsNoHope'},\n {'next': 'Atl_Curse', 'text': 'Ln0590.0.option.CurseTheGods'},\n ]\n\n if jump == 'Atl_Despair':\n self.say('Ln0595.0.text.MyFriendPoseidonasSaysDo')\n self.options = [\n {'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},\n ]\n\n if jump == 'Atl_Curse':\n self.say('Ln0605.0.text.MyFriendPoseidonasSaysYou')\n self.options = [\n {'next': 'Atl_AskDone', 'text': 'Ln0588.0.option.AskWhatCanBeDone'},\n 
]\n\n if jump == 'Atl_AskDone':\n self.say('Ln0615.0.text.TellMeWhatIsThe')\n self.options = [\n {'set': ['AtlantisArt'], 'next': 'Atl_Art', 'text': 'Ln0620.0.option.Art', 'short': 'Ln0620.0.short.ChooseArt'},\n {'set': ['AtlantisPeople'], 'next': 'Atl_People', 'text': 'Ln0621.0.option.ThePeople', 'short': 'Ln0621.0.short.ChooseThePeople'},\n {'set': ['AtlantisKnowledge'], 'next': 'Atl_Knowledge', 'text': 'Ln0622.0.option.Knowledge', 'short': 'Ln0622.0.short.ChooseKnowledge'},\n ]\n\n if jump == 'Atl_Art':\n self.say('Ln0627.0.text.TheHeartOfAtlantisIs')\n self.options = [\n {'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},\n ]\n\n if jump == 'Atl_People':\n self.say('Ln0641.0.text.TheHeartOfAtlantisIs')\n self.options = [\n {'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},\n ]\n\n if jump == 'Atl_Knowledge':\n self.say('Ln0653.0.text.TheHeartOfAtlantisIs')\n self.options = [\n {'next': 'Atl_How', 'text': 'Ln0636.0.option.How'},\n ]\n\n if jump == 'Atl_How':\n self.say('Ln0667.0.text.ThereIsNotMuchTime')\n self.options = [\n {'set': ['AtlantisDelay3'], 'next': 'Atl_WhyMe', 'text': 'Ln0676.0.option.WhyMe'},\n {'next': 'Atl_YesMyLord', 'text': 'Ln0677.0.option.YesMyLord'},\n ]\n\n if jump == 'Atl_WhyMe':\n self.say('Ln0682.0.text.WhyNotYouWhyA')\n self.options = [\n {'next': 'Atl_YesMyLord', 'text': 'Ln0677.0.option.YesMyLord'},\n ]\n\n if jump == 'Atl_YesMyLord':\n self.say('Ln0692.0.text.PoseidonasLaughsIAmNot')\n self.options = [\n {'next': 'Atl_GoHarbour', 'text': 'Ln0699.0.option.HeadForTheHarbour'},\n ]\n\n # GOING TO THE HARBOUR\n\n if jump == 'Atl_GoHarbour':\n self.say('Ln0706.0.text.EscortedByTheSameMessenger')\n self.options = [\n {'next': 'Atl_GoChariot', 'text': 'Ln0711.0.option.TakeTheChariotToGet'},\n {'set': ['AtlantisDelay4'], 'next': 'Atl_GoWalk', 'text': 'Ln0712.0.option.WalkToSeeTheCity'},\n ]\n\n if jump == 'Atl_GoWalk':\n self.say('Ln0717.0.text.YouDecideToTakeThe')\n self.options = [\n {'set': ['AtlantisMessengerYes'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},\n ]\n\n if jump == 'Atl_GoChariot':\n self.say('Ln0731.0.text.YouGetOnTheChariot')\n self.options = [\n {'next': 'Atl_GetUp', 'text': 'Ln0738.0.option.GetUp'},\n {'next': 'Atl_Examine', 'text': 'Ln0739.0.option.ExamineTheMessenger'},\n ]\n\n if jump == 'Atl_GetUp':\n self.say('Ln0744.0.text.YouGetUpTheMessenger')\n self.options = [\n {'next': 'Atl_NoTime', 'text': 'Ln0749.0.option.KeepGoing'},\n {'next': 'Atl_Examine', 'text': 'Ln0739.0.option.ExamineTheMessenger'},\n ]\n\n if jump == 'Atl_Examine' and 'ClassPhysician' in self.state:\n self.say('Ln0755.0.text.HeHasTwistedHisLeg')\n self.options = [\n {'set': ['AtlantisMessengerYes'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},\n ]\n\n if jump == 'Atl_Examine' and 'ClassPhysician' not in self.state:\n self.say('Ln0765.0.text.TheMessengerSeemsBadlyInjured')\n self.options = [\n {'next': 'Atl_CallHelp', 'text': 'Ln0770.0.option.CallForHelp'},\n {'next': 'Atl_NoTime', 'text': 'Ln0771.0.option.LeaveHimBehind'},\n ]\n\n if jump == 'Atl_CallHelp':\n self.say('Ln0776.0.text.YouCallForHelpAnd')\n self.options = [\n {'set': ['AtlantisMessengerNo', 'AtlantisDelay4'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},\n ]\n\n if jump == 'Atl_NoTime':\n self.say('Ln0789.0.text.YouJustDontHaveThe')\n self.options = [\n {'set': ['AtlantisMessengerNo'], 'next': 'Atl_Hurry', 'text': 'Ln0726.0.option.Hurry'},\n ]\n\n # IN THE HARBOUR\n\n if jump == 'Atl_Hurry':\n if 'AtlantisMessengerYes' in self.state:\n 
self.say('Ln0803.0.text.HurryingAsMuchAsPossible')\n\n if 'AtlantisMessengerNo' in self.state:\n self.say('Ln0814.0.text.HurryingAsMuchAsPossible')\n\n self.options = [\n {'next': 'Atl_Speech', 'text': 'Ln0808.0.option.SpeakToTheCaptains'},\n {'next': 'Atl_LoadShips', 'text': 'Ln0809.0.option.LoadTheShips'},\n ]\n\n if jump == 'Atl_Speech':\n self.say('Ln0825.0.text.YouSpeakBrieflyButWith')\n self.options = [\n {'next': 'Atl_LoadShips', 'text': 'Ln0809.0.option.LoadTheShips'},\n ]\n\n if jump == 'Atl_LoadShips' and 'AtlantisArt' in self.state:\n self.say('Ln0835.0.text.YouGiveTheOrderTo')\n self.options = [\n {'next': 'Atl_SaveSculptures', 'text': 'Ln0842.0.option.MostlySculptures'},\n {'next': 'Atl_SaveBooks', 'text': 'Ln0843.0.option.MostlyBooks'},\n {'next': 'Atl_SavePaintings', 'text': 'Ln0844.0.option.MostlyPaintings'},\n {'next': 'Atl_SaveBalance', 'text': 'Ln0845.0.option.AnEvenBalance'},\n ]\n\n if jump == 'Atl_SaveSculptures':\n self.say('Ln0850.0.text.AhTheSculpturalMasterpiecesOf')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_SaveBooks':\n self.say('Ln0862.0.text.FromTheAncientMythsOf')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_SavePaintings':\n self.say('Ln0874.0.text.AtlanteanPaintingBeganOnCave')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_SaveBalance':\n self.say('Ln0886.0.text.YouTryToSaveA')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_LoadShips' and 'AtlantisPeople' in self.state:\n self.say('Ln0898.0.text.YouWantToSaveThe')\n self.options = [\n {'next': 'Atl_SaveFamilies', 'text': 'Ln0905.0.option.TheSailorsFamilies'},\n {'next': 'Atl_SaveCelebs', 'text': 'Ln0906.0.option.FamousIndividuals'},\n {'next': 'Atl_SaveRandom', 'text': 'Ln0907.0.option.WhoeverIsClosest'},\n ]\n\n if jump == 'Atl_SaveFamilies':\n self.say('Ln0912.0.text.YouTellTheSailorsTo')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_SaveCelebs':\n self.say('Ln0924.0.text.YouSendOutSailorsTo')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_SaveRandom':\n self.say('Ln0936.0.text.YouHaveToBePractical')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_LoadShips' and 'AtlantisKnowledge' in self.state:\n self.say('Ln0948.0.text.ThePeopleOfAtlantisAre')\n self.options = [\n {'next': 'Atl_SaveFamiliesTwo', 'text': 'Ln0957.0.option.OfCourse'},\n {'next': 'Atl_InsaneCruelBastard', 'text': 'TermDlg.Common.No2'},\n ]\n\n if jump == 'Atl_SaveFamiliesTwo':\n self.say('Ln0963.0.text.TheSailorsAreOverjoyedAnd')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_InsaneCruelBastard':\n self.say('Ln0977.0.text.YouSpeakOfTheImportance')\n self.options = [\n {'next': 'Atl_Sail', 'text': 'Ln0857.0.option.GetReadyToSetSail'},\n ]\n\n if jump == 'Atl_Sail' and 'AtlantisMessengerYes' in self.state:\n self.say('Ln0991.0.text.TheShipsAreReadyThe')\n self.options = [\n {'next': 'Atl_StayBehind', 'text': 'Ln1000.0.option.StayBehindSoHeCan'},\n {'next': 'Atl_GoodbyeMessenger', 'text': 'Ln1001.0.option.SayGoodbye'},\n ]\n\n if jump == 'Atl_GoodbyeMessenger':\n self.say('Ln1006.0.text.HeavyHeartedYouSayGoodbye')\n 
self.options = [\n {'next': 'Atl_SetSail', 'text': 'Ln1015.0.option.SetSail'},\n ]\n\n if jump == 'Atl_StayBehind':\n self.say('Ln1020.0.text.ItIsNotEasyTo')\n self.options = [\n {'next': 'Atl_Watch', 'text': 'Ln1027.0.option.WatchingTheShipsTakeOff'},\n {'next': 'Atl_Tavern', 'text': 'Ln1028.0.option.InATavern'},\n {'next': 'Atl_Palace', 'text': 'Ln1029.0.option.InThePalace'},\n ]\n\n if jump == 'Atl_Watch':\n self.say('Ln1034.0.text.YouSitInTheHarbour')\n self.options = [\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},\n ]\n\n if jump == 'Atl_Tavern':\n self.say('Ln1052.0.text.YouSitDownInA')\n self.options = [\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},\n ]\n\n if jump == 'Atl_Palace':\n self.say('Ln1072.0.text.PoseidonasGreetsYouLikeAn')\n self.options = [\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},\n ]\n\n if jump == 'Atl_Sail' and 'AtlantisMessengerNo' in self.state:\n self.say('Ln1088.0.text.TheShipsAreReadyOne')\n self.options = [\n {'next': 'Atl_SetSail', 'text': 'Ln1015.0.option.SetSail'},\n ]\n\n if jump == 'Atl_SetSail':\n self.say('Ln1100.0.text.TheTimeHasComeYou')\n self.options = [\n {'next': 'Atl_LookLand', 'text': 'Ln1107.0.option.SailOnward'},\n ]\n\n if jump == 'Atl_LookLand':\n self.say('Ln1112.0.text.DaysPassTerribleWavesShake')\n self.options = [\n {'next': 'Atl_Land', 'text': 'Ln1119.0.option.FindANewHome'},\n ]\n\n if jump == 'Atl_Land':\n if 'ClassFarmer' in self.state:\n self.say('Ln1124.0.text.OneDayYouComeUpon')\n\n if 'ClassMagician' in self.state:\n self.say('Ln1142.0.text.OneDayYouComeUpon')\n\n if 'ClassPhysician' in self.state:\n self.say('Ln1160.0.text.OneDayYouComeUpon')\n\n if 'ClassPoet' in self.state:\n self.say('Ln1178.0.text.OneDayYouComeUpon')\n\n if 'ClassScientist' in self.state:\n self.say('Ln1196.0.text.OneDayYouComeUpon')\n\n self.options = [\n {'next': 'MessageBoardInterface_On', 'text': 'Ln0039.0.option.IOpenMyEyes', 'short': 'Ln1047.0.short.End'},\n ]\n\n if jump == 'MessageBoardInterface_On':\n self.goto('Atl_Start')\n return\n\n self.choices = [self.locale[o.get('short', o['text'])] for o in self.options]\n\n\nif __name__ == '__main__':\n bot.run()\n","sub_path":"atlantis/atlantis.py","file_name":"atlantis.py","file_ext":"py","file_size_in_byte":26551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353758189","text":"import numpy\nimport scipy.special\nimport codecs, json\n\n# neural network class\nclass neuralNetwork:\n def __init__(self):\n # read the parameters from the json file\n jsonString = codecs.open(\"params.json\", 'r', encoding='utf-8').read()\n self.params = json.loads(jsonString)\n\n # set params from json\n self.inodes = self.params[\"input_nodes\"]\n self.hnodes = self.params[\"hidden_nodes\"]\n self.onodes = self.params[\"output_nodes\"]\n self.lr = self.params[\"learning_rate\"]\n\n self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n self.activation_function = lambda x: scipy.special.expit(x)\n\n def train(self, inputs_list, targets_list):\n # convert inputs list to 2d array\n inputs = numpy.array(inputs_list, ndmin=2).T\n targets = numpy.array(targets_list, ndmin=2).T\n \n # calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, 
inputs)\n\n # calculate the signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n\n # calculate the signals emerging from final output\n final_outputs = self.activation_function(final_inputs)\n\n # calculate output layer error\n output_errors = targets - final_outputs\n\n # calculate hidden layer error\n hidden_errors = numpy.dot(self.who.T, output_errors)\n\n # update weights for links between hidden and output\n self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))\n\n # update weights for links between the input and hidden layers\n self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))\n\n def query(self, inputs_list):\n # convert inputs list to 2d array\n inputs = numpy.array(inputs_list, ndmin=2).T\n \n # calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n\n # calculate the signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n\n # calculate the signals emerging from final output\n final_outputs = self.activation_function(final_inputs)\n\n return final_outputs\n\n def loadconfig(self):\n # set weights from json\n if self.hnodes == self.params[\"hidden_nodes\"] and self.inodes == self.params[\"input_nodes\"] and self.onodes == self.params[\"output_nodes\"]:\n self.wih = numpy.array(self.params[\"weights\"][\"xor\"][\"wih\"])\n self.who = numpy.array(self.params[\"weights\"][\"xor\"][\"who\"])\n\n def saveconfig(self, category):\n # save params to the file\n params = {\n \"input_nodes\": self.inodes,\n \"output_nodes\": self.onodes,\n \"hidden_nodes\": self.hnodes,\n \"learning_rate\": self.lr,\n \"weights\": {\n category: {\n \"wih\": self.wih,\n \"who\": self.who\n }\n }\n }\n f = open(\"params.json\", \"w\")\n f.write(json.dumps(params, cls=NumpyEncoder))\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, numpy.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"157218303","text":"import pandas as pd\nimport pytest\n\nfrom pandera import errors\nfrom pandera import (\n Column, DataFrameSchema, Index, MultiIndex, Check, DateTime, Float, Int,\n String)\n\n\ndef test_column():\n schema = DataFrameSchema({\n \"a\": Column(Int, Check(lambda x: x > 0, element_wise=True))\n })\n data = pd.DataFrame({\"a\": [1, 2, 3]})\n assert isinstance(schema.validate(data), pd.DataFrame)\n\n\ndef test_index_schema():\n schema = DataFrameSchema(\n columns={},\n index=Index(\n Int, [\n Check(lambda x: 1 <= x <= 11, element_wise=True),\n Check(lambda index: index.mean() > 1)]\n ))\n df = pd.DataFrame(index=range(1, 11), dtype=\"int64\")\n assert isinstance(schema.validate(df), pd.DataFrame)\n\n with pytest.raises(errors.SchemaError):\n schema.validate(pd.DataFrame(index=range(1, 20)))\n\n\ndef test_multi_index_columns():\n schema = DataFrameSchema({\n (\"zero\", \"foo\"): Column(Float, Check(lambda s: (s > 0) & (s < 1))),\n (\"zero\", \"bar\"): Column(\n String, Check(lambda s: s.isin([\"a\", \"b\", \"c\", \"d\"]))),\n 
(\"one\", \"foo\"): Column(Int, Check(lambda s: (s > 0) & (s < 10))),\n        (\"one\", \"bar\"): Column(\n            DateTime, Check(lambda s: s == pd.datetime(2019, 1, 1)))\n    })\n    validated_df = schema.validate(\n        pd.DataFrame({\n            (\"zero\", \"foo\"): [0.1, 0.2, 0.7, 0.3],\n            (\"zero\", \"bar\"): [\"a\", \"b\", \"c\", \"d\"],\n            (\"one\", \"foo\"): [1, 6, 4, 7],\n            (\"one\", \"bar\"): pd.to_datetime([\"2019/01/01\"] * 4)\n        })\n    )\n    assert isinstance(validated_df, pd.DataFrame)\n\n\ndef test_multi_index_index():\n    schema = DataFrameSchema(\n        columns={\n            \"column1\": Column(Float, Check(lambda s: s > 0)),\n            \"column2\": Column(Float, Check(lambda s: s > 0)),\n        },\n        index=MultiIndex(\n            indexes=[\n                Index(Int,\n                      Check(lambda s: (s < 5) & (s >= 0)),\n                      name=\"index0\"),\n                Index(String,\n                      Check(lambda s: s.isin([\"foo\", \"bar\"])),\n                      name=\"index1\"),\n            ]\n        )\n    )\n\n    df = pd.DataFrame(\n        data={\n            \"column1\": [0.1, 0.5, 123.1, 10.6, 22.31],\n            \"column2\": [0.1, 0.5, 123.1, 10.6, 22.31],\n        },\n        index=pd.MultiIndex.from_arrays(\n            [[0, 1, 2, 3, 4], [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\"]],\n            names=[\"index0\", \"index1\"],\n        )\n    )\n\n    validated_df = schema.validate(df)\n    assert isinstance(validated_df, pd.DataFrame)\n\n    # failure case\n    df_fail = df.copy()\n    df_fail.index = pd.MultiIndex.from_arrays(\n        [[-1, 1, 2, 3, 4], [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\"]],\n        names=[\"index0\", \"index1\"],\n    )\n    with pytest.raises(errors.SchemaError):\n        schema.validate(df_fail)\n","sub_path":"tests/test_schema_components.py","file_name":"test_schema_components.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"504068711","text":"\"\"\"\nThis class is used for processing files in the designated directory.\nMethods:\n    get_the_abs_path:\n        :return the dir path or file path based on the input path\n    get_the_paired_seq_file\n        :return the paths of paired input file\n\"\"\"\n\nimport os\nimport re\n\n\nclass FilePorcessor:\n\n    def __init__(self):\n        self._in_path = None\n        self._in_dpath = None\n        self._in_fpath = None\n        self._in_lst_fnames = []\n        self._in_lst_fpaths = []\n        self._in_lst_paired_fnames = []\n        self._in_lst_paired_fpaths = []\n\n    def fit(self, path=None):\n        \"\"\"\n        initialize the self variables in the object\n        :return: self\n        \"\"\"\n        if not path:\n            self._in_path = os.getcwd()\n        else:\n            self._in_path = os.path.abspath(path)\n        self._in_path = self._get_the_abs_path()\n        if os.path.isdir(self._in_path):\n            self._in_dpath = self._in_path\n            self._in_lst_fnames = os.listdir(self._in_dpath)\n            self._in_lst_fpaths = self.get_the_fpath_lst()\n        else:\n            self._in_fpath = self._in_path\n        self._get_the_paired_seq_file_path()\n        return self\n\n    def get_the_fpath_lst(self):\n        lst_fpaths = []\n        for fname in self._in_lst_fnames:\n            fpath = os.path.join(self._in_dpath, fname)\n            lst_fpaths.append(fpath)\n        return lst_fpaths\n\n    def _get_the_abs_path(self):\n        return os.path.abspath(self._in_path)\n\n    def _get_the_paired_seq_file_path(self):\n        \"\"\"\n        search the files in the directory, find the paired seq files, and store them in the dict\n        :return: the list containing dict with paired seq files\n        \"\"\"\n        self._in_lst_fnames.sort()\n        print(\"Find {} files in the directory.\".format(len(self._in_lst_fnames)))\n        # iterate all the file names in the fname list\n        for i in range(0, len(self._in_lst_fnames)-1):\n            dic_pair_fname = dict()\n            dic_pair_fpath = dict()\n            j = i + 1\n            lst_fname_i = re.split(r'_[Rr]?[12]', self._in_lst_fnames[i])[0]\n            # 
search the files behind the ith file, find the one matching the other file of paired file[i]\n            for k in range(j, len(self._in_lst_fnames)):\n                lst_fname_k = re.split(r'_[Rr]?[12]', self._in_lst_fnames[k])[0]\n                if lst_fname_i == lst_fname_k:\n                    dic_pair_fname['fname_r1'] = self._in_lst_fnames[i]\n                    dic_pair_fname['fname_r2'] = self._in_lst_fnames[k]\n                    self._in_lst_paired_fnames.append(dic_pair_fname)\n                    dic_pair_fpath['fpath_r1'] = os.path.join(self._in_dpath, dic_pair_fname['fname_r1'])\n                    dic_pair_fpath['fpath_r2'] = os.path.join(self._in_dpath, dic_pair_fname['fname_r2'])\n                    self._in_lst_paired_fpaths.append(dic_pair_fpath)\n                    break\n\n    def get_paired_seq_fpaths(self):\n        print(self._in_lst_paired_fpaths)\n        print(self._in_lst_paired_fnames)\n        return self._in_lst_paired_fpaths\n\n\n\n\n \n\n","sub_path":"processores/file_processor.py","file_name":"file_processor.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"596164092","text":"from socket import *\nimport header_reader\nimport question_reader\nimport header_writer\nimport answer_writer\nimport sys\nimport os\nimport answer_reader\nimport cache\nfrom easyzone import easyzone\n\n\nport = 53\nip = \"127.0.0.1\"\n\ntype_mapping = {\n    1: \"A\",\n    2: \"NS\",\n    15: \"MX\",\n    16: \"TXT\",\n    6: \"SOA\",\n    5: \"CNAME\",\n    28: \"AAAA\",\n    41: \"OPT\"\n}\n\ntype_reverse_mapping = {\n    \"A\": 1,\n    \"NS\": 2,\n    \"MX\": 15,\n    \"TXT\": 16,\n    \"SOA\": 6,\n    \"CNAME\": 5,\n    \"AAAA\": 28,\n    \"OPT\": 41\n}\n\nclass_mapping = {\n    1: \"IN\",\n    2: \"CS\",\n    3: \"CH\",\n    4: \"HS\"\n}\n\nclass_reverse_mapping = {\n    \"IN\": 1,\n    \"CS\": 2,\n    \"CH\": 3,\n    \"HS\": 4\n}\n\nzones = {}\n\nroot_servers = [\n    \"198.41.0.4\",\n    \"192.228.79.201\",\n    \"192.33.4.12\",\n    \"199.7.91.13\",\n    \"192.203.230.10\",\n    \"192.5.5.241\",\n    \"192.112.36.4\",\n    \"128.63.2.53\",\n    \"192.36.148.17\",\n    \"192.58.128.30\",\n    \"193.0.14.129\",\n    \"199.7.83.42\",\n    \"202.12.27.33\"\n]\n\nroot = cache.newNode()\n\n\ndef get_response(request, nameserver):\n    clientSock = socket(AF_INET, SOCK_DGRAM)\n    clientSock.sendto(request, (nameserver, 53))\n    response = clientSock.recv(512)\n    return response\n\ndef get_A_records(authority_nss):\n    recs = []\n    for nss in authority_nss:\n        if type_mapping[nss[1]] == \"A\":\n            ns_ip = \"\"\n            for byte in nss[5]:\n                ns_ip += str(byte)\n                ns_ip += \".\"\n            recs.append(ns_ip[:-1])\n    return recs\n\ndef return_empty(request):\n    response = bytearray()\n    response += request[:2]\n    flags = request[2:4]\n    first = flags[0]\n    second = flags[1]\n    first_with_QR = (first | 1 << 7)\n    response += first_with_QR.to_bytes(1, byteorder='big') + second.to_bytes(1, byteorder='big')\n    response += (1).to_bytes(2, byteorder='big')\n    response += (0).to_bytes(2, byteorder='big')\n    response += (0).to_bytes(2, byteorder='big')\n    response += (0).to_bytes(2, byteorder='big')\n    return response\n\n\ndef check_cache(request, servers, domName):\n    server = cache.searchDNSCache(root, domName)\n    if server is not None:\n        print(\"<<<<<<<<<<<>>>>>>>>>>>>\")\n        response = get_response(request, server)\n        if response is not None:\n            if answer_reader.has_answer(response):\n                print(\"----->\", server)\n                return response\n            elif answer_reader.has_nameserver(response):\n                authority_nss = answer_reader.get_records(response)\n                A_record_servers = get_A_records(authority_nss)\n                if A_record_servers is None:\n                    return None\n                # start a fresh visited-server list for this sub-lookup\n                res = recursive_search(request, A_record_servers, domName, [])\n                if res is not None:\n                    print(\"----->\", server)\n                    return 
res\n        else:\n            return None\n\ndef recursive_search(request, servers, domName, alreadyBeen):\n    cacheRes = check_cache(request, servers, domName)\n    if cacheRes is not None:\n        return cacheRes\n\n    for server in servers:\n        if server in alreadyBeen:\n            continue\n        alreadyBeen.append(server)\n        response = get_response(request, server)\n        if response is not None:\n            if answer_reader.has_answer(response):\n                cache.insert(root, domName, server)\n                print(\"----->\", server)\n                return response\n            elif answer_reader.has_nameserver(response):\n                authority_nss = answer_reader.get_records(response)\n                A_record_servers = get_A_records(authority_nss)\n                if A_record_servers is None:\n                    continue\n                res = recursive_search(request, A_record_servers, domName, alreadyBeen)\n                if res is not None:\n                    print(\"----->\", server)\n                    return res\n                else:\n                    continue\n    return None\n\n\n\n\ndef generate_response(request):\n    tid, flags, qcount, acount, _, _, recursion_desired = header_reader.generate_header(request) #hex, int, int\n    domName, type, Class = question_reader.generate_body(request)\n\n    response = bytearray()\n\n    if domName not in zones:\n        if recursion_desired:\n            print(\"<<<<<<<<<<<<>>>>>>>>>>>>>\")\n            alreadyBeen = []\n            response = recursive_search(request, root_servers, domName, alreadyBeen)\n            if response is None:\n                return return_empty(request)\n            else:\n                return response\n        else:\n            return return_empty(request)\n\n    header_writer.generate_header(request, response, domName, type, zones)\n    answer_writer.generate_body(request, response, domName, type, Class, zones)\n\n    return response\n\n\ndef read_zones():\n    zonedir = sys.argv[1]\n    zonefiles = os.listdir(zonedir)\n\n    for f in zonefiles:\n        name = f.rsplit(\".\", 1)[0]\n        zone = easyzone.zone_from_file(name, zonedir+f)\n        zones[name] = zone\n\n\ndef setup_server():\n    serverSocket = socket(AF_INET, SOCK_DGRAM)\n    serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n    serverSocket.bind((ip, port))\n\n    while True:\n        data, addr = serverSocket.recvfrom(512)\n        resp = generate_response(data)\n        serverSocket.sendto(resp, addr)\n\n\nif __name__ == '__main__':\n    read_zones()\n    setup_server()\n","sub_path":"DNS/dns_server.py","file_name":"dns_server.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"260396238","text":"import threading\nimport time\nimport queue\n\n# Create a queue\nq = queue.Queue(maxsize=1000)  # FIFO queue; holds at most 1000 items\n\n\n# q = queue.LifoQueue()  # LIFO queue\n# q = queue.PriorityQueue()  # priority queue\n\n# Create a producer thread\nclass Producter(threading.Thread):\n    def run(self):\n        global q\n        count = 0\n        while True:\n            # q.qsize() returns the total number of items in the queue\n            if q.qsize() < 1000:\n                for i in range(100):\n                    count += 1\n                    msg = f'produced {count}'\n                    # put the data into the queue\n                    q.put(msg)\n                    print(msg)\n            time.sleep(2)\n\n\nclass Customer(threading.Thread):\n    def run(self):\n        global q\n        while True:\n            for i in range(100):\n                msg = q.get()\n                print(f'consumed {msg}')\n            time.sleep(3)\n\n\nif __name__ == '__main__':\n    t1 = Producter()\n    t2 = Customer()\n    t1.start()\n    t2.start()\n","sub_path":"Python多线程和多进程/线程/通信/Queue通信.py","file_name":"Queue通信.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"427547333","text":"# -*- coding: utf-8 -*-\nimport re\nimport hashlib\nimport psutil\n\nfrom amplify.agent.util import subp\nfrom amplify.agent.context import context\nfrom amplify.agent.containers.abstract import AbstractContainer, definition_id\nfrom amplify.agent.containers.nginx.object 
import NginxObject\nfrom amplify.agent.containers.nginx.binary import get_prefix_and_conf_path\nfrom amplify.agent.eventd import INFO\n\n__author__ = \"Mike Belov\"\n__copyright__ = \"Copyright (C) 2015, Nginx Inc. All rights reserved.\"\n__credits__ = [\"Mike Belov\", \"Andrei Belov\", \"Ivan Poluyanov\", \"Oleg Mamontov\", \"Andrew Alexeev\", \"Grant Hulegaard\"]\n__license__ = \"\"\n__maintainer__ = \"Mike Belov\"\n__email__ = \"dedm@nginx.com\"\n\n\nclass NginxContainer(AbstractContainer):\n type = 'nginx'\n\n def discover_objects(self):\n # save current ids\n existing_ids = self.objects.keys()\n\n # discover nginxes\n nginxes = self.find_all()\n\n # process all founded nginxes\n dicovered_ids = []\n while len(nginxes):\n try:\n definition, data = nginxes.pop()\n object_id = definition_id(definition)\n dicovered_ids.append(object_id)\n\n if object_id not in self.objects:\n # new object - push it\n data.update(self.object_configs.get(object_id, {})) # push cloud vars\n new_obj = NginxObject(definition=definition, data=data)\n\n # Send discover event.\n new_obj.eventd.event(\n level=INFO,\n message='nginx-%s master process found, pid %s' % (new_obj.version, new_obj.pid)\n )\n\n self.objects[object_id] = new_obj\n elif object_id in self.objects:\n current_obj = self.objects[object_id]\n\n if current_obj.need_restart:\n # restart object if needed\n context.log.debug('config was changed (pid %s)' % current_obj.pid)\n data.update(self.object_configs.get(object_id, {})) # push cloud vars\n new_obj = NginxObject(definition=definition, data=data)\n\n # Send nginx config changed event.\n new_obj.eventd.event(\n level=INFO,\n message='nginx-%s config changed, read from %s' % (new_obj.version, new_obj.conf_path)\n )\n\n self.objects[object_id] = new_obj\n current_obj.stop(unregister=False) # stop old object\n elif current_obj.pid != data['pid']:\n # check that object pids didn't change\n context.log.debug(\n 'nginx was restarted (pid was %s now %s)' % (\n current_obj.pid, data['pid']\n )\n )\n data.update(self.object_configs.get(object_id, {})) # push cloud vars\n new_obj = NginxObject(definition=definition, data=data)\n\n # Send nginx master process restart/reload event.\n new_obj.eventd.event(\n level=INFO,\n message='nginx-%s master process restarted/reloaded, new pid %s, old pid %s' % (\n new_obj.version,\n new_obj.pid,\n current_obj.pid\n )\n )\n\n self.objects[object_id] = new_obj\n current_obj.stop(unregister=False) # stop old object\n elif current_obj.workers != data['workers']:\n # check workers on reload\n context.log.debug(\n 'nginx was reloaded (workers were %s now %s)' % (\n current_obj.workers, data['workers']\n )\n )\n current_obj.workers = data['workers']\n except psutil.NoSuchProcess:\n context.log.debug('nginx is restarting/reloading, pids are changing, we will wait')\n\n # check if we left something in objects (nginx could be stopped or something)\n dropped_ids = filter(lambda x: x not in dicovered_ids, existing_ids)\n if len(dropped_ids):\n for dropped_id in dropped_ids:\n dropped_object = self.objects[dropped_id]\n context.log.debug('nginx was stopped (pid was %s)' % dropped_object.pid)\n dropped_object.stop() # this is necessary too!\n del self.objects[dropped_id] # this is necessary\n\n @staticmethod\n def find_all():\n \"\"\"\n Tries to find all master processes\n\n :return: list of dict: nginx object definitions\n \"\"\"\n # get ps info\n ps_cmd = 'ps -xa -o pid,ppid,command | egrep \"PID|nginx\" | grep -v egrep'\n try:\n ps, _ = subp.call(ps_cmd)\n except:\n 
context.log.warn(ps_cmd, exc_info=True)\n return []\n\n # calculate total amount of nginx master processes\n # if no masters - return\n masters_amount = len(filter(lambda x: 'nginx: master process' in x, ps))\n if masters_amount == 0:\n return []\n\n # collect all info about processes\n masters = {}\n try:\n for line in ps:\n # parse ps response line:\n # 21355 1 nginx: master process /usr/sbin/nginx\n gwe = re.match(r'\\s*(?P\\d+)\\s+(?P\\d+)\\s+(?P.+)\\s*', line)\n\n # if not parsed - go to the next line\n if not gwe:\n continue\n\n pid, ppid, cmd = int(gwe.group('pid')), int(gwe.group('ppid')), gwe.group('cmd')\n\n # match daemonized master and skip the other stuff\n if 'nginx: master process' in cmd and ppid == 1:\n # get path to binary, prefix and conf_path\n try:\n bin_path, prefix, conf_path, version = get_prefix_and_conf_path(cmd)\n except:\n context.log.error('failed to find bin_path, prefix and conf_path for %s' % cmd)\n context.log.debug('', exc_info=True)\n else:\n # calculate local id\n local_id = hashlib.sha256('%s_%s_%s' % (bin_path, conf_path, prefix)).hexdigest()\n\n if pid in masters:\n masters[pid].update(\n dict(\n version=version,\n bin_path=bin_path,\n conf_path=conf_path,\n prefix=prefix,\n pid=pid,\n local_id=local_id\n )\n )\n else:\n masters[pid] = dict(\n version=version,\n bin_path=bin_path,\n conf_path=conf_path,\n prefix=prefix,\n pid=pid,\n local_id=local_id,\n workers=[]\n )\n # match worker\n elif 'nginx: worker process' in cmd:\n if ppid in masters:\n masters[ppid]['workers'].append(pid)\n else:\n masters[ppid] = dict(workers=[pid])\n except:\n context.log.warn('failed to parse ps results', exc_info=True)\n\n # collect results\n results = []\n for pid, description in masters.iteritems():\n if 'bin_path' in description: # filter workers from nginx with non-executable nginx -V (relative paths, etc)\n definition = {'local_id': description['local_id'], 'type': NginxContainer.type}\n results.append((definition, description))\n return results\n","sub_path":"amplify/agent/containers/nginx/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"92810947","text":"import os\r\n\r\nHOME = os.environ[\"HOME\"]\r\n\r\nZSHRC= \"%s/.zshrc\" % HOME\r\n\r\ndef replaceStringInFile(file, string, replacestr):\r\n try:\r\n f = open(file)\r\n file_str = f.read()\r\n f.close()\r\n file_str = file_str.replace(string, replacestr)\r\n f = open(file, 'w')\r\n f.write(file_str)\r\n f.close()\r\n except IOError as e:\r\n print(\"I/O error accessing {0}: {1}\".format(file, e.strerror))\r\n return False\r\n\r\ndef addSourceOfZshFile(file):\r\n f = open(file, \"r\")\r\n contents = f.readlines()\r\n f.close()\r\n i = 0\r\n for line in contents:\r\n if line.strip() == \"ZSH_THEME=\\\"agnoster\\\"\":\r\n break\r\n i += 1 \r\n contents.insert(i + 1, \". 
~/z.sh\")\r\n f = open(file, \"w\")\r\n contents = \"\".join(contents)\r\n f.write(contents)\r\n f.close() \r\n\r\n \r\nprint(\"Installing zsh...\")\r\nos.system(\"sudo apt-get update\")\r\nos.system(\"sudo apt-get upgrade\")\r\nos.system(\"sudo apt-get install zsh \")\r\n\r\nprint(\"Installing oh my zsh...\")\r\nos.system(\"sh -c \\\"$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)\\\"\")\r\n\r\nprint(\"Installing powerline fonts...\")\r\nos.system(\"sudo apt-get install fonts-powerline\")\r\n\r\n\r\nprint(\"Replacing default with agnoster...\")\r\nreplaceStringInFile(ZSHRC, \"ZSH_THEME=\\\"robbyrussell\\\"\", \"ZSH_THEME=\\\"agnoster\\\"\")\r\n\r\nprint(\"Downloading z.sh for z command of zsh...\")\r\nos.system(\"wget -P ~/ \\\"https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/plugins/z/z.sh\\\"\")\r\n\r\nprint(\"Sourcing z.sh file...\" )\r\naddSourceOfZshFile(ZSHRC) \r\n\r\nprint(\"Activating z command plugin...\") \r\nreplaceStringInFile(ZSHRC,\"plugins (git)\", \"plugins (git z vscode)\")\r\n\r\nprint(\"Sourcing zshrc...\")\r\nos.system(\"source %s\" % ZSHRC)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"496561167","text":"from talon import Context\n\n# from user.knausj_talon.code.snippet_watcher import snippet_watcher\n# import os\n\nctx = Context()\nctx.matches = r\"\"\"\napp: vscode\nmode: user.php\nmode: command\nand code.language: php\n\"\"\"\n# short name -> ide clip name\nctx.lists[\"user.snippets\"] = {\n \"class\": \"nclass\",\n \"constructor\": \"cstr\",\n \"pub funky\": \"pub\",\n \"pub static funky\": \"spub\",\n \"pre funky\": \"pri\",\n \"pre static funky\": \"spri\",\n \"pro funky\": \"pro\",\n \"pro static funky\": \"spro\",\n \"anonymous funky\": \"fun\",\n \"arrow funky\": \"fn\",\n \"test case\": \"test\",\n\n \"very\": \"va\",\n \"pre very\": \"rva\",\n \"pro very\": \"ova\",\n \"pub very\": \"pva\",\n \"parameter\": \"pa\",\n\n \"if\": \"ifs\",\n \"switch\": \"switch\",\n \"for\": \"fr\",\n \"for each\": \"fre\",\n \"for each key value\": \"frek\",\n \"map\": \"ma\",\n \"try catch\": \"tr\",\n \"try catch finally\": \"trf\",\n}\n","sub_path":"apps/vscode/snippets/php_snippets.py","file_name":"php_snippets.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"142035768","text":"import unittest\nfrom nsivideoconvert.interfaces.auth import IAuth\nfrom nsivideoconvert.interfaces.http import IHttp\nfrom nsivideoconvert.auth import Authentication\nfrom nsivideoconvert.http import HttpHandler\n\nclass TestInterface(unittest.TestCase):\n\n def test_auth(self):\n self.assertEquals(IAuth.implementedBy(Authentication), True)\n self.assertEquals(sorted(IAuth.names()), ['add_user',\n 'authenticate',\n 'del_user'])\n\n def test_handler(self):\n self.assertEquals(IHttp.implementedBy(HttpHandler), True)\n self.assertEquals(sorted(IHttp.names()), ['get',\n 'get_current_user',\n 'post',])\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"nsivideoconvert/tests/testInterface.py","file_name":"testInterface.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"260970593","text":"# Author: C.L\n# Network: RBMs + DAE + classifier\n\nfrom 
sklearn.metrics._classification import accuracy_score\nimport pandas as pd\n\nfrom .denoising_AE import *\nfrom .utils import *\nfrom .dbn.tensorflow import SupervisedDBNClassification\n\n# define proposed algorithm\n\"\"\"Unsupervised pretraining only\"\"\"\ndef DAS(tmp_feature):\n label_attr = tmp_feature[:, -1].astype(np.float32) # load the class-label column\n data_atrr = tmp_feature[:, :-1].astype(np.float32) # load the feature columns of each row\n data_atrr = data_atrr / data_atrr.max(axis=0)\n\n # Pretrain(Graph 0)\n weights = []\n reconstruction_error = []\n with tf.variable_scope('deepBM'):\n classifier = SupervisedDBNClassification(hidden_layers_structure=[32, 32], # list of RBM hidden layer sizes\n learning_rate_rbm=0.001,\n learning_rate=0.01,\n n_epochs_rbm=20,\n n_iter_backprop=100,\n batch_size=32,\n activation_function='relu', # relu suits stacked RBMs better than sigmoid\n dropout_p=0.1)\n # RBM fit\n classifier.fit(data_atrr, weights, reconstruction_error)\n \"\"\"infer\"\"\"\n activations = data_atrr\n for i in range(len(weights)):\n activations = transform_relu(activations, weights[i]['w'], weights[i]['b'])\n with tf.Session() as sess:\n X_train_dae = sess.run(activations)\n\n\n # hyperparameter settings\n weights1 = {'w': [], 'b': []}\n input_units = int(X_train_dae.shape[1]) # number of DAE input units\n structure = [16]\n n_samples = int(X_train_dae.shape[0])\n training_epochs = 20\n batch_size = 16\n display_step = 1\n dae_weights = [] # stores the DAE pretrained weights\n dae_bias = [] # stores the DAE pretrained biases\n activations = X_train_dae\n # build and train DAE\n for hidden_units in structure:\n with tf.variable_scope('DAE'):\n autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=input_units, n_hidden=hidden_units,\n transfer_function=tf.nn.softplus,\n optimizer=tf.train.AdamOptimizer(learning_rate=0.00005), scale=0.01)\n print(\"[START] DAE training step:\")\n current_weights2 = tf.global_variables() # sanity check that the weight variables exist\n for epoch in range(training_epochs):\n cost = 0.\n total_batch = int(n_samples / batch_size)\n for i in range(total_batch):\n batch_xs = get_random_block_from_data(activations, batch_size) # sampling without replacement\n cost = autoencoder.partial_fit(batch_xs) # compute the loss and update the weights here\n print(\">> Epoch %d finished \\tDAE training loss %f\" % (epoch, cost))\n\n weights1['w'].append(autoencoder.sess.run(tf.transpose(autoencoder.weights['w1'])))\n weights1['b'].append(autoencoder.sess.run(autoencoder.weights['b1']))\n\n input_units = hidden_units\n # save the weight parameters\n np.savez('./DAS_logs/savedmodel',\n weights[0]['w'], weights[0]['b'],\n weights[1]['w'], weights[1]['b'],\n weights1['w'][0], weights1['b'][0])\n\n","sub_path":"Unsupervised_Pretraining/DAS_pretraining.py","file_name":"DAS_pretraining.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165799168","text":"# pylint: disable=missing-docstring,redefined-outer-name,protected-access\nimport pytest\nimport torch\n\nfrom raylab.utils.debug import fake_batch\n\n\n@pytest.fixture\ndef config():\n return {\"true_model\": True, \"env\": \"Navigation\", \"grad_estimator\": \"PD\"}\n\n\n@pytest.fixture\ndef navigation_env(envs):\n return envs[\"Navigation\"]\n\n\n@pytest.fixture\ndef policy_and_env(mapo_policy, navigation_env, config):\n env = navigation_env({})\n policy = mapo_policy(env.observation_space, env.action_space, config)\n policy.set_reward_from_config(policy.config[\"env\"], policy.config[\"env_config\"])\n policy.set_transition_kernel(env.transition_fn)\n return policy, env\n\n\ndef test_model_output(policy_and_env):\n policy, env = policy_and_env\n obs = 
policy.observation_space.sample()[None]\n act = policy.action_space.sample()[None]\n obs, act = map(policy.convert_to_tensor, (obs, act))\n obs, act = map(lambda x: x.requires_grad_(True), (obs, act))\n\n torch.manual_seed(42)\n sample, logp = policy.loss_actor.model(obs, act)\n torch.manual_seed(42)\n next_obs, log_prob = env.transition_fn(obs, act)\n assert torch.allclose(sample, next_obs)\n assert torch.allclose(logp, log_prob)\n assert sample.grad_fn is not None\n assert logp.grad_fn is not None\n\n\ndef test_madpg_loss(policy_and_env):\n policy, _ = policy_and_env\n batch = policy.lazy_tensor_dict(\n fake_batch(policy.observation_space, policy.action_space, batch_size=10)\n )\n\n loss, info = policy.loss_actor(batch)\n assert isinstance(info, dict)\n assert loss.shape == ()\n assert loss.dtype == torch.float32\n assert loss.grad_fn is not None\n\n policy.module.zero_grad()\n loss.backward()\n assert all(\n p.grad is not None\n and torch.isfinite(p.grad).all()\n and not torch.isnan(p.grad).all()\n for p in policy.module.actor.parameters()\n )\n","sub_path":"tests/agents/mapo/test_true_model.py","file_name":"test_true_model.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"65662537","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nimport uncertainties as unc\nfrom uncertainties import unumpy\n\ndef funzione (t, A, tau, w, phi, Vbias) :\n return A * np.exp(-t/tau) * np.cos(w*t+phi) + Vbias\n\ndef derivata (t, A, tau, w, phi, Vbias) :\n return -A * np.exp(-t/tau) * (np.cos(w*t+phi)/tau + w*np.sin(w*t+phi))\n\ndef chi_quadro (y, dy, t, popt) :\n return np.sum(((y-funzione(t, *popt)) / dy)**2)\n \n \nC = unumpy.uarray(0.1e-6, 0.01e-6)\ntempo, dt, Vc, dVc = np.genfromtxt(\"alluminio pieno.txt\", unpack = True, skip_header=1, skip_footer = 14)\ntempo = tempo*10**-6\ndt = dt*10**-6\ndVc_eff = dVc\np0 = (300, 0.02, 2000, np.pi/2, 489)\n\nfor i in range(5) :\n popt, pcov = scipy.optimize.curve_fit(funzione, tempo, Vc, p0, dVc_eff, absolute_sigma = True)\n chi_2 = chi_quadro(Vc, dVc, tempo, popt)\n dVc_eff = np.sqrt(dVc**2 + (dt*derivata(tempo, *popt))**2)\n\na = [\"A\", \"tau\", \"w\", \"phi\", \"Vbias\"]\nerrore = np.sqrt(np.diag(pcov))\nprint(\"chi2 rid =\", chi_2/237)\nfor i in range (len(a)) :\n print(a[i] + \"\\t\" + str(popt[i]) + \" +- \" + str(errore[i]))\n \ntau = unumpy.uarray(popt[1], errore[1])\nw = unumpy.uarray(popt[2], errore[2])\nw0_quad = w**2 + 1/tau**2 \nL = 1/(C*w0_quad)\nprint(\"T =\", 2*np.pi/w)\nprint(\"L =\", L)\nprint(\"r =\", 2*L/tau)\nprint(\"Qef =\", w*tau/2)\n\n\nt = np.linspace(0, 0.016, 4000)\nplt.figure()\nplt.subplot(211)\nplt.title(\"Ferro laminato\")\nplt.ylabel(\"ddp [digit]\")\nplt.xlabel(\"t [ms]\")\nplt.errorbar(tempo, Vc, dVc, dt, fmt = '.', label = \"Data\")\nplt.plot(t, funzione(t,*popt), label = \"Fit\")\nplt.legend()\n\nplt.subplot(212)\nplt.title(\"Residui normalizzati\")\nplt.xlabel(\"t [ms]\")\nplt.errorbar(tempo, (Vc-funzione(tempo, *popt))/dVc, fmt=\".\")\nplt.plot(t, t*0)\nplt.show()","sub_path":"Lab 2/13 Arduino improved e altro/analisi arduino improved.py","file_name":"analisi arduino improved.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"487216416","text":"\"\"\"Unit tests for bluesky.py.\n\nMost tests are via files in testdata/.\n\"\"\"\nimport copy\nfrom unittest.mock import patch\n\nfrom 
oauth_dropins.webutil import testutil, util\nfrom oauth_dropins.webutil.testutil import NOW, requests_response\nimport requests\n\nfrom ..bluesky import (\n as1_to_profile,\n at_uri_to_web_url,\n Bluesky,\n did_web_to_url,\n from_as1,\n to_as1,\n url_to_did_web,\n)\nfrom ..source import ALL, FRIENDS, ME, SELF\n\nACTOR_AS = {\n 'objectType' : 'person',\n 'id': 'did:web:alice.com',\n 'displayName': 'Alice',\n 'image': [{'url': 'https://alice.com/alice.jpg'}],\n 'url': 'https://alice.com/',\n}\nACTOR_PROFILE_VIEW_BSKY = {\n '$type': 'app.bsky.actor.defs#profileView',\n 'did': 'did:web:alice.com',\n 'handle': 'alice.com',\n 'displayName': 'Alice',\n 'avatar': 'https://alice.com/alice.jpg',\n 'description': None,\n}\nACTOR_PROFILE_BSKY = {\n '$type': 'app.bsky.actor.profile',\n 'displayName': 'Alice',\n 'avatar': 'https://alice.com/alice.jpg',\n 'description': None,\n}\n\nPOST_AS = {\n 'objectType': 'activity',\n 'verb': 'post',\n 'object': {\n 'objectType': 'note',\n 'id': 'at://did/app.bsky.feed.post/tid',\n 'url': 'https://bsky.app/profile/did/post/tid',\n 'published': '2007-07-07T03:04:05',\n 'content': 'My original post',\n }\n}\nPOST_HTML = \"\"\"\n
<article class=\"h-entry\">\n <main class=\"e-content\">My original post</main>\n <a class=\"u-url\" href=\"https://bsky.app/profile/did/post/tid\"></a>\n <time class=\"dt-published\" datetime=\"2007-07-07T03:04:05\"></time>\n</article>
\n\"\"\"\nPOST_BSKY = {\n '$type': 'app.bsky.feed.defs#feedViewPost',\n 'post': {\n '$type': 'app.bsky.feed.defs#postView',\n 'uri': 'at://did/app.bsky.feed.post/tid',\n 'cid': 'TODO',\n 'record': {\n '$type': 'app.bsky.feed.post',\n 'text': 'My original post',\n 'createdAt': '2007-07-07T03:04:05',\n },\n 'replyCount': 0,\n 'repostCount': 0,\n 'upvoteCount': 0,\n 'downvoteCount': 0,\n 'indexedAt': '2022-01-02T03:04:05+00:00',\n }\n}\nPOST_AUTHOR_AS = copy.deepcopy(POST_AS)\nPOST_AUTHOR_AS['object'].update({\n 'author': ACTOR_AS,\n 'url': 'https://bsky.app/profile/alice.com/post/tid',\n})\nPOST_AUTHOR_PROFILE_AS = copy.deepcopy(POST_AUTHOR_AS)\nPOST_AUTHOR_PROFILE_AS['object']['author']['url'] = 'https://bsky.app/profile/alice.com'\nPOST_AUTHOR_BSKY = copy.deepcopy(POST_BSKY)\nPOST_AUTHOR_BSKY['post']['author'] = {\n **ACTOR_PROFILE_VIEW_BSKY,\n '$type': 'app.bsky.actor.defs#profileViewBasic',\n}\n\nFACETS = [{\n '$type': 'app.bsky.richtext.facet',\n 'features': [{\n '$type': 'app.bsky.richtext.facet#link',\n 'uri': 'http://my/link',\n }],\n 'index' : {\n 'byteStart' : 3,\n 'byteEnd' : 11,\n },\n}]\nFACET_TAG = {\n 'objectType': 'article',\n 'url': 'http://my/link',\n 'displayName': 'original',\n 'startIndex': 3,\n 'length': 8,\n}\nEMBED_EXTERNAL = {\n 'description': '',\n 'title': 'a link',\n 'uri': 'http://my/link',\n}\nEMBED_EXTERNAL_ATTACHMENT = {\n 'objectType': 'link',\n 'url': 'http://my/link',\n 'displayName': 'a link',\n}\nPOST_AS_EMBED = {\n **POST_AS['object'],\n 'attachments': [EMBED_EXTERNAL_ATTACHMENT],\n}\nPOST_BSKY_EMBED = copy.deepcopy(POST_BSKY)\nPOST_BSKY_EMBED['post']['record']['embed'] = {\n '$type': 'app.bsky.embed.external',\n 'external': {\n '$type': 'app.bsky.embed.external#external',\n **EMBED_EXTERNAL,\n },\n}\nPOST_BSKY_EMBED['post']['embed'] = {\n '$type': 'app.bsky.embed.external#view',\n 'external': {\n '$type': 'app.bsky.embed.external#viewExternal',\n **EMBED_EXTERNAL,\n },\n}\nPOST_AS_IMAGES = copy.deepcopy(POST_AS)\nPOST_AS_IMAGES['object']['image'] = [{\n 'url': 'http://my/pic',\n 'displayName': 'my alt text',\n}]\nPOST_BSKY_IMAGES = copy.deepcopy(POST_BSKY)\nPOST_BSKY_IMAGES['post']['embed'] = {\n '$type': 'app.bsky.embed.images#view',\n 'images': [{\n '$type': 'app.bsky.embed.images#viewImage',\n 'alt': 'my alt text',\n 'fullsize': 'http://my/pic',\n 'thumb': 'http://my/pic',\n }],\n}\nPOST_BSKY_IMAGES['post']['record']['embed'] = {\n '$type': 'app.bsky.embed.images',\n 'images': [{\n '$type': 'app.bsky.embed.images#image',\n 'alt': 'my alt text',\n 'image': 'TODO',\n }],\n}\n\nREPLY_AS = {\n 'objectType': 'activity',\n 'verb': 'post',\n 'object': {\n 'objectType': 'comment',\n 'published': '2008-08-08T03:04:05',\n 'content': 'I hereby reply to this',\n 'id': 'at://did/app.bsky.feed.post/tid',\n 'url': 'https://bsky.app/profile/did/post/tid',\n 'inReplyTo': [{\n 'id': 'at://did/app.bsky.feed.post/parent-tid',\n 'url': 'https://bsky.app/profile/did/post/parent-tid',\n }],\n },\n}\nREPLY_HTML = \"\"\"\n
<article class=\"h-entry\">\n <main class=\"e-content\">I hereby reply to this</main>\n <a class=\"u-in-reply-to\" href=\"https://bsky.app/profile/did/post/parent-tid\"></a>\n <a class=\"u-url\" href=\"https://bsky.app/profile/did/post/tid\"></a>\n <time class=\"dt-published\" datetime=\"2008-08-08T03:04:05\"></time>\n</article>
\n\"\"\"\nREPLY_BSKY = copy.deepcopy(POST_BSKY)\nREPLY_BSKY['post'].update({\n 'uri': 'at://did/app.bsky.feed.post/tid',\n 'record': {\n '$type': 'app.bsky.feed.post',\n 'text': 'I hereby reply to this',\n 'createdAt': '2008-08-08T03:04:05',\n 'reply': {\n '$type': 'app.bsky.feed.post#replyRef',\n 'root': {\n '$type': 'com.atproto.repo.strongRef',\n 'uri': '',\n 'cid': 'TODO',\n },\n 'parent': {\n '$type': 'com.atproto.repo.strongRef',\n 'uri': 'at://did/app.bsky.feed.post/parent-tid',\n 'cid': 'TODO',\n },\n },\n },\n})\n\nREPOST_AS = {\n 'objectType': 'activity',\n 'verb': 'share',\n 'actor': {\n 'objectType' : 'person',\n 'id': 'did:web:bob.com',\n 'displayName': 'Bob',\n 'url': 'https://bsky.app/profile/bob.com',\n },\n # 'content': 'A compelling post',\n 'object': POST_AUTHOR_PROFILE_AS['object'],\n}\n# REPOST_HTML = \"\"\"\n#
<article class=\"h-entry\">\n# <main class=\"e-content\">A compelling post</main>\n# <a class=\"u-repost-of\" href=\"https://bsky.app/profile/alice.com/post/tid\"></a>\n# </article>
\n# \"\"\"\nREPOST_BSKY = copy.deepcopy(POST_AUTHOR_BSKY)\nREPOST_BSKY['reason'] = {\n '$type': 'app.bsky.feed.defs#reasonRepost',\n 'by': {\n '$type': 'app.bsky.actor.defs#profileViewBasic',\n 'did': 'did:web:bob.com',\n 'handle': 'bob.com',\n 'displayName': 'Bob',\n },\n 'indexedAt': NOW.isoformat(),\n}\n\nTHREAD_AS = copy.deepcopy(POST_AS)\nTHREAD_AS['object']['replies'] = [REPLY_AS['object']]\nTHREAD_BSKY = {\n '$type' : 'app.bsky.feed.defs#threadViewPost',\n 'post' : POST_AUTHOR_BSKY['post'],\n 'replies': [REPLY_BSKY['post']],\n}\n\nclass BlueskyTest(testutil.TestCase):\n\n def setUp(self):\n self.bs = Bluesky('handull', access_token='towkin')\n util.now = lambda **kwargs: testutil.NOW\n\n def assert_equals(self, expected, actual, ignore=(), **kwargs):\n ignore = list(ignore) + ['uri']\n return super().assert_equals(expected, actual, ignore=ignore, **kwargs)\n\n def test_url_to_did_web(self):\n for bad in None, '', 'foo', 'did:web:bar.com':\n with self.assertRaises(ValueError):\n url_to_did_web(bad)\n\n self.assertEqual('did:web:foo.com', url_to_did_web('https://foo.com'))\n self.assertEqual('did:web:foo.com', url_to_did_web('https://foo.com/'))\n self.assertEqual('did:web:foo.com', url_to_did_web('https://foo.com:3000'))\n self.assertEqual('did:web:foo.bar.com', url_to_did_web('https://foo.bar.com/baz/baj'))\n\n def test_did_web_to_url(self):\n for bad in None, '', 'foo' 'https://bar.com', 'did:web:foo.com:path':\n with self.assertRaises(ValueError):\n did_web_to_url(bad)\n\n self.assertEqual('https://foo.com/', did_web_to_url('did:web:foo.com'))\n self.assertEqual('https://foo.bar.com/', did_web_to_url('did:web:foo.bar.com'))\n\n def test_user_url(self):\n self.assertEqual('https://bsky.app/profile/snarfed.org',\n Bluesky.user_url('snarfed.org'))\n\n self.assertEqual('https://bsky.app/profile/snarfed.org',\n Bluesky.user_url('@snarfed.org'))\n\n def test_post_url(self):\n self.assertEqual('https://bsky.app/profile/snarfed.org/post/3jv3wdw2hkt25',\n Bluesky.post_url('snarfed.org', '3jv3wdw2hkt25'))\n\n def test_at_uri_to_web_url(self):\n self.assertEqual(None, at_uri_to_web_url(''))\n\n at_uri = 'at://did:plc:asdf/app.bsky.feed.post/3jv3wdw2hkt25'\n self.assertEqual(\n 'https://bsky.app/profile/did:plc:asdf/post/3jv3wdw2hkt25',\n at_uri_to_web_url(at_uri))\n self.assertEqual(\n 'https://bsky.app/profile/snarfed.org/post/3jv3wdw2hkt25',\n at_uri_to_web_url(at_uri, handle='snarfed.org'))\n\n with self.assertRaises(ValueError):\n at_uri_to_web_url('http://not/at/uri')\n\n def test_from_as1_post(self):\n self.assert_equals(POST_BSKY, from_as1(POST_AS), ignore=['uri'])\n\n def test_from_as1_post_with_author(self):\n self.assert_equals(POST_AUTHOR_BSKY, from_as1(POST_AUTHOR_AS))\n\n def test_from_as1_post_html_with_tag_indices_not_implemented(self):\n post_as = copy.deepcopy(POST_AS)\n post_as['object'].update({\n 'content': 'some html',\n 'content_is_html': True,\n 'tags': [FACET_TAG],\n })\n\n with self.assertRaises(NotImplementedError):\n from_as1(post_as)\n\n def test_from_as1_post_without_tag_indices(self):\n post_as = copy.deepcopy(POST_AS)\n post_as['object']['tags'] = [{\n 'url': 'http://my/link',\n }]\n\n expected = copy.deepcopy(POST_BSKY)\n expected['post']['record']['facets'] = copy.deepcopy(FACETS)\n del expected['post']['record']['facets'][0]['index']\n self.assert_equals(expected, from_as1(post_as))\n\n def test_from_as1_post_with_image(self):\n self.assert_equals(POST_BSKY_IMAGES, from_as1(POST_AS_IMAGES))\n\n def test_from_as1_object_vs_activity(self):\n obj = {\n 
'objectType': 'note',\n 'content': 'foo',\n }\n activity = {\n 'verb': 'post',\n 'object': obj,\n }\n self.assert_equals(from_as1(obj), from_as1(activity))\n\n def test_from_as1_actor_handle(self):\n for expected, fields in (\n ('', {}),\n ('fooey.bsky.social', {'username': 'fooey.bsky.social'}),\n ('fooey.com', {'username': 'fooey.com', 'url': 'http://my/url', 'id': 'tag:nope'}),\n ('foo.com', {'url': 'http://foo.com'}),\n ('foo.com', {'url': 'http://foo.com/path'}),\n ):\n self.assert_equals(expected, from_as1({\n 'objectType' : 'person',\n **fields,\n })['handle'])\n\n def test_from_as1_actor_id_not_url(self):\n \"\"\"Tests error handling when attempting to generate did:web.\"\"\"\n self.assertEqual('did:web:foo.com', from_as1({\n 'objectType' : 'person',\n 'id': 'tag:foo.com,2001:bar',\n })['did'])\n\n def test_from_as1_composite_url(self):\n self.assertEqual({\n '$type': 'app.bsky.actor.defs#profileView',\n 'did': 'did:web:rodentdisco.co.uk',\n 'handle': 'rodentdisco.co.uk',\n 'description': None,\n }, from_as1({\n 'objectType' : 'person',\n 'url': {\n \"displayName\": \"my web site\",\n \"value\": \"https://rodentdisco.co.uk/author/dan/\"\n },\n }))\n\n def test_from_as1_embed(self):\n self.assert_equals(POST_BSKY_EMBED, from_as1(POST_AS_EMBED))\n\n def test_from_as1_facet_link_and_embed(self):\n expected = copy.deepcopy(POST_BSKY_EMBED)\n expected['post']['record']['facets'] = FACETS\n\n self.assert_equals(expected, from_as1({\n **POST_AS_EMBED,\n 'tags': [FACET_TAG],\n }))\n\n def test_from_as1_follow_no_actor(self):\n with self.assertRaises(ValueError):\n from_as1({\n 'objectType' : 'activity',\n 'verb': 'follow',\n 'object': 'at://did:plc:foo/com.atproto.actor.profile/123',\n })\n\n def test_from_as1_follow_no_object(self):\n with self.assertRaises(ValueError):\n from_as1({\n 'objectType' : 'activity',\n 'verb': 'follow',\n 'actor': 'at://did:plc:foo/com.atproto.actor.profile/123',\n })\n\n def test_as1_to_profile(self):\n self.assert_equals(ACTOR_PROFILE_BSKY, as1_to_profile(ACTOR_AS))\n\n def test_as1_to_profile_not_actor(self):\n with self.assertRaises(ValueError):\n as1_to_profile(POST_AS)\n\n def test_to_as1_post(self):\n self.assert_equals(POST_AS['object'], to_as1(POST_BSKY))\n\n def test_to_as1_post_with_author(self):\n self.assert_equals(POST_AUTHOR_PROFILE_AS['object'], to_as1(POST_AUTHOR_BSKY))\n\n def test_to_as1_post_type_kwarg(self):\n post_bsky = copy.deepcopy(POST_AUTHOR_BSKY)\n type = post_bsky.pop('$type')\n del post_bsky['post']['$type']\n del post_bsky['post']['author']['$type']\n self.assert_equals(POST_AUTHOR_PROFILE_AS['object'], to_as1(post_bsky, type=type))\n\n def test_to_as1_post_with_image(self):\n self.assert_equals(POST_AS_IMAGES['object'], to_as1(POST_BSKY_IMAGES))\n\n def test_to_as1_missing_objectType(self):\n with self.assertRaises(ValueError):\n to_as1({'foo': 'bar'})\n\n def test_to_as1_unknown_objectType(self):\n with self.assertRaises(ValueError):\n to_as1({'objectType': 'poll'})\n\n def test_to_as1_missing_type(self):\n with self.assertRaises(ValueError):\n to_as1({'foo': 'bar'})\n\n def test_to_as1_unknown_type(self):\n with self.assertRaises(ValueError):\n to_as1({'$type': 'app.bsky.foo'})\n\n def test_to_as1_embed(self):\n self.assert_equals(POST_AS_EMBED, to_as1(POST_BSKY_EMBED))\n\n def test_to_as1_embed_blocked(self):\n self.assertIsNone(to_as1({\n '$type': 'app.bsky.embed.record#viewBlocked',\n 'uri': 'unused',\n }))\n\n def test_to_as1_facet_link_and_embed(self):\n bsky = copy.deepcopy(POST_BSKY_EMBED)\n bsky['post']['record']['facets'] = 
FACETS\n\n expected = {\n **POST_AS_EMBED,\n 'tags': [FACET_TAG],\n }\n self.assert_equals(expected, to_as1(bsky))\n\n def test_constructor_both_access_token_and_app_password_error(self):\n with self.assertRaises(AssertionError):\n Bluesky('handull', access_token='towkin', app_password='pazzwurd')\n\n def test_constructor_access_token(self):\n bs = Bluesky('handull', access_token='towkin')\n self.assertEqual('towkin', bs.access_token)\n\n @patch('requests.post')\n def test_constructor_app_password(self, mock_post):\n mock_post.return_value = requests_response({\n 'handle': 'real.han.dull',\n 'did': 'did:plc:me',\n 'accessJwt': 'towkin',\n })\n\n bs = Bluesky('handull', app_password='pazzwurd')\n self.assertEqual('real.han.dull', bs.handle)\n self.assertEqual('did:plc:me', bs.did)\n self.assertEqual('towkin', bs.access_token)\n\n mock_post.assert_called_once_with(\n 'https://bsky.social/xrpc/com.atproto.server.createSession',\n json={'identifier': 'handull', 'password': 'pazzwurd'},\n headers={'Content-Type': 'application/json'},\n )\n\n @patch('requests.get')\n def test_get_activities_friends(self, mock_get):\n mock_get.return_value = requests_response({\n 'cursor': 'timestamp::cid',\n 'feed': [POST_AUTHOR_BSKY, REPOST_BSKY],\n })\n\n self.assert_equals([\n POST_AUTHOR_PROFILE_AS['object'],\n REPOST_AS,\n ], self.bs.get_activities(group_id=FRIENDS))\n\n mock_get.assert_called_once_with(\n 'https://bsky.social/xrpc/app.bsky.feed.getTimeline',\n json=None,\n headers={\n 'Authorization': 'Bearer towkin',\n 'Content-Type': 'application/json',\n },\n )\n\n @patch('requests.get')\n def test_get_activities_activity_id(self, mock_get):\n mock_get.return_value = requests_response({\n '$type' : 'app.bsky.feed.defs#threadViewPost',\n 'thread': THREAD_BSKY,\n 'replies': [REPLY_BSKY],\n })\n\n self.assert_equals([POST_AUTHOR_PROFILE_AS['object']],\n self.bs.get_activities(activity_id='at://id'))\n mock_get.assert_called_once_with(\n 'https://bsky.social/xrpc/app.bsky.feed.getPostThread?uri=at%3A%2F%2Fid&depth=1',\n json=None,\n headers={\n 'Authorization': 'Bearer towkin',\n 'Content-Type': 'application/json',\n },\n )\n\n def test_get_activities_bad_activity_id(self):\n with self.assertRaises(ValueError):\n self.bs.get_activities(activity_id='not_at_uri')\n\n @patch('requests.get')\n def test_get_activities_self_user_id(self, mock_get):\n mock_get.return_value = requests_response({\n 'cursor': 'timestamp::cid',\n 'feed': [POST_AUTHOR_BSKY],\n })\n\n self.assert_equals([POST_AUTHOR_PROFILE_AS['object']],\n self.bs.get_activities(group_id=SELF, user_id='alice.com'))\n mock_get.assert_called_once_with(\n 'https://bsky.social/xrpc/app.bsky.feed.getAuthorFeed?actor=alice.com',\n json=None,\n headers={\n 'Authorization': 'Bearer towkin',\n 'Content-Type': 'application/json',\n },\n )\n","sub_path":"granary/tests/test_bluesky.py","file_name":"test_bluesky.py","file_ext":"py","file_size_in_byte":16518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353209569","text":"import json\nimport os\nimport copy\nimport random\nimport argparse\nimport numpy as np\nfrom types import SimpleNamespace\nNR = 10\nSOI = 11\nEOI = 12\nSOO = 13\nEOO = 14\n\nchars = '.123456789'\n# chars = '.oT+=|%~@#'\nnum2char = {}\nfor idx, c in enumerate(chars):\n num2char[idx] = c\nnum2char[NR] = \"\\\\n\"\nnum2char[SOI] = \"<\\\\n\"\nnum2char[EOI] = \">\\\\n\"\nnum2char[SOO] = \"[\\\\n\"\nnum2char[EOO] = \"]\\\\n\"\n# num2char[SOO] = \"<\\\\n\"\n# num2char[EOO] = 
\">\\\\n\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--shuffle-train', action='store_true')\nparser.add_argument('--repeat-frames', action='store_true')\nparser.add_argument('--rotate', action='store_true')\nparser.add_argument('--hue', action='store_true')\nparser.add_argument('--flip', action='store_true')\nparser.add_argument('--truncate', action='store_true')\nparser.add_argument('--stretch', action='store_true')\nparser.add_argument('--shear-roll', action='store_true')\nparser.add_argument('--roll', action='store_true')\nparser.add_argument('--back-tr', action='store_true')\nparser.add_argument('--per-frame-random', action='store_true')\nparser.add_argument('--times', type=int, default=0)\nparser.add_argument('--name', type=str, default='arc_raw')\nargs = parser.parse_args()\n\ndef read_json(path_to_jsonfiles):\n alldicts = []\n # for idx, file in enumerate(os.listdir(path_to_jsonfiles), key = os.path.getsize):\n filenames = os.listdir(path_to_jsonfiles)\n files = sorted([os.path.join(path_to_jsonfiles, file) for file in filenames], key=os.path.getsize)\n for idx, file in enumerate(files):\n if 'evaluation' in file:\n # if idx < 1: continue\n if idx == 30: break\n full_filename = file\n with open(full_filename,'r') as fi:\n dict = json.load(fi)\n alldicts.append(dict)\n return alldicts\n\ndef augment(x, args, rands):\n def rotate(x, rotate_times):\n x = copy.deepcopy(x)\n for i in range(rotate_times):\n # import pdb;pdb.set_trace()\n x = [ list(a) for a in zip(*x[::-1])]\n return x\n\n def shift_hue(x, shift_val, order):\n # ok_nums = [a+1 for a in range(9)] # ignore 0\n ok_nums = order\n # z = copy.deepcopy(x)\n def shift (y, shift_val):\n if y == 0:\n return y\n # new_hue_idx = (ok_nums.index(y)+shift_val)\n # y = ok_nums[new_hue_idx % len(ok_nums)]\n y = ok_nums[y-1]\n return y\n\n for idx, l in enumerate(x):\n x[idx] = [shift(num, shift_val) for num in l]\n # import pdb;pdb.set_trace()\n return x\n\n def stretch(x, strx, stry):\n x = np.array(x)\n x = np.repeat(np.repeat(x,strx, axis=0), stry, axis=1)\n if x.shape[0] > 30 or x.shape[1] > 30:\n x = x[:30,:30]\n return x.tolist()\n\n def shear_roll(x, shx, shy):\n # import pdb;pdb.set_trace()\n x = np.array(x)\n for i in range(x.shape[0]):\n x[i,:] = np.roll(x[i,:], i+shx)\n for i in range(x.shape[1]):\n x[:,i] = np.roll(x[:,i], i+shy)\n return x.tolist()\n\n def roll(x, shx, shy):\n x = np.array(x)\n for i in range(x.shape[0]):\n x[i,:] = np.roll(x[i,:], shx)\n for i in range(x.shape[1]):\n x[:,i] = np.roll(x[:,i], shy)\n return x.tolist()\n\n def flip(x, do_flip):\n if do_flip:\n x = [i[::-1] for i in x[::-1]]\n return x\n if args.hue: x = shift_hue(x, rands.shift_val, rands.order)\n if args.flip: x = flip(x, rands.do_flip)\n if args.rotate: x = rotate(x, rands.rotate_times)\n if args.stretch: x = stretch(x, rands.stretch_x, rands.stretch_y)\n if args.roll: x = roll(x, rands.roll_x, rands.roll_y)\n if args.shear_roll: x = shear_roll(x, rands.shear_x, rands.shear_y)\n return x\n\n\ndef gen_seq(dirname, args, back_tr=False, pair=False):\n dicts = read_json(dirname)\n # mto = [d for d in train_dicts if len(d['test']) != 1]\n # import pdb; pdb.set_trace()\n srcs = []\n tgts = []\n for idx, d in enumerate(dicts):\n if not args.train_only:\n train = copy.deepcopy(d['train'])\n test = copy.deepcopy(d['test'])\n else: \n train = copy.deepcopy(d['train'][:-1])\n test = copy.deepcopy(d['train'][-1:])\n if args.shuffle_train:\n train_test = copy.deepcopy(train) + copy.deepcopy(test)\n random.shuffle(train_test)\n train = 
train_test[:-1] # ok\n test = train_test[-1:]\n\n trunc_or_repeat = random.randint(0,2)\n if trunc_or_repeat == 0 and args.truncate:\n # print(len(train))\n until = random.randrange(len(train))\n if until == 0: \n until = 1\n train = train[:until]\n\n elif trunc_or_repeat == 1 and args.repeat_frames:\n for idx, f in enumerate(train):\n if random.randint(0,1) == 1:\n train.insert(idx, copy.deepcopy(f))\n\n\n\n def get_rands():\n rands = SimpleNamespace()\n rands.shift_val = random.randrange(9)+1\n rands.order = list(range(1,10))\n random.shuffle(rands.order)\n rands.rotate_times = random.randrange(2)+1\n rands.stretch_x = random.randrange(3)+2\n rands.stretch_y = random.randrange(3)+2\n rands.shear_x = random.randint(-1,1)\n rands.shear_y = random.randint(-1,1)\n rands.roll_x = random.randint(-3,3)\n rands.roll_y = random.randint(-3,3)\n rands.do_flip = random.randint(0,1) == 1\n return rands\n\n rands = get_rands()\n src = []\n for f in train:\n finp = f['input'] if not back_tr else f['output']\n fout = f['output'] if not back_tr else f['input']\n if args.per_frame_random and random.randint(0,1) == 1:\n rands = get_rands()\n finp = augment(finp, args, rands)\n fout = augment(fout, args, rands)\n \n\n # src.extend([SOI])\n # for l in finp:\n # src.extend(l + [NR,])\n # for l in fout:\n # src.extend(l + [NR,])\n # src.extend([EOI])\n\n src.extend([SOI])\n for l in finp:\n src.extend(l + [NR,])\n src.extend([EOI])\n\n if pair:\n srcs.append(copy.deepcopy(src))\n src = []\n\n\n src.extend([SOO])\n for l in fout:\n src.extend(l + [NR,])\n src.extend([EOO])\n if pair:\n tgts.append(copy.deepcopy(src))\n src = []\n\n tgt = []\n for f in test:\n if 'output' not in f.keys(): \n # test set, put dummy labels\n f['output'] = [[0,],[0,]]\n finp = f['input'] if not back_tr else f['output']\n fout = f['output'] if not back_tr else f['input']\n if args.per_frame_random and random.randint(0,1) == 1:\n rands = get_rands()\n finp = augment(finp, args, rands)\n fout = augment(fout, args, rands)\n\n \n temp_src = []\n temp_src.extend([SOI])\n for l in finp:\n temp_src.extend(l + [NR,])\n temp_src.extend([EOI])\n if pair:\n srcs.append(copy.deepcopy(temp_src))\n else:\n srcs.append(copy.deepcopy(src + temp_src)) # we need to change src if not end seq\n\n # tgt = []\n # src.extend([SOO])\n # for l in fout:\n # tgt.extend(l + [NR,])\n # tgts.append(tgt)\n # src.extend([EOO])\n tgt = []\n tgt.extend([SOO]) \n for l in fout:\n tgt.extend(l + [NR,])\n tgt.extend([EOO])\n tgts.append(tgt)\n # if len(test) >1:import pdb;pdb.set_trace()\n return srcs, tgts\n\n # if not swap_train_test:\n # return srcs, tgts\n # else:\n # return tgts, srcs\n\ndef write_dataset(srcs, tgts, filename):\n with open(filename+\".src\", \"w\") as src_text:\n for src in srcs:\n # src = [str(i) for i in src]back_tr\n src = [num2char[i] for i in src]\n src_text.write(' '.join(src))\n src_text.write(\"\\n\")\n with open(filename+\".tgt\", \"w\") as tgt_text:\n for tgt in tgts:\n # tgt = [str(i) for i in tgt]\n tgt = [num2char[i] for i in tgt]\n tgt_text.write(' '.join(tgt))\n tgt_text.write(\"\\n\")\n\nos.makedirs(args.name, exist_ok=True)\nts = []\ntt = []\ndef append_s_t(tt, ts, folders, args, back_tr, pair):\n for folder in folders:\n for i in range(args.times):\n s, t = gen_seq(folder, to_args, back_tr, pair)\n ts += s\n tt += t\ndef nohue_append(tt, ts, folders, args):\n nohue_args = copy.deepcopy(args)\n nohue_args.hue = False\n nohue_args.times = 1\n append_s_t(tt, ts, folders, nohue_args, back_tr=False, pair=False)\n append_s_t(tt, ts, 
folders, nohue_args, back_tr=True, pair=False)\n # append_s_t(tt, ts, folders, nohue_args, back_tr=False, pair=True)\n # append_s_t(tt, ts, folders, nohue_args, back_tr=True, pair=True)\n return nohue_args\n\ndef nopfr_append(tt, ts, folders, args):\n nopfr_args = copy.deepcopy(args)\n nopfr_args.per_frame_random = False\n append_s_t(tt, ts, folders, nopfr_args, back_tr=False, pair=False)\n append_s_t(tt, ts, folders, nopfr_args, back_tr=True, pair=False)\n # append_s_t(tt, ts, folders, nopfr_args, back_tr=False, pair=True)\n # append_s_t(tt, ts, folders, nopfr_args, back_tr=True, pair=True)\n return nopfr_args\n\n\ndefault_args = SimpleNamespace()\nfor key in vars(args):\n setattr(default_args, key, parser.get_default(key))\n\n# folders = ['training', 'evaluation', 'test']\n# folders = ['training', 'evpaluation']\nfolders = ['training']\n# folders = ['evaluation']\n\n# default\n# default_args.train_only = True\ndefault_args.train_only = False\nappend_s_t(tt, ts, folders, default_args, back_tr=False, pair=False)\nappend_s_t(tt, ts, folders, default_args, back_tr=True, pair=False)\n# append_s_t(tt, ts, folders, default_args, back_tr=False, pair=True)\n# append_s_t(tt, ts, folders, default_args, back_tr=True, pair=True)\n# default_args.train_only = False\n#\n# current args\nto_args = copy.deepcopy(args)\nto_args.train_only = False\nappend_s_t(tt, ts, folders, to_args, back_tr=False, pair=False)\nappend_s_t(tt, ts, folders, to_args, back_tr=True, pair=False)\n# append_s_t(tt, ts, folders, to_args, back_tr=False, pair=True)\n# append_s_t(tt, ts, folders, to_args, back_tr=True, pair=True)\n\n# nohue\nif to_args.hue == True:\n nohue_append(tt, ts, folders, to_args)\nif to_args.per_frame_random == True:\n nopfr_args = nopfr_append(tt, ts, folders, to_args)\n if to_args.hue == True: # note that args passed here perframeandom false\n nohue_append(tt, ts, folders, nopfr_args)\n \n\n# folders = ['training', 'evaluation', 'test']\n# folders = ['training']\n# folders = []\n\n\n\n\n# valid set\nvs, vt = gen_seq('evaluation', default_args)\n\n# finally write to files\nwrite_dataset(ts, tt, args.name+'/train')\nwrite_dataset(vs, vt, args.name+'/valid')\n\n# # easiest dataset, same train-val\n# vs, vt = gen_seq('evaluation', empty_args)\n# write_dataset(vs, vt, args.name+'/valid')\n# write_dataset(vs, vt, args.name+'/train')\n\n'''\ntraining\n9 2\n11 2\n47 2\n59 2\n66 2\n71 2\n124 2\n130 2\n141 2\n170 2\n185 2\n292 2\n308 3\n380 3\n'''\n'''\ndev\n57 2\n60 2\n66 2\n74 2\n138 2\n143 2\n165 2\n181 2\n184 2\n187 2\n216 2\n243 2\n263 2\n311 2\n343 2\n350 2\n372 2\n386 2\n394 2\n'''\n'''\ntest\n17 2\n52 2\n86 2\n97 2\n'''\n'''\ntrain_dicts[0]['train'][0]['output']\n'test'\n train_dicts[0]\n{'test': [{'input': [[4, 0, 0, 0], [0, 0, 0, 4], [4, 4, 0, 0]], \n'output': [[4, 0, 0, 0, 0, 0, 0, 4], [0, 0, 0, 4, 4, 0, 0, 0],\n [4, 4, 0, 0, 0, 0, 4, 4], [4, 4, 0, 0, 0, 0, 4, 4],\n [0, 0, 0, 4, 4, 0, 0, 0], [4, 0, 0, 0, 0, 0, 0, 4]]}], \n 'train': [{'input': [[0, 0, 8, 0], [0, 8, 0, 8], [0, 0, 8, 0]], \n 'output': [[0, 0, 8, 0, 0, 8, 0, 0], [0, 8, 0, 8, 8, 0, 8, 0], \n [0, 0, 8, 0, 0, 8, 0, 0], [0, 0, 8, 0, 0, 8, 0, 0],\n [0, 8, 0, 8, 8, 0, 8, 0], [0, 0, 8, 0, 0, 8, 0, 0]]}, \n {'input': [[0, 0, 3, 3], [0, 3, 0, 3], [3, 3, 3, 0]], \n 'output': [[0, 0, 3, 3, 3, 3, 0, 0], [0, 3, 0, 3, 3, 0, 3, 0], [3, 3, 3, 0, 0, 3, 3, 3], [3, 3, 3, 0, 0, 3, 3, 3], [0, 3, 0, 3, 3, 0, 3, 0], [0, 0, 3, 3, 3, 3, 0, 0]]}, {'input': [[3, 3, 3, 3], [3, 0, 0, 0], [3, 0, 0, 0]], 'output': [[3, 3, 3, 3, 3, 3, 3, 3], [3, 0, 0, 0, 0, 0, 0, 3], [3, 0, 
0, 0, 0, 0, 0, 3], [3, 0, 0, 0, 0, 0, 0, 3], [3, 0, 0, 0, 0, 0, 0, 3], [3, 3, 3, 3, 3, 3, 3, 3]]}]}\n'''\n","sub_path":"arc/vopsprepare_arc.py","file_name":"vopsprepare_arc.py","file_ext":"py","file_size_in_byte":12678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"147487001","text":"\"\"\"MobileShop URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom .views import MobilCreateView,Main_page,HomePage,MobilListView,MobileUpdatView,MobileDeleteView,MobileDetail,ViewOrders\n\nurlpatterns = [\n path(\"create\",MobilCreateView.as_view(),name=\"created\"),\n path(\"\",HomePage,name=\"maines\"),\n path(\"home\",Main_page,name=\"homed\"),\n path(\"list\",MobilListView.as_view(),name=\"listed\"),\n path(\"edit/\",MobileUpdatView.as_view(),name=\"edited\"),\n path(\"delete/\",MobileDeleteView.as_view(),name=\"deleted\"),\n path(\"detail/\",MobileDetail.as_view(),name=\"detailed\"),\n path(\"view\",ViewOrders.as_view(),name=\"viewed\")\n]\n","sub_path":"Mobile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"68452599","text":"import os\nimport shutil\n\nfrom astrometry.net.log import logmsg\n\nclass TempfileMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n # One-time configuration and initialization.\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n request.tempfiles = []\n request.tempdirs = []\n\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n for dirnm in request.tempdirs:\n if os.path.exists(dirnm):\n try:\n shutil.rmtree(dirnm)\n except OSError as e:\n logmsg('Failed to delete temp dir', dirnm, ':', e)\n for fn in request.tempfiles:\n if os.path.exists(fn):\n try:\n os.remove(fn)\n except OSError as e:\n logmsg('Failed to delete temp file', fn, ':', e)\n\n return response\n\n \n","sub_path":"net/tempfile_middleware.py","file_name":"tempfile_middleware.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"203916221","text":"import sys\nfrom io import StringIO\nsys.stdin = StringIO(\"\"\"1 20\"\"\")\n\nfrom math import sqrt\ndef get_squared_nums(n):\n sieve = [True] * int(n/2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[int(i/2)]:\n sieve[int(i*i/2)::i] = [False] * int((n-i*i-1)/(2*i)+1)\n return [4] + [(2*i+1)*(2*i+1) for i in range(1,int(n/2)) if sieve[i]]\n\n# 소수의 제곱만 다뤄도 나눠지는 여부 판별하는 게 전부 커버 가능\n_min, _max = [int(i) for i in input().split()]\ncandidates = list(range(_min, _max + 1))\ntotal_num = _max + 1 - _min\nsquared_nums = get_squared_nums(int(sqrt(_max)))\ndel_num = 0\nsquared_num = 
squared_nums.pop(0)\nwhile True:\n\tdel_num_this_loop = 0\n\tfor i,v in enumerate(candidates):\n\t\tif v%squared_num == 0:\n\t\t\tdel candidates[i - del_num_this_loop]\n\t\t\tdel_num_this_loop += 1\n\tdel_num += del_num_this_loop\n\tif squared_nums:\n\t\tsquared_num = squared_nums.pop(0)\n\telse:\n\t\tbreak\nprint(total_num - del_num)","sub_path":"season_2/week04/1016_slow_kim.py","file_name":"1016_slow_kim.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"444852239","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\nimport scrapy\nfrom scrapy.exceptions import DropItem\nfrom scrapy.pipelines.images import ImagesPipeline\n\n\nclass DongqiudiPipeline:\n def __init__(self):\n myclient = pymongo.MongoClient(\"mongodb://127.0.0.1:27017\")\n myclient.admin.authenticate(\"admin\", \"123456\")\n mydb = myclient['db_dongqiudi']\n self.mycollection = mydb['c_dongqiudi']\n\n\n def process_item(self, item, spider):\n # print('这个是我们获取到的数据:{0}'.format(item))\n data = dict(item)\n self.mycollection.insert_one(data)\n return item\n\nclass DongqiudiImagePipeline(ImagesPipeline):\n def get_media_requests(self, item, info):\n for image_url in item['image_urls']:\n yield scrapy.Request(url=image_url, meta={'item': item})\n\n def item_completed(self, results, item, info):\n image_paths = [x['path'] for ok, x in results if ok]\n if not image_paths:\n raise DropItem(\"改新闻没有图片\")\n return item\n\n def file_path(self, request, response=None, info=None):\n url = request.url\n item = request.meta['item']['title']\n path = item + \".\" + url.split('~')[-2].split('.')[-1]\n return path\n","sub_path":"dongqiudi/dongqiudi/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"5562999","text":"import os\nimport sys\nimport glob\nimport operator as op\nimport itertools as it\nfrom functools import reduce, partial\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn as skl\nfrom sklearn import *\nimport imageio\nfrom numba import jit, njit, prange\n\n\n## ==========================================================\n## Below: New methods for Project 2\n\ndef onehot(targets, classes):\n '''Translates 1D targets array with classes=0,1,2,... into a 2D one-hot np.ndarray.'''\n n = len(targets)\n hot_targets = np.zeros((n, classes))\n for r, i in enumerate(targets):\n hot_targets[r,i] = 1\n return hot_targets\n\ndef add_intercept(X):\n '''Adds intercept column (a column of 1s) to 2D np.ndarray X'''\n n, _ = X.shape\n intercept = np.ones((n,1))\n X = np.hstack((intercept, X))\n return X \n\ndef softmax(z):\n '''Returns an np.ndarray where softmax has been applies on z.'''\n stability_factor = z.max()\n e = np.exp(z-stability_factor)\n return e/np.sum(e, axis=1, keepdims=True)\n\n\ndef scale(X_train, X_test, scale_intercept=False, **kwargs):\n '''Wrapper for skl.preprocessing.StandardScaler. 
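Fits the scaler on X_train only (so no test-set statistics leak in), transforms both arrays, and, unless scale_intercept is set, resets the intercept column to 1 afterwards. 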
**kwargs are forwarded to StandardScaler'''\n scaler = skl.preprocessing.StandardScaler(**kwargs)\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n if not scale_intercept:\n X_train[:,0] = 1\n X_test[:,0] = 1\n return (X_train, X_test)\n\n\n## Above: Project 2\n## ==========================================================\n## Below: Project 1\n\n# From assignment 1:\ndef FrankeFunction(x,y):\n '''Evaluates the Franke function at x,y'''\n term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\n term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\n term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\n term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\n return term1 + term2 + term3 + term4\n\n\ndef franke_sampler(x, y, var_eps=.01):\n '''Generate samples from the FrankeFunction with some optional noise. Returns tuple=(z, f, eps)'''\n f = FrankeFunction(x, y).reshape(len(x), 1)\n eps = np.sqrt(var_eps)*np.random.randn(len(x), 1)\n z = f + eps\n return (z, f, eps)\n\n\ndef image_sampler(file_name, sn, random_state=0):\n '''Draws sn uniformly distributed integer samples from the file_name GEOTIFF image and returns a tuple=(x, y, z, (x_dim, y_dim, z_norm)), where x, y, z are normalized with x_dim, y_dim, z_norm'''\n img = imageio.imread(file_name)\n\n x_dim, y_dim = img.shape\n z_norm = img.max()\n\n np.random.seed(random_state)\n x = np.random.randint(0, x_dim, (sn, 1))\n y = np.random.randint(0, y_dim, (sn, 1))\n z = img[x, y]\n\n return (x/x_dim, y/y_dim, z/z_norm, (x_dim, y_dim, z_norm))\n\n\ndef randmesh(sn=500, x_start=0., y_start=0., x_end=1., y_end=1., random_state=0):\n '''Create a mesh with sn uniform randomly scattered points in the rectangle (x_start, y_start), (x_end, y_end). Returned ndarrays have shape (sn, 1)'''\n np.random.seed(random_state)\n x = np.random.uniform(x_start, x_end, (sn, 1))\n y = np.random.uniform(y_start, y_end, (sn, 1))\n return (x, y)\n\n\ndef randmesh_int(x_end, y_end, x_start=0, y_start=0, sn=500, random_state=0):\n '''Create a mesh with sn uniform randomly scattered points in the rectangle (x_start, y_start), (x_end, y_end). Returned ndarrays have shape (sn, 1)'''\n np.random.seed(random_state)\n x = np.random.randint(x_start, x_end, (sn, 1))\n y = np.random.randint(y_start, y_end, (sn, 1))\n return (x, y)\n\n\ndef make_design_matrix(x, y, pn=5):\n '''Make design matrix with polynomial degree pn in two variables. 
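The returned matrix has len(x) rows and (pn+1)*(pn+2)/2 columns. 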
Rows are on the form Pn(x,y) = [1,x,y,x^2,xy,y^2,x^3,x^2y,...]'''\n X = np.ndarray([len(x), int((pn+1)*(pn+2)/2)])\n\n n_terms = int((pn+1)*(pn+2)/2)\n x_exponents = [0]*n_terms\n y_exponents = [0]*n_terms\n\n xn = yn = 0\n for i in range(pn+1):\n for j in range(i+1):\n y_exponents[yn] = j\n yn += 1\n for j in range(i,-1,-1):\n x_exponents[xn] = j\n xn += 1\n\n for i, (xi, yi) in enumerate(zip(x, y)):\n X[i,:] = [(xi**xn)*(yi**yn) for xn, yn in zip(x_exponents, y_exponents)]\n\n return X\n\n\n# Defining some useful functions\ndef mse(y, y_tilde):\n return np.mean(np.mean((y - y_tilde)**2, axis=1, keepdims=True))\n\ndef r2(y, y_tilde):\n return (1 - sum((y - np.mean(y_tilde, axis=1, keepdims=True))**2)/sum((y - np.mean(y))**2))[0]\n\ndef bias(f, y_tilde):\n return np.mean((f - np.mean(y_tilde, axis=1, keepdims=True))**2)\n\ndef var(y):\n return np.mean(np.var(y, axis=1, keepdims=True))\n\ndef best_r2(mse_array, y):\n return 1 - np.min(mse_array)/np.mean(sum((y - np.mean(y))**2)/len(y))\n\ndef beta_hat_confidence_intervals(X, y, var_eps, ci=95):\n '''Returns an ndarray((3, p)) of confidence intervals for the estimators of y on X'''\n std_err_multipliers = {90:1.645, 95:1.96, 98:2.326, 99:2.576}\n std_err_multiplier = std_err_multipliers[ci]\n n, p = X.shape\n intervals = np.ndarray((p, 3))\n\n XtX_inv = np.linalg.inv(X.T @ X)\n beta_hat = XtX_inv @ X.T @ y\n y_tilde = X @ beta_hat\n var_beta_hat = var_eps*np.sqrt(np.diag(XtX_inv)).reshape(p,1)\n intervals[:,0] = (beta_hat - std_err_multiplier*np.sqrt(var_beta_hat)).ravel()\n intervals[:,1] = beta_hat.ravel()\n intervals[:,2] = (beta_hat + std_err_multiplier*np.sqrt(var_beta_hat)).ravel()\n\n return intervals.T\n\n\n# Preprocessing X\ndef truncate_to_poly(X, pn):\n '''Truncates the design matrix X to right shape for polynomial degree pn'''\n p = int((pn+1)*(pn+2)/2)\n return np.copy(X[:,:p])\n\ndef split(y, k):\n '''k-fold splitter function. 
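Returns (train_splits, test_splits), two parallel lists of row-index lists. 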
NOTE: Choose k such that len(z)%k is zero, ie the split is even!'''\n n = len(y)\n s = n//k # samples in each split\n last_idx = n - n%k # remove overshooting samples\n test_splits = [list(range(i, i+s)) for i in range(0, last_idx, s)]\n train_splits = [list(set(range(last_idx)) - set(test_split)) for test_split in test_splits]\n return (train_splits, test_splits)\n\n\ndef best_lambda_mse(df=DataFrame(), polynomial_orders=[], col_prefix=str()):\n '''Searches df for the lambda which gives the best MSE for each polynomial in polynomial_orders and returns the lambdas and MSEs in as (lambdas, best_mse)'''\n best_mse = np.ndarray(len(polynomial_orders))\n row_best_lambda = df.filter(regex=col_prefix).idxmin()\n lambdas = df['lambda'][row_best_lambda].to_numpy()\n for i, row in enumerate(row_best_lambda):\n pn = polynomial_orders[i]\n best_mse[i] = df.at[row, col_prefix + str(pn)]\n return lambdas, best_mse\n\n\n@njit(parallel=True)\ndef run_image_calcs(img, pn, betas):\n '''numba-parallelized part of generate_image()'''\n n_terms = int((pn+1)*(pn+2)/2)\n x_exponents = [0]*n_terms\n y_exponents = [0]*n_terms\n\n xn = yn = 0\n for i in range(pn+1):\n for j in range(i+1):\n x_exponents[xn] = j\n xn += 1\n for j in range(i,-1,-1):\n y_exponents[yn] = j\n yn += 1\n\n x_dim, y_dim = img.shape\n xi = np.linspace(0,1,x_dim)\n yj = np.linspace(0,1,y_dim)\n img[:,:] = 0\n\n for i in prange(x_dim):\n for j in range(y_dim):\n for k in range(n_terms):\n b = betas[k,0]\n xn = x_exponents[k]\n yn = y_exponents[k]\n img[i,j] += b*(xi[i]**xn)*(yj[j]**yn)\n\ndef generate_image(pn, xyz_norm, betas):\n '''Generates image with pn-th polynomial Pn(x,y) and coefficient vector betas'''\n x_dim, y_dim, z_norm = xyz_norm\n img = np.ndarray((x_dim, y_dim))\n run_image_calcs(img, pn, betas)\n img = img * z_norm\n img = img.astype(int)\n return img\n","sub_path":"project2/source/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"106300506","text":"\"\"\"\nThis code was slightly modified from the baselines/baselines/deepq/build_graph.py in order to use\na different evaluation method. 
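(The act builders below choose actions by the argmax of the summed outputs q1+q2 of the two clipped double-Q networks, rather than of a single Q network.) 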
In order to run, simply replace the original code with this code\nin the original directory.\n\"\"\"\n\"\"\"Clipped Double Q learning graph\n\nThe functions in this file can be used to create the following functions:\n\n======= act ========\n\n Function to choose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be fed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon to a new value; if negative, no update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= act (in case of parameter noise) ========\n\n Function to choose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be fed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon to a new value; if negative, no update happens\n (default: no update)\n reset_ph: bool\n reset the perturbed policy by sampling a new perturbation\n update_param_noise_threshold_ph: float\n the desired threshold for the difference between non-perturbed and perturbed policy\n update_param_noise_scale_ph: bool\n whether or not to update the scale of the noise for the next time it is re-perturbed\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n importance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized Q function to the target Q function.\n In Q learning we actually optimize the following error:\n\n Q(s,a) - (r + gamma * max_a' Q'(s', a'))\n\n Where Q' is lagging behind Q to stabilize the learning. 
For example for Atari\n\n Q' is set to Q once every 10000 updates training steps.\n\n\"\"\"\nimport tensorflow as tf\nimport math\nimport baselines0.common.tf_util as U\n\n\ndef scope_vars(scope, trainable_only=False):\n \"\"\"\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n \"\"\"\n return tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n scope=scope if isinstance(scope, str) else scope.name\n )\n\n\ndef scope_name():\n \"\"\"Returns the name of current scope as a string, e.g. deepq/q_func\"\"\"\n return tf.compat.v1.get_variable_scope().name\n\n\ndef absolute_scope_name(relative_scope_name):\n \"\"\"Appends parent scope name to `relative_scope_name`\"\"\"\n return scope_name() + \"/\" + relative_scope_name\n\n\ndef default_param_noise_filter(var):\n if var not in tf.compat.v1.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef build_act(make_obs_ph, q_func, num_actions, scope=\"setdeepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse, the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select an action given an observation.\n See the top of the file for details.\n \"\"\"\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.compat.v1.get_variable(\"eps\", (), initializer=tf.compat.v1.constant_initializer(0))\n # Clipped Double q\n q1_values = q_func.forward(observations_ph.get(), num_actions, scope=\"q1_func\", reuse=reuse)\n q2_values = q_func.forward(observations_ph.get(), num_actions, scope=\"q2_func\", reuse=reuse)\n # Sum over q1 and q2 and find the action with argmax\n deterministic_actions = tf.argmax(input=q1_values+q2_values, axis=1)\n\n batch_size = tf.shape(input=observations_ph.get())[0]\n random_actions = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.compat.v1.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(pred=stochastic_ph, true_fn=lambda: stochastic_actions, false_fn=lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(pred=update_eps_ph >= 0, true_fn=lambda: update_eps_ph, false_fn=lambda: eps))\n _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True},\n updates=[update_eps_expr])\n def act(ob, stochastic=True, update_eps=-1):\n return _act(ob, stochastic, update_eps)\n return act\n\ndef build_act_greedy(make_obs_ph, q_func, num_actions, scope=\"setdeepq\", reuse=True, eps=0.0):\n \"\"\"Creates the act function for a simple fixed-epsilon greedy policy\n Added by HJ\n \"\"\"\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"stochastic\")\n # Clipped Double q\n q1_values = q_func.forward(observations_ph.get(), num_actions, scope=\"q1_func\", reuse=reuse)\n q2_values = q_func.forward(observations_ph.get(), num_actions, scope=\"q2_func\", reuse=reuse)\n # Sum over q1 and q2 and find the action with argmax\n deterministic_actions = tf.argmax(input=q1_values+q2_values, axis=1)\n\n batch_size = tf.shape(input=observations_ph.get())[0]\n random_actions = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.compat.v1.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(pred=stochastic_ph, true_fn=lambda: stochastic_actions, false_fn=lambda: deterministic_actions)\n _act = U.function(inputs=[observations_ph, stochastic_ph],\n outputs=output_actions)\n def act(ob, stochastic=True):\n return _act(ob, stochastic)\n return act\n\n\ndef build_train(make_obs_ph, q_func, num_actions, optimizer_f,\n grad_norm_clipping=None, gamma=1.0, scope=\"setdeepq\", reuse=None,\n test_eps=0.05, lr_init = 0.001, lr_period_steps=250000, tau=0.05):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of 
input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer_f: learning_rate -> tf.train.Optimizer\n factory that builds the optimizer used for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse, the scope must be given.\n test_eps: float\n epsilon used by the greedy act function built for evaluation.\n lr_init : float\n initial learning rate\n lr_period_steps : int\n learning rate schedule following a cosine with this period\n tau : float\n parameter for the soft target network update. tau <= 1.0 and 1.0 for\n the hard update.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select an action given an observation.\n See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n \"\"\"\n # Build action graphs\n act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)\n\n act_greedy = build_act_greedy(make_obs_ph, q_func, num_actions, scope=scope, reuse=True, eps=test_eps)\n\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n # set up placeholders\n obs_t_input = make_obs_ph(\"obs_t\")\n act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name=\"action\")\n rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"reward\")\n obs_tp1_input = make_obs_ph(\"obs_tp1\")\n done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"done\")\n importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"weight\")\n iteration = tf.compat.v1.placeholder(tf.float32, name=\"iteration\")\n\n # Cosine learning rate adjustment\n lr = tf.Variable(float(lr_init), trainable=False, dtype = tf.float32, name='lr')\n lr = tf.clip_by_value(0.0005*tf.math.cos(math.pi*iteration/lr_period_steps)+0.000501, 1e-6, 1e-3)\n optimizer = optimizer_f(learning_rate = lr)\n\n # q network evaluation\n q1_t = q_func.forward(obs_t_input.get(), num_actions, scope=\"q1_func\", reuse=True) # reuse q1 parameters from act\n q1_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=tf.compat.v1.get_variable_scope().name + \"/q1_func\")\n q2_t = q_func.forward(obs_t_input.get(), num_actions, scope=\"q2_func\", reuse=True) # reuse q2 parameters from act\n q2_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=tf.compat.v1.get_variable_scope().name + \"/q2_func\")\n\n # target q network evaluation\n q1_tp1 = q_func.forward(obs_tp1_input.get(), num_actions, scope=\"target_q1_func\", reuse=False)\n target_q1_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=tf.compat.v1.get_variable_scope().name + \"/target_q1_func\")\n 
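# Second of the two target networks used by clipped Double Q-learning: the\n # Bellman target below takes the elementwise minimum of the two estimates,\n # which damps the overestimation bias of a single maximizing network.\n 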
q2_tp1 = q_func.forward(obs_tp1_input.get(), num_actions, scope=\"target_q2_func\", reuse=False)\n target_q2_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=tf.compat.v1.get_variable_scope().name + \"/target_q2_func\")\n\n # q scores for actions which we know were selected in the given state.\n q1_t_selected = tf.reduce_sum(input_tensor=q1_t * tf.one_hot(act_t_ph, num_actions), axis=1)\n q2_t_selected = tf.reduce_sum(input_tensor=q2_t * tf.one_hot(act_t_ph, num_actions), axis=1)\n\n # Actions selected with current q funcs at state t+1.\n q1_tp1_using_online_net = q_func.forward(obs_tp1_input.get(), num_actions, scope=\"q1_func\", reuse=True)\n q2_tp1_using_online_net = q_func.forward(obs_tp1_input.get(), num_actions, scope=\"q2_func\", reuse=True)\n tp1_best_action_using_online_net = tf.argmax(input=q1_tp1_using_online_net+q2_tp1_using_online_net, axis=1)\n # Using the action at t+1, find the target value associated with that action\n q1_tp1_selected = tf.reduce_sum(input_tensor=q1_tp1 * tf.one_hot(tp1_best_action_using_online_net, num_actions), axis=1)\n q2_tp1_selected = tf.reduce_sum(input_tensor=q2_tp1 * tf.one_hot(tp1_best_action_using_online_net, num_actions), axis=1)\n # Min of the two target q values, to be used in the Bellman equation\n q_tp1_best = tf.minimum(q1_tp1_selected, q2_tp1_selected)\n\n # compute RHS of the Bellman equation\n # (note: done_mask_ph is not applied here, so the bootstrap term is kept even at episode ends)\n q_tp1_selected_target = rew_t_ph + gamma * q_tp1_best\n\n # compute the error (potentially clipped)\n td_error1 = q1_t_selected - tf.stop_gradient(q_tp1_selected_target)\n td_error2 = q2_t_selected - tf.stop_gradient(q_tp1_selected_target)\n errors1 = U.huber_loss(td_error1)\n errors2 = U.huber_loss(td_error2)\n errors = errors1 + errors2\n weighted_error = tf.reduce_mean(input_tensor=importance_weights_ph * errors)\n\n # Print total number of params\n total_parameters = 0\n for variable in tf.compat.v1.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim.value\n # print(\"var params\", variable_parameters)\n total_parameters += variable_parameters\n print(\"===============================================================\")\n print(\"Total number of trainable params:\", total_parameters)\n print(\"===============================================================\")\n\n # Log for tensorboard\n tf.summary.scalar('q1_values', tf.math.reduce_mean(q1_t))\n tf.summary.scalar('q2_values', tf.math.reduce_mean(q2_t))\n tf.summary.scalar('td_1', tf.math.reduce_mean(td_error1))\n tf.summary.scalar('td_2', tf.math.reduce_mean(td_error2))\n tf.summary.scalar('weighted_loss', weighted_error)\n tf.summary.scalar('lr_schedule', lr)\n tf.summary.scalar('td_MSE_1', tf.math.reduce_mean(tf.math.square(td_error1)))\n tf.summary.scalar('td_MSE_2', tf.math.reduce_mean(tf.math.square(td_error2)))\n\n # combine variable scopes\n q_func_vars = q1_func_vars+q2_func_vars\n # compute optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)\n optimize_expr = optimizer.apply_gradients(gradients)\n else:\n optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)\n\n # update_target_fn will be called every step to copy Q network to target Q network\n # target network is updated with polyak averaging\n 
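# target <- tau*online + (1-tau)*target on every call; tau=1.0 recovers the conventional hard copy\n 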
update_target_expr1 = []\n for var, var_target in zip(sorted(q1_func_vars, key=lambda v: v.name),\n sorted(target_q1_func_vars, key=lambda v: v.name)):\n update_target_expr1.append(var_target.assign(tau*var + (1-tau)*var_target))\n update_target_expr1 = tf.group(*update_target_expr1)\n\n update_target_expr2 = []\n for var, var_target in zip(sorted(q2_func_vars, key=lambda v: v.name),\n sorted(target_q2_func_vars, key=lambda v: v.name)):\n update_target_expr2.append(var_target.assign(tau*var + (1-tau)*var_target))\n update_target_expr2 = tf.group(*update_target_expr2)\n\n merged_summary = tf.compat.v1.summary.merge_all(scope=tf.compat.v1.get_variable_scope().name)\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph,\n iteration\n ],\n outputs=[td_error1, td_error2, tf.reduce_mean(input_tensor=errors), merged_summary],\n updates=[optimize_expr, lr]\n )\n update_target = U.function([], [], updates=[update_target_expr1, update_target_expr2])\n\n q_values = U.function(inputs=[obs_t_input], outputs=[q1_t, q2_t])\n\n return act_f, act_greedy, q_values, train, update_target, {'q_values': q_values}\n","sub_path":"deep_adfq/baselines0/setdeepq/build_graph.py","file_name":"build_graph.py","file_ext":"py","file_size_in_byte":19272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101647334","text":"import os\nimport time\nimport json\nfrom common import settings\nfrom common import queries\nfrom confluent_kafka import Producer, KafkaError\nimport datetime\n\ncandidates = 'candidates'\nobjects = 'objects'\n\n# setup database connection\nimport mysql.connector\n\nimport smtplib\nfrom email.message import EmailMessage\ndef send_email(email, topic, message):\n msg = EmailMessage()\n msg.set_content(message)\n\n msg['Subject'] = 'Lasair query ' + topic\n msg['From'] = 'donotreply@lasair.roe.ac.uk'\n msg['To'] = email\n s = smtplib.SMTP('localhost')\n s.send_message(msg)\n s.quit()\n\ndef datetime_converter(o):\n# used by json encoder when it gets a type it doesn't understand\n if isinstance(o, datetime.datetime):\n return o.__str__()\n\ndef run_query(query, status, msl, active, email, topic):\n jdnow = (time.time()/86400 + 2440587.5);\n days_ago_candidates = jdnow - status['cand_max_jd']\n days_ago_objects = jdnow - status['obj_max_jd']\n\n sqlquery_real = queries.make_query(query['selected'], query['tables'], query['conditions'], \n 0, 1000, True, days_ago_candidates, days_ago_objects)\n\n cursor = msl.cursor(buffered=True, dictionary=True)\n recent = []\n allrecent = []\n try:\n cursor.execute(sqlquery_real)\n\n # debug message\n #print('\\n%d %f %f\\n%s\\n' % (active, days_ago_candidates, days_ago_objects, sqlquery_real))\n\n for record in cursor:\n recorddict = dict(record)\n now_number = datetime.datetime.utcnow()\n recorddict['UTC'] = now_number.strftime(\"%Y-%m-%d %H:%M:%S\")\n allrecent.append(recorddict)\n except Exception as e:\n print(\"Query failed for %s\" % topic)\n print(e)\n print(sqlquery_real)\n print(' --- %d satisfy query' % len(allrecent))\n\n if len(allrecent) > 0:\n filename = '/data/ztf/streams/%s' % topic\n try:\n file = open(filename, 'r')\n digestdict = json.loads(file.read())\n digest = digestdict['digest']\n last_entry_text = digestdict['last_entry']\n file.close()\n except:\n digest = []\n last_entry_text = \"2017-01-01 00:00:00\"\n\n last_entry_number = datetime.datetime.strptime(last_entry_text, \"%Y-%m-%d 
%H:%M:%S\")\n now_number = datetime.datetime.utcnow()\n delta = (now_number - last_entry_number)\n delta = delta.days + delta.seconds/86400.0\n\n# only objects in last 24 hours\n last_day_objects = []\n for out in digest:\n out_number = datetime.datetime.strptime(out['UTC'], \"%Y-%m-%d %H:%M:%S\")\n # age of this record in days (kept separate from delta, which throttles the emails below)\n age = (now_number - out_number)\n age = age.days + age.seconds/86400.0\n if age < 1.0:\n last_day_objects.append(out['objectId'])\n print(' --- %d yesterday' % len(last_day_objects))\n\n for out in allrecent:\n if 'objectId' in out and not out['objectId'] in last_day_objects:\n recent.append(out)\n\n if len(recent) > 0:\n allrecords = (recent + digest)[:1000]\n if active == 1:\n # send a message at most every 24 hours\n if delta > 1.0:\n print(' --- send email to %s' % email)\n message = 'Your active query with Lasair on topic ' + topic + '\\n'\n for out in allrecords: \n out_number = datetime.datetime.strptime(out['UTC'], \"%Y-%m-%d %H:%M:%S\")\n # gather all records that have accumulated since last email\n if out_number > last_entry_number:\n jsonout = json.dumps(out, default=datetime_converter)\n message += jsonout + '\\n'\n send_email(email, topic, message)\n last_entry_text = now_number.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if active == 2:\n conf = { 'bootstrap.servers': settings.LASAIR_KAFKA_PRODUCER }\n try:\n p = Producer(conf)\n for out in recent: \n jsonout = json.dumps(out, default=datetime_converter)\n p.produce(topic, jsonout)\n p.flush(10.0) # 10 second timeout\n # last_entry not really used with kafka, just a record of last blast\n last_entry_text = now_number.strftime(\"%Y-%m-%d %H:%M:%S\")\n print(' -- sent to kafka')\n except Exception as e:\n print(\"Kafka production failed for %s\" % topic)\n print(e)\n\n digestdict = {'last_entry': last_entry_text, 'digest':allrecords}\n digestdict_text = json.dumps(digestdict, default=datetime_converter)\n\n # rewrite the digest in place; appending would leave several JSON documents in the\n # file and break the json.loads() that reads it back above\n file = open(filename, 'w')\n os.chmod(filename, 0o666)\n file.write(digestdict_text)\n file.close()\n return len(recent)\n\ndef find_queries(status):\n jdnow = (time.time()/86400 + 2440587.5)\n days_ago_candidates = jdnow - status['cand_max_jd']\n days_ago_objects = jdnow - status['obj_max_jd']\n print('days_ago_objects %.3f days_ago_candidates %.3f' % (days_ago_objects, days_ago_candidates))\n\n config = {\n 'user' : settings.DB_USER_WRITE,\n 'password': settings.DB_PASS_WRITE,\n 'host' : settings.DB_HOST,\n 'database': 'ztf'\n }\n msl = mysql.connector.connect(**config)\n\n cursor = msl.cursor(buffered=True, dictionary=True)\n query = 'SELECT user, name, email, active, selected, tables, conditions FROM myqueries2, auth_user WHERE myqueries2.user = auth_user.id AND active > 0'\n cursor.execute(query)\n\n# secs_since_update = time.time() - status['update_time_unix']\n# print('update %.1f minutes ago' % (secs_since_update/60))\n\n for query in cursor:\n topic = queries.topic_name(query['user'], query['name'])\n print('query %s' % topic)\n active = query['active']\n email = query['email']\n t = time.time()\n n = run_query(query, status, msl, active, email, topic)\n t = time.time() - t\n print(' --- got %d in %.1f seconds' % (n, t))\n\nif __name__ == \"__main__\":\n print('--------- RUN ACTIVE QUERIES -----------')\n t = time.time()\n jsonstr = open('/data/ztf/system_status.json').read()\n status = json.loads(jsonstr)\n find_queries(status)\n print('Active queries done in %.1f seconds' % (time.time() - 
t))\n","sub_path":"src/post_ingest/run_active_queries.py","file_name":"run_active_queries.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"338336286","text":"from cs50 import get_string\nfrom cs50 import get_int\nimport math\n\ndef check_sum(num):\n # variables\n digit = 0\n k = 0\n s = 0\n sum = 0\n \n while (num > 0):\n digit = int(num % 10)\n # test print(\"Digit: \"+ str(digit))\n num = int(num / 10)\n # test print(\"num: \" + str(num))\n k += 1\n # test print(\"k: \" + str(k))\n\n if (k % 2 == 0):\n if (digit * 2 >= 10):\n s = digit * 2 - 9\n # test print(\"even digit: \" + str(s))\n else:\n s = digit * 2\n # test print(\"even single digit: \" + str(s))\n \n else:\n s = digit\n # test print(\"odd digit: \" +str(s))\n \n sum = sum + s\n\n # check to see if sum is even. If it is, return 1 to signify it passes luhn's algorithim\n if (sum % 10 == 0):\n return 1\n \n else:\n return 0\n \n\n# get cc number from user \nnum = get_int(\"Number: \")\n\n# put number through luhn's algorithim\ncheck_sum(num)\n\n# if passes luhns algorithim, determine if it is a VISA, MASTER, or AMEX\nif (check_sum(num) == 1):\n if ((num >= 4000000000000 and num <= 4999999999999) or (num >= 4000000000000000 and num <= 4999999999999999)):\n print(\"VISA\")\n print(\"\")\n elif (num >= 340000000000000 and num <= 379999999999999):\n print(\"AMEX\")\n print(\"\")\n elif (num >= 5100000000000000 and num <= 5599999999999999):\n print(\"MASTERCARD\")\n print(\"\")\n\n\nelse:\n print(\"INVALID\")","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"10904279","text":"# -*- coding:utf-8 -*-\n\"\"\"\nauthor:LinYiLin\ndate:2019-06-19\ndescription:using to show x mind figure of dir tree\n\"\"\"\nimport xmind\nimport win32api\nfrom os import walk\nimport os\n\nXMIND_PATH = r\"C:\\Program Files (x86)\\XMind\\XMind.exe\" # XMind's install path\n\n\nclass XMind(object):\n \"\"\"show dir tree base on x mind\"\"\"\n def __init__(self):\n self._xmind_path = XMIND_PATH\n self.sheet = None\n self._name = \"\"\n\n def show_xmind(self, xmind_file_path):\n win32api.ShellExecute(0, 'open', XMIND_PATH, xmind_file_path, '', 1)\n\n def create_xmind(self, initial_path):\n work_book = xmind.load(path=\"my.xmind\")\n self.sheet = work_book.getPrimarySheet()\n self.design_sheet(\"\", initial_path, True)\n xmind.save(work_book, path=\"test.xmind\")\n self.show_xmind(\"test.xmind\")\n\n def set_topic(self, topic, topic_title, set_title):\n self._name = \"\"\n if set_title:\n try:\n topic = topic.addSubTopic()\n except Exception:\n topic = self.sheet.getRootTopic()\n\n topic.setTitle(topic_title)\n return topic\n\n def design_sheet(self, topic, initial_path, set_title):\n \"\"\"design sheet base on path dir\"\"\"\n _, topic_title = os.path.split(initial_path)\n topic = self.set_topic(topic, topic_title, set_title)\n path_list = os.listdir(initial_path)\n for path in path_list:\n real_path = os.path.join(initial_path, path)\n if os.path.isdir(real_path):\n sub_topic = self.set_topic(topic, path, True)\n self.design_sheet(sub_topic, real_path, False)\n else:\n self.set_topic(topic, path, True)\n\n # generator = walk(initial_path)\n # first_run = True\n # topic = None\n # for base_path, folder_names, file_paths in generator:\n # print(base_path)\n # _, topic_title = os.path.split(base_path)\n # if 
first_run:\n # topic = self.set_root_topic(topic_title)\n # first_run = False\n # else:\n # pass\n # # topic = self.set_sub_topic(topic, topic_title)\n # _, base_path = os.path.split(base_path)\n # for topic_title in file_paths + file_paths:\n # self.set_sub_topic(topic, topic_title)\n\n\ndef main():\n mind = XMind()\n test_path = r\"C:\\Users\\kimi\\Desktop\\FoldMatch\\Diractory-Management-System\\项目\"\n mind.create_xmind(test_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"show_dir.py","file_name":"show_dir.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"111690091","text":"import os\nimport platform\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\nhome_dir = os.path.expanduser('~')\n\nsystem_name = platform.system()\nif system_name == \"Windows\":\n import ctypes, sys\n def is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return False\n\n if not is_admin():\n print(\"Raising privilages..\")\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, __file__, None, 1)\n else:\n src = os.path.join(current_dir, \"_vimrc\")\n dst = os.path.join(home_dir, \"_vimrc\")\n print(\"Linking _vimrc\")\n os.symlink(src, dst)\n\nelif system_name == \"Linux\":\n os.symlink(current_dir, os.path.join(home_dir, '.vim'))\n src = os.path.join(current_dir, \"_vimrc\")\n dst = os.path.join(home_dir, \".vimrc\")\n os.symlink(src, dst)\n\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623636978","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nimport glob\nfrom model import FinalModel,write_video\nimport subprocess\nfrom scipy.io import wavfile\nfrom video_loader import load_video,loadWAV,slice_audio\n\n\ndef write_test_video(path_model,path_video,path_audio,path_target_audio,style_dim=32,resl=(128,128),step=1,half_window=400):\n video_source,_ = load_video(path_video,resl)\n audio_source = loadWAV(path_audio)\n audio_target = loadWAV(path_target_audio)\n if len(audio_target) > len(audio_source):\n print('Audio target is longer than source.Successful')\n else:\n print('Audio target is shorter than source.Failed')\n return\n\n truncate_frame = 2\n batch = 24\n\n video_source_index = range( truncate_frame , (len(video_source) - 2*truncate_frame)//batch * batch + truncate_frame,step)\n # trunc_tail = len(video_source) - len(video_source_index) - truncate_frame\n apv_rate = (len(audio_source)-1)/(len(video_source)-1)\n source_audio = slice_audio(audio_source, half_window=half_window, vf_index=video_source_index,apv_rate=apv_rate)\n target_audio = slice_audio(audio_target, half_window=half_window, vf_index=video_source_index,apv_rate=apv_rate)\n source_video = video_source[[i for i in video_source_index]]\n\n video_data,audio_data,audio_target_data = (np.array(source_video).swapaxes(2,3).swapaxes(1,2).astype(np.float32)/127.5 -1,np.array(source_audio).astype(np.float32),np.array(target_audio).astype(np.float32))\n model = FinalModel(size=128,style_dim=style_dim)\n # device = torch.device('cpu')\n # model.to(device)\n model.cuda()\n\n ## load model\n # checkpoint = torch.load(path_model,map_location = device )\n checkpoint = torch.load(path_model)\n\n model.load_state_dict(checkpoint['model_state_dict'])\n \n model.eval()\n print('Load model successfully')\n \n gen_list = []\n 
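# frames rendered with the source audio; gen_target_list below collects the target-audio frames\n 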
gen_target_list = []\n for idx in range(0,len(video_data),batch):\n \n video_input = torch.from_numpy(video_data[idx:idx+batch]).cuda()\n audio_input = torch.from_numpy(audio_data[idx:idx+batch]).cuda()\n audio_target_input = torch.from_numpy(audio_target_data[idx:idx+batch]).cuda()\n print('Load data successfully')\n \n\n gen_images,_ = model(video_input,audio_input)\n gen_images = gen_images.detach().cpu().numpy().swapaxes(1,2).swapaxes(2,3)\n print('Get gen images successfully')\n gen_list.append(gen_images)\n\n gen_target_images,_ = model(video_input,audio_target_input)\n gen_target_images = gen_target_images.detach().cpu().numpy().swapaxes(1,2).swapaxes(2,3)\n gen_target_list.append(gen_target_images)\n gen_list = np.concatenate(gen_list,axis=0)\n gen_target_list = np.concatenate(gen_target_list,axis=0)\n\n write_video(gen_list,path='./test_result/demo.avi',size=(128,128))\n write_video(gen_target_list,path='./test_result/demo_target.avi',size=(128,128))\n print('write video successfully')\n samplerate = 16000\n wavfile.write(\"./test_result/demo.wav\", samplerate, audio_source[int(apv_rate*video_source_index[0]) - half_window:int(apv_rate*video_source_index[-1]) + half_window])\n print('write source audio successfully')\n wavfile.write(\"./test_result/demo_target.wav\", samplerate, audio_target[int(apv_rate*video_source_index[0]) - half_window:int(apv_rate*video_source_index[-1]) + half_window])\n print('write target audio successfully')\n ## concatenate video and audio\n subprocess.call('ffmpeg -i ./test_result/demo.avi -i ./test_result/demo.wav -shortest -c:v copy -c:a aac ./test_result/output.avi', shell=True)\n subprocess.call('ffmpeg -i ./test_result/demo_target.avi -i ./test_result/demo_target.wav -shortest -c:v copy -c:a aac ./test_result/output_target.avi', shell=True)\n\n\n print('concatenate successfully')\n pass\n\nif __name__ =='__main__':\n write_test_video('./save_model/model_208.pth',path_video='/home/ubuntu/dev/mp4/id00397/AtjMYQ6XsWo/00001.mp4',path_audio='/home/ubuntu/data/voxceleb2/id00397/AtjMYQ6XsWo/00001.wav',path_target_audio='/home/ubuntu/data/voxceleb2/id00397/CuhtLlTmt2E/00002.wav',style_dim=32,resl=(128,128),step=1,half_window=400)\n","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"234626038","text":"# **********************************************************************\n#\n# Copyright (c) 2003-2009 ZeroC, Inc. 
All rights reserved.\n#\n# This copy of Ice is licensed to you under the terms described in the\n# ICE_LICENSE file included in this distribution.\n#\n# **********************************************************************\n\n# Ice version 3.3.1\n# Generated from file `Logger.ice'\n\nimport Ice, IcePy, __builtin__\n\nif not Ice.__dict__.has_key(\"_struct_marker\"):\n Ice._struct_marker = object()\n\n# Start of module Ice\n_M_Ice = Ice.openModule('Ice')\n__name__ = 'Ice'\n\nif not _M_Ice.__dict__.has_key('Logger'):\n _M_Ice.Logger = Ice.createTempClass()\n class Logger(object):\n def __init__(self):\n if __builtin__.type(self) == _M_Ice.Logger:\n raise RuntimeError('Ice.Logger is an abstract class')\n\n #\n # Operation signatures.\n #\n # def _print(self, message):\n # def trace(self, category, message):\n # def warning(self, message):\n # def error(self, message):\n\n def __str__(self):\n return IcePy.stringify(self, _M_Ice._t_Logger)\n\n __repr__ = __str__\n\n _M_Ice._t_Logger = IcePy.defineClass('::Ice::Logger', Logger, (), True, None, (), ())\n Logger.ice_type = _M_Ice._t_Logger\n\n _M_Ice.Logger = Logger\n del Logger\n\n# End of module Ice\n","sub_path":"web.py/AdMobMonitor/IcePy-3.3.0/python/Ice_Logger_ice.py","file_name":"Ice_Logger_ice.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"96425317","text":"import argparse\nimport logging\nimport hashlib\nfrom urllib.parse import urlparse\n\nimport nltk\nimport pandas as pd\nfrom nltk.corpus import stopwords\n\n# nltk.download(\"punkt\")\n# nltk.download(\"stopwords\")\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\nstop_words = set(stopwords.words(\"spanish\"))\n\n\ndef _read_data(filename):\n logger.info(f\"Reading file {filename}\")\n\n return pd.read_csv(filename)\n\n\ndef _extract_newspaper_id(filename):\n logger.info(f\"Extracting newspaper id\")\n newspaper_id = filename.split(\"_\")[0]\n\n logger.info(f\"Newspaper id detected: {newspaper_id}\")\n return newspaper_id\n\n\ndef _add_newspaper_id_column(df, newspaper_id):\n logger.info(f\"Filling newspaper_id column with: {newspaper_id}\")\n df[\"newspaper_id\"] = newspaper_id\n return df\n\n\ndef _extract_host(df):\n logger.info(\"Extracting host from urls\")\n\n df[\"host\"] = df[\"url\"].apply(lambda url: urlparse(url).netloc)\n return df\n\n\ndef _fill_missing_titles(df):\n logger.info(\"Filling missing titles\")\n\n missing_titles_mask = df[\"title\"].isna()\n\n missing_titles = (\n df[missing_titles_mask][\"url\"]\n .str.extract(r\"(?P[^/]+)$\")\n .applymap(lambda title: title.split(\"-\"))\n .applymap(lambda title_work_list: \" \".join(title_work_list))\n )\n\n df.loc[missing_titles_mask, \"title\"] = missing_titles.loc[:, \"missing_titles\"]\n\n return df\n\n\ndef _generate_uids_for_rows(df):\n logger.info(\"Generating ids for each row\")\n\n uids = df.apply(lambda row: hashlib.md5(bytes(row[\"url\"].encode())), axis=1).apply(\n lambda hash_object: hash_object.hexdigest()\n )\n\n df[\"uid\"] = uids\n df.set_index(\"uid\", inplace=True)\n\n return df\n\n\ndef _remove_new_lines_from_body(df):\n logger.info(\"Remove new lines from body\")\n\n df[\"body\"] = df.apply(lambda row: row[\"body\"].replace(\"\\n\", \"\"), axis=1)\n\n return df\n\n\ndef _tokenize_column(df, column_name):\n df[f\"n_tokens_{column_name}\"] = (\n df.dropna()\n .apply(lambda row: nltk.word_tokenize(row[column_name]), axis=1)\n .apply(lambda tokens: list(filter(lambda 
token: token.isalpha(), tokens)))\n .apply(lambda tokens: list(map(lambda token: token.lower(), tokens)))\n .apply(\n lambda word_list: list(\n filter(lambda word: word not in stop_words, word_list)\n )\n )\n .apply(lambda valid_word_list: len(valid_word_list))\n )\n\n return df\n\n\ndef _remove_duplicates_entries(df, column_name):\n logger.info(\"Removing duplicates entries\")\n\n df.drop_duplicates(subset=[column_name], keep=\"first\", inplace=True)\n\n return df\n\n\ndef _drop_rows_with_missing_values(df):\n logger.info(\"Dropping rows with missing data\")\n return df.dropna()\n\n\ndef _save_data(df, filename):\n clean_filename = f\"clean_{filename}\"\n logger.info(f\"Saving data at location: {clean_filename}\")\n df.to_csv(clean_filename)\n\n\ndef main(filename):\n logger.info(\"Starting cleaning process\")\n\n df = _read_data(filename)\n newspaper_id = _extract_newspaper_id(filename)\n df = _add_newspaper_id_column(df, newspaper_id)\n df = _extract_host(df)\n df = _fill_missing_titles(df)\n df = _generate_uids_for_rows(df)\n df = _remove_new_lines_from_body(df)\n df = _tokenize_column(df, \"title\")\n df = _tokenize_column(df, \"body\")\n df = _remove_duplicates_entries(df, \"title\")\n df = _drop_rows_with_missing_values(df)\n _save_data(df, filename)\n\n return df\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\", help=\"The path to the dirty data\", type=str)\n\n args = parser.parse_args()\n\n df = main(args.filename)\n print(df)\n","sub_path":"transform/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"140718285","text":"import click\nimport logging\nimport os\n\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\nfrom stockast.collectors import IEXStockCollector\nfrom stockast.models import Base\nfrom stockast.models import Company\nfrom stockast.models import StockHistory\nfrom stockast.utils import insert_ignore_dups, parse_companies, parse_historical_data\n\nlogger = logging.getLogger(__name__)\n\n# list of tickets to pull data for\ndefault_symbols = [\n 'AABA',\n 'AAPL',\n 'ADBE',\n 'AMZN',\n 'FB',\n 'GOOG',\n 'JPM',\n 'MSFT',\n 'NVDA',\n 'TSLA',\n]\n\n\n@click.command()\n@click.option('--debug', is_flag=True, help=\"Show queries\")\n@click.option('--show-data', '-s', is_flag=True, help=\"Show data downloaded\")\n@click.option('--token', default=os.getenv('STOCKAST_IEX_CLOUD_TOKEN'), help='IEX Cloud API Token')\n@click.option(\n '--from-date', '-f', default='2018-01-01', type=click.DateTime(), help='From date to get data')\n@click.option(\n '--to-date', '-t', default='2018-12-31', type=click.DateTime(), help='To date to get data')\n@click.argument('database_url')\ndef download_historical_data(debug, show_data, token, from_date, to_date, database_url):\n # Normalize symbols to a list of uppercase symbols\n symbols = default_symbols\n if symbols and type(symbols) != list:\n symbols = [symbols]\n symbols = [x.upper() for x in symbols]\n\n # databse Engine\n # example: 'sqlite:////Users/mmaldonadofigueroa/Desktop/test.db'\n engine = create_engine(database_url, echo=debug)\n\n # create all tables if needed\n Base.metadata.create_all(engine)\n\n # create session\n Session = sessionmaker()\n Session.configure(bind=engine)\n session = Session()\n\n try:\n # create the collector\n collector = IEXStockCollector(token)\n\n # create or get company names\n data = 
collector.get_company_info(symbols)\n\n # parse data into list of Company objects\n objects = parse_companies(data, show=show_data)\n\n # save Companies objects in bulk and commit transaction ignore dups\n insert_ignore_dups(engine, session, Company, objects)\n\n # get the historical data\n data = collector.get_historical_data(symbols, from_date, to_date)\n\n # parse data into a list of StockHistory objects\n objects = parse_historical_data(data, show=show_data)\n\n # save StockHistory objects in bulk and commit transaction ignore dups\n logger.info(f'Inserting: {objects}')\n insert_ignore_dups(engine, session, StockHistory, objects)\n finally:\n # attempt to close db connection even if there are errors\n session.close()\n\n\nif __name__ == '__main__':\n download_historical_data()\n","sub_path":"get-historical-stock-data.py","file_name":"get-historical-stock-data.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"276792720","text":"#!/usr/bin/env python3\n# Copyright (C) 2021 William Breathitt Gray\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import GLib, Gtk, Pango\nimport io\nimport os.path\nimport queue\nimport re\nimport threading\nimport zipfile\n\nclass Walgrep(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self, title=\"Walgrep - ZIP file search utility\", default_height=480, default_width=640)\n self.set_border_width(10)\n\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)\n self.add(vbox)\n\n searchBox = Gtk.Box(spacing=6)\n vbox.pack_start(searchBox, False, False, 0)\n pathLabel = Gtk.Label(label=\"Path:\")\n self.zipEntry = Gtk.Entry()\n selectFileButton = Gtk.Button(label=\"File\")\n selectFileButton.connect(\"clicked\", self.select_zip, Gtk.FileChooserAction.OPEN)\n selectFolderButton = Gtk.Button(label=\"Folder\")\n selectFolderButton.connect(\"clicked\", self.select_zip, Gtk.FileChooserAction.SELECT_FOLDER)\n searchBox.pack_start(pathLabel, False, False, 0)\n searchBox.pack_start(self.zipEntry, True, True, 0)\n searchBox.pack_start(selectFileButton, False, False, 0)\n searchBox.pack_start(selectFolderButton, False, False, 0)\n\n self.recurseButton = Gtk.CheckButton(label=\"Recurse into subdirectories\")\n vbox.pack_start(self.recurseButton, False, False, 0)\n\n patternBox = Gtk.Box(spacing=6)\n vbox.pack_start(patternBox, False, False, 0)\n patternLabel = Gtk.Label(label=\"Pattern:\")\n self.patternEntry = Gtk.Entry()\n self.resultsQueue = queue.SimpleQueue()\n self.searching = 0\n self.searchButton = Gtk.Button(label=\"Search\")\n self.searchButton.connect(\"clicked\", self.search_toggle)\n patternBox.pack_start(patternLabel, False, False, 0)\n patternBox.pack_start(self.patternEntry, True, True, 0)\n patternBox.pack_start(self.searchButton, False, False, 0)\n\n self.filenameButton = Gtk.CheckButton(label=\"Toggle filename search\")\n vbox.pack_start(self.filenameButton, False, False, 0)\n\n progressBox = Gtk.Box(spacing=6)\n vbox.pack_start(progressBox, False, False, 0)\n progressLabel = Gtk.Label(label=\"Progress:\")\n self.progress = Gtk.ProgressBar(ellipsize=Pango.EllipsizeMode.MIDDLE, text=\"\", show_text=True)\n self.status = \"\"\n self.statusLock = threading.Lock()\n self.matches = 0\n self.progress.set_text(self.status)\n self.progress.set_show_text(True)\n progressBox.pack_start(progressLabel, False, False, 0)\n progressBox.pack_start(self.progress, True, True, 0)\n\n scrolledWindow = Gtk.ScrolledWindow()\n vbox.pack_start(scrolledWindow, True, True, 0)\n\n self.results = Gtk.TreeStore(str, str, str, str)\n resultsTree = Gtk.TreeView(model=Gtk.TreeModelSort(model=self.results))\n\n iconRenderer = Gtk.CellRendererPixbuf()\n textRenderer = Gtk.CellRendererText()\n fileColumn = Gtk.TreeViewColumn(\"File\")\n fileColumn.pack_start(iconRenderer, False)\n fileColumn.pack_start(textRenderer, False)\n fileColumn.add_attribute(iconRenderer, \"icon_name\", 0)\n fileColumn.add_attribute(textRenderer, \"markup\", 1)\n fileColumn.set_sort_column_id(1)\n fileColumn.set_resizable(True)\n fileColumn.set_sort_indicator(True)\n lineColumn = Gtk.TreeViewColumn(\"Line\", textRenderer, text=2)\n lineColumn.set_sort_column_id(2)\n lineColumn.set_sort_indicator(True)\n stringColumn = Gtk.TreeViewColumn(\"String\", textRenderer, markup=3)\n 
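# attach the file, line and string columns to the results tree view\n 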
resultsTree.append_column(fileColumn)\n resultsTree.append_column(lineColumn)\n resultsTree.append_column(stringColumn)\n scrolledWindow.add(resultsTree)\n\n def handle_invalid_zip(self, e, path):\n dialog = Gtk.MessageDialog(message_type=Gtk.MessageType.WARNING, buttons=Gtk.ButtonsType.CLOSE, text=f'{type(e).__name__}', secondary_text=f'{e}: {path}')\n dialog.run()\n dialog.destroy()\n\n def parse_zip(self, root, relpath, pattern, search_by_name):\n path = os.path.join(root, relpath)\n with zipfile.ZipFile(path, 'r') as z:\n archive = GLib.markup_escape_text(relpath);\n\n for member in z.infolist():\n if not self.searching:\n break;\n\n if member.is_dir():\n continue\n\n if search_by_name:\n basename = os.path.basename(member.filename)\n matches = re.finditer(pattern, basename)\n filename = \"\"\n pos = 0\n for m in matches:\n prefix = GLib.markup_escape_text(basename[pos:m.start()])\n match = f\"{GLib.markup_escape_text(m[0])}\"\n pos = m.end()\n filename += f'{prefix}{match}'\n if filename:\n prefix = member.filename[:-len(basename)]\n suffix = GLib.markup_escape_text(basename[pos:])\n if archive:\n self.resultsQueue.put((\"a\", archive))\n archive = None\n self.resultsQueue.put((\"m\", f'{prefix}{filename}{suffix}'))\n continue\n\n with z.open(member) as f:\n lines = io.TextIOWrapper(f, encoding=\"utf-8\")\n try:\n for i, line in enumerate(lines):\n if not self.searching:\n break;\n matches = re.finditer(pattern, line)\n for m in matches:\n if archive:\n self.resultsQueue.put((\"a\", archive))\n archive = None\n if member.filename:\n self.resultsQueue.put((\"m\", GLib.markup_escape_text(member.filename)))\n member.filename = None\n prefix = GLib.markup_escape_text(line[:m.start()])\n match = f\"{GLib.markup_escape_text(m[0])}\"\n suffix = GLib.markup_escape_text(line[m.end():])\n self.resultsQueue.put((f'{i}', f'{prefix}{match}{suffix}'))\n except UnicodeError:\n continue\n\n def process_queue(self, search_by_name):\n global zipIter, memberIter;\n while not self.resultsQueue.empty():\n line, string = self.resultsQueue.get_nowait()\n if line == \"a\":\n zipIter = self.results.append(None, [\"folder\", string, \"\", \"\"])\n elif line == \"m\":\n memberIter = self.results.append(zipIter, [\"text-x-generic\", string, \"\", \"\"])\n if search_by_name:\n self.matches += 1\n else:\n self.results.append(memberIter, [\"\", \"\", line, string])\n self.matches += 1\n\n def search_stop(self, thread):\n self.searching = 0\n thread.join()\n self.searchButton.set_label(\"Search\")\n\n def search_toggle(self, widget):\n global thread;\n if self.searching:\n self.search_stop(thread)\n else:\n path = self.zipEntry.get_text()\n pattern = self.patternEntry.get_text()\n recurse = self.recurseButton.get_active()\n search_by_name = self.filenameButton.get_active()\n self.results.clear()\n self.searchButton.set_label(\"Stop\")\n self.searching = 1\n thread = threading.Thread(target=self.search_zip, args=(path, pattern, recurse, search_by_name,))\n thread.start()\n GLib.timeout_add(50, self.update_progress, search_by_name)\n\n def search_zip(self, path, pattern, recurse, search_by_name):\n zip_path = path\n try:\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n for name in files:\n if not self.searching:\n break;\n zip_path = os.path.join(root, name)\n if zipfile.is_zipfile(zip_path):\n with self.statusLock:\n self.status = f'Searching {zip_path}'\n try:\n self.parse_zip(path, os.path.relpath(zip_path, path), pattern, search_by_name)\n except zipfile.BadZipFile as e:\n 
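# GTK widgets must only be touched from the main loop, so report the error via GLib.idle_add\n 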
GLib.idle_add(self.handle_invalid_zip, e, zip_path)\n if not recurse or not self.searching:\n break;\n else:\n with self.statusLock:\n self.status = f'Searching {zip_path}'\n self.parse_zip(os.path.dirname(path), os.path.basename(path), pattern, search_by_name)\n except Exception as e:\n GLib.idle_add(self.handle_invalid_zip, e, zip_path)\n GLib.idle_add(self.search_stop, threading.current_thread())\n\n def select_zip(self, widget, action):\n chooser = Gtk.FileChooserNative()\n chooser.set_transient_for(self)\n chooser.set_action(action)\n\n if action == Gtk.FileChooserAction.OPEN:\n zipFilter = Gtk.FileFilter()\n zipFilter.set_name(\"ZIP files\")\n zipFilter.add_mime_type(\"application/zip\")\n allFilter = Gtk.FileFilter()\n allFilter.set_name(\"All files\")\n allFilter.add_pattern(\"*\")\n chooser.add_filter(zipFilter)\n chooser.add_filter(allFilter)\n\n if chooser.run() == Gtk.ResponseType.ACCEPT:\n self.zipEntry.set_text(chooser.get_filename())\n chooser.destroy()\n\n def update_progress(self, search_by_name):\n self.process_queue(search_by_name)\n if self.searching:\n self.progress.pulse()\n else:\n with self.statusLock:\n self.status = f'Found {self.matches} matches.'\n self.matches = 0\n self.progress.set_fraction(0.0)\n with self.statusLock:\n status = self.status\n self.progress.set_text(status)\n return self.searching\n\nwin = Walgrep()\nwin.connect(\"destroy\", Gtk.main_quit)\nwin.show_all()\nGtk.main()\n","sub_path":"walgrep.py","file_name":"walgrep.py","file_ext":"py","file_size_in_byte":11647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"195616675","text":"from django.db import migrations\n\n\nINFINIBAND_SLUGS = (\n ('inifiband-sdr', 'infiniband-sdr'),\n ('inifiband-ddr', 'infiniband-ddr'),\n ('inifiband-qdr', 'infiniband-qdr'),\n ('inifiband-fdr10', 'infiniband-fdr10'),\n ('inifiband-fdr', 'infiniband-fdr'),\n ('inifiband-edr', 'infiniband-edr'),\n ('inifiband-hdr', 'infiniband-hdr'),\n ('inifiband-ndr', 'infiniband-ndr'),\n ('inifiband-xdr', 'infiniband-xdr'),\n)\n\n\ndef correct_infiniband_types(apps, schema_editor):\n Interface = apps.get_model('dcim', 'Interface')\n for old, new in INFINIBAND_SLUGS:\n Interface.objects.filter(type=old).update(type=new)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dcim', '0103_standardize_description'),\n ]\n\n operations = [\n migrations.RunPython(\n code=correct_infiniband_types,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n","sub_path":"netbox/dcim/migrations/0104_correct_infiniband_types.py","file_name":"0104_correct_infiniband_types.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291227943","text":"from math import gcd, lcm\nclass Solution:\n def replaceNonCoprimes(self, nums):\n stack = [nums[0]]\n for i in range(1,len(nums)):\n while stack and gcd(stack[-1], nums[i]) > 1:\n nums[i] = lcm(nums[i],stack[-1])\n stack.pop()\n stack.append(nums[i])\n return stack","sub_path":"2197. Replace Non-Coprime Numbers in Array.py","file_name":"2197. 
Replace Non-Coprime Numbers in Array.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"332519747","text":"#Name Izabella Wieckowska\n#Date oct 31 2017\n#Dna complementary strands\n\ndna = str(input(\"Input DNA strand here\"))\ncomplement = \"\" # start empty so the printed strand has no stray leading space\n\nfor i in range(len(dna)):\n if dna[i] == 'A':\n complement= complement+'T'\n elif dna[i] == 'C':\n complement=complement+'G'\n elif dna[i] == 'G':\n complement=complement+'C'\n elif dna[i] == 'T':\n complement= complement+'A'\n\nprint(\"The complementary strand is\",complement)\n \n","sub_path":"dnastrand.py","file_name":"dnastrand.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"596057278","text":"import algoFuncs as af\nimport numpy as np\nimport cProfile\n\ndef doTrials(Trials,n_parts,x,y,z,w,gamma):\n for i in range(Trials):\n af.doProjectionV2(x,y,n_parts,z,w,gamma,False,1.0)\n\n\nn_parts = 10\nd = 10000\nx = np.ones([n_parts,d])\ny = np.ones([n_parts,d])\nz = np.ones(d)\nw = np.zeros([n_parts,d])\ngamma = 10.0\nTrials = 1000\n\ncProfile.run('doTrials(Trials,n_parts,x,y,z,w,gamma)')\n\n\n\n","sub_path":"profile_indiv.py","file_name":"profile_indiv.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"307935913","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom pytube import YouTube\nFolder_Name=\"\"\ndef progress_function(stream, chunk, file_handle, bytes_remaining):\n # 'stream' is the stream being downloaded; there is no global 'video' object\n print(round((1-bytes_remaining/stream.filesize)*100, 3), '% done...')\n#file location\ndef openLocation():\n global Folder_Name\n Folder_Name=filedialog.askdirectory()\n if(len(Folder_Name)>1):\n locationError.config(text=Folder_Name,fg=\"green\")\n else:\n locationError.config(text=\"Please choose Folder!!\",fg=\"red\")\n\ndef DownloadVideo():\n choice=ytdChoices.get()\n url=ytdEntry.get()\n if(len(url)>1):\n ytdError.config(text=\"\")\n yt=YouTube(url, on_progress_callback=progress_function)\n \n # pick a stream matching the chosen quality and download it into Folder_Name\n if choice == \"Only audio\":\n stream = yt.streams.filter(only_audio=True).first()\n else:\n stream = yt.streams.filter(res=choice).first()\n stream.download(Folder_Name)\n ytdError.config(text=\"download completed\")\n\n\n \nroot=Tk()\nroot.title(\"Youtube Downloader\")\nroot.geometry(\"350x400\")\nroot.columnconfigure(0,weight=1)\n\nytdLabel=Label(root,text=\"enter youtube video URL\",font=(\"jost\",15))\nytdLabel.grid()\n\nytdEntryVar=StringVar()\nytdEntry=Entry(root,width=50,textvariable=ytdEntryVar)\nytdEntry.grid()\n\nytdError=Label(root,text=\"Error Msg\",fg=\"red\",font=(\"jost\",10))\nytdError.grid()\n\nsaveLabel=Label(root,text=\"Save the video file\",font=(\"jost\",15,\"bold\"))\nsaveLabel.grid()\n\nsaveEntry=Button(root,width=10,bg=\"red\",fg=\"white\",text=\"Choose Path\",command=openLocation)\nsaveEntry.grid()\n\nlocationError=Label(root,text=\"Error Msg of Path\",fg=\"red\",font=(\"jost\",10))\nlocationError.grid()\n\nytdQuality=Label(root,text=\"Select quality\",font=(\"jost\",15,\"bold\"))\nytdQuality.grid()\n\nchoices=[\"720p\",\"144p\",\"Only audio\"]\nytdChoices=ttk.Combobox(root,values=choices)\nytdChoices.grid()\n\ndownloadbtn=Button(root,text=\"Download video\",width=10,bg=\"red\",fg=\"white\",command=DownloadVideo)\ndownloadbtn.grid()\n\nroot.mainloop()","sub_path":"ytd.py","file_name":"ytd.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"237875440","text":"from 
entities.user import User\nfrom common import status_code\nimport requests\nimport json\nimport sys\nfrom firebase_admin import auth\nimport firebase_admin\n\n\nclass UserModel():\n def __init__(self, conn_tool):\n self._db = conn_tool.db\n self._uid = conn_tool.uid\n\n def get_user_info_by_email(self, email):\n\n try:\n uid = auth.get_user_by_email(email).uid\n user = self._db.collection('users').document(uid).get()\n except:\n return {'msg': '找不到使用者!'}, status_code.NOT_FOUND\n\n if user.exists:\n user_dict = User.from_dict(user.to_dict()).to_dict()\n user_dict.update({'uid': user.id})\n print(user_dict, file=sys.stderr)\n return user_dict, status_code.OK\n\n def get_user_info_by_uid(self, uid=None):\n if uid == None:\n user = self._db.collection('users').document(self._uid).get()\n else:\n try:\n auth.get_user(uid)\n user = self._db.collection('users').document(uid).get()\n except:\n return {'msg': '找不到使用者!'}, status_code.NOT_FOUND\n if user.exists:\n user_dict = User.from_dict(user.to_dict()).to_dict()\n user_dict.update({'uid': user.id})\n print(user_dict, file=sys.stderr)\n return user_dict, status_code.OK\n\n def get_user_githubToken(self, uid=None):\n info = None\n if uid == None:\n info = self._db.collection('users').document(\n self._uid).get().to_dict()\n else:\n info = self._db.collection('users').document(uid).get().to_dict()\n if 'Github' in info:\n return info['Github']\n else:\n return None\n\n def add_user(self, name, email):\n user = User(name=name, email=email)\n self._db.collection(u'users').document(self._uid).set(user.to_dict())\n return None, status_code.OK\n\n def set_user_token(self, code):\n parameters = {\n 'client_id': '4fc83f8cb4d05b3684de',\n 'client_secret': 'f9f2503c8228d7d305e00fbcd01a52bfb59a387b',\n 'code': code\n }\n header = {\n \"Accept\": \"application/json\"\n }\n r = requests.post(\n 'https://github.com/login/oauth/access_token', data=parameters, headers=header)\n\n resp = json.loads(r.text)\n\n if \"access_token\" in resp:\n # print(resp[\"access_token\"])\n user = self._db.collection('users').document(self._uid)\n user.update({'Github': resp[\"access_token\"]})\n return 'Get access token success !', status_code.OK\n else:\n return resp[\"error_description\"], status_code.BAD_REQUEST\n\n def update_user_info(self, email, name):\n user = self._db.collection('users').document(self._uid)\n if user.get().exists:\n\n try:\n\n auth.update_user(self._uid, email=email)\n user.update({'name': name, 'email': email})\n except Exception as e:\n # print(type(e))\n if type(e) is firebase_admin._auth_utils.EmailAlreadyExistsError:\n return {'error': 'Email已存在'}, status_code.BAD_REQUEST\n else:\n return {'error': '未知錯誤'}, status_code.BAD_REQUEST\n\n return status_code.OK\n","sub_path":"src/models/user_model.py","file_name":"user_model.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"635501458","text":"#!/usr/bin/env python\n\nimport oauth2 as oauth\nfrom oauthtwitter import OAuthApi\nimport pprint\n\nconsumer_key = \"\"\nconsumer_secret = \"\"\n\naccess_tok = \"\"\naccess_tok_secret = \"\"\n\nif access_tok == \"\":\n twitter = OAuthApi(consumer_key, consumer_secret)\n\n # Get the temporary credentials for our next few calls\n temp_credentials = twitter.getRequestToken()\n\n # User pastes this into their browser to bring back a pin number\n print(twitter.getAuthorizationURL(temp_credentials))\n \n oauth_verifier = raw_input('What is the Verifier? 
')\n access_token = twitter.getAccessToken(temp_credentials, oauth_verifier)\n \n \n print(\"oauth access token: \" + access_token['oauth_token'])\n print(\"oauth access token secret: \" + access_token['oauth_token_secret'])\n \n access_tok = access_token['oauth_token']\n access_tok_secret = access_token['oauth_token_secret']\n \n# Do a test API call using our new credentials\ntwitter = OAuthApi(consumer_key, consumer_secret, access_tok, access_tok_secret)\n\nif True:\n res = twitter.VerifyCredentials()\n print( \"User Name: \" + res['name'] )\n print( \"Status: \" + res['status']['text'] )\n\nif False:\n res = twitter.UpdateStatus(\"Test 2 with OAuth\")\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(res)\n","sub_path":"energygraph/contrib/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"178207579","text":"# -*- coding: utf-8 -*-\n# Created at 2020-07-05 \n\n\nclass UnionFind(object):\n def __init__(self, n):\n self.parent = [i for i in range(n)]\n self.count = n\n\n def union(self, p, i, j):\n p1 = self.find(p, i)\n p2 = self.find(p, j)\n if p1 != p2:\n self.count -= 1\n p[p1] = p2\n\n @staticmethod\n def find(p, i):\n root = i\n while p[root] != root:\n root = p[root]\n\n while p[i] != i:\n i, p[i] = p[i], root\n\n return root\n\n\nclass Solution(object):\n def findCircleNum(self, M):\n \"\"\"\n :type M: List[List[int]]\n :rtype: int\n \"\"\"\n uf = UnionFind(len(M))\n for i in range(len(M)):\n for j in range(i):\n if M[i][j] == 1:\n uf.union(uf.parent, i, j)\n return uf.count\n\n\n\n","sub_path":"Week_07/03_friend_circles.py","file_name":"03_friend_circles.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"635360083","text":"from rest_framework import serializers\nfrom versatileimagefield.serializers import VersatileImageFieldSerializer\nfrom ..models import Image\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n image = VersatileImageFieldSerializer(\n sizes=[\n ('full_size', 'url'),\n ('avatar', 'thumbnail__40x40'),\n ]\n )\n\n class Meta:\n model = Image\n fields = ['pk', 'title', 'image', 'caption', 'likes', 'shares', 'owner', 'profile_picture', 'private', ]\n","sub_path":"backend/images/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"408707115","text":"\"\"\"Tools for parsing OUTCAR files.\"\"\"\nimport re\n\nfrom aiida_vasp.utils.aiida_utils import get_data_class\nfrom aiida_vasp.io.parser import BaseFileParser\n\nDEFAULT_OPTIONS = {'quantities_to_parse': ['volume', 'energies', 'fermi_level']}\n\n\nclass OutcarParser(BaseFileParser):\n \"\"\"\n Parse OUTCAR into a dictionary, which is supposed to be turned into ParameterData later.\n\n For constructor params and more details check the documentation for ``aiida_vasp.io.parser`` and\n ``aiida_vasp.io.parser.BaseParser``.\n \"\"\"\n\n FILE_NAME = 'OUTCAR'\n PARSABLE_ITEMS = {\n 'outcar-volume': {\n 'inputs': ['parameters'],\n 'nodeName': 'parameters',\n 'prerequisites': []\n },\n 'outcar-energies': {\n 'inputs': ['parameters'],\n 'nodeName': 'parameters',\n 'prerequisites': []\n },\n 'outcar-fermi_level': {\n 'inputs': ['parameters'],\n 'nodeName': 'parameters',\n 'prerequisites': []\n },\n 'outcar-parameters': {\n 'inputs': [],\n 'nodeName': 
'parameters',\n            'prerequisites': []\n        },\n        'symmetries': {\n            'inputs': [],\n            'nodeName': 'parameters',\n            'prerequisites': []\n        }\n    }\n\n    SPACE_GROUP_OP_PATTERN = re.compile(r'Found\\s*(\\d+) space group operations')\n    POINT_GROUP_OP_PATTERN = re.compile(r'whereof\\s*(\\d+) operations')\n    POINT_SYMMETRY_PATTERN = re.compile(r'point symmetry (.*?)\\s*\\.')\n    SPACE_GROUP_PATTERN = re.compile(r'space group is (.*?)\\s*\\.')\n\n    def __init__(self, *args, **kwargs):\n        super(OutcarParser, self).__init__(*args, **kwargs)\n        self.init_with_kwargs(**kwargs)\n\n    def _parse_file(self, inputs):\n        \"\"\"Add all quantities parsed from OUTCAR to _parsed_data.\"\"\"\n\n        result = self._read_outcar(inputs)\n        params = get_data_class('parameter')(dict=result)\n        result['outcar-parameters'] = params\n        return result\n\n    @staticmethod\n    def _parse_line_regex_once(line, regex, res_dict, key, convert=None):\n        \"\"\"\n        Parse ``line`` with regular expression ``regex``, optionally convert the result and store it in ``res_dict[key]``.\n\n        Does not overwrite ``res_dict[key]`` if it already exists and is not None.\n        \"\"\"\n        if res_dict.get(key, None) is None:\n            regex_result = re.findall(regex, line)\n            if not regex_result:\n                res_dict[key] = None\n            else:\n                result = regex_result[0]\n                if convert:\n                    result = convert(result)\n                res_dict[key] = result\n\n    def _read_outcar(self, inputs):\n        \"\"\"Parse the OUTCAR file into a dictionary.\"\"\"\n        result = {}\n        energy_free = []\n        energy_zero = []\n        symmetries = {}\n        with open(self._data_obj.path, 'r') as outcar_file_object:\n            for line in outcar_file_object:\n                # volume\n                if line.rfind('volume of cell :') > -1:\n                    result['outcar-volume'] = float(line.split()[-1])\n                # Free energy\n                if line.lower().startswith(' free  energy   toten'):\n                    energy_free.append(float(line.split()[-2]))\n                # Extrapolated zero point energy\n                if line.startswith('  energy  without entropy'):\n                    energy_zero.append(float(line.split()[-1]))\n                # Fermi energy\n                if line.rfind('E-fermi') > -1:\n                    result['outcar-fermi_level'] = float(line.split()[2])\n                # space group operations\n                self._parse_line_regex_once(line, self.SPACE_GROUP_OP_PATTERN, symmetries, 'num_space_group_operations', int)\n                # point group operations\n                self._parse_line_regex_once(line, self.POINT_GROUP_OP_PATTERN, symmetries, 'num_point_group_operations', int)\n                # point symmetry\n                self._parse_line_regex_once(line, self.POINT_SYMMETRY_PATTERN, symmetries, 'point_symmetry')\n                # space group\n                self._parse_line_regex_once(line, self.SPACE_GROUP_PATTERN, symmetries, 'space_group')\n        result['outcar-energies'] = {}\n        result['outcar-energies']['free_energy'] = energy_free[-1]\n        result['outcar-energies']['energy_without_entropy'] = energy_zero[-1]\n        result['outcar-energies']['free_energy_all'] = energy_free\n        result['outcar-energies']['energy_without_entropy_all'] = energy_zero\n        result['symmetries'] = symmetries\n        return result\n","sub_path":"aiida_vasp/io/outcar.py","file_name":"outcar.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} {"seq_id":"320180551","text":"import commands\nimport webbrowser\nfrom selenium import webdriver\n\ndef add_():\n\tip_add = commands.getoutput('hostname -I')\n\turl = 'http://{ip_add}:631/admin'.format(ip_add=ip_add)\n\tfinal_url = url.replace(\" \",\"\")\n\toptions = webdriver.ChromeOptions()\n\toptions.binary_location = '/usr/lib/chromium-browser/chromium-browser'\n\tdriver = 
webdriver.Chrome(chrome_options=options)\n\tdriver.get(final_url)\nadd_()\n","sub_path":"Kivy master/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} {"seq_id":"227340826","text":"import numpy as np\nimport sys\nimport math\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nno_of_bins = 10\n\n#------New------\ndef get_cat_indexes():\n    with open(\"out_graph.txt\", \"r\") as file_one:\n        n = int(file_one.readline())\n    \n    index = [False for i in range(n)]\n    with open(\"out_community.txt\", \"r\") as file_one:\n        file_one.readline()\n        for line in file_one:\n            info = line.split()\n            if int(info[1]) == 1:\n                index[int(info[0])] = True\n\n    return index\n\nclass Node():\n    def __init__(self):\n        self.id = 0\n        self.out_neighbors = set()\n        self.in_neighbors = set()\n        self.x_to_give = 0.0\n        self.excess_to_red = 0.0\n        self.excess_to_blue = 0.0\n        self.out_red = 0\n        self.in_red = 0\n        self.out_blue = 0\n        self.in_blue = 0\n        self.out_red_ratio = 0.0\n        self.in_red_ratio = 0.0\n        self.out_blue_ratio = 0.0\n        self.in_blue_ratio = 0.0\n        self.excess_to_red_individual = 0.0 # Neighborhood.\n        self.excess_to_blue_individual = 0.0 # Neighborhood.\n        self.importance_in_community = 0.0\n\n    def set_node_id(self, number):\n        self.id = number\n\n    def set_out_red_ratio(self, ratio):\n        self.out_red_ratio = ratio\n    \n    def set_out_blue_ratio(self, ratio):\n        self.out_blue_ratio = ratio\n\n    def set_in_red_ratio(self, ratio):\n        self.in_red_ratio = ratio\n    \n    def set_in_blue_ratio(self, ratio):\n        self.in_blue_ratio = ratio\n\n    def set_out_blue(self, number):\n        self.out_blue = number\n\n    def set_out_red(self, number):\n        self.out_red = number\n\n    def set_in_blue(self, number):\n        self.in_blue = number\n\n    def set_in_red(self, number):\n        self.in_red = number\n\n    def set_x_to_give(self, phi):\n        if (self.out_blue == 0) and (self.out_red) == 0:\n            self.x_to_give = 0.0\n        elif self.out_red_ratio <= phi:\n            self.x_to_give = (1 - phi) / self.out_blue\n        elif self.out_blue_ratio < (1 - phi):\n            self.x_to_give = phi / self.out_red\n\n    def set_excess_to_red(self, phi):\n        if self.out_red_ratio < phi:\n            numerator = (1 - phi) * self.out_red\n            if self.out_blue == 0:\n                fraction = 0\n            else:\n                fraction = numerator / self.out_blue\n            self.excess_to_red = phi - fraction\n\n    def set_excess_to_blue(self, phi):\n        if self.out_blue_ratio < (1 - phi):\n            numerator = phi * self.out_blue\n            if self.out_red == 0:\n                fraction = 0\n            else:\n                fraction = numerator / self.out_red\n            self.excess_to_blue = (1 - phi) - fraction\n\n    def set_excess_to_individuals(self, red_nodes, blue_nodes):\n        if self.out_blue != 0:\n            self.excess_to_blue_individual = self.excess_to_blue / self.out_blue\n        else:\n            self.excess_to_blue_individual = self.excess_to_blue / blue_nodes\n        if self.out_red != 0:\n            self.excess_to_red_individual = self.excess_to_red / self.out_red\n        else:\n            self.excess_to_red_individual = self.excess_to_red / red_nodes\n\n    def set_importance_in_community(self, value):\n        self.importance_in_community = value\n\nclass Graph():\n    def __init__(self):\n        self.no_of_nodes = self.get_no_of_nodes()\n        self.nodes = np.array([Node() for i in range(self.no_of_nodes)])\n        self.node_communities = np.array([0 for i in range(self.no_of_nodes)], dtype=int)\n        self.delta_red = np.zeros(self.no_of_nodes)\n        self.delta_blue = np.zeros(self.no_of_nodes)\n        self.set_node_communitites()\n        self.set_node_infos()\n
        self.transition_matrix = np.zeros((self.no_of_nodes, self.no_of_nodes))\n    \n    def get_no_of_nodes(self):\n        n = 0\n        with open(\"out_graph.txt\", \"r\") as file_one:\n            n = int(file_one.readline())\n\n        return n\n    \n    def set_node_communitites(self):\n        with open(\"out_community.txt\", \"r\") as file_one:\n            file_one.readline()\n            for line in file_one:\n                node_id = int(line.split()[0])\n                node_cat = int(line.split()[1])\n                self.node_communities[node_id] = node_cat\n\n    def get_community_of_node(self, node):\n        return self.node_communities[node]\n\n    def set_node_infos(self):\n        with open(\"out_graph.txt\", \"r\") as file_one:\n            file_one.readline()\n            for line in file_one:\n                node_source = int(line.split()[0])\n                node_target = int(line.split()[1])\n                self.nodes[node_source].out_neighbors.add(node_target)\n                self.nodes[node_target].in_neighbors.add(node_source)\n        \n        node_id = 0\n        for node in self.nodes:\n            out_red = 0\n            in_red = 0\n            out_blue = 0\n            in_blue = 0\n            out_total = 0\n            in_total = 0\n            for out_nei in node.out_neighbors:\n                out_total += 1\n                if self.get_community_of_node(out_nei) == 1:\n                    out_red += 1\n                else:\n                    out_blue += 1\n            if out_total != 0:\n                red_ratio = out_red / out_total\n                blue_ratio = out_blue / out_total\n            else:\n                red_ratio = blue_ratio = 0\n            node.set_out_blue(out_blue)\n            node.set_out_red(out_red)\n            node.set_out_red_ratio(red_ratio)\n            node.set_out_blue_ratio(blue_ratio)\n            for in_nei in node.in_neighbors:\n                in_total += 1\n                if self.get_community_of_node(in_nei) == 1:\n                    in_red += 1\n                else:\n                    in_blue += 1\n            if in_total != 0:\n                red_ratio = in_red / in_total\n                blue_ratio = in_blue / in_total\n            else:\n                red_ratio = blue_ratio = 0\n            node.set_in_blue(in_blue)\n            node.set_in_red(in_red)\n            node.set_in_red_ratio(red_ratio)\n            node.set_in_blue_ratio(blue_ratio)\n            node.set_excess_to_individuals(np.sum(self.node_communities), np.sum(1 - self.node_communities))\n            node.id = node_id\n            node_id += 1\n\n    def set_excess_deltas(self):\n        for i in range(self.no_of_nodes):\n            self.delta_red[i] = self.nodes[i].excess_to_red\n            self.delta_blue[i] = self.nodes[i].excess_to_blue\n\n    def set_transition_matrix(self):\n        for node in range(self.no_of_nodes):\n            for neighbor in self.nodes[node].out_neighbors:\n                self.transition_matrix[node][neighbor] = self.nodes[node].x_to_give\n\n    def get_jump_vector(self, phi):\n        jump_vector = np.zeros(self.no_of_nodes)\n        red_nodes = np.sum(self.node_communities)\n        blue_nodes = self.no_of_nodes - red_nodes\n        for i in range(self.no_of_nodes):\n            if self.get_community_of_node(i) == 1:\n                jump_vector[i] = phi / red_nodes\n            else:\n                jump_vector[i] = (1 - phi) / blue_nodes\n\n        return jump_vector\n\n    def set_importance_in_communities(self, pagerank_v):\n        pgrnk_per_com = np.zeros(2)\n        for i in range(self.no_of_nodes):\n            com = self.get_community_of_node(i)\n            pgrnk_per_com[com] += pagerank_v[i]\n\n        for i in range(self.no_of_nodes):\n            com = self.get_community_of_node(i)\n            value = pagerank_v[i] / pgrnk_per_com[com]\n            self.nodes[i].set_importance_in_community(value)\n\n#-------------------------------------------------- MAIN ---------------------------------------------------#\n# define phi.\nif len(sys.argv) != 2:\n    sys.exit(\"provide 1 argument\")\nelse:\n    PHI = float(sys.argv[1])\n\nindex = get_cat_indexes()\n\nif PHI == 0:\n    PHI = sum(index) / len(index)\nprint(\"phi: \", PHI)\n\ndef plots():\n    # Init infos.\n    g = Graph()\n    g.set_node_communitites()\n    g.set_node_infos()\n    for node in g.nodes:\n        node.set_x_to_give(PHI)\n        node.set_excess_to_red(PHI)\n        node.set_excess_to_blue(PHI)\n    g.set_excess_deltas()\n\n    # Load rank vectors in arrays.\n    algorithms 
= [\"pagerank\", \"lfprn\", \"lfpru\", \"lfprp\"]\n out_file = [\"out_pagerank_pagerank.txt\", \"out_lfpr_n_pagerank.txt\", \"out_lfpr_u_pagerank.txt\", \"out_lfpr_p_pagerank.txt\"]\n\n rank_vectors = dict()\n exc_pol_red = dict()\n exc_pol_blue = dict()\n delta_red = g.delta_red\n delta_blue = g.delta_blue\n\n # Init rank vectors arrays.\n for i in range(len(algorithms)):\n rank_vectors[algorithms[i]] = np.zeros(g.no_of_nodes)\n exc_pol_red[algorithms[i]] = np.zeros(g.no_of_nodes)\n exc_pol_blue[algorithms[i]] = np.zeros(g.no_of_nodes)\n with open(out_file[i], \"r\") as file_one:\n j = 0\n for line in file_one:\n rank_vectors[algorithms[i]][j] = float(line)\n j += 1\n\n # Init importance in community for lfprp.\n g.set_importance_in_communities(rank_vectors[\"pagerank\"])\n\n # Init excess policies.\n for i in range(g.no_of_nodes):\n #exc_pol_red[\"lfprn\"][i] = g.nodes[i].excess_to_red_individual\n #exc_pol_blue[\"lfprn\"][i] = g.nodes[i].excess_to_blue_individual\n if g.get_community_of_node(i) == 0:\n exc_pol_red[\"lfpru\"][i] = 0\n exc_pol_red[\"lfprp\"][i] = 0\n exc_pol_blue[\"lfpru\"][i] = 1 / np.sum(1 - g.node_communities)\n exc_pol_blue[\"lfprp\"][i] = g.nodes[i].importance_in_community\n else:\n exc_pol_red[\"lfpru\"][i] = 1 / np.sum(g.node_communities)\n exc_pol_red[\"lfprp\"][i] = g.nodes[i].importance_in_community\n exc_pol_blue[\"lfpru\"][i] = 0\n exc_pol_blue[\"lfprp\"][i] = 0\n\n # Init actual delta-policies.\n act_delta_red = dict()\n act_delta_blue = dict()\n act_exc_pol_red = dict()\n act_exc_pol_blue = dict()\n algorithms.remove(\"pagerank\")\n for algorithm in algorithms:\n act_delta_red[algorithm] = np.multiply(delta_red, rank_vectors[algorithm])\n act_delta_blue[algorithm] = np.multiply(delta_blue, rank_vectors[algorithm])\n red_delta = np.dot(delta_red, rank_vectors[algorithm])\n blue_delta = np.dot(delta_blue, rank_vectors[algorithm])\n act_exc_pol_red[algorithm] = red_delta * exc_pol_red[algorithm] \n act_exc_pol_blue[algorithm] = blue_delta * exc_pol_blue[algorithm] \n\n # Plot distributions of delta, act_delta, exc_pol and act_exc_pol.\n algorithms.remove(\"lfprn\")\n for algorithm in algorithms:\n # Act deltas\n fig = plt.figure(figsize=(10.,10.))\n plt.title(\"Value Deltas Distribution %s\" %algorithm)\n values_min = min(np.amin(act_delta_blue[algorithm]), np.amin(act_delta_red[algorithm]))\n values_max = max(np.amax(act_delta_blue[algorithm]), np.amax(act_delta_red[algorithm]))\n lngth = (values_max - values_min) / no_of_bins\n plt.xticks(np.arange(values_min, values_max + lngth, lngth))\n plt.hist([act_delta_blue[algorithm], act_delta_red[algorithm]], color= [\"b\", \"r\"], bins= no_of_bins, weights= [np.ones(g.no_of_nodes) / g.no_of_nodes, np.ones(g.no_of_nodes) / g.no_of_nodes])\n plt.axvline(1 / g.no_of_nodes, label=\"Fair Ratio\", linewidth = 1, color = \"k\", ls=\"--\")\n plt.savefig(\"out_act_delta_distribution_%s.pdf\" %algorithm)\n plt.savefig(\"out_act_delta_distribution_%s.png\" %algorithm)\n plt.close()\n\n # Act exc_pol\n fig = plt.figure(figsize=(10.,10.))\n plt.title(\"Value Excess Policy Distribution %s\" %algorithm)\n values_min = min(np.amin(act_exc_pol_blue[algorithm]), np.amin(act_exc_pol_red[algorithm]))\n values_max = max(np.amax(act_exc_pol_blue[algorithm]), np.amax(act_exc_pol_red[algorithm]))\n lngth = (values_max - values_min) / no_of_bins\n plt.xticks(np.arange(values_min, values_max + lngth, lngth))\n plt.hist([act_delta_blue[algorithm], act_delta_red[algorithm]], color= [\"b\", \"r\"], bins= no_of_bins, weights= 
[np.ones(g.no_of_nodes) / g.no_of_nodes, np.ones(g.no_of_nodes) / g.no_of_nodes])\n plt.axvline(1 / g.no_of_nodes, label=\"Fair Ratio\", linewidth = 1, color = \"k\", ls=\"--\")\n plt.savefig(\"out_act_exc_pol_distribution_%s.pdf\" %algorithm)\n plt.savefig(\"out_act_exc_pol_distribution_%s.png\" %algorithm)\n plt.close()\n\n algorithms.append(\"lfprn\")\n # Deltas distributions.\n fig = plt.figure(figsize=(10.,10.))\n plt.title(\"delta Distribution\")\n values_min = min(np.amin(delta_blue), np.amin(delta_blue))\n values_max = max(np.amax(delta_blue), np.amax(delta_blue))\n lngth = (values_max - values_min) / no_of_bins\n plt.xticks(np.arange(values_min, values_max + lngth, lngth))\n plt.hist([delta_blue, delta_red], color= [\"b\", \"r\"], bins= no_of_bins, align=\"left\", weights= [np.ones(g.no_of_nodes) / g.no_of_nodes, np.ones(g.no_of_nodes) / g.no_of_nodes], rwidth=0.5)\n plt.axvline(1 / g.no_of_nodes, label=\"Fair Ratio\", linewidth = 1, color = \"k\", ls=\"--\")\n plt.savefig(\"delta_distribution.pdf\")\n plt.savefig(\"deltal_distribution.png\")\n plt.close()\n\n # Bar plots for algorithms.\n algorithms.append(\"pagerank\")\n\n for algorithm in algorithms:\n index = np.argsort(-rank_vectors[algorithm])\n delta_red_temp = delta_red[index]\n delta_blue_temp = delta_blue[index]\n \n\n fig = plt.figure(figsize=(10.,10.))\n fig.suptitle(\"Delta to algorithms\")\n plt.title(algorithm)\n plt.xlabel(\"%s ordered\" %algorithm)\n plt.ylabel(\"delta value\")\n plt.plot(np.arange(1,g.no_of_nodes + 1, 1), delta_blue_temp, \",b\")\n plt.plot(np.arange(1, g.no_of_nodes + 1, 1), delta_red_temp, \",r\")\n plt.savefig(\"delta_to_%s.pdf\" %algorithm)\n plt.savefig(\"delta_to_%s.png\" %algorithm)\n\ndef top_dif_analysis():\n # Init infos.\n g = Graph()\n g.set_node_communitites()\n g.set_node_infos()\n for node in g.nodes:\n node.set_x_to_give(PHI)\n node.set_excess_to_red(PHI)\n node.set_excess_to_blue(PHI)\n g.set_excess_deltas()\n\n with open(\"out_graph.txt\", \"r\") as file_one:\n no_of_nodes = int(file_one.readline())\n\n # Load rank vectors in arrays.\n algorithms = [\"pagerank\", \"lfprn\", \"lfpru\", \"lfprp\"]\n out_file = [\"out_pagerank_pagerank.txt\", \"out_lfpr_n_pagerank.txt\", \"out_lfpr_u_pagerank.txt\", \"out_lfpr_p_pagerank.txt\"]\n\n rank_vectors = dict()\n sort_index = dict()\n dif_vectors = dict()\n\n # Init rank vectors arrays.\n for i in range(len(algorithms)):\n rank_vectors[algorithms[i]] = np.zeros(no_of_nodes)\n with open(out_file[i], \"r\") as file_one:\n j = 0\n for line in file_one:\n rank_vectors[algorithms[i]][j] = float(line)\n j += 1\n\n algorithms.remove(\"pagerank\")\n pgrnk_index = np.argsort(-rank_vectors[\"pagerank\"])\n for algo in algorithms:\n dif_vectors[algo] = rank_vectors[algo] - rank_vectors[\"pagerank\"]\n k_min = np.argsort(dif_vectors[algo])[:10]\n k_max = np.argsort(-dif_vectors[algo])[:10]\n with open(\"out_%s_value_dif_weighted.txt\" %algo, \"w\") as file_one:\n file_one.write(\"node\\tdiff\\tcom\\tin_red_ratio\\tout_red_ratio\\texc_to_red\\texc_to_blue\\tavr_in_nei_out_red_ratio\\tbest_in_nei_pgrnk_pos\\n\")\n for i in k_min:\n avrg_red_ratio = 0.0\n in_nei_pgrnk = 0.0\n pgrnk_pos = g.no_of_nodes\n for nghbr in g.nodes[i].in_neighbors:\n in_nei_pgrnk += rank_vectors[\"pagerank\"][nghbr]\n #avrg_red_ratio += g.nodes[nghbr].out_red_ratio\n avrg_red_ratio += g.nodes[nghbr].out_red_ratio * rank_vectors[\"pagerank\"][nghbr]\n pos = np.where(pgrnk_index == nghbr)[0][0]\n if pos < pgrnk_pos:\n pgrnk_pos = pos\n if len(g.nodes[i].in_neighbors) != 0:\n 
#avrg_red_ratio = avrg_red_ratio / len(g.nodes[i].in_neighbors)\n avrg_red_ratio = avrg_red_ratio / in_nei_pgrnk\n file_one.write(\"%d\\t%f\\t%d\\t%f\\t%f\\t%f\\t%f\\t%f\\t%d\\n\" %(i, dif_vectors[algo][i], g.get_community_of_node(i), g.nodes[i].in_red_ratio, g.nodes[i].out_red_ratio,\n g.nodes[i].excess_to_red, g.nodes[i].excess_to_blue, avrg_red_ratio, pgrnk_pos))\n for i in k_max:\n avrg_red_ratio = 0.0\n in_nei_pgrnk = 0.0\n pgrnk_pos = g.no_of_nodes\n for nghbr in g.nodes[i].in_neighbors:\n in_nei_pgrnk += rank_vectors[\"pagerank\"][nghbr]\n #avrg_red_ratio += g.nodes[nghbr].out_red_ratio\n avrg_red_ratio += g.nodes[nghbr].out_red_ratio * rank_vectors[\"pagerank\"][nghbr]\n pos = np.where(pgrnk_index == nghbr)[0][0]\n if pos < pgrnk_pos:\n pgrnk_pos = pos\n if len(g.nodes[i].in_neighbors) != 0:\n #avrg_red_ratio = avrg_red_ratio / len(g.nodes[i].in_neighbors)\n avrg_red_ratio = avrg_red_ratio / in_nei_pgrnk\n file_one.write(\"%d\\t%f\\t%d\\t%f\\t%f\\t%f\\t%f\\t%f\\t%d\\n\" %(i, dif_vectors[algo][i], g.get_community_of_node(i), g.nodes[i].in_red_ratio, g.nodes[i].out_red_ratio,\n g.nodes[i].excess_to_red, g.nodes[i].excess_to_blue, avrg_red_ratio, pgrnk_pos))\n algorithms.append(\"pagerank\")\n\n # Init sort indexes.\n for algo in algorithms:\n sort_index[algo] = np.argsort(-rank_vectors[algo])\n\n # Find difference in positions.\n algorithms.remove(\"pagerank\")\n for algo in algorithms:\n pos_vectors = np.zeros(no_of_nodes, dtype=int)\n for i in range(no_of_nodes):\n dif_vectors[algo][sort_index[\"pagerank\"][i]] += i\n dif_vectors[algo][sort_index[algo][i]] -= i\n \"\"\"\n k_min = dict()\n k_max = dict()\n\n # Find 10 min and max diff, positions.\n for algo in algorithms:\n min_index = np.argsort(dif_vectors[algo]) \n max_index = np.argsort(-dif_vectors[algo])\n k_min[algo] = min_index[:10]\n k_max[algo] = max_index[:10]\n\n # Write in files.\n for algo in algorithms:\n with open(\"out_%s_rank_dif.txt\" %algo, \"w\") as file_one:\n file_one.write(\"node \\tdiff \\tcom \\tin_red_ratio \\tout_red_ratio \\texc_to_red \\texc_to_blue \\tavr_in_nei_out_red_ratio \\tbest_in_nei_pgrnk_pos \\n\")\n for i in k_min[algo]:\n avrg_red_ratio = 0.0\n pgrnk_pos = g.no_of_nodes\n for nghbr in g.nodes[i].in_neighbors:\n avrg_red_ratio += g.nodes[nghbr].out_red_ratio\n pos = np.where(pgrnk_index == nghbr)[0][0]\n if pos < pgrnk_pos:\n pgrnk_pos = pos\n if len(g.nodes[i].in_neighbors) != 0:\n avrg_red_ratio = avrg_red_ratio / len(g.nodes[i].in_neighbors)\n file_one.write(\"%d\\t%d\\t%d\\t%f\\t%f\\t%f\\t%f\\t%f\\t%d\\n\" %(i, dif_vectors[algo][i], g.get_community_of_node(i), g.nodes[i].in_red_ratio, g.nodes[i].out_red_ratio,\n g.nodes[i].excess_to_red, g.nodes[i].excess_to_blue, avrg_red_ratio, pgrnk_pos))\n for i in k_max[algo]:\n avrg_red_ratio = 0.0\n pgrnk_pos = g.no_of_nodes\n for nghbr in g.nodes[i].in_neighbors:\n avrg_red_ratio += g.nodes[nghbr].out_red_ratio\n pos = np.where(pgrnk_index == nghbr)[0][0]\n if pos < pgrnk_pos:\n pgrnk_pos = pos\n if len(g.nodes[i].in_neighbors) != 0:\n avrg_red_ratio = avrg_red_ratio / len(g.nodes[i].in_neighbors)\n file_one.write(\"%d\\t%d\\t%d\\t%f\\t%f\\t%f\\t%f\\t%f\\t%d\\n\" %(i, dif_vectors[algo][i], g.get_community_of_node(i), g.nodes[i].in_red_ratio, g.nodes[i].out_red_ratio,\n g.nodes[i].excess_to_red, g.nodes[i].excess_to_blue, avrg_red_ratio, pgrnk_pos))\n 
\"\"\"\n\ntop_dif_analysis()\n","sub_path":"Code/Python_files/local_rank_analysis.py","file_name":"local_rank_analysis.py","file_ext":"py","file_size_in_byte":20235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"221258419","text":"from __future__ import annotations\n\nimport math\nimport os\nfrom typing import List\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms, io\nfrom torchvision.io import video\n\nfrom models.PINet20 import TransferModel, create_model\nimport models.parsing\nfrom options.infer_options import InferOptions\nfrom tool import cords_to_map, reorder_pose\nfrom tool.compute_coordinates import DEFAULT_ARGS, PoseEstimator\nfrom util import util\nfrom data import remove_background\n\nIMAGE_SIZE = (256, 176)\n\nclass InferencePipeline:\n def __init__(self, pose_estimator, pinet: TransferModel, segmentator, opt):\n \"\"\"Initialize the pipeline with already loaded models.\"\"\"\n self.pose_estimator = pose_estimator\n self.pinet = pinet\n self.segmentator = segmentator\n self.opt = opt\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n \n @classmethod\n def from_opts(cls, opt) -> InferencePipeline:\n \"\"\"Load all trained models required from the locations indicated in opt.\"\"\"\n pinet = create_model(opt).eval()\n args = DEFAULT_ARGS\n args.opts = ['TEST.MODEL_FILE', opt.pose_estimator]\n pose_estimator = PoseEstimator(args, opt.gpu_ids != [])\n segmentator = SegmentationModel(opt.segmentation_model, bool(opt.gpu_ids))\n return cls(pose_estimator, pinet, segmentator, opt)\n\n def map_to(self, image: Image, target: Image) -> Image:\n target_pose = self.pose_estimator.infer(target)\n target_pose_map = reorder_pose(cords_to_map(target_pose, IMAGE_SIZE))\n return self.infer(image, target_pose_map)\n\n def infer(self, image: Image, target_pose_map: torch.Tensor) -> Image:\n # get pose\n pose = self.pose_estimator.infer(image)\n \n # convert to pose map\n pose_map = reorder_pose(cords_to_map(pose, IMAGE_SIZE))\n \n # get segmentation map ...\n spl_onehot = self.segmentator.get_segmap(image).unsqueeze(0)\n if self.opt.remove_background:\n image = remove_background(np.array(image), 1-spl_onehot[0, 0])\n \n # run PINet\n image_norm = self.transform(image).unsqueeze(0)\n \n if self.opt.gpu_ids:\n # move data to GPU\n device = self.opt.gpu_ids[0]\n pose_map = pose_map.cuda(device)\n target_pose_map = target_pose_map.cuda(device)\n image_norm = image_norm.cuda(device)\n spl_onehot = spl_onehot.cuda(device)\n with torch.no_grad():\n output_image, _ = self.pinet.infer(\n image_norm,\n pose_map,\n target_pose_map,\n spl_onehot\n )\n return Image.fromarray(util.tensor2im(output_image))\n\n def __call__(self, image: Image, target_pose: List):\n target_pose_map = reorder_pose(cords_to_map(np.array(target_pose), IMAGE_SIZE))\n return self.infer(image, target_pose_map)\n \n def render_video(self, image: Image, target_poses: str, batch_size=32):\n # get pose estimation\n pose = self.pose_estimator.infer(image)\n pose_map = reorder_pose(cords_to_map(pose, IMAGE_SIZE))\n # get semantic segmentation\n spl_onehot = self.segmentator.get_segmap(image).unsqueeze(0)\n # transform image\n image_norm = self.transform(image).unsqueeze(0)\n # read target poses\n pose_files = [os.path.join(target_poses, fname) for fname in sorted(os.listdir(target_poses))]\n target_poses = torch.cat([reorder_pose(np.load(file)) for file 
in pose_files])\n # create batch(es) of the same image, source pose, segmentation and different target poses\n if self.opt.gpu_ids:\n device = self.opt.gpu_ids[0]\n pose_map = pose_map.cuda(device)\n target_poses = target_poses.cuda(device)\n image_norm = image_norm.cuda(device)\n spl_onehot = spl_onehot.cuda(device)\n \n pose_map = pose_map.expand(batch_size, -1, -1, -1)\n spl_onehot = spl_onehot.expand(batch_size, -1, -1, -1)\n image_norm = image_norm.expand(batch_size, -1, -1, -1)\n with torch.no_grad():\n nframes = target_poses.size(0)\n nbatches = math.ceil(nframes / batch_size)\n for i in range(nbatches):\n nel = min(batch_size, nframes-i*batch_size)\n output_images, output_segmentations = self.pinet.infer(\n image_norm[:nel],\n pose_map[:nel],\n target_poses[i*batch_size : (i+1)*batch_size],\n spl_onehot[:nel]\n )\n yield (output_images.detach().cpu(), output_segmentations.detach().cpu())\n \n \nclass SegmentationModel:\n def __init__(self, path, use_cuda=True):\n self.use_cuda = use_cuda\n self.model = models.parsing.load_model(path, use_cuda=self.use_cuda)\n \n def get_segmap(self, image):\n # TODO: Improve performance by not doing torch -> numpy -> torch\n SPL_img = models.parsing.infer(self.model, image, self.use_cuda)\n num_class = 12\n _, h, w = SPL_img.shape\n tmp = torch.from_numpy(SPL_img).view(-1).long()\n ones = torch.sparse.torch.eye(num_class)\n ones = ones.index_select(0, tmp)\n SPL_onehot = ones.view([h, w, num_class])\n SPL_onehot = SPL_onehot.permute(2, 0, 1)\n return SPL_onehot\n \n \nif __name__ == '__main__':\n with open('test_data/test.lst') as f:\n persons = [line.strip() for line in f][:4]\n opt = InferOptions().parse()\n pipeline = InferencePipeline.from_opts(opt)\n videos = [io.read_video('test_data/seq.mp4', pts_unit='sec')[0]]\n segs = [torch.zeros_like(videos[0], dtype=torch.uint8)]\n images = [torch.zeros_like(videos[0], dtype=torch.uint8)]\n for person in persons:\n source_image = Image.open(f'test_data/test/{person}.jpg')\n pipeline.segmentator.path = f'test_data/testSPL2/{person}.png'\n frames, segmentations = zip(*pipeline.render_video(source_image, 'test_data/seq/'))\n frames = torch.cat(frames)\n frames = frames.float()\n frames = torch.movedim(frames, 1, 3)\n frames = (frames + 1) / 2.0 * 255.0\n videos.append(frames.byte())\n segmentations = torch.cat(segmentations)\n segmentations = torch.stack([torch.from_numpy(util.tensor2im(torch.argmax(sf, axis=0, keepdim=True).data, True)) for sf in segmentations])\n segs.append(segmentations.byte())\n source_image_tensor = torch.from_numpy(np.array(source_image)).unsqueeze(0).expand(frames.size())\n images.append(source_image_tensor)\n \n comp_video = torch.cat([torch.cat(part, dim=2) for part in (images, segs, videos)], dim=1)\n io.write_video('test_data/out.mp4', comp_video, fps=30)\n \n # output_image.save(OUPUT_PATH)","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"582155847","text":"# -*- coding: utf8 -*-\n#\n# Module RATIONAL\n#\n# Part of Nutils: open source numerical utilities for Python. Jointly developed\n# by HvZ Computational Engineering, TU/e Multiscale Engineering Fluid Dynamics,\n# and others. More info at http://nutils.org . 
(c) 2014\n\n\"\"\"\nThe rational module.\n\"\"\"\n\nfrom __future__ import print_function, division\nimport numpy\n\nclass Rational( object ):\n\n __array_priority__ = 1\n\n def __init__( self, numer, denom=1, isfactored=False ):\n assert isint(denom) and denom > 0\n if not isinstance( numer, numpy.ndarray ):\n numer = numpy.array( numer )\n numer.flags.writeable = False\n assert isint(numer)\n if denom != 1 and not isfactored:\n absnumers = numpy.unique( abs(numer) )[::-1].tolist() # unique descending\n if not absnumers[-1]:\n absnumers.pop() # ignore zero\n common = denom\n while absnumers and common > 1:\n n = absnumers.pop()\n while n: # GCD: Euclid's algorithm\n common, n = n, common % n\n if common != 1:\n numer = numer // common\n if numer.flags.writeable:\n numer.flags.writeable = False\n denom //= common\n if numer.flags.writeable:\n numer = numer.copy()\n numer.flags.writeable = False\n self.numer = numer\n self.denom = denom\n\n def __iter__( self ):\n for array in self.numer:\n yield Rational( array, self.denom )\n\n def __abs__( self ):\n return Rational( abs(self.numer), self.denom, isfactored=True )\n\n def __getitem__( self, item ):\n return Rational( self.numer[item], self.denom )\n\n def __int__( self ):\n assert self.ndim == 0 and self.denom == 1\n return int(self.numer)\n\n def __float__( self ):\n assert self.ndim == 0\n return float(self.numer) / self.denom\n\n def astype( self, tp ):\n if tp == int:\n assert self.denom == 1\n return self.numer\n assert tp == float\n return self.numer / float(self.denom)\n\n @property\n def size( self ):\n return self.numer.size\n\n @property\n def ndim( self ):\n return self.numer.ndim\n\n @property\n def shape( self ):\n return self.numer.shape\n\n @property\n def T( self ):\n return Rational( self.numer.T, self.denom, isfactored=True )\n\n def __len__( self ):\n return len(self.numer)\n\n @property\n def __cmpdata( self ):\n return self.numer.shape, tuple(self.numer.flat), self.denom\n\n def __hash__( self ):\n return hash( self.__cmpdata )\n\n def __eq__( self, other ):\n return self is other or isrational(other) and self.__cmpdata == other.__cmpdata\n\n def __neg__( self ):\n return Rational( -self.numer, self.denom, isfactored=True )\n\n def __add__( self, other ):\n if other is 0:\n return self\n other = asarray( other )\n if not isrational( other ):\n return self.numer / float(self.denom) + other\n return Rational( self.numer * other.denom + other.numer * self.denom, self.denom * other.denom )\n\n def __sub__( self, other ):\n if other is 0:\n return self\n other = asarray( other )\n if not isrational( other ):\n return self.numer / float(self.denom) - other\n return Rational( self.numer * other.denom - other.numer * self.denom, self.denom * other.denom )\n\n def __rsub__( self, other ):\n if other is 0:\n return -self\n other = asarray( other )\n if not isrational( other ):\n return other - self.numer / float(self.denom)\n return Rational( other.numer * self.denom - self.numer * other.denom, self.denom * other.denom )\n\n def __mul__( self, other ):\n if other is 1:\n return self\n other = asarray( other )\n if not isrational( other ):\n return self.numer * ( other / float(self.denom) )\n return Rational( self.numer * other.numer, self.denom * other.denom )\n\n def __div__( self, other ):\n if other is 1:\n return self\n other = asarray( other )\n if not isrational( other ):\n return self.numer / ( other * float(self.denom) )\n assert other.size == 1, 'only scalar division supported for now'\n numer, = other.numer.flat\n 
denom = other.denom\n    assert numer != 0\n    if numer < 0:\n      numer = -numer\n      denom = -denom\n    return Rational( self.numer * denom, self.denom * numer )\n\n  def __rdiv__( self, other ):\n    other = asarray( other )\n    if not isrational( other ):\n      return ( other * float(self.denom) ) / self.numer\n    return other / self\n\n  __rmul__ = __mul__\n  __radd__ = __add__\n  __truediv__ = __div__\n  __rtruediv__ = __rdiv__\n\n  def __pow__( self, n ):\n    assert isint( n )\n    return Rational( self.numer**n, self.denom**n ) if n > 1 \\\n      else self if n == 1 \\\n      else ones( self.shape ) if n == 0 \\\n      else 1 / (self**-n)\n\n  def __str__( self ):\n    return '%s/%s' % ( str(self.numer.tolist()).replace(' ',''), self.denom )\n\n\n\n## UTILITY FUNCTIONS\n\nisint = lambda a: numpy.issubdtype( a.dtype if isinstance(a,numpy.ndarray) else type(a), numpy.integer )\n\nunit = Rational( 1 )\n\ndef det( array ):\n  array = asrational( array )\n  if array.shape == (1,1):\n    det = array[0,0]\n  elif array.shape == (2,2):\n    ((a,b),(c,d)) = array.numer\n    det = Rational( a*d - b*c, array.denom**2 )\n  elif array.shape == (3,3):\n    ((a,b,c),(d,e,f),(g,h,i)) = array.numer\n    det = Rational( a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h, array.denom**3 )\n  else:\n    raise NotImplementedError( 'shape=' + str(array.shape) )\n  return det\n\ndef invdet( array ):\n  '''invdet(array) = inv(array) * det(array)'''\n  array = asrational(array)\n  if array.shape == (1,1):\n    invdet = ones( (1,1) )\n  elif array.shape == (2,2):\n    ((a,b),(c,d)) = array.numer\n    invdet = Rational( ((d,-b),(-c,a)), array.denom, isfactored=True )\n  elif array.shape == (3,3):\n    ((a,b,c),(d,e,f),(g,h,i)) = array.numer\n    invdet = Rational( ((e*i-f*h,c*h-b*i,b*f-c*e),(f*g-d*i,a*i-c*g,c*d-a*f),(d*h-e*g,b*g-a*h,a*e-b*d)), array.denom**2 )\n  else:\n    raise NotImplementedError( 'shape=%s' % (array.shape,) )\n  return invdet\n  \ndef inv( array ):\n  return invdet( array ) / det( array )\n\ndef ext( array ):\n  \"\"\"Exterior\n  For array of shape (n,n-1) return n-vector ex such that ex.array = 0 and\n  det(arr;ex) = ex.ex\"\"\"\n  array = asrational(array)\n  if array.shape == (1,0):\n    ext = ones( 1 )\n  elif array.shape == (2,1):\n    ((a,),(b,)) = array.numer * array.denom\n    ext = Rational( (-b,a) )\n  elif array.shape == (3,2):\n    ((a,b),(c,d),(e,f)) = array.numer * array.denom\n    ext = Rational( (c*f-e*d,e*b-a*f,a*d-c*b) )\n  else:\n    raise NotImplementedError( 'shape=%s' % (array.shape,) )\n  # VERIFY\n  A = asfloat( array )\n  v = asfloat( ext )\n  Av = numpy.concatenate( [A,v[:,numpy.newaxis]], axis=1 )\n  numpy.testing.assert_almost_equal( numpy.dot( v, A ), 0 )\n  numpy.testing.assert_almost_equal( numpy.linalg.det(Av), numpy.dot(v,v) )\n  return ext\n\ndef isrational( arr ):\n  return isinstance( arr, Rational )\n\ndef asrational( arr ):\n  return arr if isrational( arr ) else Rational( arr )\n\ndef frac( a, b ):\n  return asrational(a) / asrational(b)\n\ndef asarray( arr ):\n  if isrational( arr ):\n    return arr\n  arr = numpy.asarray( arr )\n  if isint(arr):\n    return Rational( arr )\n  return arr\n\ndef dot( A, B ):\n  A = asarray( A )\n  B = asarray( B )\n  if not isrational( A ) or not isrational( B ):\n    return numpy.dot( A.astype(float), B.astype(float) )\n  return Rational( numpy.dot( A.numer, B.numer ), A.denom * B.denom )\n\ndef eye( ndims ):\n  return Rational( numpy.eye(ndims,dtype=int) )\n\ndef zeros( shape ):\n  return Rational( numpy.zeros(shape,dtype=int) )\n\ndef ones( shape ):\n  return Rational( numpy.ones(shape,dtype=int) )\n\ndef stack( args ):\n  arg1, arg2 = args\n  arg1 = asrational( arg1 )\n  arg2 
= asrational( arg2 )\n assert arg1.ndim == arg2.ndim == 1\n return Rational( numpy.concatenate([ arg1.numer * arg2.denom, arg2.numer * arg1.denom ]), arg1.denom * arg2.denom )\n\ndef blockdiag( args ):\n arg1, arg2 = args\n arg1 = asrational( arg1 )\n arg2 = asrational( arg2 )\n assert arg1.ndim == arg2.ndim == 2\n blockdiag = numpy.zeros( (arg1.shape[0]+arg2.shape[0],arg1.shape[1]+arg2.shape[1]), dtype=int )\n blockdiag[:arg1.shape[0],:arg1.shape[1]] = arg1.numer * arg2.denom\n blockdiag[arg1.shape[0]:,arg1.shape[1]:] = arg2.numer * arg1.denom\n return Rational( blockdiag, arg1.denom * arg2.denom )\n\ndef round( array, denom=1 ):\n array = asarray( array )\n if isrational( array ):\n return array\n numer = array * denom\n return Rational( ( numer - numpy.less(numer,0) + .5 ).astype( int ), denom )\n\n# vim:shiftwidth=2:foldmethod=indent:foldnestmax=2\n","sub_path":"nutils/rational.py","file_name":"rational.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"16236556","text":"\n__all__ = [ 'RM_Peripheral_PRS' ]\n\nfrom static import Base_RM_Peripheral\nfrom PRS_register import *\n\nclass RM_Peripheral_PRS(Base_RM_Peripheral):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Peripheral_PRS, self).__init__(rmio, label,\n 0x400E6000, 'PRS',\n \"\")\n self.SWPULSE = RM_Register_PRS_SWPULSE(self.zz_rmio, self.zz_label)\n self.zz_rdict['SWPULSE'] = self.SWPULSE\n self.SWLEVEL = RM_Register_PRS_SWLEVEL(self.zz_rmio, self.zz_label)\n self.zz_rdict['SWLEVEL'] = self.SWLEVEL\n self.ROUTEPEN = RM_Register_PRS_ROUTEPEN(self.zz_rmio, self.zz_label)\n self.zz_rdict['ROUTEPEN'] = self.ROUTEPEN\n self.ROUTELOC0 = RM_Register_PRS_ROUTELOC0(self.zz_rmio, self.zz_label)\n self.zz_rdict['ROUTELOC0'] = self.ROUTELOC0\n self.ROUTELOC1 = RM_Register_PRS_ROUTELOC1(self.zz_rmio, self.zz_label)\n self.zz_rdict['ROUTELOC1'] = self.ROUTELOC1\n self.ROUTELOC2 = RM_Register_PRS_ROUTELOC2(self.zz_rmio, self.zz_label)\n self.zz_rdict['ROUTELOC2'] = self.ROUTELOC2\n self.CTRL = RM_Register_PRS_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CTRL'] = self.CTRL\n self.DMAREQ0 = RM_Register_PRS_DMAREQ0(self.zz_rmio, self.zz_label)\n self.zz_rdict['DMAREQ0'] = self.DMAREQ0\n self.DMAREQ1 = RM_Register_PRS_DMAREQ1(self.zz_rmio, self.zz_label)\n self.zz_rdict['DMAREQ1'] = self.DMAREQ1\n self.PEEK = RM_Register_PRS_PEEK(self.zz_rmio, self.zz_label)\n self.zz_rdict['PEEK'] = self.PEEK\n self.CH0_CTRL = RM_Register_PRS_CH0_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH0_CTRL'] = self.CH0_CTRL\n self.CH1_CTRL = RM_Register_PRS_CH1_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH1_CTRL'] = self.CH1_CTRL\n self.CH2_CTRL = RM_Register_PRS_CH2_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH2_CTRL'] = self.CH2_CTRL\n self.CH3_CTRL = RM_Register_PRS_CH3_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH3_CTRL'] = self.CH3_CTRL\n self.CH4_CTRL = RM_Register_PRS_CH4_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH4_CTRL'] = self.CH4_CTRL\n self.CH5_CTRL = RM_Register_PRS_CH5_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH5_CTRL'] = self.CH5_CTRL\n self.CH6_CTRL = RM_Register_PRS_CH6_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH6_CTRL'] = self.CH6_CTRL\n self.CH7_CTRL = RM_Register_PRS_CH7_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH7_CTRL'] = self.CH7_CTRL\n self.CH8_CTRL = RM_Register_PRS_CH8_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH8_CTRL'] = 
self.CH8_CTRL\n self.CH9_CTRL = RM_Register_PRS_CH9_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH9_CTRL'] = self.CH9_CTRL\n self.CH10_CTRL = RM_Register_PRS_CH10_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH10_CTRL'] = self.CH10_CTRL\n self.CH11_CTRL = RM_Register_PRS_CH11_CTRL(self.zz_rmio, self.zz_label)\n self.zz_rdict['CH11_CTRL'] = self.CH11_CTRL\n self.__dict__['zz_frozen'] = True","sub_path":".closet/jython.configurator.efr32/1.0.0.201606231656-435/host_py_rm_studio_internal/host_py_rm_studio_internal_efr32xg1xfull/revA3/PRS.py","file_name":"PRS.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"620161263","text":"from django.shortcuts import render, redirect, reverse\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom datetime import datetime\nfrom .forms import ContactForm\nfrom .models import Contact\n\n\ndef contact(request):\n \"\"\" A view to send an email enquires about homeopathic services & information \"\"\"\n contacts = list(Contact.objects.all().values())\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n cust_email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n host_email = settings.DEFAULT_FROM_EMAIL\n date = datetime.today()\n year = datetime.today().year\n for item in contacts:\n if year - item['year'] > 1:\n Contact(id=item['id']).delete()\n\n Contact(\n name=name,\n email=cust_email,\n message=message,\n date=date,\n year=year,\n ).save()\n\n email = {\n 'name': name,\n 'cust_email': cust_email,\n 'message': message,\n 'host_email': host_email,\n 'date': date,\n }\n filePath = 'contact/confirmation_emails/'\n subject = render_to_string(\n f'{filePath}confirmation_email_subject.txt',\n {'email': email})\n cust_body = render_to_string(\n f'{filePath}confirmation_cust_email_body.txt',\n {'email': email})\n host_body = render_to_string(\n f'{filePath}confirmation_host_email_body.txt',\n {'email': email})\n try:\n # send confirmation message to customer email address\n send_mail(subject, cust_body, host_email, [cust_email])\n # send confirmation message to host email address\n send_mail(subject, host_body, cust_email, [host_email])\n messages.success(request, f'Your message has been received! A confirmation email will be sent to {cust_email}.')\n except Exception as e:\n messages.error(request, 'Sorry, there was a problem sending your message. 
Please try again.')\n return HttpResponse(content=e, status=400)\n return redirect(reverse('contact'))\n\n context = {\n 'form': form,\n }\n\n return render(request, 'contact/contact.html', context)\n","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"139145438","text":"from examsage.body import Body\nfrom examsage.headfoot import HeadFoot\nfrom examsage.problem import Problem\nfrom pylatex import Document, Command, Package, PageStyle, MdFramed\nfrom pylatex.base_classes import Arguments\nfrom pylatex.utils import NoEscape\n\nclass Assessment(Document):\n \"\"\"A latex document for writing Math assessments.\"\"\"\n\n tex_packages = ('assessments', 'mathexam', 'mathdiagrams')\n\n def __init__(\n self,\n kind=None,\n number=None,\n fullpoints=None,\n period=None,\n head_height = '30pt',\n instructions=None,\n hints=None,\n nameline=True,\n body=None,\n ):\n # Create the pylatex Document\n super().__init__(\n documentclass='article',\n document_options='12pt',\n geometry_options={'left':'0.5in', 'right':'0.5in', 'top':'1in', 'bottom':'1in'},\n page_numbers=True,\n indent=False,\n )\n self.kind = kind\n self.number = number\n self.fullpoints = fullpoints\n self.period = period\n #self.instructor_key = instructor_key # Used in Assessment.dumps()\n self.head_height = head_height\n self.instructions = instructions\n self.hints = hints\n self.nameline = nameline\n self.headfoot = HeadFoot(name='default', assessment=self)\n self.body = body\n # Publish the last version by default\n #self.versions = []\n self.version = 0\n\n for name in Assessment.tex_packages:\n self.packages.append(Package(name))\n\n @property\n def instructions(self):\n return self.__instructions\n\n @instructions.setter\n def instructions(self, instructions):\n # Print the instructions inside of a frame\n if instructions:\n frame = MdFramed()\n frame.append(NoEscape(instructions))\n self.__instructions = frame\n else:\n self.__instructions = None\n\n @property\n def hints(self):\n return self.__hints\n\n @hints.setter\n def hints(self, hints):\n self.__hints = hints\n\n @property\n def instructor_key(self):\n return self.__instructor_key\n\n @instructor_key.setter\n def instructor_key(self, instructor_key):\n if isinstance(instructor_key, bool):\n self.__instructor_key = instructor_key\n else:\n print(\"The instructor key must be either 'True' or 'False'.\")\n\n @property\n def headfoot(self):\n return self.__headfoot\n\n @headfoot.setter\n def headfoot(self, headfoot):\n if isinstance(headfoot, PageStyle) or headfoot == None:\n self.__headfoot = headfoot\n else:\n print(\"The headfoot must be an instance of pylatex.PageStyle\")\n\n @property\n def body(self):\n return self.__body\n\n @body.setter\n def body(self, body):\n if isinstance(body, Body):\n body.assessment = self\n self.__body = body\n elif body != None:\n print(\"The body must be an instance of examsage.Body\")\n\n @property\n def maxpoints(self):\n return self.body.maxpoints\n\n #def dumps(self):\n # return self.versions[self.version]\n\n def dumps(self):\n # Delete the old preamble\n self.preamble.clear()\n # Set the key flags\n if self.instructor_key:\n self.preamble.append(Command('setbool', Arguments('instructorKey', 'true')))\n self.preamble.append(Command('setbool', Arguments('studentKey', 'true')))\n else:\n self.preamble.append(Command('setbool', Arguments('instructorKey', 'false')))\n 
self.preamble.append(Command('setbool', Arguments('studentKey', 'false')))\n\n self.preamble.append(self.headfoot)\n self.change_length('\\headheight', self.head_height)\n\n # Delete the old body\n self.clear()\n self.change_document_style(self.headfoot.name)\n # Update the body\n if self.nameline:\n # Add the name line\n self.append(Command('ExamNameLine'))\n\n if self.instructions:\n # Add the instructions\n self.append(self.instructions)\n\n if self.hints:\n # Add the hints\n self.append(self.hints)\n\n self.append(self.body)\n\n return super().dumps()\n\n def __next__(self):\n # Generate a similar assessment\n next(self.body)\n #self.versions.append(self.my_dumps())\n return self","sub_path":"examsage/assessment.py","file_name":"assessment.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"123545016","text":"class Settings:\r\n \"\"\"A Class to store all settings for Alien Invasion.\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize the games settings.\"\"\"\r\n #Screen settings\r\n self.screen_width = 1440\r\n self.screen_height = 800\r\n self.bg_color = (255, 255, 255)\r\n self.ship_speed = 1.5\r\n self.ship_vertical_speed = 1\r\n\r\n #bullet settings\r\n self.bullet_speed = 1\r\n self.bullet_width = 3 #in pixel\r\n self.bullet_height = 13 #In pixel\r\n self.bullet_color = (255,0,0)\r\n self.bullets_allowed = 17\r\n\r\n\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"1996496","text":"#!/usr/bin/env python3\n# vim: sts=4 sw=4 et\n\nimport collections\nimport os\nimport random\nimport struct\n\nimport pytest\n\nfrom tll.channel import Context\nfrom tll.error import TLLError\nfrom tll.test_util import Accum\n\nEXTRA_SIZE = 4 + 12 + 1 # Size + frame + tail marker\nMETA_SIZE = EXTRA_SIZE + 32 # Extra + meta size\n\n@pytest.fixture\ndef context():\n return Context()\n\n@pytest.fixture\ndef filename(tmp_path):\n return tmp_path / 'file.dat'\n\n@pytest.fixture\ndef writer(context, filename):\n return context.Channel(f'file://{filename}', name='writer', dump='frame', dir='w', block='1kb')\n\n@pytest.fixture\ndef reader(context, filename):\n return Accum(f'file://{filename}', name='reader', dump='frame', context=context, autoclose='no')\n\nFrame = collections.namedtuple('Frame', ('size', 'msgid', 'seq'))\n\ndef frame(data):\n return Frame(*struct.unpack('iiq', data[:16]))\n\ndef test_basic(writer, reader, filename):\n w = writer\n w.open()\n assert w.dcaps == w.DCaps.Zero\n\n assert w.scheme_control is None\n\n assert w.config['info.seq-begin'] == '-1'\n assert w.config['info.seq'] == '-1'\n\n with pytest.raises(TLLError): w.post(b'x' * 1024 * 1024)\n with pytest.raises(TLLError): w.post(b'x' * (1024 - EXTRA_SIZE + 1))\n\n assert w.config['info.seq-begin'] == '-1'\n assert w.config['info.seq'] == '-1'\n\n assert filename.stat().st_size == META_SIZE\n fp = filename.open('rb')\n\n w.post(b'a' * 128, seq=0, msgid=0)\n assert filename.stat().st_size == META_SIZE + (128 + EXTRA_SIZE) * 1\n\n assert w.config['info.seq-begin'] == '0'\n assert w.config['info.seq'] == '0'\n\n with pytest.raises(TLLError): w.post(b'x', seq=0)\n\n data = fp.read(16)\n assert data == bytes([META_SIZE]) + b'\\0\\0\\0Meta\\0\\0\\0\\0\\0\\0\\0\\0'\n\n data = fp.read(META_SIZE - 16) # Skip meta\n\n data = fp.read(128 + EXTRA_SIZE)\n assert frame(data) == Frame(128 + 
EXTRA_SIZE, 0, 0)\n assert data[16:] == b'a' * 128 + b'\\x80'\n\n w.post(b'b' * 128, seq=1, msgid=10)\n assert filename.stat().st_size == META_SIZE + (128 + EXTRA_SIZE) * 2\n\n assert w.config['info.seq-begin'] == '0'\n assert w.config['info.seq'] == '1'\n\n with pytest.raises(TLLError): w.post(b'x', seq=1)\n\n data = fp.read(128 + EXTRA_SIZE)\n assert frame(data) == Frame(128 + EXTRA_SIZE, 10, 1)\n assert data[16:] == b'b' * 128 + b'\\x80'\n\n reader.open()\n assert reader.dcaps == reader.DCaps.Process | reader.DCaps.Pending\n\n assert reader.scheme_control is not None\n assert [m.name for m in reader.scheme_control.messages] == ['Seek', 'EndOfData']\n\n assert reader.config['info.seq-begin'] == '0'\n assert reader.config['info.seq'] == '1'\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == [(0, 0, 128, b'a' * 128)]\n reader.result = []\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == [(1, 10, 128, b'b' * 128)]\n reader.result = []\n\n reader.process()\n assert [(x.type, x.seq, x.msgid) for x in reader.result] == [(reader.Type.Control, 0, reader.scheme_control.messages.EndOfData.msgid)]\n reader.result = []\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == []\n assert reader.dcaps == reader.DCaps.Process\n\n w.post(b'c' * 128, seq=2, msgid=20)\n assert filename.stat().st_size == META_SIZE + (128 + EXTRA_SIZE) * 3\n\n data = fp.read(128 + EXTRA_SIZE)\n assert frame(data) == Frame(128 + EXTRA_SIZE, 20, 2)\n assert data[16:] == b'c' * 128 + b'\\x80'\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == [(2, 20, 128, b'c' * 128)]\n reader.result = []\n\ndef test_open_error(reader, writer, filename):\n with pytest.raises(TLLError): reader.open()\n filename.mkdir()\n with pytest.raises(TLLError): writer.open()\n\ndef test_block_boundary(writer, reader, filename):\n w = writer\n w.open()\n\n assert filename.stat().st_size == META_SIZE\n fp = filename.open('rb')\n\n w.post(b'a' * 512, seq=0, msgid=0)\n assert filename.stat().st_size == META_SIZE + (512 + EXTRA_SIZE) * 1\n\n data = fp.read(16)\n assert data == bytes([META_SIZE]) + b'\\0\\0\\0Meta\\0\\0\\0\\0\\0\\0\\0\\0'\n\n data = fp.read(META_SIZE - 16) # Skip meta\n\n data = fp.read(512 + EXTRA_SIZE)\n assert frame(data) == Frame(512 + EXTRA_SIZE, 0, 0)\n assert data[16:] == b'a' * 512 + b'\\x80'\n\n reader.open()\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == [(0, 0, 512, b'a' * 512)]\n reader.result = []\n\n reader.process()\n assert [(x.type, x.seq, x.msgid) for x in reader.result] == [(reader.Type.Control, 0, reader.scheme_control.messages.EndOfData.msgid)]\n reader.result = []\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == []\n assert reader.dcaps == reader.DCaps.Process\n\n w.post(b'b' * 512, seq=1, msgid=10)\n assert filename.stat().st_size == 1024 + 5 + (512 + EXTRA_SIZE) * 1\n\n data = fp.read(16)\n assert frame(data) == Frame(-1, 0, 0)\n fp.seek(1024)\n\n data = fp.read(5)\n assert data == b'\\x05\\0\\0\\0\\x80'\n\n data = fp.read(512 + EXTRA_SIZE)\n assert frame(data) == Frame(512 + EXTRA_SIZE, 10, 1)\n assert data[16:] == b'b' * 512 + b'\\x80'\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data), x.data.tobytes()) for x in reader.result] == [(1, 10, 512, b'b' * 512)]\n reader.result = 
[]\n\n@pytest.mark.parametrize(\"seq,r\", [(None, 10), (0, 10), (5, 10), (100, 100), (105, 110), (1000, 1000)])\ndef test_open_seq(seq, r, writer, reader):\n writer.open()\n\n for i in range(100):\n writer.post(b'abc' * i, seq = 10 * (i + 1), msgid = i)\n\n reader.open(**({'seq': str(seq)} if seq is not None else {}))\n\n assert reader.config['info.seq-begin'] == '10'\n assert reader.config['info.seq'] == '1000'\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data)) for x in reader.result] == [(r, r // 10 - 1, 3 * (r // 10 - 1))]\n\n if seq is not None:\n reader.post(b'', type=reader.Type.Control, name='Seek', seq=seq)\n reader.result = []\n reader.process()\n assert [(x.seq, x.msgid, len(x.data)) for x in reader.result] == [(r, r // 10 - 1, 3 * (r // 10 - 1))]\n\ndef test_open_seq_border(writer, reader):\n writer.open()\n\n writer.post(b'a' * 512, seq = 0, msgid = 10)\n writer.post(b'b' * 512, seq = 10, msgid = 20)\n\n reader.open(seq='5')\n\n assert reader.config['info.seq-begin'] == '0'\n assert reader.config['info.seq'] == '10'\n\n reader.process()\n assert [(x.seq, x.msgid, len(x.data)) for x in reader.result] == [(10, 20, 512)]\n\ndef test_meta(context, filename):\n SCHEME = '''yamls://\n- name: msg\n fields: [{name: f0, type: int32}]\n'''\n w = context.Channel(f'file://{filename}', name='writer', dump='frame', dir='w', block='1kb', scheme=SCHEME)\n r = context.Channel(f'file://{filename}', name='reader', dump='frame', dir='r', context='context', block='4kb')\n\n w.open()\n assert os.path.exists(filename)\n r.open()\n assert r.scheme != None\n assert [m.name for m in r.scheme.messages] == ['msg']\n assert r.config.get('info.block', '') == '1kb'\n\ndef test_autoclose(context, filename, writer):\n writer.open()\n reader = Accum(f'file://{filename}', name='reader', dump='frame', dir='r', context=context, autoclose='yes')\n\n for i in range(10):\n writer.post(b'abc' * i, seq = 10 * (i + 1), msgid = i)\n\n reader.open(seq='50')\n reader.process()\n for _ in range(10):\n reader.process()\n assert [m.seq for m in reader.result] == list(range(50, 101, 10))\n assert reader.state == reader.State.Closed\n\n@pytest.mark.fuzzy\ndef test_fuzzy(writer, reader):\n data = []\n start = random.randrange(0, 10000)\n\n writer.open()\n\n for i in range(1000):\n size = random.randrange(0, 512)\n data.append(size)\n writer.post(bytes([size % 256] * size), msgid=size, seq=start + 2 * i)\n\n for j in range(2000 - 2):\n i = (j + 1) // 2\n reader.result = []\n reader.open(seq=f'{start + j}')\n assert reader.config['info.seq-begin'] == f'{start}'\n assert reader.config['info.seq'] == f'{start + 2 * 999}'\n\n for _ in range(5):\n reader.process()\n assert [(m.seq, m.msgid, len(m.data)) for m in reader.result] == [(start + 2 * k, data[k], data[k]) for k in range(i, min(i + 5, len(data)))]\n reader.close()\n\n reader.open()\n for j in reversed(range(2000 - 2)):\n i = (j + 1) // 2\n reader.result = []\n reader.post(b'', type=reader.Type.Control, name='Seek', seq=start + j)\n for _ in range(5):\n reader.process()\n assert [(m.seq, m.msgid, len(m.data)) for m in reader.result] == [(start + 2 * k, data[k], data[k]) for k in range(i, min(i + 5, len(data)))]\n\ndef test_open_filename(context, filename):\n writer = context.Channel('file://', name='writer', dump='frame', dir='w', block='1kb')\n\n writer.open(filename=str(filename))\n for i in range(10):\n writer.post(b'abc' * i, seq = 10 * (i + 1), msgid = i)\n\n writer.close()\n with pytest.raises(TLLError): writer.open()\n\n reader = Accum('file://', name='reader', 
dump='frame', dir='r', context=context, autoclose='yes')\n reader.open(filename=str(filename), seq='50')\n\n for _ in range(10):\n reader.process()\n\n assert [m.seq for m in reader.result] == list(range(50, 101, 10))\n assert reader.state == reader.State.Closed\n\ndef test_autoseq(context, filename):\n writer = context.Channel(f'file://{filename}', name='writer', dump='frame', dir='w', block='1kb', autoseq='yes')\n\n writer.open()\n for i in range(5):\n writer.post(b'abc' * i, seq = 100)\n assert writer.config['info.seq'] == '4'\n\n writer.close()\n\n writer.open()\n assert writer.config['info.seq'] == '4'\n for i in range(5):\n writer.post(b'abc' * i, seq = 100)\n assert writer.config['info.seq'] == '9'\n\n reader = Accum(f'file://{filename}', name='reader', dump='frame', dir='r', context=context, autoclose='yes')\n\n reader.open()\n for _ in range(20):\n reader.process()\n\n assert [m.seq for m in reader.result] == list(range(10))\n assert reader.state == reader.State.Closed\n","sub_path":"python/test/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"221855921","text":"from unittest import TestCase\nimport sure\nfrom testfixtures import LogCapture\n\nfrom ..hostmapper import HostMapper\nfrom hostfactory import HostFactory\n\n\nclass TestHostMapper(TestCase):\n \"\"\" Test HostMapper functionality\n \"\"\"\n\n def setUp(self):\n self.hosts = []\n for i in xrange(10):\n self.hosts.append(HostFactory())\n\n def test_host_mapper_only_returns_valid_data(self):\n del(self.hosts[3].summary.hardware.model)\n processed_hosts = HostMapper(\n \"myproject\", \"mycluster\", self.hosts, \"myvc\").hosts\n processed_hosts.should.have.length_of(9)\n\n def test_invalid_hosts_are_logged(self):\n with LogCapture(names='reporter.hostmapper') as log:\n hosts = self.hosts\n del(hosts[0].summary.hardware.model)\n # No return needed. Just map some hosts\n HostMapper(\"myproject\", \"mycluster\", hosts, \"myvc\").hosts\n log.check(('reporter.hostmapper', 'INFO', \"Invalid host\"),\n ('reporter.hostmapper', 'INFO', 'myhost'),\n ('reporter.hostmapper', 'INFO', \"HostHardware instance has no attribute 'model'\"))\n\n def test_hypervisor_added_to_host(self):\n hosts = self.hosts\n host = HostMapper(\"myproject\", \"mycluster\", hosts, \"myvc\").hosts[0]\n host[\"hypervisor\"].should.equal(\"ESX\")\n\n def test_collector_added_to_host(self):\n hosts = self.hosts\n host = HostMapper(\"myproject\", \"mycluster\", hosts, \"myvc\").hosts[0]\n host[\"collector\"].should.equal(\"myvc\")\n","sub_path":"tests/test_host_mapper.py","file_name":"test_host_mapper.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"511747966","text":"import random\nimport socket\nimport time\nimport json\nimport threading\nfrom datetime import datetime\n\nfrom weather import Weather, Unit\n\ndef create_fortune():\n fortunes = ['You will have a difficult day', 'Your cat will run away', 'You will catch a cold',\n 'The love of your life will move to another country', 'You will make a good investment',\n 'You will receive many compliments today', 'Your friends will make an unexpected invitation',\n 'You will get a lot of dopamine', 'You will graduate UTM in 2019'\n ]\n return random.choice(fortunes)\n\ndef create_joke():\n jokes = ['Q. What’s the difference between ignorance and apathy? A. 
I don’t know and I don’t care.',\n             'I have clean conscience. I haven’t used it once until now.',\n             'I hate Russian dolls, they\\'re so full of themselves.',\n             'I recently decided to sell my vacuum cleaner as all it was doing was gathering dust.'\n             ]\n    return random.choice(jokes)\n\ndef construct_posting(type, message):\n    return json.dumps({\n        \"name\": \"posting\",\n        \"topic\": type,\n        \"message\": message\n    }).encode()\n\ndef prepare_messages():\n    prepared_messages = []\n    lookup = weather.lookup_by_latlng(47.0105, 28.8638)\n    prepared_messages.append(construct_posting('weather', lookup.condition.text))\n    prepared_messages.append(construct_posting('time', str(datetime.now())))\n    prepared_messages.append(construct_posting('fortune', create_fortune()))\n    prepared_messages.append(construct_posting('joke', create_joke()))\n    return prepared_messages\n\ndef send_message():\n    while 1:\n        print('Send a message once in 10 sec')\n        prepared_messages = prepare_messages()\n        for message in prepared_messages:\n            print(message)\n            client_socket.sendall(message)\n            print(\"sent\")\n        time.sleep(5)\n\nweather = Weather(unit=Unit.CELSIUS)\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect to the given `address` and `port`\nserver_address = ('127.0.0.1', 7070)\nprint('Connecting to 127.0.0.1:7070')\nclient_socket.connect(server_address)\nthreading.Thread(target=send_message).start()\nwhile 1:\n    try:\n        # Receive 1kB of data from the server\n        data = client_socket.recv(1024)\n        print('<<<:' + data.decode() + '\\n')\n    finally:\n        print('Closing socket')\n        client_socket.close()\n","sub_path":"message_broker/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"108503578","text":"import numpy\nimport PIL.Image\n\nim = PIL.Image.open('../CUT1/001.jpg')\nx = numpy.array(im)\nprint(x)\n\n# image dimensions\nhang = x.shape[0] # number of rows\nlie = x.shape[1] # number of columns\ntol = hang * lie # total number of pixels\n\n\n# flatten a 2D array into a 1D array\ndef Two_One(a, hang, lie):\n    k = 0\n    temp = []\n    for i in range(hang):\n        for j in range(lie):\n            temp.append(a[i][j])\n            k += 1\n    return temp\n\n\n# 1D array of sequential indices\ndef Sign(tol):\n    temp = []\n    for i in range(tol):\n        temp.append(i)\n    return temp\n\n\n# sort the flattened image array while tracking each element's original position\ndef Sort(aa, bb, tol):\n    for k in range(tol):\n        for i in range(k, tol):\n            if aa[k] < aa[i]:\n                temp1 = aa[i]\n                aa[i] = aa[k]\n                aa[k] = temp1\n\n                temp2 = bb[i]\n                bb[i] = bb[k]\n                bb[k] = temp2\n    return aa, bb\n\n\nB_sign = numpy.zeros(tol, numpy.int16)\nh = numpy.zeros(tol, numpy.int16)\nl = numpy.zeros(tol, numpy.int16)\ng = numpy.zeros([hang, lie], numpy.int16)\ny = numpy.zeros([hang, lie], numpy.int16)\nr = numpy.zeros([hang, lie], numpy.int16)\n\nfor i in range(hang): # split into per-channel matrices\n    for j in range(lie):\n        g[i][j] = x[i][j][1] # green\n        y[i][j] = (int(x[i][j][0]) + int(x[i][j][1])) / 2 # yellow\n        r[i][j] = x[i][j][0] # red\n\nIma_one_g = Two_One(g, hang, lie) # flatten the image's 2D array into 1D\nB_sign_n_g = Sign(tol) # build the index array\nIma_one_G, _ = Sort(Ima_one_g, B_sign_n_g, tol) # sort the image array and keep the indices aligned\n\nIma_one_y = Two_One(y, hang, lie) # flatten the image's 2D array into 1D\nB_sign_n_y = Sign(tol) # build the index array\nIma_one_Y, _ = Sort(Ima_one_y, B_sign_n_y, tol) # sort the image array and keep the indices aligned\n\nIma_one_r = Two_One(r, hang, lie) # flatten the image's 2D array into 1D\nB_sign_n_r = Sign(tol) # build the index array\nIma_one_R, _ = Sort(Ima_one_r, B_sign_n_r, tol) # sort the image array and keep the indices aligned\n\nLV = Ima_one_G[0]\nHUANG = Ima_one_Y[0]\nHONG = 
Ima_one_R[0]\n\nprint(LV)\nprint(HUANG)\nprint(HONG)\n","sub_path":"get_rgb.py","file_name":"get_rgb.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517538075","text":"import copy\nimport numpy as np\nfrom collections import defaultdict\n\nimport utils\n\n\nclass DAS3HStudent:\n def __init__(self, time_weight, n_items, n_skills, seed):\n np.random.seed(seed)\n self.alpha = np.random.normal(loc=-1.5, scale=0.3, size=1)\n self.delta = np.random.normal(loc=-1.0, scale=0.5, size=n_items)\n self.beta = np.random.normal(loc=-1.0, scale=0.5, size=n_skills)\n self.time_weight = time_weight\n self.weight = np.hstack((self.delta, self.beta, self.time_weight))\n self.h = 0.3\n self.d = 0.8\n\n def predict_proba(self, input_sparse_vec, lag):\n for_sigmoid = self.alpha + np.dot(self.weight, input_sparse_vec)\n\n ret = 1 / (1 + np.exp(-for_sigmoid)[0])\n ret = (1 - ret) * (1 + self.h * lag) ** (-self.d) + ret\n\n return ret\n\n\nclass StudentModel(object):\n def __init__(\n self, n_items, n_skills, n_wins, seed, item_skill_mat, model\n ):\n self.name = \"DAS3H\"\n np.random.seed(seed)\n self.n_items = n_items\n self.n_skills = n_skills\n self.n_wins = n_wins\n self.predictor = model\n self.item_skill_mat = item_skill_mat\n\n self.n_item_feats = int(np.log(2 * self.n_items))\n\n self.item_feats = np.random.normal(\n np.zeros(2 * self.n_items * self.n_item_feats),\n np.ones(2 * self.n_items * self.n_item_feats),\n ).reshape((2 * self.n_items, self.n_item_feats))\n\n self.now = 0\n self.last_time = defaultdict(lambda: -10)\n\n self.curr_item = np.random.randint(self.n_items)\n self.q = defaultdict(lambda: utils.OurQueue())\n self.curr_outcome = 0\n self.curr_delay = 0\n self.skill_ids = None\n\n def _make_input_vec(self, selected_item_id, now_q):\n item_vec = np.zeros(self.n_items)\n skill_vec = np.zeros(self.n_skills)\n correct_vec = np.zeros(self.n_wins * self.n_skills)\n attempt_vec = np.zeros(self.n_wins * self.n_skills)\n\n item_vec[selected_item_id] = 1\n\n index_of_selected_skills = np.argwhere(\n self.item_skill_mat[selected_item_id] == 1\n )\n self.skill_ids = index_of_selected_skills.transpose()[0].tolist()\n self.skill_ids = list(set(self.skill_ids))\n skill_vec[self.skill_ids] = 1\n\n for skill_id in self.skill_ids:\n correct_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(\n 1 + np.array(now_q[skill_id, \"correct\"].get_counters(self.now))\n )\n attempt_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(\n 1 + np.array(now_q[skill_id].get_counters(self.now))\n )\n\n return_np_vec = np.hstack((item_vec, skill_vec, correct_vec, attempt_vec))\n return return_np_vec\n\n def _encode_delay(self):\n v = np.zeros(2)\n v[self.curr_outcome] = np.log(1 + self.curr_delay)\n return v\n\n def _encode_delay2(self):\n v = np.zeros(2)\n delay = self.curr_delay\n if len(self.q.queue) != 0:\n delay = self.now - self.q.queue[-1]\n v[self.curr_outcome] = np.log(1 + delay)\n return v\n\n def _vectorized_obs(self):\n encoded_item = self.item_feats[\n self.n_items * self.curr_outcome + self.curr_item, :\n ]\n return np.hstack(\n (encoded_item, self._encode_delay(), np.array([self.curr_outcome]))\n )\n\n def step(self, action, now):\n self.curr_item = action\n self.curr_delay = now - self.now\n self.now += self.curr_delay\n input_vec = self._make_input_vec(self.curr_item, copy.deepcopy(self.q))\n lag = self.now - self.last_time[self.curr_item]\n recall_prob = 
self.predictor.predict_proba(input_vec, lag)\n self.curr_outcome = 1 if np.random.random() < recall_prob else 0\n self._update_model()\n\n obs = self._vectorized_obs()\n return self.curr_outcome, obs\n\n def _update_model(self):\n self.last_time[self.curr_item] = self.now\n for skill_id in self.skill_ids:\n _ = self.q[skill_id, \"correct\"].get_counters(self.now)\n _ = self.q[skill_id].get_counters(self.now)\n if self.curr_outcome == 1:\n self.q[skill_id, \"correct\"].push(self.now)\n self.q[skill_id].push(self.now)\n\n def get_retention_rate(self):\n retention_rate_list = []\n curr_q = copy.deepcopy(self.q)\n for item in range(self.n_items):\n input_vec = self._make_input_vec(item, curr_q)\n lag = self.now - self.last_time[item]\n recall_prob = self.predictor.predict_proba(input_vec, lag)\n retention_rate_list.append(recall_prob)\n return retention_rate_list\n\n def reset(self, seed):\n np.random.seed(seed)\n self.now = 0\n self.last_time = defaultdict(lambda: -10)\n self.curr_item = np.random.randint(self.n_items)\n self.q = defaultdict(lambda: utils.OurQueue())\n self.curr_outcome = 0\n self.curr_delay = 0\n self.skill_ids = None","sub_path":"src/student/student_model.py","file_name":"student_model.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"192855202","text":"import os\nimport numpy as np\nfiles = [f for f in os.listdir('.') if os.path.isfile(f) and '.obj' in f]\nsize_dic = {file: os.stat(file).st_size for file in files}\nsort_array = sorted(size_dic.items(), key=lambda x: x[1])\nsizes = [x[1] for x in sort_array]\nnpa = np.asarray(sizes, dtype=np.int32)\npercentile_95_8 = np.percentile(npa,95.8)\nnew_dict = {key:val for key,val in sort_array if val > percentile_95_8}\ndel_files = [os.remove(file) for file in files if file not in new_dict.keys()]\nf = open(\"dir_details.txt\",\"a\")\nf.truncate(0)\nstring_literal = \"'\"+','.join(list(new_dict.keys()))+\"'\"\nf.write(string_literal)\nf.close()","sub_path":"data/models/endolysosomes_new/file_handler.py","file_name":"file_handler.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"51782086","text":"from TreeNode import TreeNode\n\ndef buildTree(nodes):\n \"\"\"\n :type nodes: list[int]\n :rtype: TreeNode\n \"\"\"\n # instead visited each node when pop from queue, we build it from nodes list\n root = TreeNode(nodes[0])\n i = 1\n queue =[root]\n while queue:\n parent = queue.pop(0)\n if not parent:\n continue\n if i < len(nodes):\n parent.left = TreeNode(nodes[i]) if nodes[i] != None else None\n queue.append(parent.left)\n i += 1\n if i < len(nodes):\n parent.right = TreeNode(nodes[i]) if nodes[i] != None else None\n queue.append(parent.right)\n i += 1\n return root\n\n","sub_path":"leet/buildTree.py","file_name":"buildTree.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"19672215","text":"# 1. 
Work with variables: create several and print them to the screen,\n# request several numbers and strings from the user and save them into variables,\n# print them to the screen.\n\nvar1 = 0\nvar2 = '42'\nvar3 = -42.0\n\nprint(f'var1 = {var1} has type {type(var1)}\\n'\n      f'var2 = {var2} has type {type(var2)}\\n'\n      f'var3 = {var3} has type {type(var3)}\\n'\n      )\n\nvar1 = int(input('Input integer: '))\nvar2 = input('input string: ')\nvar3 = float(input('input float: '))\n\nprint(f'var1 = {var1} has type {type(var1)}\\n'\n      f'var2 = {var2} has type {type(var2)}\\n'\n      f'var3 = {var3} has type {type(var3)}\\n'\n      )\n","sub_path":"Introduction(T1)/variables_t1.1.py","file_name":"variables_t1.1.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"453219554","text":"from backend.views import view_app\nfrom backend.services.user_services import create_admin, admin_login\nfrom backend.helpers.flask_helper import json_response\nfrom flask import Blueprint, request\n\nuser_app = Blueprint('user_app', __name__)\n\n@user_app.route('/users/create', methods=['POST'])\ndef admin_register_view():\n    try:\n        data = create_admin(request.data)\n        response = json_response(data, status=200)\n    except Exception as ex:\n        response = json_response({'error': ex}, status=500)\n    return response\n\n@user_app.route('/quizzes/user_token', methods=['POST'])\ndef admin_login_view():\n    try:\n        data = admin_login(request.data)\n        if data['jwt']:\n            response = json_response(data, status=200)\n        else:\n            response = json_response({'message': 'Wrong credentials'}, status=404)\n    except Exception as ex:\n        response = json_response({'message': 'Wrong credentials'}, status=404)\n        # response = {'failure': True, 'message': \"admin login failed\", 'error': ex}\n        # return flask_response(response)\n    return response\n\n@user_app.route('/')\ndef hello():\n    return \"Hello World\"\n\n\n","sub_path":"backend/views/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"434398071","text":"#!/usr/bin/env python3\n\n__author__ = \"NCSU CSC 591 021 Spring 23 Group-3\"\n__version__ = \"1.0.0\"\n__license__ = \"MIT\"\n\nfrom utils import *\nfrom test_hw2 import *\n\ndef main():\n    saved,fails={},0\n    for k,v in cli(settings(help)).items():\n        the[k] = v\n        saved[k] = v\n    if the['help'] == True:\n        print(help)\n    else:\n        for what, fun in egs.items():\n            if the['go'] == 'all' or the['go'] == what:\n                for k,v in saved.items():\n                    the[k] = v\n                Seed = the['seed']\n                if egs[what]() == False:\n                    fails += 1\n                    print('❌ fail:', what)\n                else:\n                    print('✅ pass:', what)\n    sys.exit(fails)\n\nif __name__ == '__main__':\n    eg('the', 'show settings', test_the)\n    eg('sym', 'check syms', test_sym)\n    eg('num', 'check nums', test_num)\n    eg('csv', 'read from csv', test_csv)\n    eg('data', 'read DATA csv', test_data)\n    eg('stats', 'stats from DATA', test_stats)\n    main()","sub_path":"src/HW2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"188308729","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\n\n\n\n# preprocessing string data\n# alphabet(0-25), space(26), start(27), end(28) -> 29 chars (0-28)\nstring = \"hello pytorch. how long can a rnn cell remember? 
show me your limit!\"\nchars = \"abcdefghijklmnopqrstuvwxyz ?!.,:;01\"\nchar_list = [i for i in chars]\nchar_len = len(char_list)\nprint(char_len)\n\ndef string_to_onehot(string):\n    start = np.zeros(shape=char_len ,dtype=int)\n    end = np.zeros(shape=char_len ,dtype=int)\n    start[-2] = 1\n    end[-1] = 1\n    for i in string:\n        idx = char_list.index(i)\n        zero = np.zeros(shape=char_len ,dtype=int)\n        zero[idx]=1\n        start = np.vstack([start,zero])\n    output = np.vstack([start,end])\n    return output\n\ndef onehot_to_word(onehot_1):\n    onehot = torch.Tensor.numpy(onehot_1)\n    return char_list[onehot.argmax()]\n\nbatch_size = 5\nseq_len = 1\nnum_layers = 3\ninput_size = char_len\nhidden_size = 35\nlr = 0.01\nnum_epochs = 1000\n\none_hot = torch.from_numpy(string_to_onehot(string)).type_as(torch.FloatTensor())\nprint(one_hot.size())\n\n# RNN with 1 hidden layer\nclass RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, num_layers):\n        super(RNN, self).__init__()\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n\n    def forward(self, input, hidden, cell):\n        output, (hidden, cell) = self.lstm(input, (hidden, cell))\n        return output, hidden, cell\n\n    def init_hidden_cell(self):\n        hidden = torch.zeros(num_layers, batch_size, hidden_size)\n        cell = torch.zeros(num_layers, batch_size, hidden_size)\n        return hidden, cell\n\n\nrnn = RNN(input_size, hidden_size, num_layers)\nloss_func = nn.MSELoss()\noptimizer = torch.optim.Adam(rnn.parameters(), lr=lr)\n\nj=0\ninput_data = one_hot[j:j+batch_size].view(batch_size,seq_len,input_size)\nprint(input_data.size())\n\nhidden,cell = rnn.init_hidden_cell()\nprint(hidden.size(),cell.size())\n\noutput,hidden,cell = rnn(input_data,hidden,cell)\nprint(output.size(),hidden.size(),cell.size())\n\nunroll_len = one_hot.size()[0] // seq_len - 1\nfor i in range(num_epochs):\n    optimizer.zero_grad()\n    hidden, cell = rnn.init_hidden_cell()\n\n    loss = 0\n    for j in range(unroll_len - batch_size + 1):\n        # stack one-hot vectors according to the batch size.\n        # e.g. with batch size 3, 'pyt' from 'pytorch' is turned into stacked one-hot vectors as the input\n        # and 'yto' is turned into stacked one-hot vectors as the target.\n        input_data = torch.stack([one_hot[j + k:j + k + seq_len] for k in range(batch_size)], dim=0)\n        label = torch.stack([one_hot[j + k + 1:j + k + seq_len + 1] for k in range(batch_size)], dim=0)\n\n        input_data = input_data\n        label = label\n\n        output, hidden, cell = rnn(input_data, hidden, cell)\n        loss += loss_func(output.view(1, -1), label.view(1, -1))\n\n    loss.backward()\n    optimizer.step()\n\n    if i % 10 == 0:\n        print(loss)\n\nhidden, cell = rnn.init_hidden_cell()\nfor j in range(unroll_len-batch_size+1):\n    input_data = torch.stack([one_hot[j+k:j+k+seq_len] for k in range(batch_size)], dim=0)\n    label = torch.stack([one_hot[j+k+1:j+k+seq_len+1] for k in range(batch_size)], dim=0)\n    input_data = input_data\n    label = label\n    output, hidden, cell = rnn(input_data, hidden, cell)\n    for k in range(batch_size):\n        print(onehot_to_word(output[k].data), end=\"\")\n    if j < unroll_len-batch_size:\n        break","sub_path":"4. 
Recurrent Neural Network/practice/char_lstm_batch.py","file_name":"char_lstm_batch.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165048291","text":"\"\"\"\nbroadway\n~~~~~~~~\n\nA set of extensions for Flask that take the boilerplate out of your project.\n\n:copyright: (c) 2017 Daniel Knell\n:license: MIT, see LICENSE for more details.\n\"\"\"\nimport ast\nimport re\n\nimport setuptools\n\nRE_VERSION = r'__version__\\s+=\\s+(.*)'\n\nwith open('broadway/__init__.py', 'rb') as pythonfile:\n VERSION = str(\n ast.literal_eval(\n re.search(RE_VERSION, pythonfile.read().decode('utf-8')).group(1)\n )\n )\n\nsetuptools.setup(\n name=\"Broadway\",\n version=VERSION,\n url=\"http://github.com/artisanofcode/python-broadway\",\n author=\"Daniel Knell\",\n author_email=\"contact@danielknell.co.uk\",\n description=(\n \"A set of extensions for Flask that take the boilerplate out of \"\n \"your project.\"\n ),\n long_description=open(\"README.rst\", \"r\").read(),\n install_requires=[\n \"flask >= 0.7\",\n \"twelvefactor >= 0.1\",\n \"whitenoise >= 2.0.0\",\n ],\n packages=[\"broadway\"],\n license=\"MIT\",\n platforms=\"any\",\n zip_safe=False,\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454277799","text":"import sqlite3\n\ncon = sqlite3.connect('image.db')\n\ncs=con.cursor()\n\nm = cs.execute(\"\"\"SELECT * FROM my_table\"\"\")\n\nfor x in m:\n nm = x[1]\n img_data = x[2] \n\nwith open(nm, 'wb') as f:\n f.write(img_data)\n\ncon.commit()\ncs.close()\ncon.close() ","sub_path":"samples/databases/sqlite/eg4.py","file_name":"eg4.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"536447353","text":"import sys\nimport json\nimport datetime, time\nfrom time import strftime\nfrom pyspark import SparkConf, SparkContext\nfrom elasticsearch import Elasticsearch\nconf = SparkConf().setAppName(\"YanJ_app\").setMaster(\"spark://ip-172-31-1-12:7077\")\nsc = SparkContext(conf = conf)\n\n# Input files\ntextFile = sc.textFile(\"s3n://timo-twitter-data/2015/05/01/00/30.json\")\n\n# elasticsearch setting\nes = Elasticsearch(['ip-172-31-1-8'], http_auth=('elastic', 'changeme'), verify_certs=False)\nES_INDEX = \"twitter_indice\"\ndef create_es_index():\n es_mapping = {\"yan_type\": { \"properties\":{\"usr_id\": {\"type\":\"text\"}, 'ttext':{\"type\":\"text\"}, 'ttimes': {\"type\":\"date\"}} } }\n es_settings = {'number_of_shards':3, 'number_of_replicas': 2, 'refresh_interval': '1s', 'index.translog.flush_threshold_size': '1gb'}\n ES_indice = es.indices.create(index = ES_INDEX, body = {'settings': es_settings, 'mappings': es_mapping})\n\n\nif not 
es.indices.exists(ES_INDEX):\n create_es_index()\n\n# Map reduce\ndef map_func(line):\n each_line = json.loads(line)\n es = Elasticsearch(['ip-172-31-1-8'], http_auth=('elastic', 'changeme'), verify_certs=False)\n A, B, C = False, False, False\n\n if 'user' in each_line:\n if 'id' in each_line['user']:\n usr_id = each_line['user']['id']\n A = True\n if 'timestamp_ms' in each_line:\n raw_time = float(each_line['timestamp_ms'][:10])\n t_time = datetime.datetime.utcfromtimestamp(raw_time)\n t_time = t_time.strftime('%m/%d/%Y')\n B = True\n if 'text' in each_line:\n t_text = each_line['text']\n C = True\n if A and B and C:\n doc = {'usr_id': usr_id, 'ttext': t_text, 'ttimes': t_time}\n # es.index(index= ES_INDEX, doc_type='inputs', body=doc)\n# print doc \n\ndef reduce_func(a, b):\n return a + b\n\ncounts = textFile.map(map_func)\ncounts.saveAsTextFile(\"/tmp/result1\")\n","sub_path":"spark_work_1_30.py","file_name":"spark_work_1_30.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"566704022","text":"import traceback\n\nimport pandas as pd\nfrom flask import Flask, request, session\nfrom flask_restful import Api, Resource\nfrom flask import jsonify\nfrom flask_restful.utils import cors\nimport requests\nimport json\nfrom flask_cors import CORS\nfrom datetime import timedelta\n\n\napp=Flask(__name__)\nCORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\napi = Api(app)\napp.secret_key=\"vguidesecurity\"\napp.permanent_session_lifetime = timedelta(minutes=5)\n\ndef to_dict(obj):\n return json.loads(json.dumps(obj,default=lambda o: o.__dict__))\n\nclass Init(Resource):\n @cors.crossdomain(origin='*', methods ={\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n\n def get(self):\n response = {\n 'msg':\"Sistema de filtros colaborativos\"\n }\n return jsonify(response)\n\napi.add_resource(Init, '/init')\n\n# Funciones Auxiliares\ndef load_dataset(file):\n dataframe = pd.read_csv(file)\n return dataframe\n\n######################## PRE PROCESSING ##########################\n'''\n Construir las siguientes matrices, donde valores inexistentes son NaN:\n Matriz que represente las calificaciones de todos los usuarios para cada sitio turístico.\n Matriz que represente las preferencias de todos los usuarios para cada subcategoría de sitio turístico.\n Matriz que represente la relación de todas las subcategorías con cada usuario.\n '''\ndef buildMatrices(item, subcategory, user):\n item_matrix = item.pivot_table(index=['user_id'], columns=['item_id'], values='item_rating')\n subcategory_matrix = subcategory.pivot_table(index=['user_ids'], columns=['subCategory_ids'],\n values='subCategory_ratings')\n user_matrix = user.pivot_table(index=['subCategory_ids'], columns=['user_ids'],\n values='subCategory_ratings')\n return item_matrix, subcategory_matrix, user_matrix\n\n# Limpiar ruido (campos nulos y columnas con números de valores menor a un umbral).\ndef cleanNoise(item, subcategory, user):\n item_matrix = item.dropna(thresh=0, axis=1).fillna(0)\n subcategory_matrix = subcategory.dropna(thresh=0, axis=1).fillna(0)\n user_matrix = user.dropna(thresh=0, axis=1).fillna(0)\n return item_matrix, subcategory_matrix, user_matrix\n\n######################## TRAINING ###############################\n# MATRIZ DE SIMILITUD (Correlación de Pearson)\ndef computeSimilarityMatrices(item, subcategory, user):\n item_similarity_matrix = item.corr(method='pearson')\n subcategory_similarity_matrix = 
subcategory.corr(method='pearson').fillna(0)\n user_similarity_matrix = user.corr(method='pearson')\n\n # GUARDAR MATRIZ EN CSV\n item_similarity_matrix.to_pickle('item_similarity_matrix.pkt')\n subcategory_similarity_matrix.to_pickle('subcategory_similarity_matrix.pkt')\n user_similarity_matrix.to_pickle('user_similarity_matrix.pkt')\n\n return item_similarity_matrix, subcategory_similarity_matrix, user_similarity_matrix\n\n\n\n\n################## QUERIES ################## QUERIES ################## QUERIES ################## QUERIES\n\n# Lugares Turísticos\n\n#Recomendar sitios turísticos a un usuario, en base a su registro histórico de ratings sobre sitios turísticos\n# get_similar_places INPUT:\n'''\n# Usuario - fake data\n# Definir ratings de los lugares turisticos del usuario \nuser_places_ratings = [\n (1, 5),\n (485, 3),\n (389, 2),\n (456, 4),\n (245, 3),\n (367, 5),\n (563, 1),\n]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nget_similar_places(usuario1, item_similarity_matrix, 10, False)\n'''\ndef get_similar_places(user_data, item_similarity_matrix, size, places, tabulate=False):\n similar_items = pd.DataFrame()\n for item_id, user_rating in user_data:\n print(\"ITEM ID:\", item_id, \" - USER RATING\", user_rating)\n similar_score = item_similarity_matrix[item_id]*(user_rating-2.5)\n similar_score = similar_score.sort_values(ascending=False)\n similar_items = similar_items.append(similar_score)\n similar_items_data= similar_items.sum().sort_values(ascending=False)\n indices = similar_items_data.to_frame().index.values.tolist()\n values = similar_items_data.to_frame().values\n\n recommendation_list = []\n\n if tabulate == True:\n for i, value in enumerate(values[:size]):\n recommendation_list.append([indices[i],places.iloc[indices[i]-1, 5], float(value)])\n rec_dataframe = pd.DataFrame(recommendation_list)\n rec_dataframe.columns =['ID Lugar Turístico', 'Nombre Lugar Turístico', 'Puntaje']\n return rec_dataframe\n\n else:\n for i, value in enumerate(values[:size]):\n recommendation_list.append(indices[i])\n return recommendation_list\n\nclass GetSimilarPlaces(Resource):\n @cors.crossdomain(origin='*', methods = {\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n def get(self):\n try:\n item_similarity_matrix = pd.read_pickle('item_similarity_matrix.pkt')\n places = load_dataset('all.csv')\n\n intUserId=request.json['user_id']\n strUserId=str(intUserId)\n url=\"http://ec2-34-226-195-132.compute-1.amazonaws.com/api/reviews/user/\"+strUserId\n response = requests.request(\"GET\", url)\n jsonresponse=response.json()\n dictresponse=to_dict(jsonresponse)\n my_user_places_ratings=[]\n for d in dictresponse:\n thistuple=(d['touristic_place_id'],d['ranking'])\n my_user_places_ratings.append(thistuple)\n\n # FAKE DATA\n #my_user_places_ratings = [(2, 5), (4, 4), (23, 5), (25, 4)] #FAKE DATA\n\n recommended_places = get_similar_places(my_user_places_ratings, item_similarity_matrix, 20, places, False)\n print(\"Recommended Places: \", recommended_places)\n finalresponse=json.dumps(recommended_places)\n ultimateresponse=json.loads(finalresponse)\n return jsonify(to_dict(ultimateresponse))\n except Exception:\n print(traceback.format_exc())\n print(\"Could not fetch Similar Touristic Places\")\n return \"Could not fetch Similar Touristic Places\"\n\n# ENDPOINT GET SIMILAR PLACES\napi.add_resource(GetSimilarPlaces, '/simplac')\n\n\n\n\n\n\n\n\n\n\n\n# Subcategorías\n\n# Recomendar subcategorías a un usuario en base al registro de sus preferencias sobre subcategorías.\n# get_similar_subcategories INPUT:\n'''\n# 
Usuario - fake data\n# Definir ratings de los lugares turisticos del usuario \nuser_subcategories = [\n 10, 22, 3, 9, 2, 11\n]\n\nget_similar_subcategories(user_subcategories, subcategory_similarity_matrix, 20, False)\n'''\ndef get_similar_subcategories(user_data, subcategory_similarity_matrix, size, subcategories, tabulate=False):\n similar_items = pd.DataFrame()\n for subcategory_id in user_data:\n similar_score = subcategory_similarity_matrix[subcategory_id]#*(user_rating-2.5)\n similar_score = similar_score.sort_values(ascending=False)\n similar_items = similar_items.append(similar_score)\n similar_items_data = similar_items.sum().sort_values(ascending=False)\n indices = similar_items_data.to_frame().index.values.tolist()\n values = similar_items_data.to_frame().values\n\n recommendation_list = []\n\n if tabulate == True:\n for i, value in enumerate(values[:size]):\n recommendation_list.append([indices[i],subcategories.iloc[indices[i]-1, 1]])\n rec_dataframe = pd.DataFrame(recommendation_list)\n rec_dataframe.columns = ['ID Subcategoría', 'Nombre Subcategoría']\n return rec_dataframe\n\n else:\n for i, value in enumerate(values[:size]):\n recommendation_list.append(indices[i])\n return recommendation_list\n\nclass GetSimilarSubcategories(Resource):\n @cors.crossdomain(origin='*', methods={\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n def get(self):\n\n try:\n subcategory_similarity_matrix = pd.read_pickle('subcategory_similarity_matrix.pkt')\n\n # TODO: endpoint para obtener las subcategorias existentes en la base de datos\n subcategories = pd.read_csv('subCategories.csv')\n\n intUserId=request.json['user_id']\n strUserId=str(intUserId)\n url=\"http://ec2-34-226-195-132.compute-1.amazonaws.com/api/users/getSubCategories/\"+strUserId\n response=requests.request(\"GET\",url)\n\n responsedict=response.json()['subcategories']\n\n my_user_subcategories=[]\n\n for d in responsedict:\n my_user_subcategories.append(d['subcategory_id'])\n\n print(\"Recommended SubCategories: \", my_user_subcategories)\n recommended_subCategories = get_similar_subcategories(my_user_subcategories, subcategory_similarity_matrix, 5, subcategories, False)\n finalresponse=json.dumps(recommended_subCategories)\n ultimateresponse=json.loads(finalresponse)\n return jsonify(to_dict(ultimateresponse))\n except:\n print(\"Could not fetch Similar SubCategories\")\n return \"Could not fetch Similar SubCategories\"\n\n# ENDPOINT GET SIMILAR SUBCATEGORIES\napi.add_resource(GetSimilarSubcategories, '/simsubc')\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n# Usuarios\n\n# Retorna usuarios con gustos similares en base a sus SubCategorías\n# get_similar_subcategories INPUT:\n'''\nget_similar_users(149, user_similarity_matrix, 10)\n'''\ndef get_similar_users(user_id, user_similarity_matrix, size):\n similar_score = user_similarity_matrix[user_id]\n similar_score = similar_score.sort_values(ascending=False)\n #print(similar_score)\n similar_items = pd.DataFrame(similar_score)\n indices = similar_items.index.values.tolist()\n return indices[:size]\n\nclass GetSimilarUsers(Resource):\n @cors.crossdomain(origin='*', methods={\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n def get(self):\n try:\n user_similarity_matrix = pd.read_pickle('user_similarity_matrix.pkt')\n\n intUserId=request.json['user_id']\n strUserId=str(intUserId)\n\n similar_users = get_similar_users(intUserId, user_similarity_matrix, 10)\n print(\"Similar Users to user\", intUserId, \": \", similar_users)\n response = {\n 'userid':intUserId,\n 'similar users':similar_users\n }\n # 
response=json.dumps(similar_users)\n return json.dumps(response)\n except:\n print(\"Could not fetch Similar Users\")\n return \"Could not fetch Similar Users\"\n\n\n# ENDPOINT GET SIMILAR USERS\napi.add_resource(GetSimilarUsers,'/simus')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Obtener recomendaciones a través de los 3 usuarios más similares\n# Tiene como entrada la lista de usuarios similares. Se obtiene una recomendacion de lugares en base a las recomendaciones\ndef getSimilarUsersRecommendations(historicalRatings,item_similarity_matrix,places):\n # API Service: historicalRatings = getHistoricalRatingsFromUsers()\n recommendation_list = []\n for historicalRating in historicalRatings:\n try:\n single_prediction = get_similar_places(historicalRating, item_similarity_matrix, 3, places)\n for id in single_prediction[:2]:\n recommendation_list.append(id)\n except:\n pass\n return recommendation_list\n\nclass GetSimilarUsersRecommendations(Resource):\n @cors.crossdomain(origin='*', methods={\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n def get(self):\n\n try:\n user_similarity_matrix = pd.read_pickle('user_similarity_matrix.pkt')\n\n intUserId=request.json['user_id']\n similar_users=get_similar_users(intUserId,user_similarity_matrix,4)\n similar_users.pop(0)\n print(\"Similar Users to user\", intUserId,\":\",similar_users)\n\n item_similarity_matrix=pd.read_pickle('item_similarity_matrix.pkt')\n places=load_dataset('all.csv')\n\n # FETCH ITEMS DATA\n url = \"http://ec2-34-226-195-132.compute-1.amazonaws.com/api/reviews/all\"\n response = requests.request(\"GET\", url)\n rows = json.loads(response.text)\n item_ratings = []\n for row in rows:\n item_ratings.append([row['ranking'], row['touristic_place'], row['user']])\n\n ratings = pd.DataFrame(item_ratings, columns=['item_rating', 'item_id', 'user_id'])\n #ratings = load_dataset('itemDataset.csv')\n\n\n input_list=[]\n for i in similar_users:\n aux_df= ratings[ratings['user_id']==i]\n print(\"aux_df\", aux_df)\n aux_df=aux_df.drop('user_id',axis=1)\n subset=aux_df[['item_id','item_rating']]\n result=[tuple(x) for x in subset.to_numpy()]\n outputList = [i for i in result if i[0] <=2000]\n input_list.append(outputList)\n\n result = getSimilarUsersRecommendations(input_list,item_similarity_matrix,places)\n\n response = {\n 'userid':intUserId,\n 'recommendations':result\n }\n print(\"Recommended places to user\", intUserId, \"based on most similar users: \",result)\n return json.dumps(response)\n except Exception:\n print(traceback.format_exc())\n print(\"Could not fetch Similar Users Recommendations\")\n return \"Could not fetch Similar Users Recommendations\"\n\n# ENDPOINT GET PLACES RECOMMENDATIONS BY MOST SIMILAR USERS\napi.add_resource(GetSimilarUsersRecommendations,'/simusrec')\n\n\n\n# GET TRAINING DATA FROM DATABASE\n\nclass Train_Similarity_Matrices(Resource):\n @cors.crossdomain(origin='*', methods={\"HEAD\",\"OPTIONS\",\"GET\",\"POST\"})\n def get(self):\n try:\n # FETCH ITEMS DATA\n url = \"http://ec2-34-226-195-132.compute-1.amazonaws.com/api/reviews/all\"\n response = requests.request(\"GET\", url)\n rows = json.loads(response.text)\n item_ratings = []\n for row in rows:\n item_ratings.append([row['ranking'],row['touristic_place'],row['user']])\n\n df_itemDataset = pd.DataFrame(item_ratings, columns=['item_rating', 'item_id', 'user_id'])\n #print(df_itemDataset)\n\n # FETCH SUBCATEGORIES DATA\n url = \"http://ec2-34-226-195-132.compute-1.amazonaws.com/api/users/getAllPreferenceSubCategories/\"\n response = requests.request(\"GET\",url)\n rows = 
json.loads(response.text)\n subCategory_choices = []\n for row in rows:\n aux = []\n if row['status'] == True:\n aux.append(1)\n else:\n aux.append(0)\n aux.append(row['subcategory'])\n aux.append(row['user'])\n subCategory_choices.append(aux)\n\n df_subCategoryDataset = pd.DataFrame(subCategory_choices, columns=['subCategory_ratings','subCategory_ids','user_ids'])\n #print(df_subCategoryDataset)\n\n # BUILD MATRICES\n user_ratings_matrix, subCategory_ratings_matrix, users_by_subcategory_matrix = buildMatrices(df_itemDataset,\n df_subCategoryDataset,\n df_subCategoryDataset)\n\n # CLEAN NOISE\n user_ratings_matrix, subCategory_ratings_matrix, users_by_subcategory_matrix = cleanNoise(user_ratings_matrix,\n subCategory_ratings_matrix,\n users_by_subcategory_matrix)\n\n # TRAIN AND SAVE .pkt FILES\n item_similarity_matrix, subcategory_similarity_matrix, user_similarity_matrix = computeSimilarityMatrices(user_ratings_matrix,\n subCategory_ratings_matrix,\n users_by_subcategory_matrix)\n except:\n print(\"Could not train matrices\")\n return \"Could not train matrices\"\n\n return \"200\"\n\napi.add_resource(Train_Similarity_Matrices, '/trainmatrices')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n '''\n # FLUJO 1\n # Cargar Datasets\n df_itemDataset = load_dataset('itemDataset.csv')\n df_subCategoryDataset = load_dataset('subCategoryDataset.csv')\n\n # Pre Processing\n user_ratings_matrix, subCategory_ratings_matrix, users_by_subcategory_matrix = buildMatrices(df_itemDataset,\n df_subCategoryDataset,\n df_subCategoryDataset)\n\n user_ratings_matrix, subCategory_ratings_matrix, users_by_subcategory_matrix = cleanNoise(user_ratings_matrix,\n subCategory_ratings_matrix,\n users_by_subcategory_matrix)\n\n # Training\n item_similarity_matrix, subcategory_similarity_matrix, user_similarity_matrix = computeSimilarityMatrices(user_ratings_matrix,\n subCategory_ratings_matrix,\n users_by_subcategory_matrix)\n '''\n\n\n '''\n ###\n # FLUJO 2\n # Cargar matrices de similitud\n item_similarity_matrix = pd.read_pickle('item_similarity_matrix.pkt')\n subcategory_similarity_matrix = pd.read_pickle('subcategory_similarity_matrix.pkt')\n user_similarity_matrix = pd.read_pickle('user_similarity_matrix.pkt')\n ###\n\n # QUERIES -> datasets para visualizar NOMBRES\n\n # Dataset para visualizar queries (verbose)\n # Dataset con los nombres de todos los sitios turisticos por ID\n places = load_dataset('all.csv')\n # Dataset con los nombres de todos las subcategorías por ID\n subcategories = pd.read_csv('subCategories.csv')\n\n\n # QUERIES\n\n # RECOMENDAR LUGARES TURISTICOS basandose en rating historico del usuario\n # API debe tener servicio -> user_places_ratings = getUserRatingHistoryById(userId)\n user_places_ratings = [\n (485, 3),\n (389, 2),\n (456, 4),\n (245, 3),\n (367, 5),\n (563, 1)\n ]\n recommended_places = get_similar_places(user_places_ratings, item_similarity_matrix, 10, places, True)\n print(\"Lugares turísticos recomendados al usuario 'userID':\", '\\n', recommended_places, '\\n')\n\n \n # RECOMENDAR SUBCATEGORIAS a un usuario en base al registro de sus preferencias sobre subcategorías.\n # API debe tener servicio -> user_subcategories = getUserSubCategoriesById(userId)\n user_subcategories = [\n 10, 22, 3, 9, 2, 11\n ]\n recommended_subCategories = get_similar_subcategories(user_subcategories, subcategory_similarity_matrix, 10, subcategories, True)\n print(\"Subcategorías de posible interés al usuario 'userID':\", '\\n', recommended_subCategories, '\\n')\n\n\n # RECOMENDAR USUARIOS 
similares a un usuario en base a preferencias de subcategorías\n similar_users = get_similar_users(149, user_similarity_matrix, 10)\n print(\"Ids de usuarios similares a 'userID':\", '\\n', similar_users, '\\n')\n\n\n # Fake Data: historial de ratings de lugares turísticos\n historicalRatings = [\n [\n (123,5),\n (12,4),\n (399,5),\n (222,1),\n ],\n [\n (23,5),\n (245,4),\n (23,4),\n (100,1),\n (900,3)\n ],\n [\n (890,4),\n (898,4),\n (664,3),\n (899,5)\n ]\n ]\n\n\n result = getSimilarUsersRecommendations(historicalRatings,item_similarity_matrix,places)\n print(result)\n\n '''\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"316271373","text":"from retailapp.models import Store, Product, Sales, OutTransaction\nfrom datetime import date\nimport retailapp.models\nimport logging\nfrom retailexcept import ValidationError\n\n\nlogger = logging.getLogger(\"retail\")\n\n\ndef validate_sales(sales_data):\n \"\"\"validate sales\n \"\"\"\n if \"sales_payment\" in sales_data:\n payment_valid = False\n for choice in Sales.PAYMENT:\n if sales_data[\"sales_payment\"] == choice[0]:\n payment_valid = True\n if not payment_valid:\n raise ValidationError(\"payment is not valid %s\" %\n sales_data[\"sales_payment\"])\n else:\n raise ValidationError(\"Missing payment\")\n\n #validate currency\n if \"sales_currency\" in sales_data:\n currency_valid = False\n for choice in Sales.CURRENCY:\n if sales_data[\"sales_currency\"] == choice[0]:\n currency_valid = True\n if not currency_valid:\n raise ValidationError(\"currency is not valid %s\" % sales_data[\"sales_currency\"])\n else:\n raise ValidationError(\"Missing currency\")\n\n #validate remark\n if \"sales_remark\" not in sales_data:\n raise ValidationError(\"Missing remark\")\n\n #validate rmb\n if \"sales_rmb\" not in sales_data:\n raise ValidationError(\"Missing rmb\")\n\n #validate items\n if \"items\" in sales_data:\n if not sales_data[\"items\"]:\n raise ValidationError(\"Missing items\")\n\n for item in sales_data[\"items\"]:\n validate_sales_item(item)\n else:\n raise ValidationError(\"Missing items\")\n\n return\n\n\ndef validate_sales_item(sales_item):\n \"\"\"validate sales item\n \"\"\"\n logger.debug(\"validate_sales_item %s\" % sales_item)\n #validate product\n if \"out_product\" not in sales_item:\n raise ValidationError(\"Missing product\")\n \n #validate serial\n if \"out_serial\" not in sales_item:\n raise ValidationError(\"Missing serial number\")\n \n #validate quantity\n if \"out_quantity\" in sales_item:\n if not isinstance(sales_item[\"out_quantity\"], (int,)):\n raise ValidationError(\"Invalid quantity %s\" % sales_item[\"out_quantity\"])\n else:\n raise ValidationError(\"Missing quantity\")\n \n #validate unit price\n if \"out_unit_price\" in sales_item:\n if not isinstance(sales_item[\"out_unit_price\"], (int, float)):\n raise ValidationError(\"Invalid unit price %s\" % sales_item[\"out_unit_price\"])\n else:\n raise ValidationError(\"Missing unit price\")\n\n\ndef add_new_sales(store, sales_data):\n \"\"\" add new sales\n \"\"\"\n logger.debug(\"add_new_sales begin\")\n store_objs = Store.objects.filter(store_name=store)\n if not store_objs:\n raise ValidationError(\"Can't find store: %s\" % store)\n else:\n store_obj = store_objs[0]\n \n #validation\n validate_sales(sales_data)\n\n #create object\n sales_obj = Sales()\n sales_obj.sales_store = store_obj\n sales_obj.sales_date = 
date.today()\n sales_obj.sales_payment = sales_data['sales_payment']\n sales_obj.sales_currency = sales_data['sales_currency']\n sales_obj.sales_remark = sales_data['sales_remark']\n sales_obj.sales_rmb = sales_data['sales_rmb']\n sales_obj.save()\n items = sales_data['items']\n for item in items:\n p = Product.objects.get(prod_name=item['out_product'])\n sales_obj.outtransaction_set.create(out_product=p,\n out_serial=item['out_serial'],\n out_quantity=item['out_quantity'],\n out_unit_price=item['out_unit_price']\n )\n sales_obj.save()\n ret = {\"sales id\": sales_obj.id}\n logger.debug(\"add_new_sales done %s\" % ret)\n return ret\n\n\ndef get_sales(store, id):\n \"\"\"get sales\n \"\"\"\n logger.debug(\"get_sales begin %s\" % id)\n sales_id = id\n sales = Sales.objects.filter(sales_store__store_name=store).filter(id=sales_id)\n if not sales:\n raise ValidationError(\"Can't find receipt number %s\" % sales_id)\n \n sales_obj = sales[0]\n sales_data = sales_obj.get_all_data()\n sales_data['items'] = []\n for out in sales_obj.outtransaction_set.all():\n sales_item = out.get_all_data()\n sales_data['items'].append(sales_item)\n\n logger.debug(\"sales_data %s\" % sales_data)\n\n return sales_data\n\n\ndef update_sales(store, id, sales_data):\n \"\"\"update sales data\n \"\"\"\n logger.debug(\"update_sales begin %s\" % id)\n sales_id = id\n sales = Sales.objects.filter(sales_store__store_name=store).filter(id=sales_id)\n if not sales:\n raise ValidationError(\"Can't find receipt number %s\" % sales_id)\n\n #validation\n validate_sales(sales_data)\n\n sales_obj = sales[0]\n payment = sales_data[\"sales_payment\"]\n currency = sales_data[\"sales_currency\"]\n remark = sales_data[\"sales_remark\"]\n if sales_obj.sales_payment != payment:\n logger.debug(\"update payment \" + payment)\n sales_obj.sales_payment = payment\n if sales_obj.sales_currency != currency:\n logger.debug(\"update currency \" + currency)\n sales_obj.sales_currency = currency\n if sales_obj.sales_remark != remark:\n logger.debug(\"update remark \" + remark)\n sales_obj.sales_remark = remark\n \n sales_obj.save()\n \n items = sales_data['items']\n remove_items = []\n for out in sales_obj.outtransaction_set.all():\n item_id = out.id\n sales_item = {}\n sales_item['out_product'] = str(out.out_product)\n sales_item['out_serial'] = out.out_serial\n sales_item['out_quantity'] = out.out_quantity\n sales_item['out_unit_price'] = out.out_unit_price\n \n try:\n items.remove(sales_item)\n except:\n remove_items.append(item_id)\n\n logger.debug(\"remove items %s\" % remove_items)\n logger.debug(\"add items %s\" % items)\n\n for item_id in remove_items:\n OutTransaction.objects.filter(id=item_id).delete()\n\n for item in items:\n p = Product.objects.get(prod_name=item['out_product'])\n sales_obj.outtransaction_set.create(out_product=p,\n out_serial=item['out_serial'],\n out_quantity=item['out_quantity'],\n out_unit_price=item['out_unit_price']\n )\n sales_obj.save()\n\n ret = {\"sales id\": sales_obj.id}\n logger.debug(\"update_sales done %s\" % ret)\n return ret\n","sub_path":"retailapp/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"76108485","text":"'''\ninput\n10\n1 17\n5 13\n7 12\n5 17\n5 12\n2 17\n1 18\n8 13\n2 15\n5 20\n\nexpected output:\n11\n'''\n\n#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the componentsInGraph function below.\n#\ndef componentsInGraph(gb):\n\n n = len(gb)\n #print(gb)\n 
setofset1 = []\n setofset2 = []\n\n for i in range(len(gb)):\n #print(\"*************** This turn: \" + str(gb[i]))\n #print(\"current setofset1: \" + str(setofset1) + \"```````````````\")\n #print(\"current setofset2: \" + str(setofset2) + \"```````````````\")\n if len(setofset1) != 0:\n flag = False\n setsToUnion = []\n size1 = len(setofset1)\n size2 = len(setofset2)\n sizem = max(size1,size2)\n for j in range(sizem):\n if gb[i][0] in setofset1[min(size1-1,j)] or gb[i][1] in setofset2[min(size2-1,j)]:\n #print(\"Yes, \" + str(gb[i][0]) + \" or \" + str(gb[i][1]) +\" matched in set: \" + str(setofset[j]))\n setsToUnion.append(j)\n #print(\"Size of sets to union: \"+ str(len(setsToUnion)))\n flag = True\n\n if flag == False:\n setofset1.append(set()) # appending\n setofset2.append(set()) # appending\n setofset1[len(setofset1)-1].add(gb[i][0])\n setofset2[len(setofset2)-1].add(gb[i][1])\n #print(\"adding \" + str(gb[i]) + \" in new set \" + str(len(setofset)-1))\n\n elif len(setsToUnion)>=1:\n for k in range(1,len(setsToUnion)):\n #print(\"Unioning \" + str(setofset[setsToUnion[0]]) + \" and \" + str(setofset[setsToUnion[k]]))\n #print(\" setToUnion[k] : \" + str(setsToUnion[k]) + \" length of setofset1: \" + str(len(setofset1)) + \" length of setofset2: \" + str(len(setofset2)) )\n setofset1[setsToUnion[0]] = setofset1[setsToUnion[0]].union(setofset1[setsToUnion[k]])\n setofset2[setsToUnion[0]] = setofset2[setsToUnion[0]].union(setofset2[setsToUnion[k]])\n #print(\"end result: \" + str(setofset[setsToUnion[0]]))\n if k>0:\n #print(\"popping: \" + str(setsToUnion[k]) +\": \" + str(setsToUnion[k]))\n setofset1.pop(setsToUnion[k])\n setofset2.pop(setsToUnion[k])\n #setofset1[setsToUnion[0]] = setofset1[setsToUnion[0]].union(gb[i][0])\n setofset1[setsToUnion[0]].add(gb[i][0])\n #setofset2[setsToUnion[0]] = setofset2[setsToUnion[0]].union(gb[i][1])\n setofset2[setsToUnion[0]].add(gb[i][1])\n else:\n myset1 = set()\n myset2 = set()\n myset1.add(gb[0][0])\n myset2.add(gb[0][1])\n setofset1.append(myset1) # appending\n setofset2.append(myset2) # appending\n #print(\"adding \" + str(gb[i]) + \" in set 0\")\n\n #print(str(setofset1))\n #print(str(setofset2))\n #print(\"*************\")\n max_size = 0\n min_size = 15001\n for l in range(len(setofset1)):\n current_size = len(setofset1[l]) + len(setofset2[l])\n if current_size > max_size:\n max_size = current_size\n if current_size >= 2 and current_size < min_size:\n min_size = current_size\n res = []\n res.append(min_size)\n res.append(max_size)\n #print(\"returning this list: \" + str(res))\n return res\n\nif __name__ == '__main__':\n\n gb=[]\n #gb = [[1,17],[5,13],[7,12],[5,17],[5,12],[2,17],[1,18],[8,13],[2,15],[5,20]]\n\n inp = file(\"input.txt\",'r')\n n = int(inp.readline().split()[0])\n for _ in range(int(n)):\n gbs = inp.readline().split()\n gb.append(gbs)\n\n # input.txt er expected output: 1196 1196\n result = componentsInGraph(gb)\n\n print(str(result[0]) + \" \" + str(result[1]))\n","sub_path":"components in a graph.py","file_name":"components in a graph.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395633417","text":"\"\"\"\nSingle-scale super-resolution neural model and its internal functional modules.\nThe model is a simplification of NAS's SingleNetwork.\n\nReferences: https://github.com/kaist-ina/NAS_public\n\"\"\"\n\n__author__ = \"Yihang Wu\"\n\nimport math\n\nimport torch.nn as nn\n\n\nclass SingleNetwork(nn.Module):\n 
VALID_SCALES = (1, 2, 3, 4) # the up-scaling factors of images\n\n def __init__(self, scale, num_blocks, num_channels, num_features, bias=True, activation=nn.ReLU(True)):\n \"\"\"\n A single-scale single-image super-resolution neural model.\n\n Args:\n scale (int): up-scaling factor. The width of hr image is [scale] times larger than that of a lr image\n num_blocks (int): the number of residual blocks\n num_channels (int): the number of channels in an image, default 3 for BGR color channels\n num_features (int): the number of channels used throughout convolutional computations\n bias (bool): whether to use bias in convolutional layers\n activation (nn.Module): activate function used in residual blocks\n \"\"\"\n super(SingleNetwork, self).__init__()\n\n self.scale = scale\n self.num_blocks = num_blocks\n self.num_channels = num_channels\n self.num_features = num_features\n\n if self.scale not in SingleNetwork.VALID_SCALES:\n raise NotImplementedError\n\n # No early-exit implemented\n\n # Head of model\n self.head = nn.Sequential(nn.Conv2d(in_channels=self.num_channels, out_channels=self.num_features,\n kernel_size=3, stride=1, padding=1, bias=bias))\n\n # Body of model - consecutive residual blocks\n self.body = nn.ModuleList() # ModuleList does not have a forward method\n for _ in range(self.num_blocks):\n self.body.append(nn.Sequential(ResidualBlock(num_feats=self.num_features, bias=bias, act=activation)))\n\n self.body_end = nn.Sequential(nn.Conv2d(in_channels=self.num_features, out_channels=self.num_features,\n kernel_size=3, stride=1, padding=1, bias=bias))\n\n # Upsampling\n if self.scale > 1:\n self.upsampler = nn.Sequential(Upsampler(scale=self.scale, num_feats=self.num_features, bias=bias))\n\n # Tail of model\n self.tail = nn.Sequential(nn.Conv2d(in_channels=self.num_features, out_channels=self.num_channels,\n kernel_size=3, stride=1, padding=1, bias=bias))\n\n def forward(self, x):\n \"\"\"\n\n input shape (*, num_channels, input_height, input_width)\n output shape (*, num_channels, target_height, target_width)\n \"\"\"\n x = self.head(x) # (*, num_features, input_height, input_width)\n\n res = x # global residual\n for i in range(self.num_blocks):\n res = self.body[i](res)\n res = self.body_end(res)\n res += x # residual connection\n\n x = res # (*, num_features, input_height, input_width)\n if self.scale > 1:\n x = self.upsampler(x) # (*, num_features, target_height, target_width)\n\n x = self.tail(x) # (*, num_channels, target_height, target_width)\n\n return x\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, num_feats: int, bias: bool = True, batch_norm: bool = False, act: nn.Module = nn.ReLU(True),\n residual_scale=1):\n \"\"\"\n The residual block in SingleNetwork, which is a stack of Conv, ReLU, Conv and Sum layers.\n\n Args:\n num_feats (int): the number of channels of the convolutional kernel\n bias (bool): whether to use bias in convolutional layers, default true\n batch_norm (bool): whether to apply batch normalization after convolutional layers, default false (different from SRResNet)\n act (nn.Module): activation function\n residual_scale (float): the factor to scale the residual\n \"\"\"\n super(ResidualBlock, self).__init__()\n modules = []\n\n for i in range(2):\n modules.append(nn.Conv2d(in_channels=num_feats, out_channels=num_feats, kernel_size=3, stride=1, padding=1, bias=bias))\n if batch_norm:\n modules.append(nn.BatchNorm2d(num_feats))\n if i == 0:\n modules.append(act)\n\n self.block = nn.Sequential(*modules)\n self.residual_scale = 
residual_scale\n\n def forward(self, x):\n if self.residual_scale != 1: # scale the residual\n res = self.block(x).mul(self.residual_scale)\n else:\n res = self.block(x)\n res += x # residual connection\n\n return res\n\n\nclass Upsampler(nn.Module):\n def __init__(self, scale: int, num_feats: int, bias: bool = True, batch_norm: bool = False, act: nn.Module = None):\n \"\"\"\n This module up-samples the inputs to target outputs in terms of a specified scaling factor\n\n Args:\n scale (int): scaling factor\n num_feats (int): the number of channels of the image\n bias (bool): whether to use bias in convolutional layers, default true\n batch_norm (bool): whether to apply batch normalization, default false\n act (nn.Module): activation function\n \"\"\"\n super(Upsampler, self).__init__()\n\n modules = []\n if scale & (scale - 1) == 0: # scale = 1, 2, 4\n for _ in range(int(math.log(scale, 2))):\n modules.append(nn.Conv2d(in_channels=num_feats, out_channels=4 * num_feats, kernel_size=3, stride=1, padding=1, bias=bias))\n modules.append(nn.PixelShuffle(2))\n\n if batch_norm:\n modules.append(nn.BatchNorm2d(num_feats))\n if act:\n modules.append(act)\n elif scale == 3:\n modules.append(nn.Conv2d(in_channels=num_feats, out_channels=9 * num_feats, kernel_size=3, stride=1, padding=1, bias=bias))\n modules.append(nn.PixelShuffle(3))\n\n if batch_norm:\n modules.append(nn.BatchNorm2d(num_feats))\n if act:\n modules.append(act)\n else:\n raise NotImplementedError\n\n self.upsampler = nn.Sequential(*modules)\n\n def forward(self, x):\n return self.upsampler(x)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3614839","text":"# coding: utf-8\n\n# Copyright 2018 IBM All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ibm_whcs_sdk.insights_for_medical_literature as wh\n\ndef test_range():\n date_range = wh.Range(begin='2008', end='2020')\n date_range_diff = wh.Range(begin='2008', end='2021')\n\n range_obj = {}\n range_obj['begin'] = '2008'\n range_obj['end'] = '2020'\n range_dict = date_range._from_dict(range_obj)\n\n assert date_range.__str__() is not None\n assert date_range.__eq__(range_dict)\n assert date_range.__ne__(date_range_diff)\n","sub_path":"ibm_whcs_sdk/insights_for_medical_literature/tests/integration/model/test_range.py","file_name":"test_range.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"383118643","text":"#!/usr/bin/env python3\n\nimport sys\nimport pandas as pd\n\ndef main(eggnog_fp, out_fp):\n names = ['query_name', 'seed_eggNOG_ortholog', 'seed_ortholog_evalue',\n 'seed_ortholog_score', 'predicted_gene_name', 'GO_terms', 'KEGG_KOs',\n 'BiGG_reactions', 'Annotation_tax_scope', 'OGs', 'bestOG|evalue|score',\n 'COG cat', 'eggNOG annot']\n eggnog_df = pd.read_csv(eggnog_fp, sep='\\t', comment='#', names=names)\n 
eggnog_df = eggnog_df[[\"query_name\", \"predicted_gene_name\", \"eggNOG annot\", \"COG cat\"]]\n eggnog_df = eggnog_df.rename(columns={\"query_name\": \"gene_name\", \"predicted_gene_name\": \"common_name\", \"eggNOG annot\": \"desc\", \"COG cat\": \"cog_cat\"})\n eggnog_df.to_csv(out_fp, index=False, sep='\\t')\n print(\"DONE: output {}\".format(out_fp))\n\nif __name__==\"__main__\":\n target = sys.argv[1]\n direc = \"../material/{}\".format(target)\n eggnog_fp = \"{}/eggnog/bact.emapper.annotations\".format(direc)\n out_fp = \"{}/eggnog/eggnog.anno\".format(direc)\n main(eggnog_fp, out_fp)\n\n\n","sub_path":"eggnog/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"22805978","text":"\n#a = [1,5,8,3,5,2,2,1,5,17]\n\n\ndef quicksort(a):\n if len(a) <= 1:\n return a\n else:\n pivot = a[0]\n min = [i for i in a[1:] if i < pivot]\n max = [i for i in a[1:] if i >= pivot]\n return quicksort(min) + [pivot] + quicksort(max)\n","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"581528819","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGeneral utilities for the application.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom datetime import date\ndefault_app_config = 'calaccess_processed.apps.CalAccessProcessedConfig'\n\n\ndef archive_directory_path(instance, filename):\n \"\"\"\n Returns a path to an archived processed data file or ZIP.\n \"\"\"\n from calaccess_processed.models.tracking import (\n ProcessedDataVersion,\n ProcessedDataFile,\n )\n\n if isinstance(instance, ProcessedDataVersion):\n release_datetime = instance.raw_version.release_datetime\n f_name, f_ext = filename.split('.')\n path = '{fn}_{dt:%Y-%m-%d_%H-%M-%S}.{fx}'.format(\n fn=f_name,\n dt=release_datetime,\n fx=f_ext,\n )\n elif isinstance(instance, ProcessedDataFile):\n release_datetime = instance.version.raw_version.release_datetime\n path = '{dt:%Y-%m-%d_%H-%M-%S}/{f}'.format(dt=release_datetime, f=filename)\n else:\n raise TypeError(\n \"Must be ProcessedDataVersion or ProcessedDataFile instance.\"\n )\n return path\n\n\ndef get_expected_election_date(year, election_type):\n \"\"\"\n Get the date of the election in the given year and type.\n\n Raise an exception if year is not even or if election_type is not\n \"PRIMARY\" or \"GENERAL\".\n\n Return a date object.\n \"\"\"\n # Rules defined here:\n # https://leginfo.legislature.ca.gov/faces/codes_displayText.xhtml?lawCode=ELEC&division=1.&title=&part=&chapter=1.&article= # noqa\n if year % 2 != 0:\n raise Exception(\"Regular elections occur in even years.\")\n elif election_type.upper() == 'PRIMARY':\n # Primary elections are in June\n month = 6\n elif election_type.upper() == 'GENERAL':\n # General elections are in November\n month = 11\n else:\n raise Exception(\"election_type must 'PRIMARY' or 'GENERAL'.\")\n\n # get the first weekday\n # zero-indexed starting with monday\n first_weekday = date(year, month, 1).weekday()\n\n # calculate day or first tuesday after first monday\n day_or_month = (7 - first_weekday) % 7 + 2\n\n return date(year, month, day_or_month)\n","sub_path":"calaccess_processed/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"171291601","text":"#Two Strings\r\n#!/bin/python3\r\n\r\nimport sys\r\n\r\ndef twoStrings(s1, s2):\r\n bool = False\r\n for c in s1:\r\n if c in s2:\r\n bool = True\r\n break\r\n if bool:\r\n return \"YES\"\r\n else:\r\n return \"NO\"\r\n\r\nq = int(input().strip())\r\nfor a0 in range(q):\r\n s1 = input().strip()\r\n s2 = input().strip()\r\n result = twoStrings(s1, s2)\r\n print(result)","sub_path":"Strings_15.py","file_name":"Strings_15.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257543941","text":"# -*- coding:utf-8 -*-\nimport json\nimport re\n\nimport requests\nfrom get_page_info import get_main_page_info\n\n\ndef get_links():\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.55'\n }\n links = []\n all_money_info = []\n for i in range(1, 21):\n url = f'http://172.20.150.50:40000/wuyou/{i}'\n response = requests.get(url=url, headers=headers)\n content = response.content.decode('utf-8')\n page_links = re.findall(r'', content)\n money = re.findall(r'([\\s\\S]*?)0:\n for building in rebu.geometry:\n if building.contains(point):\n return building\n\ndef collect_houses(cadastre, cabu, rebu):\n houses = []\n for building in cabu.geometry:\n if cadastre.contains(building):\n houses.append(building)\n if len(rebu)>0:\n for building in rebu.geometry:\n if cadastre.contains(building):\n houses.append(building)\n return houses\n\ndef save_clipped_tif(poly, tif, file_name):\n coords = list(poly.buffer(5).exterior.coords)\n min_x, max_x = coords[0][0], coords[0][0]\n min_y, max_y = coords[0][1],coords[0][1]\n for x, y in coords:\n min_x, max_x = min(min_x, x), max(max_x, x)\n min_y, max_y = min(min_y, y), max(max_y, y)\n geometries = [\n {\n 'type': 'Polygon',\n 'coordinates': [[\n [min_x, min_y], [min_x, max_y], [max_x, max_y], [max_x, min_y], [min_x, min_y]\n ]]\n }\n ]\n try:\n clipped = tif.rio.clip(geometries, from_disk=True)\n clipped.rio.to_raster(f\"{file_name}\", dtype=\"float64\")\n except:\n geometries = [\n {\n 'type': 'Polygon',\n 'coordinates': [[\n [coord[0], coord[1]] for coord in poly.exterior.coords\n ]]\n }\n ]\n clipped = tif.rio.clip(geometries, from_disk=True)\n clipped.rio.to_raster(f'{file_name}', dtype=\"float64\")\n\n\n\n\ndef collect_cadastre(house, cadastre):\n cad = cadastre[cadastre.geometry.contains(house)]\n if len(cad)>0:\n return cad.geometry.iloc[0]\n #cad = cadastre[cadastre.geometry.intersection(house).area>0]\n #if len(cad)>0:\n # return cad.geometry.iloc[0]\n return house\n \n\n\n\nif __name__=='__main__':\n #addresses = \"CRAB_Adressenlijst_Shapefile/Shapefile/CrabAdr.shp\"\n #addresses = gpd.read_file(addresses)\n #addresses = addresses[(addresses.GEMEENTE=='Oostkamp')]\n #with open(f\"test_address.pickle\", \"wb\") as file_adresses:\n # pickle.dump(addresses, file_adresses)\n\n #print(addresses)\n\n #with open(f\"test_address.pickle\", \"rb\") as file_adresses:\n # addresses = pickle.load(file_adresses)\n with open('OOSTKAMP/adresses.pickle', 'rb') as file:\n addresses = pickle.load(file)\n print(addresses.shape)\n print(addresses.head())\n #print(addresses[addresses.HUISNR=='106'])\n #city_folder_creator(['OOSTKAMP'],addresses) \n\n","sub_path":"database/tiff_splitter.py","file_name":"tiff_splitter.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"38720269","text":"from __future__ import division\n\nfrom numpy import arange, sqrt\nfrom numpy.random import RandomState\nfrom numpy.testing import assert_allclose\nfrom optimix import check_grad\n\nfrom glimix_core.cov import EyeCov, LinearCov, SumCov\nfrom glimix_core.ggp import ExpFamGP\nfrom glimix_core.lik import BernoulliProdLik\nfrom glimix_core.link import LogitLink\nfrom glimix_core.mean import OffsetMean\nfrom glimix_core.random import GGPSampler\n\n\ndef _get_data():\n random = RandomState(0)\n N = 10\n X = random.randn(N, N + 1)\n X -= X.mean(0)\n X /= X.std(0)\n X /= sqrt(X.shape[1])\n offset = 1.0\n\n mean = OffsetMean()\n mean.offset = offset\n mean.set_data(arange(N), purpose='sample')\n mean.set_data(arange(N), purpose='learn')\n\n cov_left = LinearCov()\n cov_left.scale = 1.5\n cov_left.set_data((X, X), purpose='sample')\n cov_left.set_data((X, X), purpose='learn')\n\n cov_right = EyeCov()\n cov_right.scale = 1.5\n cov_right.set_data((arange(N), arange(N)), purpose='sample')\n cov_right.set_data((arange(N), arange(N)), purpose='learn')\n\n cov = SumCov([cov_left, cov_right])\n\n lik = BernoulliProdLik(LogitLink())\n\n y = GGPSampler(lik, mean, cov).sample(random)\n\n return dict(\n mean=mean,\n cov=cov,\n lik=lik,\n y=y,\n cov_left=cov_left,\n cov_right=cov_right)\n\n\ndef test_expfam_ep():\n data = _get_data()\n ep = ExpFamGP((data['y'], ), 'bernoulli', data['mean'], data['cov'])\n assert_allclose(ep.feed().value(), -5.031838893222976)\n\n\ndef test_expfam_ep_function():\n data = _get_data()\n ep = ExpFamGP((data['y'], ), 'bernoulli', data['mean'], data['cov'])\n\n assert_allclose(check_grad(ep.feed()), 0, atol=1e-4)\n\n\ndef test_expfam_ep_optimize():\n data = _get_data()\n ep = ExpFamGP((data['y'], ), 'bernoulli', data['mean'], data['cov'])\n data['cov_left'].fix('logscale')\n ep.feed().maximize(verbose=False)\n assert_allclose(data['cov_right'].scale, 0.3815125853009603, atol=1e-5)\n assert_allclose(data['mean'].offset, 2.8339582691250604, rtol=1e-6)\n","sub_path":"glimix_core/ggp/test/test_expfamgp.py","file_name":"test_expfamgp.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"538756101","text":"from .generic import BasePlugin, BaseShortcutPlugin, EQUELPluginException as EQUELPluginException, GenericPlugin\nfrom typing import Any\n\nclass GenericAggregationPlugin(GenericPlugin):\n name: str = ...\n description: str = ...\n def apply(self, verb: Any, *args: Any, **kwargs: Any): ...\n\nclass AggregationShortcutPlugin(BaseShortcutPlugin):\n name: str = ...\n description: str = ...\n translation: Any = ...\n def apply(self, prefix: Any, value: Any, parser: Any, ctx: Any): ...\n\nclass AggregationKeywordsPlugin(BasePlugin):\n name: str = ...\n description: str = ...\n translation: Any = ...\n def apply(self, verb: Any, params: Any, parser: Any, ctx: Any) -> None: ...\n\nclass FilterAggregationPlugin(GenericAggregationPlugin):\n name: str = ...\n description: str = ...\n def apply(self, verb: Any, *args: Any, **kwargs: Any): ...\n","sub_path":"Result/4079files/Uninf_noImp_noSemantic/3055-Uninf_noImp_noSemantic.pyi","file_name":"3055-Uninf_noImp_noSemantic.pyi","file_ext":"pyi","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"378546258","text":"# 寻找开发者平均贡献时间:第一次与最后一次代码贡献时间\n# 生成一个duration.json 里面的参数为{用户email:{所有commits的时间}}\nimport json\nimport time\nimport datetime\nimport 
calendar\n\ndef findAllUsers(folder):\n cDir = \"public/\" + folder + \"/\"\n allUsers = {}\n\n\n for year in range(2008,2019):\n for month in range(1,13):\n date = str(year) + \"-\" + \"%02d\" % month\n file = date + \"-commitsUser.json\"\n\n with open(cDir + file, 'r') as f:\n data = json.loads(f.read())\n for user in data:\n thisDate = data[user]\n print(data[user])\n if user not in allUsers:\n allUsers[user] = []\n\n allUsers[user] = allUsers[user] + data[user]\n\n with open(cDir + \"allUsers.json\",'w') as f:\n json.dump(allUsers,f)\n\n\ndef findTenure(folder):\n cDir = \"public/\" + folder + \"/\"\n tenures = {}\n\n file = \"allUsers.json\"\n with open(cDir + file, 'r') as f:\n data = json.loads(f.read())\n\n for user in data:\n commitInfo = data[user]\n tenures[user] = {}\n\n start = time.strptime(\"2099-01-01\",\"%Y-%m-%d\")\n end = time.strptime(\"2000-01-01\",\"%Y-%m-%d\")\n\n for item in commitInfo:\n date = item[\"date\"][0:10]\n ts = time.strptime(date,\"%Y-%m-%d\")\n if(ts < start):\n start = ts\n\n if (ts > end):\n end = ts\n\n tenures[user][\"start\"] = str(start.tm_year)+\"-\"+\"%02d\" % start.tm_mon+\"-\"+\"%02d\"%start.tm_mday\n tenures[user][\"end\"] = str(end.tm_year)+\"-\"+\"%02d\" %end.tm_mon+\"-\"+ \"%02d\" % end.tm_mday\n\n start = datetime.datetime(start.tm_year,start.tm_mon,start.tm_mday)\n end = datetime.datetime(end.tm_year,end.tm_mon,end.tm_mday)\n tenure = (end - start).days\n\n tenures[user][\"tenure\"] = tenure\n\n with open(cDir + \"tenures.json\", 'w') as f:\n json.dump(tenures,f)\n\nfolders = [\"JSON\",\"d3\"]\n\n#for folder in folders:\n# findAllUsers(folder)\n\n#for folder in folders:\n #findTenure(folder)\nfindTenure(\"javascript\")\n","sub_path":"public/findDuration.py","file_name":"findDuration.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"359848820","text":"from collections import namedtuple\n\nfrom scv.streamer import UnicodeStreamer, Mark\n\nfrom .yajl import ffi, lib as yajl\nfrom . 
import tokens as tk\n\n# Create names for all functions right away to make sure they exist and for\n# code completion and introspection purposes.\nyajl_set_default_alloc_funcs = yajl.yajl_set_default_alloc_funcs\nyajl_lex_alloc = yajl.yajl_lex_alloc\nyajl_lex_free = yajl.yajl_lex_free\nyajl_lex_lex = yajl.yajl_lex_lex\nyajl_buf_alloc = yajl.yajl_buf_alloc\nyajl_buf_free = yajl.yajl_buf_free\nyajl_string_decode = yajl.yajl_string_decode\nyajl_buf_data = yajl.yajl_buf_data\nyajl_buf_len = yajl.yajl_buf_len\nyajl_buf_clear = yajl.yajl_buf_clear\n\n\nToken = namedtuple(\"Token\", \"tok value mark_start mark_end\")\n\n\nclass Lexer(object):\n    \"\"\"\n    Uses yajl C library to produce sequence of tokens with values and\n    location marks\n    \"\"\"\n    tokens_want_value = frozenset([tk.BOOL, tk.INT, tk.FLOAT, tk.STR])\n    bufsize = 1024 * 20\n\n    def __init__(self, allow_comments=False, validate_utf8=False):\n        # Let us get this alloc funcs nonsense out of the way\n        afs = ffi.new(\"yajl_alloc_funcs *\")\n\n        # Protect it from being garbage-collected\n        self.__preserve_afs = afs\n        yajl_set_default_alloc_funcs(afs)\n        self._lexer = ffi.gc(\n            yajl_lex_alloc(afs, allow_comments, validate_utf8),\n            yajl_lex_free,\n        )\n        self.decode_buffer = ffi.gc(\n            yajl_buf_alloc(afs),\n            yajl_buf_free,\n        )\n\n\n    def consume(self, chunk):\n        chunk_p = ffi.new('char[]', chunk)\n        chunk_len = len(chunk)\n\n        offset = ffi.new('size_t *', 0)\n\n        out_buffer = ffi.new('unsigned char **')\n        out_len = ffi.new('size_t *')\n\n        while True:\n            tok = yajl_lex_lex(self._lexer, chunk_p, chunk_len,\n                               offset,\n                               out_buffer,\n                               out_len)\n            if tok == tk.EOF:\n                break\n            elif tok == tk.ERROR:\n                # TODO: handle yajl_tok_error\n                raise Exception(\"yajl_tok_error TODO: proper exception\")\n                #raise ValueError('Invalid JSON')\n            # This never happens right now, but why not\n            elif tok == tk.COMMENT:\n                continue\n            elif tok == tk.STR_EX:\n                yajl_string_decode(self.decode_buffer, out_buffer[0],\n                                   out_len[0])\n                value = ffi.string(yajl_buf_data(self.decode_buffer),\n                                   yajl_buf_len(self.decode_buffer))\n                yajl_buf_clear(self.decode_buffer)\n                tok = tk.STR\n            else:\n                value = ffi.string(out_buffer[0], out_len[0])\n\n            end_offset = offset[0]\n            token_len = out_len[0]\n\n            # end_offset points to end of the token, start of the token can be\n            # found using token_len (possibly in one of previous chunks)\n            yield tok, value, token_len, end_offset\n\n\n    def tokenize(self, stream, **kwargs):\n        man = UnicodeStreamer(stream, **kwargs)\n\n        data_str = {}\n        tok = None\n        for idx, (prev, current, next) in enumerate(man.chunks3()):\n            current_str = current.data.encode('utf-8')\n            if not next and not current_str.endswith(' '):\n                # At the end of the stream the lexer should know current\n                # token has ended, which is sometimes impossible without\n                # whitespace\n                current_str += ' '\n\n            # Keep this to avoid encoding this several times\n            data_str[idx] = current_str\n\n            def uni_offset(bs, off):\n                return len(bs[:off].decode('utf-8'))\n\n            for tok, value, token_len, end_offset in self.consume(current_str):\n                if tok == tk.EOF:\n                    continue\n                if tok == tk.STR:\n                    token_len += 2  # Let's factor in the double quotes.\n                                    # Fortunately, there is only one quoting\n                                    # style.\n                # Now translate utf-8 offsets back to unicode offsets\n                u_end_offset = uni_offset(current_str, end_offset)\n                start_offset = end_offset - token_len\n                start_buf = idx\n                while start_offset < 0:\n                    # The token started in an earlier buffer: step back and\n                    # re-express the offset relative to that buffer's start.\n                    start_buf -= 1\n                    start_offset += len(data_str[start_buf])\n\n                u_start_offset = uni_offset(data_str[start_buf], start_offset)\n\n                # All the buffers for this token, 
plus ones on the left and\n                # right.\n                want_buffers = range(start_buf-1, idx+2)\n                mark_end = Mark(man, idx, u_end_offset,\n                                keep_buffers=want_buffers)\n                mark_start = Mark(man, start_buf, u_start_offset,\n                                  keep_buffers=want_buffers)\n\n                # Drop all the other encoded buffers, they won't be needed\n                # anymore.\n                data_str = {idx: current_str}\n\n                yield Token(tok, value, mark_start, mark_end)\n\n        if tok == tk.EOF:\n            class TODOError(Exception):\n                pass\n            raise TODOError(\"Unexpected end of stream in the middle of a \"\n                            \"token\")\n","sub_path":"scv/backends/json/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"245040031","text":"\"\"\"\nA child is playing with a ball on the nth floor of a tall building. The height of this floor, h, is known.\n\nHe drops the ball out of the window. The ball bounces (for example), to two-thirds of its height (a bounce of 0.66).\n\nHis mother looks out of a window 1.5 meters from the ground.\n\nHow many times will the mother see the ball pass in front of her window (including when it's falling and bouncing)?\n\nThree conditions must be met for a valid experiment:\nFloat parameter \"h\" in meters must be greater than 0\nFloat parameter \"bounce\" must be greater than 0 and less than 1\nFloat parameter \"window\" must be less than h.\nIf all three conditions above are fulfilled, return a positive integer, otherwise return -1.\n\nNote:\nThe ball can only be seen if the height of the rebounding ball is strictly greater than the window parameter.\n\nExample:\n- h = 3, bounce = 0.66, window = 1.5, result is 3\n\n- h = 3, bounce = 1, window = 1.5, result is -1 \n\n(Condition 2 not fulfilled).\n\"\"\"\n#solve it in a recursive way\n\ndef bouncingBall(h, bounce, window):\n    if h <= 0 or bounce <= 0 or bounce >= 1 or window >= h:\n        return -1\n    return 2 + bouncingBall(h * bounce, bounce, window)\n\ndef bouncingBall(h, bounce, window):\n    count = -1\n    if h > 0 and 0 < bounce < 1 and window < h:\n        while h > window:\n            h = h*bounce\n            count += 2\n    return count\n\n\"\"\"\n---------TOP SOLUTION---------\n\n# The height of the ball after bouncing can be expressed as an exponential function:\n#\n# f(x) = h * bounce^x\n# f(x) is the height the ball reaches after x bounces\n# h is initial height\n# bounce is the decay factor\n#\n# By solving the equation f(x) = window, we get the number of \n# bounces that will finally put the ball at the exact window height.\n#\n# Example:\n# f(x) = 3 * 0.66^x\n# f(x) = 1.5 --> x ~= 1.67\n# So the first bounce will put the ball a bit above window height,\n# but the second will put it a bit below.\n# This means the ball will pass the window 2 times (one bounce).\n#\n# If a bounce puts the ball at the exact window height (an exact \n# number of bounces, x is an integer), this would mean the ball \n# won't pass the window, only appear in front of it.\n# However, due to the restriction in this assignment, the ball\n# can only be seen if its height is _strictly_ greater than the \n# window height.\n\nimport math\n\ndef bouncingBall(h, bounce, window):\n    # If parameters don't fulfil conditions, return -1\n    if not (h > 0 and 0 < bounce < 1 and window < h):\n        return -1\n    # Solve equation for f(x) = window, using logarithms\n    bounces = math.log(window / h, bounce)\n    # Get actual number of bounces that still puts the ball above window height\n    exactBounces = math.floor(bounces)\n    # If last bounce is not strictly higher than window height, it can't be seen\n    if bounces == exactBounces: \n        exactBounces -= 1\n    # The ball will pass the window two times for each bounce, up and down, \n    # plus one for the initial drop past window, before first bounce\n    passes = exactBounces * 2 + 1\n    return passes\n\n\"\"\"\n","sub_path":"kata/Bouncing_Balls.py","file_name":"Bouncing_Balls.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"81986279","text":"from kozo import Role, NODE_NAME\nimport gpio\n\nclass PirSensor(Role):\n\tdef localInit(self):\n\t\tself._pin = gpio.Pin(self['pin'], gpio.INPUT)\n\t\tself._lastState = False\n\tdef getRateControl(self):\n\t\treturn self['rate']\n\tdef run(self):\n\t\tmovement = self._pin.state\n\t\tif self['log'] and movement != self._lastState:\n\t\t\tif movement:\n\t\t\t\tself.sendLog('Motion detected!')\n\t\t\telse:\n\t\t\t\tself.sendLog('Motion stopped.')\n\t\t\tself._lastState = movement\n\t\tself.sendEvent('motiondetect', channel=self['channel'], data=movement)\n\nroleInfo = {\n\t'format': '1.0',\n\t'class': PirSensor,\n\t'author': 'Etienne Perot',\n\t'version': '1.0',\n\t'description': 'Reports motion detection as measured by a PIR sensor over a GPIO pin.',\n\t'config': {\n\t\t'pin': {\n\t\t\t'description': 'Pin number for the data wire of the PIR sensor.'\n\t\t},\n\t\t'channel': {\n\t\t\t'default': NODE_NAME,\n\t\t\t'description': 'Name of the motiondetect event channel used to send motion detection readings.'\n\t\t},\n\t\t'rate': {\n\t\t\t'default': 0.5,\n\t\t\t'description': 'How often to check for and report motion detection status.'\n\t\t},\n\t\t'log': {\n\t\t\t'default': True,\n\t\t\t'description': 'Send out log messages for motion detection changes.'\n\t\t}\n\t}\n}\n","sub_path":"kozo/roles/pir_sensor.py","file_name":"pir_sensor.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"40099924","text":"\"\"\"\nWe are given a matrix with R rows and C columns, whose cells have integer coordinates (r, c),\nwhere 0 <= r < R and 0 <= c < C.\n\nAdditionally, we are given a cell in that matrix with coordinates (r0, c0).\n\nReturn the coordinates of all cells in the matrix, sorted by their distance from (r0, c0) from\nsmallest distance to largest distance. Here, the distance between two cells (r1, c1) and (r2, c2)\nis the Manhattan distance, |r1 - r2| + |c1 - c2|. 
(You may return the answer in any order that\nsatisfies this condition.)\n\n\n\nExample 1:\n\nInput: R = 1, C = 2, r0 = 0, c0 = 0\nOutput: [[0,0],[0,1]]\nExplanation: The distances from (r0, c0) to other cells are: [0,1]\nExample 2:\n\nInput: R = 2, C = 2, r0 = 0, c0 = 1\nOutput: [[0,1],[0,0],[1,1],[1,0]]\nExplanation: The distances from (r0, c0) to other cells are: [0,1,1,2]\nThe answer [[0,1],[1,1],[0,0],[1,0]] would also be accepted as correct.\nExample 3:\n\nInput: R = 2, C = 3, r0 = 1, c0 = 2\nOutput: [[1,2],[0,2],[1,1],[0,1],[1,0],[0,0]]\nExplanation: The distances from (r0, c0) to other cells are: [0,1,1,2,2,3]\nThere are other answers that would also be accepted as correct, such as\n[[1,2],[1,1],[0,2],[1,0],[0,1],[0,0]].\n\n\nNote:\n\n1 <= R <= 100\n1 <= C <= 100\n0 <= r0 < R\n0 <= c0 < C\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def allCellsDistOrder(self, R: int, C: int, r0: int, c0: int) -> List[List[int]]:\n\n        res, d = [], {}\n\n        # bucket every cell by its Manhattan distance from (r0, c0)\n        for r in range(R):\n            for c in range(C):\n                dist = abs(r0 - r) + abs(c0 - c)\n\n                d[dist] = d.get(dist, []) + [[r, c]]\n                # if dist in d:\n                #     d[dist] += [[r,c]]\n\n                # else:\n                #     d[dist] = [[r,c]]\n        # print(d)\n\n        # emit the buckets in order of increasing distance\n        for k in sorted(d.keys()):\n            for e in d[k]:\n                res.append(e)\n\n        return res\n\n","sub_path":"leet_1030_matrix_cells_in_distance_order.py","file_name":"leet_1030_matrix_cells_in_distance_order.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"235686026","text":"from TurtleWorld import *\n\nimport math\n\nworld = TurtleWorld() \nbob = Turtle()\nprint(bob)\n\ndef square(t,length):\n\tfor i in range(4):\n\t\tfd(t, length)\n\t\tlt(t)\n\n\ndef polygon(t,length,n):\n\tfor i in range(n):\n\t\tfd(t, length)\n\t\tlt(t,360.0/n)\n\n\ndef circle(t,r):\n\tnumberOfSides = 100\n\tlength = 2*math.pi*r/numberOfSides\n\tfor i in range(numberOfSides):\n\t\tfd(t,length)\n\t\tlt(t,360.0/numberOfSides)\n\n\ndef arc(t,r,angle):\n\tnumberOfSides = 360\n\tlength = 2*math.pi*r/numberOfSides\n\tfor i in range(angle):\n\t\tfd(t,length)\n\t\tlt(t,360.0/numberOfSides)\n\n\n#square(bob, 100)\npolygon(bob, length=100, n=6)\nbob.delay=0.01\narc(bob,80,250)\n#circle(bob,50)\npolygon(bob, length=50, n=6)\n\nwait_for_user()","sub_path":"polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"38574483","text":"from zoundry.base.xhtml.xhtmldocutil import removeJavaScript\r\nfrom zoundry.appframework.global_services import getApplicationModel\r\nfrom zoundry.appframework.ui.dialogs.bgtaskprogressdialog import ZBackgroundTaskProgressDialog\r\nfrom zoundry.blogapp.constants import IZBlogAppServiceIDs\r\nfrom zoundry.blogapp.messages import _extstr\r\nfrom zoundry.blogapp.services.template.templategrabber import ZBlogTemplateGrabberBGTask\r\nfrom zoundry.blogapp.ui.templates.templatedialogs import ZAddTemplateFromBlogDialog\r\nimport wx\r\n\r\n# ------------------------------------------------------------------------------\r\n# Convenience function that lets the user download a template from a blog.\r\n# ------------------------------------------------------------------------------\r\ndef doTemplateDownload(parentWindow, blog = None):\r\n    dialog = ZAddTemplateFromBlogDialog(parentWindow)\r\n    if blog is not None:\r\n        dialog.selectBlog(blog)\r\n    if dialog.ShowModal() == wx.ID_OK:\r\n        blog = dialog.getSelectedBlog()\r\n        templateName = dialog.getTemplateName()\r\n        makeDefaultTemplate = 
dialog.isMakeDefaultTemplate()\r\n\r\n        task = ZBlogTemplateGrabberBGTask()\r\n        task.initialize(blog, templateName, makeDefaultTemplate)\r\n        taskService = getApplicationModel().getService(IZBlogAppServiceIDs.BACKGROUND_TASK_SERVICE_ID)\r\n        taskService.addTask(task)\r\n        \r\n        title = task.getName()\r\n        description = _extstr(u\"templateuiutil.DownloadingBlogTemplate\") % blog.getName() #$NON-NLS-1$\r\n        imagePath = u\"images/dialogs/bgtask/header_image.png\" #$NON-NLS-1$\r\n        taskDialog = ZBackgroundTaskProgressDialog(parentWindow, task, title, description, imagePath)\r\n        taskDialog.ShowModal()\r\n        taskDialog.Destroy()\r\n\r\n    dialog.Destroy()\r\n# end doTemplateDownload()\r\n\r\n# ------------------------------------------------------------------------------\r\n# Disables javascript by removing script references from the xhtml document.\r\n# ------------------------------------------------------------------------------\r\ndef disableTemplatePreviewJavaScript(xhtmlDoc):\r\n    removeJavaScript(xhtmlDoc)\r\n# end disableTemplatePreviewJavaScript \r\n \r\n","sub_path":"src/python/zoundry/blogapp/ui/templates/templateuiutil.py","file_name":"templateuiutil.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"464553418","text":"import collections.abc\nimport struct\n\n\ndef _encode_int(data, major_type):\n    # Encode an unsigned integer with the given CBOR major type, using the\n    # smallest-width form that fits.\n    encoded = b''\n\n    if 0 <= data <= 23:\n        encoded += bytes([(major_type << 5) + data])\n    elif data < (2 << 7):\n        encoded += bytes([(major_type << 5) + 24])\n        encoded += struct.pack('>B', data)\n    elif data < (2 << 15):\n        encoded += bytes([(major_type << 5) + 25])\n        encoded += struct.pack('>H', data)\n    elif data < (2 << 31):\n        encoded += bytes([(major_type << 5) + 26])\n        encoded += struct.pack('>I', data)\n    elif data < (2 << 63):\n        encoded += bytes([(major_type << 5) + 27])\n        encoded += struct.pack('>Q', data)\n    else:\n        # TODO Handle values in this range\n        raise ValueError\n\n    return encoded\n\ndef encode(data):\n    encoded = b''\n\n    if isinstance(data, list) or isinstance(data, tuple):\n        encoded += _encode_int(len(data), 4)\n        for item in data:\n            encoded += encode(item)\n        return encoded\n\n    if isinstance(data, dict):\n        encoded += _encode_int(len(data), 5)\n        for key, value in data.items():\n            encoded += encode(key)\n            encoded += encode(value)\n        return encoded\n\n    simple = {\n        False: 20,\n        True: 21,\n        None: 22,\n    }\n\n    if data is True or data is False or data is None:\n        encoded += bytes([(7 << 5) + simple[data]])\n        return encoded\n\n    if isinstance(data, int):\n        if data < 0:\n            data = -1 - data\n            major_type = 1\n        else:\n            major_type = 0\n\n        encoded += _encode_int(data, major_type)\n        return encoded\n\n    if isinstance(data, bytes):\n        encoded += _encode_int(len(data), 2)\n        encoded += data\n        return encoded\n\n    if isinstance(data, str):\n        encoded += _encode_int(len(data), 3)\n        encoded += data.encode('utf8')\n        return encoded\n\n    if isinstance(data, collections.abc.Iterable):\n        encoded += bytes([(4 << 5) + 31])\n        for item in data:\n            encoded += encode(item)\n        encoded += b'\\xff'\n        return encoded\n\n    if isinstance(data, float):\n        encoded += bytes([(7 << 5) + 27])\n        encoded += struct.pack('>d', data)\n        return encoded\n\n\ndef _decode_int(value, data):\n    # Returns (bytes consumed including the initial byte, decoded integer).\n    if 0 <= value <= 23:\n        return (1, value)\n    elif value == 24:\n        return (2, struct.unpack('>B', data[:1])[0])\n    elif value == 25:\n        return (3, struct.unpack('>H', data[:2])[0])\n    elif value == 26:\n        return (4, struct.unpack('>I', data[:4])[0])\n    elif value == 27:\n        return (5, struct.unpack('>Q', data[:8])[0])\n\n\ndef _decode_value(offset, data):\n    major_type = data[offset] >> 5\n    extra = data[offset] & ~(major_type << 5)\n    value = None\n\n    if major_type == 0:\n        offset_inc, value = _decode_int(extra, data[1 + offset:])\n        offset += offset_inc\n\n    if major_type == 1:\n        offset_inc, value = _decode_int(extra, data[1 + offset:])\n        offset += offset_inc\n        value = -1 - value\n\n    if major_type == 2:\n        offset_inc, value_len = _decode_int(extra, data[1 + offset:])\n        offset += 1\n        value = data[offset:offset + value_len]\n        offset += offset_inc + value_len - 1\n\n    if major_type == 3:\n        offset_inc, value_len = _decode_int(extra, data[1 + offset:])\n        offset += 1\n        value = data[offset:offset + value_len].decode('utf8')\n        offset += offset_inc + value_len - 1\n\n    if major_type == 4:\n        value = []\n        if extra == 31:\n            offset += 1\n            while data[offset] != 0xFF:\n                offset, item = _decode_value(offset, data)\n                value.append(item)\n        else:\n            offset_inc, value_len = _decode_int(extra, data[1 + offset:])\n            offset += offset_inc\n            for i in range(0, value_len):\n                offset, item = _decode_value(offset, data)\n                value.append(item)\n\n    if major_type == 5:\n        offset_inc, value_len = _decode_int(extra, data[1 + offset:])\n        offset += offset_inc\n        value = {}\n        for i in range(0, value_len):\n            offset, key = _decode_value(offset, data)\n            offset, item = _decode_value(offset, data)\n            value[key] = item\n\n    simple = {\n        20: False,\n        21: True,\n        22: None,\n    }\n\n    if major_type == 7:\n        if extra == 27:\n            # struct.unpack returns a tuple; take the single float out of it\n            value = struct.unpack('>d', data[offset:offset + 8])[0]\n            offset += 8 + 1\n\n        if extra in simple:\n            value = simple[extra]\n            offset += 1\n\n    return (offset, value)\n\n\ndef decode(data):\n    return _decode_value(0, data)[1]\n","sub_path":"pycbor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"560534509","text":"import pandas as pd\nimport numpy as np\nfrom django.shortcuts import render\n# Create your views here.\n\ndef mapa(request):\n    # Coquimbo schools data file\n    coquimbo=pd.read_table('core/datos/coquimbo.csv', sep=',',decimal=\",\")\n    # the arrays to work with\n    seleccion=[]\n    cord=[]\n    nombres_colegios=[]\n    # options for the select widget\n    ciudades=coquimbo['NOM_COM_RBD'].unique()\n    \n    # when the request is a POST\n    if request.method == 'POST':\n        # work on a copy of the coquimbo dataframe\n        seleccion=coquimbo[:]\n        \n        # the urban/rural filter options\n        if request.POST.get('es_urbano') == 'urbano':\n            seleccion=seleccion[seleccion['RURAL_RBD']==0]\n        \n        elif request.POST.get('es_urbano') == 'rural':\n            seleccion=seleccion[seleccion['RURAL_RBD']==1]\n\n        # handle the selected cities\n\n        if request.POST.getlist('ciudades', 'null') != 'null':\n            \n            lista_ciudades=request.POST.getlist('ciudades')\n\n            seleccion['check_ciudad']=seleccion['NOM_COM_RBD'].isin(lista_ciudades)\n            seleccion=seleccion[seleccion['check_ciudad']]\n            \n\n        \n\n        latitud=seleccion['LATITUD']\n        longitud=seleccion['LONGITUD']\n\n        cord=zip(latitud.to_list(), longitud.to_list())\n        nombres_colegios=seleccion['NOM_RBD']\n        \n\n    context={ 'cord': cord,\n            'nombres':nombres_colegios,\n            'ciudades':ciudades}\n\n\n    return render(request, 'modulos/mapa.html', context)","sub_path":"mapa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}