diff --git "a/6351.jsonl" "b/6351.jsonl" new file mode 100644--- /dev/null +++ "b/6351.jsonl" @@ -0,0 +1,675 @@ +{"seq_id":"634915787","text":"from pyvdp.visadirect import VisaDirectDispatcher\n\n\ndef send(data):\n \"\"\"Submits a MultiReverseFundsTransactions request.\n\n :param data: **Required**. Instance of :func:`~pyvdp.visadirect.fundstransfer.MultiReverseFundsTransactionsModel`\n :return: Dictionary with VDP API response.\n \n **Usage:**\n \n .. code:: python\n \n from pyvdp.visadirect import (CardAcceptorModel, \n OriginalDataElementsModel)\n \n from pyvdp.visadirect.fundstransfer import multireversefundstransactions, MultiReverseFundsTransactionsModel\n \n address_kwargs = {\n \"country\": \"USA\",\n \"county\": \"00\",\n \"state\": \"CA\",\n \"zipCode\": \"94454\" \n }\n \n card_acceptor_kwargs = {\n \"amount\": \"100.00\",\n \"idCode\": \"5678\",\n \"name\": \"Mr Smith\",\n \"terminalId\": \"1234\",\n \"address\": CardAcceptorModel.Address(**address_kwargs)\n }\n \n ode_kwargs = {\n \"acquiringBin\": \"408999\",\n \"approvalCode\": \"1ABCDE\",\n \"systemsTraceAuditNumber\": \"228112\",\n \"transmissionDateTime\": \"2017-04-21T03:56:17\", \n }\n \n request = {\n \"amount\": \"100.00\",\n \"cardAcceptor\": CardAcceptorModel(**card_acceptor_kwargs),\n \"originalDataElements\": OriginalDataElementsModel(**ode_kwargs),\n \"retrievalReferenceNumber\": \"401010101011\",\n \"senderCardExpiryDate\": \"2020-12\",\n \"senderCurrencyCode\": \"USD\",\n \"senderPrimaryAccountNumber\": \"4485810000000131\",\n \"systemsTraceAuditNumber\": \"101011\",\n \"transactionIdentifier\": \"101010101010\" \n }\n \n data_kwargs = {\n \"acquirerCountryCode\": \"840\",\n \"acquiringBin\": \"408999\",\n \"request\": [\n request\n ] \n }\n \n data = MultiReverseFundsTransactionsModel(**data_kwargs)\n result = multireversefundstransactions.send(data)\n print(result)\n \"\"\"\n c = VisaDirectDispatcher(resource='visadirect',\n api='fundstransfer',\n method='multireversefundstransactions',\n http_verb='POST',\n data=data)\n return c.send()\n\n\ndef get(status_id):\n \"\"\"Fetches a status of previously submitted MultiReverseFundsTransactions request.\n\n Returns a status of :func:`~pyvdp.visadirect.fundstransfer.MultiReverseFundsTransactionsModel` request by \n transaction identifier, returned with 202 response.\n\n :param str status_id: **Required**. Transaction status identifier.\n :return: Dictionary with VDP API response\n \n **Usage:**\n \n .. 
code:: python\n \n from pyvdp.visadirect.fundstransfer import multireversefundstransactions\n \n status_id = '1491819372_186_81_l73c003_VDP_ARM'\n \n result = multireversefundstransactions.get(status_id)\n print(result)\n \"\"\"\n query_string = '/' + status_id\n\n c = VisaDirectDispatcher(resource='visadirect',\n api='fundstransfer',\n method='multireversefundstransactions',\n http_verb='GET',\n query_string=query_string)\n return c.send()\n","sub_path":"pyvdp/visadirect/fundstransfer/multireversefundstransactions.py","file_name":"multireversefundstransactions.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272391478","text":"# -*- coding: utf-8 -*-\nfrom customsort.sort6 import sort\nimport itertools\n\nsortable = 0\nfor l in itertools.permutations('123456', 6):\n if sort((''.join(l))):\n sortable += 1\n\nprint('The count of sorted: %s' % sortable)","sub_path":"examples/all_permutations.py","file_name":"all_permutations.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"1282440","text":"import socket, videosocket\r\nfrom videofeed import VideoFeed\r\n\r\nclass Server:\r\n\tdef __init__(self):\r\n\t\tself.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\tself.server_socket.bind((\"\", 9999))\r\n\t\tself.server_socket.listen(5)\r\n\t\tself.videofeed = VideoFeed(1, \"server\", 1)\r\n\t\tprint(\"TCP server waiting for client on port 9999\")\r\n\r\n\tdef start(self):\r\n\t\twhile True:\r\n\t\t\tclient_socket, address = self.server_socket.accept()\r\n\t\t\tprint(\"Got a connection from\", address)\r\n\t\t\tvsock = videosocket.videosocket(client_socket)\r\n\t\t\twhile True:\r\n\t\t\t\tframe = vsock.videoreceive()\r\n\t\t\t\tself.videofeed.set_frame(frame)\r\n\t\t\t\tframe = self.videofeed.get_frame()\r\n\t\t\t\tvsock.videosend(frame)\r\n\r\nif __name__ == \"__main__\":\r\n\tserver = Server()\r\n\tserver.start()","sub_path":"temp/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"379374496","text":"from .BaseHandler import BaseHandler\nfrom tornado.web import asynchronous\nfrom models import Attendances, Users\nfrom controller.decorators import is_authenticated\n\n\nclass AttendanceHandler(BaseHandler):\n\n @is_authenticated()\n @asynchronous\n async def get(self, user_id=None):\n attendances = await Attendances.find({\"user_token\": self.current_user['cardToken']})\n\n if self.current_user['role'] == \"ROLE_STUDENT\":\n lecturers = await Users.find({\"role\": \"ROLE_LECTURER\"})\n else:\n lecturers = await Users.find({\"role\": \"ROLE_STUDENT\"})\n\n self.render(self.template, attendances=attendances, lecturers=lecturers)\n","sub_path":"controller/handler/AttendanceHandler.py","file_name":"AttendanceHandler.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"477026879","text":"import sys\nsys.stdin = open(\"괄호검사.txt\",\"r\")\n\nT = int(input())\n\nfor t in range(T):\n msg = input()\n bracket_li = [['(',')'], ['{','}']]\n bracket = ''\n\n for m in msg:\n for bl in bracket_li:\n for brk in bl:\n if m == brk:\n bracket += m\n\n stack = []\n result = '0'\n error_tf = False\n \n for b in bracket:\n if b == bracket_li[0][0] or b == bracket_li[1][0]:\n 
stack.append(b)\n else:\n if stack:\n if stack[-1]==bracket_li[0][0] and b==bracket_li[0][1]:\n stack.pop()\n elif stack[-1]==bracket_li[1][0] and b==bracket_li[1][1]:\n stack.pop()\n else:\n error_tf = True\n break\n else:\n error_tf = True\n break\n\n if not error_tf:\n if not stack:\n result = '1'\n\n print(f\"#{t+1} {result}\")","sub_path":"Algorithm/19.02/190214/1_괄호검사.py","file_name":"1_괄호검사.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"224489451","text":"import configparser\nimport json\nimport logging\nimport logging.config\nimport os\n\nCONFIG_FILE_PATH = '/etc/softfire/sdn-proxy.ini'\n\n_logger = dict()\n\n\ndef get_logger(name):\n print(\"config_file: %s\" % CONFIG_FILE_PATH)\n logging.config.fileConfig(CONFIG_FILE_PATH)\n if _logger.get(name) is None:\n _logger[name] = logging.getLogger(\"eu.softfire.%s\" % name)\n return _logger[name]\n\n\ndef get_config_parser() -> configparser.ConfigParser:\n \"\"\"\n \"\"\"\n config = configparser.ConfigParser()\n print(\"config_file: %s\" % CONFIG_FILE_PATH)\n if os.path.exists(CONFIG_FILE_PATH) and os.path.isfile(CONFIG_FILE_PATH):\n config.read(CONFIG_FILE_PATH)\n return config\n else:\n logging.error(\"Config file not found, create %s\" % CONFIG_FILE_PATH)\n exit(1)\n\n\ndef get_config(section, key, default=None, config: configparser.ConfigParser = None):\n if not config:\n config = get_config_parser()\n if default is None:\n return config.get(section=section, option=key)\n try:\n return config.get(section=section, option=key)\n except configparser.NoOptionError:\n return default\n\n\ndef load_experiments(config: configparser.ConfigParser = None) -> dict:\n filename = get_config(\"sdn\", \"experiments-storage-filename\", default=\"/etc/softfire/sdn-proxy-experiments.json\",\n config=config)\n try:\n if os.path.exists(filename) and os.path.isfile(filename):\n with open(filename, 'r') as f:\n return json.loads(f.read())\n else:\n return dict()\n except ValueError:\n return dict()\n\n\ndef store_experiments(experiments: dict, config: configparser.ConfigParser = None):\n if experiments:\n filename = get_config(\"sdn\", \"experiments-storage-filename\", default=\"/etc/softfire/sdn-proxy-experiments.json\",\n config=config)\n with open(filename, 'w') as f:\n f.write(json.dumps(experiments))\n\n\ndef make_jsonrpc_error(responseid, code, message, version=\"2.0\"):\n return dict(id=responseid, error=dict(message=message, code=code), jsonrpc=version)\n\n\ndef make_jsonrpc_response(responseid, result, version=\"2.0\"):\n return dict(id=responseid, jsonrpc=version, result=result)\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"318101113","text":"# coding:utf-8\n\nimport sys\nsys.path.append(\"/class/core\")\nimport public\n\nfrom flask import Blueprint, render_template\n\ncrontab = Blueprint('crontab', __name__, template_folder='templates')\n\n\n@crontab.route(\"/\")\ndef index():\n return render_template('default/crontab.html')\n\n\n@crontab.route(\"/list\", methods=['GET', 'POST'])\ndef list():\n data = []\n return public.getJson({})\n","sub_path":"route/crontab.py","file_name":"crontab.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"167360039","text":"#!/usr/bin/env python\nimport pyeapi\n\neapi_conn 
= pyeapi.connect_to(\"pynet-sw2\")\n\ninterfaces = eapi_conn.enable(\"show interfaces\")\ninterfaces = interfaces[0]['result']['interfaces']\n\ndata = {}\nfor interface, value in interfaces.items():\n interface_counters = value.get('interfaceCounters', {})\n data[interface] = (interface_counters.get('inOctets', '0'), interface_counters.get('outOctets', '0'))\n\nprint(\"{:20} {:<20} {:<20}\".format('Interface:', 'inOctets', 'outOctets'))\nfor interface, octets in sorted(data.items()):\n print(\"{:20} {:<20} {:<20}\".format(interface, octets[0], octets[1]))\n","sub_path":"class5/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"46233495","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport math\r\nimport time\r\nimport scipy.io\r\nfrom nnfunctions import *\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = np.loadtxt(open(\"HAR_privatized_d1_5_full.csv\", \"rb\"), dtype= float, delimiter=\",\", skiprows=1)\r\n\r\n\r\n\r\n# Y1 action, Y2 identity\r\nN_train = 8000\r\nN_validate = 1000\r\nX_data_train = data[0:N_train, 0:561]\r\n\r\nIden_data_train = data[0:N_train, 561]\r\n\r\nAct_data_train = data[0:N_train, 562]\r\n\r\n\r\n\r\nAct_data_train = np.reshape(Act_data_train, (Act_data_train.shape[0], 1))\r\nIden_data_train = np.reshape(Iden_data_train, (Iden_data_train.shape[0], 1))\r\n\r\nXtrain = X_data_train[0: N_train, :]\r\n\r\nY1train = Act_data_train[0: N_train, :]\r\nY2train = Iden_data_train[0: N_train, :]\r\n\r\nXvalidate = X_data_train[N_train - N_validate: N_train, :]\r\nY1validate = Act_data_train[N_train - N_validate: N_train, :]\r\nY2validate = Iden_data_train[N_train - N_validate: N_train, :]\r\n\r\n\r\n\r\n\r\nX_data_test = data[N_train:, 0:561]\r\nN_test = X_data_test.shape[0]\r\nIden_data_test = data[N_train:, 561]\r\n\r\nAct_data_test = data[N_train:, 562]\r\n\r\n\r\n\r\nAct_data_test = np.reshape(Act_data_test, (Act_data_test.shape[0], 1))\r\nIden_data_test = np.reshape(Iden_data_test, (Iden_data_test.shape[0], 1))\r\nXtest = X_data_test\r\nY1test = Act_data_test\r\nY2test = Iden_data_test\r\n#print(np.max(Xtest))\r\n\r\nepoch = 450\r\nmbsize = 128\r\nnoise_seed_dim =100\r\nnum_iter = int(epoch * (N_train)/mbsize)\r\nprint(num_iter)\r\n\r\n\r\nN_act = 6\r\nN_iden = 30\r\n\r\nN_feature = X_data_train.shape[1]\r\nN_all = N_train + N_test\r\n\r\n\r\na_loss = []\r\np_loss = []\r\np_dist = []\r\n\r\n\r\nlearning_ratea = 0.00005\r\nnoise_seed_dim =100\r\n\r\nX = tf.placeholder(dtype = tf.float32, shape = [None, 561])\r\nY_act = tf.placeholder(dtype = tf.float32, shape = [None, N_act])\r\nY_iden = tf.placeholder(dtype = tf.float32, shape = [None, N_iden])\r\nY_onehot = tf.placeholder(tf.float32, shape=[None, N_iden])\r\nZ = tf.placeholder(tf.float32, shape=[None, noise_seed_dim], name='Z')\r\nkeep_prob = tf.placeholder(tf.float32)\r\npenalty_rate = tf.placeholder(tf.float32)\r\n\r\n\r\n\r\ndef minibatch(X, action, identity,mbsize, N_sample):\r\n idx = np.arange(N_sample)\r\n np.random.shuffle(idx)\r\n idx = idx[0:mbsize]\r\n X_mb = [X[i, :] for i in idx]\r\n Y_act = [action[i, :] for i in idx]\r\n Y_iden = [identity[i, :] for i in idx]\r\n return np.asarray(X_mb), np.asarray(Y_act), np.asarray(Y_iden)\r\n\r\n\r\n\r\ndef privatizernn(data, noise_seed, structure=[512, 512, 512], alpha=0.1, keep_prob = 1.0):\r\n with tf.variable_scope(\"privatizer\"):\r\n input = tf.concat(values=[data, noise_seed], axis=1)\r\n fc1 = fc_bn_leakyRelu(input, structure[0], 
alpha=alpha, keep_prob = keep_prob)\r\n\r\n fc2 = fc_bn_leakyRelu(fc1, structure[1], alpha=alpha, keep_prob = keep_prob)\r\n fc3 = fc_bn_leakyRelu(fc2, structure[2], alpha=alpha, keep_prob = keep_prob)\r\n x_hat = tf.layers.dense(fc3, data.shape[1], activation=None)\r\n return x_hat\r\n\r\n# def adversarynn(data, num_out, structure = [512, 512, 512], alpha = 0.1, keep_prob = 1.0):\r\n# with tf.variable_scope(\"adversary\"):\r\n# fc1_a = fc_bn_leakyRelu(data, structure[0], alpha = alpha, keep_prob = keep_prob)\r\n# fc2_a = fc_bn_leakyRelu(fc1_a, structure[1], alpha=alpha, keep_prob = keep_prob)\r\n# fc3_a = fc_bn_leakyRelu(fc2_a, structure[2], alpha=alpha, keep_prob = keep_prob)\r\n# h_hat = tf.layers.dense(fc3_a, num_out, activation=None)\r\n# #y_hat = tf.nn.softmax(h_hat)\r\n# return h_hat\r\n\r\ndef adversarynn(data, num_out, structure = [512, 512, 256, 128], alpha = 0.1, keep_prob = 1.0):\r\n with tf.variable_scope(\"adversary\"):\r\n fc1_a = fc_bn_leakyRelu(data, structure[0], alpha = alpha, keep_prob = keep_prob)\r\n fc2_a = fc_bn_leakyRelu(fc1_a, structure[1], alpha=alpha, keep_prob = keep_prob)\r\n fc3_a = fc_bn_leakyRelu(fc2_a, structure[2], alpha=alpha, keep_prob = keep_prob)\r\n fc4_a = fc_bn_leakyRelu(fc3_a, structure[3], alpha=alpha, keep_prob = keep_prob)\r\n h_hat = tf.layers.dense(fc4_a, num_out, activation=None)\r\n #y_hat = tf.nn.softmax(h_hat)\r\n return h_hat\r\n\r\ndef test_ff_nn(Xtest, Ytest, Size):\r\n #print('Xtest:', Xtest)\r\n Ytest_onehot = tf.squeeze(tf.one_hot(indices=Ytest, depth=N_iden), [1])\r\n Ytest_onehot = Ytest_onehot.eval()\r\n Ztest = np.random.normal(0.0, 1.0, [Size, noise_seed_dim])\r\n ytest = sess.run(y, feed_dict={X: Xtest, Y_onehot: Ytest_onehot, Z: Ztest, keep_prob: 1.0})\r\n\r\n #xhattest = sess.run(X_hat, feed_dict={X: Xtest, Y_onehot: Ytest_onehot, Z: Ztest, keep_prob: 1.0})\r\n ydec = np.argmax(ytest, axis=1)\r\n ytrue = np.reshape(Ytest, [Size])\r\n err_rate = np.mean(ytrue != ydec)\r\n print(ydec)\r\n print(ytrue)\r\n #print(err_rate)\r\n accuracy = 1 - err_rate\r\n #dist = sess.run(distortion, feed_dict={X: batchX, Z: batchZ})\r\n #print('Accuracy: %.3f, Distortion: %.3f' % (accuracy, dist))\r\n return accuracy#, xhattest, dist, ydec\r\n\r\ny = adversarynn(X, N_iden, keep_prob = keep_prob)\r\n\r\n\r\nadversary_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_onehot, logits=y))\r\n\r\n\r\nt_vars = tf.trainable_variables()\r\na_vars = [var for var in t_vars if var.name.startswith(\"adversary\")]\r\np_vars = [var for var in t_vars if var.name.startswith(\"privatizer\")]\r\n\r\na_train = tf.train.AdamOptimizer(learning_ratea).minimize(adversary_loss, var_list=a_vars)\r\n\r\n# Add ops to save and restore all the variables.\r\nsaver = tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n\r\n sess.run(tf.global_variables_initializer())\r\n for iter in range (num_iter):\r\n start_time = time.time()\r\n batchX, batchY1, batchY2 = minibatch(Xtrain, Y1train, Y2train, mbsize, N_train)\r\n batchY_onehot = tf.one_hot(indices = batchY2, depth = N_iden)\r\n batchY_onehot = tf.squeeze(batchY_onehot, [1])\r\n batchZ = np.random.normal(0.0, 1.0, [mbsize, noise_seed_dim])\r\n\r\n #Training adversary\r\n _, A_loss_curr = sess.run([a_train, adversary_loss],\r\n feed_dict={X: batchX, Y_onehot: batchY_onehot.eval(), Z: batchZ, keep_prob: 0.5})\r\n\r\n\r\n if(iter % 100 == 0):\r\n #print('iter:', iter)\r\n #print('prate:', prate)\r\n #print('Adversary loss: ', A_loss_curr)\r\n #print('Privatizer loss:', P_loss_curr)\r\n #print('Distortion:', 
p_distortion)\r\n #print('privatizer_dist:', sess.run(privatizer_dist, feed_dict={X: batchX, Z: batchZ, penalty_rate: prate}))\r\n duration = time.time() - start_time\r\n print('loss: iter=%d, loss=%f, time=%.3f' % (iter, A_loss_curr, duration))\r\n a_loss.append(A_loss_curr)\r\n #p_loss.append(P_loss_curr)\r\n #p_dist.append(p_distortion)\r\n\r\n acc_final= test_ff_nn(Xtest, Y2test, N_test)\r\n # Save the variables to disk.\r\n save_path = saver.save(sess, \"/tmp/HAR/Iden/model_privatized1_5.ckpt\")\r\n print(\"Model saved in path: %s\" % save_path)\r\n print(acc_final)\r\n result = {}\r\n result['acc_final'] = acc_final\r\n\r\n a_loss_arr = np.array(a_loss)\r\n scipy.io.savemat('HAR_1_5iden', result)\r\n\r\n plt.figure(1)\r\n\r\n plt.plot(a_loss_arr, label='Adversary training loss')\r\n #plt.plot(p_loss_arr, label='Privatizer training loss')\r\n\r\n plt.title(\"Cross Entropy\")\r\n plt.legend()\r\n plt.show()\r\n","sub_path":"HAR/GAPHARIden.py","file_name":"GAPHARIden.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"608081666","text":"import pygame\nfrom pygame.locals import *\nimport sys\n\nimport bullet\n\n\nclass Player(pygame.sprite.Sprite):\n \"\"\"自機\"\"\" \n speed = 8\n display = Rect(0, 0, 1280, 720)\n\n def __init__(self, _display):\n # self.display = _display\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.rect = self.image.get_rect()\n self.rect.bottom = self.display.bottom\n\n def update(self):\n pressed_keys = pygame.key.get_pressed()\n\n if pressed_keys[K_LEFT]:\n self.rect.move_ip(-self.speed, 0)\n if pressed_keys[K_RIGHT]:\n self.rect.move_ip(self.speed, 0)\n if pressed_keys[K_UP]:\n self.rect.move_ip(0, -self.speed)\n if pressed_keys[K_DOWN]:\n self.rect.move_ip(0, self.speed)\n\n self.rect.clamp_ip(self.display)\n\n if pressed_keys[K_SPACE]:\n bullet.Bullet((self.rect.right, self.rect.centery), self.display)\n\n def display_set(self, _display):\n self.display = _display\n","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631044764","text":"\ndef main(j, args, params, tags, tasklet):\n doc = args.doc\n\n out = list()\n\n dockerhosts = j.apps.cockpit.atyourservice.listServices(role='node')\n\n if not any(dockerhosts.values()):\n out = 'no Dockerhosts running on this enviroment'\n params.result = (out, args.doc)\n return params\n\n out.append('||Instance||STATUS||AYS Repo||')\n for ayspath, dockerhostinstances in dockerhosts.items():\n for dockerhost in dockerhostinstances.values():\n out.append('|[%s|/cockpit/dockerhost?ayspath=%s&dockerhost=%s]|N/A|%s|' % (dockerhost.instance,\n ayspath, dockerhost.instance,\n ayspath))\n out = '\\n'.join(out)\n params.result = (out, doc)\n\n return params\n","sub_path":"apps/Cockpit/.macros/wiki/dockerhosts/3_dockerhosts.py","file_name":"3_dockerhosts.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"646836916","text":"from pathlib import Path, PurePath\nfrom ..SMARTAttrsNames import SMARTAttrsNames\nfrom .. 
import database\nfrom plumbum import cli\nfrom ..database import TableSpecGen, tablesSchemas, DrivesStatsTableSpec, tablesNames, DrivesParamsTableSpec\nfrom collections import defaultdict\nfrom pathlib import Path\n\nclass CPPSchemaGen(cli.Application):\n\t\"\"\"Generates the files needed for native importer.\"\"\"\n\t\n\t\n\tdef main(self, generatedHeadersDir=\"./faster_importer/src/generated\"):\n\t\tgeneratedHeadersDir=Path(generatedHeadersDir)\n\t\t\n\t\tfor hN, hT in self.generateHeaders().items():\n\t\t\t(generatedHeadersDir / hN).write_text(hT)\n\t\t\n\t\n\tdef generateHeaders(self):\n\t\tspecsSmart = super(tablesSchemas[\"smart\"].__class__.__mro__[1], tablesSchemas[\"smart\"]).genSpecs()\n\t\tspecsTemp = super(tablesSchemas[\"csvImportTemp\"].__class__.__mro__[1], tablesSchemas[\"csvImportTemp\"]).genSpecs()\n\t\tfrom pprint import pprint\n\t\t\n\t\tres=defaultdict(list)\n\t\t\n\t\trecordsQueryVarName=\"recordsQuery\"\n\t\trowVarName=\"row\"\n\t\trowVarName1=\"r\"\n\t\t\n\t\tres[\"tableColumnsNamesVars.h\"]=defaultdict(list);\n\t\t\n\t\ttypesRemap={\n\t\t\t\"INTEGER (1)\": \"uint8_t\",\n\t\t\t\"INTEGER (2)\": \"uint16_t\",\n\t\t\t\"INTEGER (4)\": \"uint32_t\",\n\t\t\t\"INTEGER (8)\": \"int64_t\", # FUCK,SQLite doesn't have unsigned type\n\t\t};\n\t\t\n\t\tfor line in zip(specsSmart, specsTemp):\n\t\t\tfieldsPairsInLine=list(zip(*line))\n\t\t\tfor friendly, raw in fieldsPairsInLine:\n\t\t\t\t#res+=[str(friendly), ]\n\t\t\t\tfriendlyNoDashes=friendly[0].replace(\"-\", \"_\")\n\t\t\t\tres[\"tableColumnsNamesStrings.h\"].append('\"'+raw[0]+'\"')\n\t\t\t\tres[\"tableColumnsNamesVars1.h\"].append('{1}.{0}'.format(friendlyNoDashes, rowVarName))\n\t\t\t\tres[\"tableColumnsNames.h\"].append(\"`\"+friendly[0]+\"`\")\n\t\t\t\tres[\"tableColumnsNamesVars.h\"][friendly[1]].append(friendlyNoDashes);\n\t\t\t\tres[\"tableColumnsPlaceholders.h\"].append(\":{0}\".format(friendlyNoDashes))\n\t\t\t\tres[\"variablesIndices.h\"].append('auto {0}Idx=sqlite3_bind_parameter_index({1}.mStmtPtr, \":{0}\");'.format(friendlyNoDashes, recordsQueryVarName))\n\t\t\t\tres[\"variablesBind.h\"].append('{1}.bind({0}Idx, {2}.{0});'.format(friendlyNoDashes, recordsQueryVarName, rowVarName1))\n\t\t\n\t\tres[\"countOfSMARTColumns.h\"] = str(len(res[\"tableColumnsNamesStrings.h\"]));\n\t\tres[\"tableColumnsNamesStrings.h\"]=\", \".join(res[\"tableColumnsNamesStrings.h\"]);\n\t\tres[\"tableColumnsNamesVars1.h\"]=\", \".join(res[\"tableColumnsNamesVars1.h\"]);\n\t\tres[\"tableColumnsPlaceholders.h\"]='R\"smartAttrsNames(\\n'+\", \".join(res[\"tableColumnsPlaceholders.h\"])+'\\n)smartAttrsNames\"';\n\t\tres[\"tableColumnsNames.h\"]='R\"smartAttrsNames(\\n'+\", \".join(res[\"tableColumnsNames.h\"])+'\\n)smartAttrsNames\"';\n\t\tres[\"variablesIndices.h\"]=\"\\n\".join(res[\"variablesIndices.h\"]);\n\t\tres[\"variablesBind.h\"]=\"\\n\".join(res[\"variablesBind.h\"]);\n\t\t\n\t\tres[\"tableColumnsNamesVars.h\"]=\"\\n\".join((typesRemap[k]+\" \"+\", \".join(v)+\";\") for k, v in res[\"tableColumnsNamesVars.h\"].items());\n\t\t\n\t\treturn res\n\t\t\n\t\t#print(list(TableSpecGen.genTableColumnsSpecsLines(specsTemp, \"t2\", specsSmart)))\n\n\nif __name__ == \"__main__\":\n\tCPPSchemaGen.run()","sub_path":"backblaze_analytics/tools/nativeImporterCodeGen.py","file_name":"nativeImporterCodeGen.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"371854902","text":"# Copyright (c) The Diem Core Contributors\n# SPDX-License-Identifier: 
Apache-2.0\n\nfrom dataclasses import dataclass, asdict\nfrom typing import Dict, List, Any, Tuple\nfrom .app import App\nfrom .json_input import JsonInput\nfrom .... import offchain\nimport falcon, json, traceback, logging, pathlib\n\n\nbase_path: str = str(pathlib.Path(__file__).resolve().parent.joinpath(\"static\"))\n\n\n@dataclass\nclass LoggerMiddleware:\n logger: logging.Logger\n\n def process_request(self, req, resp): # pyre-ignore\n self.logger.debug(\"%s %s\", req.method, req.relative_uri)\n\n def process_response(self, req, resp, *args, **kwargs): # pyre-ignore\n tc = req.get_header(\"X-Test-Case\")\n # test case format is ::, only log the test method name\n test_case = \"[%s] \" % tc.split(\"::\")[-1] if tc else \"\"\n self.logger.info(\"%s%s %s - %s\", test_case, req.method, req.relative_uri, resp.status)\n\n\ndef rest_handler(fn: Any): # pyre-ignore\n def wrapper(self, req, resp, **kwargs): # pyre-ignore\n try:\n if req.content_length:\n try:\n data = json.load(req.stream)\n except json.decoder.JSONDecodeError:\n raise ValueError(\"request body is invalid JSON\")\n self.app.logger.debug(\"request body: %s\", data)\n else:\n data = {}\n status, body = fn(self, input=JsonInput(data), **kwargs)\n resp.status = status\n resp.body = json.dumps(body)\n except ValueError as e:\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({\"error\": str(e), \"stacktrace\": traceback.format_exc()})\n\n return wrapper\n\n\n@dataclass\nclass Endpoints:\n app: App\n\n @rest_handler\n def on_post_accounts(self, input: JsonInput) -> Tuple[str, Dict[str, str]]:\n return (falcon.HTTP_201, self.app.create_account(input))\n\n @rest_handler\n def on_post_payments(self, account_id: str, input: JsonInput) -> Tuple[str, Dict[str, Any]]:\n return (falcon.HTTP_202, self.app.create_account_payment(account_id, input))\n\n @rest_handler\n def on_post_account_identifiers(self, account_id: str, input: JsonInput) -> Tuple[str, Dict[str, Any]]:\n return (falcon.HTTP_200, self.app.create_account_identifier(account_id, input))\n\n @rest_handler\n def on_get_balances(self, account_id: str, input: JsonInput) -> Tuple[str, Dict[str, int]]:\n return (falcon.HTTP_200, self.app.get_account_balances(account_id))\n\n @rest_handler\n def on_get_events(self, account_id: str, input: JsonInput) -> Tuple[str, List[Dict[str, Any]]]:\n return (falcon.HTTP_200, [asdict(e) for e in self.app.get_account_events(account_id)])\n\n @rest_handler\n def on_get_kyc_sample(self, input: JsonInput) -> Tuple[str, Dict[str, str]]:\n return (falcon.HTTP_200, asdict(self.app.kyc_sample))\n\n def on_post_offchain(self, req: falcon.Request, resp: falcon.Response) -> None:\n request_id = req.get_header(offchain.X_REQUEST_ID)\n resp.set_header(offchain.X_REQUEST_ID, request_id)\n request_sender_address = req.get_header(offchain.X_REQUEST_SENDER_ADDRESS)\n input_data = req.stream.read()\n\n resp_obj = self.app.offchain_api(request_id, request_sender_address, input_data)\n if resp_obj.error is not None:\n resp.status = falcon.HTTP_400\n resp.body = self.app.jws_serialize(resp_obj)\n\n def on_get_index(self, req: falcon.Request, resp: falcon.Response) -> None:\n raise falcon.HTTPMovedPermanently(\"/index.html\")\n\n\ndef falcon_api(app: App, disable_events_api: bool = False) -> falcon.API:\n endpoints = Endpoints(app=app)\n api = falcon.API(middleware=[LoggerMiddleware(logger=app.logger)])\n api.add_static_route(\"/\", base_path)\n api.add_route(\"/\", endpoints, suffix=\"index\")\n api.add_route(\"/accounts\", endpoints, suffix=\"accounts\")\n for 
res in [\"balances\", \"payments\", \"account_identifiers\", \"events\"]:\n if res == \"events\" and disable_events_api:\n continue\n api.add_route(\"/accounts/{account_id}/%s\" % res, endpoints, suffix=res)\n api.add_route(\"/kyc_sample\", endpoints, suffix=\"kyc_sample\")\n api.add_route(\"/v2/command\", endpoints, suffix=\"offchain\")\n app.start_bg_worker_thread()\n return api\n","sub_path":"src/diem/testing/miniwallet/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"497822751","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(9, GPIO.OUT)\n\nstatus = False\n\ndef onSwitch(channel):\n global status\n if status: \n status = False\n GPIO.output(9, GPIO.LOW)\n else: \n status = True\n GPIO.output(9, GPIO.HIGH)\n \n print(\"Channel %s went high\" % channel)\n print(\"Current status is\", status)\n\n\n\nGPIO.add_event_detect(4, GPIO.RISING, callback=onSwitch, bouncetime=200)\n\ntry:\n while True:\n time.sleep(1)\nfinally:\n GPIO.cleanup()","sub_path":"prototypes/improvisedSwitchWithLED.py","file_name":"improvisedSwitchWithLED.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"54528427","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import timedelta\nimport random\nfrom odoo import models, fields, api\n\nclass Course(models.Model):\n _name = 'busdemoapp.bus'\n _description = \"Bus_Demo_App Bus\"\n\n\n name = fields.Char(string=\"Title\", required=True)\n image = fields.Binary()\n\n\n @api.model\n def notify_student(self, notification):\n notifications = []\n print(notification)\n students = self.env['busdemoapp.students'].search([])\n for user in students:\n notif = notification\n notifications.append([(self._cr.dbname, 'image.data', user.partner_id.id), notif])\n if len(notifications) > 0:\n self.env['bus.bus'].sendmany(notifications)\n\n\nclass Student(models.Model):\n _name = 'busdemoapp.students'\n _description = \"Bus_Demo_App Student\"\n\n partner_id = fields.Many2one('res.partner', string=\"Student List\")\n\n @api.model\n def add_member(self, partner_id):\n print(partner_id)\n student = self.search([('partner_id','=',partner_id)])\n print(student)\n if not student:\n student = self.create({\n 'partner_id': partner_id\n })\n return student\n","sub_path":"Bus_Demo_App/models/branch.py","file_name":"branch.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"42659919","text":"#!/usr/bin/env python\n\"\"\" collections of utility functions \"\"\"\n\nimport sys\nimport os\nimport csv\nimport numpy as np\nimport scipy.io.wavfile\nimport matplotlib.pyplot as plt\nfrom sklearn.learning_curve import learning_curve\n\nclass Instance(object):\n \"\"\"\n Instance class represents set of raw data collected per data instance\n \"\"\"\n def __init__(self, dir):\n self.audio = self._load_audio(dir)\n self.touch = self._load_touch_data(dir)\n self.info = self._load_instance_info(dir)\n\n def _load_audio(self, dir):\n \"\"\" load audio data\n param dir: a path to an instance directory\n return: audio data\n \"\"\"\n rate, wav = scipy.io.wavfile.read(os.path.join(dir, \"audio.wav\"))\n return wav\n \n def _load_touch_data(self, dir):\n \"\"\" load touch data\n param dir: a path to an instance 
directory\n return : a dictionary contains touch data\n \"\"\"\n with open(os.path.join(dir, \"touch.csv\"), \"rU\") as f:\n reader = csv.DictReader(f)\n for touch in reader:\n for key in touch.keys():\n touch[key] = float(touch[key])\n break\n return touch\n \n def _load_instance_info(self, dir):\n \"\"\" load instance info from a directory path\n param dir: a path to an instance directory\n return: a dictionary contains basic instance information\n \"\"\"\n info = {}\n user_dirnames = os.path.basename(os.path.dirname(dir)).split(\"-\")\n info[\"surface\"] = user_dirnames[0]\n info[\"user\"] = user_dirnames[1]\n instance_dirnames = os.path.basename(dir).split(\"-\")\n info[\"timestamp\"] = instance_dirnames[0]\n # set None to classlabel if it's test data\n info[\"classlabel\"] = instance_dirnames[1] if len(instance_dirnames) == 2 else None\n return info\n\n\ndef load_instances(dir):\n \"\"\" function for loading raw data instances\n param dir: a path to a data directory (i.e. task_data/train or task_data/test)\n return: a list of data instance objects\n \"\"\"\n instances = []\n for root, dirs, files in os.walk(os.path.join(dir)):\n for filename in files:\n if filename == \"audio.wav\":\n instances.append(Instance(root))\n return instances\n\n\ndef load_labels(instances):\n \"\"\" load class labels\n param instances: a list of data instance objects\n return: class labels mapped to a number (0=pad, 1=knuckle)\n \"\"\"\n y = np.array([{\"pad\": 0, \"knuckle\": 1}[instance.info[\"classlabel\"]] for instance in instances], dtype=int)\n return y\n\n\ndef load_timestamps(instances):\n \"\"\" load timestamps\n param instances: a list of data instance objects\n \"\"\"\n timestamps = [instance.info[\"timestamp\"] for instance in instances]\n return timestamps\n\n\ndef convert_to_classlabels(y):\n \"\"\" convert to classlabels\n param y: mapped class labels\n return: class labels\n \"\"\"\n classlabels = [[\"pad\", \"knuckle\"][y[i]] for i in range(len(y))]\n return classlabels\n\n\ndef write_results(timestamps, classlabels, output):\n \"\"\" write classification results to an output file\n param timestamps: a list of timestamps\n param classlabels: a list of predicted class labels\n return : None\n \"\"\"\n if len(timestamps) != len(classlabels):\n raise Exception(\"The number of timestamps and classlabels doesn't match.\")\n with open(output, \"wb\") as f:\n f.write(\"timestamp,label\\n\")\n for timestamp, classlabel in zip(timestamps, classlabels):\n f.write(timestamp + \",\" + classlabel + \"\\n\")\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold 
cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\ndef main(argv):\n raise Exception(\"This script isn't meant to be run.\")\n\n\nif __name__ == '__main__':\n exit(main(sys.argv[1:]))\n","sub_path":"src/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"387540633","text":"\"\"\"\n Write a function that takes a string and does the same thing as the strip()\n string method. If no other arguments are passed other than the string to\n strip, then whitespace characters will be removed from the beginning and\n end of the string. Otherwise, the characters specified in the second argu-\n ment to the function will be removed from the string.\n\"\"\"\nimport re\n\nsample_str = \" This is a sample String. 
\"\nregex = re.compile(r'^\\s+|\\s+$')\n\n\ndef regex_strip(s, char=None):\n if not char:\n s = regex.sub(\"\", s) # replacing space with \"\" in string s\n else:\n strip_char = re.compile(re.escape(char))\n s = re.sub(strip_char, \"\", s)\n return s\n\n\nprint(sample_str.strip())\nprint(regex_strip(sample_str))\nprint(sample_str.strip() == regex_strip(sample_str))\nprint(regex_strip(sample_str, 'is'))\n","sub_path":"regex_strip_method.py","file_name":"regex_strip_method.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604307482","text":"from gusto import *\nfrom firedrake import (SpatialCoordinate, PeriodicRectangleMesh,\n ExtrudedMesh, Function)\n\n\ndef setup_gw(dirname):\n nlayers = 10 # horizontal layers\n columns = 30 # number of columns\n L = 1.e5\n m = PeriodicRectangleMesh(columns, 1, L, 1.e4, quadrilateral=True)\n dt = 6.0\n\n # build volume mesh\n H = 1.0e4 # Height position of the model top\n mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)\n\n fieldlist = ['u', 'p', 'b']\n timestepping = TimesteppingParameters(dt=dt)\n output = OutputParameters(dirname=dirname+\"/gw_incompressible\", dumplist=['u'], dumpfreq=5)\n parameters = CompressibleParameters()\n\n state = State(mesh, vertical_degree=1, horizontal_degree=1,\n family=\"RTCF\",\n timestepping=timestepping,\n output=output,\n parameters=parameters,\n fieldlist=fieldlist)\n\n # Initial conditions\n u0 = state.fields(\"u\")\n p0 = state.fields(\"p\")\n b0 = state.fields(\"b\")\n\n # z.grad(bref) = N**2\n x, y, z = SpatialCoordinate(mesh)\n N = parameters.N\n bref = z*(N**2)\n\n b_b = Function(b0.function_space()).interpolate(bref)\n b0.interpolate(b_b)\n incompressible_hydrostatic_balance(state, b0, p0)\n state.initialise([('u', u0),\n ('p', p0),\n ('b', b0)])\n\n # Set up forcing\n forcing = IncompressibleForcing(state)\n\n return state, forcing\n\n\ndef run_gw_incompressible(dirname):\n\n state, forcing = setup_gw(dirname)\n dt = state.timestepping.dt\n forcing.apply(dt, state.xn, state.xn, state.xn)\n u = state.xn.split()[0]\n w = Function(state.spaces(\"DG\")).interpolate(u[2])\n return w\n\n\ndef test_gw(tmpdir):\n\n dirname = str(tmpdir)\n w = run_gw_incompressible(dirname)\n assert max(abs(w.dat.data.min()), w.dat.data.max()) < 3e-8\n","sub_path":"tests/test_gw_incompressible.py","file_name":"test_gw_incompressible.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"363966727","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nimport socket\nfrom struct import pack, unpack\nfrom ctypes import *\nfrom enum import Enum, auto, unique\nfrom . 
import DebugAdapter\n\nclass DEBUG_STATUS(Enum):\n\tNO_CHANGE = 0\n\tGO = 1\n\tGO_HANDLED = 2\n\tGO_NOT_HANDLED = 3\n\tSTEP_OVER = 4\n\tSTEP_INTO = 5\n\tBREAK = 6\n\tNO_DEBUGGEE = 7\n\tSTEP_BRANCH = 8\n\tIGNORE_EVENT = 9\n\tRESTART_REQUESTED = 10\n\tREVERSE_GO = 11\n\tREVERSE_STEP_BRANCH = 12\n\tREVERSE_STEP_OVER = 13\n\tREVERSE_STEP_INTO = 14\n\tOUT_OF_SYNC = 15\n\tWAIT_INPUT = 16\n\tTIMEOUT = 17\n\nclass WINNT_STATUS(Enum):\n\tSTATUS_DATATYPE_MISALIGNMENT = 0x80000002\n\tSTATUS_BREAKPOINT = 0x80000003\n\tSTATUS_SINGLE_STEP = 0x80000004\n\tSTATUS_ACCESS_VIOLATION = 0xC0000005\n\tSTATUS_IN_PAGE_ERROR = 0xC0000006\n\tSTATUS_NO_MEMORY = 0xC0000017\n\tSTATUS_ILLEGAL_INSTRUCTION = 0xC000001D\n\tSTATUS_NONCONTINUABLE_EXCEPTION = 0xC0000025\n\tSTATUS_INVALID_DISPOSITION = 0xC0000026\n\tSTATUS_ARRAY_BOUNDS_EXCEEDED = 0xC000008C\n\tSTATUS_FLOAT_DENORMAL_OPERAND = 0xC000008D\n\tSTATUS_FLOAT_DIVIDE_BY_ZERO = 0xC000008E\n\tSTATUS_FLOAT_INEXACT_RESULT = 0xC000008F\n\tSTATUS_FLOAT_INVALID_OPERATION = 0xC0000090\n\tSTATUS_FLOAT_OVERFLOW = 0xC0000091\n\tSTATUS_FLOAT_STACK_CHECK = 0xC0000092\n\tSTATUS_FLOAT_UNDERFLOW = 0xC0000093\n\tSTATUS_INTEGER_DIVIDE_BY_ZERO = 0xC0000094\n\tSTATUS_INTEGER_OVERFLOW = 0xC0000095\n\tSTATUS_PRIVILEGED_INSTRUCTION = 0xC0000096\n\tSTATUS_STACK_OVERFLOW = 0xC00000FD\n\tSTATUS_CONTROL_C_EXIT = 0xC000013A\n\n# dll uses return values to indicate success/failure while we use exceptions\nERROR_UNSPECIFIED = -1\n\nclass DebugAdapterDbgeng(DebugAdapter.DebugAdapter):\n\tdef __init__(self, **kwargs):\n\t\tDebugAdapter.DebugAdapter.__init__(self, **kwargs)\n\n\t\tfpath = os.path.abspath(__file__)\n\t\tfpath = os.path.dirname(fpath)\n\t\tfpath = os.path.join(fpath, 'dbgengadapt\\dbgengadapt.dll')\n\t\tself.dll = CDLL(fpath)\n\t\tif not self.dll:\n\t\t\traise DebugAdapter.GeneralError(\"loading %s\" % fpath)\n\n\t\t# keep mapping between addresses (DbgAdapter namespace) and breakpoint\n\t\t# id's (dbgeng namespace)\n\t\tself.bp_addr_to_id = {}\n\n\t\t#\n\t\tself.stop_reason_fallback = DebugAdapter.STOP_REASON.UNKNOWN\n\n\tdef __del__(self):\n\t\t#print('destructor')\n\t\tpass\n\n\tdef get_last_breakpoint_address(self):\n\t\taddr = c_ulonglong()\n\t\tif self.dll.get_last_breakpoint_address(byref(addr)) != 0:\n\t\t\traise DebugAdapter.GeneralError(\"retrieving last breakpoint address\")\n\t\treturn addr.value\n\n\tdef get_last_exception_info(self):\n\t\t# TODO: handle 32 bit case\n\t\t#typedef struct _EXCEPTION_RECORD64 {\n\t\t# DWORD ExceptionCode;\n\t\t# DWORD ExceptionFlags;\n\t\t# DWORD64 ExceptionRecord;\n\t\t# DWORD64 ExceptionAddress;\n\t\t# DWORD NumberParameters;\n\t\t# DWORD __unusedAlignment;\n\t\t# DWORD64 ExceptionInformation[EXCEPTION_MAXIMUM_PARAMETERS];\n\t\t#} EXCEPTION_RECORD64, *PEXCEPTION_RECORD64;\n\t\trecord = create_string_buffer(4+4+8+8+4+4+8*15)\n\t\tself.dll.get_exception_record64(record)\n\t\t(ExceptionCode, ExceptionFlags, ExceptionRecord, ExceptionAddress, NumberParameters) = \\\n\t\t\tunpack(')\n\tdef go(self):\n\t\t# TODO: Handle output\n\t\tself.dll.go()\n\t\treturn self.thunk_stop_reason()\n\n\tdef step_into(self):\n\t\tself.stop_reason_fallback = DebugAdapter.STOP_REASON.SINGLE_STEP\n\t\tself.dll.step_into()\n\t\treturn self.thunk_stop_reason()\n\n\tdef step_over(self):\n\t\tself.stop_reason_fallback = DebugAdapter.STOP_REASON.SINGLE_STEP\n\t\tself.dll.step_over()\n\t\treturn self.thunk_stop_reason()\n\n\t# testing\n\tdef 
test(self):\n\t\tpass\n\n","sub_path":"dbgeng.py","file_name":"dbgeng.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"55205679","text":"\nclass GCN(nn.Module):\n def __init__(self, num_state, num_node, bias=False):\n super(GCN, self).__init__()\n self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, bias=bias)\n\n def forward(self, x):\n h = self.conv1(x.permute(0, 2, 1)).permute(0, 2, 1)\n h = h - x\n h = self.relu(self.conv2(h))\n return h","sub_path":"networks/attention/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"365705717","text":"import OpenGL.GLUT as GLUT\r\nimport OpenGL.GLU as GLU\r\nimport OpenGL.GL as GL\r\nfrom sys import argv\r\nfrom math import sin, cos, pi\r\n\r\nwindow_name = \"Prisma\"\r\n\r\nleft_button = False\r\nalpha = 0\r\nbeta = 0\r\ndelta_alpha = 0.5\r\n\r\nright_button = False\r\ndelta_x, delta_y, delta_z = 0, 0, 0\r\n\r\ndown_x, down_y = 0, 0\r\n\r\n# Cores\r\nsides_colors = (\r\n (0.700, 0.132, 0.512),\r\n (0.298, 0.819, 0.215),\r\n (0, 0.658, 1),\r\n (0.123,0.213,0.745),\r\n (0.435,0.326,0.777)\r\n)\r\n\r\ntop_bottom_colors = (0.862, 0.866, 0.882)\r\n\r\n# Background Color RGBA\r\nbackground_color = (0.384, 0.411, 0.450, 1)\r\n\r\n# Figure Vars\r\n\r\nvertices = 5\r\nradius = 2\r\nprism_height = 3\r\n\r\ndef figure():\r\n\r\n polygon_points = []\r\n faces_angle = (2*pi)/vertices\r\n \r\n GL.glPushMatrix()\r\n \r\n GL.glTranslatef(0.0, 1.5, -10)\r\n GL.glRotatef(90,1.0,0.0,1.5)\r\n\r\n # Translation and Zoom\r\n GL.glTranslatef(delta_x, delta_y, delta_z)\r\n\r\n # Rotation\r\n # X axis\r\n GL.glRotatef(alpha, 0.0, 0.0, 1.0)\r\n # Y axis\r\n GL.glRotatef(beta, 0.0, 1.0, 0.0)\r\n\r\n # Bottom\r\n GL.glColor3fv(top_bottom_colors)\r\n GL.glBegin(GL.GL_POLYGON)\r\n for i in range(vertices):\r\n x = radius * cos(i*faces_angle)\r\n y = radius * sin(i*faces_angle)\r\n polygon_points += [ (x,y) ]\r\n GL.glVertex3f(x,y,0.0)\r\n GL.glEnd()\r\n\r\n # Top\r\n GL.glBegin(GL.GL_POLYGON)\r\n for x,y in polygon_points:\r\n GL.glVertex3f(x,y, prism_height)\r\n GL.glEnd()\r\n\r\n # Sides\r\n GL.glBegin(GL.GL_QUADS)\r\n for i in range(vertices):\r\n GL.glColor3fv(sides_colors[i%5])\r\n \r\n GL.glVertex3f(polygon_points[i][0],polygon_points[i][1],0)\r\n GL.glVertex3f(polygon_points[i][0],polygon_points[i][1],prism_height)\r\n\r\n GL.glVertex3f(polygon_points[(i+1)%vertices][0],polygon_points[(i+1)%vertices][1],prism_height)\r\n GL.glVertex3f(polygon_points[(i+1)%vertices][0],polygon_points[(i+1)%vertices][1],0)\r\n GL.glEnd()\r\n\r\n GL.glPopMatrix()\r\n\r\n\r\ndef draw():\r\n global alpha, left_button, right_button\r\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\r\n\r\n figure()\r\n # Auto-Rotation\r\n alpha = alpha + delta_alpha\r\n\r\n GLUT.glutSwapBuffers()\r\n\r\n\r\ndef timer(i):\r\n GLUT.glutPostRedisplay()\r\n GLUT.glutTimerFunc(10, timer, 1)\r\n \r\n\r\ndef main(): \r\n GLUT.glutInit(argv)\r\n GLUT.glutInitDisplayMode(\r\n GLUT.GLUT_DOUBLE | GLUT.GLUT_RGBA | GLUT.GLUT_DEPTH | GLUT.GLUT_MULTISAMPLE\r\n )\r\n\r\n # Creating a screen with good resolution proportions\r\n screen_width = GLUT.glutGet(GLUT.GLUT_SCREEN_WIDTH)\r\n screen_height = GLUT.glutGet(GLUT.GLUT_SCREEN_HEIGHT)\r\n\r\n window_width = round(2 * screen_width / 
3)\r\n window_height = round(2 * screen_height / 3)\r\n\r\n GLUT.glutInitWindowSize(window_width, window_height)\r\n GLUT.glutInitWindowPosition(\r\n round((screen_width - window_width) / 2), round((screen_height - window_height) / 2)\r\n )\r\n GLUT.glutCreateWindow(window_name)\r\n\r\n # Drawing Function\r\n GLUT.glutDisplayFunc(draw)\r\n\r\n GL.glEnable(GL.GL_MULTISAMPLE)\r\n GL.glEnable(GL.GL_DEPTH_TEST)\r\n\r\n GL.glClearColor(*background_color)\r\n\r\n # Pre-render camera positioning\r\n GLU.gluPerspective(-45, window_width / window_height, 0.1, 100.0)\r\n\r\n\r\n GLUT.glutTimerFunc(10, timer, 1)\r\n GLUT.glutMainLoop()\r\n\r\n\r\nif(__name__ == '__main__'):\r\n main()","sub_path":"03 - Prisma e Tronco de Pirâmide/prisma.py","file_name":"prisma.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"385596844","text":"from .utilities import *\nfrom pygments.lexers import get_lexer_for_filename, get_lexer_by_name, guess_lexer\nfrom pygments import lex\nimport re\nimport argparse\nimport codecs\n\n# Tokenize the code such that line information is kept\n# Returns the tokenized code as a string along with the\n# corresponding types of each token\ndef get_tokenization(lexedWoComments):\n tokenized_string = ''\n token_types = []\n curr_line_empty = True\n for t in lexedWoComments:\n token_type = str(t[0])\n token = t[1]\n token_stripped = token.strip()\n\n if '\\n' in token:\n if curr_line_empty:\n if t[0] != Token.Text and token_stripped != '':\n tokenized_string += token_stripped + \"\\n\"\n token_types.append(token_type)\n else:\n tokenized_string += token_stripped + \"\\n\"\n\n # Edge case for stray \"/\" in code\n if token_stripped == \"\\\\\":\n token_types.append(token_type)\n curr_line_empty = True\n elif t[0] == Token.Text:\n continue\n else:\n curr_line_empty = False\n tokenized_string += token + ' '\n token_types.append(token_type)\n\n return tokenized_string, token_types\n\ndef tokenize_code(code, literal_option, lexer):\n language = languageForLexer(lexer)\n tokens = lex(code, lexer)\n tokensList = list(tokens)\n\n # Strip comments and alter strings\n lexedWoComments = tokensExceptTokenType(tokensList, Token.Comment)\n lexedWoComments = tokensExceptTokenType(lexedWoComments, Token.Literal.String.Doc)\n lexedWoComments = fixTypes(lexedWoComments, language) #Alter the pygments lexer types to be more comparable between our languages\n lexedWoComments = convertNamespaceTokens(lexedWoComments, language)\n\n if(literal_option == 0):\n lexedWoComments = modifyStrings(lexedWoComments, underscoreString)\n elif(literal_option == 1):\n lexedWoComments = modifyStrings(lexedWoComments, singleStringToken)\n elif(literal_option == 2):\n lexedWoComments = modifyStrings(lexedWoComments, spaceString)\n elif(literal_option == 3):\n lexedWoComments = modifyStrings(lexedWoComments, singleStringToken)\n lexedWoComments = collapseStrings(lexedWoComments)\n lexedWoComments = modifyNumbers(lexedWoComments, singleNumberToken)\n\n return get_tokenization(lexedWoComments)\n\n# source_file: path of source file to be tokenized\n# literal_option:\n# 0 -> replace all spaces in strings with _\n# 1 -> replace all strings with a tag\n# 2 -> add spaces to the ends of the strings\n# 3 -> collapse strings to and collapses numbers to a type as well.\ndef tokenize_file(source_file, literal_option):\n code = \"\"\n try:\n with codecs.open(source_file, \"r\",encoding='utf-8', errors='ignore') as f:\n code = f.read()\n except 
UnicodeDecodeError:\n return '', []\n\n lexer = get_lexer_for_filename(source_file)\n return tokenize_code(code, literal_option, lexer)","sub_path":"source/utils/lexer/simplePyLex.py","file_name":"simplePyLex.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"605091214","text":"# ******************************\n# |docname| - Session Management\n# ******************************\n# The main thing in this file is to create the auth manager and to provide a ``user_loader``\n# The auth manager uses the ``user_loader`` on every route that requires authentication\n# The way we do protected routes in FastAPI is to include a parameter on the endpoint\n# ``user=Depends(auth_manager)`` This will cause the JWT token (provided in a cookie)\n# OR in a header to be validated. If the token is valid then the user will be looked\n# up in the database using the ``load_user`` function in this file.\n# see `./routers/auth.py` for more detail.\n\n# Imports\n# =======\n# These are listed in the order prescribed by `PEP 8`_.\n#\n# Standard library\n# ----------------\n# None.\n\n# Third-party imports\n# -------------------\nfrom fastapi import Request\nfrom fastapi.exceptions import HTTPException\nfrom fastapi_login import LoginManager\n\n# Local application imports\n# -------------------------\nfrom .config import settings\nfrom .crud import fetch_instructor_courses, fetch_user\nfrom .applogger import rslogger\n\n\nauth_manager = LoginManager(settings.secret, \"/auth/validate\", use_cookie=True)\nauth_manager.cookie_name = \"access_token\"\n\n\n@auth_manager.user_loader\nasync def load_user(user_id: str):\n \"\"\"\n fetch a user object from the database. This is designed to work with the\n original web2py auth_user schema but make it easier to migrate to a new\n database by simply returning a user object.\n \"\"\"\n rslogger.debug(f\"Going to fetch {user_id}\")\n return await fetch_user(user_id)\n\n\nasync def is_instructor(request: Request) -> bool:\n user = request.state.user\n if user is None:\n raise HTTPException(401)\n elif len(await fetch_instructor_courses(user.id, user.course_id)) > 0:\n return True\n else:\n return False\n","sub_path":"bookserver/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"404099323","text":"\"\"\"QtTestImage and QtTestImageLayout classes.\n\"\"\"\nfrom collections import namedtuple\nfrom typing import Callable, Tuple\n\nfrom qtpy.QtWidgets import (\n QCheckBox,\n QComboBox,\n QFrame,\n QGroupBox,\n QLabel,\n QPushButton,\n QVBoxLayout,\n)\n\nfrom ....utils import config\nfrom .qt_labeled_spin_box import QtLabeledSpinBox\nfrom .test_image import create_test_image\n\nCallback = Callable[[], None]\nIntCallback = Callable[[int], None]\n\nTILE_SIZE_DEFAULT = 64\nTILE_SIZE_RANGE = range(1, 4096, 100)\n\nIMAGE_SHAPE_DEFAULT = (1024, 1024) # (height, width)\nIMAGE_SHAPE_RANGE = range(1, 65536, 100)\n\n# The test images which QtTestImage can create. There is a drop-down and\n# the user can pick any one of these. Some are fixed shape, while others\n# allow you to request a specific shape.\nTEST_IMAGES = {\n \"Digits\": {\n \"shape\": None,\n \"factory\": lambda image_shape: create_test_image(\n \"0\", (16, 16), image_shape\n ),\n }\n}\nTEST_IMAGE_DEFAULT = \"Digits\"\n\n# Add skimage.data images if installed. 
Napari does not depend on\n# skimage but many developers will have it.\ntry:\n import skimage.data as data\n\n TEST_IMAGES.update(\n {\n \"Astronaut\": {\n \"shape\": (512, 512),\n \"factory\": lambda: data.astronaut(),\n },\n \"Chelsea\": {\n \"shape\": (300, 451),\n \"factory\": lambda: data.chelsea(),\n },\n \"Coffee\": {\"shape\": (400, 600), \"factory\": lambda: data.coffee()},\n }\n )\nexcept ImportError:\n pass # These images won't be listed.\n\n\nclass QtSetShape(QGroupBox):\n \"\"\"Controls to set the shape of an image.\"\"\"\n\n def __init__(self):\n super().__init__(\"Dimensions\")\n\n layout = QVBoxLayout()\n self.height = QtLabeledSpinBox(\n \"Height\", IMAGE_SHAPE_DEFAULT[0], IMAGE_SHAPE_RANGE\n )\n layout.addLayout(self.height)\n self.width = QtLabeledSpinBox(\n \"Width\", IMAGE_SHAPE_DEFAULT[1], IMAGE_SHAPE_RANGE\n )\n layout.addLayout(self.width)\n self.setLayout(layout)\n\n def get_shape(self) -> Tuple[int, int]:\n \"\"\"Return the currently configured shape.\n\n Return\n ------\n Tuple[int, int]\n The requestsed [height, width] shape.\n \"\"\"\n return self.height.spin.value(), self.width.spin.value()\n\n\nclass QtFixedShape(QGroupBox):\n \"\"\"Controls to display the fixed shape of an image.\"\"\"\n\n def __init__(self):\n super().__init__(\"Details\")\n\n layout = QVBoxLayout()\n self.shape = QLabel(\"Shape: ???\")\n layout.addWidget(self.shape)\n self.setLayout(layout)\n\n def set_shape(self, shape: Tuple[int, int]) -> None:\n \"\"\"Set the shape to show in the labels.\n\n shape : Tuple[int, int]\n The shape to show in the labels.\n \"\"\"\n self.shape.setText(f\"Shape: ({shape[0]}, {shape[1]})\")\n\n\nclass QtTestImageLayout(QVBoxLayout):\n \"\"\"Controls to a create a new test image layer.\n\n Parameters\n ----------\n on_create : Callable[[], None]\n Called when the create test image button is pressed.\n \"\"\"\n\n def __init__(self, on_create: Callback):\n super().__init__()\n self.addStretch(1)\n\n self.name = QComboBox()\n self.name.addItems(TEST_IMAGES.keys())\n self.name.activated[str].connect(self._on_name)\n self.addWidget(self.name)\n\n ShapeControls = namedtuple('ShapeControls', \"set fixed\")\n self.shape_controls = ShapeControls(QtSetShape(), QtFixedShape())\n\n # Add both, but only one will be visible at a time.\n self.addWidget(self.shape_controls.set)\n self.addWidget(self.shape_controls.fixed)\n\n # User can always set the tile size. 
Tiles always square for now.\n self.tile_size = QtLabeledSpinBox(\n \"Tile Size\", TILE_SIZE_DEFAULT, TILE_SIZE_RANGE\n )\n self.addLayout(self.tile_size)\n\n # Checkbox so we can choose between OctreeImage and regular Image.\n self.octree = QCheckBox(\"Octree Image\")\n self.octree.setChecked(1)\n self.addWidget(self.octree)\n\n # The create button.\n button = QPushButton(\"Create Test Image\")\n button.setToolTip(\"Create a new test image\")\n button.clicked.connect(on_create)\n self.addWidget(button)\n\n # Set the initially selected image.\n self._on_name(TEST_IMAGE_DEFAULT)\n\n def _on_name(self, value: str) -> None:\n \"\"\"Called when a new image name is selected.\n\n Set which image controls are visible based on the spec of\n the newly selected image.\n\n Parameters\n ----------\n value : str\n The new image name.\n \"\"\"\n spec = TEST_IMAGES[value]\n\n if spec['shape'] is None:\n # Image has a settable shape.\n self.shape_controls.set.show()\n self.shape_controls.fixed.hide()\n else:\n # Image has a fixed shape.\n self.shape_controls.set.hide()\n self.shape_controls.fixed.show()\n self.shape_controls.fixed.set_shape(spec['shape'])\n\n def get_image_shape(self) -> Tuple[int, int]:\n \"\"\"Return the configured image shape.\n\n Return\n ------\n Tuple[int, int]\n The [height, width] shape requested by the user.\n \"\"\"\n return self.shape_controls.set.get_shape()\n\n def get_tile_size(self) -> int:\n \"\"\"Return the configured tile size.\n\n Return\n ------\n int\n The requested tile size.\n \"\"\"\n return self.tile_size.spin.value()\n\n\nclass QtTestImage(QFrame):\n \"\"\"Frame with controls to create a new test image.\n\n Parameters\n ----------\n viewer : Viewer\n The napari viewer.\n \"\"\"\n\n # Class attribute so system-wide we create unique names, even if\n # created from different QtRender widgets for different layers.\n image_index = 0\n\n def __init__(self, viewer):\n super().__init__()\n self.viewer = viewer\n self.layout = QtTestImageLayout(self._create_test_image)\n self.setLayout(self.layout)\n\n def _create_test_image(self) -> None:\n \"\"\"Create a new test image layer.\"\"\"\n\n # Get the spec for the current selected type of image.\n image_name = self.layout.name.currentText()\n spec = TEST_IMAGES[image_name]\n factory = spec['factory']\n\n if spec['shape'] is None:\n # Image has a settable shape provided by the UI.\n shape = self.layout.get_image_shape()\n data = factory(shape)\n else:\n # Image comes in just one specific shape.\n data = factory()\n\n # Give each layer a unique name.\n unique_name = f\"test-image-{QtTestImage.image_index:003}\"\n QtTestImage.image_index += 1\n\n # Set config to create Octree or regular images.\n config.create_octree_images = self.layout.octree.isChecked()\n\n # Add the new image layer.\n layer = self.viewer.add_image(data, rgb=True, name=unique_name)\n layer.tile_size = self.layout.get_tile_size()\n","sub_path":"napari/_qt/experimental/render/qt_test_image.py","file_name":"qt_test_image.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"293436935","text":"x = 19\r\ny = 5\r\n\r\nif x > y:\t# Si x > y = True\r\n\tprint(x, \"es mayor que\", y)\t# Entonces haz esto\r\nelse:\t# Si x < y = True\r\n\tprint(x, \"es menor que\", y) # Entonces haz esto\r\n\r\n\r\n# Ejemplo\r\nprint(\"---\")\r\nprint(\"Bienvenido al programa de valoración en Python\\n\")\r\n\r\nnota = input(\"Inserte la nota obtenida en el exámen: \")\r\nnota = int(nota) # La 
input() function lets us store data sent through the console\r\n# in a variable, but the element received is a string,\r\n# so we must convert it to the data type we need\r\n\r\ndef valoracion(nota):\r\n\tif nota < 10:\t# If the grade is less than 10\r\n\t\tprint(\"You failed the exam\\n\") # The student failed\r\n\telif nota == 10:\t# If it is not less than 10, check whether the grade equals 10\r\n\t\tprint(\"You barely passed, you must work harder\\n\")\t# If so, give a warning\r\n\telif nota == 20:\t# If it is not equal to 10, check whether the grade equals 20\r\n\t\tprint(\"Excellent work, keep it up\\n\")\t# If so, congratulate the student\r\n\telse:\t# If none of the previous conditions hold, the student passed\r\n\t\tprint(\"You passed the exam\\n\")\t# Do this\r\n\r\nvaloracion(nota)\r\n\r\n# Another example\r\n\r\nedad = -7\r\n# Chaining comparison operators\r\nif 0 -1 and index + index1 < len(s) and s[index - index1] == s[index + index1]:\n                index1 += 1\n                size_pali += 2\n            if size_pali > res:\n                res_str = s[index-int((size_pali-1)/2) : index + int((size_pali-1)/2) + 1:1]\n                res = size_pali\n        \n        for index in range(0, len(s)-1):\n            if s[index] == s[index+1]:\n\n                index1 = 1\n                size_pali = 2\n                while index - index1 > -1 and index + 1 + index1 < len(s) and s[index - index1] == s[index + 1 + index1]:\n                    size_pali += 2\n                    if size_pali > res:\n                        res_str = s[index-index1:index+index1+2:1]\n                        res = size_pali\n                    index1 += 1\n        \n        return res_str","sub_path":"py/P0005.py","file_name":"P0005.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"544014129","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\n\nfrom grappelli.dashboard import modules, Dashboard\nfrom grappelli.dashboard.utils import get_admin_site_name\n\n\nclass CustomIndexDashboard(Dashboard):\n    def init_with_context(self, context):\n        site_name = get_admin_site_name(context)\n\n\n        self.children.append(modules.Group(\n            title= u'Administration',\n            column=1,\n            collapsible=True,\n            children=[\n                modules.ModelList(\n                    title='Users',\n                    models=('django.contrib.*', 'strongme.profile.*')\n                )\n            ]\n        ))\n\n        self.children.append(modules.Group(\n            title= u'Shop',\n            column=1,\n            collapsible=True,\n            children=[\n                modules.ModelList(\n                    title=u'Food',\n                    models=('strongme.catalog.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'Clothing',\n                    models=('strongme.shop.models.*',)\n                )\n            ]\n        ))\n        self.children.append(modules.Group(\n            title= u'Orders',\n            column=1,\n            collapsible=True,\n            children=[\n                modules.ModelList(\n                    title=u'Orders',\n                    models=('strongme.cart.models.*',)\n                ),\n            ]\n        ))\n\n        self.children.append(modules.Group(\n            title= u'Miscellaneous',\n            column=1,\n            collapsible=True,\n            children=[\n                modules.ModelList(\n                    title=u'Menu',\n                    models=('strongme.main_menu.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'Video gallery',\n                    models=('strongme.video.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'Articles',\n                    models=('strongme.articles.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'News',\n                    models=('strongme.news.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'FAQ',\n                    models=('strongme.FAQ.models.*',)\n                ),\n                modules.ModelList(\n                    title=u'Comments',\n                    models=('strongme.comments.models.*',)\n                ),\n            ]\n        ))\n\n        self.children.append(modules.LinkList(\n            u'Media',\n            column=2,\n            children=[\n                {\n                    'title': _('FileBrowser'),\n                    'url': 
'/admin/filebrowser/browse/',\n                    'external': False,\n                },\n            ]\n        ))\n\n        self.children.append(modules.RecentActions(\n            _(u'Recent actions'),\n            limit=5,\n            collapsible=True,\n            column=3,\n        ))\n\n\n","sub_path":"app/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"508196614","text":"from SGU.models import Usuario, Permissions, Grupos\n\nclass Gerencia_usuario():\n    def Deleta_usuario(delete):\n        Permissions.objects.filter(usuario_id=delete).delete()\n        Usuario.objects.filter(id=delete).delete()\n    \n    def Cria_usuario(request, form):\n        nomes = request.POST.getlist(\"grupo\")\n        form.save()\n        username = request.POST.get(\"username\")\n        pessoa = Usuario.objects.get(username=username)\n        for nome in nomes:\n            done = Permissions.objects.create(groups=nome, usuario_id=pessoa.id)\n            done.save()\n\n\n    def Atualiza_usuario(request, username):\n        nome = request.POST.get(\"nome\")\n        email = request.POST.get(\"email\")\n        perm_update = request.POST.getlist(\"grupo\")\n        is_active = request.POST.get(\"is_active\")\n        pessoa = Usuario.objects.get(username=username)\n        perms = []\n        grupo = []\n        for i in Permissions.objects.filter(usuario_id__id=pessoa.id):\n            i = str(i)\n            perms.append(i)\n        for i in perms:\n            if i not in perm_update:\n                Permissions.objects.filter(usuario_id__id=pessoa.id, groups=i).delete()\n        for i in perm_update:\n            if i not in perms:\n                Permissions.objects.create(groups=i, usuario_id=pessoa.id)\n        for i in Grupos.objects.all():\n            i = str(i)\n            grupo.append(i)\n        for i in Permissions.objects.filter(usuario_id__id=pessoa.id):\n            i = str(i)\n            perms.append(i)\n        if is_active == \"on\":\n            pessoa.is_active = 1\n        elif is_active is None:\n            pessoa.is_active = 0\n        pessoa.nome = nome\n        pessoa.email = email\n        pessoa.save()\n        contexto = {\n            \"detalhes\" : pessoa,\n            \"grupos\" : grupo,\n            \"perms\" : perms,\n        }\n\n        return contexto\n\n\nclass Gerencia_permissao():\n    def Pega_grupo(request):\n        perm = Permissions.objects.filter(usuario_id__username=request.username)\n        grupos = []\n        for i in perm:\n            i = str(i)\n            grupos.append(i)\n        return grupos","sub_path":"src/usuario.py","file_name":"usuario.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"149045001","text":"#!/usr/bin/python3\n\"\"\"Using a REST API, export the TODO list of a given employee ID to JSON\"\"\"\nif __name__ == \"__main__\":\n    import json\n    import requests\n    import sys\n\n    employee = sys.argv[1]\n    url = 'https://jsonplaceholder.typicode.com/'\n    employee_url = url + 'users/' + employee\n    response = requests.get(employee_url)\n\n    employee_name = response.json().get('username')\n    employee_url = url + 'todos'\n    response = requests.get(employee_url)\n\n    employee_tasks = [task for task in response.json()\n                      if task.get('userId') == int(employee)]\n\n    task_list = []\n    for element in employee_tasks:\n        new_dict = {\"task\": element['title'],\n                    \"completed\": element['completed'],\n                    \"username\": employee_name}\n        task_list.append(new_dict)\n\n    out_dictionary = {sys.argv[1]: task_list}\n    with open('{}.json'.format(sys.argv[1]), 'w') as f:\n        json.dump(out_dictionary, f)\n","sub_path":"0x15-api/2-export_to_JSON.py","file_name":"2-export_to_JSON.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"300180863","text":"import 
cv2\nimport numpy as np\n\n\nprint(\"Enter the video width and height\")\nx = input(\"image width  = \")\ny = input(\"image height = \")\n\n# video path\nfilepath = \"_US_img_1-1000_{}_{}.mp4\".format(int(x),int(y))\n\n# open the video\ncap = cv2.VideoCapture(filepath)\n\n# frame number\nf_num = 0\n\n# repeat until the video ends\nwhile(cap.isOpened()):\n    \n    # grab a frame\n    ret, frame = cap.read()\n\n    #print(ret)\n    # stop when no frame could be read\n    if not ret:\n        cap.release()\n        break\n\n    # save the frame\n    f_num = 1 + f_num\n    cv2.imwrite(\"_2_US_img_1-1000_{0}_{0}/pic_{0}_{1}.png\".format(x, f_num), frame)\n\n    # display the frame\n    cv2.imshow(\"Frame\", frame)\n\n    # quit early if the q key is pressed\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\ncap.release()\ncv2.destroyAllWindows()\n \n","sub_path":"_2_make_imgs_from_mov.py","file_name":"_2_make_imgs_from_mov.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"593968665","text":"import os\n\nimport uproot\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom matplotlib import rc\nimport sys\nimport zlib\nimport copy\nlib_path = os.path.abspath(os.path.join(__file__, '..', '..'))\nsys.path.append(lib_path)\n\nfrom package.events import *\nfrom package.cut import *\nfrom package.stackplot import *\nfrom curveplot import *\nfrom cutstring import *\nimport multiprocessing\nfrom package.loadnormfactor import *\n\ndef pickleit(obj, path):\n    outfile = open(path, 'wb')\n    pickle.dump(obj, outfile)\n    outfile.close()\n\ndef unpickleit(path):\n    infile = open(path, 'rb')\n    output = pickle.load(infile)\n    infile.close()\n    return output\n\ndef poly(x, argv):\n    s = 0\n    for i, each in enumerate(argv):\n        s += x**i * each\n    return s\n\ndef fitfunction_real(x, p0, p1, p2, p4):\n    if x < p4:\n        return p0 + p1 * x + p2 * x**2\n    return p0 + p1 * p4 + p2 * p4**2\n\ndef fitfunction(x, p0, p1, p2, p4):\n    y = np.zeros(len(x))\n    y += (p0 + p1 * x + p2 * x**2) * (x <= p4)\n    y += (p0 + p1 * p4 + p2 * p4**2) * (x > p4)\n    return y\n\ndef autobin(data_list, bins, alias=None, variable=b\"pTV\"):\n    new_data = None\n    for each in data_list:\n        if (alias is None and each.alias != \"data\") or each.alias == alias:\n            if new_data is None:\n                new_data = each\n            else:\n                new_data = new_data + each\n    height, error = new_data.binned_weight_variation(variable, bins, scale=1000.)\n    newbin = [bins[0]]\n\n    sum_weight = 0\n    sum_error = 0\n    for i in range(len(height)):\n        if i == len(height) - 1:\n            newbin.append(bins[i+1])\n            break\n        sum_weight += height[i]\n        sum_error += error[i]\n\n        if (sum_error**0.5)/sum_weight < 0.35 and sum_weight > 3:\n            newbin.append(bins[i+1])\n            sum_weight = 0\n            sum_error = 0\n    return newbin\n\ndef autobin_withdata(data_list, bins, alias=None, variable=b\"pTV\"):\n    data_list = copy.deepcopy(data_list)\n    new_data = None\n    for each in data_list:\n        if (alias is None and each.alias != \"data\") or each.alias == alias:\n            if new_data is None:\n                new_data = each\n            else:\n                new_data = new_data + each\n    height, error = new_data.binned_weight_variation(variable, bins, scale=1000.)\n\n    new_data = None\n    for each in data_list:\n        if each.alias == \"data\":\n            if new_data is None:\n                new_data = each\n            else:\n                new_data = new_data + each\n    height_data, error_data = new_data.binned_weight_variation(variable, bins, scale=1000.)\n\n    newbin = [bins[0]]\n    sum_weight = 0\n    sum_error = 0\n    sum_weight_data = 0\n    sum_error_data = 0\n    for i in range(len(height)):\n        if i == len(height) - 1:\n            newbin.append(bins[i+1])\n            break\n        sum_weight += height[i]\n        sum_error += error[i]\n        
sum_weight_data += height_data[i]\n sum_error_data += error_data[i]\n\n if (sum_error**0.5)/sum_weight < 0.35 and sum_weight > 3 and (sum_error_data**0.5)/sum_weight_data < 0.35:\n newbin.append(bins[i+1])\n sum_weight = 0\n sum_error = 0\n return newbin\n\n\nntag = 2\n\ndef stack_cxaod(sample_directory, each_names, each_alias, each_color, branches_list_data, debug, cut, m_allsamples, matas=None):\n sample = load_CxAODs(sample_directory,each_names,branches_list_data, debug, \n colour=each_color,alias=each_alias,matanames=matas)\n if not sample:\n print(\"Warning: No \"+each_alias+\" samples found!\")\n if cut and sample:\n sample.matacut(s_resolved)\n # sample.cut(cut_lowmbb)\n #sample.cut(cut_highmbb)\n sample.matacut(s_mbbcr)\n sample.cut_parameter(cut_btag_is, ntag)\n #sample.cut(srcut)\n #sample.cut(cut_btag)\n #sample.cut(cut_muon)\n #sample.more()\n #sample.cut(crtopcut)\n #sample.cut(crmbbcut)\n\n m_allsamples.append(sample)\n if not cut:\n m_allsamples.append(sample)\n\n #print(each_alias)\n return 0\n\nif __name__ == '__main__':\n debug = False\n cut = True\n sample_directory = [\"../CxAOD31_01a/\"]\n tag = \"run2\"\n rescale = True\n slopecorrection = False\n\n t2 = r\"$\\mathit{\\sqrt{s}=13\\:TeV,36.1\\:fb^{-1}}$\"\n if tag == \"a\":\n t2 = r\"$\\mathit{\\sqrt{s}=13\\:TeV,36.1\\:fb^{-1}}$\"\n sample_directory = [\"../sample/\" + tag + \"/\"]\n if tag == \"d\":\n t2 = r\"$\\mathit{\\sqrt{s}=13\\:TeV,43.6\\:fb^{-1}}$\"\n sample_directory = [\"../sample/\" + tag + \"/\"]\n if tag == \"e\":\n t2 = r\"$\\mathit{\\sqrt{s}=13\\:TeV,58.5\\:fb^{-1}}$\"\n sample_directory = [\"../sample/\" + tag + \"/\"]\n if tag == \"run2\":\n t2 = r\"$\\mathit{\\sqrt{s}=13\\:TeV,139\\:fb^{-1}}$\"\n sample_directory = [\"../sample/a/\", \"../sample/d/\", \"../sample/e/\"]\n #sample_directory = [\"../sample/CxAOD32_06a/\", \"../sample/CxAOD32_06d/\", \"../sample/CxAOD32_06e/\"]\n #sample_directory = [\"../phi/a/\", \"../phi/d/\", \"../phi/e/\"]\n #sample_directory = [\"../phi/a/\"]\n mc_Wlvjet = [\"Wenu_Sh221\", \"WenuB_Sh221\", \"WenuC_Sh221\", \"WenuL_Sh221\", \"Wmunu_Sh221\", \"WmunuB_Sh221\", \"WmunuC_Sh221\", \"WmunuL_Sh221\", \"Wtaunu_Sh221\", \"WtaunuB_Sh221\", \"WtaunuC_Sh221\", \"WtaunuL_Sh221\"]\n #mc_Zlljet = [\"Zee_Sh221\", \"ZeeB_Sh221\", \"ZeeC_Sh221\", \"ZeeL_Sh221\", \"Zmumu_Sh221\", \"ZmumuB_Sh221\", \"ZmumuC_Sh221\", \"ZmumuL_Sh221\", \"Ztautau_Sh221\", \"ZtautauB_Sh221\", \"ZtautauC_Sh221\", \"ZtautauL_Sh221\",\"Znunu_Sh221\", \"ZnunuB_Sh221\", \"ZnunuC_Sh221\", \"ZnunuL_Sh221\"]\n # mc_Zlljet1 = [\"ZeeB_MGPy8\", \"ZeeC_MGPy8\"]\n # mc_Zlljet2 = [\"ZeeL_MGPy8\", \"ZmumuB_MGPy8\"]\n # mc_Zlljet3 = [\"Zmumu_Sh221\", \"ZmumuB_MGPy8\"]\n # mc_Zlljet4 = [\"ZmumuC_MGPy8\", \"ZmumuL_MGPy8\"]\n\n mc_Zlljet1 = [\"Zee_Sh221\", \"ZeeB_Sh221\"]\n mc_Zlljet2 = [\"ZeeC_Sh221\", \"ZeeL_Sh221\"]\n mc_Zlljet3 = [\"Zmumu_Sh221\", \"ZmumuB_Sh221\"]\n mc_Zlljet4 = [\"ZmumuC_Sh221\", \"ZmumuL_Sh221\"]\n mc_Zlljet5 = [\"Ztautau_Sh221\", \"ZtautauB_Sh221\", \"ZtautauC_Sh221\", \"ZtautauL_Sh221\",\"Znunu_Sh221\", \"ZnunuB_Sh221\", \"ZnunuC_Sh221\", \"ZnunuL_Sh221\"]\n mc_tt_bar = [ \"ttbar_nonallhad_PwPy8\", \"ttbar_allhad_PwPy8\", \"ttbar_dilep_PwPy8\"]#\"ttbar_nonallhad_PwPy8\", , \"ttbar_allhad_PwPy8\"]#\"ttbar_nonallhad_PwPy8\"]#, \"ttbar_allhad_PwPy8\"]\n mc_singletop = [\"stops_PwPy8\", \"stopt_PwPy8\", \"stopWt_PwPy8\", \"stopWt_dilep_PwPy8\"]\n mc_Diboson = [\"WqqWlv_Sh221\", \"WqqZll_Sh221\", \"WqqZvv_Sh221\", \"ZqqZll_Sh221\", \"ZqqZvv_Sh221\", \"WlvZqq_Sh221\", \"ggZqqZll_Sh222\", 
\"ggWqqWlv_Sh222\"]\n #sm_Higgs = [\"qqWlvHbbJ_PwPy8MINLO\", \"qqZllHbbJ_PwPy8MINLO\", \"qqZvvHbbJ_PwPy8MINLO\", \"ggZllHbb_PwPy8\", \"ggZvvHbb_PwPy8\", \"ggHbb_PwPy8NNLOPS\"] \n sm_Higgs = [\"bbHinc_aMCatNLOPy8\", \"ggHinc_PwPy8\", \"ggZllHbb_PwPy8\",\"ggZllHcc_PwPy8\",\"ggZvvHbb_PwPy8\",\"ggZvvHcc_PwPy8\",\"qqWlvHbbJ_PwPy8MINLO\",\"qqWlvHccJ_PwPy8MINLO\",\"qqZllHbbJ_PwPy8MINLO\",\"qqZllHccJ_PwPy8MINLO\",\"qqZvvHbbJ_PwPy8MINLO\",\"qqZvvHccJ_PwPy8MINLO\"]\n #other = [\"ggZqqZll_Sh222\", \"ggWqqWlv_Sh222\"]#,\"ttV_aMCatNLOPy8\",\"ggWqqWlv_Sh222\",\"ggZqqZvv_Sh222\",\"stoptZ_MGPy8\"]#[ \"ttV_aMCatNLOPy8\"]#\"VV_fulllep_Sh222\",\n data = [\"data16\", \"data15\", \"data17\", \"data18\"]\n bbA300 = [ \"bbA300\"]\n ggA300 = [ \"ggA300\"]\n file_name_array = [data, mc_Diboson, mc_tt_bar, mc_singletop, mc_Zlljet1, mc_Zlljet2, mc_Zlljet3, mc_Zlljet4, mc_Zlljet5, mc_Wlvjet, sm_Higgs]#, bbA300, ggA300]#, sm_ggHiggs, sm_qqHiggs]\n alias = [\"data\", \"Diboson\", \"ttbar\", \"singletop\", \"Zlljet\", \"Zlljet\", \"Zlljet\", \"Zlljet\", \"Zlljet\", \"Wlvjet\", \"smHiggs\"]#,'bbA300', 'ggA300']#, \"sm_ggHiggs\", \"sm_qqHiggs\"]\n colors = [None, 'g', 'yellow', 'tab:orange', 'royalblue', 'royalblue', 'royalblue', 'royalblue', 'royalblue', 'm', 'teal']#, 'k', 'dimgrey']\n\n\n branches_list_data = [b\"mBBres\", b\"EventWeight\", b\"pTV\", b'mVH', b'nTags']\n matas = [\"Sample\", \"Description\", \"Regime\"]\n branches_list_MC = branches_list_data\n bins = range(100,1400,50)\n bins = range(20,200,5)\n #bins = np.linspace(100,140,16)\n #all_sample = []\n rescaledic = None\n if rescale:\n rescaledic = loadnorm(\"C:/Users/qiutt/Desktop/postreader/PlotTool_Root/jsonoutput/confignormonly.cfg\",\n \"C:/Users/qiutt/Desktop/postreader/PlotTool_Root/jsonoutput/GlobalFit_fitres_unconditionnal_mu0_normonly.txt\")\n\n if slopecorrection:\n p1s = []\n p2s = []\n bottom = 0\n middle = 0\n top = 0\n with open(\"output/slopefit/\" + \"pTV-mbbcut-\"+str(ntag)+\"tagpolyfitresult.csv\") as f:\n #with open(\"output/slopefit/\" + \"pTV-mbbcut-1tagpolyfitresult.csv\") as f:\n for each in f:\n each_array = each.split(',')\n if top == 0:\n bottom = float(each_array[0])\n middle = float(each_array[1])\n top = float(each_array[2])\n elif not p1s:\n p1s = each_array[0:-1]\n else:\n p2s = each_array[0:-1]\n for i in range(len(p1s)):\n p1s[i] = float(p1s[i])\n p2s[i] = float(p2s[i])\n print(p1s,p2s)\n # p1s = []\n # with open(\"output/slopefit/\" + \"pTV-mbbcut-\"+str(ntag)+\"tagpolyfitresult.csv\") as f:\n # for each_line in f:\n # p1s = each_line.split(',')\n # for i in range(len(p1s)-1):\n # p1s[i] = float(p1s[i])\n # print(p1s)\n # break\n processes = []\n manager = multiprocessing.Manager()\n all_sample = manager.list()\n for each_names, each_alias, each_color in zip(file_name_array,alias,colors):\n if \"data\" in each_alias:\n t = multiprocessing.Process(target=stack_cxaod, args=(sample_directory, each_names, each_alias, each_color, branches_list_data, debug, cut, all_sample, matas))\n else:\n t = multiprocessing.Process(target=stack_cxaod, args=(sample_directory, each_names, each_alias, each_color, branches_list_MC, debug, cut, all_sample, matas))\n processes.append(t)\n t.start()\n\n i = 0\n for each_process, each_alias in zip(processes, alias):\n i += 1\n print(i,\" Waiting for \" + each_alias + \"...\")\n each_process.join()\n print(i, each_alias + \" finished.\")\n print(\"All done.\")\n #print(rescaledic)\n all_sample_after = [each for each in all_sample]\n\n if rescale:\n print(\"Performing rescale...\")\n for i in 
range(len(all_sample_after)):\n for each_key in rescaledic.keys():\n if 'ALL' in rescaledic[each_key]:\n factor = rescaledic[each_key]['ALL'] + 1\n mask = all_sample_after[i].mata[\"Sample\"] == zlib.adler32(each_key.encode())\n if True in mask:\n all_sample_after[i].rescale(factor, mask)\n\n if slopecorrection:\n print(\"Performing slope correction...\")\n for i in range(len(all_sample_after)):\n mask1 = all_sample_after[i].data[b'pTV']/1000. < middle\n mask2 = all_sample_after[i].data[b'pTV']/1000. >= middle\n mask2 = np.logical_and(all_sample_after[i].data[b'pTV']/1000. < top, mask2)\n print(\"before\", all_sample_after[i].weight.sum())\n if True in mask1:\n all_sample_after[i].weight[mask1] = all_sample_after[i].weight[mask1] * (poly(all_sample_after[i].data[b'pTV'][mask1]/1000., p1s))\n if True in mask2:\n all_sample_after[i].weight[mask2] = all_sample_after[i].weight[mask2] * (poly(all_sample_after[i].data[b'pTV'][mask2]/1000., p2s))\n print(\"after\", all_sample_after[i].weight.sum())\n # for i in range(len(all_sample_after)):\n # all_sample_after[i].weight = all_sample_after[i].weight * (fitfunction(all_sample_after[i].data[b'pTV']/1000., p1s[0], p1s[1], p1s[2], p1s[3]))\n # print(\"after\", all_sample_after[i].weight.sum())\n bins = [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300]\n \n\n title3=\"mBBcr \" + str(ntag) +\" btags\"\n direct = \"output/t_make_plot/\"\n name = \"-mbbcut-\" + str(ntag) +\"tag\"\n if rescale:\n direct = \"output/t_make_plot_rescale/\"\n if slopecorrection and rescale:\n direct = \"output/t_make_plot_rescale_slopecorrection/\"\n if slopecorrection and not rescale:\n direct = \"output/t_make_plot_slopecorrection/\"\n\n\n bins = [0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300]\n bins = range(0,1400,20)\n bins = autobin_withdata(all_sample_after, bins, alias=\"Zlljet\", variable=b\"pTV\")\n print(bins)\n chi2, nod = stackplot(all_sample_after,b'pTV',bins,1000.,\n xlabel=r\"$p_{TV}[GeV]$\", title3=title3, filename=direct + \"pTV\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"pTV\", chi2, nod)\n bins = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 1000, 1150, 1350, 1550, 1800]\n chi2, nod = stackplot(all_sample_after,b'mVH',bins,1000.,\n xlabel=r\"$m_{VH}[GeV]$\", title3=title3, filename=direct + \"mVH\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"mVH\", chi2, nod)\n bins = range(20, 200, 1)\n stackplot(all_sample_after,b'mBBres',bins,1000.,\n xlabel=r\"$m_{BB}[GeV]$\", title3=title3, filename=direct + \"mBB\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=False, printzpjets=True, chi2=True)\n\n\n if not slopecorrection:\n all_sample_after1 = copy.deepcopy(all_sample_after)\n all_sample_after2 = copy.deepcopy(all_sample_after)\n\n for i in range(len(all_sample_after)):\n all_sample_after1[i].cut(cut_lowmbb)\n all_sample_after2[i].cut(cut_highmbb)\n \n\n title3=\"lowmBBcr \" + str(ntag) +\" btags\"\n name = \"-lowmbbcut-\" + str(ntag) +\"tag\"\n bins = range(0,1400,20)\n bins = autobin_withdata(all_sample_after1, bins, alias=\"Zlljet\", variable=b\"pTV\")\n print(bins)\n chi2, 
nod = stackplot(all_sample_after1,b'pTV',bins,1000.,\n xlabel=r\"$p_{TV}[GeV]$\", title3=title3, filename=direct + \"pTV\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"pTV\", chi2, nod)\n bins = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 1000, 1150, 1350, 1550, 1800]\n chi2, nod = stackplot(all_sample_after1,b'mVH',bins,1000.,\n xlabel=r\"$m_{VH}[GeV]$\", title3=title3, filename=direct + \"mVH\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"mVH\", chi2, nod)\n bins = range(20, 200, 1)\n stackplot(all_sample_after1,b'mBBres',bins,1000.,\n xlabel=r\"$m_{BB}[GeV]$\", title3=title3, filename=direct + \"mBB\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=False, printzpjets=True, chi2=True)\n\n\n title3=\"highmBBcr \" + str(ntag) +\" btags\"\n name = \"-highmbbcut-\" + str(ntag) +\"tag\"\n bins = range(0,1400,20)\n bins = autobin_withdata(all_sample_after2, bins, alias=\"Zlljet\", variable=b\"pTV\")\n print(bins)\n chi2, nod = stackplot(all_sample_after2,b'pTV',bins,1000.,\n xlabel=r\"$p_{TV}[GeV]$\", title3=title3, filename=direct + \"pTV\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"pTV\", chi2, nod)\n bins = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 1000, 1150, 1350, 1550, 1800]\n chi2, nod = stackplot(all_sample_after2,b'mVH',bins,1000.,\n xlabel=r\"$m_{VH}[GeV]$\", title3=title3, filename=direct + \"mVH\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=True, printzpjets=True, chi2=True)\n print(\"mVH\", chi2, nod)\n bins = range(20, 200, 1)\n stackplot(all_sample_after2,b'mBBres',bins,1000.,\n xlabel=r\"$m_{BB}[GeV]$\", title3=title3, filename=direct + \"mBB\" + name, print_height=True,\n title2=t2,auto_colour=False, limit_y = 0.5, upper_y=2.0, log_y=False, printzpjets=True, chi2=True)","sub_path":"run/t_make_plot_quickselection.py","file_name":"t_make_plot_quickselection.py","file_ext":"py","file_size_in_byte":16744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"46039215","text":"# -*- coding: utf-8 -*-\n# Copyright © 2008-2011 Kozea\n# This file is part of Multicorn, licensed under a 3-clause BSD license.\n\nimport decimal\nimport datetime\nfrom collections import namedtuple\nfrom . 
import AbstractCornExtenser\nfrom ...requests.types import Type, Dict, List\nimport unicodedata  # used by to_unicode() below\n\n\nclass FixedOffsetTimeZone(datetime.tzinfo):\n    \"\"\"Fixed offset in hours and minutes from UTC.\n\n    >>> fixed = FixedOffsetTimeZone(-2, 30)\n    >>> dt = datetime.date(2007, 1, 25)\n    >>> fixed.utcoffset(dt)\n    datetime.timedelta(-1, 81000)\n    >>> fixed.tzname(dt)\n    'UTC-02:30'\n    >>> fixed.dst(dt)\n    datetime.timedelta(0)\n\n    \"\"\"\n    def __init__(self, offset_hours, offset_minutes):\n        \"\"\"Initialize timezone information with given offsets and name.\"\"\"\n        super(FixedOffsetTimeZone, self).__init__()\n        self.__offset = datetime.timedelta(\n            hours=offset_hours, minutes=offset_minutes)\n        self.__name = \"UTC%+03i:%02i\" % (offset_hours, offset_minutes)\n\n    def utcoffset(self, _):\n        \"\"\"Return offset of local time from UTC, in minutes east of UTC.\"\"\"\n        return self.__offset\n\n    def tzname(self, _):\n        \"\"\"Return the time zone name as a string.\"\"\"\n        return self.__name\n\n    def dst(self, _):\n        \"\"\"Return daylight saving time adjustment, in minutes east of UTC.\"\"\"\n        return datetime.timedelta(0)\n\n\ndef to_datetime(value):\n    \"\"\"Cast ``value`` into :class:`datetime.datetime` object.\n\n    >>> to_datetime(datetime.date(2010, 8, 4))\n    datetime.datetime(2010, 8, 4, 0, 0)\n    >>> to_datetime(datetime.datetime(2010, 8, 4, 0, 0))\n    datetime.datetime(2010, 8, 4, 0, 0)\n    >>> to_datetime(\"20100804\")\n    datetime.datetime(2010, 8, 4, 0, 0)\n    >>> to_datetime(\"2010-08-04\")\n    datetime.datetime(2010, 8, 4, 0, 0)\n    >>> to_datetime(\"2010-08-04T20:34:31\")\n    datetime.datetime(2010, 8, 4, 20, 34, 31)\n    >>> to_datetime(\"2010-08-04T20:34:31Z\")\n    ... # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n    datetime.datetime(2010, 8, 4, 20, 34, 31,\n    tzinfo=<...FixedOffsetTimeZone object at 0x...>)\n    >>> to_datetime(\"20100804-203431Z\")\n    ... # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n    datetime.datetime(2010, 8, 4, 20, 34, 31,\n    tzinfo=<...FixedOffsetTimeZone object at 0x...>)\n    >>> to_datetime(\"2010-08-04T20:34:31+02:30\")\n    ... # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n    datetime.datetime(2010, 8, 4, 20, 34, 31,\n    tzinfo=<...FixedOffsetTimeZone object at 0x...>)\n    >>> to_datetime(10) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n    Traceback (most recent call last):\n        ....\n    ValueError: 10 cannot be cast to datetime.\n\n    \"\"\"\n    if isinstance(value, datetime.datetime):\n        return value\n    elif isinstance(value, datetime.date):\n        return datetime.datetime(value.year, value.month, value.day)\n    elif isinstance(value, basestring):\n        value = value.replace(\"-\", \"\").replace(\":\", \"\").replace(\"T\", \"\")\n        if len(value) == 8:\n            return datetime.datetime.strptime(value, \"%Y%m%d\")\n        elif len(value) == 14:\n            return datetime.datetime.strptime(value, \"%Y%m%d%H%M%S\")\n        elif len(value) == 15 and value.endswith(\"Z\"):\n            value = value[:-1] + \"+0000\"\n        if len(value) == 19:\n            time, timezone = value[:14], value[14:]\n            hours, minutes = timezone[:2], timezone[2:]\n            time = datetime.datetime.strptime(time, \"%Y%m%d%H%M%S\")\n            return time.replace(\n                tzinfo=FixedOffsetTimeZone(int(hours), int(minutes)))\n    raise ValueError(\"%s cannot be cast to datetime.\" % value)\n\n\ndef to_date(value):\n    \"\"\"Cast ``value`` into :class:`datetime.date` object.\n\n    >>> to_date(datetime.date(2010, 8, 4))\n    datetime.date(2010, 8, 4)\n    >>> to_date(datetime.datetime(2010, 8, 4, 0, 0))\n    datetime.date(2010, 8, 4)\n    >>> to_date(\"20100804\")\n    datetime.date(2010, 8, 4)\n    >>> to_date(\"2010-08-04\")\n    datetime.date(2010, 8, 4)\n    >>> to_date(10) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n    Traceback (most recent call last):\n        ....\n    ValueError: 10 cannot be cast to date.\n\n    \"\"\"\n    if isinstance(value, datetime.datetime):\n        return value.date()\n    elif isinstance(value, datetime.date):\n        return value\n    elif isinstance(value, basestring):\n        value = value.replace(\"-\", \"\").replace(\":\", \"\")\n        return datetime.datetime.strptime(value, \"%Y%m%d\").date()\n    raise ValueError(\"%s cannot be cast to date.\" % value)\n\ndef to_bytes(value, encoding=\"utf-8\"):\n    \"\"\"Cast ``value`` into bytes.\n\n    This function works with Python 2.x and 3.x and can be used in Kalamar.\n\n    >>> spam = to_bytes(\"spam\")\n    >>> spam.decode(\"utf-8\") == to_unicode(\"spam\")\n    True\n    >>> type(spam) == bytes\n    True\n    >>> touche = to_bytes(\"Touché\")\n    >>> touche.decode(\"utf-8\") == to_unicode(\"Touché\")\n    True\n    >>> type(touche) == bytes\n    True\n    >>> ten = to_bytes(\"10\")\n    >>> type(ten) == bytes\n    True\n    >>> int(ten)\n    10\n\n    \"\"\"\n    if type(value) == bytes:\n        return value\n    else:\n        try:\n            return bytes(value, encoding=encoding)\n        except:\n            try:\n                return bytes(value)\n            except:\n                return value.encode(encoding)\n\n\ndef to_unicode(value, encoding=\"utf-8\"):\n    \"\"\"Cast ``value`` into unicode string.\n\n    This function works with Python 2.x and 3.x and can be used in Kalamar.\n\n    >>> spam = to_unicode(\"spam\")\n    >>> spam.encode(\"utf-8\") == to_bytes(\"spam\")\n    True\n    >>> type(spam) == unicode\n    True\n    >>> touche = to_unicode(\"Touché\")\n    >>> touche.encode(\"utf-8\") == to_bytes(\"Touché\")\n    True\n    >>> type(touche) == unicode\n    True\n    >>> ten = to_unicode(\"10\")\n    >>> type(ten) == unicode\n    True\n    >>> int(ten)\n    10\n\n    \"\"\"\n    if type(value) == unicode:\n        return value\n    else:\n        try:\n            string = unicode(value, encoding=encoding)\n        except:\n            string = unicode(value)\n        return unicodedata.normalize(\"NFC\", string)\n\n\ndef to_type(value, data_type):\n    \"\"\"Return ``value`` if instance of ``data_type`` else raise error.\n\n    >>> to_type(1, int)\n    1\n    >>> eggs = to_type(\"eggs\", unicode)\n    >>> eggs == \"eggs\"\n    
True\n >>> type(eggs) == unicode\n True\n >>> to_type(\"1+j\", complex)\n (1+1j)\n >>> to_type(\"eggs\", float) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n Traceback (most recent call last):\n ....\n ValueError: eggs cannot be cast to float.\n\n \"\"\"\n if isinstance(value, data_type) or value is None:\n return value\n else:\n try:\n return data_type(value)\n except:\n raise ValueError(\"%s cannot be cast to %s.\" % (\n value, data_type.__name__))\n\ndef to_number(value, data_type):\n if isinstance(value, data_type) or value is None:\n return value\n if not value:\n return data_type(0)\n return to_type(value, data_type)\n\nConverter = namedtuple('Converter', ['down', 'up'])\n\nclass TypeExtenser(AbstractCornExtenser):\n\n default_converters = {\n unicode: to_unicode,\n bytes: to_bytes,\n int: lambda value: to_number(value, int),\n float: lambda value: to_number(value, float),\n decimal.Decimal: lambda value: to_number(value, decimal.Decimal),\n datetime.datetime: to_datetime,\n datetime.date: to_date,\n bool: bool,\n dict: dict,\n object: lambda value: value,\n }\n\n def __init__(self, name, wrapped_corn, type_converters=None):\n super(TypeExtenser, self).__init__(name, wrapped_corn)\n self.converters = self.default_converters.copy()\n # Contains a mapping of property name to downward/upward\n # convertors\n self.named_convertors = {}\n if type_converters:\n self.converters.update(type_converters)\n\n def register(self, name, type=object, custom_down=None, custom_up=None):\n if name not in self.wrapped_corn.properties:\n raise KeyError('Cannot register a type converter for nonexistent property')\n self.properties[name] = Type(corn=self, type=type, name=name)\n if custom_down:\n down_converter = custom_down\n else:\n down_converter = self.converters[self.wrapped_corn.properties[name].type]\n if custom_up:\n up_converter = custom_up\n else:\n up_converter = self.converters[type]\n self.named_convertors[self.properties[name]] = Converter(down=down_converter, up=up_converter)\n\n def execute(self, query):\n #TODO: convert query so that every type is casted appropriately\n pass\n","sub_path":"multicorn/corns/extensers/typeextenser.py","file_name":"typeextenser.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428574402","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/summerrpc/helper/singleton.py\n# Compiled at: 2018-07-31 10:42:31\n__all__ = [\n 'Singleton']\n__authors__ = ['Tim Chow']\nimport threading\n\nclass Singleton(object):\n _instances = {}\n _lock = threading.Lock()\n\n def __new__(cls, *a, **kw):\n if cls in Singleton._instances:\n return Singleton._instances[cls]\n with Singleton._lock:\n if cls in Singleton._instances:\n return Singleton._instances[cls]\n else:\n instance = super(Singleton, cls).__new__(cls, *a, **kw)\n Singleton._instances[cls] = instance\n return instance","sub_path":"pycfiles/summerrpc-2.0.0-py2.7/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"195061463","text":"import datetime\nimport sys\n\nclass Logger:\n def __init__(self, filename=None):\n self.fout = sys.stdout\n if filename:\n self.fout = open('logs/' + datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") + '_' + 
filename,\n 'w', encoding='utf-8')\n\n def print(self, *args, end='\\n'):\n self.fout.write('\\t'.join(str(s) for s in args) + end)\n","sub_path":"Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"470472098","text":"import pickle\nimport time\nfrom threading import Thread\nfrom queue import Queue\n\nimport ana\n\nfrom .jobs import CFGGenerationJob\nfrom .object_container import ObjectContainer\nfrom ..logic import GlobalInfo\nfrom ..logic.threads import gui_thread_schedule_async\n\n\nclass Instance:\n def __init__(self, project=None):\n self.workspace = None\n\n self.jobs = []\n self._jobs_queue = Queue()\n self.simgrs = ObjectContainer([], name='Global simulation managers list')\n self.states = ObjectContainer([], name='Global states list')\n self._project_container = ObjectContainer(project, \"the current angr project\")\n self.cfg_container = ObjectContainer(project, \"the current CFG\")\n\n self._start_worker()\n\n self._cfg = None\n self._cfb = None\n\n self.database_path = None\n\n # The image name when loading image\n self.img_name = None\n\n #\n # Properties\n #\n\n @property\n def project(self):\n return self._project_container.am_obj\n\n @project.setter\n def project(self, v):\n self._project_container.am_obj = v\n self._project_container.am_event()\n\n @property\n def project_container(self):\n return self._project_container\n\n @property\n def cfg(self):\n return self.cfg_container.am_obj\n\n @cfg.setter\n def cfg(self, v):\n self.cfg_container.am_obj = v\n self.cfg_container.am_event()\n\n # notify the workspace\n if self.workspace is not None:\n self.workspace.reload()\n\n @property\n def cfb(self):\n return self._cfb\n\n @cfb.setter\n def cfb(self, v):\n self._cfb = v\n\n #\n # Public methods\n #\n\n def async_set_cfg(self, cfg):\n self.cfg_container.am_obj = cfg\n # This should not trigger a signal because the CFG is not yet done. 
We'll trigger a\n # signal on cfg.setter only\n # self.cfg_container.am_event()\n\n def async_set_cfb(self, cfb):\n self._cfb = cfb\n\n def set_project(self, project):\n self.project = project\n\n def set_image(self, image):\n self.img_name = image\n\n def initialize(self, cfg_args=None):\n if cfg_args is None:\n cfg_args = {}\n cfg_job = CFGGenerationJob(\n on_finish=self.workspace.on_cfg_generated,\n **cfg_args\n )\n self.add_job(cfg_job)\n\n self._start_daemon_thread(self._refresh_cfg, 'Progressive Refreshing CFG', args=(cfg_job,))\n\n def add_job(self, job):\n self.jobs.append(job)\n self._jobs_queue.put(job)\n\n def save(self, loc):\n with open(loc, 'wb') as f:\n pickled = pickle.dumps(self)\n store = ana.get_dl()._state_store\n pickle.dump({'store': store, 'pickled': pickled}, f)\n\n @staticmethod\n def from_file(loc):\n with open(loc, 'rb') as f:\n saved = pickle.load(f)\n ana.get_dl()._state_store = saved['store']\n return pickle.loads(saved['pickled'])\n\n #\n # Private methods\n #\n\n def _start_daemon_thread(self, target, name, args=None):\n t = Thread(target=target, name=name, args=args if args else tuple())\n t.daemon = True\n t.start()\n\n def _start_worker(self):\n self._start_daemon_thread(self._worker, 'angr Management Worker Thread')\n\n def _worker(self):\n while True:\n if self._jobs_queue.empty():\n gui_thread_schedule_async(self._set_status, args=(\"Ready.\",))\n\n job = self._jobs_queue.get()\n gui_thread_schedule_async(self._set_status, args=(\"Working...\",))\n\n result = job.run(self)\n gui_thread_schedule_async(job.finish, args=(self, result))\n\n self.jobs.remove(job)\n\n def _set_status(self, status_text):\n GlobalInfo.main_window.status = status_text\n\n def _refresh_cfg(self, cfg_job):\n time.sleep(1.0)\n while True:\n if self._cfg is not None:\n if self.workspace is not None:\n gui_thread_schedule_async(lambda: self.workspace.reload())\n\n time.sleep(0.3)\n if cfg_job not in self.jobs:\n break\n","sub_path":"angrmanagement/data/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"92351693","text":"import math\r\n\r\ndef check(char, plus = False):\r\n val = 0\r\n if plus: val += 4\r\n if char == \"T\": val += 3\r\n elif char == \"C\": val += 1\r\n elif char == \"G\": val += 2\r\n else: pass\r\n return val\r\n\r\ndef sign(pos_arr, pos):\r\n for start, finish in pos_arr:\r\n if pos >= start and pos <= finish:\r\n return 1\r\n return 0\r\n\r\ndef fill_AB(line, pos_arr):\r\n A = [0]*4\r\n B = [[0]*8 for _ in range(8)]\r\n C = [[0]*8 for _ in range(8)]\r\n neg_len = 0\r\n prev_neg = False\r\n prev_j = 0\r\n for i in range(len(line)):\r\n j = line[i]\r\n neg = sign(pos_arr, i) == 0 \r\n if neg:\r\n neg_len += 1\r\n A[check(j, not neg)] += 1\r\n if i > 0:\r\n B[check(prev_j, not prev_neg)][check(j, not neg)] += 1\r\n prev_neg = neg\r\n prev_j = j\r\n\r\n for i in range(4):\r\n A[i] = math.log((A[i] + 10**-30) / (neg_len + 1))\r\n\r\n for i in range(8):\r\n s = sum(B[i])\r\n for j in range(8):\r\n C[i][j] = math.log((B[i][j] + 10**-30) / (s + 1))\r\n\r\n return A, C\r\n\r\nif __name__ == \"__main__\":\r\n pos_arr = []\r\n glob_line = ''\r\n with open('input.txt') as f:\r\n line = f.readline()\r\n while line:\r\n line = line.strip()\r\n if line != '':\r\n line = line.split()\r\n if (len(line)) == 1:\r\n glob_line = line[0]\r\n else:\r\n pos_arr += [[int(i) - 1 for i in line]]\r\n line = f.readline()\r\n\r\n A, B = fill_AB(glob_line, 
pos_arr)\r\n\r\n    for i in range(4):\r\n        print(A[i])\r\n    \r\n    for i in range(8):\r\n        for j in range(8):\r\n            print(B[i][j])\r\n    \r\n","sub_path":"ШЦЭ 2019 Биоинформатика/Оценка параметров HMM.py","file_name":"Оценка параметров HMM.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"440871991","text":"# Implement function ToLowerCase() that has a string parameter str, and returns the same string in lowercase.\r\n#\n#  \r\n#\n#\n# Example 1:\r\n#\n#\n# Input: \"Hello\"\r\n# Output: \"hello\"\r\n#\n#\n#\n# Example 2:\r\n#\n#\n# Input: \"here\"\r\n# Output: \"here\"\r\n#\n#\n#\n# Example 3:\r\n#\n#\n# Input: \"LOVELY\"\r\n# Output: \"lovely\"\r\n#\n#\n#\n#\n\n\nclass Solution:\n    def toLowerCase(self, str: str) -> str:\n        # return str.lower()\n\n        # uppercase and lowercase letters differ by 32 in ASCII, so ord() and chr() are enough\n        ans = ''\n        for i in str:\n            if i >= 'A' and i <= 'Z':\n                ans += chr(ord(i) + 32)\n            else:\n                ans += i\n        return ans\n","sub_path":"solutions/742-to-lower-case/to-lower-case.py","file_name":"to-lower-case.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"479309377","text":"# -*- coding: utf-8 -*-\n\nimport os.path\nimport subprocess\nimport shutil\n\ndef split(filename):\n    basename = os.path.basename(filename)\n    basepath = \"C:\\\\Users\\\\314\\\\Documents\\\\book\\\\target2\\\\\" + basename[:-4]\n    print(basename)\n    #subprocess.Popen(['convert', '-quality', '90', '-crop', crop_option, filename, basename + '_2.jpg'], stdout=subprocess.PIPE, shell=True)\n    command = 'magick \"' + \"C:\\\\Users\\\\314\\\\Documents\\\\book\\\\\" + filename + '\" -crop 2x1@ +repage \"' + basepath + '_%01d.png\"'\n    print(command)\n    subprocess.call(command, shell=True)\n    \n    shutil.move(basepath + '_0.png', basepath + '_2.png')\n    \ndef all():\n    path = \"target\"\n    dirs = os.listdir(path)\n    for item in dirs:\n        split(path + \"\\\\\" + item)\nall()","sub_path":"book/split_png.py","file_name":"split_png.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"416634289","text":"from pydnameth import DataType, Task, Method\n\n\ndef get_method_metrics_keys(config):\n    metrics = []\n\n    if config.experiment.data in [DataType.betas, DataType.residuals_common, DataType.residuals_special]:\n\n        if config.experiment.task == Task.table:\n\n            if config.experiment.method == Method.linreg:\n\n                metrics = [\n                    'item',\n                    'aux',\n                    'R2',\n                    'R2_adj',\n                    'f_stat',\n                    'prob(f_stat)',\n                    'log_likelihood',\n                    'AIC',\n                    'BIC',\n                    'omnibus',\n                    'prob(omnibus)',\n                    'skew',\n                    'kurtosis',\n                    'durbin_watson',\n                    'jarque_bera',\n                    'prob(jarque_bera)',\n                    'cond_no',\n                    'intercept',\n                    'slope',\n                    'intercept_std',\n                    'slope_std',\n                    'intercept_p_value',\n                    'slope_p_value',\n                    'normality_p_value_shapiro',\n                    'normality_p_value_ks_wo_params',\n                    'normality_p_value_ks_with_params',\n                    'normality_p_value_dagostino'\n                ]\n\n            elif config.experiment.method == Method.variance:\n\n                metrics = [\n                    'item',\n                    'aux',\n\n                    'best_R2',\n\n                    'box_b_best_type',\n                    'box_b_best_R2',\n                    'box_b_lin_lin_R2',\n                    'box_b_lin_lin_intercept',\n                    'box_b_lin_lin_slope',\n                    'box_b_lin_lin_intercept_std',\n                    'box_b_lin_lin_slope_std',\n                    'box_b_lin_lin_intercept_p_value',\n                    'box_b_lin_lin_slope_p_value',\n                    'box_b_lin_log_R2',\n                    'box_b_lin_log_intercept',\n                    'box_b_lin_log_slope',\n                    'box_b_lin_log_intercept_std',\n                    'box_b_lin_log_slope_std',\n                    'box_b_lin_log_intercept_p_value',\n                    
'box_b_lin_log_slope_p_value',\n 'box_b_log_log_R2',\n 'box_b_log_log_intercept',\n 'box_b_log_log_slope',\n 'box_b_log_log_intercept_std',\n 'box_b_log_log_slope_std',\n 'box_b_log_log_intercept_p_value',\n 'box_b_log_log_slope_p_value',\n\n 'box_m_best_type',\n 'box_m_best_R2',\n 'box_m_lin_lin_R2',\n 'box_m_lin_lin_intercept',\n 'box_m_lin_lin_slope',\n 'box_m_lin_lin_intercept_std',\n 'box_m_lin_lin_slope_std',\n 'box_m_lin_lin_intercept_p_value',\n 'box_m_lin_lin_slope_p_value',\n 'box_m_lin_log_R2',\n 'box_m_lin_log_intercept',\n 'box_m_lin_log_slope',\n 'box_m_lin_log_intercept_std',\n 'box_m_lin_log_slope_std',\n 'box_m_lin_log_intercept_p_value',\n 'box_m_lin_log_slope_p_value',\n 'box_m_log_log_R2',\n 'box_m_log_log_intercept',\n 'box_m_log_log_slope',\n 'box_m_log_log_intercept_std',\n 'box_m_log_log_slope_std',\n 'box_m_log_log_intercept_p_value',\n 'box_m_log_log_slope_p_value',\n\n 'box_t_best_type',\n 'box_t_best_R2',\n 'box_t_lin_lin_R2',\n 'box_t_lin_lin_intercept',\n 'box_t_lin_lin_slope',\n 'box_t_lin_lin_intercept_std',\n 'box_t_lin_lin_slope_std',\n 'box_t_lin_lin_intercept_p_value',\n 'box_t_lin_lin_slope_p_value',\n 'box_t_lin_log_R2',\n 'box_t_lin_log_intercept',\n 'box_t_lin_log_slope',\n 'box_t_lin_log_intercept_std',\n 'box_t_lin_log_slope_std',\n 'box_t_lin_log_intercept_p_value',\n 'box_t_lin_log_slope_p_value',\n 'box_t_log_log_R2',\n 'box_t_log_log_intercept',\n 'box_t_log_log_slope',\n 'box_t_log_log_intercept_std',\n 'box_t_log_log_slope_std',\n 'box_t_log_log_intercept_p_value',\n 'box_t_log_log_slope_p_value',\n ]\n\n elif config.experiment.method == Method.cluster:\n\n metrics = [\n 'item',\n 'aux',\n 'number_of_clusters',\n 'number_of_noise_points',\n ]\n\n elif config.experiment.method == Method.polygon:\n\n if config.experiment.method_params['method'] == Method.linreg:\n\n metrics = [\n 'item',\n 'aux',\n 'area_intersection_rel',\n 'slope_intersection_rel',\n 'max_abs_slope',\n 'is_inside'\n ]\n\n elif config.experiment.method_params['method'] == Method.variance:\n\n metrics = [\n 'item',\n 'aux',\n 'area_intersection_rel_box'\n ]\n\n elif config.experiment.method == Method.special:\n\n metrics = [\n 'item'\n ]\n\n elif config.experiment.method == Method.z_test_linreg:\n\n metrics = [\n 'item',\n 'aux',\n 'z_value',\n 'p_value',\n 'abs_z_value'\n ]\n\n elif config.experiment.method == Method.aggregator:\n\n metrics = [\n 'item',\n 'aux'\n ]\n\n elif config.experiment.task == Task.clock:\n\n if config.experiment.method == Method.linreg:\n metrics = [\n 'item',\n 'aux',\n 'R2',\n 'r',\n 'evs',\n 'mae',\n 'rmse',\n ]\n\n return metrics\n","sub_path":"pydnameth/config/experiment/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"174239400","text":"from django.shortcuts import render, get_object_or_404\n\nfrom Blog.models import Post, Category, AboutAndContact\n\nquery_populer = Post.objects.order_by(\"updated\")[:3]\nquery_kategori = Category.objects.all()[:5]\n\n\ndef anasayfa(request):\n query = Post.objects.all().order_by(\"-updated\")\n context = {\n \"objects\": query,\n \"populer\": query_populer,\n \"categories\": query_kategori,\n }\n return render(request, \"anasayfa.html\", context)\n\n\ndef allposts(request):\n query = Post.objects.all().order_by(\"-updated\")\n context = {\n \"objects\": query,\n \"populer\": query_populer,\n \"categories\": query_kategori,\n }\n return render(request, \"allposts.html\", 
context)\n\n\ndef detail(request, id):\n query = get_object_or_404(Post, id=id)\n context = {\n \"objects\": query,\n \"populer\": query_populer,\n \"categories\": query_kategori,\n }\n return render(request, \"detay.html\", context)\n\n\ndef categories(request):\n # query = Post.objects.values('category__name','category_id').annotate(count=Count('category'))\n query = Category.objects.all()\n context = {\n \"objects\": query,\n \"populer\": query_populer,\n \"categories\": query_kategori,\n }\n return render(request, \"kategoriler.html\", context)\n\n\ndef category_detail(request, id):\n query = get_object_or_404(Category, id=id)\n query_post = Post.objects.all().filter(category_id=id)\n context = {\n \"objects\": query,\n \"obje\": query_post,\n \"populer\": query_populer,\n \"categories\": query_kategori,\n }\n return render(request, \"kategori_detail.html\", context)\n\n\ndef iletisim(request):\n query = AboutAndContact.objects.all()\n context = {\n \"objects\": query\n }\n return render(request, \"iletisim.html\", context)\n\n\ndef hakkinda(request):\n query = AboutAndContact.objects.all()\n context = {\n \"objects\": query\n }\n return render(request, \"hakkinda.html\", context)\n","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"239669991","text":"from flask import Flask, current_app, render_template\nfrom flask import request, Blueprint\n\nfrom flask_cors import CORS\nfrom flask_restful.reqparse import RequestParser\n\nfrom flask_restful_swagger_3 import swagger, get_swagger_blueprint\nfrom flask_restful_swagger_3 import Api, swagger, Resource, abort\nfrom flask_restful_swagger_3 import Schema\n\nfrom datetime import datetime\n#from views_blueprint import get_user_resources\n\n######\n\napp = Flask(__name__)\nCORS(app, resources={\"/api/*\": {\"origins\": \"*\"}})\n\n\ndef auth(api_key, endpoint, method):\n # Space for your fancy authentication. 
Return True if access is granted, otherwise False\n    return True\n\nswagger.auth = auth\n\n######\n\nclass AlunoModel(Schema):\n    properties = {\n        'fname': {\n            'type': 'string'\n        },\n        'lname': {\n            'type': 'string'\n        },\n    }\n    required = ['lname']\n\nclass ErrorModel(Schema):\n    type = 'object'\n    properties = {\n        'message': {\n            'type': 'string'\n        }\n    }\n\nclass SimpleEmailModel(Schema):\n    type = 'string'\n    \nclass EmailModel(Schema):\n    type = 'string'\n    format = 'email'\n\nclass SuperUserModel(Schema):\n    type = 'object'\n    properties = {\n        'id': {\n            'type': 'integer',\n            'format': 'int64',\n        },\n        'mail': SimpleEmailModel,\n    }\n    required = ['id']\n\nclass KeysModel(Schema):\n    type = 'string'\n    \nclass UserModel(SuperUserModel):\n    properties = {\n        'name': {\n            'type': 'string'\n        },\n        'mail': EmailModel,\n        'keys': KeysModel.array(),\n        'user_type': {\n            'type': 'string',\n            'enum': ['admin', 'regular'],\n            'nullable': True\n        },\n        'password': {\n            'type': 'string',\n            'format': 'password',\n            'load_only': True\n        }\n    }\n    required = ['name']\n    \n#####\n\nknown_users = []\n\n\nclass UserResource(Resource):\n    @swagger.tags('users')\n    @swagger.reorder_with(UserModel, response_code=200, summary=\"Add User\")\n    @swagger.parameter(_in='query', name='query', schema=UserModel, required=True, description='query')\n    def post(self, _parser):\n        \"\"\"\n        Adds a user\n        \"\"\"\n        # Validate request body with schema model\n        try:\n            data = UserModel(**_parser.parse_args())\n\n        except ValueError as e:\n            return ErrorModel(**{'message': e.args[0]}), 400\n\n        data['id'] = len(known_users) + 1\n        known_users.append(data)\n\n        return data, 201, {'Location': request.path + '/' + str(data['id'])}\n\n    @swagger.tags('users')\n    @swagger.response(response_code=200)\n    def get(self):\n        \"\"\"\n        Returns all users.\n        \"\"\"\n        users = ([u for u in known_users if u['name']])\n\n        # Return data through schema model\n        return list(map(lambda user: UserModel(**user), users)), 200\n        # return \"success\"\n\n\nclass UserItemResource(Resource):\n    @swagger.tags('user')\n    @swagger.response(response_code=200)\n    def get(self, user_id):\n        \"\"\"\n        Returns a specific user.\n        :param user_id: The user identifier\n        \"\"\"\n        user = next((u for u in known_users if u['id'] == user_id), None)\n\n        if user is None:\n            return ErrorModel(**{'message': \"User id {} not found\".format(user_id)}), 404\n\n        # Return data through schema model\n        return UserModel(**user), 200\n\n#####\n\ndef get_timestamp():\n    return datetime.now().strftime((\"%Y-%m-%d %H:%M:%S\"))\n\nPEOPLE = {\n    \"Jones\": {\n        \"fname\": \"Indiana\",\n        \"lname\": \"Jones\",\n        \"timestamp\": get_timestamp(),\n    },\n    \"Sparrow\": {\n        \"fname\": \"Jack\",\n        \"lname\": \"Sparrow\",\n        \"timestamp\": get_timestamp(),\n    },\n    \"Snow\": {\n        \"fname\": \"John\",\n        \"lname\": \"Snow\",\n        \"timestamp\": get_timestamp(),\n    },\n}\n\nclass Alunos(Resource):\n    @swagger.tags('alunos')\n    @swagger.response(response_code=200)\n    def get(self):\n        \"\"\"\n        Returns all students.\n        \"\"\"\n        alunos = [PEOPLE[key] for key in sorted(PEOPLE.keys())]\n        return alunos, 200\n        #users = ([u for u in PEOPLE if u['lname']])\n        # Return data through schema model\n        #return list(map(lambda user: AlunoModel(**user), users)), 200\n        # return \"success\"\n    \n    @swagger.tags('alunos')\n    @swagger.reorder_with(AlunoModel, response_code=200, summary=\"Add Student\")\n    @swagger.parameter(_in='query', name='query', schema=AlunoModel, required=True, description='query')\n    def post(self, _parser):\n        \"\"\"\n        Adds a student\n        \"\"\"\n        # Validate request body with schema 
model\n        try:\n            data = AlunoModel(**_parser.parse_args())\n\n        except ValueError as e:\n            return ErrorModel(**{'message': e.args[0]}), 400\n\n        #data['id'] = len(known_users) + 1\n        #known_users.append(data)\n        #return data, 201, {'Location': request.path + '/' + str(data['id'])}\n        \n        lname = data.get(\"lname\", None)\n        fname = data.get(\"fname\", None)\n        if lname not in PEOPLE and lname is not None:\n            PEOPLE[lname] = {\n                \"lname\": lname,\n                \"fname\": fname,\n                \"timestamp\": get_timestamp(),\n            }\n            return \"{lname} created successfully\".format(lname=lname), 201\n        else:\n            abort(406, message=\"Person with last name {lname} already exists\".format(lname=lname))\n\n''' \nclass Alunos(Resource):\n    @swagger.tags('alunos')\n    @swagger.response(response_code=200)\n    def get(self):\n        alunos = [PEOPLE[key] for key in sorted(PEOPLE.keys())]\n        return alunos, 200\n\n    @swagger.tags('users')\n    @swagger.reorder_with(AlunoModel, response_code=200, summary=\"Add User\")\n    @swagger.parameter(_in='query', name='query', schema=AlunoModel, required=True, description='query')\n    def post(self):\n        person = parser.parse_args()\n        lname = person.get(\"lname\", None)\n        fname = person.get(\"fname\", None)\n        \n        if lname not in PEOPLE and lname is not None:\n            PEOPLE[lname] = {\n                \"lname\": lname,\n                \"fname\": fname,\n                \"timestamp\": get_timestamp(),\n            }\n            return \"{lname} created successfully\".format(lname=lname), 201\n        else:\n            abort(406, message=\"Person with last name {lname} already exists\".format(lname=lname))\n'''\n\nclass Aluno(Resource):\n    @swagger.tags('alunos')\n    @swagger.response(response_code=200)\n    def get(self, lname):\n        if lname in PEOPLE:\n            person = PEOPLE.get(lname)\n        else:\n            abort(404, message=\"Person with last name {lname} not found\".format(lname=lname))\n        return person\n    \"\"\"\n    Returns a specific student.\n    :param lname: The student's last name\n    \"\"\"\n    '''\n    user = next((u for u in known_users if u['id'] == user_id), None)\n\n    if user is None:\n        return ErrorModel(**{'message': \"User id {} not found\".format(user_id)}), 404\n\n    # Return data through schema model\n    return UserModel(**user), 200\n    '''\n\n'''\nclass Aluno(Resource):\n    ## Specific methods that take a parameter \n    @swagger.tags('aluno')\n    @swagger.response(response_code=200)\n    def get(self, lname):\n        if lname in PEOPLE:\n            person = PEOPLE.get(lname)\n        else:\n            abort(404, message=\"Person with last name {lname} not found\".format(lname=lname))\n        return person\n\n    def delete(self, lname):\n        if lname in PEOPLE:\n            del PEOPLE[lname]\n            return \"{lname} deleted successfully\".format(lname=lname), 204\n        else:\n            abort(404, message=\"Person with last name {lname} not found\".format(lname=lname))\n\n    def put(self, lname):\n        person = parser.parse_args()\n        lname = person.get(\"lname\", None)\n        fname = person.get(\"fname\", None)\n        \n        if lname in PEOPLE:\n            PEOPLE[lname][\"fname\"] = person.get(\"fname\")\n            PEOPLE[lname][\"timestamp\"] = get_timestamp()\n        \n            return PEOPLE[lname]\n        else:\n            abort(404, message=\"Person with last name {lname} not found\".format(lname=lname))\n'''\n\n\n#####\n\n\ndef get_user_resources():\n    \"\"\"\n    Returns user resources.\n    :return: User resources\n    \"\"\"\n    \n    #blueprint = Blueprint('user', __name__)\n    blueprint = Blueprint('aluno', __name__)\n\n    api = Api(blueprint)\n\n    api.add_resource(UserResource, '/api/users')\n    api.add_resource(UserItemResource, '/api/users/<int:user_id>')\n    \n    #api.add_resource(AlunosResource, '/api/alunos')\n\n    api.add_resource(Alunos, '/api/alunos')\n    #api.add_resource(Aluno, '/api/alunos/<lname>')\n\n    return api\n\n# Get 
user resources\nuser_resources = get_user_resources()\n\n# Register the blueprint for user resources\napp.register_blueprint(user_resources.blueprint)\n\n# Prepare a blueprint to serve the combined list of swagger document objects and register it\n#servers = [{\"url\": \"http://localhost:5000\"}]\n\nSWAGGER_URL = '/api/doc'  # URL for exposing Swagger UI (without trailing '/')\nAPI_URL = 'swagger.json'  # Our API url (can of course be a local resource)\n\napp.config.setdefault('SWAGGER_BLUEPRINT_URL_PREFIX', '/swagger')\n\nwith app.app_context():\n    swagger_blueprint = get_swagger_blueprint(\n        user_resources.open_api_object,\n        swagger_prefix_url=SWAGGER_URL,\n        swagger_url=API_URL,\n        title='Alunos Microservice', version='1')#, servers=servers)\n\n\napp.register_blueprint(swagger_blueprint, url_prefix='/swagger')\n\n@app.route('/')\ndef home(): \n    return render_template('alunos.html')\n    \nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5007, debug=True)\n","sub_path":"server_swagger.py","file_name":"server_swagger.py","file_ext":"py","file_size_in_byte":9741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"92096158","text":"# -*- coding: utf-8 -*-\nimport pytest\nimport webtest\nfrom app import create_app\nfrom settings import get_settings\n\nfrom db.models import User, Room\n\n\n@pytest.fixture(scope='session')\ndef app():\n    test_settings = get_settings()\n    return webtest.TestApp(create_app(test_settings), use_unicode=True)\n\n\ndef test_create_room(session):\n    room = Room(name='Testing', tzinfo='Europe/Berlin')\n    session.add(room)\n    assert room.id is None\n    session.commit()\n    assert room.id > 0\n\n\ndef test_room_still_in_session(session):\n    room = session.query(Room).filter_by(name='Testing').one_or_none()\n    assert room is not None\n\n\ndef test_room_in_api(app):\n    resp = app.get('/api/rooms/1/')\n    assert resp.status_code == 200\n\n\ndef test_create_admin(session):\n    admin = User(\n        firstname='John', lastname='Doe',\n        email='admin@example.local',\n        username='admin',\n        password='test',\n        auth_token='test',\n        admin=1,\n        staff=1)\n    session.add(admin)\n    session.commit()\n    assert admin.id > 0\n\n\ndef test_admin_in_api(app):\n    resp = app.get('/api/users/admin/')\n    assert resp.status_code == 200\n\n\ndef test_create_meeting_as_admin(app):\n    doc = {\n        \"name\": \"John\",\n        \"company\": \"Doe\",\n        \"begin\": \"2016-01-01T10:00:00\",\n        \"end\": \"2016-01-01T11:00:00\",\n        \"room_id\": 1,\n        \"contact\": \"Foo\",\n        \"auth_token\": \"test\",\n    }\n    resp = app.post_json('/api/meetings/', doc, expect_errors=False)\n    assert resp.status_code == 201\n\n\ndef test_create_meeting_unauthorized(app):\n    doc = {\n        \"name\": \"John\",\n        \"company\": \"Doe\",\n        \"begin\": \"2016-01-01T10:00:00\",\n        \"end\": \"2016-01-01T11:00:00\",\n        \"room_id\": 1,\n        \"contact\": \"Foo\"\n    }\n    resp = app.post_json('/api/meetings/', doc, expect_errors=True)\n    assert resp.status_code == 401\n\n\ndef test_create_staff(app):\n    doc = {\n        \"firstname\": \"John\",\n        \"lastname\": \"Doe\",\n        \"email\": \"john@doe.local\",\n        \"username\": \"testuser\",\n        \"password\": \"test\",\n        \"admin\": 0,\n        \"staff\": 1,\n        \"auth_token\": 'test',\n    }\n    resp = app.post_json('/api/users/', doc, expect_errors=False)\n    assert resp.status_code == 201\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"438209623","text":"\"\"\"mysite URL 
Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import routers\nfrom polls.urls import router as polls_router\nfrom catalogs.urls import router as catalogs_router\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='Pastebin API')\n\n\ndef_router = routers.DefaultRouter()\ndef_router.registry.extend(polls_router.registry)\ndef_router.registry.extend(catalogs_router.registry)\n\nurlpatterns = [\n path('polls/', include('polls.urls')),\n path('admin/', admin.site.urls),\n path('', include('catalogs.urls')),\n path('api/',include(def_router.urls)),\n path('api/auth', include('rest_framework.urls')),\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('swagger',schema_view, name='swagger')\n]\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"215198338","text":"N, X = list(map(int, input().split()))\n\na = [1]\np = [1]\n\nfor i in range(N):\n a.append(a[i] * 2 + 3)\n p.append(p[i] * 2 + 1)\n\ndef sou(level):\n if level == 0:\n return 1\n else:\n return sou(level - 1) * 2 + 3\n\ndef patty(N):\n if N == 0:\n return 1\n else:\n return patty(N-1) * 2 + 1\n\ndef dp(N, X):\n if N >= 2:\n if X == 1:\n return 0\n elif X <= 1 + sou(N-1):\n return dp(N-2, X-1)\n elif X == 1 + sou(N-1) + 1:\n return patty(N-1) + 1\n elif X <= 1 + sou(N-1) + 1 + sou(N-1):\n X = X - 1 - sou(N-1) - 1\n return patty(N-1) + 1 + dp(N-2, X)\n elif X == sou(N):\n return patty(N)\n else:\n if N == 0:\n return 1\n else:\n if X == 1:\n return 0\n elif X == 2:\n return 1\n elif X == 3:\n return 2\n else:\n return 3\n\n\nprint(dp(N, X))","sub_path":"ABC115/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4002212","text":"from .models import Lunch\nfrom api_user_lunch.models import UserLunch \nimport datetime\nfrom api_user_lunch.services import UserLunchServices\n\n\nclass LunchServices:\n\n @classmethod\n def create(cls, serializer, provider):\n date = serializer.validated_data['date']\n if Lunch.objects.filter(date=date).exists():\n raise Exception(f'Lunch have been created with date {date}')\n lunch = serializer.save(provider_id=provider)\n UserLunch.objects.filter(date=date).update(lunch=lunch)\n return lunch\n\n @classmethod\n def create_many(cls, list_lunches):\n if not list_lunches:\n raise Exception('list_lunches are empty')\n expected_input = []\n get_lunches = Lunch.objects.all()\n expected_dates = [lunch.date for lunch in get_lunches]\n for lunch in list_lunches:\n 
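# drop duplicate dates for this lunch, then skip any date that already has a Lunch row\n            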
list_dates = lunch.get('list_dates', [])\n list_dates = list(dict.fromkeys(list_dates))\n for date in list_dates:\n convert_to_datetime = datetime.datetime.strptime(date, '%Y-%m-%d').date()\n if convert_to_datetime in expected_dates:\n continue\n expected_dates.append(convert_to_datetime)\n expected_input.append(Lunch(\n date=date,\n has_veggie=lunch.get('has_veggie', False),\n note=lunch.get('note', ''),\n provider_id=lunch.get('provider', None),\n ))\n\n lunches = Lunch.objects.bulk_create(expected_input)\n for lunch_data in lunches:\n UserLunch.objects.filter(date=lunch_data.date).update(lunch=lunch_data)\n return lunches\n\n @classmethod\n def get_object(cls, pk):\n try:\n lunch = Lunch.objects.get(id=pk)\n return lunch\n except Lunch.DoesNotExist:\n raise Exception('lunch is empty')\n\n @classmethod\n def update(cls, serializer, pk, date):\n lunch_instance = cls.get_object(pk)\n get_lunch = Lunch.objects.filter(date=date).first()\n lunch_date = lunch_instance.date\n date_instance = datetime.date(lunch_date.year, lunch_date.month, lunch_date.day).strftime('%Y-%m-%d')\n if date and date != date_instance and get_lunch is not None:\n raise Exception(f'Lunch have been created with date {date}')\n return serializer.save()\n","sub_path":"src/api_lunch/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"537416419","text":"# domain classifier\n\nimport json\nimport random\nimport copy\nimport logging\nimport argparse\nimport os\nimport yaml\nfrom datetime import datetime\nfrom tqdm import tqdm, trange\nfrom utils import set_seeds, clean\nfrom transformers import AutoModelForSequenceClassification, BertForSequenceClassification, AutoTokenizer, AutoConfig\n\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\nfrom transformers import AutoTokenizer, AutoModelForPreTraining\nfrom transformers import get_linear_schedule_with_warmup, AdamW\nfrom transformers import WEIGHTS_NAME, CONFIG_NAME\n\n\nclass BERTDANN(Dataset):\n def __init__(self, args, tokenizer, mode):\n self.tokenizer = tokenizer\n self.sample_counter = 0\n self.max_seq_length = 128\n\n with open(f'{args.data_dir}/sports.json', 'r', encoding='utf-8') as f:\n sports_data = json.load(f)\n with open(f\"{args.data_dir}/tv.json\", 'r', encoding='utf-8') as f:\n tv_data = json.load(f)\n with open(f\"{args.data_dir}/movie_train.json\", 'r', encoding='utf-8') as f:\n movie_data = json.load(f)\n \n self.sports_data = [(line['text'], 1) for line in sports_data if len(line['text']) > 5]\n self.tv_data = [(line['text'], 1) for line in tv_data if len(line['text']) > 5]\n self.movie_data = [(line['text'], 0) for line in movie_data if type(line['text'])==str and len(line['text']) > 5]\n \n target_train_size = min(int(0.9 * len(self.sports_data)), int(0.9 * len(self.tv_data)))\n movie_train_size = int(0.9 * len(self.movie_data))\n # target_train_size = 5000\n # movie_train_size = 5000\n # valid_size = 500\n \n if mode == 'train':\n self.movie_data = self.movie_data[:movie_train_size]\n self.sports_data = self.sports_data[:target_train_size]\n if args.target_data == 'sports':\n self.dataset = self.sports_data + self.movie_data\n random.shuffle(self.dataset)\n else:\n self.dataset = self.tv_data[:target_train_size] + self.movie_data\n random.shuffle(self.dataset)\n else:\n self.movie_data = self.movie_data[movie_train_size:]\n self.sports_data = self.sports_data[target_train_size:]\n if 
args.target_data == 'sports':\n self.dataset = self.sports_data + self.movie_data\n random.shuffle(self.dataset)\n else:\n self.dataset = self.tv_data[target_train_size:] + self.movie_data\n random.shuffle(self.dataset)\n\n def __len__(self):\n return len(self.dataset)\n \n def __getitem__(self, idx):\n guid = self.sample_counter\n self.sample_counter += 1\n\n tokens_a = \"\"\n while tokens_a == \"\":\n tokens_a, label= self.random_sent(idx)\n tokens_a = self.tokenizer.tokenize(clean(tokens_a))\n \n example = InputExample(guid=guid, tokens_a=tokens_a, label=label)\n \n features = convert_example_to_features(example, self.tokenizer, self.max_seq_length)\n \n tensors = (torch.tensor(features.input_ids),\n torch.tensor(features.input_mask),\n torch.tensor(features.label))\n\n return tensors\n \n def random_sent(self, idx):\n line, label = self.dataset[idx]\n return line, label\n\n\nclass InputExample:\n def __init__(self, \n guid, \n tokens_a, \n tokens_b=\"\", \n label=None):\n \n self.guid = guid\n self.tokens_a = tokens_a\n self.tokens_b = tokens_b\n self.label = label\n\nclass InputFeatures:\n def __init__(self, \n input_ids, \n input_mask,\n label):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.label = label\n\n\ndef convert_example_to_features(example, tokenizer, max_seq_length):\n tokens_a = example.tokens_a\n tokens_b = example.tokens_b # None\n \n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\n tokens = []\n tokens.append(\"[CLS]\")\n for token in tokens_a:\n tokens.append(token)\n tokens.append(\"[SEP]\")\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n \n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n if example.guid < 5:\n print(\"*** Example ***\")\n print(\"guid: %s\" % (example.guid))\n print(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n print(\"movie = 0 | sports = 1: %s \" % (example.label))\n\n features = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n label=example.label)\n return features\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n","sub_path":"src/new_model_4/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"314153066","text":"#MenuTitle: Adjust Kerning in Master\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, unicode_literals\ntry:\n\tfrom builtins import str\nexcept Exception as e:\n\tprint(\"Warning: 'future' module not installed. 
Run 'sudo pip install future' in Terminal.\")\n__doc__=\"\"\"\nAdjusts all kerning values by a specified amount.\n\"\"\"\n\nimport vanilla\n\noptionList = [ \"Multiply by\", \"Add\", \"Add Absolute\", \"Round by\" ]\n\nclass AdjustKerning( object ):\n\tdef __init__( self ):\n\t\t# GUI:\n\t\toffset = 10\n\t\tline = 20\n\t\twindowWidth = 280\n\t\twindowHeight = 2*offset+7*line\n\t\twindowWidthResize = 100 # user can resize width by this value\n\t\twindowHeightResize = 0 # user can resize height by this value\n\t\t\n\t\tself.w = vanilla.FloatingWindow(\n\t\t\t( windowWidth, windowHeight ), # default window size\n\t\t\t\"Adjust Kerning\", # window title\n\t\t\tminSize = ( windowWidth, windowHeight ), # minimum size (for resizing)\n\t\t\tmaxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)\n\t\t\tautosaveName = \"com.mekkablue.AdjustKerning.mainwindow\" # stores last window position and size\n\t\t)\n\t\t\n\t\tself.w.text_1 = vanilla.TextBox( (15-1, offset+2, -15, line), \"In the current font master, do this:\", sizeStyle='small' )\n\t\tself.w.doWhat = vanilla.PopUpButton( (25, offset+line, 100, line), optionList, callback=self.SavePreferences, sizeStyle='small' )\n\t\tself.w.howMuch = vanilla.EditText((25+100+10, offset+line+1, -15, line), \"10\", sizeStyle='small', callback=self.SavePreferences)\n\t\t\n\t\tself.w.text_2 = vanilla.TextBox( (15-1, offset*2+line*2+2, -15, line), \"To these kerning pairs:\", sizeStyle='small' )\n\t\tself.w.positive = vanilla.CheckBox( (25, offset*2+line*3, -15, line), \"Positive,\", value=True, callback=self.SavePreferences, sizeStyle='small' )\n\t\tself.w.zero = vanilla.CheckBox( (90, offset*2+line*3, -15, line), \"zero, and\", value=True, callback=self.SavePreferences, sizeStyle='small' )\n\t\tself.w.negative = vanilla.CheckBox( (162, offset*2+line*3, -15, line), \"negative pairs\", value=True, callback=self.SavePreferences, sizeStyle='small' )\n\n\t\t# self.w.keepWindow = vanilla.CheckBox( (25, offset*2+line*4, -15, line), \"Keep window open\", value=False, callback=self.SavePreferences, sizeStyle='small' )\n\n\t\tself.w.runButton = vanilla.Button((-80-15, -offset-line-5, -15, line), \"Adjust\", sizeStyle='small', callback=self.AdjustKerningMain )\n\t\tself.w.setDefaultButton( self.w.runButton )\n\t\t\n\t\t# Load Settings:\n\t\tif not self.LoadPreferences():\n\t\t\tprint(\"Note: 'Adjust Kerning in Master' could not load preferences. 
Will resort to defaults\")\n\t\t\n\t\t# Open window and focus on it:\n\t\tself.w.open()\n\t\tself.w.makeKey()\n\t\t\n\t\t\n\tdef SavePreferences( self, sender ):\n\t\ttry:\n\t\t\tGlyphs.defaults[\"com.mekkablue.AdjustKerning.doWhat\"] = self.w.doWhat.get()\n\t\t\tGlyphs.defaults[\"com.mekkablue.AdjustKerning.howMuch\"] = self.w.howMuch.get()\n\t\t\t# Glyphs.defaults[\"com.mekkablue.AdjustKerning.keepWindow\"] = self.w.keepWindow.get()\n\t\t\tGlyphs.defaults[\"com.mekkablue.AdjustKerning.positive\"] = self.w.positive.get()\n\t\t\tGlyphs.defaults[\"com.mekkablue.AdjustKerning.zero\"] = self.w.zero.get()\n\t\t\tGlyphs.defaults[\"com.mekkablue.AdjustKerning.negative\"] = self.w.negative.get()\n\t\texcept:\n\t\t\treturn False\n\t\t\t\n\t\treturn True\n\n\tdef LoadPreferences( self ):\n\t\ttry:\n\t\t\tGlyphs.registerDefault(\"com.mekkablue.AdjustKerning.doWhat\", 0)\n\t\t\tGlyphs.registerDefault(\"com.mekkablue.AdjustKerning.howMuch\", \"20\")\n\t\t\tGlyphs.registerDefault(\"com.mekkablue.AdjustKerning.positive\", True)\n\t\t\tGlyphs.registerDefault(\"com.mekkablue.AdjustKerning.zero\", True)\n\t\t\tGlyphs.registerDefault(\"com.mekkablue.AdjustKerning.negative\", True)\n\t\t\tself.w.doWhat.set( Glyphs.defaults[\"com.mekkablue.AdjustKerning.doWhat\"] )\n\t\t\tself.w.howMuch.set( Glyphs.defaults[\"com.mekkablue.AdjustKerning.howMuch\"] )\n\t\t\tself.w.positive.set( Glyphs.defaults[\"com.mekkablue.AdjustKerning.positive\"] )\n\t\t\tself.w.zero.set( Glyphs.defaults[\"com.mekkablue.AdjustKerning.zero\"] )\n\t\t\tself.w.negative.set( Glyphs.defaults[\"com.mekkablue.AdjustKerning.negative\"] )\n\t\texcept:\n\t\t\treturn False\n\t\t\t\n\t\treturn True\n\t\n\tdef nameForID( self, Font, ID ):\n\t\ttry:\n\t\t\tif ID[0] == \"@\": # is a group\n\t\t\t\treturn ID\n\t\t\telse: # is a glyph\n\t\t\t\treturn Font.glyphForId_( ID ).name\n\t\texcept Exception as e:\n\t\t\traise e\n\t\n\tdef userChoosesToProcessKerning( self, kernValue ):\n\t\ttry:\n\t\t\tif Glyphs.defaults[\"com.mekkablue.AdjustKerning.positive\"] and kernValue > 0:\n\t\t\t\treturn True\n\t\t\telif Glyphs.defaults[\"com.mekkablue.AdjustKerning.zero\"] and kernValue == 0:\n\t\t\t\treturn True\n\t\t\telif Glyphs.defaults[\"com.mekkablue.AdjustKerning.negative\"] and kernValue < 0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef AdjustKerningMain( self, sender ):\n\t\ttry:\n\t\t\tFont = Glyphs.font\n\t\t\tMaster = Font.selectedFontMaster\n\t\t\tMasterID = Master.id\n\t\t\tMasterKernDict = Font.kerning[ MasterID ]\n\t\t\tcalculation = str( self.w.doWhat.getItems()[ Glyphs.defaults[\"com.mekkablue.AdjustKerning.doWhat\"] ] )\n\t\t\tvalue = float( Glyphs.defaults[\"com.mekkablue.AdjustKerning.howMuch\"] )\n\t\t\t\n\t\t\tFont.disableUpdateInterface()\n\t\t\t\n\t\t\tif calculation == optionList[0]:\n\n\t\t\t\tfor leftGlyphID in MasterKernDict.keys():\n\t\t\t\t\tleftName = self.nameForID( Font, leftGlyphID )\n\t\t\t\t\t\n\t\t\t\t\tfor rightGlyphID in MasterKernDict[ leftGlyphID ].keys():\n\t\t\t\t\t\toriginalKerning = MasterKernDict[ leftGlyphID ][ rightGlyphID ]\n\t\t\t\t\t\tif self.userChoosesToProcessKerning( originalKerning ):\n\t\t\t\t\t\t\trightName = self.nameForID( Font, rightGlyphID )\n\t\t\t\t\t\t\tFont.setKerningForPair( MasterID, leftName, rightName, originalKerning * value )\n\n\t\t\telif calculation == optionList[1]:\n\t\t\t\t\n\t\t\t\tfor leftGlyphID in MasterKernDict.keys():\n\t\t\t\t\tleftName = self.nameForID( Font, leftGlyphID )\n\n\t\t\t\t\tfor rightGlyphID in MasterKernDict[ 
leftGlyphID ].keys():\n\t\t\t\t\t\toriginalKerning = MasterKernDict[ leftGlyphID ][ rightGlyphID ]\n\t\t\t\t\t\tif self.userChoosesToProcessKerning( originalKerning ):\n\t\t\t\t\t\t\trightName = self.nameForID( Font, rightGlyphID )\n\t\t\t\t\t\t\tFont.setKerningForPair( MasterID, leftName, rightName, originalKerning + value )\n\t\t\t\t\t\t\n\t\t\telif calculation == optionList[2]:\n\t\t\t\t\n\t\t\t\tfor leftGlyphID in MasterKernDict.keys():\n\t\t\t\t\tleftName = self.nameForID( Font, leftGlyphID )\n\n\t\t\t\t\tfor rightGlyphID in MasterKernDict[ leftGlyphID ].keys():\n\t\t\t\t\t\toriginalKerning = MasterKernDict[ leftGlyphID ][ rightGlyphID ]\n\t\t\t\t\t\tif self.userChoosesToProcessKerning( originalKerning ):\n\t\t\t\t\t\t\trightName = self.nameForID( Font, rightGlyphID )\n\t\t\t\t\t\t\tif originalKerning < 0:\n\t\t\t\t\t\t\t\tfactor = -1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfactor = 1\n\t\t\t\t\t\t\tFont.setKerningForPair( MasterID, leftName, rightName, originalKerning + factor * value )\n\t\t\t\t\t\t\n\t\t\telif calculation == optionList[3]:\n\t\t\t\t\n\t\t\t\tfor leftGlyphID in MasterKernDict.keys():\n\t\t\t\t\tleftName = self.nameForID( Font, leftGlyphID )\n\t\t\t\t\t\n\t\t\t\t\tfor rightGlyphID in MasterKernDict[ leftGlyphID ].keys():\n\t\t\t\t\t\toriginalKerning = MasterKernDict[ leftGlyphID ][ rightGlyphID ]\n\t\t\t\t\t\tif self.userChoosesToProcessKerning( originalKerning ):\n\t\t\t\t\t\t\trightName = self.nameForID( Font, rightGlyphID )\n\t\t\t\t\t\t\tFont.setKerningForPair( MasterID, leftName, rightName, round( originalKerning / value, 0 ) * value )\n\t\t\t\t\n\t\t\tFont.enableUpdateInterface()\n\t\t\t\n\t\t\tif not self.SavePreferences( self ):\n\t\t\t\tprint(\"Note: could not write preferences.\")\n\t\texcept Exception as e:\n\t\t\traise e\n\nAdjustKerning()\n","sub_path":"Kerning/Adjust Kerning in Master.py","file_name":"Adjust Kerning in Master.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"282550613","text":"import torch\r\ntry:\r\n import xml.etree.cElementTree as ET #解析xml的c语言版的模块\r\nexcept ImportError:\r\n import xml.etree.ElementTree as ET\r\nfrom torch.utils import data\r\nimport os\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom torchvision import transforms\r\n\r\n\r\n\r\nclasses = {}\r\nindex = 1\r\n\r\ntransforms = transforms.Compose(\r\n [\r\n transforms.Resize(224),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize( mean=tuple(np.array([125.3, 123.0, 113.9])/ 255.0),std = tuple(np.array([63.0, 62.1, 66.7]) / 255.0))\r\n ]\r\n)\r\n\r\nclass VOC2007(data.Dataset):\r\n def __init__(self,imgsRoot,labelsRoot,transforms=None):\r\n imgs = os.listdir(imgsRoot)\r\n self.imgs = [os.path.join(imgsRoot,img) for img in imgs]\r\n labels = os.listdir(labelsRoot)\r\n self.labels = [os.path.join(labelsRoot, label) for label in labels]\r\n self.transforms = transforms\r\n\r\n def __getitem__(self, index):\r\n img = Image.open(self.imgs[index]).convert('RGB')\r\n if self.transforms:\r\n img = self.transforms(img)\r\n annotations = parseXML(self.labels[index])\r\n annotationsArray = convert2Array(annotations)\r\n sample = {'img':img,'annot':annotationsArray}\r\n return sample\r\n def __len__(self):\r\n return len(self.imgs)\r\n\r\ndef parseXML(path):\r\n xml_desc = ET.ElementTree(file=path)\r\n root = xml_desc.getroot()\r\n ObjectSet = root.findall('object')\r\n annotations = []\r\n global index,classes\r\n for Object in ObjectSet:\r\n 
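# one annotation dict per <object> node: class name plus integer bndbox corners\r\n        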
annotation = {}\r\n annotation['class'] = Object.find('name').text\r\n if annotation['class'] not in classes.keys():\r\n classes[annotation['class']] = index\r\n index += 1;\r\n BndBox = Object.find('bndbox')\r\n annotation['xmin'] = int(float(BndBox.find('xmin').text))\r\n annotation['ymin'] = int(float(BndBox.find('ymin').text))\r\n annotation['xmax'] = int(float(BndBox.find('xmax').text))\r\n annotation['ymax'] = int(float(BndBox.find('ymax').text))\r\n annotations.append(annotation)\r\n\r\n return annotations\r\n\r\ndef convert2Array(annotations):\r\n annos = np.zeros((0,5))\r\n for obj in annotations:\r\n anno = np.zeros((1,5))\r\n #print(anno)\r\n # print(obj['xmin'])\r\n anno[0,0] = int(obj['xmin'])\r\n anno[0,1] = int(obj['ymin'])\r\n anno[0,2] = int(obj['xmax'])\r\n anno[0,3] = int(obj['ymax'])\r\n anno[0,4] = classes[obj['class']]\r\n annos = np.append(annos,anno.copy(),axis=0)\r\n return annos\r\ndef detection_collate(batch):\r\n targets = []\r\n imgs = []\r\n for sample in batch:\r\n\r\n imgs.append(sample['img'])\r\n targets.append(torch.FloatTensor(sample['annot']))\r\n return torch.stack(imgs, 0), targets\r\n# dataset = VOC2007(imgsRoot='../dataset/VOC2007/JPEGImages',labelsRoot='../dataset/VOC2007/Annotations',transforms=transforms)\r\n# # print(dataset[0])\r\n# data_iter = data.DataLoader(dataset=dataset,batch_size=1,shuffle=False,drop_last=True,collate_fn=detection_collate,pin_memory=True)\r\n# for i,data in enumerate(data_iter):\r\n# if i == 1:\r\n# break\r\n# print(data[0].size())\r\n# label = data[1][0]\r\n# print(label.size())\r\n\r\n","sub_path":"utils/loadVOCData.py","file_name":"loadVOCData.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"513739855","text":"import time\r\nfrom random import randint\r\nfrom random import choice\r\nimport random\r\n\r\n# familias:\r\nma = \"metales alcalinos\"\r\nmt = \"metales de transicion\"\r\nmat = \"metales alcalinoterreos\"\r\nml = \"metaloides\"\r\nsm = \"semiconductores\"\r\nac = \"actinidos\"\r\nnm = \"no metales\"\r\nha = \"halogenos\"\r\nla = \"lantanidos\"\r\ngn = \"gases nobles\"\r\njugadora = \"\"\r\njugadorb = \"\"\r\npjugadora = 0\r\npjugadorb = 0\r\naleatorio = [\"simbolo\", \"nombre\", \"numero\", \"familia\", \"peso\", \"radiactivo\"]\r\nsimbolo_m = [\"H\", \"He\", \"Li\", \"Be\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Na\", \"Mg\", \"Al\", \"Si\", \"P\", \"S\", \"Cl\", \"Ar\", \"K\",\r\n \"Ca\", \"Sc\", \"Ti\", \"V\", \"Cr\", \"Mn\", \"Fe\", \"Co\", \"Ni\", \"Cu\", \"Zn\", \"Ga\", \"Ge\", \"As\", \"Se\", \"Br\", \"Kr\", \"Rb\",\r\n \"Sr\", \"Y\", \"Zr\", \"Nb\", \"Mo\", \"Tc\", \"Ru\", \"Rh\", \"Pd\", \"Ag\", \"Cd\", \"In\", \"Sn\", \"Sb\", \"Te\", \"I\", \"Xe\", \"Cs\",\r\n \"Ba\", \"La\", \"Ce\", \"Pr\", \"Nd\", \"Pm\", \"Sm\", \"Eu\", \"Gd\", \"Tb\", \"Dy\", \"Ho\", \"Er\", \"Tm\", \"Yb\", \"Lu\", \"Hf\", \"Ta\",\r\n \"W\", \"Re\", \"Os\", \"Ir\", \"Pt\", \"Au\", \"Hg\", \"Tl\", \"Pb\", \"Bi\", \"Po\", \"At\", \"Rn\", \"Fr\", \"Ra\", \"Ac\", \"Th\", \"Pa\",\r\n \"U\", \"Np\", \"Pu\", \"Am\", \"Cm\", \"Bk\", \"Cf\", \"Es\", \"Fm\", \"Md\", \"No\", \"Lr\", \"Rf\", \"Db\", \"Sg\", \"Bh\", \"Hs\", \"Mt\",\r\n \"Ds\", \"Rg\", \"Cn\", \"Nh\", \"Fl\", \"Mc\", \"Lv\", \"Ts\", \"Og\", \"Uu\", \"Ub\"]\r\nnombre_m = [\"Hidrogeno\", \"Helio\", \"Litio\", \"Berilio\", \"Boro\", \"Carbono\", \"Nitrogeno\", \"Oxigeno\", \"Fluor\", \"Neon\",\r\n \"Sodio\", \"Magnesio\", \"Aluminio\", \"Silicio\", \"Fosforo\", \"Azufre\", 
\"Cloro\", \"Argon\", \"Potasio\", \"Calcio\",\r\n \"Escandio\", \"Titanio\", \"Vanadio\", \"Cromo\", \"Manganeso\", \"Hierro\", \"Cobalto\", \"Niquel\", \"Cobre\", \"Cinc\",\r\n \"Galio\", \"Germanio\", \"Arsenico\", \"Selenio\", \"Bromo\", \"Kripton\", \"Rubidio\", \"Estroncio\", \"Itrio\", \"Circonio\",\r\n \"Niobio\", \"Molibdeno\", \"Tecnecio\", \"Rutenio\", \"Rodio\", \"Paladio\", \"Plata\", \"Cadmio\", \"Indio\", \"Estaño\",\r\n \"Antimonio\", \"Telurio\", \"Yodo\", \"Xenon\", \"Cesio\", \"Bario\", \"Lantano\", \"Cerio\", \"Praseodimio\", \"Neodimio\",\r\n \"Prometio\", \"Samario\", \"Europio\", \"Gadolinio\", \"Terbio\", \"Disprosio\", \"Holmio\", \"Erbio\", \"Tulio\", \"Iterbio\",\r\n \"Lutecio\", \"Hafnio\", \"Tantalo\", \"Tungsteno\", \"Renio\", \"Osnio\", \"Iridio\", \"Platino\", \"Oro\", \"Mercurio\",\r\n \"Talio\", \"Plomo\", \"Bismuto\", \"Polonio\", \"Astato\", \"Radon\", \"Francio\", \"Radio\", \"Actinio\", \"Torio\",\r\n \"Protactinio\", \"Uranio\", \"Neptunio\", \"Plutonio\", \"Americio\", \"Curio\", \"Berkelio\", \"Californio\", \"Einstenio\",\r\n \"Fermio\", \"Mendelevio\", \"Nobelio\", \"Lawrencio\", \"Rutherfordio\", \"Dubnio\", \"Seaborgio\", \"Bohrio\", \"Hasio\",\r\n \"Meitnerio\", \"Darmstatio\", \"Roentgenio\", \"Copernicio\", \"Nihonio\", \"Flerovio\", \"Moscovio\", \"Livermorio\",\r\n \"Teneso\", \"Oganeson\", \"Ununennio\", \"Unbinilio\"]\r\nsimbolo = [\"h\", \"he\", \"li\", \"be\", \"b\", \"c\", \"n\", \"o\", \"f\", \"ne\", \"na\", \"mg\", \"al\", \"si\", \"p\", \"s\", \"cl\", \"ar\", \"k\",\r\n \"ca\", \"sc\", \"ti\", \"v\", \"cr\", \"mn\", \"fe\", \"co\", \"ni\", \"cu\", \"zn\", \"ga\", \"ge\", \"as\", \"se\", \"br\", \"kr\", \"rb\",\r\n \"sr\", \"y\", \"zr\", \"nb\", \"mo\", \"tc\", \"ru\", \"rh\", \"pd\", \"ag\", \"cd\", \"in\", \"sn\", \"sb\", \"te\", \"i\", \"xe\", \"cs\",\r\n \"ba\", \"la\", \"ce\", \"pr\", \"nd\", \"pm\", \"sm\", \"eu\", \"gd\", \"tb\", \"dy\", \"ho\", \"er\", \"tm\", \"yb\", \"lu\", \"hf\", \"ta\",\r\n \"w\", \"re\", \"os\", \"ir\", \"pt\", \"au\", \"hg\", \"tl\", \"pb\", \"bi\", \"po\", \"at\", \"rn\", \"fr\", \"ra\", \"ac\", \"th\", \"pa\",\r\n \"u\", \"np\", \"pu\", \"am\", \"cm\", \"bk\", \"cf\", \"es\", \"fm\", \"md\", \"no\", \"lr\", \"rf\", \"db\", \"sg\", \"bh\", \"hs\", \"mt\",\r\n \"ds\", \"rg\", \"cn\", \"nh\", \"fl\", \"mc\", \"lv\", \"ts\", \"og\", \"uu\", \"ub\"]\r\nnombre = [\"hidrogeno\", \"helio\", \"litio\", \"berilio\", \"boro\", \"carbono\", \"nitrogeno\", \"oxigeno\", \"fluor\", \"neon\", \"sodio\",\r\n \"magnesio\", \"aluminio\", \"silicio\", \"fosforo\", \"azufre\", \"cloro\", \"argon\", \"potasio\", \"calcio\", \"escandio\",\r\n \"titanio\", \"vanadio\", \"cromo\", \"manganeso\", \"hierro\", \"cobalto\", \"niquel\", \"cobre\", \"cinc\", \"galio\",\r\n \"germanio\", \"arsenico\", \"selenio\", \"bromo\", \"kripton\", \"rubidio\", \"estroncio\", \"itrio\", \"zirconio\", \"niobio\",\r\n \"molibdeno\", \"tecnecio\", \"rutenio\", \"rodio\", \"paladio\", \"plata\", \"cadmio\", \"indio\", \"estaño\", \"antimonio\",\r\n \"telurio\", \"yodo\", \"xenon\", \"cesio\", \"bario\", \"lantano\", \"cerio\", \"praseodimio\", \"neodimio\", \"prometio\",\r\n \"samario\", \"europio\", \"gadolinio\", \"terbio\", \"disprosio\", \"holmio\", \"erbio\", \"tulio\", \"iterbio\", \"lutecio\",\r\n \"hafnio\", \"tantalo\", \"tungsteno\", \"renio\", \"osnio\", \"iridio\", \"platino\", \"oro\", \"mercurio\", \"talio\", \"plomo\",\r\n \"bismuto\", \"polonio\", \"astato\", \"radon\", \"francio\", \"radio\", \"actinio\", \"torio\", 
\"protactinio\", \"uranio\",\r\n \"neptunio\", \"plutonio\", \"americio\", \"curio\", \"berkelio\", \"californio\", \"einstenio\", \"fermio\", \"mendelevio\",\r\n \"nobelio\", \"lawrencio\", \"rutherfordio\", \"dubnio\", \"seaborgio\", \"bohrio\", \"hasio\", \"meitnerio\", \"darmstatio\",\r\n \"roentgenio\", \"copernicio\", \"nihonio\", \"flerovio\", \"moscovio\", \"livermorio\", \"teneso\", \"oganeson\",\r\n \"ununennio\", \"unbinilio\"]\r\nnumero = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\r\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,\r\n 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,\r\n 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,\r\n 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120]\r\nfamilia = [nm, gn, ma, mat, ml, nm, nm, nm, ha, gn, ma, mat, sm, ml, nm, nm, ha, gn, ma, mat, mt, mt, mt, mt, mt, mt,\r\n mt, mt, mt, mt, sm, ml, ml, nm, ha, gn, ma, mat, mt, mt, mt, mt, mt, mt, mt, mt, mt, mt, sm, sm, ml, ml, ha,\r\n ma, ma, mat, la, la, la, la, la, la, la, la, la, la, la, la, la, la, la, mt, mt, mt, mt, mt, mt, mt, mt, mt,\r\n sm, sm, sm, ml, ha, gn, ma, ma, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, ac, mt, mt, mt, mt,\r\n mt, mt, mt, mt, mt, sm, sm, sm, sm, ha, gn, ma, mat]\r\npeso = [1, 4, 7, 9, 10.8, 12, 14, 16, 19, 20, 23, 24, 27, 28, 31, 32, 35, 40, 39, 40, 45, 48, 51, 52, 55, 55.8, 59,\r\n 58.6, 63.5, 65.4, 69.7, 72.6, 65, 79, 80, 83.7, 85, 87.6, 89, 91, 93, 96, 99, 101, 103, 106, 107.8, 112.4,\r\n 114.8, 118.7, 121.7, 127.6, 127, 131, 133, 137, 139, 140, 141, 144, 147, 150, 152, 157, 159, 162.5, 165, 167,\r\n 169, 173, 175, 178.4, 181, 183.8, 186, 190, 192, 195, 197, 200, 204, 207, 209, 209, 210, 222, 223, 226, 227,\r\n 232, 231, 238, 237, 244, 243, 247, 247, 251, 252, 257, 258, 259, 266, 267, 268, 272, 274, 276, 278, 281, 283,\r\n 285, 287, 289, 291, 293, 294, 294, 316, 320]\r\nperiodo = [\"1\", \"1\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"4\", \"4\",\"4\", \"4\",\r\n\t \"4\", \"4\", \"4\", \"4\", \"4\", \"4\", \"4\", \"4\",\"4\", \"4\", \"4\", \"4\", \"4\", \"4\", \"5\", \"5\", \"5\", \"5\", \"5\", \"5\", \"5\", \"5\", \r\n\t\"5\", \"5\", \"5\", \"5\", \"5\", \"5\",\"5\", \"5\", \"5\", \"5\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\",\"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\",\r\n\t \"6\", \"6\", \"6\",\"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\",\"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"7\", \"7\", \"7\", \"7\",\r\n\t\"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\",\"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\", \"7\",\"7\", \"7\", \"7\", \"7\", \"7\", \r\n\t\"7\", \"7\", \"7\",\"7\", \"7\", \"8\", \"8\"]\r\ngrupo = [\"1\", \"18\", \"1\", \"2\", \"13\", \"14\", \"15\", \"16\", \"17\", \"10\", \"1\", \"2\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"1\",\r\n\t \"2\",\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\",\"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"1\", \"2\", \"3\", \"4\",\r\n\t \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\",\"15\", \"16\", \"17\", \"18\", \"1\", \"2\", \"\", \"\", \"\", \"\",\"\", \"\", \r\n\t\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", 
\"10\", \"11\", \"12\",\"13\", \"14\", \"15\", \"16\", \"17\",\r\n\t \"18\", \"1\", \"2\", \"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"4\", \"5\", \"6\", \"7\", \"8\",\"9\", \"10\", \"11\",\r\n\t \"12\", \"13\", \"14\", \"15\", \"16\",\"17\", \"18\", \"1\", \"2\"]\r\n# Alietoriedad\r\nselec1 = (random.randint(1, 20))\r\nselec2 = (random.randint(1, 20))\r\nselec3 = (random.randint(1, 54))\r\nselec4 = (random.randint(1, 40))\r\nselec5g = (random.randint(1, 18))\r\nselec5p = (random.randint(4, 5))\r\nselec6 = (random.randint(1, 60))\r\nselec7 = (random.randint(1, 80))\r\nselec8 = (random.randint(1, 56))\r\nselec9 = (random.randint(1, 100))\r\nselec10g = (random.randint(1, 18))\r\nselec10p = (random.randint(4, 5))\r\npjugadora = 0\r\npjugadorb = 0\r\n\r\n#Verificador de Pregunta 5\r\nif selec5p == 4:\r\n rpreguntai5 = 18\r\nelse: rpreguntai5 = 36\r\nrpreguntaf5 = ((rpreguntai5+selec5g)-1)\r\n\r\n#Verificador de Pregunta 10\r\nif selec10p == 4:\r\n rpreguntai10 = 18\r\nelse: rpreguntai10 = 36\r\nrpreguntaf10 = ((rpreguntai10+selec10g)-1)\r\n\r\n\r\n# Preguntas:\r\n\r\npregunta1 = (\"¿Cuál es el nombre del elemento: \" + simbolo_m[selec1]+\"?\")\r\nrpregunta1 = (nombre[selec1])\r\n\r\npregunta2 = (\"¿En qué familia está el \" + nombre_m[selec2]+\"?\")\r\nrpregunta2 = (familia[selec2])\r\n\r\npregunta3 = (\"¿En qué grupo está el \" + nombre_m[selec3]+\"?\")\r\nrpregunta3 = (grupo[selec3])\r\n\r\npregunta4 = (\"¿En qué periodo está el \" + nombre_m[selec4]+\"?\")\r\nrpregunta4 = (periodo[selec4])\r\n\r\npregunta5 = (\"¿Cuál es simbolo del elemento ubicado en el grupo \" + str(selec5g) + \" y periodo \" + str(selec5p))\r\nrpregunta5 = simbolo[rpreguntaf5]\r\n\r\npregunta6 = (\"Cuál es el nombre del elemento: \" + str(selec6+1)+\"?\")\r\nrpregunta6 = (nombre[selec6])\r\n\r\npregunta7 = (\"¿En qué familia está el \" + simbolo_m[selec7]+\"?\")\r\nrpregunta7 = (familia[selec7])\r\n\r\npregunta8 = (\"¿En qué grupo está el \" + simbolo_m[selec8]+\"?\")\r\nrpregunta8 = (grupo[selec8])\r\n\r\npregunta9 = (\"¿En qué periodo está el \" + nombre_m[selec9]+\"?\")\r\nrpregunta9 = (periodo[selec9])\r\n\r\npregunta10 = (\"¿Cuál es nombre del elemento ubicado en el grupo \" + str(selec10g) + \" y periodo \" + str(selec10p)+\"?\")\r\nrpregunta10 = nombre[rpreguntaf10]\r\n\r\n\r\nopcion = \"\"\r\n\r\nwhile opcion != \"X\":\r\n print(\" ███████████████████████████████\")\r\n print(\" █ █\")\r\n print(\" █ Juego de la tabla periódica █\")\r\n print(\" █ █\")\r\n print(\" ███████████████████████████████\")\r\n print(\"\")\r\n print(\" █ Empieza a jugar █\")\r\n print(\"\")\r\n print(\"Para empezar a jugar precione J\")\r\n print(\"\")\r\n print(\" █ ¿Quieres practicar? 
█\")\r\n print(\"\")\r\n print(\"Para ingresar el símbolo del elemento, introduzca S\")\r\n print(\"Para ingresar el número del elemento, introduzca NU\")\r\n print(\"Para ingresar el nombre del elemento, introduzca NO\")\r\n print(\"Para ver las reglas del juego, introduzca R\")\r\n print(\"\")\r\n print(\" █ Ingresa la Letra █\")\r\n print(\"\")\r\n opcion = str(input())\r\n\r\n if opcion.lower() == \"creditos\":\r\n print(\"Créditos: Ahmad Hussen, Esmeralda Waiss, Sebastián Bonilla y Gerhard Freiberg.\")\r\n\t\t\r\n elif opcion.lower() == \"r\":\r\n print(\"REGLAS:\")\r\n print(\"-> Cuando te pregunte por qué familia está deberás poner de esta manera:\")\r\n print(\" metales alcalinos, metales de transicion, metales alcalinoterreos\")\r\n print(\" metaloides, semiconductores, actinidos, no metales, halogenos\")\r\n print(\" lantanidos\"+\"gases nobles\")\r\n print(\"\")\r\n print(\"->Los grupos están del 1 al 18\")\r\n\t\t\r\n elif opcion.lower() == \"s\":\r\n print(\"ingrese el símbolo del elemento: \")\r\n val = str(input())\r\n x = simbolo.index(val.lower())\r\n\r\n print(\"nombre del elemento: \", nombre_m[x])\r\n print(\"número atómico: \", numero[x])\r\n print(\"familia: \", familia[x])\r\n print(\"peso atómico: \", peso[x])\r\n\r\n\r\n elif opcion.lower() == \"nu\":\r\n print(\"ingrese el número del átomo: \")\r\n val = int(input())\r\n x = numero.index(val)\r\n print(\"\")\r\n print(\"nombre del elemento: \", nombre_m[x])\r\n print(\"símbolo del elemento: \", simbolo_m[x])\r\n print(\"familia: \", familia[x])\r\n print(\"peso atómico: \", peso[x])\r\n\r\n elif opcion.lower() == \"no\":\r\n print(\"ingrese el nombre del átomo: \")\r\n val = str(input())\r\n x = nombre.index(val.lower())\r\n print(\"\")\r\n print(\"símbolo del elemento: \", simbolo_m[x])\r\n print(\"número atómico: \", numero[x])\r\n print(\"familia: \", familia[x])\r\n print(\"peso atómico: \", peso[x])\r\n\r\n elif opcion.lower() == \"j\":\r\n# Primera parte del juego\r\n print(\"Nombre del primer jugador\")\r\n jugadora = str(input())\r\n time.sleep(0.2)\r\n print(\"\\n\"+\"Nombre del segundo jugador\")\r\n jugadorb = str(input())\r\n time.sleep(1)\r\n print(\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\")\r\n # Pregunta 1\r\n print(\"\\n\" + \"Turno de \" + jugadora + \" (Pregunta 1)\")\r\n print(pregunta1)\r\n input1 = input()\r\n if input1.lower() == rpregunta1:\r\n print(\"Bien\")\r\n pjugadora +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta1) + \"\\n\")\r\n\r\n # Pregunta 2\r\n print(\"\\n\" + \"Truno de \" + jugadorb + \" (Pregunta 2)\")\r\n print(pregunta2)\r\n input2 = input()\r\n if input2.lower() == rpregunta2:\r\n print(\"Bien\")\r\n pjugadorb +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta2) + \"\\n\")\r\n\r\n # Pregunta 3\r\n print(\"\\n\" + \"Turno de \" + jugadora + \" (Pregunta 3)\")\r\n print(pregunta3)\r\n input3 = input()\r\n if input3.lower() == rpregunta3:\r\n print(\"Bien\")\r\n pjugadora +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta3) + \"\\n\")\r\n\r\n # Pregunta 4\r\n print(\"\\n\" + \"Truno de \" + jugadorb + \" (Pregunta 4)\")\r\n print(pregunta4)\r\n input4 = input()\r\n if 
input4.lower() == rpregunta4:\r\n print(\"Bien\")\r\n pjugadorb +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta4) + \"\\n\")\r\n\r\n # Pregunta 5\r\n print(\"\\n\" + \"Turno de \" + jugadora + \" (Pregunta 5)\")\r\n print(pregunta5)\r\n input5 = input()\r\n if input5.lower() == rpregunta5:\r\n print(\"Bien\")\r\n pjugadora +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta5) + \"\\n\")\r\n\r\n # Pregunta 6\r\n print(\"\\n\" + \"Turno de \" + jugadorb + \" (Pregunta 6)\")\r\n print(pregunta6)\r\n input6 = input()\r\n if input6.lower() == rpregunta6:\r\n print(\"Bien\")\r\n pjugadorb +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta6) + \"\\n\")\r\n\r\n # Pregunta 7\r\n print(\"\\n\" + \"Turno de \" + jugadora + \" (Pregunta 7)\")\r\n print(pregunta7)\r\n input7 = input()\r\n if input7.lower() == rpregunta7:\r\n print(\"Bien\")\r\n pjugadora +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta7) + \"\\n\")\r\n\r\n # Pregunta 8\r\n print(\"\\n\" + \"Truno de \" + jugadorb + \" (Pregunta 8)\")\r\n print(pregunta8)\r\n input8 = input()\r\n if input8.lower() == rpregunta8:\r\n print(\"Bien\")\r\n pjugadorb +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta8) + \"\\n\")\r\n\r\n # Pregunta 9\r\n print(\"\\n\" + \"Turno de \" + jugadora + \" (Pregunta 9)\")\r\n print(pregunta9)\r\n input9 = input()\r\n if input9.lower() == rpregunta9:\r\n print(\"Bien\")\r\n pjugadora +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta9) + \"\\n\")\r\n\r\n # Pregunta 10\r\n print(\"\\n\" + \"Turno de \" + jugadorb + \" (Pregunta 10)\")\r\n print(pregunta10)\r\n input10 = input()\r\n if input10.lower() == rpregunta10:\r\n print(\"Bien\")\r\n pjugadorb +=1\r\n else:\r\n print(\"La respuesta correcta era \" + str(rpregunta10) + \"\\n\")\r\n\r\n\r\n\r\n\r\n # Resultados\r\n print(\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\"+\"Resultado final: \")\r\n print(jugadora + \" \" + str(pjugadora))\r\n print(jugadorb + \" \" + str(pjugadorb))\r\n\r\n #Timer\r\n time.sleep(5)\r\n print(\"\")\r\n print(\"█████████ █ ██████ █ █ \")\r\n time.sleep(0.1)\r\n print(\" █ █ █ █ █ █ █ █ \")\r\n time.sleep(0.1)\r\n print(\" █ █ █ █ █ █ █ █\")\r\n time.sleep(0.1)\r\n print(\" █ ███████ ██████ █ ███████\")\r\n time.sleep(0.1)\r\n print(\" █ █ █ █ █ █ █ █\")\r\n time.sleep(0.1)\r\n print(\" █ █ █ ██████ ███████ █ █\")\r\n print(\"\")\r\n print(\"██████ ███████ ██████ █████ █████ ██████ █████ █████ █ \")\r\n time.sleep(0.1)\r\n print(\"█ █ █ █ █ █ █ █ █ █ █ █ █ █ █ \")\r\n time.sleep(0.1)\r\n print(\"█ █ █ █ █ █ █ █ █ █ █ █ █ █\")\r\n time.sleep(0.1)\r\n print(\"██████ ███████ █████ █ █ █ █ █ █ █ ███████\")\r\n time.sleep(0.1)\r\n print(\"█ █ █ █ █ █ █ █ █ █ █ █ █ █\")\r\n time.sleep(0.1)\r\n print(\"█ ███████ █ █ █████ █████ ██████ █████ █████ █ █\")\r\n\r\n print(\"¿Quieres volver a hacer las mismas preguntas?\")\r\n print(\"Presione C para continuar o cualquier letra para salir.\")\r\n salir = str(input())\r\n print(salir)\r\n if \"C\"or\"c\" == salir:\r\n print(\"\")\r\n print(\" ███████████████████████████████\")\r\n print(\" █ █\")\r\n print(\" █ Juego de la tabla periódica █\")\r\n print(\" █ █\")\r\n print(\" ███████████████████████████████\")\r\n print(\"\")\r\n print(\" █ Empieza a jugar █\")\r\n print(\"\")\r\n print(\"Para empezar a jugar precione J\")\r\n print(\"\")\r\n print(\" █ ¿Quieres practicar? 
█\")\r\n print(\"\")\r\n print(\"para ingresar el símbolo del elemento, introduzca S\")\r\n print(\"para ingresar el número del elemento, introduzca NU\")\r\n print(\"para ingresar el nombre del elemento, introduzca NO\")\r\n print(\"Para ver las reglas del juego, introduzca R\")\r\n print(\"\")\r\n print(\" █ Ingresa la Letra █\")\r\n print(\"\")\r\n opcion = str(input())\r\n\r\n else: break\r\n","sub_path":"Juego Tabla periodica.py","file_name":"Juego Tabla periodica.py","file_ext":"py","file_size_in_byte":19543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"517200060","text":"import os\nimport sys\nfrom os import path\n\ndir = \"/Users/fangcai/Downloads/data/IPCAS_7_0026104_0026119\"\n\nfor root, dirs, files in os.walk(dir, topdown=True):\n print('root:', root)\n print('dirs:', dirs)\n print('files:', files)\n parts=root.split('/')\n print('parts:', parts)\n print('parts[-1]:', parts[-1])\n\n if parts[-1]=='anat_1':\n new_r = \"/\".join(parts[:-1])\n print('new_r:', new_r)\n new_d = f\"{new_r}/anat\"\n print('new_d:', new_d)\n os.rename(root, new_d)\n \n if 'anat.nii.gz' in files:\n old_f = f\"{new_d}/anat.nii.gz\"\n print('old_f:', old_f)\n new_f = f\"{new_d}/{parts[-3]}_{parts[-2]}_T1w.nii.gz\"\n print(\"new_f:\", new_f)\n os.rename(old_f, new_f)\n","sub_path":"utils/batch_anat.py","file_name":"batch_anat.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"289519229","text":"import time\nimport hashlib\n\n\ndef current_time_str():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n\n\ndef get_timestamp_from_str(time_str: str) -> float:\n try:\n time_stamp = time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S'))\n except ValueError:\n time_stamp = 0\n except TypeError:\n time_stamp = 0\n return time_stamp\n\n\ndef get_file_md5(filename):\n with open(filename, \"rb\") as f:\n m = hashlib.md5()\n m.update(f.read())\n return m.hexdigest()\n","sub_path":"photomanager/lib/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"162587204","text":"#-*- coding: utf-8 -*-\n\ndef electroball(n, first_speed, second_speed):\n if n == '1':\n f_speed = first_speed\n s_speed = second_speed\n elif n == '2':\n f_speed = second_speed\n s_speed = first_speed\n speed = float(s_speed) / float(f_speed)\n if speed <= 0.25:\n dmg = 150\n elif speed <= 0.3:\n dmg = 120\n elif speed <= 0.5:\n dmg = 80\n else:\n dmg = 60\n return dmg\n\n\ndef gyroball(n, first_speed, second_speed):\n if n == '1':\n f_speed = first_speed\n s_speed = second_speed\n elif n == '2':\n f_speed = second_speed\n s_speed = first_speed\n dmg = float(s_speed) / float(f_speed)\n dmg = dmg * 25\n if dmg > 150:\n dmg = 150\n return dmg\n\n\ndef second_breath(hp, max_hp):\n hp = float(hp) / float(max_hp)\n hp = hp * 100\n if hp <= 3:\n dmg = 200\n elif hp <= 10:\n dmg = 150\n elif hp <= 20:\n dmg = 100\n elif hp <= 33:\n dmg = 80\n elif hp <= 67:\n dmg = 40\n else:\n dmg = 20\n return dmg\n\n\ndef wring_out(hp, max_hp):\n hp = float(hp) / float(max_hp)\n hp = hp * 100\n if hp <= 25:\n dmg = 20\n elif hp <= 50:\n dmg = 60\n else:\n dmg = 120\n return 
dmg\n","sub_path":"abilities/abilities.py","file_name":"abilities.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"234530098","text":"import samples_repository\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nALPHABET_LOW = [\"g\",\"p\",\"q\",\"s_bassa\"]\nALPHABET_HIGH = [\"d_alta\",\"s_alta\",\"b\",\"f\",\"h\",\"l\"]\nALPHABET_MID = [\"a\",\"c\",\"d_mediana\",\"e\",\"i\",\"m\",\"n\",\"o\",\"r\",\"s_mediana\",\"t\",\"u\"]\nALPHABET_ALL = [\"a\",\"c\",\"d_mediana\",\"e\",\"i\",\"m\",\"n\",\"o\",\"r\",\"s_mediana\",\"t\",\"u\",\\\n \"d_alta\",\"s_alta\",\"b\",\"f\",\"h\",\"l\",\"g\",\"p\",\"q\",\"s_bassa\"]\n\npos_neg_ratio=1.0\ntrain_test_ratio=6.0/7.0\nneg_labeled_height_ratio = 1.0/2.0\n\ndef split_by_ratio(array, ratio):\n first_half_size, second_half_size = calc_ratio(len(array), ratio)\n first_half, second_half = split(array, first_half_size)\n assert len(first_half) == first_half_size\n assert len(second_half) == second_half_size\n assert (len(first_half) + len(second_half)) == len(array)\n return (first_half, second_half)\n\ndef calc_ratio(val, ratio):\n first_half_size = int(val*ratio)\n second_half_size = val - first_half_size\n assert (first_half_size + second_half_size) == val\n return (first_half_size, second_half_size)\n\ndef split(array, index):\n first_half = array[:index]\n second_half = array[index:]\n return (first_half, second_half)\n\n\ndef generate_positive_and_negative_labeled(char, pos_neg_ratio=pos_neg_ratio, train_test_ratio=train_test_ratio, verbose=1):\n # Calcolo i samples e le labels positivi per il training e il test\n positive_samples = samples_repository.get_all_positive_samples_by_char(char)\n positive_samples_train, positive_samples_test = split_by_ratio(positive_samples, train_test_ratio)\n positive_samples_train_labels = np.ones(positive_samples_train.shape[0], dtype='uint8')\n positive_samples_test_labels = np.ones(positive_samples_test.shape[0], dtype='uint8')\n\n if verbose == 1:\n print (\"Trovati\", positive_samples.shape[0], \"esempi positivi per il carattere\", char.upper(), \".\")\n print (\"Campioni di training:\", positive_samples_train.shape[0], \"\\tCampioni di test:\", positive_samples_test.shape[0])\n\n # Calolo i samples negativi e le labels per il training e per il test\n neg_samples_n = int(positive_samples.shape[0]*pos_neg_ratio)\n negative_samples = samples_repository.get_n_negative_labeled_samples_by_char(char, neg_samples_n)\n negative_samples_train, negative_samples_test = split_by_ratio(negative_samples, train_test_ratio)\n negative_samples_train_labels = np.zeros(negative_samples_train.shape[0], dtype='uint8')\n negative_samples_test_labels = np.zeros(negative_samples_test.shape[0], dtype='uint8')\n\n if verbose == 1:\n print (\"Richiesti\", neg_samples_n, \"esempi negativi: trovati\", negative_samples.shape[0], \"generici.\")\n\n assert neg_samples_n == negative_samples.shape[0]\n\n train_imgs = np.append(positive_samples_train, negative_samples_train, axis=0)\n train_labels = np.append(positive_samples_train_labels, negative_samples_train_labels, axis=0)\n\n if verbose == 1:\n print (\"Numero totale di campioni di training:\", train_imgs.shape[0])\n\n test_imgs = np.append(positive_samples_test, negative_samples_test, axis=0)\n test_labels = np.append(positive_samples_test_labels, negative_samples_test_labels, axis=0)\n\n if verbose == 1:\n print (\"Numero totale di campioni di test:\", test_imgs.shape[0])\n\n return 
(train_imgs, train_labels, test_imgs, test_labels)\n\n\ndef generate_half_labeled_half_height(char, char_type, \n pos_neg_ratio=pos_neg_ratio, train_test_ratio=train_test_ratio,\n neg_labeled_height_ratio=neg_labeled_height_ratio):\n\n # Calcolo i samples e le labels positivi per il training e il test\n positive_samples = samples_repository.get_all_positive_samples_by_char(char)\n positive_samples_train, positive_samples_test = split_by_ratio(positive_samples, train_test_ratio)\n positive_samples_train_labels = np.ones(positive_samples_train.shape[0], dtype='uint8')\n positive_samples_test_labels = np.ones(positive_samples_test.shape[0], dtype='uint8')\n\n print (\"Trovati\", positive_samples.shape[0], \"esempi positivi per il carattere\", char.upper(), \".\")\n print (\"Campioni di training:\", positive_samples_train.shape[0], \"\\tCampioni di test:\", positive_samples_test.shape[0])\n\n # Calolo i samples negativi e le labels per il training e per il test\n neg_samples_n = int(positive_samples.shape[0]*pos_neg_ratio)\n # divisi in generici e per altezza\n neg_labeled_samples_n, neg_height_samples_n = calc_ratio(neg_samples_n, neg_labeled_height_ratio)\n neg_labeled_samples = samples_repository.get_n_negative_labeled_samples_by_char(char, neg_labeled_samples_n)\n neg_height_samples = samples_repository.get_n_negative_samples_by_height_and_char(char, char_type, neg_height_samples_n)\n\n print (\"Richiesti\", neg_samples_n, \"esempi negativi: trovati\", neg_labeled_samples.shape[0], \"generici e\", neg_height_samples.shape[0], \"per altezza.\")\n\n assert neg_labeled_samples_n == neg_labeled_samples.shape[0]\n assert neg_height_samples_n == neg_height_samples.shape[0]\n\n neg_labeled_samples_train, neg_labeled_samples_test = split_by_ratio(neg_labeled_samples, train_test_ratio)\n neg_height_samples_train , neg_height_samples_test = split_by_ratio(neg_height_samples, train_test_ratio)\n\n print (\"Samples negativi generici in training:\", neg_labeled_samples_train.shape[0], \"\\tin test:\", neg_labeled_samples_test.shape[0])\n print (\"Samples negativi per altezza in training:\", neg_height_samples_train.shape[0], \"\\tin test:\", neg_height_samples_test.shape[0])\n\n neg_samples_train = np.append(neg_labeled_samples_train, neg_height_samples_train, axis=0)\n neg_samples_test = np.append(neg_labeled_samples_test, neg_height_samples_test, axis=0)\n neg_samples_train_labels = np.zeros(neg_samples_train.shape[0], dtype='uint8')\n neg_samples_test_labels = np.zeros(neg_samples_test.shape[0], dtype='uint8')\n\n print (\"Campioni di training:\", neg_samples_train.shape[0], \"\\tCampioni di test:\", neg_samples_test.shape[0])\n\n train_imgs = np.append(positive_samples_train, neg_samples_train, axis=0)\n train_labels = np.append(positive_samples_train_labels, neg_samples_train_labels, axis=0)\n\n print (\"Numero totale di campioni di training:\", train_imgs.shape[0])\n\n test_imgs = np.append(positive_samples_test, neg_samples_test, axis=0)\n test_labels = np.append(positive_samples_test_labels, neg_samples_test_labels, axis=0)\n\n print (\"Numero totale di campioni di test:\", test_imgs.shape[0])\n\n return (train_imgs, train_labels, test_imgs, test_labels)\n\ndef generate_all_for_char_with_class(char, classification,\n train_test_ratio=train_test_ratio, verbose=1):\n\n # Calcolo i samples e le labels positivi per il training e il test\n positive_samples = samples_repository.get_all_positive_samples_by_char(char)\n positive_samples_train, positive_samples_test = split_by_ratio(positive_samples, 
train_test_ratio)\n positive_samples_train_labels = np.empty(positive_samples_train.shape[0], dtype='uint8')\n positive_samples_train_labels.fill(classification)\n positive_samples_test_labels = np.empty(positive_samples_test.shape[0], dtype='uint8')\n positive_samples_test_labels.fill(classification)\n\n if(verbose == 1):\n print (\"Trovati\", positive_samples.shape[0], \"esempi positivi per il carattere\", char.upper(), \".\")\n print (\"Campioni di training:\", positive_samples_train.shape[0], \"\\tCampioni di test:\", positive_samples_test.shape[0])\n\n train_imgs = positive_samples_train\n train_labels = positive_samples_train_labels # not actually labels, it's the class\n\n if(verbose == 1):\n print (\"Numero totale di campioni di training:\", train_imgs.shape[0])\n\n test_imgs = positive_samples_test\n test_labels = positive_samples_test_labels\n\n if(verbose == 1):\n print (\"Numero totale di campioni di test:\", test_imgs.shape[0])\n\n return (train_imgs, train_labels, test_imgs, test_labels)\n\n\ndef generate_all_chars_with_class(chars = ALPHABET_ALL,\n train_test_ratio=train_test_ratio, plot=False, verbose=1):\n \n sizes = np.zeros(len(chars))\n \n classifications = range(len(chars)) # this way chars[classification] = \"a\" if classification == 0\n \n (train_imgs, train_class, test_imgs, test_class) = generate_all_for_char_with_class(chars[0],\\\n classifications[0], \\\n verbose=verbose)\n sizes[0] = train_imgs.shape[0] + test_imgs.shape[0]\n \n for i in classifications:\n if (i>0):\n (train_imgs_prov, train_class_prov, test_imgs_prov, test_class_prov) = \\\n generate_all_for_char_with_class(chars[i],\\\n classifications[i], verbose=verbose)\n \n sizes[i] = train_imgs_prov.shape[0] + test_imgs_prov.shape[0]\n \n train_imgs = np.append(train_imgs, train_imgs_prov, axis=0)\n train_class = np.append(train_class, train_class_prov, axis=0)\n test_imgs = np.append(test_imgs, test_imgs_prov, axis=0)\n test_class = np.append(test_class, test_class_prov, axis=0)\n \n chars = np.asarray(chars)\n \n if plot:\n plt.plot(sizes, 'ro')\n plt.xticks(classifications, chars, rotation='vertical')\n plt.margins(0.1)\n plt.subplots_adjust(bottom=0.15)\n plt.show()\n\n return (train_imgs, train_class, test_imgs, test_class, chars)\n\ndef generate_all_chars_with_same_class(chars = ALPHABET_ALL, classification=0,\n train_test_ratio=train_test_ratio, plot=False, verbose=1):\n \n sizes = np.zeros(len(chars))\n \n classifications = range(len(chars)) # this way chars[classification] = \"a\" if classification == 0\n \n (train_imgs, train_class, test_imgs, test_class) = generate_all_for_char_with_class(chars[0],\\\n classification, \\\n verbose=verbose)\n sizes[0] = train_imgs.shape[0] + test_imgs.shape[0]\n \n for i in classifications:\n if (i>0):\n (train_imgs_prov, train_class_prov, test_imgs_prov, test_class_prov) = \\\n generate_all_for_char_with_class(chars[i],\\\n classification, verbose=verbose)\n \n sizes[i] = train_imgs_prov.shape[0] + test_imgs_prov.shape[0]\n \n train_imgs = np.append(train_imgs, train_imgs_prov, axis=0)\n train_class = np.append(train_class, train_class_prov, axis=0)\n test_imgs = np.append(test_imgs, test_imgs_prov, axis=0)\n test_class = np.append(test_class, test_class_prov, axis=0)\n \n chars = np.asarray(chars)\n \n if plot:\n plt.plot(sizes, 'ro')\n plt.xticks(classifications, chars, rotation='vertical')\n plt.margins(0.1)\n plt.subplots_adjust(bottom=0.15)\n plt.show()\n\n return (train_imgs, train_class, test_imgs, test_class, chars)\n\n# Returns a dataset of bad 
cutted letters, useful for build a classificator that discriminate good cutted letters\n# from bad cutted letters\ndef generate_bad_letters_of_chosen_chars(chars=ALPHABET_ALL, n_sample_for_class_width=100, split_ratio=0.7, verbose=0, plot=True):\n\n images = []\n\n yAxis = []\n\n for letter in chars:\n datas = samples_repository.get_n_negative_samples_by_width_and_char(letter, n_sample_for_class_width, verbose = verbose)\n yAxis.append(len(datas))\n images.extend(datas)\n\n images_len = len(images)\n split_value = int(images_len*split_ratio)\n \n indexes = [i for i in range(images_len)]\n np.random.shuffle(indexes)\n\n images = np.array(images)\n\n if plot:\n plt.plot(yAxis, 'ro')\n plt.xticks(np.arange(len(chars)),chars, rotation='vertical')\n plt.margins(0.1)\n plt.subplots_adjust(bottom=0.15)\n plt.show()\n\n return (images[indexes[:split_value]], images[indexes[split_value:]])\n\ndef generate_dataset_for_segmentator(verbose=0, plot=True, label_pos_class=1, label_neg_class=0):\n (X_train_Pos, y_train_Pos, X_test_Pos, y_test_Pos, _) = generate_all_chars_with_same_class(verbose=verbose, plot=plot, classification=label_pos_class)\n (X_train_Neg, X_test_Neg) = generate_bad_letters_of_chosen_chars(n_sample_for_class_width = 5000, plot=plot, verbose=verbose)\n \n X_train = []\n X_train.extend(X_train_Pos)\n X_train.extend(X_train_Neg)\n\n X_test = []\n X_test.extend(X_test_Pos)\n X_test.extend(X_test_Neg)\n\n y_train_Neg = [label_neg_class] * len(X_train_Neg)\n y_test_Neg = [label_neg_class] * len(X_test_Neg)\n\n y_train = []\n y_train.extend(y_train_Pos)\n y_train.extend(y_train_Neg)\n\n y_test = []\n y_test.extend(y_test_Pos)\n y_test.extend(y_test_Neg)\n\n X_train = np.array(X_train)\n X_test = np.array(X_test)\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n \n return (X_train, y_train, X_test, y_test)\n","sub_path":"Notebooks/dataset_generator.py","file_name":"dataset_generator.py","file_ext":"py","file_size_in_byte":13434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"13601928","text":"class ExcelSheetColumnTitle(object):\n def convertToTitle(self,n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n l=[]\n while n!=0:\n if n%26==0:\n l.append('Z')\n else:\n l.append(chr(64+n%26))\n if n%26==0:\n n=n//26-1\n continue\n n=n//26\n l.reverse()\n return ''.join(l)","sub_path":"python/ExcelSheetColumnTitle.py","file_name":"ExcelSheetColumnTitle.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"135484553","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2012, Jonas Obrist\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Jonas Obrist nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 
THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL JONAS OBRIST BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nfrom pymaging.colors import RGB\nfrom pymaging.image import Image\nfrom pymaging.formats import Format\nfrom pymaging.exceptions import FormatNotSupported\nimport array\nimport struct\nfrom pymaging.pixelarray import get_pixel_array\n\n\ndef BITMAPINFOHEADER(decoder):\n (decoder.width, decoder.height, decoder.nplanes, decoder.bits_per_pixel,\n decoder.compression_method, decoder.bmp_bytesz, decoder.hres, decoder.vres,\n decoder.ncolors, decoder.nimpcolors) = struct.unpack_from('= self.width:\n return\n b <<= 1\n\n def get_image(self):\n # go to the start of the pixel array\n self.fileobj.seek(self.offset)\n # since bmps are stored upside down, initialize a pixel list\n initial = array.array('B', [0] * self.width * self.height * self.pixelsize)\n pixel_array = get_pixel_array(initial, self.width, self.height, self.pixelsize)\n # iterate BACKWARDS over the line indices so we don't have to reverse\n # later. this is why we intialize pixels above.\n for row_num in range(self.height - 1, -1, -1):\n self.read_row(pixel_array, row_num)\n # TODO: Not necessarily RGB\n\n return Image(pixel_array, RGB, palette=self.palette)\n\ndef decode(fileobj):\n try:\n decoder = BMPDecoder(fileobj)\n except:\n fileobj.seek(0)\n return None\n return decoder.get_image()\n\ndef encode(image, fileobj):\n raise FormatNotSupported('bmp')\n\nBMP = Format(decode, encode, ['bmp'])\n","sub_path":"pymaging_bmp/codec.py","file_name":"codec.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375584233","text":"# Author: Jintao Huang\n# Time: 2020-5-20\nimport torch\nimport torch.nn as nn\nfrom .utils import encode_boxes\nfrom torchvision.ops import box_iou\n\n\ndef weighted_binary_focal_loss(y_pred, y_true, alpha=0.25, gamma=2, with_logits=False, reduction=\"mean\"):\n \"\"\"f(x) = -alpha * (1 - x)^a * ln(x) = alpha * (1 - x)^a * CELoss(x)(已测试)\n\n :param y_pred: shape = (N,) or (...)\n :param y_true: shape = (N,) or (...)\n :param alpha: 负样本与正样本的权重. The weight of the negative sample and the positive sample\n = alpha * positive + (1 - alpha) * negative\n :param with_logits: y_pred是否未经过sigmoid\"\"\"\n\n if reduction == \"mean\":\n func = torch.mean\n elif reduction == \"sum\":\n func = torch.sum\n else:\n raise ValueError(\"reduction should in ('mean', 'sum')\")\n if with_logits:\n y_pred = torch.sigmoid(y_pred)\n y_pred = torch.clamp(y_pred, 1e-6, 1 - 1e-6)\n\n # 前式与后式关于0.5对称(The former and the latter are symmetric about 0.5)\n # y_true 为-1. 
i.e. it is neither a positive nor a negative sample.\n    return func((alpha * y_true * -torch.log(y_pred) * (1 - y_pred) ** gamma +\n                 (1 - alpha) * (1 - y_true) * -torch.log(1 - y_pred) * y_pred ** gamma) *\n                (y_true >= 0).float())\n\n\ndef smooth_l1_loss(y_pred, y_true, divide_line=1.):\n    \"\"\"No matter the value of divide_line, the gradient at the junction is 1\n\n    :param y_pred: shape(N, num) or (...)\n    :param y_true: shape(N, num) or (...)\n    :param divide_line: = the dividing line\n    :return: ()\"\"\"\n\n    diff = torch.abs(y_pred - y_true)\n    return torch.mean(torch.where(diff < divide_line, 0.5 / divide_line * diff ** 2, diff - 0.5 * divide_line))\n\n\nclass FocalLoss(nn.Module):\n    def __init__(self, alpha=0.25, gamma=2, divide_line=1 / 9):\n        \"\"\"\n\n        :param alpha: alpha for focal_loss\n        :param gamma: gamma for focal_loss\n        :param divide_line: the dividing line for smooth_l1_loss\n        \"\"\"\n        super(FocalLoss, self).__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.divide_line = divide_line\n\n    def forward(self, classifications, regressions, anchors, targets):\n        \"\"\"\n\n        :param classifications: Tensor[N, NUM_X, num_classes]. NUM_X: F*H*W*A\n        :param regressions: Tensor[N, NUM_X, 4]. NUM_X: F*H*W*A\n        :param anchors: Tensor[NUM_X, 4]. NUM_X: F*H*W*A\n        :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]]\n            boxes: left, top, right, bottom\n        :return: dict(\"class_loss\", \"reg_loss\")\n        \"\"\"\n        class_loss_total = []\n        reg_loss_total = []\n        device = anchors.device\n\n        for i, target in enumerate(targets): # iterate over each image\n            labels_ori, boxes_ori = target['labels'], target['boxes']\n            classification, regression = classifications[i], regressions[i]\n            if labels_ori.shape[0] == 0: # image with empty labels\n                labels = torch.zeros_like(classification, device=device)\n                class_loss_total.append(weighted_binary_focal_loss(\n                    classification, labels, self.alpha, self.gamma, False, 'sum'))\n                reg_loss_total.append(torch.tensor(0.).to(device))\n                continue\n            # ---------------------------------------- class_loss\n            iou, matched = box_iou(anchors, boxes_ori).max(dim=1) # each anchor can only match one box\n            matched_labels = labels_ori[matched]\n            labels = torch.zeros_like(classification, device=device)\n            positive_idxs = torch.nonzero(iou >= 0.5) # positive labels\n            ignore_idxs = torch.nonzero((iou >= 0.4) & (iou < 0.5)) # neither a negative nor a positive sample -> -1 (ignored)\n            labels[positive_idxs, matched_labels[positive_idxs]] = 1\n            labels[ignore_idxs, matched_labels[ignore_idxs]] = -1 # ignored samples\n            class_loss_total.append(weighted_binary_focal_loss(\n                classification, labels, self.alpha, self.gamma, False, 'sum') /\n                                    max(positive_idxs.shape[0], 1))\n            # ---------------------------------------- reg_loss\n            boxes = boxes_ori[matched][positive_idxs]\n            if boxes.shape[0] == 0:\n                reg_loss_total.append(torch.tensor(0.).to(device))\n                continue\n            anchors_pos = anchors[positive_idxs] # anchors_positive\n            reg_true = encode_boxes(boxes, anchors_pos)\n            regression = regression[positive_idxs]\n            reg_loss_total.append(smooth_l1_loss(regression, reg_true, self.divide_line))\n\n        class_loss = sum(class_loss_total) / len(class_loss_total)\n        reg_loss = sum(reg_loss_total) / len(reg_loss_total)\n        return {\"class_loss\": class_loss, \"reg_loss\": reg_loss}\n","sub_path":"models/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"24238132","text":"import os, sys\nimport time\nimport random\nimport matplotlib\nimport numpy as np\nimport sklearn.datasets\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport tflib.plot\nimport tflib as lib\nimport 
tflib.ops.linear\n\n\n# lib.print_model_settings(locals().copy())\n\n\nclass GAN_Toy():\n def __init__(self, mode = 'wgan-gp', dim = 512, lambdaa = 0.1, fixed_generator = False , batch_size = 256):\n self.mode = mode\n self.dim = dim\n self.lambdaa = lambdaa\n self.fixed_generator = fixed_generator\n self.batch_size = batch_size\n\n self.define()\n\n def ReLULayer(self, name, n_in, n_out, inputs):\n output = lib.ops.linear.Linear(\n name+'.Linear',\n n_in,\n n_out,\n inputs,\n initialization='he'\n )\n output = tf.nn.relu(output)\n return output\n\n def Generator(self, n_samples, real_data):\n if self.fixed_generator:\n return real_data + (1.*tf.random_normal(tf.shape(real_data)))\n else:\n noise = tf.random_normal([n_samples, 2])\n output = self.ReLULayer('Generator.1', 2, self.dim, noise)\n output = self.ReLULayer('Generator.2', self.dim, self.dim, output)\n output = self.ReLULayer('Generator.3', self.dim, self.dim, output)\n output = lib.ops.linear.Linear('Generator.4', self.dim, 2, output) #o/p has same dims as i/p\n return output\n\n def Discriminator(self, inputs):\n output = self.ReLULayer('Discriminator.1', 2, self.dim, inputs)\n output = self.ReLULayer('Discriminator.2', self.dim, self.dim, output)\n output = self.ReLULayer('Discriminator.3', self.dim, self.dim, output)\n output = lib.ops.linear.Linear('Discriminator.4', self.dim, 1, output) # o/p is [self.dim, 1]\n return tf.reshape(output, [-1]) # flattens the previous step\n\n def define(self):\n self.real_data = tf.placeholder(tf.float32, shape=[None, 2]) #2-dimensional data\n self.fake_data = self.Generator(self.batch_size, self.real_data)\n\n self.disc_real = self.Discriminator(self.real_data)\n self.disc_fake = self.Discriminator(self.fake_data)\n\n # WGAN loss\n print (' ---> Defining Disc and Gen Loss')\n self.disc_cost = tf.reduce_mean(self.disc_fake) - tf.reduce_mean(self.disc_real)\n self.gen_cost = -tf.reduce_mean(self.disc_fake)\n\n # WGAN gradient penalty\n if self.mode == 'wgan-gp':\n alpha = tf.random_uniform(\n shape=[self.batch_size,1], \n minval=0.,\n maxval=1.\n )\n interpolates = alpha * self.real_data + ((1-alpha) * self.fake_data) #Eh??\n disc_interpolates = self.Discriminator(interpolates)\n gradients = tf.gradients(disc_interpolates, [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n gradient_penalty = tf.reduce_mean((slopes-1)**2)\n \n self.disc_cost += self.lambdaa * gradient_penalty\n\n print (' ---> Aggregating all variables for Disc and Gen in two distinct variables')\n disc_params = lib.params_with_name('Discriminator')\n gen_params = lib.params_with_name('Generator')\n\n if self.mode == 'wgan-gp':\n print (' ---> Defining Optimizers for training')\n self.disc_train_op = tf.train.AdamOptimizer(\n learning_rate=1e-4, \n beta1=0.5, \n beta2=0.9\n ).minimize(\n self.disc_cost, \n var_list = disc_params\n )\n if len(gen_params) > 0:\n self.gen_train_op = tf.train.AdamOptimizer(\n learning_rate=1e-4, \n beta1=0.5, \n beta2=0.9\n ).minimize(\n self.gen_cost, \n var_list = gen_params\n )\n else:\n self.gen_train_op = tf.no_op()\n\n else:\n pass\n # disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(\n # disc_cost, \n # var_list=disc_params\n # )\n # if len(gen_params) > 0:\n # gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(\n # gen_cost, \n # var_list=gen_params\n # )\n # else:\n # gen_train_op = tf.no_op()\n\n\n # # Build an op to do the weight clipping\n # clip_ops = []\n # for var in disc_params:\n # clip_bounds 
= [-.01, .01]\n # clip_ops.append(\n # tf.assign(\n # var, \n # tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])\n # )\n # )\n # clip_disc_weights = tf.group(*clip_ops)\n\n # print \"Generator params:\"\n # for var in lib.params_with_name('Generator'):\n # print \"\\t{}\\t{}\".format(var.name, var.get_shape())\n # print \"Discriminator params:\"\n # for var in lib.params_with_name('Discriminator'):\n # print \"\\t{}\\t{}\".format(var.name, var.get_shape())\n\n def generate_image(self, session, iteration, true_dist, dataset_type):\n \"\"\"\n Generates and saves a plot of the true distribution, the generator, and the\n critic.\n \"\"\"\n N_POINTS = 128\n RANGE = 3\n\n points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')\n points[:,:,0] = np.linspace(-RANGE, RANGE, N_POINTS)[:,None]\n points[:,:,1] = np.linspace(-RANGE, RANGE, N_POINTS)[None,:]\n points = points.reshape((-1,2))\n\n samples, disc_map = session.run(\n [self.fake_data, self.disc_real], \n feed_dict={self.real_data : points}\n )\n disc_map = session.run(self.disc_real, feed_dict={self.real_data : points}) #this is repeated!!??\n\n plt.clf()\n\n x = y = np.linspace(-RANGE, RANGE, N_POINTS)\n plt.contour(x,y,disc_map.reshape((len(x), len(y))).transpose())\n\n plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+') # generated dataset\n plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+') # predicted dataset\n\n if iteration == 0:\n print ('\\nTrue Dist (orange +) : ', true_dist.shape)\n print ('Samples (green +) : ', samples.shape)\n print ('Discrimnator Map : ', disc_map.shape)\n\n save_dir = os.path.join(os.getcwd(), 'gan_toy_output')\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n # file_name = os.path.join(save_dir, 'frame' + str(self.frame_index[0]) + '.jpg')\n file_name = os.path.join(save_dir, 'frame' + str(iteration) + '.png')\n plt.savefig(file_name)\n\n # Dataset iterator\n def inf_train_gen(self, dataset_type):\n if dataset_type == '25gaussians':\n dataset = []\n for i in range(100000/25):\n for x in range(-2, 3):\n for y in range(-2, 3):\n point = np.random.randn(2)*0.05\n point[0] += 2*x\n point[1] += 2*y\n dataset.append(point)\n dataset = np.array(dataset, dtype='float32')\n np.random.shuffle(dataset)\n dataset /= 2.828 # stdev\n while True:\n for i in range(len(dataset) / self.batch_size):\n yield dataset[i * self.batch_size : (i+1) * self.batch_size]\n\n elif dataset_type == 'swissroll':\n while True:\n data = sklearn.datasets.make_swiss_roll(\n n_samples = self.batch_size, \n noise = 0.25\n )[0]\n data = data.astype('float32')[:, [0, 2]]\n data /= 7.5 # stdev plus a little\n yield data\n\n elif dataset_type == '8gaussians':\n scale = 2.\n centers = [\n (1,0),\n (-1,0),\n (0,1),\n (0,-1),\n (1./np.sqrt(2), 1./np.sqrt(2)),\n (1./np.sqrt(2), -1./np.sqrt(2)),\n (-1./np.sqrt(2), 1./np.sqrt(2)),\n (-1./np.sqrt(2), -1./np.sqrt(2))\n ]\n centers = [(scale*x,scale*y) for x,y in centers]\n while True:\n dataset = []\n for i in range(self.batch_size):\n point = np.random.randn(2)*.02\n center = random.choice(centers)\n point[0] += center[0]\n point[1] += center[1]\n dataset.append(point)\n dataset = np.array(dataset, dtype='float32')\n dataset /= 1.414 # stdev (sqrt of 2) \n yield dataset\n \n else:\n print ('Cannot generate this dataset type as of yet')\n\n def train(self, iters = 100000, critic_iters = 5, dataset_type = '8gaussians'):\n # Train loop!\n with tf.Session() as session:\n print ('\\n ======= TRAINING ======= ')\n session.run(tf.global_variables_initializer())\n 
session.run(tf.local_variables_initializer())\n \n gen = self.inf_train_gen(dataset_type)\n \n for iteration in range(iters):\n # TRAIN GENERATOR\n if iteration > 0:\n _ = session.run(self.gen_train_op)\n \n # TRAIN CRITIC\n for i in range(critic_iters):\n _data = gen.__next__() #[batch_size, 2]\n _disc_cost, _ = session.run(\n [self.disc_cost, self.disc_train_op],\n feed_dict={self.real_data: _data}\n )\n # if self.mode == 'wgan':\n # _ = session.run([clip_disc_weights])\n \n # LOGGING\n # lib.plot.plot('disc cost', _disc_cost)\n if iteration % 10 == 0:\n # lib.plot.flush()\n print ('Iter:', iteration,' ---> Disc Cost:', _disc_cost)\n self.generate_image(session, iteration, _data, dataset_type)\n # slib.plot.tick()\n","sub_path":"demo/gan_toy/gan_toy.py","file_name":"gan_toy.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"44834197","text":"# coding=utf-8\nfrom annoying.functions import get_object_or_None\nfrom datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import ListView\nfrom apps.city.models import City\nfrom apps.manager.models import Manager\nfrom apps.ticket.forms import TicketChangeForm\nfrom .models import Ticket\n\n__author__ = 'alexy'\n\n\ndef percent_count(total, current):\n if total:\n percent = (float(current)/float(total)) * 100\n return int(percent)\n else:\n return 0\n\n\nclass TicketListView(ListView):\n model = Ticket\n paginate_by = 25\n template_name = 'ticket/ticket_list.html'\n\n def get_queryset(self):\n user = self.request.user\n if user.type == 1:\n qs = Ticket.objects.all()\n elif user.type == 6:\n qs = Ticket.objects.filter(city__in=user.superviser.city_id_list())\n elif user.type == 2:\n qs = Ticket.objects.filter(city__moderator=user)\n elif user.type == 5:\n manager = Manager.objects.get(user=user)\n qs = Ticket.objects.filter(city__moderator=manager.moderator)\n else:\n qs = None\n if self.request.GET.get('name'):\n qs = qs.filter(name__icontains=self.request.GET.get('name'))\n if self.request.GET.get('phone'):\n qs = qs.filter(phone__icontains=self.request.GET.get('phone'))\n if self.request.GET.get('city') and int(self.request.GET.get('city')) != 0:\n qs = qs.filter(city__id=int(self.request.GET.get('city')))\n if self.request.GET.get('type'):\n qs = qs.filter(type=int(self.request.GET.get('type')))\n r_date_s = self.request.GET.get('date_s')\n r_date_e = self.request.GET.get('date_e')\n if r_date_s:\n qs = qs.filter(created__gte=datetime.strptime(r_date_s, '%d.%m.%Y'))\n if r_date_e:\n qs = qs.filter(created__lte=datetime.strptime(r_date_e, '%d.%m.%Y'))\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(TicketListView, self).get_context_data(**kwargs)\n user = self.request.user\n if user.type == 1:\n city_qs = City.objects.all()\n elif user.type == 6:\n city_qs = user.superviser.city.all()\n elif user.type == 2:\n city_qs = City.objects.filter(moderator=user)\n elif user.type == 5:\n manager = Manager.objects.get(user=user)\n city_qs = City.objects.filter(moderator=manager.moderator)\n else:\n city_qs = None\n context.update({\n 'city_list': city_qs,\n })\n if self.request.GET.get('city'):\n context.update({\n 'r_city': int(self.request.GET.get('city'))\n })\n if self.request.GET.get('type'):\n context.update({\n 'r_type': 
int(self.request.GET.get('type'))\n })\n if self.request.GET.get('phone'):\n context.update({\n 'r_phone': self.request.GET.get('phone')\n })\n if self.request.GET.get('name'):\n context.update({\n 'r_name': self.request.GET.get('name')\n })\n if self.request.GET.get('date_s'):\n context.update({\n 'r_date_s': self.request.GET.get('date_s')\n })\n if self.request.GET.get('date_e'):\n context.update({\n 'r_date_e': self.request.GET.get('date_e')\n })\n total_count = self.object_list.count()\n new_count = self.object_list.filter(type=0).count()\n action_count = self.object_list.filter(type=1).count()\n sale_count = self.object_list.filter(type=3).count()\n new_count_p = percent_count(total_count, new_count)\n action_count_p = percent_count(total_count, action_count)\n sale_count_p = percent_count(total_count, sale_count)\n price_sum = 0\n for i in self.object_list.filter(type=3):\n if i.price:\n price_sum += i.price\n context.update({\n 'total_count': total_count,\n 'new_count': new_count,\n 'action_count': action_count,\n 'sale_count': sale_count,\n 'new_count_p': new_count_p,\n 'action_count_p': action_count_p,\n 'sale_count_p': sale_count_p,\n 'price_sum': price_sum\n })\n return context\n\n\n@login_required\ndef ticket_detail(request, pk):\n context = {}\n user = request.user\n city_qs = City.objects.all()\n if user.type == 6:\n city_qs = user.superviser.city.all()\n elif user.type == 2:\n city_qs = city_qs.filter(moderator=user)\n elif user.type == 5:\n city_qs = city_qs.filter(moderator=user.manager.moderator)\n ticket = get_object_or_None(Ticket, pk=int(pk))\n if request.method == 'POST':\n form = TicketChangeForm(request.POST, instance=ticket)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('ticket:list'))\n else:\n context.update({\n 'error': u'Проверьте правильность ввода данных'\n })\n else:\n form = TicketChangeForm(instance=ticket)\n form.fields['city'].queryset = city_qs\n context.update({\n 'form': form,\n 'object': ticket\n })\n return render(request, 'ticket/ticket_detail.html', context)\n","sub_path":"apps/ticket/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"13976526","text":"import serial \nimport time\nimport numpy\nimport sys\n\ndataTemp=[]\ndataTiempo=[]\n#comentar puerto a conveniencia\n#puerto='/dev/ttyACM0'\npuerto='/dev/ttyUSB0'\n\n\ndef writer(data): #performs operations to record time and temps in a file\n\tfilo=open(\"Temperatura\", \"a\")\n\tfile=open(\"Tiempo\", \"a\")\n\n\tfilo.write(data)\n\tfilo.write(\"\\n\")\n\n\t#guarda tiempo de cada lectura en archivo separado\n\tfile.write(time.strftime('%X %x'))\n\tfile.write(\"\\n\")\n\t\n\tfile.close()\n\tfilo.close()\n\n\ndef reader(): #reads the serial port and catchs temps\n\tser=serial.Serial(puerto, 9600, timeout=0) #timeout=0 (arduino controla el loop)\n\tt0=time.clock() #cambiar esta funci[on porque lee el tiempo que toma el loop\n\twhile True:\n\t\tdata=ser.read(24).strip()\n #al usar sys.getsizeof(28.75) [un float] da 24\n\t\t#data=ser.readline().decode('ascii') \n\t\tif data:\n\t\t\t\n\t\t\tprint(data)\n\t\t\twriter(data)\n\t\t\tdataTemp.append(data)\n\t\t\t\n\t\t\t\n\t\t\n\n\n\n","sub_path":"Python_programas/ProyectoBiodigestor/seriamente.py","file_name":"seriamente.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"482010545","text":"from 
django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom .models import PollDescription\nfrom .forms import which_poll\n\n\ndef start(request):\n context = {}\n return render(request, 'polls/start.html', context)\n\n\n@login_required\ndef show_polls(request):\n descs = PollDescription.objects.all()\n\n context = {\n 'descs': descs,\n }\n return render(request, 'polls/poll-choice.html', context)\n\n\n@login_required\ndef poll(request, pk=None):\n if pk is None:\n return redirect('polls:poll-choice')\n\n if request.method == 'POST':\n form = which_poll(pk, request.POST)\n\n if form.is_valid():\n form = form.save(commit=False)\n form.user = request.user\n form.which_poll = PollDescription.objects.get(pk=pk)\n form.save()\n # TO DO - redirect to the page, where users can see their code\n # TO DO - email\n messages.success(request,\n 'Thank you for filling in the survey. '\n 'An email with your discount code has been sent')\n # TO DO - filled-in surveys should be marked \"complete\"\n return redirect('polls:poll-choice')\n else:\n form = which_poll(pk)\n polls = PollDescription.objects.all()\n\n context = {\n 'poll_id': pk,\n 'polls': polls,\n 'form': form,\n }\n return render(request, 'polls/poll.html', context)\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"538972721","text":"'''\nCreated on 29.04.2016\n\n@author: lemmerfn\n'''\nimport copy\nimport pysubgroup as ps\nfrom bitarray import bitarray\n\nclass BSD_Bitarray(object):\n \n def execute (self, task):\n self.popSize = len(task.data)\n \n # generate target bitset\n self.targetBitset = bitarray(self.popSize)\n for index, row in task.data.iterrows():\n self.targetBitset[index] = task.target.covers(row)\n self.popPositives = self.targetBitset.count()\n\n # generate selector bitsets\n self.bitsets = {}\n for sel in task.search_space:\n # generate bitset\n selBitset = bitarray(self.popSize)\n for index, row in task.data.iterrows():\n selBitset[index] = sel.covers(row)\n self.bitsets[sel] = selBitset\n result = self.search_internal(task, [], task.search_space, [], self.popSize * bitarray('1'))\n result.sort(key=lambda x: x[0], reverse=True)\n return result\n \n \n def search_internal(self, task, prefix, modificationSet, result, bitset):\n sg = ps.Subgroup(task.target, copy.copy(prefix))\n \n sgSize = bitset.count()\n positiveInstances = bitset & self.targetBitset\n sgPositiveCount = positiveInstances.count()\n \n optimisticEstimate = task.qf.optimistic_estimate_from_statistics (self.popSize, self.popPositives, sgSize, sgPositiveCount)\n if (optimisticEstimate <= ps.minimum_required_quality(result, task)):\n return result\n \n quality = task.qf.evaluate_from_statistics(self.popSize, self.popPositives, sgSize, sgPositiveCount)\n ps.add_if_required (result, sg, quality, task)\n \n if len(prefix) < task.depth:\n newModificationSet = copy.copy(modificationSet)\n for sel in modificationSet:\n prefix.append(sel)\n newBitset = bitset & self.bitsets [sel]\n newModificationSet.pop(0)\n self.search_internal(task, prefix, newModificationSet, result, newBitset)\n # remove the sel again\n prefix.pop(-1)\n return 
result\n","sub_path":"pysubgroup.archive/bsd_bitarray.py","file_name":"bsd_bitarray.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"80750547","text":"#!/usr/bin/env python\n\nimport math\n\nclass Time:\n \"\"\"Astronomical calendar and time conversion class.\"\"\"\n \n months = ['January', 'February', 'March', 'April', 'May', 'June', \\\n 'July', 'August', 'September', 'October', 'November', 'December']\n\n days_of_week = {0:'Sunday', 1:'Monday', 2:'Tuesday', 3:'Wednesday', \\\n 4:'Thursday', 5:'Friday', 6:'Saturday'}\n\n def __init__(self):\n pass\n\n def days_in_month(self, month):\n \"\"\"Number of days in each calendar month.\n \n Keyword arguments:\n month -- capitalized name of the month or February_leap\n \n \"\"\"\n \n lookup_days_in_month = {'January':31, 'February':28, 'February_leap':29, \\\n 'March':31, 'April':30, 'May':31, 'June':30, 'July':31, 'August':31, \\\n 'September':30, 'October':31, 'November':30, 'December':31}\n return lookup_days_in_month[month]\n\n def date_of_easter(self, year): \n \"\"\"Date of Easter for the year.\n\n Keyword arguments:\n year -- Gregorian year after 1583 \n \n Easter day is the first Sunday after the fourteenth day after the first \n new Moon after March 21st. This method is valid from the year 1583 \n onwards.\n \n \"\"\"\n \n s = self \n a = year%19\n b = year//100\n c = year%100\n d = b//4\n e = b%4\n f = (b + 8)//25\n g = (b - f + 1)//3\n h = (19 * a + b - d - g + 15)%30\n i = c//4\n k = c%4\n l = (32 + 2 * e + 2 * i - h - k)%7\n m = (a + 11 * h + 22 * l)//451\n n = (h + l - 7 * m + 114)//31\n p = (h + l - 7 * m + 114)%31\n day = p + 1\n month = n\n print('Easter Sunday for the year', year, 'is', s.months[month-1], day)\n return s.months[month-1], day\n\n def is_leap(self, year):\n \"\"\"Determine if the year is a leap year.\n\n Keyword arguments:\n year -- integer year\n \n \"\"\"\n\n if year%400 == 0:\n return True\n elif year%4 == 0 and year%100 != 0:\n return True\n else:\n return False\n\n def day_number(self, year, month, day):\n '''Convert date to the day number.\n \n Keyword arguments:\n year -- integer year\n month -- integer month\n day -- integer day\n \n Starting point is 0 hours on January 0th, which is taken as midnight \n between December 30th and 31st of the previous year.'''\n\n s = self \n leap = s.is_leap(year)\n if month < 3:\n return int(((month - 1)*(63 - int(leap)))//2 + day)\n else:\n return int(int(math.floor((month + 1)*30.6)) - 63 + int(leap) + day)\n \n def date_to_julian_day(self, year, month, day):\n \"\"\"Julian day corresponding to a date.\n \n Keyword arguments:\n year -- integer year\n month -- integer month\n day -- float day\n \n The fundamental epoch for this calculation is Greenwich mean noon of\n January 1st 4713 B.C. 
Julian day begins at 12h 00m UT.\n        \n        \"\"\"\n\n        d = day \n        if month == 1 or month == 2:\n            y = year - 1\n            m = month + 12\n        else:\n            y = year\n            m = month\n        if y > 1582 or (y == 1582 and m >= 10 and d >= 15.):\n            A = y//100\n            B = 2 - A + A//4\n        else:\n            B = 0\n        if (y < 0):\n            C = int(365.25*y - 0.75)\n        else:\n            C = int(365.25*y)\n        D = int(30.6001*(m + 1))\n        return B + C + D + d + 1720994.5\n    \n    def julian_day_1990_epoch(self):\n        \"\"\"Julian date of the epoch 1990 January 0.0.\n        \n        \"\"\"\n\n        return 2447891.5\n    \n    def date_to_modified_julian_day(self, year, month, day):\n        \"\"\"Modified Julian day corresponding to a date.\n        \n        Keyword arguments:\n        year -- integer year\n        month -- integer month\n        day -- float day\n        \n        The fundamental epoch for this calculation is 0h on November 17th 1858.\n        \n        \"\"\"\n        \n        s = self\n        return s.date_to_julian_day(year, month, day) - 2400000.5\n    \n    def julian_day_to_date(self, julian):\n        \"\"\"Converts Julian day number to calendar date.\n\n        Keyword arguments:\n        julian -- Julian day number\n        \n        Valid after Greenwich mean noon of January 1st 4713 B.C.\n        \n        \"\"\"\n        \n        JD = julian + 0.5\n        I = int(JD)\n        F = JD - I\n        if I > 2299160:\n            A = int((I - 1867216.25)/36524.25)\n            B = I + 1 + A - A//4\n        else:\n            B = I\n        C = B + 1524\n        D = int((C - 122.1)/365.25)\n        E = int(365.25*D)\n        G = int((C - E) / 30.6001)\n        d = C - E + F - int(30.6001*G)\n        if G < 13.5:\n            m = G - 1\n        else:\n            m = G - 13\n        if m > 2.5:\n            y = D - 4716\n        else:\n            y = D - 4715\n        return y, m, d\n    \n    def day_of_week(self, julian):\n        \"\"\"Day of the week for a Julian day.\n        \n        Keyword arguments:\n        julian -- Julian day number corresponding to 0h UT\n        \n        \"\"\"\n        \n        s = self \n        A = (julian + 1.5)/7\n        f = A - int(A)\n        d = round(f*7)\n        return s.days_of_week[d]\n    \n    def hhmmss_to_decimal_hours(self, hh, mm, ss):\n        \"\"\"Convert hours, minutes and seconds to decimal hours.\n        \n        Keyword arguments:\n        hh -- integer hour\n        mm -- integer minutes\n        ss -- float seconds\n        \n        \"\"\"\n        \n        return (ss/60. + mm)/60. + hh\n    \n    def decimal_hours_to_hhmmss(self, decimal):\n        \"\"\"Convert decimal hours to hours, minutes and seconds.\n        \n        Keyword arguments:\n        decimal -- decimal hours \n        \n        \"\"\"\n        \n        hh = int(decimal)\n        m = 60*(decimal%1.0)\n        mm = int(m)\n        ss = 60.*(m%1.0)\n        \n        return hh, mm, ss\n    \n    def local_time_to_UT(self, hh, mm, ss, zone, dst = False):\n        \"\"\"Convert local time to UT.\n\n        Keyword arguments:\n        hh -- integer hour\n        mm -- integer minutes\n        ss -- float seconds\n        zone -- zone correction\n        dst -- daylight saving \n        \n        \"\"\"\n        \n        s = self \n        if dst == True:\n            hh = hh - 1\n        zone_time = s.hhmmss_to_decimal_hours(hh, mm, ss)\n        UT = zone_time - zone\n        if UT > 24.0:\n            UT = UT - 24.\n        if UT < 0:\n            UT = UT + 24.\n        return s.decimal_hours_to_hhmmss(UT)\n    \n    def UT_to_local_time(self, hh, mm, ss, zone, dst = False):\n        \"\"\"Convert UT to local time.\n        \n        Keyword arguments:\n        hh -- integer hour\n        mm -- integer minutes\n        ss -- float seconds\n        zone -- zone correction\n        dst -- daylight saving \n        \n        \"\"\"\n        \n        s = self \n        decimal_UT = s.hhmmss_to_decimal_hours(hh, mm, ss)\n        decimal_UT = decimal_UT + zone\n        if decimal_UT > 24.0:\n            decimal_UT = decimal_UT - 24.0\n        if decimal_UT < 0:\n            decimal_UT = decimal_UT + 24.0 \n        [hh_loc, mm_loc, ss_loc] = s.decimal_hours_to_hhmmss(decimal_UT)\n        if dst == True:\n            hh_loc = hh_loc + 1\n        return hh_loc, mm_loc, ss_loc\n    \n    def UT_to_GST(self, year, month, day, hh, mm, ss):\n        \"\"\"Convert UT to GST.\n        \n        Keyword arguments:\n        year -- integer year\n        month -- integer month\n        day -- integer day \n        hh -- integer hour\n        mm -- integer minutes\n        ss -- float seconds\n        \n        \"\"\" \n        \n        s = self \n        JD = s.date_to_julian_day(year, month, day)\n        S = JD - 2451545.\n        T = S/36525.\n        T0 = 6.697374558 + T*2400.051336 + T**2*0.000025862\n        T0 = T0%24\n        if T0 < 0.:\n            T0 = T0 + 24.\n        UT = s.hhmmss_to_decimal_hours(hh, mm, ss)*1.002737909\n        GST = T0 + UT\n        GST = GST%24 \n        if GST < 0.:\n            GST = GST + 24.\n        \n        return s.decimal_hours_to_hhmmss(GST)\n\n    def GST_to_UT(self, year, month, day, hh, mm, ss):\n        \"\"\"Convert GST to UT.\n        \n        Keyword arguments:\n        year -- integer year\n        month -- integer month\n        day -- integer day \n        hh -- integer hour\n        mm -- integer minutes\n        ss -- float seconds\n        \n        \"\"\" \n        \n        s = self \n        \n        JD = s.date_to_julian_day(year, month, day)\n        S = JD - 2451545.\n        T = S/36525.\n        T0 = 6.697374558 + T*2400.051336 + T**2*0.000025862\n        T0 = T0%24\n        if T0 < 0.:\n            T0 = T0 + 24.\n        GST = s.hhmmss_to_decimal_hours(hh, mm, ss)\n        UT = GST - T0 \n        if UT < 0.:\n            UT = UT + 24. 
\n UT = UT*0.9972695663\n \n return s.decimal_hours_to_hhmmss(UT)\n \n def LST_to_GST(self, hh, mm, ss, long):\n \"\"\"Convert LST to GST.\n \n Keyword arguments: \n hh -- integer hour\n mm -- integer minutes\n ss -- float seconds \n long -- longitude (E positive, W negative)\n \n \"\"\" \n \n s = self\n GST = s.hhmmss_to_decimal_hours(hh, mm, ss)\n hours = long/15.\n LST = GST - hours\n if LST > 24.:\n LST = LST - 24.\n if LST < 0.:\n LST = LST + 24.\n return s.decimal_hours_to_hhmmss(LST)\n \n def GST_to_LST(self, hh, mm, ss, long):\n \"\"\"Convert GST to LST.\n \n Keyword arguments: \n hh -- integer hour\n mm -- integer minutes\n ss -- float seconds \n long -- longitude (E positive, W negative)\n \n \"\"\" \n \n s = self\n LST = s.hhmmss_to_decimal_hours(hh, mm, ss)\n hours = long/15.\n GST = LST + hours\n if GST > 24.:\n GST = GST -24.\n if GST < 0.:\n GST = GST + 24.\n return s.decimal_hours_to_hhmmss(GST)\n \nif __name__ == \"__main__\":\n pass","sub_path":"astro_time/astro_time.py","file_name":"astro_time.py","file_ext":"py","file_size_in_byte":10343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"139405697","text":"from memory_profiler import memory_usage\nimport time\n\n\ndef getallparams(li):\n params = 0\n for module in li:\n for param in module.parameters():\n params += param.numel()\n return params\n\n\ndef all_in_one_train(trainprocess, trainmodules):\n starttime = time.time()\n mem = max(memory_usage(proc=trainprocess))\n endtime = time.time()\n\n print(\"Training Time: \"+str(endtime-starttime))\n print(\"Training Peak Mem: \"+str(mem))\n print(\"Training Params: \"+str(getallparams(trainmodules)))\n\n\ndef all_in_one_test(testprocess, testmodules):\n teststart = time.time()\n testprocess()\n testend = time.time()\n print(\"Inference Time: \"+str(testend-teststart))\n print(\"Inference Params: \"+str(getallparams(testmodules)))\n","sub_path":"private_test_scripts/all_in_one.py","file_name":"all_in_one.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"57576785","text":"#自顶向下\ndef msort(L):\n n=len(L)\n if n>1:\n leftL=L[:n//2]\n rightL=L[n//2:]\n leftL=msort(leftL)\n rightL=msort(rightL)\n L=merge(L,leftL,rightL)\n return L\n\n#自底向上\n#先归并那些微型数组,然后成对归并得到的微型数组\ndef msort(L):\n n=len(L)\n length=1\n while length=rest[1]:\n return head\n else:\n return rest\n\ndef bestWord(rack, dictionary):\n if rack == []:\n return []\n else:\n return getMax(scoreList(rack, dictionary))","sub_path":"hw1pr2.py","file_name":"hw1pr2.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"610308741","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom cfme.infrastructure.virtual_machines import Vm\nfrom cfme.services import requests\nfrom cfme.web_ui import flash\nfrom utils.wait import wait_for\nfrom utils import testgen\n\npytestmark = [\n pytest.mark.meta(server_roles=\"+automate\")\n]\n\n\ndef pytest_generate_tests(metafunc):\n argnames, argvalues, idlist = testgen.provider_by_type(metafunc, ['virtualcenter'])\n testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope=\"module\")\n\n\n@pytest.mark.meta(blockers=[1256903])\ndef test_vm_migrate(setup_provider, provider, request):\n \"\"\"Tests migration of a vm\n\n Metadata:\n test_flag: migrate, provision\n \"\"\"\n vm = Vm(\"vmtest\", provider)\n vm.migrate_vm(\"email@xyz.com\", 
\"first\", \"last\")\n flash.assert_no_errors()\n row_description = 'vmtest'\n cells = {'Description': row_description}\n row, __ = wait_for(requests.wait_for_request, [cells, True],\n fail_func=requests.reload, num_sec=600, delay=20)\n assert row.request_state.text == 'Migrated'\n","sub_path":"cfme/tests/infrastructure/test_vm_migrate.py","file_name":"test_vm_migrate.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606875930","text":"\"\"\"\n\n Student : Shahreen Shahjahan Psyche\n\n The Code Ran Successfully in Leetcode for all the test cases.\n\n Memory Complexity : O(N)\n \n Time Complexity:\n def push : O(N)\n def pop : O(N)\n def peek: O(1)\n def empty : O(1)\n\n\"\"\"\n\nclass MyQueue:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.push_stack = []\n self.pop_stack = []\n \n\n def push(self, x: int) -> None:\n \"\"\"\n Push element x to the back of queue.\n \"\"\"\n if len(self.pop_stack) != 0:\n while(self.pop_stack):\n self.push_stack.append(self.pop_stack.pop())\n self.push_stack.append(x) \n\n def pop(self) -> int:\n \"\"\"\n Removes the element from in front of queue and returns that element.\n \"\"\"\n if len(self.push_stack) == 0 and len(self.pop_stack) == 0:\n return\n if len(self.push_stack) != 0:\n while(self.push_stack):\n self.pop_stack.append(self.push_stack.pop())\n return self.pop_stack.pop()\n \n\n def peek(self) -> int:\n \"\"\"\n Get the front element.\n \"\"\"\n if len(self.push_stack) != 0:\n return self.push_stack[0]\n elif len(self.pop_stack) != 0:\n return self.pop_stack[-1]\n else:\n return\n \n\n def empty(self) -> bool:\n \"\"\"\n Returns whether the queue is empty.\n \"\"\"\n if len(self.push_stack) == 0 and len(self.pop_stack) == 0:\n return True\n return False\n \n \n","sub_path":"Problem_1.py","file_name":"Problem_1.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"7092839","text":"import sklearn as sk\nimport mlrose as ml\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport time\n\nsample_sizes = []\nfitness = ml.OneMax()\n\n\nmain_best_fit_gene = []\nmain_execution_times_gene = []\nmain_total_iterations_gene = []\nmain_opt_iterations_gene = []\n\n\nfor y in range(10):\n best_fit_gene = []\n execution_times_gene = []\n total_iterations_gene = []\n opt_iterations_gene = []\n for x in range(5):\n start = time.time()\n opt = ml.DiscreteOpt((y*6)+6, fitness)\n #best_state, best_fitness, curve = ml.random_hill_climb(opt, max_iters=5, restarts = 0, curve=True, random_state = 2)\n best_state_gene, best_fitness_gene, curve_gene = ml.genetic_alg(opt, pop_size=(y*6)+6, max_attempts=100, curve=True, random_state = x)\n \n end = time.time()\n best_fit_gene.append(best_fitness_gene)\n total_iterations_gene.append(len(curve_gene))\n opt_iterations_gene.append(np.argmax(curve_gene))\n \n if(len(sample_sizes) < 10 and x==0):\n sample_sizes.append((y*6)+6)\n execution_times_gene.append(end-start)\n main_best_fit_gene.append(np.mean(best_fit_gene))\n main_execution_times_gene.append(np.mean(execution_times_gene))\n main_total_iterations_gene.append(np.mean(total_iterations_gene))\n main_opt_iterations_gene.append(np.mean(opt_iterations_gene))\n\nprint(\"genetic alg 
data\")\nprint(main_best_fit_gene)\nprint(main_execution_times_gene)\nprint(main_opt_iterations_gene)\nprint(main_total_iterations_gene)\nprint(sample_sizes)\n\n\nmain_best_fit_hill = []\nmain_execution_times_hill = []\nmain_total_iterations_hill = []\nmain_opt_iterations_hill = []\n\n\nfor y in range(10):\n best_fit_hill = []\n execution_times_hill = []\n total_iterations_hill = []\n opt_iterations_hill = []\n for x in range(5):\n start = time.time()\n opt = ml.DiscreteOpt((y*6)+6, fitness)\n #best_state, best_fitness, curve = ml.random_hill_climb(opt, pop_size=(y*10)+10, max_iters=5, restarts = 0, curve=True, random_state = 2)\n best_state_hill, best_fitness_hill, curve_hill = ml.random_hill_climb(opt, max_iters=5, restarts = 0, curve=True, random_state = 2)\n \n end = time.time()\n best_fit_hill.append(best_fitness_hill)\n total_iterations_hill.append(len(curve_hill))\n opt_iterations_hill.append(np.argmax(curve_hill))\n \n if(len(sample_sizes) < 10 and x==0):\n sample_sizes.append((y*6)+6)\n execution_times_hill.append(end-start)\n main_best_fit_hill.append(np.mean(best_fit_hill))\n main_execution_times_hill.append(np.mean(execution_times_hill))\n main_total_iterations_hill.append(np.mean(total_iterations_hill))\n main_opt_iterations_hill.append(np.mean(opt_iterations_hill))\n\nprint(\"hill data\")\nprint(main_best_fit_hill)\nprint(main_execution_times_hill)\nprint(main_opt_iterations_hill)\nprint(main_total_iterations_hill)\nprint(sample_sizes)\n\n\n\nmain_best_fit_anneal = []\nmain_execution_times_anneal = []\nmain_total_iterations_anneal = []\nmain_opt_iterations_anneal = []\nprint(\"anneal fitness scores\")\nfor y in range(10):\n #print(\"y = \"+y)\n best_fit_anneal = []\n execution_times_anneal = []\n total_iterations_anneal = []\n opt_iterations_anneal = []\n for x in range(5):\n start = time.time()\n opt = ml.DiscreteOpt((y*6)+6, fitness)\n #best_state, best_fitness, curve = ml.random_anneal_climb(opt, pop_size=(y*10)+10, max_iters=5, restarts = 0, curve=True, random_state = 2)\n best_state_anneal, best_fitness_anneal, curve_anneal = ml.simulated_annealing(opt, max_attempts=100, curve=True, random_state = 2)\n print(best_fitness_anneal)\n end = time.time()\n best_fit_anneal.append(best_fitness_anneal)\n total_iterations_anneal.append(len(curve_anneal))\n opt_iterations_anneal.append(np.argmax(curve_anneal))\n \n if(len(sample_sizes) < 10 and x==0):\n sample_sizes.append((y*6)+6)\n execution_times_anneal.append(end-start)\n main_best_fit_anneal.append(np.mean(best_fit_anneal))\n main_execution_times_anneal.append(np.mean(execution_times_anneal))\n main_total_iterations_anneal.append(np.mean(total_iterations_anneal))\n main_opt_iterations_anneal.append(np.mean(opt_iterations_anneal))\n\nprint(\"anneal data\")\nprint(main_best_fit_anneal)\nprint(main_execution_times_anneal)\nprint(main_opt_iterations_anneal)\nprint(main_total_iterations_anneal)\nprint(sample_sizes)\n\n\n\n\nmain_best_fit_mimic = []\nmain_execution_times_mimic = []\nmain_total_iterations_mimic = []\nmain_opt_iterations_mimic = []\n\nfor y in range(10):\n best_fit_mimic = []\n execution_times_mimic = []\n total_iterations_mimic = []\n opt_iterations_mimic = []\n for x in range(2):\n start = time.time()\n opt = ml.DiscreteOpt((y*6)+6, fitness)\n #best_state, best_fitness, curve = ml.random_mimic_climb(opt, pop_size=(y*10)+10, max_iters=5, restarts = 0, curve=True, random_state = 2)\n best_state_mimic, best_fitness_mimic, curve_mimic = ml.mimic(opt, pop_size=(y*6)+6, max_attempts=10,max_iters=900, curve=True, 
random_state = 2)\n \n end = time.time()\n best_fit_mimic.append(best_fitness_mimic)\n total_iterations_mimic.append(len(curve_mimic))\n opt_iterations_mimic.append(np.argmax(curve_mimic))\n \n if(len(sample_sizes) < 10 and x==0):\n sample_sizes.append((y*6)+6)\n execution_times_mimic.append(end-start)\n #print(\"x = \" + x)\n #print(\"y = \" + y)\n main_best_fit_mimic.append(np.mean(best_fit_mimic))\n main_execution_times_mimic.append(np.mean(execution_times_mimic))\n main_total_iterations_mimic.append(np.mean(total_iterations_mimic))\n main_opt_iterations_mimic.append(np.mean(opt_iterations_mimic))\n\nprint(\"mimic data\")\nprint(main_best_fit_mimic)\nprint(main_execution_times_mimic)\nprint(main_opt_iterations_mimic)\nprint(main_total_iterations_mimic)\nprint(sample_sizes)\n\n\n\n\n\nplt.title(\"Execution times vs Population Size\")\nplt.xlabel(\"Population Size\")\nplt.ylabel(\"Mean Execution Time\") \nplt.grid()\nplt.plot(sample_sizes, main_execution_times_gene,color=\"g\",linestyle='dashed', marker='o',label=\"Genetic Algorithm\")\nplt.plot(sample_sizes, main_execution_times_anneal,color=\"r\",linestyle='dashed', marker='o',label=\"Simulated Annealing\")\nplt.plot(sample_sizes, main_execution_times_hill, color=\"b\",linestyle='dashed', marker='o',label=\"Random Hill Climbing\")\nplt.plot(sample_sizes, main_execution_times_mimic, color=\"m\",linestyle='dashed', marker='o',label=\"MIMIC\")\nplt.legend(loc=\"best\")\nplt.savefig(\"onemax_times.png\")\nplt.clf()\n#plt.ylim([0,100])\nplt.title(\"Best Fitness Score vs Population Size\")\nplt.xlabel(\"Population Size\")\nplt.ylabel(\"Mean Best Fitness Score\")\nplt.grid()\nplt.plot(sample_sizes, main_best_fit_gene,color=\"g\",linestyle='dashed', marker='o',label=\"Genetic Algorithm\")\nplt.plot(sample_sizes, main_best_fit_anneal,color=\"r\",linestyle='dashed', marker='o',label=\"Simulated Annealing\")\nplt.plot(sample_sizes, main_best_fit_hill, color=\"b\",linestyle='dashed', marker='o',label=\"Random Hill Climbing\")\nplt.plot(sample_sizes, main_best_fit_mimic, color=\"m\",linestyle='dashed', marker='o',label=\"MIMIC\")\nplt.legend(loc=\"best\")\nplt.savefig(\"onemax_accuracy.png\")\nplt.clf()\n\nplt.title(\"No. 
Iterations to Optimization vs Population Size\")\nplt.xlabel(\"Population Size\")\nplt.ylabel(\"Mean Iterations Needed for Optimization\")\nplt.grid()\nplt.plot(sample_sizes, main_opt_iterations_gene,color=\"g\",linestyle='dashed', marker='o',label=\"Genetic Algorithm\")\nplt.plot(sample_sizes, main_opt_iterations_anneal,color=\"r\",linestyle='dashed', marker='o',label=\"Simulated Annealing\")\nplt.plot(sample_sizes, main_opt_iterations_hill, color=\"b\",linestyle='dashed', marker='o',label=\"Random Hill Climbing\")\nplt.plot(sample_sizes, main_opt_iterations_mimic, color=\"m\",linestyle='dashed', marker='o',label=\"MIMIC\")\nplt.legend(loc=\"best\")\nplt.savefig(\"onemax_opt_iters.png\")\nplt.clf()\n\nplt.title(\"Total Iterations vs Population Size\")\nplt.xlabel(\"Population Size\")\nplt.ylabel(\"Mean Total Iterations\")\nplt.grid()\nplt.plot(sample_sizes, main_total_iterations_gene,color=\"g\",linestyle='dashed', marker='o',label=\"Genetic Algorithm\")\nplt.plot(sample_sizes, main_total_iterations_anneal,color=\"r\",linestyle='dashed', marker='o',label=\"Simulated Annealing\")\nplt.plot(sample_sizes, main_total_iterations_hill, color=\"b\",linestyle='dashed', marker='o',label=\"Random Hill Climbing\")\nplt.plot(sample_sizes, main_total_iterations_mimic, color=\"m\",linestyle='dashed', marker='o',label=\"MIMIC\")\nplt.legend(loc=\"best\")\nplt.savefig(\"onemax_total_iters.png\")\nplt.clf()\n\nplt.title(\"Execution Time vs Iterations for Optimization\")\nplt.xlabel(\"Mean Iterations Needed for Optimization\")\nplt.ylabel(\"Mean Execution Time\")\nplt.grid()\nplt.plot(main_opt_iterations_gene, main_execution_times_gene,color=\"g\",linestyle='dashed', marker='o',label=\"Genetic Algorithm\")\nplt.plot(main_opt_iterations_anneal, main_execution_times_anneal,color=\"r\",linestyle='dashed', marker='o',label=\"Simulated Annealing\")\nplt.plot(main_opt_iterations_hill, main_execution_times_hill, color=\"b\",linestyle='dashed', marker='o',label=\"Random Hill Climbing\")\nplt.plot(main_opt_iterations_mimic, main_execution_times_mimic, color=\"m\",linestyle='dashed', marker='o',label=\"MIMIC\")\nplt.legend(loc=\"best\")\nplt.savefig(\"onemax_time_iters.png\")\nplt.clf()","sub_path":"Randomized Optimization/OneMax.py","file_name":"OneMax.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"51667454","text":"import pygame\r\nimport random\r\nimport time\r\n\r\npygame.init()\r\n\r\n# Colors\r\nBLUE = (0, 0, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\n\r\n# Screen\r\nWIDTH = 400\r\nHEIGHT = 600\r\nDISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Game\")\r\n\r\n# Background\r\nBACKGROUND = pygame.image.load(\"AnimatedStreet.png\")\r\n\r\n# FPS\r\nFPS = 60\r\ntimer = pygame.time.Clock()\r\n\r\n# Setting up Fonts\r\nfont = pygame.font.SysFont(\"Verdana\", 60)\r\nfont_small = pygame.font.SysFont(\"Verdana\", 20)\r\ngame_over = font.render(\"Game Over\", True, BLACK)\r\n\r\nCOINS = 0\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.image.load(\"Player.png\")\r\n self.surf = pygame.Surface(self.image.get_size())\r\n\r\n center = (WIDTH // 2, HEIGHT - self.image.get_height() // 2)\r\n self.rect = self.surf.get_rect(center=center)\r\n\r\n self.speed = 300\r\n\r\n def move(self):\r\n pixels_per_frame = self.speed // FPS\r\n pressed_keys = 
pygame.key.get_pressed()\r\n\r\n if self.rect.left > 0:\r\n if pressed_keys[pygame.K_LEFT]:\r\n self.rect.move_ip(-pixels_per_frame, 0)\r\n if self.rect.right < WIDTH:\r\n if pressed_keys[pygame.K_RIGHT]:\r\n self.rect.move_ip(pixels_per_frame, 0)\r\n\r\n def draw(self, surface):\r\n surface.blit(self.image, self.rect)\r\n\r\n\r\nclass Enemy(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.image.load(\"Enemy.png\")\r\n self.width, self.height = self.image.get_size()\r\n self.surf = pygame.Surface(self.image.get_size())\r\n\r\n center = (random.randint(self.width // 2, WIDTH - self.width // 2),\r\n -self.height // 2)\r\n self.rect = self.surf.get_rect(center=center)\r\n\r\n self.speed = 600\r\n\r\n def move(self):\r\n global score\r\n pixels_per_frame = self.speed // FPS\r\n self.rect.move_ip(0, pixels_per_frame)\r\n if self.rect.top > HEIGHT:\r\n score += 1\r\n center = (random.randint(self.width // 2, WIDTH - self.width // 2),\r\n -self.height // 2)\r\n self.rect.center = center\r\n\r\n def draw(self, surface):\r\n surface.blit(self.image, self.rect)\r\n\r\n\r\nclass Coin(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.image.load(\"coin.png\")\r\n self.surf = pygame.Surface((50, 50))\r\n self.rect = self.surf.get_rect(center=(random.randint(40, WIDTH - 40), 0))\r\n\r\n self.speed = 600\r\n\r\n def move(self):\r\n self.rect.move_ip(0, 4)\r\n global COINS\r\n if (self.rect.bottom > 600):\r\n self.rect.top = 0\r\n self.rect.center = (random.randint(40, WIDTH - 40), 0)\r\n if pygame.sprite.spritecollideany(player1, enemies2):\r\n pygame.mixer.Sound('./ rune.wav').play()\r\n COINS += 1\r\n self.rect.top = 0\r\n self.rect.center = (random.randint(40, WIDTH - 40), 0)\r\n\r\n\r\n# Creating our own event\r\nINC_SPEED = pygame.USEREVENT + 1\r\npygame.time.set_timer(INC_SPEED, 1000)\r\ncnt = 0\r\nenemy1 = Enemy()\r\nplayer1 = Player()\r\ncoin1 = Coin()\r\n\r\nenemies = pygame.sprite.Group()\r\nenemies.add(enemy1)\r\nenemies2 = pygame.sprite.Group()\r\nenemies2.add(coin1)\r\nall_sprites = pygame.sprite.Group()\r\nall_sprites.add(player1)\r\nall_sprites.add(enemy1)\r\nall_sprites.add(coin1)\r\n\r\ngame_done = False\r\nwhile not game_done:\r\n score = 0\r\n done = False\r\n while not done:\r\n timer.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n game_done = True\r\n\r\n if pygame.sprite.spritecollideany(player1, enemies):\r\n pygame.mixer.Sound('w10_crash.wav').play()\r\n DISPLAYSURF.fill(RED)\r\n txt_rect = game_over.get_rect(center=(WIDTH // 2, HEIGHT // 2))\r\n DISPLAYSURF.blit(game_over, txt_rect)\r\n pygame.display.flip()\r\n choosen = False\r\n while not choosen:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n game_done = True\r\n choosen = True\r\n if event.type == pygame.KEYDOWN:\r\n choosen = True\r\n if event.key == pygame.K_SPACE:\r\n game_done = True\r\n done = True\r\n\r\n DISPLAYSURF.blit(BACKGROUND, (0, 0))\r\n\r\n scores = font_small.render('Score: ' + str(score), True, BLACK)\r\n DISPLAYSURF.blit(scores, (10, 10))\r\n coinsss = font_small.render('Coins: ' + str(COINS), True, BLACK)\r\n DISPLAYSURF.blit(coinsss, (300, 10))\r\n\r\n for entity in all_sprites:\r\n DISPLAYSURF.blit(entity.image, entity.rect)\r\n entity.move()\r\n\r\n pygame.display.flip()\r\n\r\npygame.quit()","sub_path":"Tsis 
8/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"250750445","text":"import requests\nimport json\nimport boto3\nimport string\nimport sys\nimport os\nfrom datetime import datetime\nimport shutil\n#!/usr/bin/python\n\nfrom botocore.utils import fix_s3_host\nfrom botocore.handlers import disable_signing\nendpoint_url='http://mtt-scl.data.pedalean.com'\nbucket_name = 'pedalean'\nprefix = 'mtt-gz/'+sys.argv[1]\nprint(sys.argv[1])\nresource =boto3.resource('s3', endpoint_url=endpoint_url)\nresource.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)\nresource.meta.client.meta.events.register('choose-signer.s3.*',disable_signing)\nbucket = resource.Bucket(bucket_name)\nkey_list =[k.key for k in bucket.objects.filter(Prefix=prefix)]\ncontador=len(key_list);\nshutil.rmtree('./data')\npath=\"./data/\"+sys.argv[1].replace(\"/\",\"-\")\nprint(path)\nos.makedirs(path)\n\nfor x in range(0, len(key_list)):\n host = \"http://mtt-scl.data.pedalean.com/pedalean/\"\n url = host+key_list[x]\n try:\n resp = requests.get(url)\n data = json.loads(resp.text)\n name = key_list[x][21:40]\n print (str(x) + \" de \" + str(len(key_list)))\n with open(path+\"/\"+name, 'w') as f:\n json.dump(data, f)\n except requests.exceptions.ConnectionError:\n r.status_code = \"Connection refused\"\n","sub_path":"descarga.py","file_name":"descarga.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"494757976","text":"\"\"\"\n Handles parsing logistics for pollease.\n\"\"\"\nimport shlex\n\ndef parse_create_command(command_text):\n \"\"\" Parses the vote options out of a create command.\"\"\"\n params = __deserialize_text(command_text)\n params.pop(0) #Command\n poll_name = params.pop(0) #Pollname\n return poll_name, params\n\ndef __deserialize_text(command_text):\n \"\"\"\n Deserializes the slack command text, which includes the name of the function.\n i.e. >>create myPoll \"opt 1\" \"opt 2\"\n \"\"\"\n #Slack likes to use smart quotes, but shlex doesn't deal with them\n command_text = command_text.replace(u\"\\u201d\", \"\\\"\").replace(u\"\\u201c\", \"\\\"\")\n command_text = command_text.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\n params = shlex.split(command_text.encode('utf8'))\n\n return params\n","sub_path":"app/resources/command_parser.py","file_name":"command_parser.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"86262044","text":"import json\nimport logging\nimport traceback\n\nfrom watcher.backends import redis\nfrom watcher.exceptions import Message\nfrom watcher.util import say\n\nbot_commands = {}\n\n# Commands are imported last for the commands dict\nfrom watcher.commands import twitter # flake8: noqa\nfrom watcher.commands import webpage # flake8: noqa\n\n# Logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\n\n\ndef handle_pubsub(message):\n command = message[\"channel\"].split(\":\", 1)[1]\n\n if command not in bot_commands:\n return\n\n try:\n data = json.loads(message[\"data\"])\n except ValueError:\n return\n\n try:\n bot_commands[command][\"f\"](data)\n except Message as e:\n say(data[\"channel\"], str(e))\n except:\n say(data[\"channel\"], \"Caught unknown exception processing your command. 
pinging @nepeat\\n```%s```\\n\" % (\n traceback.format_exc()\n ))\n traceback.print_exc()\n\n\ndef run():\n # Register bot commands with the core service.\n for cmd, meta in bot_commands.items():\n if meta[\"admin\"]:\n redis.hset(\"bot:admincommands\", cmd, meta[\"help\"])\n else:\n redis.hset(\"bot:commands\", cmd, meta[\"help\"])\n\n # Subscribe to the core service messages.\n ps = redis.pubsub(ignore_subscribe_messages=True)\n ps.subscribe(*[\"command:\" + command for command in bot_commands])\n\n for message in ps.listen():\n handle_pubsub(message)\n","sub_path":"watcher/watcher/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"378609704","text":"import argparse\nimport os\nimport glob\nimport shutil\nfrom ColorMoments import CM\nfrom SIFT import SIFT\n\n#Parsing the command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--model',action=\"store\", dest=\"model\",help=\"Provide any of these model: LBP, SIFT, CM, HOG\", default=\"CM\")\nparser.add_argument('-d', '--dir',action=\"store\", dest=\"dir\",help=\"Provide directory name\", default=\"None\")\nparser.add_argument('-r', '--ranking',action=\"store_true\", dest=\"ranking\",help=\"Enable ranking of images\")\nparser.add_argument('-i', '--imageLoc',action=\"store\", dest=\"imageLoc\",help=\"Provide image name\", default=\"None\")\nparser.add_argument('-k', '--kimage',type=int, dest=\"kimage\",help=\"Provide k value to get k similar images\", default=-1)\nparser.add_argument('-s', '--single_task',action=\"store_true\", dest=\"single_task\",help=\"Enable task 1 for single image\")\n\n\nargs = parser.parse_args()\n\nif args.dir == \"None\":\n print(\"Please provide directory name\")\n exit(1)\n\ncurpath = os.path.dirname(os.path.abspath(__file__))\n\ndirpath = os.path.join(curpath, '..', args.dir)\n\nif not os.path.exists(dirpath):\n print(\"Please provide proper directory location\")\n exit(1)\n\nimagePath = os.path.join(curpath, '..', args.dir, args.imageLoc)\n\n## TASK1 #####\nif args.single_task:\n if args.imageLoc == \"None\" or not os.path.exists(imagePath):\n print(\"Please provide proper directory location\")\n exit(1)\n\n if args.model == 'CM':\n md = CM(imagePath)\n lst = md.getFeatureDescriptors()\n md.createFeatureOutputFile(lst)\n\n elif args.model == 'SIFT':\n md = SIFT(imagePath)\n lst = md.getFeatureDescriptors()\n md.createFeatureOutputFile(lst)\n\n else:\n print(\"Please provide proper model name\")\n exit(1)\n\n exit(0)\n\nif args.ranking:\n if args.imageLoc == \"None\" or not os.path.exists(imagePath):\n print(\"Please provide proper image location using '-i'\")\n exit(1)\n if int(args.kimage) == -1:\n print(\"Please provide k value using '-k'\")\n exit(1)\n\n####### TASK 2 #######\nif args.model == 'CM':\n for image in glob.glob(os.path.join(dirpath,\"*.jpg\")):\n md = CM(image)\n lst = md.getFeatureDescriptors()\n md.createFeatureOutputFile(lst)\n\nelif args.model == 'SIFT':\n for image in glob.glob(os.path.join(dirpath,\"*.jpg\")):\n md = SIFT(image)\n lst = md.getFeatureDescriptors()\n md.createFeatureOutputFile(lst)\n\nelse:\n print(\"Please provide proper model name\")\n exit(1)\n\n# Compare the images based on provided arguments\n\n######## TASK 3 #######################\nif args.model == 'CM' and args.ranking:\n rank_dict = {}\n head =\"\"\n for image in glob.glob(os.path.join(dirpath,\"*.jpg\")):\n md = CM(image)\n x = 
md.compareImages(imagePath)\n        if x == -1:\n            continue\n        head, tail = os.path.split(image)\n        rank_dict.update({tail : x})\n\n    k = 0\n    res_dir = os.path.join(curpath, '..', 'output', 'CM', 'match')\n    if os.path.exists(res_dir):\n        shutil.rmtree(res_dir)\n    os.mkdir(res_dir)\n\n    print(\"\\n\\nNow printing top {} matched Images and their matching scores\".format(args.kimage))\n    for key, value in sorted(rank_dict.items(), key=lambda item: item[1]):\n        if k < args.kimage:\n            print(key + \" has matching score:: \" + str(value))\n            shutil.copy(os.path.join(head, key), res_dir)\n            k+=1\n        else:\n            break\n\nelif args.model == 'SIFT' and args.ranking:\n    rank_dict = {}\n    head = \"\"\n    for image in glob.glob(os.path.join(dirpath,\"*.jpg\")):\n        md = SIFT(image)\n        x = md.compareImages(imagePath)\n        if x == -1:\n            continue\n        head, tail = os.path.split(image)\n        rank_dict.update({tail : x})\n\n    k = 0\n    res_dir = os.path.join(curpath, '..', 'output', 'SIFT', 'match')\n    if os.path.exists(res_dir):\n        shutil.rmtree(res_dir)\n    os.mkdir(res_dir)\n    print(\"\\n\\nNow printing top {} matched Images and their matching scores\".format(args.kimage))\n    for key, value in sorted(rank_dict.items(), key=lambda item: item[1], reverse=True):\n        if k < args.kimage:\n            print(key + \" has matching score:: \" + str(value))\n            shutil.copy(os.path.join(head, key), res_dir)\n            k+=1\n        else:\n            break\n","sub_path":"Phase-1/code/create_feature_script.py","file_name":"create_feature_script.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"441854260","text":"\nimport expanddouban\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\n\n\ndef get_movie_url(category, location):\n    \"\"\"\n    mission:1\n    return a string corresponding to the URL of douban movie lists given category and location.\n    \"\"\"\n    default_url = \"https://movie.douban.com/tag/#/?sort=S&range=9,10&tags=电影\"\n    if location != \"\":\n        real_url = \",\".join([default_url, category, location])\n    else:\n        real_url = \",\".join([default_url, category])\n    return real_url\n\n\ndef get_movies(category, location=\"\"):\n    \"\"\"\n    mission:2\n    return a list of Movie objects with the given category and location.\n    \"\"\"\n    real_url = get_movie_url(category, location)\n    html = expanddouban.getHtml(real_url, True)\n    result = find_movie_info(html,category,location)\n    if len(result) > 0:\n        return result\n    else:\n        return None\n\n\nclass Movie(object):\n    \"\"\"\n    mission:3\n    name = “肖申克的救赎”\n    rate = 9.6\n    location = \"美国\"\n    category = \"剧情\"\n    info_link = \"https://movie.douban.com/subject/1292052/\"\n    cover_link = “https://img3.doubanio.com/view/movie_poster_cover/lpst/public/p480747492.jpg”\n\n    m = Movie(name, rate, location, category, info_link, cover_link)\n    \"\"\"\n    def __init__(self,name,rate,location,category,info_link,cover_link):\n        self.name = name\n        self.rate = rate\n        self.location = location\n        self.category = category\n        self.info_link = info_link\n        self.cover_link = cover_link\n\n\ndef find_movie_info(html, category, location):\n    \"\"\"\n    mission:4\n    :param html: html text\n    :param category: movie category\n    :param location: Country/location\n    :return: the list that includes Movie objects\n    \"\"\"\n    movie_list = []\n    soup = BeautifulSoup(html, \"html.parser\")\n    list_div = soup.find(class_=\"list-wp\")\n    # iterate only over the <a> tags; iterating list_div directly also yields\n    # bare text nodes, which have no .p attribute and would crash below\n    for element in list_div.find_all('a'):\n        movie_info = Movie(element.p.find(class_=\"title\").string,\n                           element.p.find(class_=\"rate\").string,\n                           location,category,element.get('href'),\n                           
element.img.get('src'))\n\n        movie_list.append(movie_info)\n    return movie_list\n\n\ndef write_to_csv_file(movies):\n    \"\"\"\n    mission:5\n    :param movies: movie objects\n    :return: None\n    \"\"\"\n    with open(\"movie.csv\", \"w\", newline=\"\") as movie_csv:\n        write = csv.writer(movie_csv)\n        for movie in movies:\n            input_data = [movie.name, movie.rate, movie.location,\n                          movie.category, movie.info_link, movie.cover_link]\n            write.writerow(input_data)\n\n\ndef get_all_we_need(category_list):\n    location_list = [\"大陆\",\"美国\",\"香港\",\"台湾\",\"日本\",\"韩国\",\"英国\",\"法国\",\"德国\",\n                     \"意大利\",\"西班牙\",\"印度\",\"泰国\",\"俄罗斯\",\"伊朗\",\"加拿大\",\"澳大利亚\",\n                     \"爱尔兰\",\"瑞典\",\"巴西\",\"丹麦\"]\n    for category in category_list:\n        for location in location_list:\n            movies_info = get_movies(category,location)\n\n\nm = get_movies(\"喜剧\")\nwrite_to_csv_file(m)","sub_path":"DoubanCrawler.py","file_name":"DoubanCrawler.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"458116096","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport scipy.io as sio\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\n# import \nimport time\nimport csv\nimport os\nimport threading\n\nalg_lists = [\"NSGAII\",\"NSGAIII\",\"MOEAD\",\"KnEA\",\"RVEA\"]\npro_lists = [\"DTLZ1\",\"DTLZ2\",\"DTLZ3\",\"DTLZ4\",\"DTLZ5\"]\n\n# column name must match the 'Algorithm' key used in extract_data below\ndata = {\n    \"data_NSGAII\" : DataFrame({\"M\":[],\"D\":[],\"IGD\":[], \"Problem\":[], \"Algorithm\":[]}),\n    \"data_NSGAIII\" : DataFrame({\"M\":[],\"D\":[],\"IGD\":[], \"Problem\":[], \"Algorithm\":[]}),\n    \"data_MOEAD\" : DataFrame({\"M\":[],\"D\":[],\"IGD\":[], \"Problem\":[], \"Algorithm\":[]}),\n    \"data_KnEA\" : DataFrame({\"M\":[],\"D\":[],\"IGD\":[], \"Problem\":[], \"Algorithm\":[]}),\n    \"data_RVEA\" : DataFrame({\"M\":[],\"D\":[],\"IGD\":[], \"Problem\":[], \"Algorithm\":[]})\n}\n\ndef paser_filename(filename):\n    return filename.split('_')\n\ndef save(folderename, filename , data_Franme):\n    # convert and save \n    try:\n        os.mkdir(folderename, 0o777)\n    except:\n        pass\n    try:\n        csv_data = data_Franme.to_csv(path_or_buf=os.path.join(folderename, filename+'.csv'))\n        information = \"{0} Data saved successfully!\".format(filename)\n    except IOError:\n        information = \"{0} Data save failed!\".format(filename)\n    return(information)\n\ndef extract_data(D):\n    for algorithm in alg_lists:\n        dir = os.path.join('Data_'+str(D),algorithm)\n        for filename in os.walk(dir):\n            for files in filename[2]:\n                parameters = paser_filename(files)\n                if parameters[3].replace(\".mat\",\"\") != '1':\n                    continue\n                print(\"extracting {0} data from {1} files in data_{2} document ...\".format(algorithm,files,str(D)))\n                try:\n                    file_dir = os.path.join(dir,files)\n                    mat_file = sio.loadmat(file_dir)\n                    sweap = {\n                        \"M\":int(parameters[2].replace(\"M\",\"\")),\n                        \"D\":D,\n                        \"IGD\":mat_file[\"metric\"][\"IGD\"][0][0][0][0],\n                        \"Problem\": parameters[1],\n                        \"Algorithm\": algorithm\n                    }\n                    time.sleep(0.001)\n                    data[\"data_\"+algorithm] = data[\"data_\"+algorithm].append(sweap,ignore_index=True)\n                except:\n                    print(\"ERROR inform {0}\".format(file_dir))\n\ndef Thread_save():\n    counter = 0\n    end_list = list()\n    information_thread = list()\n\n    while True:\n        for task in tasks:\n            if not tasks[task].isAlive():\n                if task not in end_list:\n                    counter = counter + 1\n                    end_list.append(task)\n        if counter == len(tasks):\n            for alg in alg_lists:\n                for pro in pro_lists:\n                    print(\"saving {0}_{1}\".format(alg,pro))\n                    data_f = data['data_'+alg][data['data_'+alg].Problem == pro]\n                    
information_thread.append(save(folderename=\"csv\", filename= \"data_{0}_{1}\".format(str(alg),str(pro)),data_Franme=data_f))\n break\n for inform in information_thread:\n print(inform)\n return True\n \n\n\nif __name__ == \"__main__\":\n tasks = dict()\n for D in [x+14 for x in range(37)]:\n extract_data(D)\n for alg in alg_lists:\n for pro in pro_lists:\n print(\"saving {0}_{1}\".format(alg,pro))\n data_f = data['data_'+alg][data['data_'+alg].Problem == pro]\n save(folderename=\"csv\", filename= \"data_{0}_{1}\".format(str(alg),str(pro)),data_Franme=data_f)\n # print(\"{} threading init...\".format(D))\n # tasks[str(D)] = threading.Thread(target=extract_data, args=[D])\n # save_token = threading.Thread(target=Thread_save,args=[])\n # for task in [x+14 for x in range(37)]:\n # print(\"{} threading starting...\".format(task))\n # tasks[str(task)].start()\n # save_token.start()\n ","sub_path":"Dataset/dataset_tools/data_paser.py","file_name":"data_paser.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213249604","text":"\"\"\"Illustrate centering and boundary checking.\"\"\"\r\n\r\n# Import and initialize pygame.\r\nimport pygame\r\npygame.init()\r\n\r\ndef make_window(width: int, height: int, caption: str) -> pygame.Surface:\r\n \"\"\"Create and return a pygame window.\"\"\"\r\n screen: pygame.Surface\r\n screen = pygame.display.set_mode((width, height))\r\n pygame.display.set_caption(caption)\r\n return screen\r\n\r\ndef main() -> None:\r\n \"\"\"Draw an image centered and at the boundaries.\"\"\"\r\n # Annotate and initialize variables.\r\n SCREEN_SIZE: int = 480\r\n SQUARE_SIZE: int = 50\r\n screen: pygame.Surface\r\n background: pygame.Surface\r\n square: pygame.Surface\r\n user_quit: bool = False\r\n e: pygame.event.Event\r\n \r\n # Set up assets.\r\n screen = make_window(SCREEN_SIZE, SCREEN_SIZE, \"Boundary Demo\")\r\n background = pygame.Surface((SCREEN_SIZE, SCREEN_SIZE))\r\n background.fill((255, 255, 255))\r\n square = pygame.Surface((SQUARE_SIZE, SQUARE_SIZE))\r\n square.fill((0, 150, 150))\r\n clock: pygame.time.Clock = pygame.time.Clock()\r\n\r\n # Calculate coordinate to blit square to center of background.\r\n center_xcoord: float = (background.get_width() / 2) - (square.get_width() / 2)\r\n center_ycoord: float = (background.get_height() / 2) - (square.get_height() / 2)\r\n\r\n # Calculate coordinate to blit square to the bottom, right.\r\n bottom_right_xcoord: float = background.get_width() - square.get_width()\r\n bottom_right_ycoord: float = background.get_height() - square.get_height()\r\n \r\n # Draw to the screen and show.\r\n background.blit(square, (center_xcoord, center_ycoord))\r\n background.blit(square, (bottom_right_xcoord, bottom_right_ycoord))\r\n screen.blit(background, (0, 0))\r\n pygame.display.flip()\r\n\r\n # Process events until the user chooses to quit.\r\n while not user_quit:\r\n # Loop 30 times per second\r\n clock.tick(30)\r\n for e in pygame.event.get():\r\n # Process a quit choice.\r\n if e.type == pygame.QUIT:\r\n user_quit = True\r\n \r\n pygame.quit()\r\n\r\nmain()\r\n","sub_path":"Chapter8TextbookCode/Listing 8-5.py","file_name":"Listing 8-5.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"119253767","text":"import random\npartidas = int(input(\"Cuantas partidas quieres jugar: \"))\nnombre = str(input(\"Dime tu nombre: \"))\nvictorias = 
0\nderrotas = 0\nwhile True:\n    print(\"\"\"\n    1) Parells\n    2) Senars\n    \"\"\")\n    quiere = int(input(\"Que quieres: \"))\n    numero = int(input(\"Que numero entre el 1 y el 9 eliges: \"))\n    ordenador = random.randint(1, 9)\n    if quiere == 1:\n        if (numero + ordenador) % 2 == 0:\n            print(\"Has ganado \" + nombre)\n            victorias += 1 \n        elif (numero + ordenador) % 2 != 0:\n            print(\"Has perdido \" + nombre)\n            derrotas += 1\n    elif quiere == 2:\n        if (numero + ordenador) % 2 == 0:\n            print(\"Has perdido \" + nombre) \n            derrotas += 1\n        elif (numero + ordenador) % 2 != 0:\n            print(\"Has ganado \" + nombre) \n            victorias += 1\n    print(\"la maquina ha elegido: \" , ordenador)\n    print(\"La suma es: \" , numero + ordenador)\n    print(\"-----------------------------------------\")\n    if victorias + derrotas >= partidas:\n        print(\"Has ganado\" , victorias)\n        print(\"Has perdido\" , derrotas)\n        break","sub_path":"paresonones.py","file_name":"paresonones.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"611165795","text":"from __future__ import annotations\n\nimport math\nimport logging\nimport itertools\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Callable, Iterable, cast, Tuple, Dict, Any\n\nimport numpy\nfrom scipy import stats, optimize\nfrom numpy import linalg\nfrom numpy.polynomial.chebyshev import chebfit, chebval\nfrom scipy.stats.mstats_basic import NormaltestResult\n\nfrom . import Storable, round_digits, NumVector1d, NumVector2d, Number\nfrom .selectored_storage import TimeSeries\nfrom .inumeric import Numpy2d, Numpy1d\n\n\nlogger = logging.getLogger(\"utils\")\n\n\nDOUBLE_DELTA = 1e-8\nMIN_VALUES_FOR_CONFIDENCE = 7\nMIN_VAL = 1\nMAX_LIN_DIFF = 100\nUPPER_ROUND_COEF = 0.99999\nNO_VALUE = -1\n\n\naverage = numpy.mean\ndev = lambda x: math.sqrt(numpy.var(x, ddof=1))\n\n\n@dataclass\nclass ArrayData:\n    header: List[str]\n    histo_bins: Optional[numpy.ndarray]\n    data: Optional[numpy.ndarray]\n\n\ndef auto_edges(vals: Numpy1d, log_base: float = 2, bins: int = 20,\n               round_base: int = 10, log_space: bool = True) -> Numpy1d:\n    lower = numpy.min(vals)\n    upper = numpy.max(vals)\n    return auto_edges2(lower, upper, log_base, bins, round_base, log_space=log_space)\n\n\ndef auto_edges2(lower: float, upper: float, log_base: float = 2,\n                bins: int = 20, round_base: int = 10, log_space: bool = True) -> Numpy1d:\n    if lower == upper:\n        return numpy.array([lower * 0.9, lower * 1.1])\n\n    if round_base and lower > MIN_VAL:\n        lower = round_base ** (math.floor(math.log(lower) / math.log(round_base)))\n        # round the *upper* bound up from its own logarithm; the original used\n        # log(lower) here, which collapsed the range to a single point\n        upper = round_base ** (math.floor(math.log(upper) / math.log(round_base) + UPPER_ROUND_COEF))\n\n    if lower < MIN_VAL or upper / lower < MAX_LIN_DIFF or not log_space:\n        return numpy.linspace(lower, upper, bins + 1)\n\n    lower_lg = math.log(lower) / math.log(log_base)\n    upper_lg = math.log(upper) / math.log(log_base)\n    return numpy.logspace(lower_lg, upper_lg, bins + 1, base=log_base)\n\n
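# Editor's note (added example, not in the original module): a quick sanity check of auto_edges2 with hypothetical bounds:\n#\n#     >>> auto_edges2(1.0, 1024.0, log_base=2, bins=10, round_base=0)\n#     array([1., 2., 4., ..., 1024.])   # bins + 1 log-spaced edges\n#\n# round_base=0 disables the power-of-round_base snapping of the bounds.\n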
\ndef approximate_ts(times: NumVector1d, values: NumVector1d, begin: float, end: float, step: float = 1000000) -> Numpy1d:\n    if len(times) != len(values):\n        raise AssertionError(\"Times and values arrays have different sizes\")\n\n    if begin < times[0] or end > times[-1] or end <= begin:\n        raise AssertionError(\"Can't approximate: borders are incorrect or lie outside the data range\")\n\n    pos1, pos2 = numpy.searchsorted(times, (begin, end))\n\n    # current real data time chunk begin time\n    edge_it = iter(times[pos1 - 1: pos2 + 1])\n\n    # current real data value\n    val_it = iter(values[pos1 - 1: pos2 + 1])\n\n    # result array, cumulative value per second\n    result = numpy.zeros(int((end - begin) // step))\n    idx = 0\n    curr_summ = 0\n\n    # end of current time slot\n    results_cell_ends = begin + step\n\n    # hack to unify looping\n    real_data_end = next(edge_it)\n    while results_cell_ends <= end:\n        real_data_start = real_data_end\n        real_data_end = next(edge_it)\n        real_val_left = next(val_it)\n\n        # real data \"speed\" for interval [real_data_start, real_data_end]\n        real_val_ps = float(real_val_left) / (real_data_end - real_data_start)\n\n        while real_data_end >= results_cell_ends and results_cell_ends <= end:\n            # part of current real value, which fits into current result cell\n            curr_real_chunk = int((results_cell_ends - real_data_start) * real_val_ps)\n\n            # calculate rest of real data for next result cell\n            real_val_left -= curr_real_chunk\n            result[idx] = curr_summ + curr_real_chunk\n            idx += 1\n            curr_summ = 0\n\n            # adjust real data start time\n            real_data_start = results_cell_ends\n            results_cell_ends += step\n\n        # don't lose any real data\n        curr_summ += real_val_left\n\n    return result\n\n\n# data is timeseries of 1D arrays, each array is view on system parts load at come time\n# E.G. OSD loads at t0. t0 + 1, t0 + 2, ...\n# return 2D heatmap array\ndef prepare_heatmap(data: NumVector2d, bins_vals: NumVector1d,\n                    bins_count: int, outliers_perc: Tuple[float, float]) -> Tuple[Numpy2d, Numpy1d]:\n    \"\"\"\n    :param data: list of histograms, one per line\n    :param bins_vals: values at center of each bin\n    :param bins_count: result bin count for each column\n    :param outliers_perc: pair of outliers limits typically (0.25, 0.75)\n    :return:\n    \"\"\"\n\n    assert len(data.shape) == 2\n    assert data.shape[1] == len(bins_vals)\n\n    total_hist = data.sum(axis=0)\n\n    # idx1, idx2 = hist_outliers_perc(total_hist, style.outliers_lat)\n    idx1, idx2 = ts_hist_outliers_perc(data, bounds_perc=outliers_perc)\n\n    # don't cut too many bins\n    min_bins_left = bins_count\n    if idx2 - idx1 < min_bins_left:\n        # split the shortfall between both sides (parenthesized: the original\n        # applied // 2 only to the subtrahend)\n        missed = (min_bins_left - (idx2 - idx1)) // 2\n        idx2 = min(len(total_hist), idx2 + missed)\n        idx1 = max(0, idx1 - missed)\n\n    data = data[:, idx1:idx2]\n    bins_vals = bins_vals[idx1:idx2]\n\n    # don't using rebin_histogram here, as we need apply same bins for many arrays\n    step = (bins_vals[-1] - bins_vals[0]) / bins_count\n    new_bins_edges = numpy.arange(bins_count) * step + bins_vals[0]\n    bin_mapping = numpy.clip(numpy.searchsorted(new_bins_edges, bins_vals) - 1, 0, len(new_bins_edges) - 1)\n\n    # map origin bins ranges to heatmap bins, iterate over rows\n    cmap = []\n    for line in data:\n        curr_bins = [0] * bins_count\n        for idx, count in zip(bin_mapping, line):\n            curr_bins[idx] += count\n        cmap.append(curr_bins)\n\n    return numpy.array(cmap), new_bins_edges\n\n\ndef avg_counters(counts: List[int], values: List[float]) -> numpy.ndarray:\n    counts_a = numpy.array(counts, dtype=numpy.float32)\n    values_a = numpy.array(values, dtype=numpy.float32)\n\n    with numpy.errstate(divide='ignore', invalid='ignore'):  # type: ignore\n        avg_vals = (values_a[1:] - values_a[:-1]) / (counts_a[1:] - counts_a[:-1])\n\n    avg_vals[avg_vals == numpy.inf] = NO_VALUE\n    avg_vals[numpy.isnan(avg_vals)] = NO_VALUE  # type: ignore\n\n    return avg_vals  # type: ignore\n\n
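# Editor's note (added example): avg_counters turns cumulative (count, value) samples into per-step averages, e.g.\n#\n#     >>> avg_counters([0, 1, 3], [0.0, 10.0, 40.0])\n#     array([10., 15.], dtype=float32)\n#\n# Steps where the counter did not move come back as NO_VALUE.\n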
\nclass StatProps(Storable):\n    \"\"\"Statistic properties for timeseries with unknown data distribution\"\"\"\n\n    __ignore_fields__ = ['data']\n\n    def __init__(self, data: numpy.array, units: str) -> None:\n        self.perc_99: Optional[float] = None\n        self.perc_95: Optional[float] = None\n        self.perc_90: Optional[float] = None\n        self.perc_50: Optional[float] = None\n        self.perc_10: Optional[float] = None\n        self.perc_5: Optional[float] = None\n        self.perc_1: Optional[float] = None\n\n        self.min: Optional[Number] = None\n        self.max: Optional[Number] = None\n\n        # bin_center: bin_count\n        self.log_bins = False\n        self.bins_populations: Optional[Numpy1d] = None\n\n        # bin edges, one more element than in bins_populations\n        self.bins_edges: Optional[Numpy1d] = None\n\n        self.data = data\n        self.units = units\n\n    def __str__(self) -> str:\n        res = [\"{}(size = {}):\".format(self.__class__.__name__, len(self.data))]\n        for name in [\"perc_1\", \"perc_5\", \"perc_10\", \"perc_50\", \"perc_90\", \"perc_95\", \"perc_99\"]:\n            res.append(\"    {} = {}\".format(name, round_digits(getattr(self, name))))\n        res.append(\"    range {} {}\".format(round_digits(self.min), round_digits(self.max)))\n        return \"\\n\".join(res)\n\n    def __repr__(self) -> str:\n        return str(self)\n\n    # serialize the fields that actually exist on this class; the original\n    # referenced a non-existent 'bins_mids' key here and in fromraw()\n    def raw(self) -> Dict[str, Any]:\n        data = super().raw()\n        data['bins_edges'] = list(data['bins_edges'])\n        data['bins_populations'] = list(data['bins_populations'])\n        return data\n\n    @classmethod\n    def fromraw(cls, data: Dict[str, Any]) -> StatProps:\n        data['bins_edges'] = numpy.array(data['bins_edges'])\n        data['bins_populations'] = numpy.array(data['bins_populations'])\n        return cast(StatProps, super().fromraw(data))\n\n\nclass HistoStatProps(StatProps):\n    \"\"\"Statistic properties for 2D timeseries with unknown data distribution and histogram as input value.\n    Used for latency\"\"\"\n    def __init__(self, data: numpy.array, units: str) -> None:\n        StatProps.__init__(self, data, units)\n\n
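# Editor's note: raw()/fromraw() above are intended as inverses, so a populated instance should survive a round-trip (hypothetical sketch, assuming bins_edges/bins_populations are set and Storable supports it):\n#\n#     >>> props2 = StatProps.fromraw(props.raw())\n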
Used for iops/bw\"\"\"\n def __init__(self, data: numpy.array, units: str) -> None:\n StatProps.__init__(self, data, units)\n self.average: Optional[float] = None\n self.deviation: Optional[float] = None\n self.confidence: Optional[float] = None\n self.confidence_level: Optional[float] = None\n self.normtest: Optional[NormaltestResult] = None\n self.skew: Optional[float] = None\n self.kurt: Optional[float] = None\n\n def __str__(self) -> str:\n res = [\"NormStatProps(size = {}):\".format(len(self.data)),\n \" distr = {} ~ {}\".format(round_digits(self.average), round_digits(self.deviation)),\n \" confidence({0.confidence_level}) = {1}\".format(self, round_digits(self.confidence)),\n \" perc_1 = {}\".format(round_digits(self.perc_1)),\n \" perc_5 = {}\".format(round_digits(self.perc_5)),\n \" perc_10 = {}\".format(round_digits(self.perc_10)),\n \" perc_50 = {}\".format(round_digits(self.perc_50)),\n \" perc_90 = {}\".format(round_digits(self.perc_90)),\n \" perc_95 = {}\".format(round_digits(self.perc_95)),\n \" perc_99 = {}\".format(round_digits(self.perc_99)),\n \" range {} {}\".format(round_digits(self.min), round_digits(self.max)),\n \" normtest = {0.normtest}\".format(self),\n \" skew ~ kurt = {0.skew} ~ {0.kurt}\".format(self)]\n return \"\\n\".join(res)\n\n def raw(self) -> Dict[str, Any]:\n data = super().raw()\n data['normtest'] = (data['nortest'].statistic, data['nortest'].pvalue)\n return data\n\n @classmethod\n def fromraw(cls, data: Dict[str, Any]) -> 'NormStatProps':\n data['normtest'] = NormaltestResult(*data['normtest'])\n return cast(NormStatProps, super().fromraw(data))\n\n\ndef calc_norm_stat_props(ts: TimeSeries, bins_count: int = None, confidence: float = 0.95) -> NormStatProps:\n \"\"\"\"Calculate statistical properties of array of numbers\"\"\"\n\n res = NormStatProps(ts.data, ts.units) # type: ignore\n\n if len(ts.data) == 0:\n raise ValueError(\"Input array is empty\")\n\n res.average = average(ts.data)\n res.deviation = dev(ts.data)\n\n data = sorted(ts.data)\n res.max = data[-1]\n res.min = data[0]\n pcs = numpy.percentile(data, q=[1.0, 5.0, 10., 50., 90., 95., 99.])\n res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = pcs\n\n if len(data) >= MIN_VALUES_FOR_CONFIDENCE:\n res.confidence = stats.sem(ts.data) * \\\n stats.t.ppf((1 + confidence) / 2, len(ts.data) - 1)\n res.confidence_level = confidence\n else:\n res.confidence = None\n res.confidence_level = None\n\n if bins_count is not None:\n res.bins_populations, res.bins_edges = numpy.histogram(ts.data, bins=bins_count)\n res.bins_edges = res.bins_edges[:-1]\n\n try:\n res.normtest = stats.mstats.normaltest(ts.data)\n except Exception as exc:\n logger.warning(\"stats.mstats.normaltest failed with error: %s\", exc)\n\n res.skew = stats.skew(ts.data)\n res.kurt = stats.kurtosis(ts.data)\n\n return res\n\n\n# update this code\ndef rebin_histogram(bins_populations: numpy.array,\n bins_edges: numpy.array,\n new_bins_count: int,\n left_tail_idx: int = None,\n right_tail_idx: int = None,\n log_bins: bool = False) -> Tuple[numpy.array, numpy.array]:\n # rebin large histogram into smaller with new_bins bins, linearly distributes across\n # left_tail_idx:right_tail_idx range\n\n assert len(bins_populations.shape) == 1\n assert len(bins_edges.shape) == 1\n assert bins_edges.shape[0] == bins_populations.shape[0]\n\n if left_tail_idx is None:\n min_val = bins_edges[0]\n else:\n min_val = bins_edges[left_tail_idx]\n\n if right_tail_idx is None:\n max_val = bins_edges[-1]\n else:\n 
\n# update this code\ndef rebin_histogram(bins_populations: numpy.array,\n                    bins_edges: numpy.array,\n                    new_bins_count: int,\n                    left_tail_idx: int = None,\n                    right_tail_idx: int = None,\n                    log_bins: bool = False) -> Tuple[numpy.array, numpy.array]:\n    # rebin large histogram into smaller with new_bins bins, linearly distributes across\n    # left_tail_idx:right_tail_idx range\n\n    assert len(bins_populations.shape) == 1\n    assert len(bins_edges.shape) == 1\n    assert bins_edges.shape[0] == bins_populations.shape[0]\n\n    if left_tail_idx is None:\n        min_val = bins_edges[0]\n    else:\n        min_val = bins_edges[left_tail_idx]\n\n    if right_tail_idx is None:\n        max_val = bins_edges[-1]\n    else:\n        max_val = bins_edges[right_tail_idx]\n\n    if log_bins:\n        assert min_val > 1E-3\n        step = (max_val / min_val) ** (1 / new_bins_count)\n        new_bins_edges = min_val * (step ** numpy.arange(new_bins_count))  # type: numpy.array\n    else:\n        new_bins_edges = numpy.linspace(min_val, max_val, new_bins_count + 1, dtype='float')[:-1]  # type: numpy.array\n\n    old_bins_pos = numpy.searchsorted(new_bins_edges, bins_edges, side='right')\n    new_bins = numpy.zeros(new_bins_count, dtype=int)  # type: numpy.array\n\n    # last source bin can't be split\n    # TODO: need to add assert for this\n    new_bins[-1] += bins_populations[-1]\n    bin_sizes = bins_edges[1:] - bins_edges[:-1]\n\n    # correct position to get bin idx from edge idx\n    old_bins_pos -= 1\n    old_bins_pos[old_bins_pos < 0] = 0\n    new_bins_sizes = new_bins_edges[1:] - new_bins_edges[:-1]\n\n    for population, begin, end, bsize in zip(bins_populations[:-1], old_bins_pos[:-1], old_bins_pos[1:], bin_sizes):\n        if begin == end:\n            new_bins[begin] += population\n        else:\n            density = population / bsize\n            for curr_box in range(begin, end):\n                # distribute the population across all covered target bins;\n                # the original indexed 'begin' here, dumping everything into\n                # the first bin of the range\n                cnt = min(int(new_bins_sizes[curr_box] * density + 0.5), population)\n                new_bins[curr_box] += cnt\n                population -= cnt\n\n    return new_bins, new_bins_edges\n\n\ndef calc_histo_stat_props(ts: TimeSeries,\n                          bins_edges: numpy.array = None,\n                          rebins_count: int = None,\n                          tail: float = 0.005) -> HistoStatProps:\n    if bins_edges is None:\n        bins_edges = ts.histo_bins\n\n    res = HistoStatProps(ts.data, ts.units)\n\n    # sum across all series\n    aggregated = ts.data.sum(axis=0, dtype='int')\n    total = aggregated.sum()\n\n    # percentiles levels\n    expected = list(numpy.array([0.01, 0.05, 0.1, 0.5, 0.9, 0.95, 0.99]) * total)\n    cumsum = numpy.cumsum(aggregated)\n\n    percentiles_bins = numpy.searchsorted(cumsum, expected)\n    percentiles = bins_edges[percentiles_bins]\n    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = percentiles\n\n    # don't show tail ranges on histogram\n    left_tail_idx, right_tail_idx = numpy.searchsorted(cumsum, [tail * total, (1 - tail) * total])\n\n    # minimal and maximal non-zero elements\n    non_zero = numpy.nonzero(aggregated)[0]\n    if len(non_zero) > 0:\n        # index the edges by the bin position itself, not by its population\n        res.min = bins_edges[non_zero[0]]\n        res.max = bins_edges[non_zero[-1] + (1 if non_zero[-1] != len(bins_edges) - 1 else 0)]\n    else:\n        res.min = res.max = 0\n\n    res.log_bins = False\n    if rebins_count is not None:\n        res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,\n                                                               left_tail_idx, right_tail_idx)\n    else:\n        res.bins_populations = aggregated\n        res.bins_edges = bins_edges.copy()\n\n    return res\n\n\ndef groupby_globally(data: Iterable, key_func: Callable):\n    grouped = {}  # type: ignore\n    grouped_iter = itertools.groupby(data, key_func)\n\n    for (bs, cache_tp, act, conc), curr_data_it in grouped_iter:\n        key = (bs, cache_tp, act, conc)\n        grouped.setdefault(key, []).extend(curr_data_it)\n\n    return grouped\n\n\ndef approximate_curve(x: List[Number], y: List[float], xnew: List[Number], curved_coef: int) -> List[float]:\n    \"\"\"returns ynew - y values of some curve approximation\"\"\"\n    return cast(List[float], chebval(xnew, chebfit(x, y, curved_coef)))\n\n\ndef approximate_line(x: List[Number], y: List[float], xnew: List[Number], relative_dist: bool = False) -> List[float]:\n    \"\"\"\n    x, y - test data, xnew - points where we want the approximation\n    if not relative_dist distance = y - newy\n    returns ynew - y values of linear approximation\n    \"\"\"\n    ox = numpy.array(x)\n    oy = numpy.array(y)\n\n    # set approximation 
function\n def func_line(tpl, x):\n return tpl[0] * x + tpl[1]\n\n def error_func_rel(tpl, x, y):\n return 1.0 - y / func_line(tpl, x)\n\n def error_func_abs(tpl, x, y):\n return y - func_line(tpl, x)\n\n # choose distance mode\n error_func = error_func_rel if relative_dist else error_func_abs\n\n tpl_initial = tuple(linalg.solve([[ox[0], 1.0], [ox[1], 1.0]],\n oy[:2]))\n\n # find line\n tpl_final, success = optimize.leastsq(error_func, tpl_initial[:], args=(ox, oy))\n\n # if error\n if success not in range(1, 5):\n raise ValueError(\"No line for this dots\")\n\n # return new dots\n return func_line(tpl_final, numpy.array(xnew))\n\n\ndef moving_average(data: numpy.array, window: int) -> numpy.array:\n cumsum = numpy.cumsum(data)\n cumsum[window:] = cumsum[window:] - cumsum[:-window]\n return cumsum[window - 1:] / window\n\n\ndef moving_dev(data: numpy.array, window: int) -> numpy.array:\n cumsum = numpy.cumsum(data)\n cumsum2 = numpy.cumsum(data ** 2)\n cumsum[window:] = cumsum[window:] - cumsum[:-window]\n cumsum2[window:] = cumsum2[window:] - cumsum2[:-window]\n return ((cumsum2[window - 1:] - cumsum[window - 1:] ** 2 / window) / (window - 1)) ** 0.5\n\n\ndef outlier_vals(data: numpy.array, center_range: Tuple[int, int], cut_range: float) -> Tuple[float, float]:\n v1, v2 = numpy.percentile(data, center_range)\n return (v1 + v2) / 2, (v2 - v1) / 2 * cut_range\n\n\ndef find_ouliers(data: numpy.array, center_range: Tuple[int, int] = (25, 75), cut_range: float = 3.0) -> numpy.array:\n center, rng = outlier_vals(data, center_range, cut_range)\n return numpy.abs(data - center) > rng\n\n\ndef find_ouliers_ts(data: numpy.array,\n windows_size: int = 30,\n center_range: Tuple[int, int] = (25, 75),\n cut_range: float = 3.0) -> numpy.array:\n outliers = numpy.zeros(data.shape, dtype=bool)\n\n if len(data) < windows_size:\n return outliers\n\n begin_idx = 0\n if len(data) < windows_size * 2:\n end_idx = (len(data) % windows_size) // 2 + windows_size\n else:\n end_idx = len(data)\n\n while True:\n cdata = data[begin_idx: end_idx]\n outliers[begin_idx: end_idx] = find_ouliers(cdata, center_range, cut_range)\n begin_idx = end_idx\n\n if end_idx == len(data):\n break\n\n end_idx += windows_size\n if len(data) - end_idx < windows_size:\n end_idx = len(data)\n\n return outliers\n\n\ndef hist_outliers_nd(bin_populations: numpy.array,\n bin_centers: numpy.array,\n center_range: Tuple[int, int] = (25, 75),\n cut_range: float = 3.0) -> Tuple[int, int]:\n assert len(bin_populations) == len(bin_centers)\n total_count = bin_populations.sum()\n\n perc25 = total_count / 100.0 * center_range[0]\n perc75 = total_count / 100.0 * center_range[1]\n\n perc25_idx, perc75_idx = numpy.searchsorted(numpy.cumsum(bin_populations), [perc25, perc75])\n middle = (bin_centers[perc75_idx] + bin_centers[perc25_idx]) / 2\n r = (bin_centers[perc75_idx] - bin_centers[perc25_idx]) / 2\n\n lower_bound = middle - r * cut_range\n upper_bound = middle + r * cut_range\n\n lower_cut_idx, upper_cut_idx = numpy.searchsorted(bin_centers, [lower_bound, upper_bound])\n return lower_cut_idx, upper_cut_idx\n\n\ndef hist_outliers_perc(bin_populations: numpy.array,\n bounds_perc: Tuple[float, float] = (0.01, 0.99),\n min_bins_left: int = None) -> Tuple[int, int]:\n assert len(bin_populations.shape) == 1\n total_count = bin_populations.sum()\n lower_perc = total_count * bounds_perc[0]\n upper_perc = total_count * bounds_perc[1]\n idx1, idx2 = numpy.searchsorted(numpy.cumsum(bin_populations), [lower_perc, upper_perc])\n\n # don't cut too many bins. 
At least min_bins_left bins must be left\n    if min_bins_left is not None and idx2 - idx1 < min_bins_left:\n        # split the shortfall between both sides (parenthesized: the original\n        # applied // 2 only to the subtrahend)\n        missed = (min_bins_left - (idx2 - idx1)) // 2\n        idx2 = min(len(bin_populations), idx2 + missed)\n        idx1 = max(0, idx1 - missed)\n\n    return idx1, idx2\n\n\ndef ts_hist_outliers_perc(bin_populations: numpy.array,\n                          window_size: int = 10,\n                          bounds_perc: Tuple[float, float] = (0.01, 0.99),\n                          min_bins_left: int = None) -> Tuple[int, int]:\n    assert len(bin_populations.shape) == 2\n\n    points = list(range(0, len(bin_populations), window_size))\n    if len(bin_populations) % window_size != 0:\n        points.append(points[-1] + window_size)\n\n    ranges: List[Tuple[int, int]] = []\n    for begin, end in zip(points[:-1], points[1:]):\n        window_hist = bin_populations[begin:end].sum(axis=0)\n        ranges.append(hist_outliers_perc(window_hist, bounds_perc=bounds_perc, min_bins_left=min_bins_left))\n\n    return min(i[0] for i in ranges), max(i[1] for i in ranges)\n\n\ndef make_2d_histo(tss: List[TimeSeries],\n                  outliers_range: Tuple[float, float] = (0.02, 0.98),\n                  bins_count: int = 20,\n                  log_bins: bool = False) -> TimeSeries:\n\n    # validate input data\n    for ts in tss:\n        assert len(ts.times) == len(ts.data), \"Time(={}) and data(={}) sizes don't match for {!s}\"\\\n            .format(len(ts.times), len(ts.data), ts.source)\n        assert ts.time_units == 's', \"All arrays should use 's' time units\"\n        assert ts.units == tss[0].units, \"All arrays should have the same data units\"\n        assert ts.data.shape == tss[0].data.shape, \"All arrays should have the same data size\"\n        assert len(ts.data.shape) == 1, \"All arrays should be 1d\"\n\n    whole_arr = numpy.concatenate([ts.data for ts in tss])\n    whole_arr.shape = [len(tss), -1]\n\n    if outliers_range is not None:\n        # percentile of an increasing q list returns increasing values, so the\n        # first unpacked name is the minimum (the original had min/max swapped)\n        min_vl, begin, end, max_vl = numpy.percentile(whole_arr,\n                                                      [0, outliers_range[0] * 100, outliers_range[1] * 100, 100])\n        bins_edges = auto_edges2(begin, end, bins=bins_count, log_space=log_bins)\n        fixed_bins_edges = bins_edges.copy()\n        fixed_bins_edges[0] = begin\n        fixed_bins_edges[-1] = end\n    else:\n        begin, end = numpy.percentile(whole_arr, [0, 100])\n        bins_edges = auto_edges2(begin, end, bins=bins_count, log_space=log_bins)\n        fixed_bins_edges = bins_edges\n\n    # numpy.histogram returns (counts, edges); keep the counts only, and give\n    # concatenate a real list instead of a generator of tuples\n    res_data = numpy.concatenate([numpy.histogram(column, fixed_bins_edges)[0] for column in whole_arr.T])\n    res_data.shape = (len(tss), -1)\n    res = TimeSeries(data=res_data,\n                     times=tss[0].times,\n                     units=tss[0].units,\n                     source=tss[0].source,\n                     time_units=tss[0].time_units,\n                     histo_bins=bins_edges)\n    return res\n\n
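# Editor's note (hypothetical call, not in the original file): make_2d_histo bins each 1d input series into histogram rows sharing one set of edges, e.g.\n#\n#     >>> histo_ts = make_2d_histo([ts1, ts2], outliers_range=(0.02, 0.98), bins_count=20)\n#\n# where ts1/ts2 are equally shaped 1d TimeSeries; the shared edges end up in histo_ts.histo_bins.\n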
\ndef aggregate_histograms(tss: List[TimeSeries],\n                         outliers_range: Tuple[float, float] = (0.02, 0.98),\n                         bins_count: int = 20,\n                         log_bins: bool = False) -> TimeSeries:\n\n    # validate input data\n    for ts in tss:\n        assert len(ts.times) == len(ts.data), \"Need to use stripped time\"\n        assert ts.time_units == 's', \"All arrays should use 's' time units\"\n        assert ts.units == tss[0].units, \"All arrays should have the same data units\"\n        assert ts.data.shape == tss[0].data.shape, \"All arrays should have the same data size\"\n        assert len(ts.data.shape) == 2, \"All arrays should be 2d\"\n        assert ts.histo_bins is not None, \"All arrays should have histo_bins set\"\n\n    whole_arr = numpy.concatenate([ts.data for ts in tss])\n    whole_arr.shape = [len(tss), -1]\n\n    # the original assigned min() to max_val and vice versa\n    min_val = whole_arr.min()\n    max_val = whole_arr.max()\n\n    if outliers_range is not None:\n        begin, end = numpy.percentile(whole_arr, [outliers_range[0] * 100, outliers_range[1] * 100])\n    else:\n        begin = min_val\n        end = max_val\n\n    bins_edges = auto_edges2(begin, end, bins=bins_count, log_space=log_bins)\n\n    if outliers_range is not None:\n        fixed_bins_edges = bins_edges.copy()\n        fixed_bins_edges[0] = begin\n        fixed_bins_edges[-1] = end\n    else:\n        fixed_bins_edges = bins_edges\n\n    # as in make_2d_histo: keep only the counts from numpy.histogram and pass a list\n    res_data = numpy.concatenate([numpy.histogram(column, fixed_bins_edges)[0] for column in whole_arr.T])\n    res_data.shape = (len(tss), -1)\n    return TimeSeries(res_data,\n                      times=tss[0].times,\n                      units=tss[0].units,\n                      source=tss[0].source,\n                      time_units=tss[0].time_units,\n                      histo_bins=fixed_bins_edges)\n","sub_path":"koder_utils/numeric.py","file_name":"numeric.py","file_ext":"py","file_size_in_byte":24802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"359978547","text":"#!/usr/bin/python\n# -*-coding:utf-8-*-\n\n#FileName: block.py\n\n\nimport pygame\nfrom .pygame_anchors import *\nimport os,sys\n\n# uncommented: this default is referenced in Imagebox.__init__ below and was\n# otherwise undefined (unless pygame_anchors happened to export it)\nimpath = os.path.split(sys.argv[0])[0]+'/VisualStimuli/template.bmp'\n\nclass Imagebox(object):\n    def __init__(self,root,\n                 image = impath,\n                 siz = (0,0),\n                 position = (0,0),\n                 anchor = 'center',\n                 text = '',\n                 textcolor = (0,255,255),\n                 textfont = 'arial',\n                 textsize = 10,\n                 textanchor = 'lefttop',\n                 borderon = False,\n                 borderwidth = 1,\n                 bordercolor = (255,255,255),\n                 layer = 0,\n                 visible = False):\n\n        pygame.font.init()\n        self.root = root\n        self.siz = siz\n        self.position = position\n        self.anchor = anchor\n        self.textcolor = textcolor\n        self.textfont = textfont\n        self.textsize = textsize\n        self.text = text\n        self.textanchor = textanchor\n        self.image = image\n        self.borderon = borderon\n        self.borderwidth = borderwidth\n        self.bordercolor = bordercolor\n        self.visible = visible\n        self.layer = layer\n\n        if not os.path.isfile(self.textfont): self.textfont = pygame.font.match_font(self.textfont)\n        self.font_object = pygame.font.Font(self.textfont,self.textsize)\n\n        self.blitp =blit_pos1(self.siz,self.position,self.anchor)\n\n    def reset(self):\n        if self.siz == (0,0):\n            self.im = pygame.image.load(self.image).convert()\n            self.siz = self.im.get_size()\n        else:\n            self.im = pygame.transform.scale(pygame.image.load(self.image).convert(),self.siz)\n\n        if self.text != '':\n            self.font_object = pygame.font.Font(self.textfont,self.textsize)\n            self.textsur = self.font_object.render(self.text,1,self.textcolor)\n            corner = getcorner(self.siz,self.textanchor)\n            p = blit_pos(self.textsur,corner,self.textanchor)\n            self.im.blit(self.textsur,p)\n        self.blitp = blit_pos1(self.siz,self.position,self.anchor)\n        return self.im,self.blitp\n\n    def show(self):\n        if self.visible:\n            if self.im!=None:\n                self.root.blit(self.im,self.blitp)\n            if self.borderon:\n                pygame.draw.rect(self.root,self.bordercolor,pygame.Rect(self.blitp,self.siz),self.borderwidth)\n","sub_path":"visualstimuli/visualstimuli/imagebox.py","file_name":"imagebox.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"312436587","text":"# This program takes information about a trip from a text file and outputs information about fuel efficiency.\n\n\ndef main():\n    in_file = open('mileage.txt')\n    initial_odo = float(in_file.readline())\n    mpg_lst = []\n    for line in in_file:\n        user_input_lst = (line.strip('\\n')).split(\" \")\n        new_odo = float(user_input_lst[0])\n        print(new_odo)\n        gallons_consumed = float(user_input_lst[1])\n        print(gallons_consumed)\n        miles = new_odo - initial_odo\n        mpg_lst.append(miles / gallons_consumed)\n
        print('Your fuel efficiency for this leg of the trip was '\n              '{} miles per gallon.'.format(round(miles / gallons_consumed, 2)))\n        initial_odo = new_odo\n    combined_mpg = 0\n    for mpg in mpg_lst:\n        combined_mpg += mpg\n    combined_mpg = combined_mpg / len(mpg_lst)\n    print('Your fuel efficiency for this trip was {:.2f} miles per gallon.'.format(combined_mpg))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"johnzellebook/ch8/ex8-10.py","file_name":"ex8-10.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"352113873","text":"from guizero import App, PushButton, Slider, Text\n\"\"\"In case google assistant is hard to use, but it turned out it wasn't.\"\"\"\n\ndef start_recording():\n    \"\"\"Prepares the recording and notifies the user to talk.\"\"\"\n    message.value = \"Start Speaking Please!\"\n    print('hello will')\n\n\napp = App(title=\"Speech Improve\", bg=\"pink\")\nmessage = Text(app, text=\"Welcome to Speech Improve!\")\nbutton = PushButton(app, command=start_recording)\n\napp.display()\n","sub_path":"testgui.py","file_name":"testgui.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"225776828","text":"# The smallest positive number that is divisible by all numbers from 1 to 20\r\n\r\n\r\ndef smallest_multiple(n):\r\n    count = 0\r\n    for j in range(1, 21):\r\n        if n % j == 0:\r\n            count += 1\r\n    return count\r\n\r\n# If a number is divisible by every number from 1 to 20, it is also divisible by\r\n# multiples of 20 and 19 (the largest of its divisors). 1520 is the 4th multiple\r\n# of 20 * 19, so stepping by it makes the program run much faster.\r\n\r\n\r\ndef solve():\r\n    listn = []\r\n    for i in range(0, 999999999, 1520):\r\n        if smallest_multiple(i) == 20:\r\n            listn.append(i)\r\n    # since 0 is divisible by every number, listn[0] is always 0\r\n    return listn[1]\r\n\r\n\r\nprint(solve())\r\n","sub_path":"Problem 005.py","file_name":"Problem 005.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"414361469","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on May 15 17:56:49 2018\n\n@author: hangz\nThis script runs a comparison experiment between normal transfer learning and transitive transfer learning.\nNormal TL: train bottleneck feature and fine-tuning.\nTransitive: do Normal TL for the categories with large image numbers, and then train that model on the same dataset as Normal TL.\nThere are 3 modes to select categories: random, similar and non-similar.\n\"\"\"\nimport gc\nimport os.path\nimport csv\nimport time\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom keras.utils import np_utils\nfrom readingUlits import readingUlits\nfrom sklearn.cross_validation import train_test_split\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.models import Model\nfrom keras.layers import Input, GlobalAveragePooling2D, Dense\nfrom keras.optimizers import SGD\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import EarlyStopping\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.preprocessing import image\nfrom keras import backend as K\n\n\n
def plot_training(history, name):\n    \"\"\"\n    Draw plot images for testing results\n    Args:\n        history: training history objects\n        name: name of part of experiment, usually: normal, transitive\n    \"\"\"\n    plt.ioff()\n    acc_path = log_dir + name + '_Accuracy.png'\n    loss_path = log_dir + name + '_Loss.png'\n    acc = history.history['acc']\n    val_acc = history.history['val_acc']\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n    epochs = range(len(acc))\n    fig = plt.figure()\n    plt.plot(epochs, acc, 'r.')\n    plt.plot(epochs, val_acc, 'r')\n    plt.title('Training and validation accuracy of ' + name + ' TL')\n    plt.savefig(acc_path)\n    plt.close(fig)\n    fig = plt.figure()\n    plt.plot(epochs, loss, 'r.')\n    plt.plot(epochs, val_loss, 'r-')\n    plt.title('Training and validation loss of ' + name + ' TL')\n    plt.savefig(loss_path)\n    plt.close(fig)\n\ndef getExperimentId(Id_file):\n    \"\"\"\n    Read and update for experiment Id\n    Args:\n        Id_file: file name that contains current Id\n    \"\"\"\n    rev = 0\n    if os.path.isfile(Id_file):\n        file = open(Id_file, 'r')\n        revStr = file.readline()\n        rev = int(revStr)\n        rev += 1\n        file.close()\n        file = open(Id_file,'w')\n        file.write(str(rev))\n        file.close()\n    else:\n        file = open(Id_file,'w')\n        file.write('0')\n        file.close()\n    return rev\n\ndef predict(model, img, top, target_size):\n    \"\"\"\n    Predict label of an image\n    Args:\n        model: trained network\n        img: image path\n        top: top n labels\n        target_size: size of image\n    \"\"\"\n    if img.size != target_size:\n        img = img.resize(target_size)\n    x = image.img_to_array(img)\n    x = np.expand_dims(x, axis=0)\n    x = preprocess_input(x)\n    x /= 255\n    preds = model.predict(x)\n    # note: 'top' is (mis)used as the argmax axis here; all callers pass 1,\n    # so this returns the single best class index per image\n    index = np.argmax(preds, axis=top)\n    return index\n\n
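# Editor's note (added sketch; the image path is hypothetical): predict() expects an already-loaded PIL image, e.g.\n#\n#     >>> img = image.load_img('some_image.jpg', target_size=(299, 299))\n#     >>> predict(model, img, 1, target_size=(299, 299))   # -> argmax class index array\n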
def testing(model, select, filename, error_log, increment):\n    \"\"\"\n    Test model by all data with trained labels, save the performance of each categories to file, and update error log for wrong prediction\n    Args:\n        model: trained model\n        select: categories that have been trained\n        filename: usually: normal, transitive\n        error_log: error log for wrong prediction\n        increment: an int value to help to figure out which step the error happened\n    \"\"\"\n    cat_performance_path = log_dir + filename + '_categories_performance.csv'\n    csv_cols = ['category', 'accuracy']\n    if not os.path.isfile(cat_performance_path):\n        with open(cat_performance_path, 'a', newline=\"\\n\", encoding=\"utf-8\") as newFile:\n            newFileWriter = csv.writer(newFile)\n            newFileWriter.writerow(csv_cols)\n        newFile.close()\n    for cat in select:\n        imgs = os.listdir(os.getcwd() + '/' + input_file + '/' + cat)\n        p = os.getcwd() + '/' + input_file + '/' + cat\n        total = 0\n        wrong = 0\n        for img in imgs:\n            if not img.startswith('.'):\n                total += 1\n                name = img\n                img = image.load_img(p + '/' + img, target_size=(299, 299))\n                preds = predict(model, img, 1, target_size=(299, 299))\n                if cat != selectlabelled[preds[0]]:\n                    key = cat + '\\t' + name + '\\t' + selectlabelled[preds[0]]\n                    if key not in error_log:\n                        error_log[key] = increment\n                    else:\n                        error_log[key] += increment\n                    wrong += 1\n        accuracy = float(total - wrong) / float(total)\n        aLine = [cat, \"{:.4f}\".format(accuracy)]\n        with open(cat_performance_path, 'a', newline=\"\\n\", encoding=\"utf-8\") as newFile:\n            newFileWriter = csv.writer(newFile)\n            newFileWriter.writerow(aLine)\n        newFile.close()\n\ndef errorWriteToFile(error_l, dir_path):\n    \"\"\"\n    Write error log to csv file\n    Args:\n        error_l: error log dictionary\n        dir_path: the path of csv file\n    \"\"\"\n    print('Writing error log......')\n    error_path = dir_path + 'error_log.csv'\n    error_cols = ['Category', 'File Name', 'Wrong Prediction', 'Error in']\n    with open(error_path, 'w', newline=\"\\n\", encoding=\"utf-8\") as newFile:\n        newFileWriter = csv.writer(newFile)\n        newFileWriter.writerow(error_cols)\n        for key, value in error_l.items():\n            cols = key.split('\\t')\n            m = ''\n            if value == 1:\n                m = 'Normal TL'\n            elif value == 2:\n                m = 'Transitive TL'\n            elif value == 3:\n                m = 'Both'\n            line = [cols[0], cols[1], cols[2], m]\n            newFileWriter.writerow(line)\n    newFile.close()\n\n#################################\n#Start execution\n#parameters have been adjusted, do not change\nfreeze = 172\nb_size = 32\ne_cpoch = 60\nlearning_rate = 0.003\nmomen = 0.0\nopti = 'SGD'\nde = 0.0\n#################################\n\n#################################\n#early stopping point for training\nearlyStopping=EarlyStopping(monitor='val_loss', min_delta=0.01, patience = 2, verbose=0, mode='auto')\n#################################\n\n#################################\n#get experiment id, this id will be incremented automatically by 1\nId_file = 'Id.txt'\nId = getExperimentId(Id_file)\n#################################\n\n#################################\n#create report directory\nreport_dir_re = 'report'\nreport_dir = r'report/'\nif not os.path.isdir(report_dir_re):\n    os.mkdir(report_dir_re)\n#################################\n\n#################################\n#create log directory for each experiment\nlog_dir_re = report_dir + \"{:d}_\".format(Id) + 'log'\nlog_dir = report_dir + \"{:d}_\".format(Id) + r'log/'\nif not os.path.isdir(log_dir_re):\n    os.mkdir(log_dir_re)\n#################################\n\n#################################\n#create training summary log file\nreport_file = report_dir + \"training_summary_log.csv\"\ncsv_columns = ['Id', 'Input Directory', 'Mode of selecting categories', \n               'Lowest Image Numbers for step 1', 'Highest Image Numbers for step 1', 'Number of categories for step 1',\n               'Lowest Image Numbers for step 2', 'Highest Image Numbers for step 2', 'Number of categories for step 2', \n               'Validation Loss of normal TL', 'Validation Accuracy of normal TL', 'Execution Time of normal TL',\n               'Validation Loss of transitive TL', 'Validation Accuracy of transitive TL', 'Execution Time of transitive TL']\nif not os.path.isfile(report_file):\n    with open(report_file, 'w', newline=\"\\n\", encoding=\"utf-8\") as newFile:\n        newFileWriter = csv.writer(newFile)\n        newFileWriter.writerow(csv_columns)\n    newFile.close()\n#################################\n\n#################################\n#The section you may edit during the experiment\n#(To select the sub-categories you want to train, basically depends on the number of images)\nmode = 1 # 1: randomly pick categories, 2: pick categories with similar names, 3: pick categories with non-similar names\ninput_file = 'Insecta' # which super category you want to train\nrangeMin = 50 # the categories for step 1 which have at least x(number) of images\nrangeMax = 100 # the categories for step 1 which have at most x(number) of images\nnumber_of_cats = 10 # How many categories you want to train for step 1\nrangeMin2 = 0 # the categories for step 2 which have at least x(number) of images\nrangeMax2 = 50 # the categories for step 2 which have at most x(number) of images\nnumber_of_cats2 = 10 # How many categories you want to train for step 2\n#################################\n\n#################################\n#functions for select categories and read image files: \n#setCats, setCatsSimilar, setCatsNonSimilar\n#appendCats, appendCatsSimilar, appendCatsNonSimilar\nreader = readingUlits(input_file) #initialize readingUlits object\n
if mode == 1:\n    reader.setCats(rangeMin, rangeMax, number_of_cats)\n    reader.appendCats(rangeMin2, rangeMax2, number_of_cats2)\nelif mode == 2:\n    reader.setCatsSimilar(rangeMin, rangeMax, number_of_cats)\n    reader.appendCatsSimilar(rangeMin2, rangeMax2, number_of_cats2)\nelse:\n    reader.setCatsNonSimilar(rangeMin, rangeMax, number_of_cats)\n    reader.appendCatsNonSimilar(rangeMin2, rangeMax2, number_of_cats2)\nselect = reader.getSelect() #save the selected categories without label\nimages = reader.setInfo()\nselectlabelled = reader.getLabelledInfo() #save the selected categories with label\nprint(selectlabelled)\n#################################\n\n#################################\n#setup labels for data\nlabel_name = reader.getLabelNames()\nlocations = reader.getLocations()\nnumber_of_classess = reader.getSubCategoryNumber()\nlabels = np.ones((reader.getTotalImageNumber(),),dtype='int64')\nprev = 0\nfor i in range(number_of_classess):\n    labels[prev:prev + locations[i]] = i\n    prev += locations[i]\nres = np_utils.to_categorical(labels, number_of_classess)\n#split dataset randomly into two subset: training dataset and testing dataset\nX_train, X_test, y_train, y_test = train_test_split(images, res, test_size=0.1, random_state=2)\n#Data augmentation\ndatagen = ImageDataGenerator(\n    featurewise_center=True,\n    featurewise_std_normalization=True,\n    rotation_range=20,\n    width_shift_range=0.2,\n    height_shift_range=0.2,\n    horizontal_flip=True)\ndatagen.fit(X_train)\n#################################\n\n#################################\n#start time of normal TL\nstart_time = time.time()\n#################################\n\n#################################\n#load model\nimage_input = Input(shape=(299, 299, 3))\nbase_model = InceptionV3(input_tensor=image_input, weights='imagenet', include_top=False)\n#add dense layers\nx = base_model.output\nx = GlobalAveragePooling2D()(x)\nx = Dense(1024, activation='relu')(x)\npredictions = Dense(number_of_classess, activation='softmax')(x)\nmodel = Model(inputs=base_model.input, outputs=predictions)\n#freeze all layers for training dense layer\nfor layer in base_model.layers:\n    layer.trainable = False\n#training dense layer (compile returns None, so the result is not kept)\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])\n#################################\n\n#################################\nprint('Training (Phase 1)------------')\nmodel.fit_generator(datagen.flow(X_train, y_train, batch_size=b_size), steps_per_epoch=len(X_train) / b_size, \n                    epochs=e_cpoch, verbose=1, validation_data=(X_test, y_test), callbacks=[earlyStopping], shuffle=True)\n#################################\n\n#################################\n#freeze some layers and fine-tuning\nfor layer in model.layers[:freeze]:\n    layer.trainable = False\nfor layer in model.layers[freeze:]:\n    layer.trainable = True\nprint('Fine-tuning (Phase 1)------------')\nif opti == 'SGD':\n    model.compile(optimizer=SGD(lr=learning_rate, momentum=momen, decay = de), loss='categorical_crossentropy', metrics=['accuracy'])\nelif opti == 'RMSprop':\n    model.compile(optimizer=RMSprop(lr=learning_rate, decay=de), loss='categorical_crossentropy', metrics=['accuracy'])\nhistory_ft = model.fit_generator(datagen.flow(X_train, y_train, batch_size=b_size), steps_per_epoch=len(X_train) / b_size, \n                                 epochs=e_cpoch, verbose=1, validation_data=(X_test, y_test), callbacks=[earlyStopping], shuffle=True)\n#Testing with testing dataset\n(loss_normal, accuracy_normal) = model.evaluate(X_test, y_test, batch_size=b_size, verbose=1)\n#################################\n\n
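# Editor's note: freeze=172 keeps the lower 172 InceptionV3 layers frozen and fine-tunes the rest; a quick (illustrative) way to inspect where the boundary falls:\n#\n#     >>> for i, layer in enumerate(model.layers[freeze - 2:freeze + 2]):\n#     ...     print(i + freeze - 2, layer.name, layer.trainable)\n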
#################################\n#save plots for normal TL\nplot_training(history_ft, 'Normal')\n#################################\n\n#################################\n#Testing with all data (including both training and testing dataset)\n#Then save the accuracy for each category and record all error data records\nerror_log = {}\ntesting(model, select, 'Normal', error_log, 1)\n#################################\n\n#################################\n#record execution time for normal TL\nend_time = time.time()\ninterval = end_time - start_time\ntd_normal = str(datetime.timedelta(seconds=interval))\n#################################\n\n#################################\n#delete normal TL model and release memory\nK.clear_session() #to avoid clutter from old models / layers.\nsess = tf.Session()\nK.set_session(sess)\ndel base_model\ndel model\ngc.collect()\n#################################\n\n#################################\n#TTL\n#functions for select categories and read image files: \n#setCats, setCatsSimilar, setCatsNonSimilar\n#appendCats, appendCatsSimilar, appendCatsNonSimilar\nreader1 = readingUlits(input_file) #initialize readingUlits object\nif mode == 1:\n    reader1.setCats(rangeMin, rangeMax, number_of_cats)\nelif mode == 2:\n    reader1.setCatsSimilar(rangeMin, rangeMax, number_of_cats)\nelse:\n    reader1.setCatsNonSimilar(rangeMin, rangeMax, number_of_cats)\nimages1 = reader1.setInfo()\n#################################\n\n#################################\n#setup labels for data\nlabel_name1 = reader1.getLabelNames()\nlocations1 = reader1.getLocations()\nnumber_of_classess1 = reader1.getSubCategoryNumber()\nlabels1 = np.ones((reader1.getTotalImageNumber(),),dtype='int64')\nprev1 = 0\nfor i in range(number_of_classess1):\n    labels1[prev1:prev1 + locations1[i]] = i\n    prev1 += locations1[i]\nres1 = np_utils.to_categorical(labels1, number_of_classess1)\n#split dataset randomly into two subset: training dataset and testing dataset\nX_train1, X_test1, y_train1, y_test1 = train_test_split(images1, res1, test_size=0.1, random_state=2)\n#Data augmentation\ndatagen.fit(X_train1)\n#################################\n\n#################################\n#start time of transitive TL\nstart_time = time.time()\n#################################\n\n#################################\n#TTL Phase 1\n#load model\nimage_input1 = Input(shape=(299, 299, 3))\nbase_model1 = InceptionV3(input_tensor=image_input1, weights='imagenet', include_top=False)\n#add dense layers\nx1 = base_model1.output\nx1 = GlobalAveragePooling2D()(x1)\nx1 = Dense(1024, activation='relu')(x1)\npredictions1 = Dense(number_of_classess1, activation='softmax')(x1)\nmodel1 = Model(inputs=base_model1.input, outputs=predictions1)\n#freeze all layers for training dense layer\nfor layer in base_model1.layers:\n    layer.trainable = False\n#training dense layer (compile returns None, so the result is not kept)\nmodel1.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])\nprint('Training (Phase 2: step 1) ------------')\nmodel1.fit_generator(datagen.flow(X_train1, y_train1, batch_size=b_size), steps_per_epoch=len(X_train1) / b_size, \n                     epochs=e_cpoch, verbose=1, validation_data=(X_test1, y_test1), callbacks=[earlyStopping], shuffle=True)\nfor layer in model1.layers[:freeze]:\n    layer.trainable = False\nfor layer in model1.layers[freeze:]:\n    layer.trainable = True\nprint('Fine-tuning (Phase 2: step 1)------------')\nif opti == 'SGD':\n    model1.compile(optimizer=SGD(lr=learning_rate, momentum=momen, decay = de), loss='categorical_crossentropy', metrics=['accuracy'])\nelif opti == 'RMSprop':\n    model1.compile(optimizer=RMSprop(lr=learning_rate, 
decay=de), loss='categorical_crossentropy', metrics=['accuracy'])\nmodel1.fit_generator(datagen.flow(X_train1, y_train1, batch_size=b_size), steps_per_epoch=len(X_train1) / b_size, \n                     epochs=e_cpoch, verbose=1, validation_data=(X_test1, y_test1), callbacks=[earlyStopping], shuffle=True)\n#Release memory for reuse\ndel images1\ndel X_train1\ndel y_train1\ndel X_test1\ndel y_test1\ngc.collect()\n#################################\n\n#################################\n#TTL phase 2\n#Remove previous dense layer\nmodel1.layers.pop()\nx = model1.layers[-1].output\npredictions = Dense(number_of_classess, activation='softmax')(x)\nmodel1 = Model(inputs=base_model1.input, outputs=predictions)\nfor layer in base_model1.layers:\n    layer.trainable = False\n#compile returns None, so the result is not kept\nmodel1.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])\nprint('Training (Phase 2: step 2)------------')\nmodel1.fit_generator(datagen.flow(X_train, y_train, batch_size=b_size), steps_per_epoch=len(X_train) / b_size, \n                     epochs=e_cpoch, verbose=1, validation_data=(X_test, y_test), callbacks=[earlyStopping], shuffle=True)\nfor layer in model1.layers[:freeze]:\n    layer.trainable = False\nfor layer in model1.layers[freeze:]:\n    layer.trainable = True\nprint('Fine-tuning (Phase 2: step 2) ------------')\nif opti == 'SGD':\n    model1.compile(optimizer=SGD(lr=learning_rate, momentum=momen, decay = de), loss='categorical_crossentropy', metrics=['accuracy'])\nelif opti == 'RMSprop':\n    model1.compile(optimizer=RMSprop(lr=learning_rate, decay=de), loss='categorical_crossentropy', metrics=['accuracy'])\nhistory_ft1 = model1.fit_generator(datagen.flow(X_train, y_train, batch_size=b_size), steps_per_epoch=len(X_train) / b_size, \n                                   epochs=e_cpoch, verbose=1, validation_data=(X_test, y_test), callbacks=[earlyStopping], shuffle=True)\n#Testing with testing dataset\n(loss_transitive, accuracy_transitive) = model1.evaluate(X_test, y_test, batch_size=b_size, verbose=1)\n#################################\n\n#################################\n#save plots for transitive TL\nplot_training(history_ft1, 'Transitive')\n#################################\n\n#################################\n#Testing with all data (including both training and testing dataset)\n#Then save the accuracy for each category and record all error data records\n#Write all error log to file\ntesting(model1, select, 'Transitive', error_log, 2)\nerrorWriteToFile(error_log, log_dir)\n#################################\n\n
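# Editor's note: the increment arguments passed to testing() behave like bit flags per misclassified image; a final error_log value of 1 means only the normal run failed, 2 only the transitive run, 3 both, which is exactly the encoding decoded by errorWriteToFile.\n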
occupied by this program\ndel base_model1\ndel model1\ndel images\ndel X_train\ndel y_train\ndel X_test\ndel y_test\ngc.collect()\n#################################\n\n#Finished\nprint('Successfully done.')\n\n\n","sub_path":"Driver/NN_driver_Together.py","file_name":"NN_driver_Together.py","file_ext":"py","file_size_in_byte":20037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"457273216","text":"import numpy as np\nimport pandas as pd\nfrom collections import Counter, OrderedDict\nimport matplotlib.pyplot as plt\nimport cv2\nimport tensorflow as tf\nfrom random import randint, shuffle\nimport csv\nimport time\n\n# Load pickled data\nimport pickle\n\n\n# Helper function: convert an np.ndarray image from RGB to grayscale using OpenCV\ndef rgb_to_gray(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\ntraining_file = 'lab 2 data/train.p'\ntesting_file = 'lab 2 data/test.p'\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n\nX_train, Y_train = train['features'], train['labels']\nX_test, Y_test = test['features'], test['labels']\n\n\n# # Convert to grayscale, and normalize the numpy ndarray values to mean 0\n\n# Keep originals for checking purposes\noriginal_train = train.copy()\noriginal_test = test.copy()\n\ngrey_train = {}\ngrey_train['features'] = []\nfor i in range(len(X_train)):\n grey_train['features'].append(rgb_to_gray(X_train[i]))\ntrain['features'] = grey_train['features']\n\ngrey_test = {}\ngrey_test['features'] = []\nfor i in range(len(X_test)):\n grey_test['features'].append(rgb_to_gray(X_test[i]))\ntest['features'] = grey_test['features']\n\n\n## Normalize: (value - 128) / 128\nnormalized_train = {}\nnormalized_train['features'] = []\nfor i in range(len(train['features'])):\n normalized_train['features'].append((train['features'][i] - 128) / 128)\ntrain['features'] = normalized_train['features']\n\nnormalized_test = {}\nnormalized_test['features'] = []\nfor i in range(len(test['features'])):\n normalized_test['features'].append((test['features'][i] - 128) / 128)\ntest['features'] = normalized_test['features']\n\n\n### To start off let's do a basic data summary.\n\nn_train = len(train['features'])\nn_test = len(test['features'])\nimage_shape = train['features'][0].shape\nn_classes = len(set(train['labels']))\n\n\n# Stolen helper function for one_hot encoding\ndef dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\none_hot_train_labels = dense_to_one_hot(train['labels'], n_classes)\none_hot_test_labels = dense_to_one_hot(test['labels'], n_classes)\n\n\n# The actual Neural Network itself\n\n# Parameters\nlearning_rate = 0.005\ntraining_epochs = 500\nbatch_size = 50\ndisplay_step = 1\n\n# Shuffle training set\nindex = list(range(len(train['features'])))\nshuffle(index)\n\nshuffled_features = []\nshuffled_labels = []\nshuffled_orig_labels = []\nfor i in index:\n shuffled_features.append(train['features'][i])\n shuffled_labels.append(one_hot_train_labels[i])\n\n# Create batches\ntotal_batch = int(n_train / batch_size) + 1\nfeature_batches = np.array_split(shuffled_features, total_batch)\nlabel_batches = np.array_split(shuffled_labels, 
total_batch)\n\nn_input = image_shape[0] * image_shape[1] # traffic sign input (img shape: 32*32)\n\nn_hidden_layer = 256 # layer number of features.\n# how many? http://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw\n# somewhere between 32*32 and the number of output classes, 43\n\n# Store layers weight & bias\nweights = {\n 'hidden_layer': tf.Variable(tf.truncated_normal([n_input, n_hidden_layer], stddev=0.1)),\n 'out': tf.Variable(tf.truncated_normal([n_hidden_layer, n_classes], stddev=0.1))\n}\nbiases = {\n 'hidden_layer': tf.Variable(tf.constant(0.0, shape=[n_hidden_layer])),\n 'out': tf.Variable(tf.constant(0.0, shape=[n_classes]))\n}\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, image_shape[0], image_shape[1]])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n# tensorflow.python.framework.errors.InvalidArgumentError: logits and labels must be same size: logits_size=[300,43] labels_size=[100,43]\nx_flat = tf.reshape(x, [-1, n_input])\n\n# # Hidden layer with RELU activation\nlayer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])\nlayer_1 = tf.nn.relu(layer_1)\n# # Output layer with linear activation\nlogits = tf.matmul(layer_1, weights['out']) + biases['out']\n\n# # Define loss and optimizer\n#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# # Initializing the variables\ninit = tf.initialize_all_variables()\n\n# # Launch the graph\nwith tf.Session() as sess:\n start_time = time.time()\n sess.run(init)\n # Training cycle\n for epoch in range(training_epochs):\n total_batch = int(n_train/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x = feature_batches[i]\n batch_y = label_batches[i]\n # Run optimization op (backprop) and cost op (to get loss value)\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n # Display logs per epoch step\n if epoch % display_step == 0:\n c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n print(\"Epoch:\", '%04d' % (epoch + 1), \"cost=\", \\\n \"{:.9f}\".format(c))\n print(\"Optimization Finished!\")\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n # Calculate accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: test['features'], y: one_hot_test_labels}))\n\n# notes to self of stuff to do:\n# upsampling to make categories balanced\n# multiple layers\n# some normalization: L2, momentum, learning rate decay\n# number of neurons in hidden layer\n# getting validation set right\n","sub_path":"3_simplify.py","file_name":"3_simplify.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"344006718","text":"from flask import Blueprint, render_template, redirect, url_for\n\n\nhogwarts_blueprint = Blueprint('hogwarts', __name__, url_prefix='')\n\ncss_specials = {'template.css': 'maraudersmap.css',\n 'home.css': 'hogwarts.css',\n 'signup.css': 'boats.css'}\njs_specials = {'signup.js': 'boats.js'}\n\n\n@hogwarts_blueprint.app_errorhandler(500)\ndef internal(error):\n desc = 'Uh oh! 
Something went wrong.'\n return render_template(\n 'voldemort.html', status=500, description=desc\n ), 500\n\n\n@hogwarts_blueprint.app_errorhandler(404)\ndef not_found(error):\n desc = 'This page does not exist.'\n return render_template(\n 'voldemort.html', status=404, description=desc\n ), 404\n\n\n@hogwarts_blueprint.route('/')\ndef hogwarts_index():\n return render_template('hogwarts.html')\n\n\n@hogwarts_blueprint.route('/css/<path:path>')\ndef send_css(path):\n if path in css_specials:\n path = 'css/'+css_specials[path]\n return redirect(url_for('static', filename=path))\n\n\n@hogwarts_blueprint.route('/js/<path:path>')\ndef send_js(path):\n if path in js_specials:\n path = 'js/'+js_specials[path]\n return redirect(url_for('static', filename=path))\n","sub_path":"getpost/desk/hogwarts.py","file_name":"hogwarts.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"423510345","text":"import pymongo\nimport json\nfrom pymongo import MongoClient\nimport os\nimport datetime\nimport os.path\nimport re\nimport math\n\n\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client.storage_db\n\n\ndef convert_size(file_size):\n if file_size == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(file_size, 1024)))\n p = math.pow(1024, i)\n s = round(file_size / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\n\n\n\ndef find_and_scan_files():\n collection = db.file_collection\n cursor = collection.find()\n for document in cursor:\n file_path = document['file_path']\n file_id = document['_id']\n if os.path.isfile(file_path):\n print('file is:', file_path)\n file_accessed = datetime.datetime.fromtimestamp(os.path.getatime(file_path))\n file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))\n file_created = datetime.datetime.fromtimestamp(os.path.getctime(file_path))\n file_size = os.stat(file_path).st_size\n file_convert_size = convert_size(file_size)\n utc_datetime = datetime.datetime.utcnow()\n update_id = collection.update_one(\n {'_id': file_id},\n {\n \"$set\": {\n \"file_size\": file_convert_size,\n \"file_size_bytes\": file_size,\n \"process_datestamp\": utc_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"file_created\": file_created,\n \"file_accessed\": file_accessed,\n \"file_modified\": file_modified\n },\n },\n upsert=True)\n update_id\n else:\n print('file is not available:', file_path)\n delete_id = collection.delete_one({'_id': file_id})\n delete_id\n\n\ndef scan_for_file(dir_path, storage_bucket):\n for file in os.listdir(dir_path):\n curpath = os.path.join(dir_path, file)\n if os.path.isfile(curpath):\n print('found file:', file)\n print('checking db for:', curpath)\n collection = db.file_collection\n if collection.find({'file_path': curpath}).count() > 0:\n print('found file in db', curpath)\n cursor = collection.find({'file_path': curpath})\n for document in cursor:\n file_db_id = document['_id']\n file_db_path = document['file_path']\n file_accessed = datetime.datetime.fromtimestamp(os.path.getatime(file_db_path))\n file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(file_db_path))\n file_created = datetime.datetime.fromtimestamp(os.path.getctime(file_db_path))\n file_size = os.stat(file_db_path).st_size\n file_convert_size = convert_size(file_size)\n utc_datetime = datetime.datetime.utcnow()\n update_id = collection.update_one(\n {'_id': file_db_id},\n {\n 
\"$set\": {\n \"file_size\": file_convert_size,\n \"file_size_bytes\": file_size,\n \"process_datestamp\": utc_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"file_created\": file_created,\n \"file_accessed\": file_accessed,\n \"file_modified\": file_modified\n },\n },\n upsert=True)\n update_id\n print('Updated ID:', file_db_id)\n\n elif collection.find({'file_path': curpath}).count() == 0:\n print('no file in db:', curpath)\n file_accessed = datetime.datetime.fromtimestamp(os.path.getatime(curpath))\n file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(curpath))\n file_created = datetime.datetime.fromtimestamp(os.path.getctime(curpath))\n file_size = os.stat(curpath).st_size\n file_convert_size = convert_size(file_size)\n utc_datetime = datetime.datetime.utcnow()\n post = {\"storage_bucket\": storage_bucket,\n \"dir_path\": dir_path,\n \"file_path\": curpath,\n \"file_name\": file,\n \"file_size\": file_convert_size,\n \"file_size_bytes\": file_size,\n \"process_datestamp\": utc_datetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"file_created\": file_created,\n \"file_accessed\": file_accessed,\n \"file_modified\": file_modified}\n posts = db.file_collection\n post_id = posts.insert_one(post).inserted_id\n post_id\n print('New posted ID:', post)\n else:\n print('error')\n\n\n\n\n\ndef scan_storagebuckets():\n collection = db.dir_collection\n cursor = collection.find({\"dir_size\": re.compile('KB|MB|GB|TB')})\n\n for document in cursor:\n storage = document[\"storage_bucket\"]\n dir_path = document[\"dir_path\"]\n if storage == \"RT2\":\n print('searching RT2 path:', dir_path)\n storage_bucket = \"RT2\"\n scan_for_file(dir_path, storage_bucket)\n elif storage == \"RT3\":\n print('searching RT3 path:', dir_path)\n storage_bucket = \"RT3\"\n scan_for_file(dir_path, storage_bucket)\n elif storage == \"RT4\":\n print('searching RT4 path:', dir_path)\n storage_bucket = \"RT4\"\n scan_for_file(dir_path, storage_bucket)\n\n\n\n\n\nscan_storagebuckets()\n","sub_path":"files_compare_db_san.py","file_name":"files_compare_db_san.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"244579170","text":"import sys\nimport numpy as np\nimport csv\nimport re\nimport statistics\n\ndef main(argv):\n in_data_lst = []\n try:\n in_data_lst = read_infile(argv[1]) \n preprocess_data(in_data_lst,argv[2],argv[3]) \n except:\n print(\"Need input filename, and 2 output file names\")\n\ndef read_infile(filename):\n '''\n input is the csv filename\n output is a list with all the fields\n '''\n in_data_lst = []\n with open(filename,'r') as csvfile:\n input_val = csv.reader(csvfile, delimiter='|')\n for row in input_val:\n in_data_lst.append(row)\n return (in_data_lst)\n\ndef preprocess_data(data,file1,file2):\n '''\n input is the data list, and the two files to write the output\n output is the two written files after processing info\n '''\n id_zip_dict = {} # dictionary with (id,zipcode) as keys that stores freq and amt\n id_date_dict = {} # dictionary with (id,date) as keys that stores freq and amt\n id_zip_amt_dict = {} # dictionary with zipcode as key that stores a list of amt to calculate median\n id_date_amt_dict = {} #dictionary with date as key that stores a list of amt to calculate median\n\n with open(file1,'w') as fw1:\n \n year = r\"(^[0-9]{4}(2015|2016|2017)$)\" #match year of format xxxxy where x is a digit(0-9) and y is 2015/2016/2017\n zipcode=r\"(^[0-9]{5}$)\" #match zipcode of format xxxxx where x is a 
digit(0-9)\n for i in range(len(data)):\n if ((data[i][0] == '') # CMTE_ID is blank\n or (data[i][14] == '') #TRANSACTION_AMT is blank\n or (data[i][15] != '')): #OTHER_ID is not blank\n continue\n zip_flag = False\n year_flag = False\n zipc = data[i][10]\n if len(data[i][10]) >= 5: #if zipcode greater than 5 digits then get the first five digits\n zipc = data[i][10][0:5] \n if re.match(zipcode,zipc):\n zip_flag = True #zipcode is valid\n \n date = data[i][13]\n if re.match(year,date):\n year_flag = True #year is valid and between 2015-2017\n\n\n #process the zip dict\n if zip_flag:\n id_zip_dict,id_zip_amt_dict = process_id_zip(data[i][0],zipc,data[i][14],id_zip_dict,id_zip_amt_dict)\n med = round(statistics.median(id_zip_amt_dict[(data[i][0],zipc)]))\n fw1.write(data[i][0]+\"|\"+zipc+\"|\"+str(med)+\"|\"+str(id_zip_dict[(data[i][0],zipc)][1])+\"|\"+str(id_zip_dict[(data[i][0],zipc)][0])+\"\\n\")\n \n #process the date dict\n if year_flag:\n id_date_dict,id_date_amt_dict = process_id_date(data[i][0],date,data[i][14],id_date_dict,id_date_amt_dict)\n \n #sort date dictionary \n with open(file2,'w') as fw2:\n for keys in sorted(id_date_dict.items()):\n med_date = round(statistics.median(id_date_amt_dict[keys[0]]))\n fw2.write(keys[0][0]+\"|\"+keys[0][1]+\"|\"+str(med_date)+\"|\"+str(keys[1][1])+\"|\"+str(keys[1][0])+\"\\n\")\n\n \ndef process_id_zip(custid,zipc,amt,id_zip_dict,id_zip_amt_dict):\n '''\n inputs are flags,CMTE_ID,ZIP_CODE,TRANSACTION_AMT,and the two zipcode dictionaries\n outputs are the updated two zipcode dictionaries\n '''\n if ((custid,zipc) not in id_zip_dict):\n fr = 1\n am = int(amt)\n id_zip_dict[(custid,zipc)]=[am,fr]\n id_zip_amt_dict[custid,zipc] = [am]\n else:\n fr = id_zip_dict[(custid,zipc)][1] + 1\n am = id_zip_dict[(custid,zipc)][0] + int(amt) \n id_zip_dict[(custid,zipc)]=[am,fr]\n id_zip_amt_dict[(custid,zipc)].append(int(amt))\n #sort the amt list\n id_zip_amt_dict = {x:sorted(id_zip_amt_dict[x]) for x in id_zip_amt_dict.keys()}\n return (id_zip_dict,id_zip_amt_dict) \n \n\n\ndef process_id_date(custid,date,amt,id_date_dict,id_date_amt_dict):\n '''\n inputs are the year_flag,CMTE_ID,TRANSACTION_DATE,TRANSACTION_AMT and the two date dictionaries\n outputs are the updated two date dictionaries\n '''\n if ((custid,date) not in id_date_dict):\n fr = 1\n am = int(amt)\n id_date_dict[(custid,date)]=[am,fr]\n id_date_amt_dict[custid,date] = [am]\n else:\n fr = id_date_dict[(custid,date)][1] + 1\n am = id_date_dict[(custid,date)][0] + int(amt)\n id_date_dict[(custid,date)]=[am,fr]\n id_date_amt_dict[(custid,date)].append(int(amt))\n #sort the amt list\n id_date_amt_dict = {x:sorted(id_date_amt_dict[x]) for x in id_date_amt_dict.keys()}\n return (id_date_dict,id_date_amt_dict)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"insight_testsuite/temp/src/find_political_donors.py","file_name":"find_political_donors.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"441332704","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport model_utils.fields\nimport ore.core.util\nimport ore.core.models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Namespace',\n fields=[\n ('id', models.AutoField(\n serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('status', 
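 # lifecycle status ('active'/'deleted') is handled by django-model-utils' StatusField\n 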
model_utils.fields.StatusField(choices=[\n ('active', 'active'), ('deleted', 'deleted')], max_length=100, no_check_for_status=True, default='active')),\n ('name', models.CharField(max_length=32, unique=True, verbose_name='name', validators=[django.core.validators.RegexValidator(\n '^[\\\\w.@+-]+$', 'Enter a namespace organization name.', 'invalid'), ore.core.util.validate_not_prohibited])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Organization',\n fields=[\n ('namespace_ptr', models.OneToOneField(parent_link=True, primary_key=True,\n auto_created=True, to='core.Namespace', serialize=False)),\n ('avatar_image', models.ImageField(null=True, blank=True,\n default=None, upload_to=ore.core.models.organization_avatar_upload)),\n ],\n options={\n },\n bases=('core.namespace',),\n ),\n migrations.CreateModel(\n name='Permission',\n fields=[\n ('id', models.AutoField(\n serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('slug', models.SlugField(max_length=64, unique=True)),\n ('name', models.CharField(max_length=64)),\n ('description', models.TextField()),\n ('applies_to_project', models.BooleanField(default=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"ore/core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591747950","text":"import importlib\nimport json\nimport os\nimport re\n\nimport rospy\nfrom std_msgs.msg import String\n\nfrom dynamic_stack_decider.abstract_action_element import AbstractActionElement\nfrom dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement\nfrom dynamic_stack_decider.sequence_element import SequenceElement\nfrom dynamic_stack_decider.abstract_stack_element import AbstractStackElement\nfrom dynamic_stack_decider.parser import DSDParser\nfrom dynamic_stack_decider.tree import Tree, AbstractTreeElement, ActionTreeElement, DecisionTreeElement, \\\n SequenceTreeElement\n\n\ndef discover_elements(path):\n \"\"\"\n Extract all the classes from the files in the given path and return a dictionary containing them\n\n :param path: The path containing the files that should be registered\n :type path: str\n :return: A dictionary with class names as keys and classes as values\n :rtype: Dict[str, AbstractStackElement]\n \"\"\"\n elements = {}\n files = [f for f in os.listdir(path) if f.endswith('.py')]\n for file in files:\n with open(os.path.join(path, file), \"r\") as dp:\n for line in dp:\n m = re.search(r\"(?<=^class\\s)[a-zA-Z0-9]*\", line)\n if m:\n classname = m.group()\n # relative_filename is the name relative to the src directory (from where it will be imported)\n # split path at \"src\" and take the last part\n relative_filename = os.path.join(path.split(\"/src/\")[-1], file) \n module_path = relative_filename.replace(\"/\", \".\").replace(\"\\\\\", \".\").replace(\".py\", \"\")\n try:\n module = importlib.import_module(module_path)\n except Exception as e:\n rospy.logerr('Error while loading class {}: {}'.format(classname, e))\n else:\n elements[classname] = getattr(module, classname)\n return elements\n\n\nclass DSD:\n \"\"\"\n One decision is defined as the root decision, the starting point.\n Each decision element, which is pushed on the stack, is immediately executed until no further element is pushed.\n Following, each iteration, for each element is checked if it requires to be reevaluated 
and finally the\n top element of the stack will be executed, usually an action.\n If the outcome of a reevaluated element changes, the entire stack on top of this element will be dropped and the\n stack newly constructed.\n As soon as the action is complete, the element will be popped off the stack and the module underneath will be\n executed in the next iteration.\n If this is a decision, it again pushes a further decision or an action and the new top element will be executed.\n\n By this structure, it is always visible which action the robot tries to perform and which decisions were made.\n\n If a new element is pushed on top of the stack, it is directly executed.\n In most cases, the pushing element is completing its execution with the push of another element.\n Any following code will be executed as soon as the stack is not further expanded.\n \"\"\"\n\n start_element = None\n start_element_data = None\n stack_exec_index = -1\n stack_reevaluate = False\n do_not_reevaluate = False\n old_representation = \"\"\n\n def __init__(self, blackboard, debug_topic=None):\n \"\"\"\n :param blackboard: Blackboard instance which will be available to all modules\n :param debug_topic: Topic on which debug data should be published\n :type debug_topic: str\n \"\"\"\n self.blackboard = blackboard\n\n self.tree = None # type: Tree\n # The stack is implemented as a list of tuples consisting of the tree element\n # and the actual module instance\n self.stack = [] # type: List[Tuple[AbstractTreeElement, AbstractStackElement]]\n\n self.actions = {} # type: Dict[str, AbstractActionElement]\n self.decisions = {} # type: Dict[str, AbstractDecisionElement]\n\n # Setup debug publisher if needed\n self.debug_active = debug_topic is not None\n if self.debug_active:\n rospy.loginfo('Debugging is active. Publishing on {}'.format(debug_topic))\n self.debug_publisher = rospy.Publisher(debug_topic, String, queue_size=10)\n\n def register_actions(self, modulepath):\n \"\"\"\n Register every class in a given path as an action\n :param modulepath: A path containing files with classes extending AbstractActionElement\n \"\"\"\n self.actions = discover_elements(modulepath)\n\n def register_decisions(self, modulepath):\n \"\"\"\n Register every class in a given path as a decision\n :param modulepath: A path containing files with classes extending AbstractDecisionElement\n \"\"\"\n self.decisions = discover_elements(modulepath)\n\n def load_behavior(self, path):\n \"\"\"\n Load a .dsd file into the behaviour to execute it. 
This should be called after the actions\n and decisions have been loaded.\n :param path: The path to the .dsd file describing the behaviour\n :return:\n \"\"\"\n dsd_parser = DSDParser()\n self.tree = dsd_parser.parse(path)\n self._bind_modules(self.tree.root_element)\n self.set_start_element(self.tree.root_element)\n\n def _bind_modules(self, element):\n \"\"\"\n Recursively traverse the tree and bind the registered action and decision classes to\n the corresponding tree elements\n :param element: The starting element\n \"\"\"\n if isinstance(element, ActionTreeElement):\n element.module = self.actions[element.name]\n elif isinstance(element, DecisionTreeElement):\n element.module = self.decisions[element.name]\n for child in element.children.values():\n self._bind_modules(child)\n elif isinstance(element, SequenceTreeElement):\n for action in element.action_elements:\n self._bind_modules(action)\n else:\n raise KeyError('Provided element ' + str(element) + 'was not found in registered actions or decisions')\n\n def _init_element(self, element, parameters=None):\n \"\"\" Initialises the module belonging to the given element. \"\"\"\n if isinstance(element, SequenceTreeElement):\n initialized_actions = list()\n for action in element.action_elements:\n initialized_actions.append(action.module(self.blackboard, self, action.parameters))\n return SequenceElement(self.blackboard, self, initialized_actions)\n else:\n return element.module(self.blackboard, self, parameters)\n\n def set_start_element(self, start_element, init_data=None):\n \"\"\"\n This method defines the start element on the stack, which stays always on the bottom of the stack.\n It should be called in __init__.\n \"\"\"\n self.start_element = start_element\n self.start_element_data = init_data\n self.stack = [(self.start_element, self._init_element(self.start_element, self.start_element_data))]\n\n def interrupt(self):\n \"\"\"\n An interrupt is an event which clears the complete stack to reset the behavior.\n In the special case of RoboCup, we use it when the game-state changes, but it can also be used for\n example if the robot is kidnapped or paused.\n In the following iteration, the stack will be newly created starting at the root element.\n \"\"\"\n if self.stack_reevaluate:\n # we were currently checking preconditions\n # we stop this, so that update() knows that it has to stop\n self.stack_reevaluate = False\n self.stack = [(self.start_element, self._init_element(self.start_element, self.start_element_data))]\n\n def update(self, reevaluate=True):\n \"\"\"\n Calls the element which is currently on top of the stack.\n Before doing this, all preconditions are checked (all decision elements where reevaluate is true).\n\n :param: reevaluate: Can be set to False to avoid the reevaluation\n :type reevaluate: bool\n \"\"\"\n self.publish_debug_msg()\n\n if reevaluate and not self.do_not_reevaluate:\n self.stack_exec_index = 0\n self.stack_reevaluate = True\n for tree_element, instance in self.stack[:-1]:\n # check all elements except the top one, but not the actions\n if isinstance(instance, AbstractDecisionElement) and instance.get_reevaluate():\n result = instance.perform(True)\n # Push element if necessary\n if result != self.stack[self.stack_exec_index + 1][0].activation_reason:\n self.stack = self.stack[0:self.stack_exec_index + 1]\n self.stack_reevaluate = False\n self.push(tree_element.get_child(result))\n\n if not self.stack_reevaluate:\n # We had some external interrupt, we stop here\n return\n self.stack_exec_index += 
1\n self.stack_reevaluate = False\n if reevaluate:\n # reset flag\n self.do_not_reevaluate = False\n # run the top module\n current_tree_element, current_instance = self.stack[-1]\n result = current_instance.perform()\n if isinstance(current_instance, AbstractDecisionElement):\n self.push(current_tree_element.get_child(result))\n\n def push(self, element):\n \"\"\"\n Put a new element on the stack and start it directly.\n\n This should only be called by the DSD, not from any of the modules\n\n :param element: The tree element that should be put on top of the stack.\n :type element: AbstractTreeElement\n \"\"\"\n self.stack.append((element, self._init_element(element, element.parameters)))\n\n # we call the new element without another reevaluate\n self.update(False)\n\n def pop(self):\n \"\"\"\n Removes the element from the stack. The previous element will not be called again.\n \"\"\"\n if len(self.stack) > 1:\n if self.stack_reevaluate:\n # we are currently reevaluating. we shorten the stack here\n if self.stack_exec_index > 0:\n # only shorten stack if it still has one element\n self.stack = self.stack[0:self.stack_exec_index]\n # stop reevaluating\n self.stack_reevaluate = False\n else:\n if isinstance(self.stack[-1][1], SequenceElement):\n # If we are in a sequence, only one action should be popped\n in_sequence = self.stack[-1][1].pop_one()\n if in_sequence:\n # We are still in the sequence, therefore we do not want to pop the SequenceElement\n # We also do not want to reset do_not_reevaluate because an action in the sequence\n # may control the stack beyond its own lifetime but in the sequence element's lifetime\n return\n # Remove the last element of the stack\n self.stack.pop()\n\n # We will reevaluate even when the popped element set do_not_reevaluate\n # because no module should control the stack beyond its lifetime\n self.do_not_reevaluate = False\n\n def set_do_not_reevaluate(self):\n \"\"\"No reevaluation on next iteration\"\"\"\n self.do_not_reevaluate = True\n\n def get_stack(self):\n \"\"\"\n Returns the current stack\n \"\"\"\n return self.stack\n\n def publish_debug_msg(self):\n \"\"\"\n Helper method to publish debug data\n \"\"\"\n\n if self.debug_active:\n # Construct JSON encodable object which represents the current stack\n data = None\n for tree_elem, elem_instance in reversed(self.stack):\n elem_data = elem_instance.repr_dict()\n elem_data['activation_reason'] = tree_elem.activation_reason\n elem_data['next'] = data\n data = elem_data\n\n msg = String(data=json.dumps(data))\n self.debug_publisher.publish(msg)\n","sub_path":"dynamic_stack_decider/src/dynamic_stack_decider/dsd.py","file_name":"dsd.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"345456701","text":"import os\nimport stat\nimport time\nimport wx\nfrom ObjectListView import ObjectListView, ColumnDefn\n\n## -------------------------------------------------------------------------- ##\n# Modified \"OLV DnD Tutorial\" found at: #\n# http://www.blog.pythonlibrary.org/ #\n# Added code to support rearranging list objects from: #\n# http://wiki.wxpython.org/ListControls#Drag_and_Drop_with_a_striped_drag_list #\n## -------------------------------------------------------------------------- ##\n\nclass DragList(ObjectListView):\n def __init__(self, *arg, **kw):\n ObjectListView.__init__(self, *arg, **kw)\n\n self.Bind(wx.EVT_LIST_BEGIN_DRAG, self._onDrag)\n self.Bind(wx.EVT_LIST_ITEM_SELECTED, self._onSelect)\n 
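 # the mouse and window handlers below implement manual drag-and-drop reordering of rows\r\n 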
self.Bind(wx.EVT_LEFT_UP,self._onMouseUp)\n self.Bind(wx.EVT_LEFT_DOWN, self._onMouseDown)\n self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeaveWindow)\n self.Bind(wx.EVT_ENTER_WINDOW, self._onEnterWindow)\n self.Bind(wx.EVT_LIST_INSERT_ITEM, self._onInsert)\n self.Bind(wx.EVT_LIST_DELETE_ITEM, self._onDelete)\n\n #---------------\n # Variables\n #---------------\n self.IsInControl=True\n self.startIndex=-1\n self.dropIndex=-1\n self.IsDrag=False\n self.dragIndex=-1\n\n def _onLeaveWindow(self, event):\n self.IsInControl=False\n self.IsDrag=False\n event.Skip()\n\n def _onEnterWindow(self, event):\n self.IsInControl=True\n event.Skip()\n\n def _onDrag(self, event):\n self.IsDrag=True\n self.dragIndex=event.m_itemIndex\n event.Skip()\n pass\n\n def _onSelect(self, event):\n self.startIndex=event.m_itemIndex\n event.Skip()\n\n def _onMouseUp(self, event):\n # Purpose: to generate a dropIndex.\n # Process: check self.IsInControl, check self.IsDrag, HitTest, compare HitTest value\n # The mouse can end up in 5 different places:\n # Outside the Control\n # On itself\n # Above its starting point and on another item\n # Below its starting point and on another item\n # Below its starting point and not on another item\n\n if self.IsInControl==False: #1. Outside the control : Do Nothing\n self.IsDrag=False\n else: # In control but not a drag event : Do Nothing\n if self.IsDrag==False:\n pass\n else: # In control and is a drag event : Determine Location\n self.hitIndex=self.HitTest(event.GetPosition())\n self.dropIndex=self.hitIndex[0]\n # -- Drop index indicates where the drop location is; what index number\n #---------\n # Determine dropIndex and its validity\n #--------\n if self.dropIndex==self.startIndex or self.dropIndex==-1: #2. On itself or below control : Do Nothing\n pass\n else:\n #----------\n # Now that dropIndex has been established do 3 things\n # 1. gather item data\n # 2. delete item in list\n # 3. insert item & it's data into the list at the new index\n #----------\n dropList=[] # Drop List is the list of field values from the list control\n thisItem=self.GetItem(self.startIndex)\n for x in range(self.GetColumnCount()):\n dropList.append(self.GetItem(self.startIndex,x).GetText())\n thisItem.SetId(self.dropIndex)\n self.DeleteItem(self.startIndex)\n self.InsertItem(thisItem)\n for x in range(self.GetColumnCount()):\n self.SetStringItem(self.dropIndex,x,dropList[x])\n #------------\n # I don't know exactly why, but the mouse event MUST\n # call the stripe procedure if the control is to be successfully\n # striped. Every time it was only in the _onInsert, it failed on\n # dragging index 3 to the index 1 spot.\n #-------------\n # Furthermore, in the load button on the wxFrame that this lives in,\n # I had to call the _onStripe directly because it would occasionally fail\n # to stripe without it. 
You'll notice that this is present in the example stub.\n # Someone with more knowledge than I probably knows why...and how to fix it properly.\n #-------------\n self._onStripe()\n self.IsDrag=False\n event.Skip()\n\n def _onMouseDown(self, event):\n self.IsInControl=True\n event.Skip()\n\n def _onInsert(self, event):\n # Sequencing on a drop event is:\n # wx.EVT_LIST_ITEM_SELECTED\n # wx.EVT_LIST_BEGIN_DRAG\n # wx.EVT_LEFT_UP\n # wx.EVT_LIST_ITEM_SELECTED (at the new index)\n # wx.EVT_LIST_INSERT_ITEM\n #--------------------------------\n # this call to onStripe catches any addition to the list; drag or not\n self._onStripe()\n self.dragIndex=-1\n event.Skip()\n\n def _onDelete(self, event):\n self._onStripe()\n event.Skip()\n\n def _onStripe(self):\n if self.GetItemCount()>0:\n for x in range(self.GetItemCount()):\n if x % 2==0:\n self.SetItemBackgroundColour(x,wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DLIGHT))\n else:\n self.SetItemBackgroundColour(x,wx.WHITE)\n\nclass MyFileDropTarget(wx.FileDropTarget):\n def __init__(self, window):\n \"\"\"Constructor\"\"\"\n wx.FileDropTarget.__init__(self)\n self.window = window\n\n def OnDropFiles(self, x, y, filenames):\n \"\"\"\n When files are dropped, update the display\n \"\"\"\n self.window.updateDisplay(filenames)\n\nclass FileInfo(object):\n def __init__(self, path, date_created, date_modified, size):\n \"\"\"Constructor\"\"\"\n self.name = os.path.basename(path)\n self.path = path\n self.date_created = date_created\n self.date_modified = date_modified\n self.size = size\n\nclass MainPanel(wx.Panel):\n def __init__(self, parent):\n \"\"\"Constructor\"\"\"\n wx.Panel.__init__(self, parent=parent)\n self.file_list = []\n \n file_drop_target = MyFileDropTarget(self)\n self.olv = DragList(self, style=wx.LC_REPORT|wx.SUNKEN_BORDER)\n self.olv.SetDropTarget(file_drop_target)\n self.setFiles()\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.olv, 1, wx.EXPAND)\n self.SetSizer(sizer)\n\n def updateDisplay(self, file_list):\n for path in file_list:\n file_stats = os.stat(path)\n creation_time = time.strftime(\"%m/%d/%Y %I:%M %p\",\n time.localtime(file_stats[stat.ST_CTIME]))\n modified_time = time.strftime(\"%m/%d/%Y %I:%M %p\",\n time.localtime(file_stats[stat.ST_MTIME]))\n file_size = file_stats[stat.ST_SIZE]\n if file_size > 1024:\n file_size = file_size / 1024.0\n file_size = \"%.2f KB\" % file_size\n \n self.file_list.append(FileInfo(path,\n creation_time,\n modified_time,\n file_size))\n \n self.olv.SetObjects(self.file_list)\n\n def setFiles(self):\n \"\"\"\"\"\"\n self.olv.SetColumns([\n ColumnDefn(\"Name\", \"left\", 220, \"name\"),\n ColumnDefn(\"Date created\", \"left\", 150, \"date_created\"),\n ColumnDefn(\"Date modified\", \"left\", 150, \"date_modified\"),\n ColumnDefn(\"Size\", \"left\", 100, \"size\")\n ])\n self.olv.SetObjects(self.file_list)\n\nclass MainFrame(wx.Frame):\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Frame.__init__(self, None, title=\"OLV DnD Tutorial\", size=(800,600))\n panel = MainPanel(self)\n self.Show()\n\ndef main():\n \"\"\"\"\"\"\n app = wx.App(False)\n frame = MainFrame()\n app.MainLoop()\n \nif __name__ == \"__main__\":\n main()","sub_path":"FILES/pyGUI/_ref code/filedrop.py","file_name":"filedrop.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"187151309","text":"s=\"211101112\"\nl=len(s)\n#print(l)\nn=len(s)//2\nif l%2==0:\n n=len(s)//2+1\ncount=0\n\nfor i in range(1 , n+1):\n i1=i-1\n 
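 # s[-i] is the character that mirrors s[i-1] from the right-hand end\n 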
j=-i\n if s[i1]==s[j]:\n print(s[i1])\n print(s[j])\n count+=1\nif count==n:\n print(\"it is a palindrome\")\nelse:\n print(\"not a palindrome\")","sub_path":"Problems/que3.py","file_name":"que3.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"506167170","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nHomework B004\nNumerical methods for non-stationary Partial Differential Equations (PDEs):\nFinite Differences and Finite Volumes\n\nMathematics and Modeling\nMaster of Mathematics and Applications\n\n@author: Andre ALMEIDA ROCHA (3701739)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport random\n\ndef main(): \n points = [10, 20, 40, 80, 160, 320]\n \n errors = []\n \n for NX in points:\n norme = difference_finites(NX+1)\n errors.append(norme)\n\n print(errors)\n\n lix = [1/x for x in points]\n \n plt.figure(0)\n plt.plot(points, lix)\n plt.plot(points, errors, \"r\")\n \n \n plt.figure(1)\n plt.loglog(points, lix)\n plt.loglog(points, errors, \"r\")\n \n plt.show()\n \n \ndef difference_finites(NX): \n \"\"\"\n Finite differences method\n NX = 101 #number of grid points\n \"\"\" \n # PHYSICAL PARAMETERS \n Lx = 1.0 #domain size \n T = 0.4 #integration time\n \n print(\"T final=\",T) \n \n # NUMERICAL PARAMETERS\n #NX = 11 #number of grid points\n dx = Lx/(NX-1) #grid step (space)\n CFL = 0.375 \n dt = CFL * dx #grid step (time) \n NT = int(T/dt) #number of time steps\n \n print(\"Number of space steps= \",NX)\n print(\"Number of time steps= \",NT)\n print(\"dt= \",dt)\n \n # INITIAL FUNCTION PARAMETERS\n alpha = 1\n beta = 1\n \n \n # For the figure\n xx = np.zeros(NX)\n for i in np.arange(0,NX):\n xx[i] = i*dx\n \n #Initialization\n ddU = np.zeros(NX)\n U_old = np.zeros(NX) \n U_new = np.zeros(NX)\n \n # Exact solution\n U_sol = np.zeros(NX)\n \n # Initial condition \n U_data = np.zeros(NX)\n U_data = alpha * np.sin(2*np.pi*xx) + beta * np.cos(2*np.pi*xx)\n \n for i in np.arange(0, NX):\n U_old[i] = U_data[i]\n \n U_new = np.zeros(NX)\n \n plt.figure(-1)\n plt.plot(xx,U_old)\n plt.legend((\"t=0.\"), 'best')\n plt.xlabel('x')\n plt.ylabel('u')\n \n # Time loop \n random.seed()\n \n \n time=0.\n for n in np.arange(0, NT):\n \n time=time+dt \n if (n%100 == 0):\n print (\"t=\",time) \n\n for j in np.arange(0, NX): \n U_sol[j] = np.exp(-time) * (alpha * np.sin(2*np.pi*xx[j]) + beta * np.cos(2*np.pi*xx[j])) \n \n \n \n # explicit schemes \n for j in np.arange(2, NX-2):\n ddU[j] = -4/3*(U_old[j+1] - 2*U_old[j] + U_old[j-1]) + 1/12*(U_old[j+2] - 2*U_old[j] + U_old[j-2]) - 0.5*CFL*(U_old[j+2] - 4*U_old[j+1] + 6*U_old[j] - 4*U_old[j-1] + U_old[j-2])\n \n ddU[1] = -4/3 * (U_old[2] - 2 * U_old[1] + U_old[0]) + 1/12 * (U_old[3] - 2 * U_old[1] + U_old[NX-1]) - 0.5*CFL*(U_old[3] - 4*U_old[2] + 6*U_old[1] - 4*U_old[0] + U_old[NX-1])\n ddU[0] = -4/3 * (U_old[1] - 2 * U_old[0] + U_old[NX-1]) + 1/12 * (U_old[2] - 2 * U_old[0] + U_old[NX-2]) - 0.5*CFL*(U_old[2] - 4 * U_old[1] + 6 * U_old[0] - 4 * U_old[NX-1] + U_old[NX-2])\n \n\n # Update (explicit schemes)\n for j in np.arange(0, NX-2):\n U_new[j] = U_old[j] - CFL * ddU[j]\n\n # Update for the 1-periodicity\n U_new[NX-1] = U_new[0]\n U_new[NX-2] = U_new[1]\n\n # Time update\n for j in np.arange(0, NX-1):\n U_old[j] = U_new[j]\n \n print (\"tFinal=\", time)\n \n # compute the numerical error\n norme = 0.\n for j in np.arange(0, NX-1):
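 # accumulate dx-weighted squared differences (discrete L2 norm)\n 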
norme = norme + dx*np.fabs(U_new[j] - U_data[j])**2\n \n norme = np.sqrt(norme)\n \n print (\"Error / L2 norm\",norme) \n plt.figure(0)\n \n plt.plot(xx,U_new,\"r\",marker='x')\n plt.plot(xx,U_sol,\"b\",marker='o')\n plt.legend((\"t=T\"), 'best')\n plt.xlabel('x')\n plt.ylabel('u')\n \n plt.show()\n\n return norme \n \nif __name__ == '__main__':\n main()","sub_path":"homework/B004.py","file_name":"B004.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"354159898","text":"\"\"\" BDD tests for API /hourlies. \"\"\"\nimport logging\nfrom datetime import datetime, timezone\nfrom pytest_bdd import scenario, given, then\nfrom starlette.testclient import TestClient\nfrom aiohttp import ClientSession\nimport app.main\nfrom app.tests.common import default_mock_client_get\nimport app.wildfire_one\n\nLOGGER = logging.getLogger(__name__)\n\n\n@scenario('test_get_hourlies.feature', 'Get hourlies',\n example_converters=dict(codes=str, status=int, num_groups=int, num_readings_per_group=str))\ndef test_hourlies():\n \"\"\" BDD Scenario. \"\"\"\n\n\n# pylint: disable=unused-argument\n@given('I request hourlies for stations: <codes>')\ndef response(monkeypatch, mock_env_with_use_wfwx, mock_jwt_decode, codes):\n \"\"\" Make /hourlies/ request using mocked out ClientSession.\n \"\"\"\n\n # Mock out the part that gives us a datetime.\n def mock_now(*args, **kwargs):\n return datetime.fromtimestamp(1590076213962/1000, tz=timezone.utc)\n\n monkeypatch.setattr(app.wildfire_one, '_get_now', mock_now)\n\n monkeypatch.setattr(ClientSession, 'get', default_mock_client_get)\n # NOTE: should be using a converter\n # pylint: disable=eval-used\n stations = eval(codes)\n\n # Create API client and get the response.\n client = TestClient(app.main.app)\n headers = {'Content-Type': 'application/json',\n 'Authorization': 'Bearer token'}\n return client.post('/hourlies/', headers=headers, json={\"stations\": stations})\n\n\n# pylint: disable=redefined-outer-name\n@then('the response status code is <status>')\ndef assert_status_code(response, status):\n \"\"\" Assert that we receive the expected status code \"\"\"\n assert response.status_code == status\n\n\n@then('there are <num_groups> groups of hourlies')\ndef assert_number_of_hourlies_groups(response, num_groups):\n \"\"\" Assert that we receive the expected number of hourly groups \"\"\"\n assert len(response.json()['hourlies']) == num_groups\n\n\n@then('there are <num_readings_per_group> readings per group')\ndef assert_number_of_hourlies_per_group(response, num_readings_per_group):\n \"\"\" Assert that we receive the expected number of hourlies per groups \"\"\"\n # pylint: disable=eval-used\n for index, item in enumerate(eval(num_readings_per_group)):\n assert len(response.json()['hourlies']\n [index]['values']) == item\n","sub_path":"app/tests/test_get_hourlies.py","file_name":"test_get_hourlies.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"496172105","text":"# This is CDM cosmology with w, wa and Ok\n\n\nimport math as N\nfrom simplemc.models.LCDMCosmology import LCDMCosmology\nfrom simplemc.cosmo.paramDefs import w_par, wa_par, Ok_par, q0, j0, Omega0\n\nclass owa0CDMCosmology(LCDMCosmology):\n def __init__(self, varyw=False, varywa=False, varyOk=False, varyq0=True, varyj0=True, varyOmega0=True):\n # free parameters: w, wa, Ok, plus q0, j0 and Omega0\n\n self.varyw = varyw\n self.varywa = varywa\n self.varyOk = varyOk\n \n self.varyq0 = varyq0
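 # cosmographic parameters: q0 (deceleration) and j0 (jerk) are varied by default\n 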
self.varyj0 = varyj0\n self.varyOmega0 = varyOmega0\n\n self.Ok = Ok_par.value\n self.w0 = w_par.value\n self.wa = wa_par.value\n \n self.q0=q0.value\n self.j0=j0.value\n self.Omega0 = Omega0.value\n \n LCDMCosmology.__init__(self)\n\n\n # my free parameters. We add Ok on top of LCDM ones (we inherit LCDM)\n def freeParameters(self):\n l = LCDMCosmology.freeParameters(self)\n if (self.varyw): l.append(w_par)\n if (self.varywa): l.append(wa_par)\n if (self.varyOk): l.append(Ok_par)\n if (self.varyq0): l.append(q0)\n if (self.varyj0): l.append(j0)\n if (self.varyOmega0): l.append(Omega0)\n return l\n\n\n def updateParams(self, pars):\n ok = LCDMCosmology.updateParams(self, pars)\n if not ok:\n return False\n for p in pars:\n if p.name == \"w\":\n self.w0 = p.value\n elif p.name == \"wa\":\n self.wa = p.value\n elif p.name == \"Ok\":\n self.Ok = p.value\n self.setCurvature(self.Ok)\n if (abs(self.Ok) > 1.0):\n return False\n elif p.name == \"q0\":\n self.q0 = p.value\n elif p.name ==\"j0\":\n self.j0 = p.value\n elif p.name == \"Omega0\":\n self.Omega0 = p.value\n return True\n\n\n # this is relative hsquared as a function of a\n ## i.e. H(z)^2/H(z=0)^2\n #def RHSquared_a(self, a):\n # NuContrib = self.NuDensity.rho(a)/self.h**2\n # rhow = a**(-3*(1.0+self.w0+self.wa))*N.exp(-3*self.wa*(1-a))\n # return (self.Ocb/a**3+self.Ok/a**2+self.Omrad/a**4+NuContrib+(1.0-self.Om-self.Ok)*rhow)\n \n #Weak \n # def RHSquared_a(self,a):\n # c=3e8 # m/s\n # beta = -2\n # gamma = -3\n # H0 = 67.4*1000/3.085e22\n # kk = -2.3e-5 #Producto de la k y k^prime cuando gamma=-3 y beta = -2\n # a0 = 1.2e-10 # m/s^2\n # Z= 1 + (gamma-1)*((self.q0-1)/gamma + (self.j0-self.q0-2)/(1-self.q0))\n # return ((a0/H0*c)**2)*(((kk*(self.q0-1)**(1-gamma))/((6**gamma)*gamma*Z))*((3*self.Omega0/(8*N.pi))**beta))**(1/(gamma-beta))\n \n #Strong\n def RHSquared_a(self,a):\n c=3e8 # m/s\n beta = 3\n gamma = -3\n tau=-3.0 \n H0 = 70*1000/3.085e22\n kk = 9/(((N.pi)**2)*(4**5)) #Producto de la k y k^prime cuando gamma=-3 y beta = 3\n a0 = 1.2e-10 # m/s^2\n Z= 1 + (1-gamma)*((1-self.q0)/gamma - (self.j0-self.q0-2)/(1-self.q0))+tau*beta\n return ((a0/H0*c)**2)*(((6*(self.q0-1))**(1-gamma))*((8*N.pi*kk)/(3*gamma*Z))*((3*self.Omega0/(8*N.pi))**(1-beta)))**(1/(gamma+beta-1))\n\n","sub_path":"Modificaciones a SimpleMC/owa0CDMCosmology_strong.py","file_name":"owa0CDMCosmology_strong.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"625624500","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAutoExtract retrying logic.\n\nTODO: add sync support; only autoextract.aio is supported at the moment.\n\"\"\"\nimport asyncio\nimport logging\n\nfrom aiohttp import client_exceptions\nfrom tenacity import (\n wait_chain,\n wait_fixed,\n wait_random_exponential,\n wait_random,\n stop_after_attempt,\n stop_after_delay,\n retry_if_exception,\n RetryCallState,\n RetryError,\n retry,\n before_sleep_log,\n after_log,\n)\nfrom tenacity.stop import stop_base, stop_never\nfrom tenacity.wait import wait_base\n\nfrom .errors import RequestError, _QueryError, QueryRetryError\n\n\nlogger = logging.getLogger(__name__)\n\n\n_NETWORK_ERRORS = (\n asyncio.TimeoutError, # could happen while reading the response body\n client_exceptions.ClientResponseError,\n client_exceptions.ClientOSError,\n client_exceptions.ServerConnectionError,\n client_exceptions.ServerDisconnectedError,\n client_exceptions.ServerTimeoutError,\n client_exceptions.ClientPayloadError,\n 
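 # SSL connector failures are also treated as transient network errors and retried\n 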
client_exceptions.ClientConnectorSSLError,\n)\n\n\ndef _is_network_error(exc: Exception) -> bool:\n if isinstance(exc, RequestError):\n # RequestError is ClientResponseError, which is in the\n # _NETWORK_ERRORS list, but it should be handled\n # separately.\n return False\n return isinstance(exc, _NETWORK_ERRORS)\n\n\ndef _is_throttling_error(exc: Exception) -> bool:\n return isinstance(exc, RequestError) and exc.status == 429\n\n\ndef _is_server_error(exc: Exception) -> bool:\n return isinstance(exc, RequestError) and exc.status >= 500\n\n\ndef _is_retriable_query_error(exc: Exception) -> bool:\n return isinstance(exc, _QueryError) and exc.retriable and exc.max_retries > 0\n\n\nautoextract_retry_condition = (\n retry_if_exception(_is_throttling_error) |\n retry_if_exception(_is_network_error) |\n retry_if_exception(_is_server_error) |\n retry_if_exception(_is_retriable_query_error)\n)\n\n\nclass autoextract_wait_strategy(wait_base):\n def __init__(self):\n # throttling\n self.throttling_wait = wait_chain(\n # always wait 20-40s first\n wait_fixed(20) + wait_random(0, 20),\n\n # wait 20-40s again\n wait_fixed(20) + wait_random(0, 20),\n\n # wait from 30 to 630s, with full jitter and exponentially\n # increasing max wait time\n wait_fixed(30) + wait_random_exponential(multiplier=1, max=600)\n )\n\n # connection errors, other client and server failures\n self.network_wait = (\n # wait from 3s to ~1m\n wait_random(3, 7) + wait_random_exponential(multiplier=1, max=55)\n )\n self.server_wait = self.network_wait\n self.retriable_wait = self.network_wait\n\n def __call__(self, retry_state: RetryCallState) -> float:\n exc = retry_state.outcome.exception()\n if _is_throttling_error(exc):\n return self.throttling_wait(retry_state=retry_state)\n elif _is_network_error(exc):\n return self.network_wait(retry_state=retry_state)\n elif _is_server_error(exc):\n return self.server_wait(retry_state=retry_state)\n elif _is_retriable_query_error(exc):\n return max(\n exc.retry_seconds,\n self.retriable_wait(retry_state=retry_state)\n )\n else:\n raise RuntimeError(\"Invalid retry state exception: %s\" % exc)\n\n\nclass autoextract_stop_strategy(stop_base):\n def __init__(self):\n self.stop_on_throttling_error = stop_never\n self.stop_after_15_minutes = stop_after_delay(15 * 60)\n self.stop_on_network_error = self.stop_after_15_minutes\n self.stop_on_server_error = self.stop_after_15_minutes\n self.stop_on_retriable_query_error = self.stop_after_15_minutes\n\n def __call__(self, retry_state: RetryCallState) -> bool:\n exc = retry_state.outcome.exception()\n if _is_throttling_error(exc):\n return self.stop_on_throttling_error(retry_state)\n elif _is_network_error(exc):\n return self.stop_on_network_error(retry_state)\n elif _is_server_error(exc):\n return self.stop_on_server_error(retry_state)\n elif _is_retriable_query_error(exc):\n return (\n self.stop_on_retriable_query_error |\n stop_after_attempt(exc.max_retries)\n )(retry_state)\n else:\n raise RuntimeError(\"Invalid retry state exception: %s\" % exc)\n\n\ndef _exception_factory(fut):\n exc = fut.exception()\n if isinstance(exc, _QueryError):\n return QueryRetryError(fut)\n\n return RetryError(fut)\n\n\nautoextract_retry = retry(\n wait=autoextract_wait_strategy(),\n retry=autoextract_retry_condition,\n stop=autoextract_stop_strategy(),\n before_sleep=before_sleep_log(logger, logging.DEBUG),\n after=after_log(logger, logging.DEBUG),\n 
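 # _exception_factory wraps the final failed attempt in QueryRetryError or RetryError\n 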
retry_error_cls=_exception_factory,\n)\n","sub_path":"autoextract/aio/retry.py","file_name":"retry.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"42895336","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# @Author: Xie Zhongzhao\n# @Date : 2019-12-02 10:12:00\n\nimport os\nfrom os import path\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy.signal import convolve2d\n\nclass QRCM(object):\n '''\n A quality-aware relative contrast measure (QRCM) is proposed in this paper.\n \n This measure considers both the level of relative contrast enhancement between input and output images\n and distortions resulting from the enhancement process. The measure produces a number\n in the range [−1, 1] where -1 and 1 refer to full level of contrast degradation and improvement,\n respectively.\n '''\n def __int__(self):\n\n pass\n\n def conv2(self, x, y, mode='same'):\n return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)\n\n def gradient(self, img):\n '''\n calculate the gradient magnitude map\n :param path:\n :return: gradient magnitude map\n '''\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = img[:, :, 2]\n\n kernelx = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) / 3\n kernely = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]]) / 3\n\n bk = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n bk = bk / np.sum(bk)\n\n img_bk = self.conv2(img, bk, 'same')\n\n grad_x = self.conv2(img_bk, kernelx, 'same')\n grad_y = self.conv2(img_bk, kernely, 'same')\n\n grad_map = np.sqrt(np.square(grad_x) + np.square(grad_y))\n\n return grad_map\n\n def rcm(self, origImg, proImg):\n '''\n the relative contrast measure RCM\n :param orig_path: the original image\n :param pro_path: the processed image\n :return: RCM value\n '''\n EPS = 1e-6\n G_o = self.gradient(origImg)\n G_p = self.gradient(proImg)\n\n G_po = (G_p-G_o) / (G_p+G_o+EPS)\n\n w1 = G_o / sum(sum(G_o))\n\n RCM = np.sum(G_po * w1)\n\n return RCM\n\n def qvalue(self, origImg, proImg):\n '''\n the image quality\n :param orig_path: the original image path\n :param pro_path: the processed image path\n :return: Q value\n '''\n T = 255 / np.sqrt(2)\n G_o = self.gradient(origImg)\n G_p = self.gradient(proImg)\n\n m, n = (G_o.shape[0], G_p.shape[1])\n\n GMS = (2*G_o*G_p+T) / (np.square(G_o)+np.square(G_p)+T)\n mu = np.mean(GMS)\n w2 = 1 / (1+G_o)\n\n Q = 1 - (1/(m*n))*np.sum(np.abs(GMS-mu)*w2)\n\n return Q\n\n def qrcm(self, origImg, proImg):\n '''\n the quality-aware relative contrast measure\n :return: the QRCM value in the range of [-1,1]\n '''\n RCM = self.rcm(origImg, proImg)\n Q = self.qvalue(origImg, proImg)\n # print(\"RCM: \", RCM)\n # print(\"Q: \", Q)\n\n Qrcm = RCM*Q if RCM>=0 else (1+RCM)*Q - 1\n Qrcm = '{:.4f}'.format(Qrcm)\n\n return Qrcm\n\nclass SECEDCT(object):\n \"\"\"\n The algorithm introduces a new method to compute the spatial entropy of pixels\n using spatial distribution of pixel gray levels. 
The algorithm is from the paper,\n Spatial Entropy-Based Global and Local Image Contrast Enhancement, proposed by Turgay Celik\n \"\"\"\n def __init__(self, path_to_img):\n \"\"\"\n :param path_to_img : full path to the image file\n \"\"\"\n self._EPS = 1e-6\n self.yd = 0\n self.yu = 255\n self.img_bgr = cv2.imread(path_to_img, 1)\n\n\n self.height, self.width = self.img_bgr.shape[0:2] # avoid the odd error of DCT and IDCT \n new_height = self.height\n new_width = self.width\n if self.height % 2 == 1:\n new_height = self.height + 1\n\n if self.width % 2 == 1:\n new_width = self.width + 1\n self.img_bgr = cv2.resize(self.img_bgr, (new_width, new_height), cv2.INTER_AREA)\n\n\n if self.img_bgr is None:\n raise Exception(\"cv2.imread failed! Please check if the path is valid\")\n\n self.img_size = self.img_bgr.shape\n\n self.img_gray = cv2.cvtColor(\n self.img_bgr,\n cv2.COLOR_BGR2GRAY # cv2 bgr2gray doing the same as NTSC\n ) # I in the paper\n\n self.img_hsv = cv2.cvtColor(\n self.img_bgr,\n cv2.COLOR_BGR2HSV # cv2 bgr2hsv doing the same as NTSC\n ) # I in the paper\n\n def spatialHistgram(self, img_hsv):\n '''\n 2D spatial histogram\n :return: 2D spatial histogram\n '''\n histogram = dict()\n\n K = np.unique(img_hsv[:, :, 2]) # the distinct gray levels K\n img = img_hsv[:, :, 2]\n\n H = self.img_size[0]\n W = self.img_size[1]\n ratio = H / W # the aspect ratio r = H/W\n k_num = len(K)\n\n M = np.rint((k_num * ratio) ** 0.5) # 2D histogram is M*N\n N = np.rint((k_num / ratio) ** 0.5) # the total number of the grids on\n\n region_list = list()\n for m in range(1, int(M) + 1):\n for n in range(1, int(N) + 1):\n left = int((m - 1) / M * H)\n right = int((m / M) * H)\n top = int((n - 1) / N * W)\n bottom = int((n / N) * W)\n region = img[left: right, top: bottom]\n region_list.append(region.flatten())\n\n for k in K:\n gray_levels = (np.sum(region == k) for region in region_list)\n histogram[k] = list(gray_levels)\n\n return histogram\n\n def spatialEntropy(self, histogram):\n \"\"\"\n spatial entropy and distribution function\n :return:\n \"\"\"\n entropy = dict() # entropy meature S_k is computed for gray-level x_k\n f_k = dict() # compute a discrete function f_k\n f_k_norm = dict() # normalize\n F_cdf = dict() # cumulative distribution function\n\n for key, val in histogram.items():\n S_k = 0.0\n val = val / (sum(val) + self._EPS) # normilize-> very important\n for ele in val:\n if ele != 0:\n S_k += -(ele * np.log2(ele)) # equation 3\n entropy[key] = S_k\n sum_entropy = sum(entropy.values())\n\n for key, val in entropy.items():\n f_k[key] = val / ((sum_entropy - val) + self._EPS) # equation 4\n sum_f_k = sum(f_k.values())\n\n for key, val in f_k.items():\n f_k_norm[key] = val / (sum_f_k + self._EPS) # equation 5\n\n values = list(f_k_norm.values())\n for index, key in enumerate(f_k_norm.keys()):\n F_cdf[key] = sum(values[:(index + 1)]) # equation 6\n\n return f_k_norm, F_cdf\n\n def mapping(self, cdf, yd, yu):\n \"\"\"\n mapping function: using the cumulative distribution function\n :return:\n \"\"\"\n ymap = dict()\n for key, val in cdf.items():\n ymap[key] = int(np.rint(val * (yu - yd) + yd)) # equation 7\n return ymap\n\n def pixelMapping(self, img_hsv, mapping):\n '''\n get the enhanced image\n :param img_gray:\n :param map:\n :return:\n '''\n img = img_hsv[:, :, 2]\n\n h = img.shape[0]; w = img.shape[1]\n V = np.array(img).flatten()\n V = map(lambda x: mapping[x], V)\n global_img = np.array(list(V)).reshape(h, w)\n\n return global_img\n\n def dctTransform(self, img):\n '''\n forward 
2D-DCT transform\n :param img_hsv: hsv color space\n :param fk: the discrete function\n :return:\n '''\n img = np.float32(img)\n D = cv2.dct(img) # equation 8, 9\n\n return D\n\n def domainCoefWeight(self, dkl_img, fk, gamma=0.25):\n '''\n transform domain coefficient weighting\n :param dkl_img:\n :param fk:\n :return:\n '''\n H = dkl_img.shape[0]\n W = dkl_img.shape[1]\n\n sum = 0\n for key, value in enumerate(fk.items()):\n if value[1] != 0:\n sum += -value[1] * np.log2(value[1]) # equation 12\n\n alpha = np.float_power(sum, gamma)\n\n ww = np.linspace(1, alpha, W).reshape(1, W) # equation 11\n wh = np.linspace(1, alpha, H).reshape(1, H)\n weight = wh.T * ww # equation 10\n D_w = np.multiply(dkl_img, weight)\n\n return D_w\n\n def inverseDct(self, D_w):\n '''\n inverse 2D-DCT transform\n :param D_w:\n :return:\n '''\n\n iDct = cv2.idct(D_w) # equation 8, 9\n Y = np.clip(np.abs(iDct), 0, 255)\n\n return Y\n\n def color_restoration(self, S):\n '''\n restore the image color\n :param S:\n :param lambdaa:\n :return:\n '''\n HSV = self.img_hsv\n HSV[:, :, 2] = S\n S_restore = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)\n S_restore = cv2.resize(S_restore, (self.width, self.height), cv2.INTER_AREA)\n\n return np.clip(S_restore, 0, 255).astype('uint8')\n\n def pltHist(self, img_gray, new_img, str='SECEDCT'):\n '''\n plot Hist of the gray image\n :param img_gray:\n :param new_img:\n :param str:\n :return:\n '''\n fig = plt.figure(figsize=(16, 10))\n ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)\n ax1.hist(img_gray.ravel(), 256, [0, 256])\n ax2.hist(new_img.ravel(), 256, [0, 256])\n ax1.set_title(\"raw image\")\n ax2.set_title(\"the enhanced image with {}\".format(str))\n\n def pltEnhanceImg(self, img_bgr, new_img, str='SECEDCT'):\n '''\n plot the enhanced image\n :param img_gray:\n :param new_img:\n :return:\n '''\n fig1 = plt.figure(figsize=(16, 10))\n ax1, ax2 = fig1.add_subplot(121), fig1.add_subplot(122)\n ax1.imshow(img_bgr[:, :, [2, 1, 0]])\n ax1.axis('off')\n ax2.imshow(new_img[:, :, [2, 1, 0]])\n ax2.axis('off')\n ax1.set_title(\"raw image\")\n ax2.set_title(\"the enhanced image with {}\".format(str))\n\ndef secedct(IMG_DIR, IMG_NAME, gamma=0.25):\n '''\n the entire image contrast enhancement process\n :param IMG_DIR:\n :param IMG_NAME:\n :param gamma: control the detail of image, gamma\\in [0,1]\n :return: enhanced image and the QRCM value\n '''\n if gamma > 1 or gamma < 0:\n print(\"The input gamma is outside the valid range of [0,1], \\n\"\n \"the program automatically sets gamma to 0.25\")\n gamma = 0.25\n\n # the class of the algorithm named SECEDCT\n secedct = SECEDCT(path.join(IMG_DIR, IMG_NAME))\n # the hsv color space of the raw image\n img_hsv = secedct.img_hsv\n\n hist = secedct.spatialHistgram(img_hsv)\n fk, cdf = secedct.spatialEntropy(hist)\n gray_leval_map = secedct.mapping(cdf, yd=0.0, yu=255.0)\n global_img = secedct.pixelMapping(img_hsv, gray_leval_map)\n D = secedct.dctTransform(global_img)\n D_w = secedct.domainCoefWeight(D, fk, gamma)\n invDct_img = secedct.inverseDct(D_w)\n secedct_img = secedct.color_restoration(invDct_img)\n\n return secedct_img\n\ndef qrcm(IMG_DIR, IMG_NAME, secedct_img):\n \"\"\"\n calculate the QRCM value in the range of [-1,1]\n :param IMG_DIR: the directory of raw image\n :param IMG_NAME: the raw image name\n :param secedct_img: the image enhanced by secedct algorithm\n :return: the qrcm value\n \"\"\"\n raw_img = cv2.imread(os.path.join(IMG_DIR, IMG_NAME))\n secedct_qrcm = QRCM().qrcm(raw_img, secedct_img)\n\n return secedct_qrcm\n\ndef 
showIMG(IMG_DIR, IMG_NAME, newImg):\n \"\"\"\n show the raw image and enhanced image\n :param IMG_DIR: the directory of raw image\n :param IMG_NAME: the raw image name\n :param newImg: the enhanced image\n :return:\n \"\"\"\n rawImg = cv2.imread(os.path.join(IMG_DIR, IMG_NAME))\n cv2.imshow(IMG_NAME, rawImg)\n cv2.imshow(\"SECEDCT\", newImg)\n\n print(\"Please press 'q' and exit !!!\")\n cv2.waitKey(0)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n exit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"build/lib/ContrastEnhancement/SpatialEntropy.py","file_name":"SpatialEntropy.py","file_ext":"py","file_size_in_byte":12046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"340660512","text":"from odoo import http\nfrom odoo.http import request\nimport logging\n_logger = logging.getLogger(__name__)\n\nfrom odoo import http, tools, _\nfrom odoo.exceptions import ValidationError\nfrom odoo.addons.website_address_book.controllers.main import website_account\n\n\nclass website_account(website_account):\n\n @http.route()\n def account(self, **kw):\n response = super(website_account, self).account()\n partner = request.env.user.partner_id\n shippings = partner.search([\n (\"id\", \"child_of\", partner.commercial_partner_id.ids),\n '|', (\"type\", \"=\", \"delivery\"), (\"id\", \"=\",\n partner.commercial_partner_id.id)\n ], order='id desc')\n billings = partner.search([\n (\"id\", \"child_of\", partner.commercial_partner_id.ids),\n '|', (\"type\", \"=\", \"invoice\"), (\"id\", \"=\",\n partner.commercial_partner_id.id)\n ], order='id desc')\n response.qcontext.update({\n 'shippings': shippings,\n 'billings': billings,\n })\n return response\n\n @http.route(['/my/addressbook'], type='http', auth=\"user\", website=True)\n def portal_my_addressbook(self, **kw):\n res = super(website_account, self).portal_my_addressbook(**kw)\n res.qcontext.update({\"my_address_active\": True})\n return res\n\n @http.route(['/my/address'], type='http', methods=['GET', 'POST'], auth=\"public\", website=True)\n def my_address(self, **kw):\n res = super(website_account, self).my_address(**kw)\n res.qcontext.update({\"my_address_active\": True})\n return res\n","sub_path":"pp_website_address_book/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"10532360","text":"def get_summ(num_one, num_two):\n try:\n return int(num_one) + int(num_two)\n except ValueError:\n return 'You give me not a number!'\n\nnumber1 = input('First number: ')\nnumber2 = input('Second number: ')\n\nprint(get_summ(number1, number2))","sub_path":"exceptions_task/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375791383","text":"#!/usr/bin/env python3\n\n#pip install flux_led\n#reference https://github.com/Danielhiversen/flux_led\n\nfrom parse import *\nimport os\nfrom flux_led import WifiLedBulb, BulbScanner\nfrom word2number import w2n\n\n#still working on the brightness function. 
Need to worry about querying the color of the lightbulb first,\n#then setting the brightness back\n\ndef command_handler(sentence, info):\n scanner = BulbScanner()\n coms, classify = commands()\n msg = sentence+\" is not a known flux lightbulb command\"\n function = None\n\n print(\"scanner scan: \", end=\"\")\n print(scanner.scan(timeout = 4))\n\n try:\n #specific ID/MAC of bulb\n my_light = scanner.getBulbInfoByID(\"D8F15BA2EE72\")\n except:\n msg = \"flux lightbulb not detected!\"\n return msg, function\n\n print(\"success!\")\n bulb = WifiLedBulb(my_light[\"ipaddr\"])\n \n for i in coms[0]: #lightbulb color changer\n res = parse(i, sentence)\n if res:\n msg, function = colorChanger(bulb, res[0])\n return msg, function\n if sentence in coms[1]: #turn lightbulb off\n msg = \"turning the flux lightbulb off\"\n function = bulb.turnOff()\n return msg, function\n if sentence in coms[2]: #turn the lightbulb on\n msg = \"turning the flux lightbulb on\"\n function = bulb.turnOn()\n return msg, function\n for i in coms[3]: #change brightness of lightbulb\n res = parse(i, sentence)\n if res:\n msg, function = brightnessChanger(bulb, res[0])\n return msg, function\n return msg, function\n\ndef commands():\n coms = [\n [\n \"turn the flux lightbulb color to {}\",\n \"set the flux bulb color to {}\",\n \"set the smart bulb color to {}\",\n \"set the bulb color to {}\",\n \"set the light color to {}\",\n \"said the light color to {}\",\n \"turn the light to {}\",\n \"turn the light too {}\",\n \"turn the light two {}\",\n \"turn the light {}\",\n ],\n [\n \"turn the flux lightbulb off\",\n \"turn the flux light bulb off\",\n \"turn off the flux lightbulb\",\n \"turn off the flux light bulb\",\n \"turn off the light\",\n \"light off\",\n ],\n [\n \"turn the flux lightbulb on\",\n \"turn the flux light bulb on\",\n \"turn on the flux lightbulb\",\n \"turn on the flux light bulb\",\n \"turn on the light\",\n \"light on\",\n ],\n [\n \"set the flux lightbulb brightness to {} percent\",\n \"set the flux light bulb brightness to {} percent\",\n \"set the brightness of the flux lightbulb to {} percent\",\n \"set the brightness of the flux light bulb to {} percent\",\n \"set the light brightness to {} percent\",\n \"shut the light brightness to {} percent\",\n \"shut the like brightness to {} percent\",\n \"said the like brightness to {} percent\",\n \"said the light brightness to {} percent\",\n \"set the like brightness to {} percent\",\n \"set the light to {} percent\",\n \"set the like to {} percent\",\n \"set the line to {} percent\",\n \"said the light to {} percent\",\n ],\n ]\n \n classify = [\n \"parse\",\n \"cosine\",\n \"cosine\",\n \"parse\",\n ]\n return coms, classify\n\ndef colorChanger(bulb, color):\n color = color.replace(\" \", \"\")\n msg = \"\"\n function = None\n colors = {\n \"red\" : (255,0,0),\n \"orange\" : (255,125,0),\n \"yellow\" : (255, 255, 0),\n \"springgreen\" : (125,255,0),\n \"green\" : (0,255,0),\n \"turquoise\" : (0,255,125),\n \"cyan\" : (0, 255, 255),\n \"ocean\" : (0,125,255),\n \"blue\" : (0,0,255),\n \"purple\" : (125, 0, 255),\n \"magenta\" : (255, 0, 255),\n \"raspberry\" : (255, 0, 125)\n }\n try:\n rgb = colors[color]\n except:\n msg = color+\" is not a supported color for flux lightbulb\"\n return msg, function\n bulb.refreshState()\n\n # set to color and wait\n # (use non-persistent mode to help preserve flash)\n bulb.setRgb(*rgb, persist=False)\n msg = \"going to change flux bulb color to \"+color\n function = None\n\n return msg, function\n\ndef 
brightnessChanger(bulb, percent):\n msg = None\n try: #try and pull number from words passed in\n num = w2n.word_to_num(percent) / 100\n if num < 0: #range checking\n msg = \"flux brightness must be equal to or more than 0 percent\"\n elif num > 1:\n msg = \"flux brightness must be less than or equal to 100 percent\"\n except:\n msg = \"flux brightness percentage is not a number\"\n try:\n r,g,b = bulb.getRgb()\n except:\n msg = \"couldn't pull color from flux bulb\"\n if msg == None:\n def funct():\n print(str(num) + \" percent\")\n bulb.setRgb(r, g, b, persist=False, brightness = int(255 * num))\n msg = \"will change flux brightness to \" + str(num) + \" percent\"\n else:\n funct = None\n return msg, funct\n\ndef main():\n print(\"flux main does nothing\")\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"modules/flux_bulb.py","file_name":"flux_bulb.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523133718","text":"\"\"\"\n 8. Write a Python program to convert an array to an array of\n machine values and return the bytes representation\n\"\"\"\nfrom array import *\n\na = array('b', [72, 101, 108, 111, 32, 119, 111, 108, 100])\nprint(\"Convert an array to an array of machine values and return the bytes representation:\")\nb = a.tobytes()\nprint(b)\n","sub_path":"Array/Arr8.py","file_name":"Arr8.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430667816","text":"from random import randint\nfrom items import items_get\nimport math\nitems = items_get()\n\nclass Goblin:\n def __init__(self, lvl, hp_dice, weapon, is_hostile, inv, armour, stats, loc):\n max_hp = 4\n for i in range(0, lvl):\n max_hp += randint(1, hp_dice)\n self.max_hp = max_hp\n self.lvl = lvl\n self.hp = max_hp\n self.inv = inv\n self.is_hostile = is_hostile\n self.weapon = items[\"weapon\"][str(self.inv[\"weapon\"][0])]\n self.armour = items[\"armour\"][str(self.inv[\"armour\"][0])]\n self.ac = self.armour[\"ac\"]\n self.name = \"Goblin\"\n self.strength = stats[0]\n self.constitution = stats[1]\n self.dexterity = stats[2]\n self.intelligence = stats[3]\n self.wisdom = stats[4]\n self.charisma = stats[5]\n self.range = int(self.weapon[\"rng\"])\n self.loc = loc\n def distance_between(self, target):\n x_dist = self.loc[0] - target.loc[0]\n y_dist = self.loc[1] - target.loc[1]\n return math.sqrt(x_dist ** 2 + y_dist ** 2)\n def attack(self, target):\n self.target = target\n print(self.name + \" attacks \" + self.target.name + \" with its \" + self.weapon[\"name\"] + \"!\")\n if randint(1, 20) + ((self.strength-10)/2)> int(self.target.armour[\"ac\"]): # attack roll; add modifier (strength)\n damage = randint(self.weapon[\"atk_min\"], self.weapon[\"atk_max\"])\n self.target.hp -= damage\n print(self.name + \" hits \" + self.target.name + \" for \" + str(damage) + \" damage.\")\n else:\n print(self.name + \"'s attack missed!\")\n def ai_move(self, target):\n if self.distance_between(target) >= self.range:\n dist = self.distance_between(target)\n best_move = []\n for a in range(-1, 2, 1):\n x_pos = self.loc[0] + a\n y_pos = self.loc[1] + 0\n x_dist = x_pos - target.loc[0]\n y_dist = y_pos - target.loc[1]\n distance = math.sqrt(x_dist ** 2 + y_dist ** 2)\n if distance < dist:\n best_move = [a, 0]\n for n in range(-1, 2, 1):\n x_pos = self.loc[0] + 0\n y_pos = self.loc[1] + n\n x_dist = x_pos - target.loc[0]\n y_dist = y_pos - 
target.loc[1]\n distance = math.sqrt(x_dist ** 2 + y_dist ** 2)\n if distance < dist:\n best_move = [0, n]\n if best_move == [0, -1]:\n self.loc[1] -= 1\n try:\n if maps[str(self.loc[0] // 16)][str(self.loc[1] // 16)][self.loc[1] % 16][self.loc[0] % 16] == \"T\":\n self.loc[1] += 1\n except:\n best_move = [0,1]\n if best_move == [0, 1]:\n self.loc[1] += 1\n try:\n if maps[str(self.loc[0] // 16)][str(self.loc[1] // 16)][self.loc[1] % 16][self.loc[0] % 16] == \"T\":\n self.loc[1] -= 1\n except:\n best_move = [-1, 0]\n if best_move == [-1, 0]:\n self.loc[0] -= 1\n try:\n if maps[str(self.loc[0] // 16)][str(self.loc[1] // 16)][self.loc[1] % 16][self.loc[0] % 16] == \"T\":\n self.loc[0] += 1\n except:\n best_move = [1, 0]\n if best_move == [1, 0]:\n self.loc[0] += 1\n try:\n if maps[str(self.loc[0] // 16)][str(self.loc[1] // 16)][self.loc[1] % 16][self.loc[0] % 16] == \"T\":\n self.loc[0] -= 1\n except:\n pass\nclass Companion: # Companion: it WILL be a cat. \"@\" symbol?\n def __init__():\n pass\n","sub_path":"Code/npcs.py","file_name":"npcs.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"591429501","text":"import requests\n\nobj=requests.get('http://api.conceptnet.io/c/en/weapon?limit=1000').json()\n\nobj.keys()\n\nlen(obj['edges'])\n\nobj['edges'][999]\n\n\n\nweapon_hypos=requests.get('http://api.conceptnet.io/query?start=/c/en/weapon&rel=/r/IsA&limit=1000')\nobj2=weapon_hypos.json()\n\nobj2['edges'][1]\n\n# Conceptnet to extract IsA relations of weapon\n# Put rel as hypernym and the end as the weapon URI\n\ntopics=['amphibian','reptile','appliance','bird','building','clothing','container',\n 'fruit','furniture','ground_mammal','insect','musical_instrument','tool','tree',\n 'vegetable','vehicle','water_animal','weapon']\n\nconceptnet_data=[]\nfor topic in topics:\n obj=requests.get('http://api.conceptnet.io/query?end=/c/en/{}&rel=/r/IsA&limit=1000'.format(topic)).json()\n for i in range(len(obj['edges'])-1):\n conceptnet_data.append(obj['edges'][i]['@id'])\n \n# Cleanup\nimport re\ntest=conceptnet_data[-4]\nre.sub('\\W+','',test) #*works\ntest.split('.')\nre.sub('cen','',re.sub('\\W+','',test.split(',')[2]))\n\n\n# Save conceptnet Data\nwith open('conceptnet_data.txt','w') as f:\n for rel in conceptnet_data:\n f.write(rel+'\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"conceptnet_test.py","file_name":"conceptnet_test.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"407379936","text":"import mytime\n\n\ndef engine(state, evt=None, payload=None):\n # base case\n if state is None:\n return {\n \"entities\": {},\n \"timeStep\": mytime.get(),\n }\n # compute timestep\n curr_time = mytime.get()\n delta = curr_time - state[\"timeStep\"]\n state[\"timeStep\"] = curr_time\n\n # apply physics\n for key in state[\"entities\"]:\n entity = state[\"entities\"][key]\n\n # compute next velocity\n entity[\"velocity\"] = [velocity + acceleration * delta for velocity, acceleration in\n zip(entity.setdefault(\"velocity\", [0, 0]),\n entity.setdefault(\"acceleration\", [0, 0]))]\n\n # compute next location\n entity[\"location\"] = [location + velocity * delta for location, velocity in\n zip(entity.setdefault(\"location\", [0, 0]),\n entity.setdefault(\"velocity\", [0, 0]))]\n\n return 
state","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"653835777","text":"from flask import Flask, render_template, url_for, request, redirect, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\napp = Flask(__name__)\n\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n#Fake Restaurants\nrestaurant = {'name': 'The CRUDdy Crab', 'id': '1'}\n\nrestaurants = [{'name': 'The CRUDdy Crab', 'id': '1'}, {'name':'Blue Burgers', 'id':'2'},{'name':'Taco Hut', 'id':'3'}]\n\n\n#Fake Menu Items\nitems = [ {'name':'Cheese Pizza', 'description':'made with fresh cheese', 'price':'$5.99','course' :'Entree', 'id':'1'}, {'name':'Chocolate Cake','description':'made with Dutch Chocolate', 'price':'$3.99', 'course':'Dessert','id':'2'},{'name':'Caesar Salad', 'description':'with fresh organic vegetables','price':'$5.99', 'course':'Entree','id':'3'},{'name':'Iced Tea', 'description':'with lemon','price':'$.99', 'course':'Beverage','id':'4'},{'name':'Spinach Dip', 'description':'creamy dip with fresh spinach','price':'$1.99', 'course':'Appetizer','id':'5'} ]\nitem = {'name':'Cheese Pizza','description':'made with fresh cheese','price':'$5.99','course' :'Entree'}\n\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n\t#return \"This page will show all my restaurants\"\n\trestaurants = session.query(Restaurant).all()\n\treturn render_template('restaurants.html', restaurants = restaurants)\n\n@app.route('/restaurants/JSON', methods=['GET'])\ndef showRestaurantsJSON():\n restaurants = session.query(Restaurant).all()\n return jsonify(Restaurant=[r.serialize for r in restaurants])\n\n@app.route('/restaurants/new', methods=['GET','POST'])\ndef newRestaurant():\n\t#return \"This page will be for making a new restaurant\"\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tif name:\n\t\t\tnewRestaurant = Restaurant(name=name)\n\t\t\tsession.add(newRestaurant)\n\t\t\tsession.commit()\n\t\t\tflash(\"New Restaurant created\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('newRestaurant.html')\n\n@app.route('/restaurants//edit', methods=['GET','POST'])\ndef editRestaurant(restaurant_id):\n\t#return \"This page will be for editing restaurant %s\" % restaurant_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tif name:\n\t\t\trestaurant.name = name\n\t\t\tsession.add(restaurant)\n\t\t\tsession.commit()\n\t\t\tflash(\"Restaurant Succesfully Edited\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('editRestaurant.html', restaurant = restaurant)\n\n@app.route('/restaurants//delete', methods=['GET','POST'])\ndef deleteRestaurant(restaurant_id):\n\t#return \"This page will be for deleting restaurant %s\" % restaurant_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(restaurant)\n\t\tsession.commit()\n\t\tflash(\"Restaurant Succesfully Deleted\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('deleteRestaurant.html', restaurant = 
restaurant)\n\n@app.route('/restaurants/<int:restaurant_id>')\n@app.route('/restaurants/<int:restaurant_id>/menu')\ndef showMenu(restaurant_id):\n\t#return \"This page is the menu for restaurant %s\" % restaurant_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titems = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()\n\treturn render_template('menu.html', restaurant = restaurant, items = items)\n\n@app.route('/restaurants/<int:restaurant_id>/menu/JSON', methods=['GET'])\ndef showMenuJSON(restaurant_id):\n #restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()\n return jsonify(MenuItems=[i.serialize for i in items])\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON', methods=['GET'])\ndef showMenuItemJSON(restaurant_id, menu_id):\n item = session.query(MenuItem).filter_by(id = menu_id).one()\n return jsonify(MenuItem=item.serialize)\n\n@app.route('/restaurants/<int:restaurant_id>/menu/new', methods=['GET','POST'])\ndef newMenuItem(restaurant_id):\n\t#return \"This page is for making a new menu item for restaurant %s\" % restaurant_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tif name:\n\t\t\tnewItem = MenuItem(name=name, \n\t\t\t\trestaurant_id= restaurant_id, \n\t\t\t\tcourse=request.form['course'],\n\t\t\t\tdescription=request.form['description'],\n\t\t\t\tprice=request.form['price'])\n\t\t\tsession.add(newItem)\n\t\t\tsession.commit()\n\t\t\tflash(\"Menu Item Created\")\n\t\treturn redirect(url_for('showMenu', restaurant_id=restaurant_id))\n\telse:\n\t\treturn render_template('newMenuItem.html', restaurant_id = restaurant_id)\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/edit', methods=['GET','POST'])\ndef editMenuItem(restaurant_id, menu_id):\n\t#return \"This page is for editing menu item %s\" % menu_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titem = session.query(MenuItem).filter_by(id=menu_id).one()\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tif name:\n\t\t\titem.name = name\n\t\t\titem.course = request.form['course']\n\t\t\titem.description = request.form['description']\n\t\t\titem.price = request.form['price']\n\t\t\tsession.add(item)\n\t\t\tsession.commit()\n\t\t\tflash(\"Menu Item Successfully Edited\")\n\t\treturn redirect(url_for('showMenu', restaurant_id=restaurant_id))\n\telse:\n\t\treturn render_template('editMenuItem.html', restaurant_id = restaurant_id, item=item)\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/delete', methods=['GET','POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n\t#return \"This page is for deleting menu item %s\" % menu_id\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titem = session.query(MenuItem).filter_by(id=menu_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(item)\n\t\tsession.commit()\n\t\tflash(\"Menu Item Successfully Deleted\")\n\t\treturn redirect(url_for('showMenu', restaurant_id=restaurant_id))\n\telse:\n\t\treturn render_template('deleteMenuItem.html', restaurant = restaurant, item=item)\n\nif __name__ == '__main__':\n\tapp.secret_key = 'super_secret_key'\n\tapp.debug = True\n\tapp.run(host = '0.0.0.0', port = 5000)\n","sub_path":"vagrant/Lesson_4/finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"11040578","text":"import pandas as pd\nimport 
collections as clct\nimport numpy\nimport csv\nimport matplotlib\n\n\nload = pd.read_csv('Refrence/1.csv') #Uses pandas Lib\n\ndef FindMinimumValue(selected):\n datalist = []\n while True:\n for data in selected:\n if isinstance(data, int) is True:\n datalist.append(data)\n else:\n print(\"N/A\")\n return False\n print(\"Minimum Value:\",min(datalist))\n return False\n\n\ndef FindMaximumValue(selected):\n datalist = []\n while True:\n for data in selected:\n if isinstance(data, int) is True:\n datalist.append(data)\n else:\n print(\"N/A\")\n return False\n print(\"Maximum Value:\",max(datalist))\n return False\n\ndef FindAverageValue(selected):\n datalist = []\n while True:\n for data in selected:\n if isinstance(data, int) is True:\n datalist.append(data)\n else:\n print(\"N/A\")\n return False\n average = sum(datalist) / len(datalist)\n print(\"average:\",average)\n return False\n\ndef FindStandardDeviation(selected):\n datalist = []\n while True:\n for data in selected:\n if isinstance(data, int) is True:\n datalist.append(data)\n else:\n print(\"N/A\")\n return False\n sdevi = numpy.std(datalist) #Uses numpy Lib\n print(\"Standard Deviation:\",sdevi)\n return False\n\ndef FindCommonValue(selected):\n datalist = []\n\n for data in selected:\n datalist.append(data)\n\n counted = clct.Counter(datalist) #Uses Collections Lib\n\n most = counted.most_common(1)[0] #Uses Collections Lib\n print(\"Most Common Value:\",most[0])\n \ndef Histogram():\n print(\"placehold\")\n\nprocess = True\n\nwhile process == True:\n itercheck = 0\n for columns in load:\n print(\"----------------\")\n selected = load.iloc[:,itercheck]\n FindMinimumValue(selected)\n FindMaximumValue(selected)\n FindAverageValue(selected)\n FindStandardDeviation(selected)\n FindCommonValue(selected)\n Histogram()\n itercheck += 1\n\n process = False","sub_path":"School py Files/csv_Parsing (Lib Ver).py","file_name":"csv_Parsing (Lib Ver).py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"574657272","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Tue Aug 20 2020\n@author: Juliane Oliveira julianlanzin@gmail.com \n@author: Moreno rodrigues rodriguesmsb@gmail.com\n\n\"\"\"\n\n\nfrom scipy.integrate import odeint\nfrom scipy import optimize\nfrom scipy.integrate import odeint\nfrom scipy.optimize import least_squares\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n\n\nclass start_model:\n def __init__(self, pop):\n self.pop = pop\n pass\n\n #defining a step function\n def __h(self,t):\n h = 1.0/(1.0 + np.exp(-2.0 * 50 * t))\n return h\n \n #defining a function to compute one change in beta\n def __beta_t(self, t, t1, b, b1):\n beta = b * self.__h(t1 - t) + b1 * self.__h(t - t1) \n return beta\n \n #defining a function to compute two changes in beta\n def __beta_t2(self, t, t1, t2, b, b1, b2):\n beta = b * self.__h(t1 - t) + b1 * self.__h(t2 - t) * self.__h(t - t1) + b2 * self.__h(t - t2)\n return beta\n\n #defining the SIR model\n def __sir(self, f, t, parametros):\n\n n_betas = self.n_betas\n #variables\n S, I, R, Tt = f\n \n \n #create a block that deals with the number of betas\n if n_betas == 3:\n \n #define parameters for model with 3 betas\n beta, beta1, beta2, gamma, t1, t2 = parametros\n \n #define derivatives for 3 betas\n dS_dt = - self.__beta_t2(t, t1, t2, beta, beta1, beta2) * S * I\n dTt_dt = self.__beta_t2(t, t1, t2, beta, beta1, beta2) * S * I\n dI_dt = self.__beta_t2(t, t1, t2, 
beta, beta1, beta2) * S * I - gamma * I\n \n #Change here to deal with more betas\n elif n_betas == 2:\n \n #define parameters for two betas\n beta, beta1, gamma, t1 = parametros\n \n #define derivatives for two betas\n dS_dt = - self.__beta_t(t, t1, beta, beta1) * S * I\n dTt_dt = self.__beta_t(t, t1, beta, beta1) * S * I\n dI_dt = self.__beta_t(t, t1, beta, beta1) * S * I - gamma * I\n \n \n elif n_betas == 1:\n #define parameters for one beta\n beta, gamma = parametros\n \n #define derivatives for single beta\n dS_dt = - beta * S * I\n dTt_dt = beta * S * I\n dI_dt = beta * S * I - gamma * I\n \n \n \n #return to derivatives that are common to all models\n dR_dt = gamma*I \n \n return dS_dt, dI_dt, dR_dt, dTt_dt\n\n #Define the minimizer function\n \n def fit(self, x, y, n_tries, n_betas, fit_by = \"cs\", bounds = {\"beta\": [0,2.0], \n \"beta1\": [0,2.0],\n \"beta2\": [0,2.0],\n \"gamma\": [1/14,1/7],\n \"t1\": [0,45],\n \"t2\": [50,100]}):\n\n self.n_betas = n_betas\n self.y = y\n self.x = x\n self.fit_by = fit_by\n \n def least_square_error(pars, ts0):\n \n #Define the number of parameters that will be unpacked from pars according to the number of betas\n if self.n_betas == 3:\n beta, beta1, beta2, gamma, t1, t2, i0 = pars\n parode = beta, beta1, beta2, gamma, t1, t2\n \n elif self.n_betas == 2:\n beta, beta1, gamma, t1, i0 = pars\n parode = beta, beta1, gamma, t1\n \n else:\n beta, gamma, i0 = pars\n parode = beta, gamma\n \n \n #define initial conditions\n q0 = [1-i0,i0,0,i0]\n \n \n #Integrating\n qs = odeint(self.__sir, q0, ts0, args = (parode, ), mxstep = 1000000)\n \n \n\n #define the standardized residuals\n if self.fit_by == \"cs\":\n #get the series of cumulative cases to minimize error\n sinf = qs[:,-1]\n\n elif self.fit_by == \"ts\":\n #get the series of daily cases to minimize error\n sinf = np.r_[qs[:,-1][0], np.diff(qs[:,-1])]\n\n erri = (self.pop * sinf - self.y) / np.sqrt(self.pop * sinf + 1.0)\n \n return np.r_[erri]\n \n ts0 = np.arange(1, len(self.x) + 1)\n \n \n #change the bounds according to the number of betas\n if self.n_betas == 3:\n bounds = list(bounds.values())\n elif self.n_betas == 2:\n for parameter in [\"beta2\", \"t2\"]:\n del(bounds[parameter])\n bounds = list(bounds.values())\n else:\n #remove parameters that will not be used for the model\n for parameter in [\"beta1\", \"beta2\", \"t1\", \"t2\"]:\n del(bounds[parameter])\n bounds = list(bounds.values()) \n \n \n #Add bounds that are common to all models\n bounds.append([0,50/self.pop]) #i0\n bounds = np.array(bounds)\n \n #start two variables to track best results during the optimization process\n best_res = None\n best_cost = np.inf\n \n for i in range(n_tries):\n \n #create a set of random parameters\n par0 = np.random.rand(len(bounds))\n \n #Limit those parameters to the interval defined\n par0 = bounds[:,0] + par0 * (bounds[:,1] - bounds[:,0])\n \n try:\n res = optimize.least_squares(lambda pars: least_square_error(pars, ts0), par0, bounds = (bounds[:,0],bounds[:,1]))\n \n if res.cost < best_cost:\n best_cost = res.cost\n best_res = res\n except:\n pass\n \n #Define the dict with the parameters that will be returned by the model\n if self.n_betas == 3:\n self.beta = best_res.x[0]\n self.beta1 = best_res.x[1]\n self.beta2 = best_res.x[2] \n self.gamma = best_res.x[3] \n self.t1 = best_res.x[4]\n self.t2 = best_res.x[5] \n self.i0 = best_res.x[6]\n \n\n elif self.n_betas == 2:\n self.beta = best_res.x[0] \n self.beta1 = best_res.x[1] \n self.gamma = best_res.x[2]\n self.t1 = best_res.x[3]\n self.i0 = 
best_res.x[4]\n \n else:\n self.beta = best_res.x[0]\n self.gamma = best_res.x[1]\n self.i0 = best_res.x[2]\n\n def get_parameters(self):\n if self.n_betas == 3:\n return({\"beta\": self.beta, \"beta1\": self.beta1, \"beta2\": self.beta2,\n \"gamma\": self.gamma, \"t1\": self.t1, \"t2\": self.t2, \"i0\": self.i0})\n\n elif self.n_betas == 2:\n return({\"beta\": self.beta, \"beta1\": self.beta1, \"gamma\": self.gamma, \"t1\": self.t1, \"i0\": self.i0})\n \n else:\n return({\"beta\": self.beta, \"gamma\": self.gamma, \"i0\": self.i0})\n \n def predict(self, time):\n q0 = [1 - self.i0, self.i0, 0, self.i0]\n if self.n_betas == 3:\n parode = self.beta, self.beta1, self.beta2, self.gamma, self.t1, self.t2\n elif self.n_betas == 2:\n parode = self.beta, self.beta1, self.gamma, self.t1\n else:\n parode = self.beta, self.gamma\n\n predicted = odeint(self.__sir, q0, np.arange(1, len(time) + 1), args = (parode,), mxstep = 1000000)\n self.S = predicted[:,0]\n self.I = predicted[:,1]\n self.R = predicted[:,2]\n\n \n if self.fit_by == \"cs\":\n\n #predict the series for cummulative cases\n self.Tt = predicted[:,3]\n\n elif self.fit_by == \"ts\":\n #predict the series for daily cases\n self.Tt = np.r_[predicted[:,3][0], np.diff(predicted[:,3])]\n \n return {\"S\": self.S, \"I\": self.I, \"R\": self.R, \"Tt\": self.Tt * self.pop}\n\n\n\n\n\n \n \n\n\n\n \n","sub_path":"build/lib/JAS/models/gradient_optmization/sir.py","file_name":"sir.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"569077037","text":"import json, math\nfrom airbnbSpider.items import listItem\nfrom scrapy.spiders import Spider\nfrom scrapy import Request\n\nimport base64\nimport pymysql\nfrom airbnbSpider import dbSettings\nimport time\nimport json\nimport random\n\nclass proxyPool:\n def __init__(self):\n self.proxyId = 0\n self.ip = \"\"\n self.table = \"`proxypool`\"\n self.db = dbSettings.db_connect()\n self.cursor = self.db.cursor()\n\n def delete(self, proxy, delReason):\n delReason = delReason.replace(\"'\", \"''\")\n delReason = delReason.replace('\"', '\"\"')\n sql = \"UPDATE \"+self.table + \\\n \" SET `state` = 'del' WHERE `ip`='{}'\".format(proxy)\n self.cursor.execute(sql)\n self.db.commit()\n sql = \"UPDATE \"+self.table + \\\n \" SET `delreason` = '{}' WHERE `ip`='{}'\".format(\n delReason, proxy)\n\n print(sql)\n self.cursor.execute(sql)\n self.db.commit()\n\nclass mapSpider(Spider):\n name = \"map\"\n allowed_domains = ['www.airbnb.cn']\n\n def __init__(self):\n self.lat_low = 0.0\n self.lat_upp = 0.0\n self.lon_low = 0.0\n self.lon_upp = 0.0\n self.id = 0\n self.num = -1\n self.db = dbSettings.db_connect()\n self.cursor = self.db.cursor()\n self.area = \"\"\n\n self.errInfo = \"\"\n self.url = \"\"\n self.json = \"\"\n self.html = \"\"\n self.table = \"`map_us`\"\n self.starttime=time.time()\n\n def __del__(self):\n self.db.close()\n\n def start_requests(self): \n sql = \"SELECT * FROM \"+self.table + \\\n \" WHERE (`state` = 'todo' OR `state` = 'processing')\" \n print(sql)\n\n self.cursor.execute(sql)\n results = self.cursor.fetchall()\n if(len(results)==0):\n return \n\n for row in results:\n # print(row[1])\n self.lat_low = row['lat_low']\n self.lat_upp = row['lat_upp']\n self.lon_low = row['lon_low']\n self.lon_upp = row['lon_upp']\n self.area = row['area']\n self.id = row['id']\n # self.dbUpdateStates(\"processing\",self.id)\n meta = 
{\"location\":[row['lat_low'],row['lat_upp'],row['lon_low'],row['lon_upp']],\"map_id\":row['id'],\"area\":row['area'],\"starttime\":time.time()}\n yield Request( url = self.urlJoint(row),callback = self.mapParse,\n errback=self.mapErrback,meta = meta, dont_filter=True)\n\n def re_requests(self): \n self.start_requests()\n \n def dbUpdateStates(self, state, id):\n sql = \"UPDATE \"+self.table + \\\n \" SET `state`='{}' WHERE `id`='{}'\".format(state, id)\n self.cursor.execute(sql)\n self.db.commit()\n\n def dbUpdateNum(self, num, id):\n sql = \"UPDATE \"+self.table + \\\n \" SET `num`='{}' WHERE `id`='{}'\".format(num, id)\n self.cursor.execute(sql)\n self.db.commit()\n\n def dbInsert(self, location,area):\n sql = \"INSERT INTO \"+self.table + \\\n \"(`lat_low`, `lat_upp`, `lon_low`, `lon_upp`, `num`,`state`,`area`)\\\n VALUES ('{}', '{}', '{}', '{}', '-1','todo','{}')\\\n \".format(location[0], location[1], location[2], location[3], area)\n self.cursor.execute(sql)\n # print(sql)\n self.db.commit()\n\n def urlJoint(self, row):\n url = \"https://www.airbnb.com/api/v2/explore_tabs?_format=for_explore_search_web&auto_ib=true&client_session_id=d0c77d93-3a9a-43df-82fb-568ac0d5a566¤cy=CNY¤t_tab_id=home_tab&experiences_per_grid=20&fetch_filters=true&guidebooks_per_grid=20&has_zero_guest_treatment=true&hide_dates_and_guests_filters=false&is_guided_search=true&is_new_cards_experiment=true&is_standard_search=true&items_per_grid=50&key=d306zoyjsyarp7ifhu67rjxn52tv0t20&locale=zh&metadata_only=false&query=%E4%B8%8A%E6%B5%B7&query_understanding_enabled=true&refinement_paths%5B%5D=%2Fhomes&satori_config_token=EhIiQhIiIjISEjISIiIiUiUAIgA&satori_version=1.1.13&screen_height=425&screen_size=large&screen_width=1472&search_by_map=true&selected_tab_id=home_tab&show_groupings=true&supports_for_you_v3=true&timezone_offset=480&version=1.7.9&zoom=9\"\n url += \"&sw_lat={}&sw_lng={}&ne_lat={}&ne_lng={}\".format(row['lat_low'],row['lon_low'],row['lat_upp'],row['lon_upp'])\n # print(url)\n # url += \"&sw_lng={}\".format(row[3])\n # url += \"&ne_lat={}\".format(row[2])\n # url += \"&ne_lng={}\".format(row[4])\n # print(url)\n # print(str(self)+\"----\"+str(row[0]))\n # print(str(self)+\"----\"+str(row[0])+\"----\"+str(row[1])+\"----\" +\n # str(row[2])+\"----\"+str(row[3])+\"----\"+str(row[4]))\n return url\n\n def mapParse(self,response):\n # print(\"mapParse\")\n res = json.loads(response.body.decode('utf8'))\n if 'home_tab_metadata' in res['explore_tabs'][0]:\n count = res['explore_tabs'][0]['home_tab_metadata']['listings_count']\n print(\"mapParse:\\t\\t\"+str(count))\n if(count > 50):\n pass\n self.quadrateDivision(response.meta)\n if(count<=50):\n item = listItem()\n item['response']=response.body.decode('utf8')\n print(str(int(1000*(time.time()-self.starttime)))+\"ms\")\n self.starttime = time.time()\n yield item\n self.dbUpdateStates(\"done\",response.meta[\"map_id\"])\n self.dbUpdateNum(str(count),response.meta[\"map_id\"])\n\n def mapErrback(self,failure):\n # 假设我们需要对指定的异常类型做处理,\n # 我们需要判断异常的类型\n\n response = failure.value\n print(\"Errback:\\t\"+str(response))\n print(failure.request.meta['proxy'][8:])\n\n proxypool = proxyPool()\n proxypool.delete(failure.request.meta['proxy'][8:],str(response))\n print(\"del proxy:\"+str(failure.request.meta['proxy'][8:]))\n del proxypool\n\n yield failure.request\n\n def quadrateDivision(self,meta):\n lat_low = meta[\"location\"][0]\n lat_upp = meta[\"location\"][1]\n lon_low = meta[\"location\"][2]\n lon_upp = meta[\"location\"][3]\n lat_mid = (lat_low+lat_upp)/2\n 
lon_mid = (lon_low+lon_upp)/2\n locationList = []\n locationList.append((lat_low, lat_mid,\n lon_low, lon_mid))\n locationList.append((lat_mid, lat_upp,\n lon_low, lon_mid))\n locationList.append((lat_low, lat_mid,\n lon_mid, lon_upp))\n locationList.append((lat_mid, lat_upp,\n lon_mid, lon_upp))\n for location in locationList:\n self.dbInsert(location,meta[\"area\"])\n print(\"Insert 4 map area\")\n self.re_requests()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"airbnbSpider_scrapy/airbnbSpider_us_local/airbnbSpider/spiders/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"154779334","text":"#!/usr/bin/env python\r\n\"\"\"\r\nEncode & decode text from input or from a .txt file\r\n\r\ndoesn't work with capital letter, numbers and special characters\r\n\"\"\"\r\n\r\n\r\n# IMPORTS #\r\nimport sys\r\nimport time\r\nfrom datetime import datetime\r\nimport string\r\nfrom pathlib import Path\r\n\r\n# IMPORTS SCRIPTS #\r\nimport decoder\r\n# AUTHOR INFORMATION #\r\n\r\n# _____\r\n# .' `.\r\n# / .-=-. \\ \\ __\r\n# | ( C\\ \\ \\_.'')\r\n# _\\ `--' |,' _/\r\n# /__`.____.'__.-' The coding snail~\r\n\r\n__author__ = \"Kevin Vervloet\"\r\n__email__ = \"kevin.vervloet@student.kdg.be\"\r\n__Version__ = \"V4.0\"\r\n__status__ = \"Finished\"\r\n\r\n# VARIABLES #\r\ntime_now = datetime.now()\r\ncurrent_time = time_now.strftime(\"%H:%M:%S\")\r\n\r\ninvalidcharacters = set(string.punctuation) # blocks all special characters from being used\r\n\r\n\r\n# MAIN CODE #\r\ndef num_there(s):\r\n return any(i.isdigit() for i in s)\r\n\r\n\r\ndef menu():\r\n print(\"[\", current_time, \"]\", \" ====Encode/Decode===\") # Print the time & a title\r\n\r\n optionmenu = (input(\"\"\" \r\n-------------------------------- \r\n| [1] Decode |\r\n| [2] Encode |\r\n| [3] Exit | \r\n--------------------------------\\n\"\"\"))\r\n if optionmenu == \"1\": # Decode\r\n optie2 = (input(\"\"\" \r\nDo you want to decode from a file or manual input ?\r\n[1] From file\r\n[2] From manual input\\n\"\"\"))\r\n if optie2 == \"1\":\r\n text = Path('text2decode.txt').read_text() # read from a text file\r\n text = text.replace('\\n', '')\r\n\r\n decoder.break_text(text)\r\n decoder.decode_text(text)\r\n print(\"We've hacked the code, here is the decoded version:\")\r\n print(\"\")\r\n decoder.get_decode(text)\r\n\r\n elif optie2 == \"2\":\r\n text = input(\"Input the text you want to decode\\n\")\r\n if num_there(text):\r\n print(\"error! 
your text contains numbers - Returning to the menu\") # Check for numbers\r\n time.sleep(2)\r\n menu()\r\n return\r\n\r\n decoder.break_text(text)\r\n decoder.decode_text(text)\r\n print(\"We've hacked the code, here is the decoded version:\")\r\n print(\"\")\r\n decoder.get_decode(text)\r\n else:\r\n print(\"Invalid option we will return you to the menu\") # return to the menu\r\n menu()\r\n\r\n elif optionmenu == \"2\": # encoding\r\n optie2 = (input(\"\"\" \r\nDo you want to encode from a file or manual input ?\r\n[1] From file\r\n[2] From manual input\\n\"\"\"))\r\n if optie2 == \"1\":\r\n text = Path('text2encode.txt').read_text() # read from a text file\r\n text = text.replace('\\n', '')\r\n\r\n decoder.break_text(text)\r\n decoder.code_text(text)\r\n print(\"We've coded your top secret message:\")\r\n print(\"\")\r\n decoder.get_code(text)\r\n\r\n elif optie2 == \"2\":\r\n text = input(\"Input the text you want to code:\\n\")\r\n if num_there(text):\r\n print(\"error! your text contains numbers - Returning to the menu\")\r\n time.sleep(2)\r\n menu()\r\n return\r\n else:\r\n if any(char in invalidcharacters for char in text):\r\n print(\"error! your text contains special characters - Returning to the menu\")\r\n time.sleep(2)\r\n menu()\r\n return\r\n\r\n decoder.break_text(text)\r\n decoder.code_text(text)\r\n print(\"We've coded your top secret message:\")\r\n print(\"\")\r\n decoder.get_code(text)\r\n\r\n else:\r\n print(\"Invalid option we will return you to the menu\") # return to the menu\r\n menu()\r\n\r\n elif optionmenu == \"3\":\r\n print(\"See you next time!\")\r\n sys.exit(0)\r\n\r\n else:\r\n print(\"Error! not a valid input - Please try again\")\r\n time.sleep(1)\r\n print(\"\")\r\n print(\"\")\r\n menu()\r\n\r\n\r\nif __name__ == '__main__': # run tests if called from command-line\r\n menu()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175819901","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n'''\nrelation - bag_number (x-[0:53], y-num)\nbag-relation-sentence_number\nbag-sentence_number\n'''\n\n#relation - bag_number\ndir_data = '../data/'\npath_triple_train = 'train_instance_triple.npy'\n\nndarry_train = np.load(dir_data + path_triple_train)\narray_num = np.zeros((54, 1))\nlen_tuple = len(ndarry_train)\nfor i in range(len_tuple):\n ind = int(ndarry_train[i][2])\n array_num[ind] = array_num[ind] + 1\n\nplt.figure(1)\nplt.subplot(211)\nplt.ylim([0, 50])\nplt.plot(np.arange(0,54), array_num.reshape(54), 'ro')\n\npath_triple_test = 'test_instance_triple.npy'\ntriple_test = np.load(dir_data + path_triple_test)\nnum_relation_test = np.zeros((54, 1))\nlen_tuple = len(triple_test)\nfor i in range(len_tuple):\n ind = int(triple_test[i][2])\n num_relation_test[ind] = num_relation_test[ind] + 1\nplt.subplot(212)\nplt.ylim([0, 50])\nplt.plot(np.arange(0, 54), num_relation_test.reshape(54), 'b+')\nplt.show()\n\n\n","sub_path":"nn_distant/data_observ/relation_bag_num.py","file_name":"relation_bag_num.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"301643853","text":"import json\r\nimport socketserver\r\nimport threading\r\n\r\nfrom auto_update import AutoUpdater\r\n\r\nfrom .settings import *\r\nfrom .control import DATA\r\nfrom .motors import 
MotorState\r\n\r\n\r\nclass TCPHandler(socketserver.BaseRequestHandler):\r\n def handle(self):\r\n raw_data = self.request.recv(SOCKET_BUFFER_SIZE)\r\n print('Received command')\r\n raw_data = raw_data.decode()\r\n\r\n try:\r\n data = json.loads(raw_data)\r\n except ValueError:\r\n print('Received data in invalid format: `%s`' % raw_data)\r\n return\r\n\r\n request_type = data['type']\r\n print(request_type)\r\n\r\n handler = getattr(self, 'handle_%s' % request_type, None)\r\n if handler is None:\r\n print('Unknown command type: %s' % request_type)\r\n return\r\n\r\n handler(data['data'])\r\n\r\n def handle_motors_state(self, data):\r\n DATA.motors_state = MotorState.from_network(data)\r\n\r\n def handle_ping(self, data):\r\n self.send_response({'type': 'pong'})\r\n\r\n def handle_update(self, data):\r\n uri = data.get('uri', GIT_REPO_URI)\r\n branch = data.get('branch', GIT_REPO_BRANCH)\r\n updater = AutoUpdater(repo_uri=uri, branch=branch)\r\n updater.update()\r\n\r\n def send_response(self, message):\r\n print('Sending response %s' % message)\r\n data = json.dumps(message).encode()\r\n self.request.sendall(data)\r\n\r\n\r\nclass Server(socketserver.TCPServer):\r\n timeout = SOCKET_TIMEOUT\r\n\r\n def __init__(self, server_address=(HOST, PORT), RequestHandlerClass=TCPHandler, bind_and_activate=True):\r\n super(Server, self).__init__(\r\n server_address=server_address,\r\n RequestHandlerClass=RequestHandlerClass,\r\n bind_and_activate=bind_and_activate,\r\n )\r\n\r\n\r\ndef _worker_thread():\r\n server = Server()\r\n server.serve_forever()\r\n\r\nserver_thread = None\r\n\r\n\r\ndef run_server():\r\n global server_thread\r\n assert server_thread is None\r\n server_thread = threading.Thread(target=_worker_thread, daemon=True)\r\n server_thread.start()","sub_path":"smart_car/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"411447109","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n#\n# @Author: oesteban\n# @Date: 2016-01-05 11:24:05\n# @Email: code@oscaresteban.es\n# @Last modified by: oesteban\n\"\"\"\n=======================\nThe anatomical workflow\n=======================\n\n.. image :: _static/anatomical_workflow_source.svg\n\nThe anatomical workflow follows the following steps:\n\n#. Conform (reorientations, revise data types) input data and read\n associated metadata.\n#. Skull-stripping (AFNI).\n#. Calculate head mask -- :py:func:`headmsk_wf`.\n#. Spatial Normalization to MNI (ANTs)\n#. Calculate air mask above the nasial-cerebelum plane -- :py:func:`airmsk_wf`.\n#. Brain tissue segmentation (FAST).\n#. Extraction of IQMs -- :py:func:`compute_iqms`.\n#. Individual-reports generation -- :py:func:`individual_reports`.\n\nThis workflow is orchestrated by :py:func:`anat_qc_workflow`.\n\nFor the skull-stripping, we use ``afni_wf`` from ``niworkflows.anat.skullstrip``:\n\n.. 
workflow::\n\n import os.path as op\n from niworkflows.anat.skullstrip import afni_wf\n wf = afni_wf()\n\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\nfrom builtins import zip, range\nimport os.path as op\n\nfrom nipype import logging\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import io as nio\nfrom nipype.interfaces import utility as niu\nfrom nipype.interfaces import fsl\nfrom nipype.interfaces import ants\nfrom nipype.interfaces import afni\n\nfrom niworkflows.data import get_mni_icbm152_nlin_asym_09c\nfrom niworkflows.anat.skullstrip import afni_wf as skullstrip_wf\nfrom niworkflows.interfaces.registration import RobustMNINormalizationRPT as RobustMNINormalization\n\nfrom mriqc import DEFAULTS\nfrom mriqc.interfaces import (StructuralQC, ArtifactMask, ReadSidecarJSON,\n ConformImage, ComputeQI2, IQMFileSink, RotationMask)\n\nfrom mriqc.utils.misc import check_folder\nWFLOGGER = logging.getLogger('workflow')\n\ndef anat_qc_workflow(dataset, settings, mod='T1w', name='anatMRIQC'):\n \"\"\"\n One-subject-one-session-one-run pipeline to extract the NR-IQMs from\n anatomical images\n\n .. workflow::\n\n import os.path as op\n from mriqc.workflows.anatomical import anat_qc_workflow\n datadir = op.abspath('data')\n wf = anat_qc_workflow([op.join(datadir, 'sub-001/anat/sub-001_T1w.nii.gz')],\n settings={'bids_dir': datadir,\n 'output_dir': op.abspath('out'),\n 'ants_nthreads': 1,\n 'no_sub': True})\n\n \"\"\"\n\n workflow = pe.Workflow(name=name+mod)\n WFLOGGER.info('Building anatomical MRI QC workflow, datasets list: %s',\n sorted([d.replace(settings['bids_dir'] + '/', '') for d in dataset]))\n\n # Define workflow, inputs and outputs\n # 0. Get data\n inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')\n inputnode.iterables = [('in_file', dataset)]\n\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_json']), name='outputnode')\n\n # 1. Reorient anatomical image\n to_ras = pe.Node(ConformImage(check_dtype=False), name='conform')\n # 2. Skull-stripping (afni)\n asw = skullstrip_wf(n4_nthreads=settings.get('ants_nthreads', 1), unifize=False)\n # 3. Head mask\n hmsk = headmsk_wf()\n # 4. Spatial Normalization, using ANTs\n norm = spatial_normalization(settings)\n # 5. Air mask (with and without artifacts)\n amw = airmsk_wf()\n # 6. Brain tissue segmentation\n segment = pe.Node(fsl.FAST(segments=True, out_basename='segment', img_type=int(mod[1])),\n name='segmentation', estimated_memory_gb=3)\n # 7. 
Compute IQMs\n iqmswf = compute_iqms(settings, modality=mod)\n # Reports\n repwf = individual_reports(settings)\n\n # Connect all nodes\n workflow.connect([\n (inputnode, to_ras, [('in_file', 'in_file')]),\n (inputnode, iqmswf, [('in_file', 'inputnode.in_file')]),\n (to_ras, asw, [('out_file', 'inputnode.in_file')]),\n (asw, segment, [('outputnode.out_file', 'in_files')]),\n (asw, hmsk, [('outputnode.bias_corrected', 'inputnode.in_file')]),\n (segment, hmsk, [('tissue_class_map', 'inputnode.in_segm')]),\n (asw, norm, [('outputnode.bias_corrected', 'inputnode.moving_image'),\n ('outputnode.out_mask', 'inputnode.moving_mask')]),\n (norm, amw, [\n ('outputnode.inverse_composite_transform', 'inputnode.inverse_composite_transform')]),\n (norm, iqmswf, [\n ('outputnode.inverse_composite_transform', 'inputnode.inverse_composite_transform')]),\n (norm, repwf, ([\n ('outputnode.out_report', 'inputnode.mni_report')])),\n (to_ras, amw, [('out_file', 'inputnode.in_file')]),\n (asw, amw, [('outputnode.out_mask', 'inputnode.in_mask')]),\n (hmsk, amw, [('outputnode.out_file', 'inputnode.head_mask')]),\n (to_ras, iqmswf, [('out_file', 'inputnode.in_ras')]),\n (asw, iqmswf, [('outputnode.bias_corrected', 'inputnode.inu_corrected'),\n ('outputnode.bias_image', 'inputnode.in_inu'),\n ('outputnode.out_mask', 'inputnode.brainmask')]),\n (amw, iqmswf, [('outputnode.out_file', 'inputnode.airmask'),\n ('outputnode.artifact_msk', 'inputnode.artmask'),\n ('outputnode.rot_mask', 'inputnode.rotmask')]),\n (segment, iqmswf, [('tissue_class_map', 'inputnode.segmentation'),\n ('partial_volume_files', 'inputnode.pvms')]),\n (hmsk, iqmswf, [('outputnode.out_file', 'inputnode.headmask')]),\n (to_ras, repwf, [('out_file', 'inputnode.in_ras')]),\n (asw, repwf, [('outputnode.bias_corrected', 'inputnode.inu_corrected'),\n ('outputnode.out_mask', 'inputnode.brainmask')]),\n (hmsk, repwf, [('outputnode.out_file', 'inputnode.headmask')]),\n (amw, repwf, [('outputnode.out_file', 'inputnode.airmask'),\n ('outputnode.artifact_msk', 'inputnode.artmask'),\n ('outputnode.rot_mask', 'inputnode.rotmask')]),\n (segment, repwf, [('tissue_class_map', 'inputnode.segmentation')]),\n (iqmswf, repwf, [('outputnode.out_noisefit', 'inputnode.noisefit')]),\n (iqmswf, repwf, [('outputnode.out_file', 'inputnode.in_iqms')]),\n (iqmswf, outputnode, [('outputnode.out_file', 'out_json')])\n ])\n\n # Upload metrics\n if not settings.get('no_sub', False):\n from mriqc.interfaces.webapi import UploadIQMs\n upldwf = pe.Node(UploadIQMs(), name='UploadMetrics')\n upldwf.inputs.email = settings.get('email', '')\n upldwf.inputs.url = settings.get('webapi_url')\n if settings.get('webapi_port'):\n upldwf.inputs.port = settings.get('webapi_port')\n\n upldwf.inputs.strict = settings.get('upload_strict', False)\n\n workflow.connect([\n (iqmswf, upldwf, [('outputnode.out_file', 'in_iqms')]),\n ])\n\n return workflow\n\ndef spatial_normalization(settings, mod='T1w', name='SpatialNormalization',\n resolution=2.0):\n \"\"\"\n A simple workflow to perform spatial normalization\n\n \"\"\"\n from mriqc.interfaces.common import EnsureSize\n from nipype.interfaces.ants import AffineInitializer\n from niworkflows.data import getters as niwgetters\n\n # Have some settings handy\n tpl_id = settings.get('template_id', 'mni_icbm152_nlin_asym_09c')\n mni_template = getattr(niwgetters, 'get_{}'.format(tpl_id))()\n\n # Define workflow interface\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(fields=[\n 'moving_image', 'moving_mask']), 
name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=[\n 'inverse_composite_transform', 'out_report']), name='outputnode')\n\n # Spatial normalization\n norm = pe.Node(RobustMNINormalization(\n flavor='testing' if settings.get('testing', False) else 'fast',\n num_threads=settings.get('ants_nthreads'),\n template=tpl_id,\n template_resolution=2,\n reference=mod[:2],\n generate_report=True,),\n name='SpatialNormalization',\n # Request all MultiProc processes when ants_nthreads > n_procs\n num_threads=min(settings.get('ants_nthreads', DEFAULTS['ants_nthreads']),\n settings.get('n_procs', 1)),\n estimated_memory_gb=3)\n norm.inputs.reference_mask = op.join(mni_template,\n '%dmm_brainmask.nii.gz' % int(resolution))\n\n workflow.connect([\n (inputnode, norm, [('moving_image', 'moving_image'),\n ('moving_mask', 'moving_mask')]),\n (norm, outputnode, [('inverse_composite_transform', 'inverse_composite_transform'),\n ('out_report', 'out_report')]),\n ])\n return workflow\n\ndef compute_iqms(settings, modality='T1w', name='ComputeIQMs'):\n \"\"\"\n Workflow that actually computes the IQMs\n\n .. workflow::\n\n from mriqc.workflows.anatomical import compute_iqms\n wf = compute_iqms(settings={'output_dir': 'out'})\n\n \"\"\"\n from mriqc.workflows.utils import _tofloat\n from mriqc.interfaces.anatomical import Harmonize\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(fields=[\n 'in_file', 'in_ras',\n 'brainmask', 'airmask', 'artmask', 'headmask', 'rotmask',\n 'segmentation', 'inu_corrected', 'in_inu', 'pvms', 'metadata',\n 'inverse_composite_transform']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_noisefit']),\n name='outputnode')\n\n deriv_dir = check_folder(op.abspath(op.join(settings['output_dir'], 'derivatives')))\n\n # Extract metadata\n meta = pe.Node(ReadSidecarJSON(), name='metadata')\n\n # Add provenance\n addprov = pe.Node(niu.Function(function=_add_provenance), name='provenance')\n addprov.inputs.settings = {\n 'testing': settings.get('testing', False)\n }\n\n # AFNI check smoothing\n fwhm = pe.Node(afni.FWHMx(combine=True, detrend=True), name='smoothness')\n # fwhm.inputs.acf = True # add when AFNI >= 16\n\n # Harmonize\n homog = pe.Node(Harmonize(), name='harmonize')\n\n # Mortamet's QI2\n getqi2 = pe.Node(ComputeQI2(erodemsk=settings.get('testing', False)),\n name='ComputeQI2')\n\n # Compute python-coded measures\n measures = pe.Node(StructuralQC(), 'measures')\n\n # Project MNI segmentation to T1 space\n invt = pe.MapNode(ants.ApplyTransforms(\n dimension=3, default_value=0, interpolation='Linear',\n float=True),\n iterfield=['input_image'], name='MNItpms2t1')\n invt.inputs.input_image = [op.join(get_mni_icbm152_nlin_asym_09c(), fname + '.nii.gz')\n for fname in ['1mm_tpm_csf', '1mm_tpm_gm', '1mm_tpm_wm']]\n\n datasink = pe.Node(IQMFileSink(modality=modality, out_dir=deriv_dir),\n name='datasink')\n datasink.inputs.modality = modality\n\n def _getwm(inlist):\n return inlist[-1]\n\n workflow.connect([\n (inputnode, meta, [('in_file', 'in_file')]),\n (meta, datasink, [('subject_id', 'subject_id'),\n ('session_id', 'session_id'),\n ('acq_id', 'acq_id'),\n ('rec_id', 'rec_id'),\n ('run_id', 'run_id'),\n ('out_dict', 'metadata')]),\n\n (inputnode, addprov, [('in_file', 'in_file'),\n ('airmask', 'air_msk'),\n ('rotmask', 'rot_msk')]),\n (inputnode, getqi2, [('in_ras', 'in_file'),\n ('airmask', 'air_msk')]),\n (inputnode, homog, [('inu_corrected', 'in_file'),\n (('pvms', _getwm), 'wm_mask')]),\n 
(inputnode, measures, [('in_inu', 'in_bias'),\n ('in_ras', 'in_file'),\n ('airmask', 'air_msk'),\n ('headmask', 'head_msk'),\n ('artmask', 'artifact_msk'),\n ('rotmask', 'rot_msk'),\n ('segmentation', 'in_segm'),\n ('pvms', 'in_pvms')]),\n (inputnode, fwhm, [('in_ras', 'in_file'),\n ('brainmask', 'mask')]),\n (inputnode, invt, [('in_ras', 'reference_image'),\n ('inverse_composite_transform', 'transforms')]),\n (homog, measures, [('out_file', 'in_noinu')]),\n (invt, measures, [('output_image', 'mni_tpms')]),\n (fwhm, measures, [(('fwhm', _tofloat), 'in_fwhm')]),\n (measures, datasink, [('out_qc', 'root')]),\n (addprov, datasink, [('out', 'provenance')]),\n (getqi2, datasink, [('qi2', 'qi_2')]),\n (getqi2, outputnode, [('out_file', 'out_noisefit')]),\n (datasink, outputnode, [('out_file', 'out_file')]),\n ])\n return workflow\n\ndef individual_reports(settings, name='ReportsWorkflow'):\n \"\"\"\n Encapsulates nodes writing plots\n\n .. workflow::\n\n from mriqc.workflows.anatomical import individual_reports\n wf = individual_reports(settings={'output_dir': 'out'})\n\n \"\"\"\n from mriqc.interfaces import PlotMosaic\n from mriqc.reports import individual_html\n\n verbose = settings.get('verbose_reports', False)\n pages = 2\n extra_pages = 0\n if verbose:\n extra_pages = 7\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(fields=[\n 'in_ras', 'brainmask', 'headmask', 'airmask', 'artmask', 'rotmask',\n 'segmentation', 'inu_corrected', 'noisefit', 'in_iqms',\n 'mni_report']),\n name='inputnode')\n\n mosaic_zoom = pe.Node(PlotMosaic(\n out_file='plot_anat_mosaic1_zoomed.svg',\n title='zoomed',\n cmap='Greys_r'), name='PlotMosaicZoomed')\n\n mosaic_noise = pe.Node(PlotMosaic(\n out_file='plot_anat_mosaic2_noise.svg',\n title='noise enhanced',\n only_noise=True,\n cmap='viridis_r'), name='PlotMosaicNoise')\n\n mplots = pe.Node(niu.Merge(pages + extra_pages), name='MergePlots')\n rnode = pe.Node(niu.Function(\n input_names=['in_iqms', 'in_plots'], output_names=['out_file'],\n function=individual_html), name='GenerateReport')\n\n # Link images that should be reported\n dsplots = pe.Node(nio.DataSink(\n base_directory=settings['output_dir'], parameterization=False), name='dsplots')\n dsplots.inputs.container = 'reports'\n\n workflow.connect([\n (inputnode, rnode, [('in_iqms', 'in_iqms')]),\n (inputnode, mosaic_zoom, [('in_ras', 'in_file'),\n ('brainmask', 'bbox_mask_file')]),\n (inputnode, mosaic_noise, [('in_ras', 'in_file')]),\n (mosaic_zoom, mplots, [('out_file', \"in1\")]),\n (mosaic_noise, mplots, [('out_file', \"in2\")]),\n (mplots, rnode, [('out', 'in_plots')]),\n (rnode, dsplots, [('out_file', \"@html_report\")]),\n ])\n\n if not verbose:\n return workflow\n\n from mriqc.interfaces.viz import PlotContours\n from mriqc.viz.utils import plot_bg_dist\n plot_bgdist = pe.Node(niu.Function(input_names=['in_file'], output_names=['out_file'],\n function=plot_bg_dist), name='PlotBackground')\n\n plot_segm = pe.Node(PlotContours(\n display_mode='z', levels=[.5, 1.5, 2.5], cut_coords=10,\n colors=['r', 'g', 'b']), name='PlotSegmentation')\n\n plot_bmask = pe.Node(PlotContours(\n display_mode='z', levels=[.5], colors=['r'], cut_coords=10,\n out_file='bmask'), name='PlotBrainmask')\n plot_airmask = pe.Node(PlotContours(\n display_mode='x', levels=[.5], colors=['r'],\n cut_coords=6, out_file='airmask'), name='PlotAirmask')\n plot_headmask = pe.Node(PlotContours(\n display_mode='x', levels=[.5], colors=['r'],\n cut_coords=6, out_file='headmask'), name='PlotHeadmask')\n 
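The mriqc workflows above and below all use the same nipype wiring idiom: workflow.connect takes (source, sink, [(output_port, input_port), ...]) tuples and turns each pair into an edge of the execution graph, with small helpers such as _getwm slotted in as (('pvms', _getwm), 'wm_mask') to transform a value in transit. A minimal, self-contained sketch of that pattern follows; the toy nodes and functions are illustrative, not mriqc's.

```python
# Self-contained sketch of the nipype wiring idiom; toy functions, not mriqc's.
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu


def _double(x):
    return 2 * x


def _describe(y):
    return 'value: %d' % y


wf = pe.Workflow(name='toy_wf')
double = pe.Node(niu.Function(input_names=['x'], output_names=['y'],
                              function=_double), name='double')
describe = pe.Node(niu.Function(input_names=['y'], output_names=['text'],
                                function=_describe), name='describe')
double.inputs.x = 21

# Each (out_port, in_port) pair becomes one edge of the execution graph.
wf.connect([(double, describe, [('y', 'y')])])

if __name__ == '__main__':
    wf.run()
```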
plot_artmask = pe.Node(PlotContours(\n display_mode='z', levels=[.5], colors=['r'], cut_coords=10,\n out_file='artmask', saturate=True), name='PlotArtmask')\n\n workflow.connect([\n (inputnode, plot_segm, [('in_ras', 'in_file'),\n ('segmentation', 'in_contours')]),\n (inputnode, plot_bmask, [('in_ras', 'in_file'),\n ('brainmask', 'in_contours')]),\n (inputnode, plot_headmask, [('in_ras', 'in_file'),\n ('headmask', 'in_contours')]),\n (inputnode, plot_airmask, [('in_ras', 'in_file'),\n ('airmask', 'in_contours')]),\n (inputnode, plot_artmask, [('in_ras', 'in_file'),\n ('artmask', 'in_contours')]),\n (inputnode, plot_bgdist, [('noisefit', 'in_file')]),\n (inputnode, mplots, [('mni_report', \"in%d\" % (pages + 1))]),\n (plot_bmask, mplots, [('out_file', 'in%d' % (pages + 2))]),\n (plot_segm, mplots, [('out_file', 'in%d' % (pages + 3))]),\n (plot_artmask, mplots, [('out_file', 'in%d' % (pages + 4))]),\n (plot_headmask, mplots, [('out_file', 'in%d' % (pages + 5))]),\n (plot_airmask, mplots, [('out_file', 'in%d' % (pages + 6))]),\n (plot_bgdist, mplots, [('out_file', 'in%d' % (pages + 7))])\n ])\n return workflow\n\ndef headmsk_wf(name='HeadMaskWorkflow', use_bet=True):\n \"\"\"\n Computes a head mask as in [Mortamet2009]_.\n\n .. workflow::\n\n from mriqc.workflows.anatomical import headmsk_wf\n wf = headmsk_wf()\n\n \"\"\"\n\n has_dipy = False\n try:\n from dipy.denoise import nlmeans\n has_dipy = True\n except ImportError:\n pass\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_segm']),\n name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode')\n\n if use_bet or not has_dipy:\n # Alternative for when dipy is not installed\n bet = pe.Node(fsl.BET(surfaces=True), name='fsl_bet')\n workflow.connect([\n (inputnode, bet, [('in_file', 'in_file')]),\n (bet, outputnode, [('outskin_mask_file', 'out_file')])\n ])\n\n else:\n from nipype.interfaces.dipy import Denoise\n enhance = pe.Node(niu.Function(\n input_names=['in_file'], output_names=['out_file'], function=_enhance), name='Enhance')\n estsnr = pe.Node(niu.Function(\n input_names=['in_file', 'seg_file'], output_names=['out_snr'],\n function=_estimate_snr), name='EstimateSNR')\n denoise = pe.Node(Denoise(), name='Denoise')\n gradient = pe.Node(niu.Function(\n input_names=['in_file', 'snr'], output_names=['out_file'], function=image_gradient), name='Grad')\n thresh = pe.Node(niu.Function(\n input_names=['in_file', 'in_segm'], output_names=['out_file'], function=gradient_threshold),\n name='GradientThreshold')\n\n workflow.connect([\n (inputnode, estsnr, [('in_file', 'in_file'),\n ('in_segm', 'seg_file')]),\n (estsnr, denoise, [('out_snr', 'snr')]),\n (inputnode, enhance, [('in_file', 'in_file')]),\n (enhance, denoise, [('out_file', 'in_file')]),\n (estsnr, gradient, [('out_snr', 'snr')]),\n (denoise, gradient, [('out_file', 'in_file')]),\n (inputnode, thresh, [('in_segm', 'in_segm')]),\n (gradient, thresh, [('out_file', 'in_file')]),\n (thresh, outputnode, [('out_file', 'out_file')])\n ])\n\n return workflow\n\n\ndef airmsk_wf(name='AirMaskWorkflow'):\n \"\"\"\n Implements the Step 1 of [Mortamet2009]_.\n\n .. 
workflow::\n\n from mriqc.workflows.anatomical import airmsk_wf\n wf = airmsk_wf()\n\n \"\"\"\n workflow = pe.Workflow(name=name)\n\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['in_file', 'in_mask', 'head_mask', 'inverse_composite_transform']),\n name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'artifact_msk', 'rot_mask']),\n name='outputnode')\n\n rotmsk = pe.Node(RotationMask(), name='RotationMask')\n\n invt = pe.Node(ants.ApplyTransforms(dimension=3, default_value=0,\n interpolation='Linear', float=True), name='invert_xfm')\n invt.inputs.input_image = op.join(get_mni_icbm152_nlin_asym_09c(), '1mm_headmask.nii.gz')\n\n binarize = pe.Node(niu.Function(function=_binarize), name='Binarize')\n\n qi1 = pe.Node(ArtifactMask(), name='ArtifactMask')\n\n workflow.connect([\n (inputnode, rotmsk, [('in_file', 'in_file')]),\n (inputnode, qi1, [('in_file', 'in_file'),\n ('head_mask', 'head_mask')]),\n (rotmsk, qi1, [('out_file', 'rot_mask')]),\n (inputnode, invt, [('in_mask', 'reference_image'),\n ('inverse_composite_transform', 'transforms')]),\n (invt, binarize, [('output_image', 'in_file')]),\n (binarize, qi1, [('out', 'nasion_post_mask')]),\n (qi1, outputnode, [('out_air_msk', 'out_file'),\n ('out_art_msk', 'artifact_msk')]),\n (rotmsk, outputnode, [('out_file', 'rot_mask')])\n ])\n return workflow\n\n\ndef _add_provenance(in_file, settings, air_msk, rot_msk):\n from mriqc import __version__ as version\n from copy import deepcopy\n from nipype.utils.filemanip import hash_infile\n import nibabel as nb\n import numpy as np\n\n air_msk_size = nb.load(air_msk).get_data().astype(\n np.uint8).sum()\n rot_msk_size = nb.load(rot_msk).get_data().astype(\n np.uint8).sum()\n\n out_prov = {\n 'md5sum': hash_infile(in_file),\n 'version': version,\n 'software': 'mriqc',\n 'warnings': {\n 'small_air_mask': bool(air_msk_size < 5e5),\n 'large_rot_frame': bool(rot_msk_size > 500),\n }\n }\n\n if settings:\n out_prov['settings'] = settings\n\n return out_prov\n\ndef _binarize(in_file, threshold=0.5, out_file=None):\n import os.path as op\n import numpy as np\n import nibabel as nb\n\n if out_file is None:\n fname, ext = op.splitext(op.basename(in_file))\n if ext == '.gz':\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n out_file = op.abspath('{}_bin{}'.format(fname, ext))\n\n nii = nb.load(in_file)\n data = nii.get_data()\n\n data[data <= threshold] = 0\n data[data > 0] = 1\n\n hdr = nii.header.copy()\n hdr.set_data_dtype(np.uint8)\n nb.Nifti1Image(data.astype(np.uint8), nii.affine, hdr).to_filename(\n out_file)\n return out_file\n\ndef _estimate_snr(in_file, seg_file):\n import nibabel as nb\n from mriqc.qc.anatomical import snr\n out_snr = snr(nb.load(in_file).get_data(), nb.load(seg_file).get_data(),\n fglabel='wm')\n return out_snr\n\ndef _enhance(in_file, out_file=None):\n import os.path as op\n import numpy as np\n import nibabel as nb\n\n if out_file is None:\n fname, ext = op.splitext(op.basename(in_file))\n if ext == '.gz':\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n out_file = op.abspath('{}_enhanced{}'.format(fname, ext))\n\n imnii = nb.load(in_file)\n data = imnii.get_data().astype(np.float32) # pylint: disable=no-member\n range_max = np.percentile(data[data > 0], 99.98)\n range_min = np.median(data[data > 0])\n\n # Resample signal excess pixels\n excess = np.where(data > range_max)\n data[excess] = 0\n data[excess] = np.random.choice(data[data > range_min], size=len(excess[0]))\n\n nb.Nifti1Image(data, imnii.get_affine(), 
imnii.get_header()).to_filename(\n out_file)\n\n return out_file\n\ndef image_gradient(in_file, snr, out_file=None):\n \"\"\"Computes the magnitude gradient of an image using numpy\"\"\"\n import os.path as op\n import numpy as np\n import nibabel as nb\n from scipy.ndimage import gaussian_gradient_magnitude as gradient\n\n if out_file is None:\n fname, ext = op.splitext(op.basename(in_file))\n if ext == '.gz':\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n out_file = op.abspath('{}_grad{}'.format(fname, ext))\n\n imnii = nb.load(in_file)\n data = imnii.get_data().astype(np.float32) # pylint: disable=no-member\n datamax = np.percentile(data.reshape(-1), 99.5)\n data *= 100 / datamax\n grad = gradient(data, 3.0)\n gradmax = np.percentile(grad.reshape(-1), 99.5)\n grad *= 100.\n grad /= gradmax\n\n nb.Nifti1Image(grad, imnii.get_affine(), imnii.get_header()).to_filename(out_file)\n return out_file\n\ndef gradient_threshold(in_file, in_segm, thresh=1.0, out_file=None):\n \"\"\" Compute a threshold from the histogram of the magnitude gradient image \"\"\"\n import os.path as op\n import numpy as np\n import nibabel as nb\n from scipy import ndimage as sim\n\n struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)\n\n if out_file is None:\n fname, ext = op.splitext(op.basename(in_file))\n if ext == '.gz':\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n out_file = op.abspath('{}_gradmask{}'.format(fname, ext))\n\n imnii = nb.load(in_file)\n\n hdr = imnii.get_header().copy()\n hdr.set_data_dtype(np.uint8) # pylint: disable=no-member\n\n data = imnii.get_data().astype(np.float32)\n\n mask = np.zeros_like(data, dtype=np.uint8) # pylint: disable=no-member\n mask[data > 15.] = 1\n\n segdata = nb.load(in_segm).get_data().astype(np.uint8)\n segdata[segdata > 0] = 1\n segdata = sim.binary_dilation(segdata, struc, iterations=2, border_value=1).astype(np.uint8) # pylint: disable=no-member\n mask[segdata > 0] = 1\n\n mask = sim.binary_closing(mask, struc, iterations=2).astype(np.uint8) # pylint: disable=no-member\n # Remove small objects\n label_im, nb_labels = sim.label(mask)\n artmsk = np.zeros_like(mask)\n if nb_labels > 2:\n sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))\n ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))\n for _, label in ordered[2:]:\n mask[label_im == label] = 0\n artmsk[label_im == label] = 1\n\n mask = sim.binary_fill_holes(mask, struc).astype(np.uint8) # pylint: disable=no-member\n\n nb.Nifti1Image(mask, imnii.get_affine(), hdr).to_filename(out_file)\n return out_file\n","sub_path":"mriqc/workflows/anatomical.py","file_name":"anatomical.py","file_ext":"py","file_size_in_byte":26855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"256330547","text":"from random import *\n\ndef tiradaDadosNVeces():\n \"\"\"\n Representamos resultados de tiradas de dados mediante números\n\n tiradaDados: -> String\n\n Simula el lanzamiento de dos dados n veces, devuelve cuantas veces los dados coincidieron con los resultados\n \"\"\"\n n = int(input(\"Ingrese un número: \"))\n contador = 0\n\n for tirada in range(0, n):\n\n dado1 = randint(1, 6)\n dado2 = randint(1, 6)\n print(dado1, dado2)\n\n if dado1 == dado2:\n contador += 1\n\n print(\"Los dados coincidieron \" + str(contador) + \" 
veces\")\n\ntiradaDadosNVeces()","sub_path":"1_año/ProgII/Python/p3/ej2.py","file_name":"ej2.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"76094580","text":"\"\"\"\nFactory functions for creating Flask apps with\ndifferent settings.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport textwrap\nimport time\nimport uuid\n\nimport yaml\nfrom flask import Flask, g, request\nfrom flask_cors import CORS\nfrom elasticapm.contrib.flask import ElasticAPM\nfrom .constants import CommonHeaders\n\napm = ElasticAPM()\n\n# pylint: disable=too-many-arguments,too-many-locals,too-many-statements\n\ndef create_service_app(env,\n config_module,\n enable_cors=True,\n track_time=True,\n track_request_body=True,\n track_response_body=True,\n track_path_prefix='/api/'):\n\n \"\"\"\n Creates a configured Flask app for standard Datadeck service.\n\n Additional config file will be loaded if \"EXTRA_CONFIG\" environment\n variable is provided, which specifies an additional Yaml file which\n will be used for overriding config items loaded in the standard config\n file.\n\n :param env: service's deploy environment\n :param config_module: module containing config classes, named Config{Env}\n :param enable_cors: if the service should be configured to allow CORS\n :param track_time: if request should be timed (and logged)\n :param track_request_body: if request body should be logged\n :param track_response_body: if response body should be logged\n :param track_path_prefix: track only url path with the given prefix\n \"\"\"\n\n config_class_name = \"Config{}\".format(env.capitalize())\n config_obj = getattr(config_module, config_class_name)\n if not config_obj:\n raise ValueError((f'Config class {config_class_name} '\n f'cannot be found in module {config_module}'))\n\n app = Flask(config_obj.APP_NAME)\n app.config.from_object(config_obj)\n\n logger = logging.getLogger(config_obj.APP_NAME)\n\n if 'ELASTIC_APM' in app.config:\n logger.info('Initializing elastic APM')\n apm.init_app(app)\n\n # override config with yaml file specified in environment variable\n extra_config_file = os.environ.get('EXTRA_CONFIG')\n if extra_config_file:\n logger.info('\"EXTRA_CONFIG\" env var provided, adding config from: %s', extra_config_file)\n with open(extra_config_file, 'r') as f:\n extra_config_obj = yaml.load(f)\n app.config.from_mapping(extra_config_obj)\n\n app.secret_key = os.urandom(24)\n\n if enable_cors:\n logger.warning('Enabling CORS')\n CORS(app)\n\n def body_str(entity):\n if entity.json:\n result = json.dumps(entity.json, ensure_ascii=False)\n elif entity.data:\n try:\n result = entity.data.decode('utf-8').replace('\\n', ' ')\n except UnicodeDecodeError:\n result = str(entity.data)\n else:\n result = ''\n return textwrap.shorten(result, width=4096, placeholder='...')\n\n @app.before_request\n def before_request(): # pylint: disable=unused-variable\n \"\"\"\n Log and start timing request. 
Extract or generate trace id.\n \"\"\"\n\n if request.method not in ['GET', 'POST', 'PUT', 'DELETE']:\n return\n\n if track_path_prefix and not request.path.startswith(track_path_prefix):\n return\n\n if 'TraceId' in request.headers:\n g.trace_id = request.headers['TraceId']\n elif 'traceId' in request.args:\n g.trace_id = request.args['traceId']\n else:\n g.trace_id = str(uuid.uuid1())\n\n if 'UID' in request.headers:\n g.user_id = request.headers['UID']\n else:\n g.user_id = ''\n\n if CommonHeaders.LOCALE in request.headers:\n g.locale = request.headers[CommonHeaders.LOCALE]\n else:\n g.locale = 'en_US'\n\n if track_time:\n g.start = time.time()\n logger.info('Request started, %s', request.full_path)\n\n if track_request_body:\n logger.debug('Request body: %s', body_str(request))\n\n\n @app.after_request\n def after_request(response): # pylint: disable=unused-variable\n \"\"\"\n Time and log response.\n \"\"\"\n\n if request.method not in ['GET', 'POST', 'PUT']:\n return response\n\n if track_path_prefix and not request.path.startswith(track_path_prefix):\n return response\n\n err = response.status_code >= 400\n\n if track_time:\n log_method = logger.error if err else logger.info\n log_method('Request ended, took %d ms', int((time.time() - g.start) * 1000))\n\n if err:\n logger.error('Response body: %s', body_str(response))\n elif track_response_body:\n logger.debug('Response body: %s', body_str(response))\n return response\n\n return app\n","sub_path":"backend-master/src/common/service/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"621117482","text":"# -*- coding: utf-8 -*-\n# /***************************************************************************/\n# * __________________________________\n# * METIS CYBERSPACE TECHNOLOGY S.A.\n# * www.metis.tech\n# * __________________________________\n# * [2019] All Rights Reserved.\n# *\n# * NOTICE: All information contained herein is, and remains\n# * the property of Metis CyberSpace Technology and its suppliers,\n# * if any. The intellectual and technical concepts contained\n# * herein are proprietary to METIS CYBERSPACE TECHNOLOGY\n# * and its suppliers and may be covered by European and Foreign Patents,\n# * patents in process, and are protected by trade secret or copyright law.\n# * Dissemination of this information or reproduction of this material\n# * is strictly forbidden unless prior written permission is obtained\n# * from Metis Cyberspace Technology.\n#\n# /***************************************************************************/\n#\n# Created Date: Friday April 5th 2019\n# Author: Vassilis Lemonidis\n\"\"\"Module holding a shape based image cropping\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nfrom metis_pylib import LOGGER\nimport ImageProcessing.Debugging.operations as dbg\nfrom ImageProcessing import IMPROC_CONFIG\nfrom ImageProcessing.CCAnalysis import SpatialRectanglesDetector\n\nclass ShapeCropper():\n '''\n Extracts principal rectangle from an image (which is probably the document edges)\n '''\n\n def __init__(self):\n self.rectangles_detector = SpatialRectanglesDetector()\n\n def _get_main_plate_area(self, struct, found_plates):\n '''\n Create a mask of the image, where every point value corresponds to the\\\n number of rectangles that contain it. Normalizes this mask\\\n and retrieves a binary mask.\n\n :param threshold: The threshold of binarization. 
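The Flask factory above implements its request timing by stashing a timestamp on flask.g in a before_request hook and reading it back in after_request, which receives the response and must return it. A stripped-down, runnable sketch of just that pattern; the app name and route are placeholders.

```python
# Runnable sketch of the g-based request timing used in create_service_app.
import time

from flask import Flask, g, request

app = Flask('timing_demo')


@app.before_request
def start_timer():
    g.start = time.time()


@app.after_request
def log_elapsed(response):
    elapsed_ms = int((time.time() - g.start) * 1000)
    app.logger.info('%s %s took %d ms', request.method, request.path, elapsed_ms)
    return response


@app.route('/api/ping')
def ping():
    return 'pong'
```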
(0 < threshold < 1)\n '''\n if found_plates is None or not found_plates:\n return None\n masks = []\n LOGGER.debug(\"Processing %d plates\", len(found_plates))\n\n for plate in found_plates:\n tmp = np.zeros(struct.img.shape[:2], dtype=float)\n cv2.drawContours(tmp, [plate], 0, 1, -1)\n masks.append(tmp)\n # dbg.imshow(tmp)\n # masks_unique_colors_num.append(\n # np.unique(\n # self.rectangles_detector.reduced_colors_labels[tmp > 0]).size)\n old_res = 10\n accepted_cnt = None\n old_size = 0\n _plate_areas = np.zeros(struct.img.shape[:2], dtype=float)\n for mask in masks:\n # for mask, mask_unique_colors_num in zip(\n # masks, masks_unique_colors_num):\n # if mask_unique_colors_num <= allowed_colors_num:\n _plate_areas += mask\n _plate_areas = _plate_areas / float(np.max(_plate_areas))\n # dbg.imshow(_plate_areas)\n unique_values = np.sort(np.unique(_plate_areas))\n for cnt in range(len(unique_values))[1:]:\n _bin_plate_areas = _plate_areas >= unique_values[cnt]\n # dbg.imshow(_bin_plate_areas)\n cnts = cv2.findContours(\n _bin_plate_areas.astype(np.uint8),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[1]\n cnt = cnts[np.argmax([cv2.contourArea(cnt)\n for cnt in cnts])]\n cnt = self._get_quadrilateral_plate(struct, cnt)\n if cnt is None:\n continue\n square_res = self.rectangles_detector.get_square_score(struct.img, cnt)\n\n if square_res != 0:\n cnt_size = cv2.contourArea(cnt)\n if old_size / old_res > cnt_size / square_res:\n continue\n else:\n return cnt\n accepted_cnt = cnt\n old_res = square_res\n old_size = cnt_size\n return accepted_cnt\n\n def _get_quadrilateral_plate(self, struct, _plate_contour):\n epsilon = 0.1 * cv2.arcLength(_plate_contour, True)\n approx = cv2.approxPolyDP(_plate_contour, epsilon, True)\n if len(approx) != 4:\n LOGGER.debug(\n \"%sCould not get quadrilateral from input plate. 
Instead a shape\"\n                \" with %d corners was found.\", (\"Image: {} :\".format(\n                    struct.img_path) if struct.img_path is not None else \"\"), len(approx))\n            if IMPROC_CONFIG['visualize_rectangles_detector']:\n                tmp = np.zeros(struct.img.shape[:2])\n                cv2.drawContours(tmp, [_plate_contour], 0, 1, -1)\n                dbg.imshow(\n                    tmp, 'Contour that could not be approximated as quadrilateral')\n            return None\n        return approx\n\n    @staticmethod\n    def _order_points(pts):\n        '''\n        orders points counter clockwise\n        :param pts: the (x,y) points numpy array\n        :type pts: np.ndarray\n        '''\n        rect = np.zeros((4, 2), dtype=\"float32\")\n        sum_ = pts.squeeze().sum(axis=1)\n        rect[0] = pts[np.argmin(sum_)]\n        rect[2] = pts[np.argmax(sum_)]\n        diff = np.diff(pts.squeeze(), axis=1)\n        rect[1] = pts[np.argmin(diff)]\n        rect[3] = pts[np.argmax(diff)]\n        return rect\n\n    def _get_main_plate(self, struct, _main_plate_area):\n        '''\n        Returns the image part corresponding to the main plate,\\\n        warped, so that the plate is seen straight from above.\n        Oh what a beautiful plate!\n        :param struct: the image structure\n        :type struct: :class:`ImageProcessing.data.Data`\n        :param _main_plate_area: The main plate location contour\n        :param width2height_ratio: the ratio of width to height\\\n        of the produced plate\n        '''\n        if _main_plate_area is None:\n            LOGGER.debug(\"Main plate area is None\")\n            return None\n        res_ratio = struct.img.shape[0] / \\\n            float(struct.img.shape[0])  # note: this ratio is always 1.0 as written\n        _main_plate_area = self._order_points(np.array(\n            [np.int0(res_ratio * pt) for pt in _main_plate_area]))\n        hor_line = _main_plate_area[1] - _main_plate_area[0]\n        ver_line = _main_plate_area[2] - _main_plate_area[1]\n        width = np.abs(hor_line[0])\n        height = np.abs(ver_line[1])\n        trans_pts = np.float32(\n            [[0, 0], [width, 0], [width, height], [0, height]])\n        transform_matrix = cv2.getPerspectiveTransform(\n            np.float32(_main_plate_area), trans_pts)\n\n        ret_img = cv2.warpPerspective(\n            struct.img,\n            transform_matrix,\n            (int(width),\n             int(height)))\n        return ret_img\n\n    def run(self, struct):\n        '''\n        :param struct: the image structure to process\n        :type struct: :class:`ImageProcessing.data.Data`\n        '''\n        img = self._get_main_plate(\n            struct,\n            self._get_main_plate_area(struct, self.rectangles_detector.run(struct)))\n        if img is None:\n            img = struct.img\n        return img\n","sub_path":"Metis/file-parser/ImageProcessing/DocumentDenoising/shape_cropper.py","file_name":"shape_cropper.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"488585260","text":"'''\nDescMergeSort(array)\n'''\ndef DescMergeSort(array):\n    if len(array) <= 1:\n        return array\n    mid = int(len(array)/2)\n    left = DescMergeSort(array[:mid])\n    right = DescMergeSort(array[mid:])\n    return DescMergeProcess(left,right)\n\ndef DescMergeProcess(left,right):\n    result=[]\n    i,j=0,0\n    while i<len(left) and j<len(right):\n        if left[i] >= right[j]:\n            result.append(left[i])\n            i+=1\n        else:\n            result.append(right[j])\n            j+=1\n    result += left[i:]\n    result += right[j:]\n    return result \n'''\nRiseMergeSort(array)\n'''\ndef RiseMergeSort(array):\n    array_t = [[array[0]]]\n    for i in range(1,len(array)):\n        array_t = array_t + [[array[i]]]\n\n    if len(array)%2 == 0:\n        length = len(array)/2\n    else:\n        length = len(array)/2+1\n\n    tmp=\"\"\n    for t in range(0,length): #create new array\n        tmp = \"t\" + tmp\n    array_reg = list(tmp)\n\n    k=0\n    if len(array_t)%2 == 0:\n        for i in range(0,len(array_t),2):\n            array_reg[k] = RiseSort(array_t[i],array_t[i+1])\n            k += 1\n    else:\n        for i in range(0,len(array_t)-2,2):\n            
array_reg[k] = RiseSort(array_t[i],array_t[i+1])\n k += 1\n array_reg[k] = array_t[len(array_t)-1]\n\n while 1:\n if len(array_reg) == 1:\n break\n else:\n array_reg = RiseMergeProcess(array_reg)\n\n return array_reg[0]\n\ndef RiseMergeProcess(array):\n k=0\n size = len(array[0])\n length = len(array)\n if length%2 == 0:\n for i in range(0,length,2):\n array[k] = RiseSort(array[i],array[i+1])\n k+=1\n array = array[:length/2]\n else:\n for i in range(0,length-2,2):\n array[k] = RiseSort(array[i],array[i+1])\n k+=1\n array = array[:length/2] + [array[length-1]]\n return array\n\ndef RiseSort(arrayA,arrayB):\n lenA = len(arrayA)\n lenB = len(arrayB)\n tmp=\"\"\n for i in range(0,lenA+lenB):\n tmp = \"t\"+tmp\n arrayOk = list(tmp)\n \n k=0\n i=0\n j=0\n while k <= lenA+lenB:\n if i == lenA:\n for k in range(j,lenB):\n arrayOk[k+lenA] = arrayB[k]\n return arrayOk\n elif j == lenB:\n for k in range(i,lenA):\n arrayOk[k+lenB] = arrayA[k]\n return arrayOk\n if arrayA[i] <= arrayB[j]:\n arrayOk[k] = arrayA[i]\n i+=1\n k+=1\n else:\n arrayOk[k] = arrayB[j]\n j+=1\n k+=1\n","sub_path":"algorithms/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"372574386","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom api.router import *\n\napp = FastAPI(title=\"Perp connect - partial backend\")\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Declare Routers\napp.include_router(tokens.router)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192329214","text":"import sys\nimport os\nimport math\nfrom numpy import linspace\nfrom scipy.io.wavfile import read\nfrom scipy import signal\nfrom numpy import *\nimport platform\nfrom matplotlib import *\nimport matplotlib.pyplot as plt\nfrom math import atan2,log\nfrom numpy import zeros,argmax,mean\nfrom scipy.fftpack import fft \nfrom scipy.fftpack import rfft\nfrom pylab import *\n\n#####################\n#Class function\n#####################\ndef roundup100(x):\n y = int(math.ceil(x/500.0)) *500\n #print(y)\n return y\n \ndef roundupT(x):\n y = int(math.ceil(x/0.5)) *0.5\n #print(y)\n return y\n \nclass FFT_audio:\n\n def __init__(self,a,b,imr):\n self.a=a\n self.b=b\n self.imr=imr\n pass\n \n def fft_data(self):\n num=len(self.b)\n\n noct=int(log(num)/log(2))\n \n if(noct>20):\n noct=20\n\n num_fft=2**noct\n\n bb=self.b[0:num_fft]\n \n if(self.imr==1):\n bb=bb-mean(bb)\n\n dur_fft=self.a[num_fft-1]-self.a[0]\n\n df=1/dur_fft\n\n z =fft(bb)\n\t\n k= numpy.fft.fftfreq(len(bb))[range(0,num_fft)]\n freq_pwr = 10*log10(1e-20+abs(rfft(bb,num_fft)))\n #fo = open(\"power.txt\",\"a\")\n #fo.write(str(freq_pwr))\t\n #fo.write(' ')\t\n #fo.close()\t\n \t\t\n nhalf=num_fft/2\n\n zz=zeros(nhalf,'f')\n ff=zeros(nhalf,'f')\n ph=zeros(nhalf,'f')\n\n freq=zeros(num_fft,'f')\n\n z/=float(num_fft)\n\t\n h=int(num_fft)\n\t\n for k in range(0,int(num_fft)):\n freq[k]=k*df\n \n ff=freq[0:nhalf]\n \n for k in range(0,int(nhalf)): \n\n if(k > 0):\t\t\t \n zz[k]=2.*abs(z[k])\n else: \n zz[k]= abs(z[k])\n\n ph[k]=atan2(z.real[k],z.imag[k])\n \n\n idx = argmax(abs(zz)) \n \n return idx,freq,ff,z,zz,ph,nhalf,df,num_fft \n \nsumValueMean = 0\nsumValueFreq = 
0\n#####################\n#open files function\n#####################\n#file_path = sys.argv[1]\nfor x in range (1,6):\n    file_path = 'resources/filter_'+ str(x) + '.wav'\n    rate,data=read(file_path)\n    nad=data.ndim\n    #print(file_path)\n\n    if(nad==2):\n        np=data.shape[0]\n        nr=data.shape[1]\n\n    dt=1/float(rate)\n\n    t=linspace(0,(np-1)*dt,np)\n\n    #####################\n    #Process data \n    #####################\n    imr=1 # mean removal\n    idx1,freq1,ff1,z1,zz1,ph1,nhalf1,df1,num_fft1=FFT_audio(t,data[:,0],imr).fft_data() \n    idx2,freq2,ff2,z2,zz2,ph2,nhalf2,df2,num_fft2=FFT_audio(t,data[:,1],imr).fft_data()\n\n    q1= data[:,0]\n    q2= data[:,1]\n\n    Pxx1, freqs1, bins1, im1 = specgram(q1,NFFT=1024, Fs=44100)\n    #print ('PxxL = ')\n    #print (Pxx)\t \n    xxxx1= mean(Pxx1)\n    sumValueMean += xxxx1\n    #print (\"Mean equals Ch1= %8.6g \"% (xxxx1) ) \n    #print (xxxx1)\n    Pxx2, freqs2, bins2, im2 = specgram(q2,NFFT=1024, Fs=44100)\n    #print ('PxxR = ')\n    #print (Pxx)\n    xxxx2= mean(Pxx2)\n    sumValueMean += xxxx2\n    #print (\"Mean equals Ch2= %8.6g \"% (xxxx2) ) \n    #print (xxxx2)\t\n\n    #print (\"Ch 1 Peak Amp at Freq=%8.4g Hz \" %(ff1[idx1])) \n    #print (\"Ch 2 Peak Amp at Freq=%8.4g Hz \" %(ff2[idx2]))\n    sumValueFreq += ff1[idx1]  # channel 1 peak frequency\n    sumValueFreq += ff2[idx2]  # channel 2 peak frequency\n    \navgVal = sumValueMean/10\navgValFreq = sumValueFreq /10\n#print(avgVal)\n#print(avgValFreq)\na = roundupT(avgVal)\nb = roundup100(avgValFreq)\n\ntmp = str(a) + str(b)\nprint (tmp)\n","sub_path":"resources/sound_key.py","file_name":"sound_key.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"74821395","text":"from django.shortcuts import render\nfrom institute.models import *\nfrom django.contrib.auth.models import User\nimport os\nfrom PIL import Image  # used by create_media_from_editor below\nfrom django.contrib.auth.decorators import login_required\nfrom django import forms\nfrom utils import *\nfrom django.views.generic.edit import UpdateView, CreateView\nfrom forms import ProfileEditForm\nfrom customMW import get_request\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\nimport StringIO\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.http import HttpResponseRedirect\n\n\n#.....Signal For PI Save....#\n@receiver(post_save,sender=Pi)\ndef pi_sync_all(sender,instance,**kwargs):\t\n\tif not os.path.exists(instance.path):\n\t\tos.makedirs(instance.path)\n\tif not os.path.exists(instance.path+instance.institute.logo_filename):\n\t\tos.link(instance.institute.logo.path, instance.path+instance.institute.logo_filename)\n\tif not hasattr(instance,\"_flag\"):\n\t\tinstance.create_or_update_wifi()\n\t\tinstance.create_or_update_config()\n\n#.....Signal For Institute Save....#\n@receiver(post_save,sender=Institute)\ndef institute_create(sender, instance, created, **kwargs):\n\tif created:\n\t\tConfiguration.objects.create(name=\"default\",institute=instance)\n\n#.....Signal For Media Save....#\n@receiver(post_save,sender=Media)\ndef media_sync_all(sender,instance,**kwargs):\n\tif not hasattr(instance,\"_flag\"):\n\t\tinstance.update_media_file_to_pi()\n\n\n#.....Signal For Media Delete....#\n# @receiver(post_delete,sender=Media)\n# def media_delete(sender,instance,**kwargs):\n# \tinstance.delete_media_file_from_all_pis()\n\t\n\n#.....Signal For User Create....#\n@receiver(post_save,sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n\tif not get_request().user.is_superuser:\n\t\tif 
created:\n\t\t\tUserProfile.objects.create(user=instance,institute=get_request().user.get_profile().institute)\n\n\n#.....Media page....#\t\n\n@login_required\ndef media(request):\n\tinstallation_id = request.GET.get('screen',\"\")\n\tasset_id=request.GET.get('asset_id',\"\")\n\tscreen_details= Pi.objects.get(installation_id=installation_id) if installation_id!=\"\" else \"\" \n\tscreen = {'screen':request.GET.get('screen',\"\"),'screen_details':screen_details,'asset_id':asset_id}\n\treturn render(request,'institute/media.html',screen)\n\n#.....screens page....#\n@login_required\ndef screens(request):\n\tinstitute=request.user.get_profile().institute\n\tscreen_count =len(institute.pi_set.all())\n\tuser_count =len(institute.userprofile_set.all())\n\tall_media = institute.media_set.all()\n\tactive_media=sum(1 for media in all_media if media.is_active)\n\tinactive_media=sum(1 for media in all_media if not media.is_active)\n\treturn render(request,'institute/screens.html',{'institute':institute,'active':active_media,'inactive':inactive_media,'screen_count':screen_count,'user_count':user_count,'page':'screens'})\n\n#.....screens page....#\n@login_required\ndef configuration(request):\n\treturn render(request,'institute/config.html',{'page':'config'})\n\n#.....profile page...#\n@login_required \ndef profile_view(request):\n\tuserprofile = request.user.get_profile()\n\t# userprofile = UserProfile.objects.get(user=request.user)\n\treturn render(request, 'institute/view_profile.html', {'profile':userprofile,'page':'profile'})\n\ndef preview(request,installation_id):\n\tpi=Pi.objects.get(installation_id=installation_id)\n\tmedias=pi.media_set.all()\n\tmedia_list=[]\n\tfor media in medias:\n\t\tif media.is_active:\n\t\t\tmedia_dict =media.get_dict()\n\t\t\tmedia_list.append(media_dict)\n\tconfig = pi.configuration.get_dict()\n\tcontent = json.dumps({'adv':media_list,'conf':config})\n\treturn render(request,'institute/preview.html',{'pi':pi,'content':content})\n\nclass MessageMixin():\n def get_object(self, queryset=None):\n \"\"\" Hook to ensure object is owned by request.user. 
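The Django views module above wires its side effects through post_save receivers: the created flag separates the first INSERT from later updates, and the hasattr(instance, '_flag') guard stops save/signal/save loops. A minimal sketch of the same idiom, assuming the repo's app layout; the real handler also fills institute from the current request via customMW.

```python
# Sketch of the post_save receiver pattern used in views.py above.
# Assumes the same Django app layout; get_or_create keeps it idempotent.
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

from institute.models import UserProfile


@receiver(post_save, sender=User)
def ensure_profile(sender, instance, created, **kwargs):
    # "created" is True only for the INSERT that first saved this row.
    if created:
        # The project's own handler also sets `institute` from the
        # current request (via customMW); omitted here for brevity.
        UserProfile.objects.get_or_create(user=instance)
```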
\"\"\"\n        # obj = super(ProfileEdit, self).get_object()\n        # if not obj.owner == self.request.user:\n        #     raise Http404\n        # return obj\n        return self.request.user.get_profile()\n\nclass ProfileEdit(MessageMixin,UpdateView):\n    model=UserProfile\n    form_class = ProfileEditForm\n    template_name = \"institute/edit_profile.html\"\n    success_url = \"/manage/profile/\"\n    redirect_field_name = None\n\nprofile_edit = login_required(ProfileEdit.as_view())\n\ndef editor(request):\n\treturn render(request,'institute/editor.html')\n\ndef create_media_from_editor(request):\n\turl= request.GET.get('url',\"\")\n\tname= os.path.basename(url)\n\timage =Image.open(url)\n\timage_io = StringIO.StringIO()\n\timage.save(image_io, format='JPEG')\n\timage_file = InMemoryUploadedFile(image_io, None, name, 'image/jpeg',image_io.len, None)\n\tmedia_obj= Media.objects.create(name=name,start_date=datetime.datetime.now(),end_date=datetime.datetime.now(),upload_file=image_file,duration=4)\n\tmedia_obj.save()\n\tredirect_url ='/manage/media/?asset_id='+str(media_obj.id)\n\treturn HttpResponseRedirect(redirect_url)","sub_path":"django/institute/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"568308448","text":"import matplotlib.pyplot as plt  # needed for the plt.gca() calls below\n\n\ndef draw_redraw(p, fig, axs):\n    if fig is None:\n        # If this is the first time a plot is made in the notebook, we let plotnine create a new\n        # matplotlib figure and axis.\n        fig, plot = p.draw(return_ggplot=True)\n        axs = plot.axs\n    else:\n\n        #p = copy(p)\n        # This helps keeping old selected data from being visualized after a new selection is made.\n        # We delete all previously created artists from the matplotlib axis.\n        for artist in plt.gca().lines +\\\n            plt.gca().collections +\\\n            plt.gca().artists + plt.gca().patches + plt.gca().texts:\n            artist.remove()\n\n        # If a plot is being updated, we re-use the figure and axis created before.\n        p._draw_using_figure(fig, axs)\n    return fig, axs\n","sub_path":"tools/draw_redraw.py","file_name":"draw_redraw.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511871129","text":"import cv2 as cv\nimport sys\nimport astar\nimport time\n\n# Definition of Class Planner:\nclass Planner:\n    # Class to initiate the planner and to store the values of the output path\n    # self: Object of class planner\n    def __init__(self, strPoint, endPoint, mapName):\n        self.START_POINT = strPoint #[x, y]\n        self.GOAL_POINT = endPoint #[x, y]\n        self.EXPLORED = {} # x,y,theta and Index\n        self.RADIUS = 1 # Radius of bot 0.263 m\n        self.STEP_OBJECT_LIST = []\n        self.COST_MAP_DICT = {} # Index and Cost\n        self.CLEARANCE = 7\n\n        self.obstacle_map = cv.imread(mapName) # \"obs_map_easy.png\"\n        self.gray_map = cv.cvtColor(self.obstacle_map, cv.COLOR_BGR2GRAY)\n        x, y, _ = self.obstacle_map.shape\n        self.MAX_X = x\n        self.MAX_Y = y\n\n    def initiatePlanning(self):\n        isPossible = 0\n        if self.START_POINT[0] >= -self.MAX_X and self.START_POINT[0] <= self.MAX_X and self.START_POINT[1] >= -self.MAX_Y and self.START_POINT[\n            1] <= self.MAX_Y and (\n                self.isValidStep(self.START_POINT, self.RADIUS + self.CLEARANCE) == True):\n            isPossible += 1\n        else:\n            print(\"Invalid Start Point\")\n\n        if self.GOAL_POINT[0] >= -self.MAX_X and self.GOAL_POINT[0] <= self.MAX_X and self.GOAL_POINT[1] >= -self.MAX_Y and self.GOAL_POINT[1] <= self.MAX_Y and (\n                self.isValidStep(self.GOAL_POINT, self.RADIUS + self.CLEARANCE) == True):\n            
isPossible += 1\n else:\n print(\"Invalid Goal Point\")\n\n # To check if both the values are possible to work with in the puzzle\n if isPossible == 2:\n root = astar.Step(None, self.START_POINT, self) # START_POINT[2], None, None) # Starting the linked list with start point as the root\n\n start_time = time.time()\n while True: # to keep traversing until the goal area is found\n topKey = next(iter(self.COST_MAP_DICT))\n self.COST_MAP_DICT.pop(topKey)\n poppedStep = self.STEP_OBJECT_LIST[topKey]\n if self.inGoal(poppedStep.position) == True:\n break\n else:\n poppedStep.generateSteps()\n self.COST_MAP_DICT = {index: totalcost for index, totalcost in\n sorted(self.COST_MAP_DICT.items(), key=lambda cost: cost[1])} # EXPLORED.sort()\n\n end_time = time.time()\n\n #print(\"Total Cost to reach the final Point:\", poppedStep.costToCome)\n\n #print(\"total time for A star in seconds: \", end_time - start_time)\n return(self.backtrack(poppedStep)) # To show the backtrack on the graph\n\n else:\n print(\"Exiting the Algorithm\")\n return([])\n #sys.exit(0)\n\n def isValidStep(self, position, clearance):\n posX = position[0]\n posY = position[1]\n i = 0\n while (i <= clearance):\n try:\n if self.gray_map[posX][posY] > 127 or self.gray_map[posX + i][posY] > 127 or self.gray_map[posX][posY + i] > 127 or self.gray_map[posX + i][\n posY + i] > 127:\n return False\n i = i + 1\n except:\n return False\n return True\n\n def showPath(self, pathValues, Explored):\n for exp in Explored.keys():\n pos = exp.split(',')\n cv.circle(self.obstacle_map, (int(pos[1]), int(pos[0])), 1, (255, 255, 0), 1)\n\n for pathpos in pathValues:\n cv.circle(self.obstacle_map, (int(pathpos[1]), int(pathpos[0])), 1, (0, 0, 255), 1)\n\n cv.imshow(\"Map\", self.obstacle_map)\n while (1):\n key = cv.waitKey(100) & 0xff\n\n def backtrack(self, stepObj):\n pathValues = []\n while stepObj.parent != None:\n pathValues.append([stepObj.position[0], stepObj.position[1]])\n stepObj = stepObj.parent\n pathValues.append([stepObj.position[0], stepObj.position[1]])\n\n pathValues.reverse()\n\n #print(\"length of step_object_list\", len(self.STEP_OBJECT_LIST))\n #print(\"length of the pathvalues\", len(pathValues))\n #print(pathValues)\n #self.showPath(pathValues, self.EXPLORED)\n return pathValues\n\n def inGoal(self, position):\n x, y = position[0], position[1]\n if ((x - self.GOAL_POINT[0]) ** 2 + (y - self.GOAL_POINT[1]) ** 2 <= (0.1) ** 2):\n return True\n else:\n return False","sub_path":"planner.py","file_name":"planner.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"108448991","text":"# -*- coding: utf-8 -*-\n\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.gui import *\n\nfrom utils import *\n\nfrom ui_settingsdialog import Ui_SettingsDialog\n\nclass SettingsDialog( QDialog, Ui_SettingsDialog ):\n changed = pyqtSignal(name = 'changed')\n\n def __init__( self, parent = None ):\n super( SettingsDialog, self).__init__(parent )\n self.setupUi( self )\n self.setAttribute( Qt.WA_DeleteOnClose )\n\n self.tolerance = float( QSettings().value(SETTINGS_NAME + \"/tolerance\", DEFAULT_TOLERANCE ))\n self.splineToleranceSpinBox.setValue( self.tolerance )\n\n self.tightness = float( QSettings().value(SETTINGS_NAME + \"/tightness\", DEFAULT_TIGHTNESS ) )\n self.splineTightnessSpinBox.setValue( self.tightness )\n\n self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.ok)\n 
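This settings dialog persists its spline parameters through Qt's QSettings, keyed as SETTINGS_NAME + '/key' and coerced with float() because stored values come back as strings or QVariants. The round-trip in isolation, with stand-in constants:

```python
# Minimal round-trip through QSettings, as the dialog above does.
# SETTINGS_NAME and DEFAULT_TOLERANCE are stand-ins for the plugin's
# real constants from its utils module.
from PyQt4.QtCore import QSettings

SETTINGS_NAME = 'demo_plugin'
DEFAULT_TOLERANCE = 1.0


def load_tolerance():
    # value() may hand back a string/QVariant, hence the float() coercion
    return float(QSettings().value(SETTINGS_NAME + '/tolerance', DEFAULT_TOLERANCE))


def save_tolerance(tol):
    QSettings().setValue(SETTINGS_NAME + '/tolerance', tol)
```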
self.buttonBox.button(QDialogButtonBox.Cancel).clicked.connect(self.cancel)\n #self.buttonBox.button(QDialogButtonBox.Reset).clicked.connect(self.reset)\n self.buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.defaults)\n self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)\n\n def ok(self):\n self.apply()\n self.close()\n\n def apply(self):\n QSettings().setValue(SETTINGS_NAME+\"/tolerance\", self.splineToleranceSpinBox.value())\n QSettings().setValue(SETTINGS_NAME+\"/tightness\", self.splineTightnessSpinBox.value() )\n self.changed.emit()\n\n def cancel(self):\n self.close()\n\n \n def defaults(self):\n self.splineToleranceSpinBox.setValue( DEFAULT_TOLERANCE )\n self.splineTightnessSpinBox.setValue( DEFAULT_TIGHTNESS )\n","sub_path":"settingsdialog.py","file_name":"settingsdialog.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"135588898","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport sys\nimport requests\n\nclass Downloader(QDialog):\n\n def __init__(self):\n super(Downloader,self).__init__()\n layout = QVBoxLayout()\n\n self.url = QLineEdit()\n self.url.setPlaceholderText('URL')\n\n self.save_location = QLineEdit()\n self.save_location.setPlaceholderText('File save location')\n\n self.progress = QProgressBar()\n self.progress.setValue(0)\n self.progress.setAlignment(Qt.AlignHCenter)\n\n downloadbtn = QPushButton('Download')\n browse = QPushButton('Browse')\n\n layout.addWidget(self.url)\n layout.addWidget(self.save_location)\n layout.addWidget(browse)\n layout.addWidget(self.progress)\n layout.addWidget(downloadbtn)\n\n self.setLayout(layout)\n self.setWindowTitle('PyDownloader')\n self.setFocus()\n\n downloadbtn.clicked.connect(self.download)\n browse.clicked.connect(self.browse_file)\n\n def browse_file(self):\n save_file = QFileDialog.getSaveFileName(self,caption='Save file as',directory='C:\\\\',filter='All Files (*.*)')\n self.save_location.setText(QDir.toNativeSeparators(save_file))\n\n def download(self):\n session = requests.Session()\n try:\n urlWithProtocol = str(self.url.text())\n if(not(urlWithProtocol.startswith('http'))):\n urlWithProtocol = 'http://' + urlWithProtocol\n response = session.get(urlWithProtocol, stream=True)\n\n except Exception as e:\n print(e)\n QMessageBox.warning(self,'Warning','Download failed')\n return\n total_length = response.headers.get('content-length')\n\n with open(self.save_location.text(), \"wb\") as f:\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n received_length = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n received_length += len(data)\n f.write(data)\n self.report(received_length,total_length)\n\n QMessageBox.information(self,'Information','The download is complete!')\n self.progress.setValue(0)\n\n def report(self,received_length,total_length):\n percent = (received_length * 100)/total_length\n self.progress.setValue(int(percent))\n\napp = QApplication(sys.argv)\ndialog = Downloader()\ndialog.show()\nsys.exit(app.exec_())\n","sub_path":"example2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"270552135","text":"#GIT Verkefni Forritun, Along Alexander Loftsson. 
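The PyQt Downloader above streams the response in 4 KB chunks and derives a progress percentage from the content-length header when the server sends one. The same download logic without the Qt plumbing; the URL and destination path are placeholders.

```python
# Chunked download with progress, as in Downloader.download/report above.
import requests


def download(url, dest, chunk_size=4096, report=None):
    response = requests.get(url, stream=True)
    total_length = response.headers.get('content-length')
    with open(dest, 'wb') as f:
        if total_length is None:   # no content-length header: write in one go
            f.write(response.content)
            return
        total_length = int(total_length)
        received = 0
        for chunk in response.iter_content(chunk_size=chunk_size):
            received += len(chunk)
            f.write(chunk)
            if report is not None:
                report(received * 100 // total_length)


download('http://example.com/file.bin', '/tmp/file.bin', report=print)
```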
25.1.2017\n\nprint(\"1.\")\nprint()\n#t1 = number 1, t2 = number 2.\nt1 = int(input(\"Enter number 1: \"))\nt2 = int(input(\"Enter number 2: \"))\n\nprint(\"The numbers added together are:\", t1+t2)\nprint(\"The numbers multiplied together are:\", t1*t2)\n\nprint()\nprint(\"2.\")\nprint()\n\nnafn = input(\"Enter first name: \")\neftirnafn = input(\"Enter last name: \")\nprint(\"Hello\", nafn, eftirnafn)\n\nprint()\nprint(\"3.\")\nprint()\n\nstor = 0 #Count of uppercase letters\nlitil = 0 #Count of lowercase letters\neftir = 0 #Lowercase letters that come right after an uppercase letter.\nstada = -1 #Used to track the preceding character when counting uppercase-before-lowercase.\ntexti = input(\"Enter some text: \")\nlengd = int(len(texti)) #Used to run the for loop over an integer length.\n\ntextlisti = list(texti)\n\nfor i in textlisti:\n    if i.isupper():\n        stor += 1\n    elif i.islower():\n        litil += 1\n\n#If textlisti[x] (the current character) is LOWERCASE and textlisti[stada] (the character before it) is UPPERCASE, then eftir += 1.\nfor x in range(lengd):\n    if textlisti[x].islower() and textlisti[stada].isupper():\n        stada += 1\n        eftir += 1\n    else:\n        stada += 1\n\nprint(\"This text contains\",stor, \"uppercase letters,\", litil, \"lowercase letters and\", eftir, \"lowercase letters that come after an uppercase letter.\")\n\n\n\n\n\n\n\n\n","sub_path":"Git FORRITUN Verkefni.py","file_name":"Git FORRITUN Verkefni.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"572008008","text":"import boto3,argparse,json\nfrom aws_provision import AwsClient\n \ndefaults = {\"key\" : None, \"secret\" : None}\nparser = argparse.ArgumentParser()\nparser.add_argument('-k',\"--key\")\nparser.add_argument('-s',\"--secret\")\nargs = parser.parse_args()\n \ncommand_line_args = {key:value for key,value in vars(args).items() if value}\nkey = command_line_args[\"key\"]\nsecret = command_line_args[\"secret\"]\n\n#lambda_client = AwsClient('lambda',key,secret).client\n#test_event = dict(plot_url=\"https://plot.ly/~chelsea_lyn/9008/\")\n# for i in range(1,100):\n#     lambda_client.invoke(\n#         FunctionName='myLambdaFunction',\n#         InvocationType='Event',\n#         Payload=json.dumps(test_event),\n#     )\n","sub_path":"invoke_function.py","file_name":"invoke_function.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"594632892","text":"# -*- coding: utf-8 -*-\n# import os\nimport scrapy\nimport pdb\nimport datetime, time\nimport re\nimport json\nfrom lxml import etree\nimport requests\nimport pymysql.cursors\n\n\nmatch_name_dict = {\n    'SPA CUP': '西杯',\n    'SPA D1': '西甲',\n    'SPA D2': '西乙',\n    'GER D1': '德甲',\n    'GER D2': '德乙',\n    'ENG PR': '英超',\n    'ENG LCH': '英冠',\n    'ENG FAC': '英足总杯',\n    'ITA D1': '意甲',\n    'ITA D2': '意乙',\n    'FRA D1': '法甲',\n    'FRA D2': '法乙',\n    'FRAC': '法国杯',\n    'POR D1': '葡超',\n    'SCO PR': '苏超',\n    'SCO CH': '苏冠',\n    'HOL D1': '荷甲',\n    'HOL D2': '荷乙',\n    'SWE D1': '瑞典超',\n    'FIN D1': '芬兰超',\n    'NOR D1': '挪超',\n    'DEN D1': '丹麦超',\n    'AUT D1': '奥地利超',\n    'SUI Sl': '瑞士超',\n    'IRE PR': '爱尔兰超',\n    'RUS PR': '俄超',\n    'POL D1': '波兰超',\n    'BEL D1': '比甲',\n    'BEL D2': '比乙',\n    'AUS D1': '澳超',\n    'GRE D1': '希腊超',\n    'ICE PR': '冰岛超',\n    'TUR D1': '土超',\n    'ISL': '印度超',\n    'IND D1': '印度甲',\n    'BGD D1': '孟加拉超',\n    # 'EGY D1': '埃及超',\n    # 'INT CF': '国际友谊',\n}\n\ncurrent_hour = time.localtime()[3] # 获取当前的小时数,如果小于8则应该选择yesterday\nnowadays = datetime.datetime.now().strftime(\"%Y-%m-%d\") # 获取当前日期 格式2018-01-01\nyesterdy = (datetime.datetime.now() + datetime.timedelta(days=-1)).strftime(\"%Y-%m-%d\") # 获取昨天日期\nif current_hour 
< 8: # 默认在早上八点前拉取昨天页面\n search_date = yesterdy\nelse:\n search_date = nowadays\n\ncompleted_match_list = []\n\n# 比赛列表 item\nclass match_list_Item(scrapy.Item):\n match_list = scrapy.Field() # 已经计算得到的比赛list\n search_date = scrapy.Field() # search_date 用来建表\n\nclass OddSpider(scrapy.Spider):\n name = 'auto_teams_analysis'\n allowed_domains = ['http://info.livescore123.com/']\n\n # 包装url\n start_urls = []\n url = 'http://info.livescore123.com/1x2/company.aspx?id=177&company=PinnacleSports'\n start_urls.append(url)\n\n # Connect to the database\n # 去拿取已经获得首发率的match_id放入列表中,不去服务器拉取该比赛球员数据\n db_name = 'auto_teams_rate'\n config = {\n 'host': '127.0.0.1',\n 'user': 'root',\n 'password': '19940929',\n 'db': db_name,\n 'charset': 'utf8mb4',\n 'cursorclass': pymysql.cursors.DictCursor\n }\n connection = pymysql.connect(**config)\n print('连接至数据库:' + db_name)\n try:\n with connection.cursor() as cursor:\n # 设置当前表名\n tableName = 'teams_' + search_date.replace('-', '_') # 当前查询日期为表名\n cursor.execute('SELECT * FROM %s WHERE home_rate>0 and away_rate>0' % tableName)\n for match in cursor.fetchall():\n single_match = {}\n single_match['match_id'] = match['match_id']\n single_match['match_name'] = match['match_name']\n single_match['time_score'] = match['time_score']\n single_match['home_name'] = match['home_name']\n single_match['away_name'] = match['away_name']\n single_match['home_rate'] = match['home_rate']\n single_match['away_rate'] = match['away_rate']\n single_match['average_completed_match'] = match['average_completed_match']\n completed_match_list.append(single_match)\n # connection is not autocommit by default. So you must commit to save your changes.\n cursor.close()\n finally:\n connection.close()\n\n # Connect to the database\n # 去拿取已经获得首发率的match_id放入列表中,不去服务器拉取该比赛球员数据\n db_name = 'auto_teams_analysis'\n config = {\n 'host': '127.0.0.1',\n 'user': 'root',\n 'password': '19940929',\n 'db': db_name,\n 'charset': 'utf8mb4',\n 'cursorclass': pymysql.cursors.DictCursor\n }\n connection = pymysql.connect(**config)\n print('连接至数据库:' + db_name)\n try:\n with connection.cursor() as cursor:\n # 设置当前表名\n tableName = 'teams_' + search_date.replace('-', '_') # 当前查询日期为表名\n # 建立当前队伍表\n build_table = (\n \"CREATE TABLE IF NOT EXISTS \"' %s '\"\"\n \"(match_id VARCHAR(20) NOT NULL PRIMARY KEY,\"\n \"match_name VARCHAR(50) NOT NULL,\"\n \"home_name VARCHAR(50) NOT NULL,\"\n \"away_name VARCHAR(50) NOT NULL,\"\n \"time_score VARCHAR(50) NOT NULL,\"\n \"home_rate FLOAT(8) NOT NULL,\"\n \"away_rate FLOAT(8) NOT NULL,\"\n \"average_completed_match INT(8) NOT NULL,\"\n \"support_direction VARCHAR(50) NOT NULL)\"\n )\n cursor.execute(build_table % tableName)\n # 建表完成\n # connection is not autocommit by default. 
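The spider keeps one table per date and formats the table name straight into the SQL string, since MySQL cannot bind identifiers as parameters, while row values should still go through execute()'s own %s binding; note the explicit commit(), because PyMySQL connections do not autocommit. A condensed sketch with stand-in credentials:

```python
# Condensed sketch of the PyMySQL usage above. The table name is formatted
# into the SQL (identifiers cannot be bound); values use %s binding.
# Host/user/password/db are stand-ins.
import pymysql.cursors

connection = pymysql.connect(host='127.0.0.1', user='root', password='secret',
                             db='auto_teams_rate',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
try:
    with connection.cursor() as cursor:
        table = 'teams_2018_01_01'  # derived from the search date, as above
        cursor.execute('SELECT * FROM %s WHERE home_rate > %%s AND away_rate > %%s' % table,
                       (0, 0))
        rows = cursor.fetchall()
    connection.commit()  # PyMySQL does not autocommit by default
finally:
    connection.close()
```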
So you must commit to save your changes.\n cursor.close()\n finally:\n connection.close()\n print(\"已经获得首发的比赛列表:\", completed_match_list)\n\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url)\n\n # 分析每行信息\n def parse(self, response):\n odd_match_list = []\n need_step = False # 标志是否要跳过\n for tr in response.xpath('//table[contains(@class,\"schedule\")]').xpath('tr'):\n # 如果没有tr_id说明是头部或者需要跳过则 跳过\n if len(tr.xpath('@id')) == 0 or need_step:\n need_step = False\n continue\n tr_id = tr.xpath('@id').extract()[0]\n # 如果下面成立说明是最新赔率行\n if tr_id.split('_')[0].split('tr')[-1] != '':\n single_match_tr_index = 2\n else:\n single_match_tr_index = 1\n if single_match_tr_index == 1:\n if len(tr.xpath('td')[0].xpath('text()').extract()) == 0:\n need_step = True\n continue\n league_name = tr.xpath('td')[0].xpath('text()').extract()[0] # 联赛名称,是英文,需要用字典转为中文\n # 如果不在要获取的联赛列表中就跳过,要调过两个tr\n if not league_name in match_name_dict.keys():\n need_step = True\n continue\n start_time_year = int(tr.xpath('td')[1].xpath('script/text()').extract()[0].replace('showtime(', '').replace(')', '').split(',')[0])\n start_time_month = int(tr.xpath('td')[1].xpath('script/text()').extract()[0].replace('showtime(', '').replace(')', '').split(',')[1].split('-')[0])\n start_time_day = int(tr.xpath('td')[1].xpath('script/text()').extract()[0].replace('showtime(', '').replace(')', '').split(',')[2])\n start_time_hour = int(tr.xpath('td')[1].xpath('script/text()').extract()[0].replace('showtime(', '').replace(')', '').split(',')[3])\n start_time_minu = int(tr.xpath('td')[1].xpath('script/text()').extract()[0].replace('showtime(', '').replace(')', '').split(',')[4])\n start_time = datetime.datetime(start_time_year, start_time_month, start_time_day, start_time_hour, start_time_minu) + datetime.timedelta(hours=8)\n start_mktime = time.mktime(start_time.timetuple())\n now_mktime = time.time()\n # 如果当前时间比开始时间小-3600s,则结束遍历,不再往下查找\n if (now_mktime-start_mktime) < -3600:\n break\n start_time_text = start_time.strftime('%Y-%m-%d %H:%M')\n home_name = tr.xpath('td')[2].xpath('a/text()').extract()[0]\n away_name = tr.xpath('td')[10].xpath('a/text()').extract()[0]\n home_original_odd = tr.xpath('td')[3].xpath('text()').extract()[0]\n draw_original_odd = tr.xpath('td')[4].xpath('text()').extract()[0]\n away_original_odd = tr.xpath('td')[5].xpath('text()').extract()[0]\n home_original_probability = tr.xpath('td')[6].xpath('text()').extract()[0]\n draw_original_probability = tr.xpath('td')[7].xpath('text()').extract()[0]\n away_original_probability = tr.xpath('td')[8].xpath('text()').extract()[0]\n original_payBack_rate = tr.xpath('td')[9].xpath('text()').extract()[0]\n single_match_dict = {}\n single_match_dict['league_name'] = match_name_dict[league_name]\n single_match_dict['start_time_text'] = start_time_text\n single_match_dict['home_name'] = home_name\n single_match_dict['away_name'] = away_name\n single_match_dict['home_original_odd'] = float(home_original_odd)\n single_match_dict['draw_original_odd'] = float(draw_original_odd)\n single_match_dict['away_original_odd'] = float(away_original_odd)\n single_match_dict['home_original_probability'] = round(float(home_original_probability.replace('%', ''))/100, 3)\n single_match_dict['draw_original_probability'] = round(float(draw_original_probability.replace('%', ''))/100, 3)\n single_match_dict['away_original_probability'] = round(float(away_original_probability.replace('%', ''))/100, 3)\n single_match_dict['original_payBack_rate'] = 
round(float(original_payBack_rate.replace('%', ''))/100, 3)\n odd_match_list.append(single_match_dict)\n else:\n match_index = len(odd_match_list)-1\n if len(tr.xpath('td')[0].xpath('text()').extract()) != 0:\n try:\n home_now_odd = tr.xpath('td')[0].xpath('text()').extract()[0]\n draw_now_odd = tr.xpath('td')[1].xpath('text()').extract()[0]\n away_now_odd = tr.xpath('td')[2].xpath('text()').extract()[0]\n home_now_probability = tr.xpath('td')[3].xpath('text()').extract()[0]\n draw_now_probability = tr.xpath('td')[4].xpath('text()').extract()[0]\n away_now_probability = tr.xpath('td')[5].xpath('text()').extract()[0]\n now_payBack_rate = tr.xpath('td')[6].xpath('text()').extract()[0]\n except:\n print('home_name:',odd_match_list[-1]['home_name'])\n pdb.set_trace()\n\n odd_match_list[match_index]['home_now_odd'] = float(home_now_odd)\n odd_match_list[match_index]['draw_now_odd'] = float(draw_now_odd)\n odd_match_list[match_index]['away_now_odd'] = float(away_now_odd)\n odd_match_list[match_index]['home_now_probability'] = round(\n float(home_now_probability.replace('%', '')) / 100, 3)\n odd_match_list[match_index]['draw_now_probability'] = round(\n float(draw_now_probability.replace('%', '')) / 100, 3)\n odd_match_list[match_index]['away_now_probability'] = round(\n float(away_now_probability.replace('%', '')) / 100, 3)\n odd_match_list[match_index]['now_payBack_rate'] = round(\n float(now_payBack_rate.replace('%', '')) / 100, 3)\n else:\n home_now_odd = odd_match_list[match_index]['home_original_odd']\n draw_now_odd = odd_match_list[match_index]['draw_original_odd']\n away_now_odd = odd_match_list[match_index]['away_original_odd']\n home_now_probability = odd_match_list[match_index]['home_original_probability']\n draw_now_probability = odd_match_list[match_index]['draw_original_probability']\n away_now_probability = odd_match_list[match_index]['away_original_probability']\n now_payBack_rate = odd_match_list[match_index]['original_payBack_rate']\n odd_match_list[match_index]['home_now_odd'] = home_now_odd\n odd_match_list[match_index]['draw_now_odd'] = draw_now_odd\n odd_match_list[match_index]['away_now_odd'] = away_now_odd\n odd_match_list[match_index]['home_now_probability'] = home_now_probability\n odd_match_list[match_index]['draw_now_probability'] = draw_now_probability\n odd_match_list[match_index]['away_now_probability'] = away_now_probability\n odd_match_list[match_index]['now_payBack_rate'] = now_payBack_rate\n\n # 打开chinese2english, 将之前保存的首发信息中的名称转换为英文再与当前odd列表中的信息进行模糊匹配找出那场比赛,进行计算\n with open('auto_teams_analysis/chinese2english.json', 'r', encoding='utf-8') as json_file:\n chinese2english = json.load(json_file)\n\n match_info_list = [] # 存取所有已经可以得到结果的比赛信息list\n for single_match in completed_match_list:\n match_id = single_match['match_id']\n match_name = single_match['match_name']\n time_score = single_match['time_score']\n average_completed_match = single_match['average_completed_match']\n if single_match['home_name'] in chinese2english.keys():\n home_name = chinese2english[single_match['home_name']]['name']\n else:\n home_name = single_match['home_name']\n if single_match['away_name'] in chinese2english.keys():\n away_name = chinese2english[single_match['away_name']]['name']\n else:\n away_name = single_match['away_name']\n home_rate = single_match['home_rate']\n away_rate = single_match['away_rate']\n\n # 查找对应的比赛\n try:\n patten_home_name_1 = home_name[0:3] + '.*?'\n regex_home_name_1 = re.compile(patten_home_name_1)\n patten_home_name_2 = home_name[-3:] + '.*?'\n 
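Lineup names and odds-page names rarely match exactly, so the code compiles regexes from the first three and the last three characters of each team name and accepts a hit on either. Here is the heuristic in isolation, with invented team names and re.escape added as a guard that the raw concatenation above lacks:

```python
# The prefix/suffix fuzzy-match heuristic in isolation; team names invented.
import re


def fuzzy_match(name, candidate):
    head = re.compile(re.escape(name[:3]) + '.*?')   # first three characters
    tail = re.compile(re.escape(name[-3:]) + '.*?')  # last three characters
    return bool(head.search(candidate) or tail.search(candidate))


print(fuzzy_match('Arsenal', 'Arsenal FC'))        # True: shared prefix 'Ars'
print(fuzzy_match('Man City', 'Manchester City'))  # True: shared suffix 'ity'
```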
regex_home_name_2 = re.compile(patten_home_name_2)\n patten_away_name_1 = away_name[0:3] + '.*?'\n regex_away_name_1 = re.compile(patten_away_name_1)\n patten_away_name_2 = away_name[-3:] + '.*?'\n regex_away_name_2 = re.compile(patten_away_name_2)\n except:\n pdb.set_trace()\n odd_home_name_list = [item['home_name'] for item in odd_match_list]\n odd_away_name_list = [item['away_name'] for item in odd_match_list]\n # pdb.set_trace()\n home_name_count = 0\n home_name_count_list = []\n for odd_home_name in odd_home_name_list:\n match_1 = regex_home_name_1.search(odd_home_name)\n match_2 = regex_home_name_2.search(odd_home_name)\n if match_1 or match_2:\n # 如果前或者后匹配到一个就将其count添加到列表中与away比较,取相同的count\n home_name_count_list.append(home_name_count)\n home_name_count += 1\n away_name_count = 0\n away_name_count_list = []\n for odd_away_name in odd_away_name_list:\n match_1 = regex_away_name_1.search(odd_away_name)\n match_2 = regex_away_name_2.search(odd_away_name)\n if match_1 or match_2:\n # 如果前或者后匹配到一个就将其count添加到列表中与home比较,取相同的count\n away_name_count_list.append(away_name_count)\n away_name_count += 1\n has_found = False\n fount_index = 0 # 本场比赛在odd_match_list中的index\n for home_count in home_name_count_list:\n for away_count in home_name_count_list:\n if home_count == away_count:\n # 如果找到相同的count就说明是这场比赛开始计算,否则continue\n has_found = True\n fount_index = home_count\n if not has_found:\n continue\n\n # 开始计算\n # 先检测是否已经存在首发,如果没有就跳过\n support_direction = ''\n if home_rate > 0 and away_rate > 0:\n if round(abs(home_rate - away_rate), 2) >= 0.25:\n if home_rate > away_rate:\n support_direction = '主队88%不败(1.15),77%取胜(1.3)'\n else:\n support_direction = '客队88%不败(1.15),77%取胜(1.3)'\n elif round(abs(home_rate - away_rate), 2) >= 0.10:\n big_probability_direction = '' # 1 表示主队是大概率方向,-1表示客队是大概率方向\n if odd_match_list[fount_index]['home_now_probability'] > odd_match_list[fount_index]['away_now_probability']:\n big_probability_direction = 1\n else:\n big_probability_direction = -1\n\n home_probability_change = odd_match_list[fount_index]['home_now_probability'] - odd_match_list[fount_index]['home_original_probability'] # 主胜初盘到目前变化的概率\n away_probability_change = odd_match_list[fount_index]['away_now_probability'] - odd_match_list[fount_index]['away_original_probability'] # 客胜初盘到目前变化的概率\n\n change_limit = 0.027 # 限制的概率变化\n # 只分析相同联赛的球队\n if home_rate > away_rate:\n # 主队首发率大于客队首发率\n if big_probability_direction == 1:\n if home_probability_change >= change_limit:\n support_direction = '主队77%取胜(1.3),88%不败(1.15)'\n elif home_probability_change <= -change_limit:\n support_direction = '客队77%不败(1.3),下半场前及时对冲'\n else:\n # 没有达到限制的概率变化\n if odd_match_list[fount_index]['home_now_probability'] > 0.85:\n support_direction = '主队66%胜两球(1.5),77%取胜(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 0.55:\n support_direction = '主队66%取胜(1.5),77%不败(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 0.30:\n support_direction = '主队66%不败(1.5),77%最多输一球(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 0.14:\n support_direction = '主队66%最多输一球(1.5),77%最多输两球(1.3)'\n elif big_probability_direction == -1:\n if away_probability_change >= change_limit:\n support_direction = '客队77%不败(1.3),下半场前及时对冲'\n elif away_probability_change <= -change_limit:\n support_direction = '主队77%取胜(1.3),88%不败(1.15)'\n else:\n # 没有达到限制的概率变化\n if odd_match_list[fount_index]['home_now_probability'] > 0.85:\n support_direction = '主队66%胜两球(1.5),77%取胜(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 
0.55:\n support_direction = '主队66%取胜(1.5),77%不败(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 0.30:\n support_direction = '主队66%不败(1.5),77%最多输一球(1.3)'\n elif odd_match_list[fount_index]['home_now_probability'] > 0.14:\n support_direction = '主队66%最多输一球(1.5),77%最多输两球(1.3)'\n else:\n # 客队首发率大于主队首发率\n if big_probability_direction == -1:\n if away_probability_change >= change_limit:\n support_direction = '客队77%取胜(1.3),88%不败(1.15)'\n elif away_probability_change <= -change_limit:\n support_direction = '主队77%不败(1.3),下半场前及时对冲'\n else:\n # 没有达到限制的概率变化\n if odd_match_list[fount_index]['away_now_probability'] > 0.85:\n support_direction = '客队66%胜两球(1.5),77%取胜(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.55:\n support_direction = '客队66%取胜(1.5),77%不败(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.30:\n support_direction = '客队66%不败(1.5),77%最多输一球(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.14:\n support_direction = '客队66%最多输一球(1.5),77%最多输两球(1.3)'\n elif big_probability_direction == 1:\n if home_probability_change >= change_limit:\n support_direction = '主队77%不败(1.3),下半场前及时对冲'\n elif home_probability_change <= -change_limit:\n support_direction = '客队77%取胜(1.3),88%不败(1.15)'\n else:\n # 没有达到限制的概率变化\n if odd_match_list[fount_index]['away_now_probability'] > 0.85:\n support_direction = '客队66%胜两球(1.5),77%取胜(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.55:\n support_direction = '客队66%取胜(1.5),77%不败(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.30:\n support_direction = '客队66%不败(1.5),77%最多输一球(1.3)'\n elif odd_match_list[fount_index]['away_now_probability'] > 0.14:\n support_direction = '客队66%最多输一球(1.5),77%最多输两球(1.3)'\n single_match_item = {}\n single_match_item['match_id'] = match_id\n single_match_item['match_name'] = match_name\n single_match_item['time_score'] = time_score\n single_match_item['average_completed_match'] = average_completed_match\n single_match_item['home_name'] = home_name\n single_match_item['away_name'] = away_name\n single_match_item['home_rate'] = home_rate\n single_match_item['away_rate'] = away_rate\n single_match_item['support_direction'] = support_direction\n match_info_list.append(single_match_item)\n else:\n continue\n all_match_Item = match_list_Item()\n all_match_Item['match_list'] = match_info_list\n all_match_Item['search_date'] = search_date.replace('-', '_')\n\n yield all_match_Item\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"auto_teams_analysis/spiders/auto_teams_analysis.py","file_name":"auto_teams_analysis.py","file_ext":"py","file_size_in_byte":24157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"126728263","text":"#begin import modules\r\nimport math\r\nfrom datascience import *\r\nimport numpy as np\r\n# end import modules\r\n\r\n#begin msg definitions\r\ndefault_err_msg = \"Something is wrong with your solution. 
See if you can figure out what the mistake might be, or ask a neighbor or TA for help!\"\r\ntype_err_msg = \"Your solution is not in the correct format!\"\r\ncorrect_msg = \"Your solution is correct!\"\r\nokay_msg = \"The format of your solution looks okay!\"\r\n#end msg definitions\r\n\r\n\r\ndef check6_4(average_latitude,average_longitude):\r\n\tcorrectanswer1 = 39.186464523495417\r\n\tcorrectanswer2 = -90.992580812926292\r\n\tif isinstance(average_latitude,type(correctanswer1)) and isinstance(average_longitude,type(correctanswer2)):\r\n\t\tif (average_latitude ==correctanswer1) and (average_longitude == correctanswer2):\r\n\t\t\tprint(correct_msg)\r\n\t\telse:\r\n\t\t\tprint(default_err_msg)\r\n\telse:\r\n\t\tprint(type_err_msg)\r\n","sub_path":"assignments/lab03/lab03_tests/check6_4.py","file_name":"check6_4.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"618530084","text":"from django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.utils.timezone import now\n\n\nclass Type(models.Model):\n type = models.CharField(max_length=255, unique=True)\n\n def __str__(self):\n return self.type\n\n\nclass Topic(models.Model):\n topic = models.CharField(max_length=255, unique=True)\n\n def __str__(self):\n return self.topic\n\n\nclass Text(models.Model):\n \"\"\"Stuff I am reading\"\"\"\n\n created = models.DateTimeField('created',\n help_text='Date and time when '\n 'this text came into my hands.',\n default=now, null=True, blank=True)\n\n started_reading = models.DateTimeField('started_reading',\n help_text='Date and time when '\n 'I started to read '\n 'this text.',\n default=now)\n\n finished_reading = models.DateTimeField('finished_reading',\n help_text='Date and time when '\n 'I finished reading '\n 'this text.',\n default=now, null=True, blank=True)\n\n reading = models.BooleanField(default=False,\n help_text='Reading this next now.')\n\n completed = models.BooleanField(default=False,\n help_text='Read the text completely.')\n\n title = models.TextField(help_text=\"Title of the text.\")\n\n author = models.TextField(help_text=\"Authors of the text.\")\n\n type = models.ManyToManyField(Type, blank=True,\n help_text=\"The type of reading material.\")\n\n pages = models.IntegerField(help_text=\"The number of pages.\")\n\n current_page = models.IntegerField(help_text=\"The current page.\")\n\n topics = models.ManyToManyField(Topic, blank=True,\n help_text=\"The topic this is about.\")\n\n review = models.TextField(help_text=\"What you thought of it.\")\n\n ranking = models.IntegerField(validators=[\n MaxValueValidator(10),\n MinValueValidator(-1)\n ], help_text=\"Its rank - how good was it?\")\n\n def __str__(self):\n if self.reading:\n return f'{self.title} by {self.author} ' \\\n f'({self.current_page}/{self.pages}) ; reading it now.'\n if self.completed:\n return f'{self.title} by {self.author} ' \\\n f'({self.current_page}/{self.pages}).' 
\\\n f'; read it at {self.completed}.'\n return f'{self.title} by {self.author} ' \\\n f'({self.current_page}/{self.pages}).'\n","sub_path":"reading/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5830536","text":"import torch\n\ninputs = torch.tensor([[[[1., 2.],[3., 4.]]]])\nprint(\"linear inputs\", inputs.shape)\n\nfc = torch.nn.Linear(4,2)\nweights = torch.tensor([[1.1, 1.2, 1.3, 1.4],\n\t\t\t\t\t\t[1.5, 1.6, 1.7, 1.8]])\nbias = torch.tensor([1.9, 2.0])\nprint(\"linear weights\", weights.shape)\nfc.weight.data = weights\nfc.bias.data = bias\noutput = torch.relu(fc(inputs.view(-1,4)))\nprint(\"linear outputs\", output, output.shape)\n\nconv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=(1,1))\nprint(\"conv inputs\", inputs.view(1,4,1,1).shape)\nconv.weight.data = weights.view(2,4,1,1)\nprint(\"conv weights\", weights.view(2,4,1,1).shape)\nconv.bias.data = bias\noutput_conv = torch.relu(conv(inputs.view(1,4,1,1)))\nprint(\"conv outputs\", output_conv, output_conv.shape)","sub_path":"VQVAE/VQVAE/conv_linear_test.py","file_name":"conv_linear_test.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"57623262","text":"#!/bin/python3\nimport sys\n\nwords = map(str.rstrip, sys.stdin.readlines()[1:])\n\nfor index, word in enumerate(words):\n last_word = [word[0]]\n for letter in word[1:]:\n if letter < last_word[0]:\n last_word.append(letter)\n else:\n last_word.insert(0, letter)\n print('Case #{}: {}'.format(index+1, ''.join(last_word)))\n","sub_path":"codes/CodeJamCrawler/16_1_1/StevenSheffey/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631701157","text":"\ndef create_matrix(n, v):\n m = []\n \n for _ in range(n):\n row = []\n # pour créer une ligne\n for _ in range(n):\n row.append(v)\n # ajouter une ligne à m la matrice finale\n m.append(row)\n \n return m\n\nprint(create_matrix(5, 0))","sub_path":"B1B/Chap1/create_matrix.py","file_name":"create_matrix.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505466186","text":"def swap(a, i, j):\n assert 0 <= i < len(a), f'accessing index {i} beyond end of array {len(a)}'\n assert 0 <= j < len(a), f'accessing index {j} beyond end of array {len(a)}'\n a[i], a[j] = a[j], a[i]\n\n\ndef simplePartition(a, pivot):\n # print(pivot)\n n = len(a)\n i = -1\n j = 0\n for j in range(n-1):\n # print(a[i])\n if a[j] <= pivot:\n swap(a,i+1 , j)\n i +=1\n swap(a , i+1 , n-1)\n return i+1\n\n\n## To do: partition the array a according to pivot.\n# Your array must be partitioned into two regions - <= pivot followed by elements > pivot\n## If an element at the beginning of the array is already <= pivot in the beginning of the array, it should not\n## be moved by the algorithm.\n# your code here\n\n\ndef boundedSort(a, k):\n for j in range(1, k):\n simplePartition(a, j)\n\n\n\nif __name__ == '__main__':\n a = [1, 3, 6, 1, 5, 4, 1, 1, 2, 3, 3, 1, 3, 5, 2, 2, 4]\n print(a)\n simplePartition(a, 1)\n print(a)\n assert (a[:5] == [1, 1, 1, 1, 1]), 'Simple partition test 1 failed'\n\n simplePartition(a, 2)\n print(a)\n assert (a[:5] == [1, 1, 1, 1, 1]), 'Simple partition test 2(A) failed'\n assert 
(a[5:8] == [2, 2, 2]), 'Simple Partition test 2(B) failed'\n\n    simplePartition(a, 3)\n    print(a)\n    assert (a[:5] == [1, 1, 1, 1, 1]), 'Simple partition test 3(A) failed'\n    assert (a[5:8] == [2, 2, 2]), 'Simple Partition test 3(B) failed'\n    assert (a[8:12] == [3, 3, 3, 3]), 'Simple Partition test 3(C) failed'\n\n    simplePartition(a, 4)\n    print(a)\n    assert (a[:5] == [1, 1, 1, 1, 1]), 'Simple partition test 4(A) failed'\n    assert (a[5:8] == [2, 2, 2]), 'Simple Partition test 4(B) failed'\n    assert (a[8:12] == [3, 3, 3, 3]), 'Simple Partition test 4(C) failed'\n    assert (a[12:14] == [4, 4]), 'Simple Partition test 4(D) failed'\n\n    simplePartition(a, 5)\n    print(a)\n    assert (a == [1] * 5 + [2] * 3 + [3] * 4 + [4] * 2 + [5] * 2 + [6]), 'Simple Partition test 5 failed'\n\n    print('Passed all tests : 10 points!')","sub_path":"MSDS/Partition.py","file_name":"Partition.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"629021611","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nmyframe = pd.read_csv('plotStyleData.csv')\n\nplt.plot(myframe.index,myframe,'og:',label='Hohoho')\nplt.legend(loc='best')\nplt.xlim(0,30)\nfilename = 'plotStyle.png'\nplt.savefig( filename, dpi=400, bbox_inches='tight' )\nprint( filename + ' file has been saved.')\n\nplt.show()","sub_path":"exam/data/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4831753","text":"try:\n    from collections import OrderedDict\nexcept ImportError:\n    from ordereddict import OrderedDict\nfrom avocado.formatters import registry as formatters\nfrom serrano.formatters import HTMLFormatter\n\n\nclass GenomicCoordinate(HTMLFormatter):\n    href = 'http://genome.ucsc.edu/cgi-bin/hgTracks?position=chr{chr}%3A{pos}'\n\n    def to_html(self, values, **context):\n        href = self.href.format(**values)\n        return '<a href=\"{href}\">chr{chr}:{pos:,}</a>'.format(\n            href=href, **values)\n    to_html.process_multiple = True\n\n\nclass dbSNP(HTMLFormatter):\n    href = 'http://www.ncbi.nlm.nih.gov/projects/SNP/snp_ref.cgi?rs={0}'\n\n    def to_html(self, rsid, **context):\n        if rsid:\n            href = self.href.format(rsid[2:])\n            return '<a href=\"{href}\">{rsid}</a>'.format(\n                href=href, rsid=rsid)\n\n    def to_excel(self, rsid, **context):\n        if rsid:\n            href = self.href.format(rsid[2:])\n            return '=HYPERLINK(\"{href}\", \"{label}\")'.format(\n                href=href, label=rsid)\n        return ''\n\n\nclass VariantEffect(HTMLFormatter):\n    def to_html(self, values, **context):\n        return '{0} ({1})'.format(*values.values())\n\n    to_html.process_multiple = True\n\n\nclass AlleleFrequency(HTMLFormatter):\n    \"To be used with EVS and 1000G\"\n    def to_html(self, values, **context):\n        toks = []\n        for key, value in values.items():\n            if value in self.html_map:\n                tok = self.html_map[value]\n            else:\n                tok = str(value * 100) + '%'\n\n            if key.lower() == 'af':\n                key = 'all'\n            else:\n                # Most of the key names are foo_af, this is simply trimming\n                # off the _af\n                key = key.split('_')[0]\n\n            toks.append('<li>{0} {1}</li>'.format(\n                key.title(), tok))\n        return '<ul>{0}</ul>'.format(''.join(toks))\n\n    to_html.process_multiple = True\n\n\nclass SiftFormatter(HTMLFormatter):\n    alt_keys = ('Sift Score', 'Sift Prediction')\n\n    def _get_values(self, value):\n        from vdw.variants.models import Sift\n        return value, Sift.get_prediction(value)\n\n    def to_html(self, value, **context):\n        if value is None:\n            return self.html_map[value]\n        score, prediction = self._get_values(value)\n        return '{0} ({1})'.format(prediction, score)\n\n    def to_excel(self, value, **context):\n        score, prediction = self._get_values(value)\n        return OrderedDict(zip(self.alt_keys, [score, prediction]))\n\n    to_csv = to_excel\n\n\nclass PolyPhen2Formatter(HTMLFormatter):\n    alt_keys = ('PolyPhen2 Score', 'PolyPhen2 Prediction')\n\n    def _get_values(self, value):\n        from vdw.variants.models import PolyPhen2\n        return value, PolyPhen2.get_prediction(value)\n\n    def to_html(self, value, **context):\n        if value is None:\n            return self.html_map[value]\n        score, prediction = self._get_values(value)\n        return '{0} ({1})'.format(prediction, score)\n\n    def to_excel(self, value, **context):\n        score, prediction = self._get_values(value)\n        return OrderedDict(zip(self.alt_keys, [score, prediction]))\n\n    to_csv = to_excel\n\n\nformatters.register(GenomicCoordinate, 'Genomic Coordinate')\nformatters.register(dbSNP, 'dbSNP')\nformatters.register(VariantEffect, 'Variant Effect')\nformatters.register(AlleleFrequency, 'Allele Frequency')\nformatters.register(SiftFormatter, 'Sift')\nformatters.register(PolyPhen2Formatter, 'PolyPhen2')\n","sub_path":"varify/variants/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"581445197","text":"#!/bin/python3\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport argparse\nimport time\nimport subprocess\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--host', default='192.168.1.17', help='switch IP')\nparser.add_argument('--port', default='2', help='port number')\nparser.add_argument('--action', default='on', help='action on port, default on')\n\nargs = parser.parse_args()\nurl = 'http://' + args.host + '/login.cgi'\n\n#Start Chrome\ndriver = webdriver.Chrome()\ndriver.get(url)\ntime.sleep(3)\n\n#login\ndriver.find_element_by_xpath('//*[@id=\"password\"]').send_keys(\"PASSWORD\")\n\n#login_btn\ndriver.find_element_by_xpath('//*[@id=\"loginBtn\"]').click()\ntime.sleep(3)\n\n#port_status\ndriver.find_element_by_xpath('//*[@id=\"blueLinkBold11\"]/div[2]/a/span').click()\n\n#switch to maincontent frame\ndriver.switch_to.frame(driver.find_element_by_xpath('//*[@id=\"maincontent\"]'))\n\n#port2\ndriver.find_element_by_xpath('//*[@id=\"tbl2\"]/tbody/tr[4]/td[1]/input').click()\nselect = Select(driver.find_element_by_xpath('//*[@id=\"g_1_1\"]/td[4]/select'))\n\nif args.action == 'on':\n    select.select_by_visible_text('Auto')\nif args.action == 'off':\n    select.select_by_visible_text('Disable')\n\n#Apply\ndriver.switch_to.default_content()\ndriver.find_element_by_xpath('//*[@id=\"btn_Apply\"]').click()\ntime.sleep(3)\n\n#logout\ndriver.switch_to.default_content()\ndriver.find_element_by_xpath('//*[@id=\"logout\"]').click()\n\n#Close Chrome\ndriver.close()\n\n#Kill chromedriver\nsubprocess.run([\"pkill\", \"chromedriver\"])\n\n","sub_path":"netgear.py","file_name":"netgear.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
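The varify formatters.py record above assembles its HTML snippets by hand with str.format. A minimal standalone sketch of the same anchor-building pattern, in the spirit of dbSNP.to_html (the href template and the rsid[2:] slice that strips the "rs" prefix come from the record itself; the helper name and the sample id below are illustrative assumptions, not part of the dataset):

    # Standalone sketch -- not a dataset record. Mirrors the dbSNP.to_html
    # pattern above; rsid_link and the sample id are hypothetical names.
    HREF = 'http://www.ncbi.nlm.nih.gov/projects/SNP/snp_ref.cgi?rs={0}'

    def rsid_link(rsid):
        # Returns an HTML anchor for an rs identifier, or None when the id is
        # empty, matching the implicit-None behaviour of dbSNP.to_html.
        if rsid:
            return '<a href="{href}">{rsid}</a>'.format(
                href=HREF.format(rsid[2:]), rsid=rsid)

    print(rsid_link('rs12345'))
    # <a href="http://www.ncbi.nlm.nih.gov/projects/SNP/snp_ref.cgi?rs=12345">rs12345</a>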
+{"seq_id":"169023883","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport os\nimport codecs\n\nclass ExtractDataPipeline(object):\n def process_item(self, item, spider):\n item['content'] = \"\\r\\n\".join(item['content'])\n return item\n\nclass SaveDataPipeline(object): \n @classmethod\n def from_crawler(cls, crawler):\n files_store = crawler.settings.get('FILES_STORE')\n if not files_store:\n files_store = 'root'\n return cls(files_store)\n\n def __init__(self, files_store):\n self.files_store = files_store\n \n def process_item(self, item, spider):\n root = self.files_store\n if not os.path.exists(root):\n os.mkdir(root)\n\n year = item['year']\n file_path = '%s/%s.txt' % (root, year)\n with codecs.open(file_path, 'w+', 'utf-8' ) as f:\n f.write(item['content'])\n return item\n\nclass WordCountPipeline(object):\n def process_item(self, item, spider):\n with codecs.open('wordcount.csv', 'a+', 'utf-8') as f:\n f.write('%s, %s\\r\\n' % (item['year'], len(item['content']))) \n\n return item","sub_path":"crawler/npcreport/npcreport/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642566521","text":"#!/usr/bin/python\n\nimport gamepad\nfrom mainloop import MainLoop\nimport sys,time\n\n# grovepi init\nPIN_RELAY = 2\nPIN_CONNECTLED = 3\n\nimport grovepi\ngrovepi.pinMode(PIN_RELAY,\"OUTPUT\");\ngrovepi.pinMode(PIN_CONNECTLED,\"OUTPUT\");\n\n# picoborg init\nimport PicoBorgRev\nPBR = PicoBorgRev.PicoBorgRev()\nPBR.Init()\n\n\nclass Program(MainLoop):\n def initialize(self):\n self.pad = gamepad.DS4Controller()\n\n self.emstop = False\n\n self.pad.on_axischange += self.on_axischange\n self.pad.on_connect += self.on_connect\n self.pad.on_disconnect += self.on_disconnect\n self.pad.on_buttondown += self.on_buttondown\n self.pad.on_buttonup += self.on_buttonup\n\n grovepi.digitalWrite(PIN_CONNECTLED,1)\n\n\n def on_start(self):\n print(\"Waiting for gamepad...\")\n self.pad.begin()\n\n def on_stop(self):\n print(\"\\nStopping pad\")\n self.pad.end()\n print(\"Stopped\")\n\n def on_axischange(self,name,value):\n x = self.pad.axis(\"X\")\n y = self.pad.axis(\"Y\")\n\n a = y - x\n b = y + x\n\n if not self.emstop:\n PBR.SetMotor1(a)\n PBR.SetMotor2(-b)\n \n def on_buttondown(self,name,value):\n print(\"\\nButton '{0}' pressed\".format(name))\n if name == \"SHARE\":\n self.emstop = True\n print(\"Emergency Stop\")\n PBR.SetMotor1(0)\n PBR.SetMotor2(0)\n print(\"Disabling Weapon\")\n grovepi.digitalWrite(PIN_RELAY,0)\n elif name == \"OPTIONS\":\n self.emstop = False\n print(\"Emergency Stop Released\")\n \n if not self.emstop:\n if name == \"TRI\":\n print(\"Enabling Weapon\")\n grovepi.digitalWrite(PIN_RELAY,1) \n elif name == \"O\":\n print(\"Disabling Weapon\")\n grovepi.digitalWrite(PIN_RELAY,0)\n\n def on_buttonup(self,name,value):\n print(\"\\nButton '{0}' released\".format(name))\n\n def on_connect(self,padname):\n print(\"\\nJoystick '{0}' connected\".format(padname))\n grovepi.digitalWrite(PIN_CONNECTLED,0)\n\n\n def on_disconnect(self,padname):\n print(\"\\nJoystick '{0}' disconnected\".format(padname))\n grovepi.digitalWrite(PIN_CONNECTLED,1)\n\n print(\"Disabling motors\")\n PBR.SetMotor1(0)\n PBR.SetMotor2(0)\n print(\"Disabling Weapon\")\n grovepi.digitalWrite(PIN_RELAY,0)\n\n\nif __name__ == 
\"__main__\":\n Program().start()\n\n","sub_path":"ds4drvtest.py","file_name":"ds4drvtest.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402925596","text":"\"\"\"\n============================\nAuthor:赵健\nDate:2019-08-27\nTime:22:24\nE-mail:948883947@qq.com\nFile:mylog_new.py\n============================\n\n\"\"\"\nimport logging\nimport time\nimport os\nfrom le_python自动化.zj_apt_test.common.ob_config import ob # 从操作配置文件导入ob对象\n\n\nclass MyLog(object):\n '''日志类'''\n\n def __new__(cls, *args, **kwargs):\n\n log_in = ob.getstr('loglevel', 'log_in') # 日志收集等级\n ch_log_out = ob.getstr('loglevel', 'ch_log_out') # 控制台输出等级\n fh_log_out = ob.getstr('loglevel', 'fh_log_out') # 文件输出等级\n save_log = ob.getstr('path', 'save_log') # 日志文件存储位置\n logger = logging.getLogger('mylog') # 创建收集器\n logger.setLevel(log_in) # 设置收入日志的等级\n formatter = logging.Formatter(datefmt='%Y-%m-%d %H:%M:%S', fmt='%(asctime)s-%(filename)s'\n '-%(levelname)s-%(name)s-日志输出的信息:'\n '%(message)s')\n now = time.strftime('%Y-%m-%d') # 获取一下当前的时间\n ch = logging.StreamHandler() # 创建控制台输出渠道\n ch.setLevel(ch_log_out) # 设置控制台输出日志等级\n ch.setFormatter(formatter) # 设置控制台日志输出格式\n logger.addHandler(ch) # 将控制台输出渠道添加到收集器当中\n try:\n path = os.path.join(save_log, 'log_{}.log'.format(now)) # 设置一下日志的存储路径\n fh = logging.FileHandler(path, 'a', encoding='utf8') # 创建日志输出到文件的渠道\n except FileNotFoundError: # 若不存在文件夹创建log文件夹\n os.mkdir(save_log)\n path = os.path.join(save_log, 'log_{}.log'.format(now)) # 设置一下日志的存储路径\n fh = logging.FileHandler(path, 'a', encoding='utf8') # 创建日志输出到文件的渠道\n fh.setLevel(fh_log_out) # 设置文件的输出日志等级\n fh.setFormatter(formatter) # 设置文件的输出格式\n logger.addHandler(fh) # 将文件输出渠道添加到收集器当中\n\n return logger\nmylog = MyLog()\n\nif __name__ == '__main__':\n mylog = MyLog()\n mylog.debug('debug')\n","sub_path":"common/mylog_new.py","file_name":"mylog_new.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"590067532","text":"\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom dwitter.models import *\nfrom django.contrib.auth.models import User\nfrom .models import Like\nimport logging\nlogger = logging.getLogger('dwitter')\n\ndef Create_dweet():\n logger.info(\"coming from create_object function....\")\n try:\n user = User.objects.get(username='kavi')\n dweet = MessageBox.objects.create(user=user, message=\"This is second message\", date=timezone.now())\n print(\"dweet created\")\n print(dweet)\n all_dweets = MessageBox.objects.all()\n print(all_dweets)\n except:\n logger.exception('user not found')\n\n\ndef Get_objects(request,id):\n logger.info(\"from get_objects function return all\")\n try:\n like = Like.objects.get(id=id)\n like_dict = {\n \"id\": like.id,\n # \"user\": Like.user_id.User.username,\n \"date\": like.dweet\n }\n return JsonResponse(like_dict)\n except Exception as exception:\n logger.exception(\"Exception: %s\" % exception)\n\n\ndef Delete_objects(request,id):\n logger.info(\"from Delete_object function\")\n try:\n instance = Like.objects.get(id=id)\n instance.delete()\n show = Like.objects.all()\n return HttpResponse(instance,show)\n except Exception as exception:\n logger.exception(\"Exception : %s\" % exception)\n\n\ndef Update_objects():\n logger.info(\"from update_objects function\")\n try:\n updated_message= MessageBox.objects.filter(pk=2).update(message='Changed to new message')\n 
print(updated_message)\n        print(MessageBox.objects.all())\n    except Exception as exception:\n        logger.exception(\"Exception: %s\" % exception)\n\n\ndef Hello(request):\n    return HttpResponse(\"hello\")\n\n\n","sub_path":"dwitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"151826786","text":"import os\nimport requests\nfrom flask import Flask, send_file, Response\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\n# app.secret_key = os.environ.get('SECRET_KEY').encode()\n\ndef get_fact():\n    response = requests.get(\"http://unkno.com\")\n\n    soup = BeautifulSoup(response.content, \"html.parser\")\n    facts = soup.find_all(\"div\", id=\"content\")\n\n    return facts[0].getText()\n\n\ndef get_pig_latin(fact):\n    page = \"\"\"\nFact from unkno.com:{}\nURL from pig latinizer:{}\n-------------------------------------------------------------\nResponse from pig latinizer:{}
    \n\"\"\"\n\n this_url = 'https://hidden-journey-62459.herokuapp.com/piglatinize/'\n this_data = {\"input_text\":fact}\n\n response = requests.post(url=this_url, data=this_data)\n \n return page.format(fact, response.url, response.text)\n\n@app.route('/')\ndef home():\n this_fact = get_fact()\n return get_pig_latin(this_fact)\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 6787))\n app.run(host='0.0.0.0', port=port)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428678059","text":"# -*- coding: utf-8 -*-\r\n# @Author: wukong4430\r\n# @Date: 2018-10-01 23:31:19\r\n# @Last Modified by: wukong4430\r\n# @Last Modified time: 2018-10-01 23:38:51\r\n\r\nimport traceback\r\n\r\n\r\nclass Solution:\r\n def flipAndInvertImage(self, A):\r\n \"\"\"\r\n :type A: List[List[int]]\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n\r\n for idx, row in enumerate(A):\r\n A[idx] = row[::-1]\r\n\r\n for row in A:\r\n for idx, item in enumerate(row):\r\n if item == 0:\r\n row[idx] = 1\r\n else:\r\n row[idx] = 0\r\n\r\n return A\r\n\r\n\r\ndef main():\r\n while 1:\r\n try:\r\n A = [[1, 0, 0], [0, 0, 0], [1, 1, 0]]\r\n Solution().flipAndInvertImage(A=A)\r\n break\r\n except Exception as e:\r\n traceback.print_exc()\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Array/flipAndInvertImage.py","file_name":"flipAndInvertImage.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"647580012","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndef preprocessing(fp, y0, y1):\n\t'''\n\tassociates measured values with correct y/m/d\n\tdiscards days with missing values\n\tinput: raw data file, start year, end year\n\treturns: data frame with these columns --\n\t[date][tmax][tmin][prcp][snow][snwd]\n\t'''\n\n\t# just for reference\n\t# cols = {'station': (0, 11), 'year': (11, 15), 'month': (15, 17), 'element': (17, 21)}\n\t#station = line[:11] - not necessary, but may be helpful for visualization?\n\n\t# attributes we care about\n\tatts = ['TMAX', 'TMIN', 'PRCP', 'SNOW', 'SNWD']\n\t\n\t# (start, end, increment) indices\n\tv = [21, 26, 8] \n\tD = {}\n\n\t# read from file\n\tfor line in fp:\n\t\tline = line.strip()\n\t\tyear = int(line[11:15])\n\n\t\t# only extract out values from desired years\n\t\tif not y0 <= year <= y1:\n\t\t\tcontinue\n\n\t\tmonth = int(line[15:17])\n\n\t\tatt = line[17:21]\n\t\tif not att in atts:\n\t\t\tcontinue\n\n\t\tday = 1\n\n\t\twhile v[1] <= len(line):\n\t\t\t# extract out value\n\t\t\tvalue = int(line[v[0]:v[1]])\n\t\t\tif value == -9999:\n\t\t\t\tvalue = None\n\n\t\t\tk = (year, month, day)\n\t\t\tif not k in D:\n\t\t\t\tD[k] = {}\n\t\t\tD[k][att] = value\n\n\t\t\t# move to next daily value column\n\t\t\tv[0] += v[2]\n\t\t\tv[1] += v[2]\n\t\t\tday += 1\n\n\t\t# reset back to day 1 col indices\n\t\tv = [21, 26, 8]\t\n\n\t# reformat into DataFrame.from_dict compatible structure\n\tD1 = {}\n\td = 0\n\tfor k, v in D.items():\n\t\t# assemble a single date string [yyyy-mm-dd]\n\t\ty = str(k[0])\n\t\tm = str(k[1])\n\t\tday = str(k[2])\n\t\tif len(m)==1:\n\t\t\tm = '0'+m\n\t\tif len(day)==1:\n\t\t\tday = '0'+day\n\t\tdate = y+'/'+m+'/'+day\n\t\t\n\t\t# add row to python dict\n\t\tD1[d] = [date, get(v, 'TMAX'), get(v, 'TMIN'),\n\t\t\t\tget(v, 'PRCP'), get(v, 'SNOW'), get(v, 'SNWD')]\n\t\td += 1\n\n\t# create data 
frame\n\tdf = pd.DataFrame.from_dict(D1, orient='index')\n\tdf.columns = ['Date', 'TMAX', 'TMIN', 'PRCP', 'SNOW', 'SNWD']\n\tdf = df.dropna()\n\n\t# sort rows of data frame by date -- important!\n\tdf = df.sort_values(by=['Date'])\n\tdf = df.reset_index()\n\tdf = df.drop('index', axis=1)\n\n\n\treturn df\n\n \ndef plot_target(T, save, target):\n\t# save a graph\n\tfig, axes = plt.subplots()\n\taxes = T.plot(kind='line')\n\taxes.set(ylabel=target)\n\taxes.get_xaxis().set_ticks([])\n\tfig.savefig(target+'target-attribute.png')\n\n\ndef get(v, att):\n\t'''gets desired value out of dictionary of {attribute names:values}, if it exists'''\n\tif att in v:\n\t\treturn v[att]\n\telse:\n\t\treturn None\n\ndef create(file, start, end, target, save=False):\n\t'''\n\tcreates data frame and target attribute series\n\tsaves plot of target attribute that can be used as a figure in project report\n\tinput: name of data file, start year, end year, target attribute (TMAX, TMIN, PRCP, SNOW, SNOW)\n\treturns: data frame containing everything, series containing target attribute values\n\t'''\n\tfp = open(file, \"r\")\n\ty0, y1 = start, end\n\n\tD = preprocessing(fp, y0, y1)\n\t\n\t# extract the target attribute, for plotting\n\n\n\tif save:\n\t\t# save data frame to csv\n\t\tD.to_csv('preprocessed_data.csv')\n\n\tfp.close()\n\n\treturn D\n\ndef construct_predictors(D, save=False):\n '''\n\tconstructs the predictor attributes and isolates target attribute\n\tinput: preprocessed data frame\n\treturns: predictors data frame: [tmax-2][tmax-1]...[snwd-2][snwd-1]\n '''\n atts = ['TMAX', 'TMIN', 'PRCP', 'SNOW', 'SNWD']\n\n P = pd.DataFrame()\n\n for att in atts:\n # make series\n S = pd.Series(D[att].values, index=D['Date'])\n\n N = S.shape[0]\n\n # shift two up\n P[att+'-2'] = S[:N-1].values\n P[att+'-1'] = S[1:N].values\n\n # add to df\n P = P[:-1]\n P.index = S[2:].index\n\n # optional save\n if save:\n P.to_csv('predictor_attributes.csv')\n \n return P","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"290028991","text":"#Core\nfrom django.contrib.auth import logout\nfrom django.contrib.auth import login\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.forms import AuthenticationForm\n\n#Django\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.template import RequestContext\n\n#Templates\nfrom django.views.generic import TemplateView\n\n\ndef ingresar(request):\n\n if request.method == 'POST':\n form_ingresar = AuthenticationForm(data=request.POST)\n if form_ingresar.is_valid():\n a = authenticate(username=request.POST['username'], password=request.POST['password'])\n if a is not None:\n if a.is_active():\n login(request, a)\n return HttpResponseRedirect('/dashboard/')\n else:\n return HttpResponseRedirect('/noactivo/')\n else:\n return HttpResponseRedirect('/nousuario/')\n else:\n form_ingresar = AuthenticationForm()\n return render_to_response('ingreso/login.html',\n {\n 'form_ingresar': form_ingresar\n },\n context_instance=RequestContext(request))\n\n\nclass Dashboard(TemplateView):\n\n template_name = 'ingreso/dashboard.html'","sub_path":"TurismoyViajes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"52103519","text":"TARIFF_11 = 
0.244618\nTARIFF_31 = 0.136928\n\ntariff_choice = int(input(\"Which tariff? 11 or 31: \"))\n\ndaily_use = float(input(\"Enter daily use in kWh: \"))\n\nnumber_of_days = int(input(\"Enter number of billing days: \"))\n\nif tariff_choice == 11:\n estimated_bill = TARIFF_11 * daily_use * number_of_days\nelif tariff_choice == 31:\n estimated_bill = TARIFF_31 * daily_use * number_of_days\n\nprint(f\"Estimated bill: ${estimated_bill:.2f}\")","sub_path":"prac_01/electricity_bill.py","file_name":"electricity_bill.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81782980","text":"\n\nimport logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s :: %(levelname)s :: %(message)s')\n\ndef linear_sum(S, n):\n '''\n Return the sum of the first n numbers of sequence S\n '''\n if n == 0:\n return 0\n else:\n #logging.info(n)\n logging.info('%s %s', linear_sum(S, n-1), S[n-1])\n return linear_sum(S, n-1) + S[n-1]\n\n\nif __name__ == '__main__':\n\n S = [4, 3, 6, 2, 8]\n print(linear_sum(S, 2))\n","sub_path":"python/goodrich/ch04/linear_sum.py","file_name":"linear_sum.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213801176","text":"from parser_odds import Parser\nimport sqlite3\nimport time\nimport statistics\n\n\ndef update_time():\n parser = Parser()\n print('[INFO] Подключение к базе данных')\n con = sqlite3.connect(parser.db)\n cur = con.cursor()\n query = 'SELECT date,url FROM game'\n print('[INFO] Получаем ссылки на игры и их дату')\n cur.execute(query)\n data_list = cur.fetchall()\n game_list = []\n for game in data_list:\n dict_game = {'date': game[0],\n 'url': game[1]}\n game_list.append(dict_game)\n cur.close()\n con.close()\n load_game = data_list[0]\n try:\n with open('save_date', 'r', encoding='utf8') as load_file:\n load_game = eval(load_file.read())\n print('[INFO] Файл загрузки получен')\n except FileNotFoundError:\n print('[WARNING] Файл загрузки не найден')\n index_load_game = data_list.index(load_game)\n total_games = len(game_list[index_load_game:])\n time_list = []\n con = sqlite3.connect(parser.db)\n cur = con.cursor()\n for game in game_list[index_load_game:]:\n start = time.time()\n print('[INFO] Получаем дату {}'.format(game['url']))\n date = parser.get_date(game['url'])\n print('[INFO] Дата в базе {}'.format(game['date']))\n print('[INFO] Сравниваем дату из сайта с датой из бд')\n if game['date'] == date:\n print('[INFO] Даты совпадают')\n else:\n print('[INFO] Даты не совпадают')\n print('[INFO] Меняем значения в базе')\n query = 'UPDATE game SET date = ? 
WHERE url = ?'\n cur.execute(query, [date, game['url']])\n print('[INFO] Значение измененно')\n end = time.time()\n time_compl = end - start\n time_list.append(time_compl)\n total_games -= 1\n print('[INFO] Осталось проверить {} игр'.format(total_games))\n if total_games % 10 == 0:\n print('[INFO] Сохранение изменений')\n con.commit()\n with open('save_date', 'w', encoding='utf8') as savefile: # сохранение\n savefile.write(str((date, game['url'])))\n print('[INFO] Сохраненно')\n time_to_compl = statistics.mean(time_list) * total_games\n hour = int(time_to_compl // 3600)\n minute = int((time_to_compl % 3600) // 60)\n second = (time_to_compl % 3600) % 60\n print('[INFO] Осталось примерно {} часов {} минут {} секунд'.format(hour, minute, second))\n cur.close()\n con.close()\n\n\nif __name__ == \"__main__\":\n update_time()","sub_path":"update_time.py","file_name":"update_time.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"454137312","text":"import json\n\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom wortwurster.main.models import Wortwurst, VoteType, Vote\n\n\nclass WortwurstViewTest(TestCase):\n\n def setUp(self):\n wortwurst1 = Wortwurst.objects.create(\n text=\"Wat mut dat mut!\", ip='127.0.0.1', session_id='12345',\n )\n wortwurst2 = Wortwurst.objects.create(\n text=\"Wie der Herr, so's Gescherr!\",\n ip='127.0.0.1', session_id='12345',\n )\n wortwurst3 = Wortwurst.objects.create(\n text=\"Wortwurster\",\n ip='127.0.0.1', session_id='12345',\n )\n self.love = VoteType.objects.get(key='love')\n self.hate = VoteType.objects.get(key='hate')\n Vote.objects.create(\n wortwurst=wortwurst2, type=self.love, ip='127.0.0.1',\n session_id='12345'\n )\n Vote.objects.create(\n wortwurst=wortwurst2, type=self.love, ip='127.0.0.1',\n session_id='23456'\n )\n Vote.objects.create(\n wortwurst=wortwurst1, type=self.hate, ip='127.0.0.1',\n session_id='34567'\n )\n Vote.objects.create(\n wortwurst=wortwurst2, type=self.hate, ip='127.0.0.1',\n session_id='23456'\n )\n Vote.objects.create(\n wortwurst=wortwurst3, type=self.love, ip='127.0.0.1',\n session_id='23456'\n )\n\n def test_wortwurst_view(self):\n # Default: ordered by publication date.\n response = self.client.get(\n reverse('wortwurst-list'), {'format': 'json'}\n )\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n self.assertEqual(3, len(result))\n self.assertEqual(\"Wortwurster\", result[0]['text'])\n self.assertEqual(1, result[0]['votes']['love']['count'])\n self.assertEqual(0, result[0]['votes']['hate']['count'])\n self.assertEqual(\"Wie der Herr, so's Gescherr!\", result[1]['text'])\n self.assertEqual(2, result[1]['votes']['love']['count'])\n self.assertEqual(1, result[1]['votes']['hate']['count'])\n self.assertEqual(\"Wat mut dat mut!\", result[2]['text'])\n self.assertEqual(0, result[2]['votes']['love']['count'])\n self.assertEqual(1, result[2]['votes']['hate']['count'])\n\n def test_wortwurst_view_top(self):\n # Ordered by love count.\n response = self.client.get(\n reverse('wortwurst-list'),\n {'filter': 'love', 'sorting': 'top', 'format': 'json'}\n )\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n self.assertEqual(2, len(result))\n self.assertEqual(\"Wie der Herr, so's Gescherr!\", result[0]['text'])\n self.assertEqual(2, result[0]['votes']['love']['count'])\n self.assertEqual(1, 
result[0]['votes']['hate']['count'])\n self.assertEqual(\"Wortwurster\", result[1]['text'])\n self.assertEqual(1, result[1]['votes']['love']['count'])\n self.assertEqual(0, result[1]['votes']['hate']['count'])\n\n def test_wortwurst_view_most_recent(self):\n # Ordered by most recent love.\n response = self.client.get(\n reverse('wortwurst-list'),\n {'filter': 'love', 'sorting': 'recent', 'format': 'json'}\n )\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n self.assertEqual(2, len(result))\n self.assertEqual(\"Wortwurster\", result[0]['text'])\n self.assertEqual(1, result[0]['votes']['love']['count'])\n self.assertEqual(0, result[0]['votes']['hate']['count'])\n self.assertEqual(\"Wie der Herr, so's Gescherr!\", result[1]['text'])\n self.assertEqual(2, result[1]['votes']['love']['count'])\n self.assertEqual(1, result[1]['votes']['hate']['count'])\n\n def test_post_wortwurst(self):\n response = self.client.post(\n reverse('wortwurst-list'),\n {'text': \"Wie der Ochs vorm Scheunentor\", 'bg_color': '#123456'},\n HTTP_ACCEPT='application/json'\n )\n self.assertEqual(201, response.status_code)\n result = json.loads(response.content.decode())\n self.assertEqual(\"Wie der Ochs vorm Scheunentor\", result['text'])\n self.assertEqual('#123456', result['bg_color'])\n\n\nclass VoteTypeViewTest(TestCase):\n\n def setUp(self):\n self.love = VoteType.objects.get(key='love')\n self.hate = VoteType.objects.get(key='hate')\n\n def test_vote_types(self):\n response = self.client.get(\n reverse('votetype-list'), {'format': 'json'}\n )\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n self.assertEqual('love', result[0]['key'])\n self.assertEqual('Love', result[0]['text'])\n self.assertEqual('hate', result[1]['key'])\n self.assertEqual('Hate', result[1]['text'])\n","sub_path":"wortwurster/main/tests/test_views/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"15934212","text":"# A program that loads data by extracting from json file, represent each sentence with a list of words\n# in our models vocabulary, where Each position in the list will represent a word from our vocabulary.\n# If the position in the list is a 1 then that will mean that the word exists in our sentence, and\n# if it is a 0 then the word is not present.\n#\n# Then we develop a model, with neural network with two hidden layers. The goal of our network will be\n# to look at a bag of words and give a class that they belong to (one of our tags from the json file).\n# We use tensorflow tflearn to build and connect our neural network.\n#\n# Then we do training and saving the model that we developed. We \"fit\" our data to the model, and save it.\n# We can load the model without training again if we already processed and saved.\n#\n# Then we finally use the model. We want to generate a response to any sentence the user types in. 
Again,\n# we take the input as a bag of words, and it generates a list of probabilities for all of our tags.\n\n# – Get some input from the user\n# – Convert it to a bag of words\n# – Get a prediction from the model\n# – Find the most probable class\n# – Pick a response from that class\n\n\nimport nltk\nnltk.download('punkt')\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport numpy\nimport tflearn\nimport tensorflow\nimport random\nimport json\nimport pickle\n\nwith open(\"intents/seonjun.json\") as file:\n data = json.load(file)\n\ntry:\n with open(\"data.pickle\", \"rb\") as f:\n words, labels, training, output = pickle.load(f)\nexcept:\n words = []\n labels = []\n list_pattern = []\n list_tag = []\n\n for intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]: # for each patter in from each intent\n tokens = nltk.word_tokenize(pattern) # turn pattern into a list of words using tokenize\n words.extend(tokens)\n list_pattern.append(tokens) # add each list of words to patter_x\n list_tag.append(intent[\"tag\"]) # add the associated tag to tag_y\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n words = sorted(list(set(words)))\n\n labels = sorted(labels)\n\n # one hot encoded\n training = []\n output = []\n\n out_empty = [0 for _ in range(len(labels))]\n\n for x, doc in enumerate(list_pattern):\n bag = []\n\n stems = [stemmer.stem(w.lower()) for w in doc]\n\n for w in words:\n if w in stems:\n bag.append(1)\n else:\n bag.append(0)\n\n output_row = out_empty[:]\n output_row[labels.index(list_tag[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n\n training = numpy.array(training)\n output = numpy.array(output)\n\n with open(\"data.pickle\", \"wb\") as f:\n pickle.dump((words, labels, training, output), f)\n\ntensorflow.reset_default_graph()\n\n# take input as a bag of words and outputs some kinds of labels telling we should respond with tags\nnet = tflearn.input_data(shape=[None, len(training[0])]) # input layer data (length of training data)\nnet = tflearn.fully_connected(net, 8) # hidden layer with 8 neurons\nnet = tflearn.fully_connected(net, 8) # hidden layer with 8 neurons, so two hidden layer\n\n# connect to output layer with softmax activation function that gives probability.\n# Neurons represent each of our classes, so each neuron is tag, like hello, goodbye, etc.\n# So our model predicts which tag we should take responsible to give to user.\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\nnet = tflearn.regression(net)\n\n# DNN = just type of neural network\nmodel = tflearn.DNN(net)\n\ntry:\n model.load(\"model.tflearn\") # only add this line and try/except when we already produced the model.\nexcept:\n model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)\n model.save(\"model.tflearn\")\n\n\ndef bag_of_words(s, words):\n bag = [0 for _ in range(len(words))]\n\n s_words = nltk.word_tokenize(s)\n s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n for se in s_words:\n for i, w in enumerate(words):\n if w == se:\n bag[i] = 1\n\n return numpy.array(bag)\n\n\ndef chat():\n print(\"Start talking with the bot (type quit to stop)!\")\n while True:\n inp = input(\"You: \")\n if inp.lower() == \"quit\":\n break\n\n results = model.predict([bag_of_words(inp, words)])[0]\n results_index = numpy.argmax(results) # index of the greatest number (probability)\n tag = labels[results_index] # 
the tag (greeting, hours, etc.)\n\n if results[results_index] > 0.5:\n for tg in data[\"intents\"]:\n if tg['tag'] == tag:\n responses = tg['responses']\n print(random.choice(responses))\n else:\n print(\"I didn't get that, try again.\")\n\n\nchat()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430788792","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport time\nfrom torch import optim\nfrom utils.dataloader import Mridataset\nfrom torch.utils.data import DataLoader\nfrom models.Unet import unet\nfrom models.Unetpluses import unetpluses\nfrom tqdm import tqdm\nimport numpy as np\nfrom lossfiction import dice_ce_Loss,dice_Loss\nfrom utils.save_load import save_as_file,load_from_file,delete_file\nimport shutil\nimport os\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef train(epoch,Epoch,dataloader):\n running_loss = 0\n dataloader_size = len(dataloader)\n # first_batch = next(iter(dataloader))\n # dataloader_size = 50\n with tqdm(total=dataloader_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:\n # for i,data in enumerate([first_batch]*50):\n for i,data in enumerate(dataloader):\n jpgs,pngs,labels = data\n if cuda:\n jpgs = jpgs.cuda()\n pngs = pngs.cuda()\n labels = labels.cuda()\n optimizer.zero_grad()\n #模型预测\n jpgs = model(jpgs)\n #计算loss\n loss,dice = Loss(jpgs,pngs,labels)\n #反向传播\n loss.backward()\n #梯度下降\n optimizer.step()\n #计算这一个minibatch的loss并计入到这个epoch的loss之中\n running_loss+= loss.item()\n pbar.set_postfix(**{'total_loss': running_loss / (i + 1),\n 'lr': get_lr(optimizer)})\n pbar.update(1)\n return running_loss/dataloader_size\n\ndef test(train_loss,epoch,Epoch,dataloader):\n val_loss = 0\n val_dice = 0\n dataloader_size = len(dataloader)\n print(\"Validation Epoch:\",epoch+1)\n with tqdm(total=dataloader_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:\n for i, data in enumerate(dataloader):\n jpgs, pngs, labels = data\n with torch.no_grad():\n if cuda:\n jpgs = jpgs.cuda()\n pngs = pngs.cuda()\n labels = labels.cuda()\n jpgs = model(jpgs)\n loss,dice = Loss(jpgs,pngs,labels)\n val_loss += loss.item()\n val_dice += dice.item()\n pbar.set_postfix(**{'val_loss': val_loss/ (i + 1),\n 'val_dice': val_dice / (i + 1),\n 'lr': get_lr(optimizer)})\n pbar.update(1)\n print(\"Finish Validation Epoch:\",epoch+1)\n val_loss = val_loss / dataloader_size\n val_dice = val_dice / dataloader_size\n print('Total Loss: %.4f || Val Loss: %.4f ' % (train_loss,val_loss))\n print('Saving state, epoch:', str(epoch + 1))\n torch.save(model.state_dict(), './'+save_path+'/logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f-Val_Dice%.4f.pth' % ((epoch + 1), train_loss,val_loss,val_dice))\n return val_loss,val_dice\n\n\n#--------------------参数----------------------#\n#linux下相对路径报错时,补全绝对路径\npath = \"\"\nbatch_size = 2\nlr = 1e-4\nEpoch = 200\ncuda = True\npin_memory = True\nnum_workers = 0\n\nnum_classes = 2\nmodel = unetpluses(num_classes) #模型\nsave_path = \"result_unetpluses_o\"\ncontours_type = \"o_labels\"\nLoss = dice_Loss\noptimizer = optim.Adam(model.parameters(),lr) #优化器\n#--------------------------------------------#\n\n\n#加载数据集\n# Mridataset(数据列表路径,图片大小,分类种类,是否数据增强)\ntrain_set = Mridataset(\"dataset/train.txt\", contours_type, (256, 256), num_classes, False)\nval_set = Mridataset(\"dataset/val.txt\", 
contours_type,(256, 256), num_classes, False)\n\ntrain_loader = DataLoader(dataset=train_set,batch_size=batch_size,shuffle=True,num_workers=num_workers,pin_memory=pin_memory,\n drop_last=True)\nval_loader = DataLoader(dataset=val_set,batch_size=batch_size,shuffle=True,num_workers=num_workers,pin_memory=pin_memory,\n drop_last=True)\n\nif __name__ == '__main__':\n #使用迭代器去first_batch,看是否能过拟合\n # first_batch = next(iter(train_loader))\n trainloss_list = \"./\"+save_path+\"/loss_graph/trainloss_list.txt\"\n valoss_list = \"./\"+save_path+\"/loss_graph/valoss_list.txt\"\n valdice_list = \"./\"+save_path+\"/loss_graph/valdice_list.txt\"\n lr_list = \"./\"+save_path+\"/loss_graph/lr_list.txt\"\n shutil.rmtree(\"./\"+save_path+\"/logs\")\n os.mkdir(\"./\"+save_path+\"/logs\")\n delete_file([trainloss_list, valoss_list, valdice_list, lr_list])\n if cuda:\n model = model.cuda()\n start = time.time()\n for epoch in range(Epoch):\n save_as_file(get_lr(optimizer), lr_list)\n train_loss = train(epoch, Epoch, train_loader)\n save_as_file(train_loss, trainloss_list)\n val_loss,val_dice = test(train_loss, epoch, Epoch, val_loader)\n save_as_file(val_loss, valoss_list)\n save_as_file(val_dice, valdice_list)\n end = time.time()\n trainning_time = end - start\n f = open(\"./\"+save_path+\"/unet_trainning_time\"+str(trainning_time)+\".txt\",'w')\n f.close()\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229330761","text":"'''\n@Author: Rashmi\n@Date: 2021-09-28 19:25\n@Last Modified by: Rashmi\n@Last Modified time: 2021-09-28 19:35\n@Title :Write a Python program to remove duplicates from a list.'''\n\ndef remove(duplicate):\n '''Description:create an empty list and for each element loop if that element not present in\n created list then append that element to newly created list and return the list '''\n final_list = []\n for num in duplicate:\n if num not in final_list:\n final_list.append(num)\n return final_list\nif __name__ == '__main__':\n duplicate = [2, 4, 10, 20, 5, 2, 20, 4]\n print(\"given list\",duplicate)\n print(\"list after removing duplicates using set\",list(set(duplicate)))\n print(\"list after removing duplicates using loop\",remove(duplicate))","sub_path":"DataStructures/List/RemoveDuplicates.py","file_name":"RemoveDuplicates.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"495105592","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"Commonly used package.\"\"\"\n\nfrom django.conf.urls import url\nfrom .views import JSView, CSSView, ImageView\n\n\napp_name = \"common\"\n\nurlpatterns = (\n url(r'^js$', JSView.as_view(), name=\"js\"),\n url(r'^css$', CSSView.as_view(), name=\"css\"),\n url(r'^assets/(?P.+)$', ImageView.as_view(), name=\"assets\")\n)\n","sub_path":"app/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"469355143","text":"import hashlib\n\n\"\"\"\nClass Name: State\nDescription: Defines a certain possible state of the problem\n\"\"\"\nclass State:\n \"\"\"\n Method name: __init__\n Description: Constructor\n Calling arguments: - currentPosition: a specific node id\n - nodesLeft: list of nodes left to go. 
Ordered by ascending order depending on its nodeid\n \"\"\"\n def __init__(self, currentPosition, nodesRemaining):\n self.currentPosition = currentPosition\n self.nodesRemaining = nodesRemaining\n self.nodesRemaining.sort()\n self.md5checksum = hashlib.md5((str(self.currentPosition) + \",\".join(str(self.nodesRemaining))).encode()).hexdigest()\n","sub_path":"src/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"471074263","text":"#My Name: Ryan Potocki\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndef scatterplot():\r\n \"\"\"creates a scatterplot in three colors\"\"\"\r\n #get the data for the plots\r\n reddata = np.array([[1,1],[1,3],[4,2]])\r\n bluedata = np.array([[0,1],[0,5],[1,2],[2,3],[3,4]])\r\n yellowdata = np.array([[1,4],[2,2],[3,5],[6,2]])\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(reddata, columns=[\"x\",\"y\"])\r\n df1 = pd.DataFrame(bluedata, columns=[\"x\",\"y\"])\r\n df2 = pd.DataFrame(yellowdata, columns=[\"x\",\"y\"])\r\n #create the plot\r\n ax = df.plot.scatter(x=\"x\",y=\"y\",label=\"Red Group\",color=\"Red\",title=\"Scatter Plot in Three Colors\",xlim=(-1,7),ylim=(0,6))\r\n ax1 = df1.plot.scatter(x=\"x\",y=\"y\",label=\"Blue Group\",color=\"Blue\",ax=ax)\r\n ax2 = df2.plot.scatter(x=\"x\",y=\"y\",label=\"Yellow Group\",color=\"Yellow\",ax=ax)\r\n #get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_scatter_plot.png\")\r\n\r\ndef line_graph():\r\n \"\"\"creates a line graph of cosine approximated at intervals of 1/(10pi)\"\"\"\r\n #create the data in an array\r\n xval = np.arange(0,6,(np.pi*(1./10)))\r\n yval = np.cos(xval)\r\n data = np.array([xval,yval])\r\n data = data.transpose()\r\n y = np.arange(-1,1.5,0.5)\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(data,columns=[\"x\",\"y\"])\r\n #tell the DataFrame to plot the data\r\n ax = df.plot(x=\"x\",y=\"y\",label=\"0\",ylim=(-1,1),yticks=y,title=\"Cosine Approximated at Intervals of 1/(10pi)\")\r\n ax.set(xlabel=\"\",ylabel=\"\")\r\n\t#get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_line_graph.png\")\r\n\r\nif __name__ == \"__main__\":\r\n line_graph()\r\n scatterplot()\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignment14.py","file_name":"Assignment14.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"451519503","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy.interpolate\nimport pylab\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport scipy.io as sio\n\nmat_contents = sio.loadmat('accresult__niclcon_matlab.mat')\n\nfig = plt.figure(figsize=(12, 12))\nC = np.linspace(0, 90, 10)\nN = range(1,11)\n\nnumxTicks = min(10, max(N))\nplt.xticks(np.arange(0, 11, 1), np.arange(0, 1.1, 0.1))\nplt.yticks(np.arange(0, max(N) + 1, 1), np.arange(1, max(N) + 1, 1))\n\n\naccmesh = []\nfor nidx, n in enumerate(N):\n temp = []\n for cidx, c in enumerate(C):\n temp.extend([mat_contents['accresult'][nidx, cidx]])\n accmesh.append(temp)\n\nplt.imshow(accmesh, interpolation='none', cmap='summer')\n\n\nisfull = False\ntitle = 'for Matlab Implementation Using Niclcon dataset'\nplt.title('Accuracy for %s %s' % (('Full sketches' if isfull else 'Partial sketches'), 
title))\nplt.xlabel('C')\nplt.ylabel('N')\n# plt.grid(ls='solid')\n\nfor nidx,n in enumerate(N):\n for cidx,c in enumerate(C):\n plt.text(c / 10 - 0.2, n - 1, '%.2f' % accmesh[nidx][cidx], fontsize=10)\n\n# create an axes on the right side of ax. The width of cax will be 5%\n# of ax and the padding between cax and ax will be fixed at 0.05 inch.\ndivider = make_axes_locatable(plt.gca())\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\nplt.colorbar(cax=cax)\n\nplt.clim(0,100)\n# plt.show()\nplt.show()\n\nfig.savefig('.' + '/' + 'draw_N_C_Rej_Contour_%s_Matlab.png' % ('Full' if isfull else 'Partial'))","sub_path":"test/matlabdraw.py","file_name":"matlabdraw.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"150716800","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport random\nimport math\nimport time\nimport _matrix\n\nclass TestMatrix:\n def is_close_to_equal(self, m1, m2, m, n):\n for i in range(m):\n for j in range(n):\n assert math.fabs(m1[i, j]-m2[i, j]) < 0.00000001\n return True\n\n def time_report(self,t):\n if t == -1:\n return time.process_time()\n else:\n return time.process_time()-t\n\n def calculation_helper(self, p, q, r, tsize):\n m1 = np.random.rand(p, q)\n m2 = np.random.rand(q, r)\n m3 = np.matmul(m1, m2)\n\n _m1 = _matrix.Matrix(m1)\n _m2 = _matrix.Matrix(m2)\n\n start = self.time_report(-1)\n naive = _matrix.multiply_naive(_m1, _m2)\n tnaive = self.time_report(start)\n\n start = self.time_report(-1)\n tile = _matrix.multiply_tile(_m1, _m2, tsize)\n ttile = self.time_report(start)\n\n start = self.time_report(-1)\n mkl = _matrix.multiply_mkl(_m1, _m2)\n tmkl = self.time_report(start)\n\n assert m3.shape[0] == naive.nrow and m3.shape[1] == naive.ncol\n assert m3.shape[0] == tile.nrow and m3.shape[1] == tile.ncol\n assert m3.shape[0] == mkl.nrow and m3.shape[1] == mkl.ncol\n \n assert(self.is_close_to_equal(m3, naive, p, r))\n assert(self.is_close_to_equal(m3, tile, p, r))\n assert(self.is_close_to_equal(m3, mkl, p, r))\n\n f = open(\"performance.txt\", \"w\")\n f.write('multiply_naive costs: {} seconds\\n'.format(tnaive))\n f.write('multiply_tile costs: {} seconds\\n'.format(ttile))\n f.write('multiply_mkl costs: {} seconds\\n'.format(tmkl))\n f.write('tile speed-up over naive: {}\\n'.format(tnaive / ttile))\n f.write('MKL speed-up over naive: {}\\n'.format(tnaive / tmkl))\n f.close()\n\n def test_large(self):\n self.calculation_helper(1000, 1000, 1000, 24)\n\n def test_random(self):\n last = 1\n while last < 10:\n p = random.randint(1, 1000)\n q = random.randint(1, 1000)\n r = random.randint(1, 1000)\n tsize = random.randint(1, 1000)\n self.calculation_helper(p, q, r, tsize)\n last = last+1","sub_path":"hw3/JieJhih/test_matrix.py","file_name":"test_matrix.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566058141","text":"# WS 8_2\n#\n# Write a function, numeric_value_ofW(Word), which takes a Word, a\n# combination of only lower case and upper case letters, and which uses\n# numeric_value_of(Letter) tor return the value of the word as the sum of the\n# values of the letters in Word.\n#-------------------------------------\n\ndef numeric_value_ofL(Letter):\n '''takes a single lower case or upper case letter & which\n returns 1 for an ‘a’ for an ‘A’, 2 for a ‘b’ or a ‘B’. …. 
26 for a ‘z’ or a ‘Z’.'''\n alpha='abcdefghijklmnopqrstuvwxyz'\n Letter = Letter.casefold() #heck yah, thank you pyhton 3.3!!\n for i in Letter:\n return alpha.find(i)+1\n else:\n return 'error'\n\ndef numeric_value_ofW(Word):\n '''takes a word of ONLY uppercase OR ONLY lowercase &\n uses numeric_value_of(Letter) as a helper.\n Returns the value of the word as a sum of the individal character values'''\n alpha='abcdefghijklmnopqrstuvwxyz'\n wordValue = 0\n if (word.isupper()==True or word.islower()==True):\n for letter in Word:\n wordValue += numeric_value_ofL(letter)\n return wordValue\n else:\n return 'Word is not entirely UPPER/LOWER case'\n\nwords=['hellO','HO','OHAIYOU']\nfor word in words:\n print(numeric_value_ofW(word))\n","sub_path":"Intro to Python/In Class WS/Chapter_8/WS_Ch08-02.py","file_name":"WS_Ch08-02.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583453812","text":"import pathlib\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom settings import *\r\nfrom torch.utils.data import Dataset\r\n\r\n\r\nclass CatsAndDogs(Dataset):\r\n\r\n def __init__(self, mode=\"train\"):\r\n super(CatsAndDogs, self).__init__()\r\n\r\n if mode != \"train\" and mode != \"test\":\r\n raise ValueError(\"INVALID mode: \" + mode)\r\n\r\n self.mode = mode\r\n self.data_list, self.labels = self._load_data_list(mode)\r\n\r\n def _load_data_list(self, mode):\r\n data_list = []\r\n labels = []\r\n\r\n for d in pathlib.Path(DATA_PATH).glob(\"*\"):\r\n if d.is_dir():\r\n labels.append(d.name)\r\n data_list.append([])\r\n\r\n for f in d.glob(\"*.jpg\"):\r\n data_list[-1].append((str(f), float(labels.index(d.name))))\r\n\r\n flattened_data_list = []\r\n\r\n for i in range(len(data_list)):\r\n data_length = len(data_list[i])\r\n if mode == \"train\":\r\n data_list[i] = data_list[i][:int(data_length*0.7)]\r\n elif mode == \"test\":\r\n data_list[i] = data_list[i][int(data_length*0.7):]\r\n\r\n flattened_data_list.extend(data_list[i])\r\n\r\n return flattened_data_list, labels\r\n\r\n def __len__(self):\r\n return len(self.data_list)\r\n\r\n def __getitem__(self, index):\r\n path, label = self.data_list[index]\r\n image = self._load_image(path)\r\n return image, label\r\n\r\n def _load_image(self, path):\r\n image = Image.open(path)\r\n image = image.resize((IMAGE_SIZE, IMAGE_SIZE))\r\n image = np.array(image)\r\n image = (image.astype(np.float32) - 128)/256\r\n image = image.transpose(2, 0, 1)\r\n return image\r\n","sub_path":"05-convolutional-network/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650557277","text":"from fake_useragent import UserAgent\nimport requests\nimport time\n\nua = UserAgent(verify_ssl=False)\nheaders = {\n 'User-Agent': ua.random,\n 'Referer': 'https://www.processon.com/',\n 'path': '/login?f=index'\n}\n\n\nclass LoginFailed(Exception):\n def __init__(self, ErrorInfo):\n super().__init__(self, ErrorInfo)\n self.error_info = ErrorInfo\n def __str__(self):\n return self.error_info\n\n\ns = requests.Session()\nlogin_url = 'https://www.processon.com/login'\nform_data = {\n 'login_email': '18500000000', # 填入有效用户名密码即可登录\n 'login_password': '1234567890',\n}\n\n# post数据前获取cookie\npre_login = 'https://www.processon.com/login?f=index'\npre_resp = s.get(pre_login, headers=headers)\ntime.sleep(4)\n\ntry:\n response = s.post(login_url, data=form_data, 
headers=headers, cookies=s.cookies)\n if '登录失败' in response.text: # 判断源码中有无登录失败关键字\n raise LoginFailed('登录失败')\n elif '我的文件' in response.text: # 判断源码中有无我的文件关键字\n print('确认登录成功')\n print('————————————')\nexcept (LoginFailed, Exception) as e:\n print(e)\nfinally:\n print(f'返回码是: {response.status_code}') # 打印http服务器返回码\n","sub_path":"Week02/job_2.py","file_name":"job_2.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"178206706","text":"_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # why need None at beginning\n\n\ndef _is_leap(year):\n \"year -> 1 if leap year, else 0.\"\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)\n\ndef _days_in_month(year, month):\n \"year, month -> number of days in that month in that year.\"\n assert 1 <= month <= 12, month\n if month == 2 and _is_leap(year):\n return 29\n return _DAYS_IN_MONTH[month]\n\nprint(_days_in_month(1999,20))","sub_path":"time/.idea/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"484570765","text":"\"\"\" internal chat \"\"\"\nimport ptah\nfrom datetime import datetime\nfrom pyramid_jca import Protocol\n\n\nclass ChatProtocol(Protocol):\n\n def __init__(self, *args, **kw):\n super(ChatProtocol, self).__init__(*args, **kw)\n\n principal = ptah.auth_service.get_current_principal()\n self.user_id = principal.__uri__.replace(':','_')\n self.user_name = principal.name\n\n def on_closed(self):\n super(ChatProtocol, self).on_closed()\n\n found = False\n for user in self.instances.values():\n if user.user_id == self.user_id:\n found = True\n break\n\n if not found:\n self.broadcast('disconnected', {'uid': self.user_id})\n\n def msg_init(self, data):\n \"\"\" init message handler \"\"\"\n users = []\n for user in self.instances.values():\n if user.user_id != self.user_id:\n users.append({'uid': user.user_id, 'name': user.user_name})\n\n info = {'uid': self.user_id,\n 'name': self.user_name,\n 'users': users}\n self.send('list', info, tmpl='chat.user')\n\n msg = {'uid': self.user_id,\n 'name': self.user_name}\n self.broadcast('joined', msg, tmpl='chat.user')\n\n def msg_message(self, data):\n \"\"\" 'message' message handler \"\"\"\n msg = {'uid': self.user_id,\n 'name': self.user_name,\n 'date': datetime.utcnow(),\n 'message': data['message']}\n\n for user in self.instances.values():\n if user.user_id == data['uid']:\n user.send('message', msg)\n","sub_path":"ptah_chat/ptah_chat/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"528337603","text":"#!/usr/bin/env python\n# Copyright (c) 2014-2015, Tibor Kiss \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# 
this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n\"\"\"Kelly Criterion - (c) 2014-2015, Tibor Kiss \n\nUsage:\n ./kelly_criterion.py [--risk-free-rate=] ...\n\nOptions:\n --risk-free-rate= Annualized percentage of the Risk Free Rate. [default: 0.04]\n\n\"\"\"\n\nimport sys\nfrom datetime import datetime\n\nfrom docopt import docopt\n\nimport pandas.io.data as web\nfrom pandas import DataFrame\n\nfrom numpy.linalg import inv\n\n\ndef calc_kelly_leverages(securities, start_date, end_date, risk_free_rate=0.04):\n \"\"\"Calculates the optimal leverages for the given securities and time frame.\n Returns a list of (security, leverage) tuple with the calculate optimal leverages.\n\n Note: risk_free_rate is annualized\n \"\"\"\n f = {}\n ret = {}\n excess_return = {}\n\n # Download the historical prices from Yahoo Finance and calculate the\n # excess return (return of security - risk free rate) for each security.\n for symbol in securities:\n try:\n hist_prices = web.DataReader(symbol, 'yahoo', start_date, end_date)\n except IOError as e:\n print('Unable to download data for %s. Reason: %s' % (symbol, str(e)))\n return None\n\n f[symbol] = hist_prices\n\n ret[symbol] = hist_prices['Adj Close'].pct_change()\n excess_return[symbol] = (ret[symbol] - (risk_free_rate / 252)) # risk_free_rate is annualized\n\n # Create a new DataFrame based on the Excess Returns.\n df = DataFrame(excess_return).dropna()\n\n # Calculate the CoVariance and Mean of the DataFrame\n C = 252 * df.cov()\n M = 252 * df.mean()\n\n # Calculate the Kelly-Optimal Leverages using Matrix Multiplication\n F = inv(C).dot(M)\n\n # Return a list of (security, leverage) tuple\n return zip(df.columns.values.tolist(), F)\n\n\ndef main():\n \"\"\"Entry point of Kelly Criterion calculation.\"\"\"\n\n print(\"Kelly Criterion calculation\")\n args = docopt(__doc__, sys.argv[1:])\n\n # Parse risk-free-rate\n try:\n risk_free_rate = float(args['--risk-free-rate'])\n except ValueError as e:\n print('Error converting risk-free-rate to float: %s' % args['--risk-free-rate'])\n sys.exit(-1)\n\n # Verify risk-free-rate\n if not 0 <= risk_free_rate <= 1.0:\n print('Error: risk-free-rate is not in between 0 and 1: %.2f' % risk_free_rate)\n sys.exit(-1)\n\n # Parse start and end dates\n try:\n start_date = datetime.strptime(args[''], \"%Y-%m-%d\").date()\n except ValueError as e:\n print('Error parsing start-date: %s' % args[''])\n sys.exit(-1)\n\n try:\n end_date = datetime.strptime(args[''], \"%Y-%m-%d\").date()\n except ValueError as e:\n print('Error parsing end-date: %s' % args[''])\n sys.exit(-1)\n\n print('Arguments: risk-free-rate=%s start-date=%s end-date=%s securities=%s' % (args['--risk-free-rate'], start_date, end_date, args['']))\n print('')\n\n # Calculate the Kelly Optimal leverages\n leverages = calc_kelly_leverages(args[''], start_date, end_date, risk_free_rate)\n\n # Print the results if calculation was successful\n if leverages:\n print(\"Leverages per security: \")\n for (name, val) in leverages:\n print(\" %s: %.2f\" % (name, val))\n\n print(\"Sum leverage: %.2f \" % reduce(lambda x, y: x+y, map(lambda z: z[1], leverages)))\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"kelly_criterion.py","file_name":"kelly_criterion.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"462495519","text":"import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_images(h5file):\n with h5py.File(h5file, 'r') as h5f:\n for img_name in h5f.keys():\n img_group = h5f[img_name]\n hist = img_group['hist'][:]\n energies = img_group['energies'][:]\n angles = np.linspace(0, 2 * np.pi, hist.shape[1])\n \n npulses = img_group.attrs['npulses']\n esase = img_group.attrs['esase']\n streakamp = img_group.attrs['streakamp']\n \n plt.figure(figsize=(10, 6))\n plt.imshow(hist.T, extent=[energies.min(), energies.max(), angles.min(), angles.max()],\n origin='lower', aspect='auto', cmap='viridis')\n plt.colorbar(label='Counts')\n plt.xlabel('Energy')\n plt.ylabel('Angle (radians)')\n plt.title(f'Image: {img_name}\\n' +\n f'Pulses: {npulses}, ESASE: {esase}, Streak Amp: {streakamp}')\n plt.show()\n\nif __name__ == '__main__':\n h5file_path = 'testing_simdata.h5' # Update this with the correct path to your HDF5 file\n plot_images(h5file_path)\n","sub_path":"src/read_h5_2.py","file_name":"read_h5_2.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"601068522","text":"import re\r\nfrom collections import Counter\r\n\r\nf 
= open('scorelib.txt', 'r', encoding=\"utf8\")\r\nmusicDict = {}\r\n\r\nfor line in f:\r\n parseData = re.match(\"([a-zA-Z ]*:) (.*)\", line)\r\n\r\n if parseData != None:\r\n if parseData.group(2) == \"\":\r\n musicDict.setdefault(parseData.group(1), []).append(\"unknown\")\r\n else:\r\n musicDict.setdefault(parseData.group(1), []).append(parseData.group(2))\r\n\r\ncountAllValues = {}\r\ncountOccurrences = {}\r\n\r\nfor key in musicDict:\r\n countAllValues[key] = (len(musicDict[key]))\r\n if key != \"Print Number:\":\r\n countOccurrences[key] = (Counter(musicDict[key]))\r\n\r\nfor key, val in musicDict.items():\r\n print(key, val)\r\n\r\nprint(\"\\nNumber of occurrences for each value in all keys:\")\r\nfor key in countOccurrences:\r\n print(key, countOccurrences[key])\r\nprint(\"\\nNumber of all values in different keys:\", countAllValues)\r\n","sub_path":"Cviko1/cv1.py","file_name":"cv1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"580267129","text":"from keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n# 기온 데이터 읽어 들이기\r\nfile_dir = os.path.dirname(\"D:/LTK_AI_Study/Machine_Learning/Data/\")\r\ndf = pd.read_csv(file_dir+\"/tem10y.csv\", sep=\",\", encoding='utf-8')\r\n\r\n# 데이터를 학습전용과 테스트 전용으로 분리\r\ntrain_year = (df[\"연\"] <= 2015)\r\ntest_year = (df[\"연\"] >= 2016)\r\ninterval = 6\r\n\r\n# 과거 6일의 데이터를 기반으로 학습할 데이터 만들기\r\ndef make_data(data):\r\n x = [] # 학습 데이터\r\n y = [] # 경과\r\n temps = list(data[\"기온\"])\r\n for i in range(len(temps)):\r\n if i < interval: continue\r\n y.append(temps[i])\r\n xa = []\r\n for p in range(interval):\r\n d = i + p - interval\r\n xa.append(temps[d])\r\n x.append(xa)\r\n x = np.array(x)\r\n y = np.array(y)\r\n return (x, y)\r\n\r\nx_train, y_train = make_data(df[train_year])\r\nx_test, y_test = make_data(df[test_year])\r\n\r\nscaler = MinMaxScaler()\r\nscaler.fit(x_train)\r\nx_train = scaler.transform(x_train)\r\nx_test = scaler.transform(x_test)\r\n\r\n\r\n# 케라스 모델 분석\r\nmodel = Sequential()\r\nmodel.add(Dense(128, input_shape=(6,), activation= 'relu'))\r\nmodel.add(Dense(32, activation= 'relu'))\r\nmodel.add(Dense(32, activation= 'relu'))\r\nmodel.add(Dense(32, activation= 'relu'))\r\nmodel.add(Dense(1))\r\n\r\nmodel.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\r\nmodel.fit(x_train,y_train, epochs=200, batch_size=10)\r\nloss, acc = model.evaluate(x_test, y_test, batch_size=3)\r\ny_pridect = model.predict(x_test)\r\nprint(\"정답률=\",acc)\r\n\r\n# 결과 그래프 그리기\r\nplt.figure(figsize=(10, 6), dpi=100)\r\nplt.plot(y_test, c=\"r\")\r\nplt.plot(y_pridect, c=\"b\")\r\nplt.savefig('tenki-kion-lr.png')\r\nplt.show()","sub_path":"Day0806/M05_wather_keras.py","file_name":"M05_wather_keras.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"490507409","text":"#!/usr/bin/env python\n\nimport json\n\nfiles = [\n \"authorasproducer.txt\",\n \"barthes.txt\",\n \"construction.txt\",\n \"designerasauthor.txt\",\n \"fitz.txt\",\n \"foucalt.txt\",\n \"laurenrieders.txt\",\n \"martha.txt\",\n \"michaelrock.txt\",\n \"samreith.txt\",\n \"schopenhauer.txt\",\n \"shakespeare.txt\"\n]\n\ndata = {}\n\nfor filename in files:\n\n with open(filename) as f:\n text = f.read()\n\n lines = []\n prev = 
-1\n for (i, c) in enumerate(text):\n if c in ['.', '?', '!', ';']:\n sentence = text[prev + 1:i + 1]\n sentence = sentence.strip()\n # print sentence\n lines.append(sentence)\n prev = i\n data[filename] = lines\n\njsondata = json.dumps(data, indent=2)\nwith open(\"authorship.json\", 'w') as jf:\n jf.write(jsondata)\n","sub_path":"authorship/create_json_from_text.py","file_name":"create_json_from_text.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"210663054","text":"import pika\nimport time\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel =connection.channel()\n\n\n\ndef Hello_Mapsa(ch, method, properties, body):\n print(\"ghamarAli ok dad\")\n ch.basic_ack(delivery_tag = method.delivery_tag)\n time.sleep(5)\n\n\n\n# channel.basic_qos(prefetch_count=1)\nchannel.basic_consume(queue = 'ghamarALi', on_message_callback = Hello_Mapsa)\n\n\nprint(\"worker is on!\")\n\n\nchannel.start_consuming()","sub_path":"consumer_ghamarali.py","file_name":"consumer_ghamarali.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"405316043","text":"from flask import Flask, render_template, request, url_for, redirect\nimport spotify as sp\nimport pexels as px\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n ''' home page '''\n return render_template('index.html')\n\n@app.route('/search-song/', methods=['GET','POST'])\ndef search():\n '''get result for a search'''\n if request.method == 'POST':\n query = str(request.form['query'])\n if not query:\n return render_template('search.html', empty=True)\n \n # get basic track info\n try:\n song_info = sp.search_song(query)\n except:\n return render_template('search.html', error=True)\n \n # get track features and visualization\n features = sp.song_features(song_info['song_id'])\n chart_uri = sp.features_visualization(features)\n key = sp.key(features['key'])\n mode = sp.mode(features['mode'])\n\n # get recommendations\n recommendations = sp.recommendations(song_id=song_info['song_id'], artist_id=song_info['artist_id'])\n \n # get emotion tag and search for pics\n emotion = sp.emotion(features)\n emotion_pics = px.search_photo(emotion)\n\n return render_template('result.html', \n name=song_info['song'], artist=song_info['artist'], albumn=song_info['album'], release_date=song_info['release_date'],\n recommendations=recommendations, \n key=key, mode=mode, tempo=features['tempo'], meter=features['time_signature'],\n chart=chart_uri, \n emotion=emotion, pics=emotion_pics)\n \n return render_template('search.html')\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"390517464","text":"\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nclass PLA_14:\n def __init__(self, N, err=0):\n # Random linearly separated data\n xA,yA,xB,yB = [random.uniform(-5, 5) for i in range(4)] \n \n #V is the weight vector with values[w0,w1,w2] generated in a random uniform manner.\n self.targetWeight = np.array([xB*yA-xA*yB, yB-yA, xA-xB])\n \n #this is the data set of N rows \n self.dataSet = self.generate_dataset(N)\n self.classificationErr=err\n \n def generate_dataset(self, N):\n dataSet = []\n for i in range(N):\n #generate x1,x2 
for ith row in data set\n feature1,feature2 = [random.uniform(-5, 5) for i in range(2)]\n \n #feature vector for ith row [x0,x1,x2]\n featureVector = np.array([1,feature1,feature2])\n \n #solution space for xi, s=sign(WT.X)\n output = int(np.sign(self.targetWeight.T.dot(featureVector)))\n \n dataSet.append((featureVector, output))\n\n return dataSet\n \n def plot_graph(self, currHypoWeight=[]):\n plt.xlim(-5,5)\n plt.ylim(-5,5)\n weightVector = self.targetWeight\n slope, intercept = -(weightVector[1]/weightVector[2]), -weightVector[0]/weightVector[2] # a=slope, b= coeff if line\n l = np.linspace(-5,5)\n plt.plot(l, slope*l+intercept, 'k--') # plot a line al+b\n cols = {1: 'g', -1: 'r'}\n for feature,output in self.dataSet:\n plt.plot(feature[1], feature[2], cols[output]+'o') \n if len(currHypoWeight)!=0:\n hypothesis_slope = -currHypoWeight[1]/currHypoWeight[2]\n hypothesis_intercept = -currHypoWeight[0]/currHypoWeight[2]\n plt.plot(l, hypothesis_slope*l+hypothesis_intercept, 'b-', lw=2)\n \n plt.show()\n \n def classification_error(self, currWeights):\n # Error defined as fraction of misclassified points\n datSet = self.dataSet\n M = len(datSet)\n datSet_mispts = 0\n for features,output in datSet:\n if int(np.sign(currWeights.T.dot(features))) != output:\n datSet_mispts += 1\n error = datSet_mispts / float(M)\n return error\n \n def rand_missclassfied_pt(self, currHypoWeight):\n # Choose a random point among the misclassified\n datSet = self.dataSet\n mispts = []\n for features,output in datSet:\n if int(np.sign(currHypoWeight.T.dot(features))) != output: # check if h(x) != f(x)\n mispts.append((features, output))\n return mispts[random.randrange(0,len(mispts))] # Return the missclassified point \n \n def pla(self):\n \n currHypoWeight = np.zeros(3) # weight vector of current hypotheses\n N = len(self.dataSet)\n iterations = 0\n # Iterate until all points are correctly classified\n\n while self.classification_error(currHypoWeight) > self.classificationErr:\n iterations += 1\n # Pick random misclassified point\n missclass_features, missclass_output = self.rand_missclassfied_pt(currHypoWeight)\n # Update weights\n nextHypoWeight = currHypoWeight + missclass_features*missclass_output\n currHypoWeight=nextHypoWeight\n self.plot_graph(currHypoWeight)\n plt.title('N = %s, Iteration %s\\n'\n % (str(N),str(iterations)))\n \n plt.show()\n \n self.finalWeight = currHypoWeight\n self.iterations = iterations\n \n def print_dataset(self):\n for feature,output in self.dataSet:\n print(feature[1],\" \",feature[2],\" :: \",output)\n \n def print_final_weights(self): \n print(\"Weight vector of learned hypothesis function g is \\n\",self.finalWeight)\n \n def print_iterations(self):\n print(\"Iterations it took to converge towards the target function: \\n\",self.iterations)\n \n \n \n\n \np = PLA_14(100,0) # N,error value -- to missclassify 3 points out of N, then pass 3/N as err value.\np.plot_graph() #plot target function shown by black line and random points which are classified into +1(blue) and -1(red) \n\np.pla() #calling perceptron model (blue line is hypothesis and black is target function)\np.print_dataset() # print dataset that is generated.\np.print_final_weights() # this is the learned weights after running PLA algorithm.\np.print_iterations()\n ","sub_path":"PLA.py","file_name":"PLA.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"40158136","text":"# General arr lib\nimport numpy as np\n\n# 
Compile py code\nfrom numba import jit\nfrom numba import prange\n\n# Multithreading\nimport dask.array as da\nimport dask as dk\ndk.config.set(scheduler='processes')\n\nfrom dask_image.ndfilters import generic_filter as d_gf\n\nfrom collections import deque\n\nfrom numpy import random, nanmax, argmax, unravel_index\nfrom scipy.spatial.distance import pdist, squareform\nfrom skimage.filters import gabor\nfrom skimage.restoration import denoise_bilateral\n\nimport datetime\n\n@jit\ndef create_circular_mask(radius):\n \"\"\"\n Creates a circular mask\n \"\"\"\n kernel = np.zeros((2*radius+1, 2*radius+1))\n y, x = np.ogrid[-radius:radius+1, -radius:radius+1]\n mask = x**2 + y**2 <= radius**2\n mask[radius][radius] = 0\n kernel[mask] = 1\n return kernel\n\n\n@jit(nopython=True)\ndef _reclassify_impoundment(arr):\n \"\"\"\n Internaly used normalization of impoundment index reclassification different threashhold\n \"\"\"\n new_arr = arr.copy()\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if new_arr[i, j] == 0:\n new_arr[i, j] = 0\n elif new_arr[i, j] < 0.002:\n new_arr[i, j] = 5\n elif arr[i, j] < 0.005:\n new_arr[i, j] = 50\n elif arr[i, j] < 0.02:\n new_arr[i, j] = 100\n elif arr[i, j] < 0.05:\n new_arr[i, j] = 1000\n elif arr[i, j] < 0.1:\n new_arr[i, j] = 10000\n elif arr[i, j] < 0.3:\n new_arr[i, j] = 100000\n else:\n new_arr[i, j] = 1000000\n return new_arr\n\n\n@jit\ndef impoundmentAmplification(arr, mask_radius=10):\n \"\"\"\n Amplicatates ditches\n \"\"\"\n norm_arr = da.from_array(_reclassify_impoundment(arr), chunks=(800, 800))\n mask = create_circular_mask(mask_radius)\n return d_gf(d_gf(d_gf(norm_arr, np.nanmean, footprint=mask), np.nanmean, footprint=mask), np.nanmedian, footprint=mask).compute(scheduler='processes')\n\n\n@jit(nopython=True)\ndef _reclassify_hpmf_filter(arr):\n \"\"\"\n Internal reclassification wrapper\n \"\"\"\n binary = np.copy(arr)\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i][j] < 0.000001 and arr[i][j] > -0.000001:\n binary[i][j] = 100\n else:\n binary[i][j] = 0\n return binary\n\n\n@jit(nopython=True)\ndef _reclassify_hpmf_filter_mean(arr):\n \"\"\"\n Internal reclassification wrapper\n \"\"\"\n reclassify = np.copy(arr)\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i][j] < 1:\n reclassify[i][j] = 0\n elif arr[i][j] < 3:\n reclassify[i][j] = 1\n elif arr[i][j] < 7:\n reclassify[i][j] = 2\n elif arr[i][j] < 10:\n reclassify[i][j] = 50\n elif arr[i][j] < 20:\n reclassify[i][j] = 75\n elif arr[i][j] < 50:\n reclassify[i][j] = 100\n elif arr[i][j] < 80:\n reclassify[i][j] = 300\n elif arr[i][j] < 100:\n reclassify[i][j] = 600\n else:\n reclassify[i][j] = 1000\n return reclassify\n\n\n@jit\ndef hpmfFilter(arr):\n \"\"\"\n HPMF filter enchances ditches\n \"\"\"\n normalized_arr = da.from_array(\n _reclassify_hpmf_filter(arr), chunks=(800, 800))\n\n mean = d_gf(d_gf(d_gf(d_gf(normalized_arr, np.amax, footprint=create_circular_mask(1)), np.amax, footprint=create_circular_mask(\n 1)), np.median, footprint=create_circular_mask(2)), np.nanmean, footprint=create_circular_mask(5)).compute(scheduler='processes')\n reclassify = da.from_array(\n _reclassify_hpmf_filter_mean(mean), chunks=(800, 800))\n\n return d_gf(reclassify, np.nanmean, footprint=create_circular_mask(7))\n\n\n@jit(nopython=True)\ndef _reclassify_sky_view_non_ditch_amp(arr):\n \"\"\"\n Internal amp reclassification\n \"\"\"\n new_arr = np.copy(arr)\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i, j] < 0.92:\n new_arr[i, j] 
= 46\n elif arr[i, j] < 0.93:\n new_arr[i, j] = 37\n elif arr[i, j] < 0.94:\n new_arr[i, j] = 29\n elif arr[i, j] < 0.95:\n new_arr[i, j] = 22\n elif arr[i, j] < 0.96:\n new_arr[i, j] = 16\n elif arr[i, j] < 0.97:\n new_arr[i, j] = 11\n elif arr[i, j] < 0.98:\n new_arr[i, j] = 7\n elif arr[i, j] < 0.985:\n new_arr[i, j] = 4\n elif arr[i, j] < 0.99:\n new_arr[i, j] = 2\n else:\n new_arr[i, j] = 1\n return new_arr\n\n\n@jit\ndef _skyViewGabor(merged, gabors):\n for i in range(len(merged)):\n for j in range(len(merged[i])):\n merged[i][j] = 0\n for i in range(len(merged)):\n for j in range(len(merged[i])):\n for k in range(len(gabors)):\n merged[i][j] += gabors[k][i][j]\n return merged\n\n#@jit\ndef skyViewGabor(skyViewArr):\n delayed_gabors = []\n for i in np.arange(0.03, 0.08, 0.01):\n for j in np.arange(0, 3, 0.52):\n delayed_gabor = dk.delayed(gabor)(skyViewArr, i, j)[0]\n delayed_gabors.append(delayed_gabor)\n gabors = dk.compute(delayed_gabors)\n print(type(gabors[0]))\n print(len(gabors[0]))\n #print(len(gabors))\n #gabors = gabors.map(lambda x : x[0])\n return _skyViewGabor(skyViewArr.copy(), gabors[0])\n\n\n\n\n\n@jit\ndef skyViewNonDitchAmplification(arr):\n arr = da.from_array(arr, chunks=(800, 800))\n arr = d_gf(arr, np.nanmedian, footprint=create_circular_mask(25)\n ).compute(scheduler='processes')\n arr = da.from_array(\n _reclassify_sky_view_non_ditch_amp(arr), chunks=(800, 800))\n return d_gf(arr, np.nanmean, footprint=create_circular_mask(10))\n\n\n@jit\ndef conicMean(arr, maskRadius, threshold):\n # Standard values: maskRadius = 5, threshold = 0.975\n masks = []\n for i in range(0, 8):\n masks.append(create_conic_mask(maskRadius, i))\n newArr = arr.copy()\n amountOfThresholds = 0\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n values = meanFromMasks(arr, (i, j), masks)\n dir1 = 2\n dir2 = 2\n dir3 = 2\n dir4 = 2\n if values[0] < threshold and values[4] < threshold:\n dir1 = values[0] if values[0] < values[4] else values[4]\n if values[1] < threshold and values[5] < threshold:\n dir2 = values[1] if values[0] < values[5] else values[4]\n if values[2] < threshold and values[6] < threshold:\n dir3 = values[2] if values[0] < values[6] else values[4]\n if values[3] < threshold and values[7] < threshold:\n dir4 = values[3] if values[0] < values[7] else values[4]\n dir5 = dir1 if dir1 < dir2 else dir2\n dir6 = dir3 if dir3 < dir4 else dir4\n lowest = dir5 if dir5 < dir6 else dir6\n if lowest < threshold:\n amountOfThresholds += 1\n newArr[i][j] = 0.95 * lowest if lowest * \\\n 0.95 < arr[i][j] else arr[i][j]\n print(amountOfThresholds)\n return newArr\n\n\n@jit\ndef meanFromMasks(arr, index, masks):\n row = index[0]\n col = index[1]\n halfMask = len(masks[0]) // 2\n arrLenRow = len(arr)\n arrLenCol = len(arr[row])\n values = np.zeros(8)\n elementAmounts = np.zeros(8)\n for i in range(-halfMask, halfMask):\n for j in range(-halfMask, halfMask):\n if arrLenCol > col + j + 1 and col + j + 1 >= 0 and arrLenRow > row + i + 1 and row + i + 1 >= 0:\n if masks[0][i + halfMask][j + halfMask] == 1:\n values[0] += arr[row + i][col + j]\n elementAmounts[0] += 1\n elif masks[1][i + halfMask][j + halfMask] == 1:\n values[1] += arr[row + i][col + j]\n elementAmounts[1] += 1\n elif masks[2][i + halfMask][j + halfMask] == 1:\n values[2] += arr[row + i][col + j]\n elementAmounts[2] += 1\n elif masks[3][i + halfMask][j + halfMask] == 1:\n values[3] += arr[row + i][col + j]\n elementAmounts[3] += 1\n elif masks[4][i + halfMask][j + halfMask] == 1:\n values[4] += arr[row + i][col + j]\n 
elementAmounts[4] += 1\n elif masks[5][i + halfMask][j + halfMask] == 1:\n values[5] += arr[row + i][col + j]\n elementAmounts[5] += 1\n elif masks[6][i + halfMask][j + halfMask] == 1:\n values[6] += arr[row + i][col + j]\n elementAmounts[6] += 1\n elif masks[7][i + halfMask][j + halfMask] == 1:\n values[7] += arr[row + i][col + j]\n elementAmounts[7] += 1\n\n for i in range(len(values)):\n values[i] = values[i] / \\\n elementAmounts[i] if elementAmounts[i] != 0 else 0.99\n return values\n\n\n@jit\ndef create_conic_mask(radius, direction):\n kernel = np.zeros((2*radius+1, 2*radius+1))\n y, x = np.ogrid[-radius:radius+1, -radius:radius+1]\n\n if direction == 0: # topright\n mask = (x > y) & (x < abs(y)) & (x**2 + y**2 <= radius**2) & (x > 0)\n elif direction == 1: # righttop\n mask = (x > abs(y)) & (x**2 + y**2 <= radius**2) & (y < 0)\n elif direction == 2: # rightbottom\n mask = (x > abs(y)) & (x**2 + y**2 <= radius**2) & (y > 0)\n elif direction == 3: # bottomright\n mask = (abs(x) < y) & (x**2 + y**2 <= radius**2) & (x > 0)\n elif direction == 4: # bottomleft\n mask = (abs(x) < y) & (x**2 + y**2 <= radius**2) & (x < 0)\n elif direction == 5: # leftbottom\n mask = (abs(x) > abs(y)) & (x < abs(y)) & (\n x**2 + y**2 <= radius**2) & (y > 0)\n elif direction == 6: # lefttop\n mask = (abs(x) > abs(y)) & (x < abs(y)) & (\n x**2 + y**2 <= radius**2) & (y < 0)\n elif direction == 7: # topleft\n mask = (x > y) & (x < abs(y)) & (x**2 + y**2 <= radius**2) & (x < 0)\n kernel[mask] = 1\n return kernel\n\n\n@jit(nopython=True)\ndef _slopeNonDitchAmplifcation_normalize(arr, new_arr):\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i][j] < 8:\n new_arr[i][j] = 0\n elif arr[i][j] < 9:\n new_arr[i][j] = 20\n elif arr[i][j] < 10:\n new_arr[i][j] = 25\n elif arr[i][j] < 11:\n new_arr[i][j] = 30\n elif arr[i][j] < 13:\n new_arr[i][j] = 34\n elif arr[i][j] < 15:\n new_arr[i][j] = 38\n elif arr[i][j] < 17:\n new_arr[i][j] = 42\n elif arr[i][j] < 19:\n new_arr[i][j] = 46\n elif arr[i][j] < 21:\n new_arr[i][j] = 50\n else:\n new_arr[i][j] = 55\n return new_arr\n\n\n@jit\ndef slopeNonDitchAmplification(arr):\n new_arr = arr.copy()\n arr = d_gf(da.from_array(arr, chunks=(800, 800)), np.nanmedian,\n footprint=create_circular_mask(35)).compute(scheduler='processes')\n new_arr = _slopeNonDitchAmplifcation_normalize(arr, new_arr)\n return d_gf(da.from_array(new_arr, chunks=(800, 800)), np.nanmean, footprint=create_circular_mask(15))\n\n\n@jit(nopython=True)\ndef rasterToZones(arr, zoneSize, threshold):\n newArr = arr.copy()\n for i in range(0, len(arr), zoneSize):\n for j in range(0, len(arr[i]), zoneSize):\n numberOfClassified = 0\n if i < len(arr) - zoneSize and j < len(arr[i]) - zoneSize:\n for k in range(zoneSize):\n for l in range(zoneSize):\n if arr[i + k][j + l] == 1:\n numberOfClassified += 1\n if numberOfClassified > (zoneSize**2)/threshold:\n for k in range(zoneSize):\n for l in range(zoneSize):\n newArr[i + k][j + l] = 1\n else:\n for k in range(zoneSize):\n for l in range(zoneSize):\n newArr[i + k][j + l] = 0\n return newArr\n\n\n@jit(nopython=True)\ndef probaToZones(arr, zoneSize, threshold):\n newArr = np.zeros(arr.shape)\n print(newArr.shape)\n for i in range(0, len(arr), zoneSize):\n for j in range(0, len(arr[i]), zoneSize):\n totalProba = 0\n if i < len(arr) - zoneSize and j < len(arr[i]) - zoneSize:\n for k in range(zoneSize):\n for l in range(zoneSize):\n totalProba += arr[i+k][j+l]\n if totalProba / zoneSize**2 > threshold:\n for k in range(zoneSize):\n for l in 
range(zoneSize):\n newArr[i + k][j + l] = 1\n else:\n for k in range(zoneSize):\n for l in range(zoneSize):\n newArr[i + k][j + l] = 0\n return newArr\n\n\n@jit(nopython=True)\ndef _customeRemoveNoise(arr, max_arr, new_arr, threshold, selfThreshold):\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if max_arr[i][j] < threshold:\n if arr[i][j] > selfThreshold:\n new_arr[i][j] *= 0.5\n else:\n new_arr[i][j] *= 0.25\n return new_arr\n\n\n@jit\ndef customRemoveNoise(arr, radius, threshold, selfThreshold):\n max_arr = d_gf(da.from_array(arr, chunks=(800, 800)), np.nanmax,\n footprint=create_circular_mask(radius)).compute(scheduler='processes')\n return _customeRemoveNoise(arr, max_arr, np.copy(arr), threshold, selfThreshold)\n\ndef find_max_distance(A):\n \"\"\"\n Returns the maximum distance from 2x points\n each point is represented by a x,y cord.\n \"\"\"\n #assert(A.shape[1] == 2)\n return nanmax(squareform(pdist(A)))\n\n\ndef removeIslands(arr, zoneSize, lowerIslandThreshold, upperIslandThreshold, ratioThreshold):\n newArr = arr.copy()\n examinedPoints = set()\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i][j] == 1 and (i, j) not in examinedPoints:\n island = getIslandArray(arr, (i, j), zoneSize)\n islandSize = len(island)\n if islandSize < upperIslandThreshold:\n cluster_distance = find_max_distance(island)\n for k in range(islandSize):\n examinedPoints.add(island[k])\n if islandSize < upperIslandThreshold:\n if islandSize < lowerIslandThreshold:\n newArr[island[k][0]][island[k][1]] = 0\n elif islandSize / cluster_distance > ratioThreshold:\n newArr[island[k][0]][island[k][1]] = 0\n return newArr \n \n@jit\ndef getIslandArray(arr, index, zoneSize):\n arrayOfPoints = []\n iMax = len(arr) - 1\n jMax = len(arr[0]) - 1\n i = index[0]\n j = index[1]\n FIFOQueue = deque([(i, j)])\n examinedElements = set()\n examinedElements.add((i, j))\n while (len(FIFOQueue) > 0):\n currentIndex = FIFOQueue.popleft()\n i = currentIndex[0]\n j = currentIndex[1]\n if i >= 0 and i < iMax and j >= 0 and j < jMax and arr[i][j] == 1:\n arrayOfPoints.append((i, j))\n # add horizontally and vertically\n if (i+1, j) not in examinedElements:\n FIFOQueue.append((i+1, j))\n examinedElements.add((i+1, j))\n if (i-1, j) not in examinedElements:\n FIFOQueue.append((i-1, j))\n examinedElements.add((i-1, j))\n if (i, j+1) not in examinedElements:\n FIFOQueue.append((i, j+1))\n examinedElements.add((i, j+1))\n if (i, j-1) not in examinedElements:\n FIFOQueue.append((i, j-1))\n examinedElements.add((i, j-1))\n # add diagonally\n if (i+1, j+1) not in examinedElements:\n FIFOQueue.append((i+1, j+1))\n examinedElements.add((i+1, j+1))\n if (i-1, j+1) not in examinedElements:\n FIFOQueue.append((i-1, j+1))\n examinedElements.add((i-1, j+1))\n if (i+1, j-1) not in examinedElements:\n FIFOQueue.append((i+1, j-1))\n examinedElements.add((i+1, j-1))\n if (i-1, j-1) not in examinedElements:\n FIFOQueue.append((i-1, j-1))\n examinedElements.add((i-1, j-1))\n\n # Add one zone away\n # add horizontally and vertically\n if (i+1 + zoneSize, j) not in examinedElements:\n FIFOQueue.append((i+1 + zoneSize, j))\n examinedElements.add((i+1 + zoneSize, j))\n if (i-1 - zoneSize, j) not in examinedElements:\n FIFOQueue.append((i-1 - zoneSize, j))\n examinedElements.add((i-1 - zoneSize, j))\n if (i, j+1 + zoneSize) not in examinedElements:\n FIFOQueue.append((i, j+1 + zoneSize))\n examinedElements.add((i, j+1 + zoneSize))\n if (i, j-1 - zoneSize) not in examinedElements:\n FIFOQueue.append((i, j-1 - zoneSize))\n 
examinedElements.add((i, j-1 - zoneSize))\n # add diagonally\n if (i+1 + zoneSize, j+1 + zoneSize) not in examinedElements:\n FIFOQueue.append((i+1 + zoneSize, j+1 + zoneSize))\n examinedElements.add((i+1 + zoneSize, j+1 + zoneSize))\n if (i-1 - zoneSize, j+1 + zoneSize) not in examinedElements:\n FIFOQueue.append((i-1 - zoneSize, j+1 + zoneSize))\n examinedElements.add((i-1 - zoneSize, j+1 + zoneSize))\n if (i+1 + zoneSize, j-1 - zoneSize) not in examinedElements:\n FIFOQueue.append((i+1 + zoneSize, j-1 - zoneSize))\n examinedElements.add((i+1 + zoneSize, j-1 - zoneSize))\n if (i-1 - zoneSize, j-1 - zoneSize) not in examinedElements:\n FIFOQueue.append((i-1 - zoneSize, j-1 - zoneSize))\n examinedElements.add((i-1 - zoneSize, j-1 - zoneSize))\n return arrayOfPoints\n\n\n\n# ----------------------- post -----------------\n\n#@jit(\"float64[:](float64[:,:], int32, int32, int32[:, :, :] )\", nopython=True)\n@jit(nopython=True)\ndef probaMeanFromMasks(arr, row, col, masks):\n halfMask = len(masks[0]) // 2 \n arrLenRow = len(arr)\n arrLenCol = len(arr[row])\n values = np.zeros(8)\n elementAmounts = np.zeros(8)\n for i in range(-halfMask , halfMask):\n for j in range(-halfMask , halfMask):\n if arrLenCol > col + j + 1 and col + j + 1 >= 0 and arrLenRow > row + i + 1 and row + i + 1 >= 0:\n if masks[0][i + halfMask][j + halfMask] == 1:\n values[0] += arr[row + i][col + j]\n elementAmounts[0] += 1\n elif masks[1][i + halfMask][j + halfMask] == 1:\n values[1] += arr[row + i][col + j]\n elementAmounts[1] += 1\n elif masks[2][i + halfMask][j + halfMask] == 1:\n values[2] += arr[row + i][col + j]\n elementAmounts[2] += 1\n elif masks[3][i + halfMask][j + halfMask] == 1:\n values[3] += arr[row + i][col + j]\n elementAmounts[3] += 1\n elif masks[4][i + halfMask][j + halfMask] == 1:\n values[4] += arr[row + i][col + j]\n elementAmounts[4] += 1\n elif masks[5][i + halfMask][j + halfMask] == 1:\n values[5] += arr[row + i][col + j]\n elementAmounts[5] += 1\n elif masks[6][i + halfMask][j + halfMask] == 1:\n values[6] += arr[row + i][col + j]\n elementAmounts[6] += 1\n elif masks[7][i + halfMask][j + halfMask] == 1:\n values[7] += arr[row + i][col + j]\n elementAmounts[7] += 1\n for i in range(len(values)):\n values[i] = values[i] / elementAmounts[i] if elementAmounts[i] != 0 else 0\n return values\n\n\n#@jit(\"float64[:,:](float64[:,:], float64[:,:], int32[:,:,:], float64)\", nopython=True)\n@jit(nopython=True)\ndef _conicProbaPostProcessing(arr, maxArr, masks, threshold):\n newArr = arr.copy()\n amountOfUpdated = 0\n examinedPoints = 0\n for i in range(len(arr)):\n for j in range(len(arr[i])):\n if arr[i][j] < 0.5 and maxArr[i][j] > 0.6:\n examinedPoints += 1\n trueProba = probaMeanFromMasks(arr, i, j, masks)\n \n updatePixel = 0\n if trueProba[0] > threshold and trueProba[4] > threshold:\n updatePixel = trueProba[0] if trueProba[0] > trueProba[4] else trueProba[4]\n if trueProba[1] > threshold and trueProba[5] > threshold:\n updatePixelAgain = trueProba[1] if trueProba[1] > trueProba[5] else trueProba[5]\n if updatePixelAgain > updatePixel:\n updatePixel = updatePixelAgain\n if trueProba[2] > threshold and trueProba[6] > threshold:\n updatePixelAgain = trueProba[2] if trueProba[6] > trueProba[2] else trueProba[6]\n if updatePixelAgain > updatePixel:\n updatePixel = updatePixelAgain\n if trueProba[3] > threshold and trueProba[7] > threshold:\n updatePixelAgain = trueProba[3] if trueProba[3] > trueProba[7] else trueProba[7]\n if updatePixelAgain > updatePixel:\n updatePixel = updatePixelAgain\n if 
updatePixel != 0:\n amountOfUpdated += 1\n if updatePixel < 0.5:\n updatePixel *= 1.4\n elif updatePixel < 0.55:\n updatePixel *= 1.35\n elif updatePixel < 0.6:\n updatePixel *= 1.3\n elif updatePixel < 0.65:\n updatePixel *= 1.25\n elif updatePixel < 0.7:\n updatePixel *= 1.2\n elif updatePixel < 0.75:\n updatePixel *= 1.15\n elif updatePixel < 0.85:\n updatePixel *= 1.1\n elif updatePixel < 0.9:\n updatePixel *= 1.05\n newArr[i][j] = updatePixel\n return newArr\n\n@jit\ndef conicProbaPostProcessing(arr, maskRadius, threshold):\n masks = []\n maxArr = d_gf(da.from_array(arr,chunks = (800,800)), np.nanmax, footprint=create_circular_mask(5))\n for i in range(0, 8):\n masks.append(create_conic_mask(maskRadius, i))\n\n return _conicProbaPostProcessing(np.array(arr), np.array(maxArr), np.array(masks),threshold)\n \ndef __denoise_bilateral(arr):\n return denoise_bilateral(arr, sigma_spatial=15, multichannel=False)\n\n#@jit(\"float64[:,:](float64[:,:])\")\ndef probaNoiseReduction(arr):\n d = da.from_array(arr, chunks=(800,800))\n return customRemoveNoise(d.map_overlap(__denoise_bilateral, depth=15).compute(), 10, 0.7, 0.4)\n \n\n#@jit(\"float64[:,:](float64[:,:], int32, float64)\")\ndef probaPostProcess(arr, zoneSize, probaThreshold):\n print(\"started:\", str(datetime.datetime.now().hour), str(datetime.datetime.now().minute) )\n deNoise = probaNoiseReduction(arr)\n print(\"deNoise done:\", str(datetime.datetime.now().hour), str(datetime.datetime.now().minute) )\n gapFilled = conicProbaPostProcessing(conicProbaPostProcessing(deNoise, 8, 0.35), 5, 0.3)\n print(\"gapFill done:\", str(datetime.datetime.now().hour), str(datetime.datetime.now().minute) )\n zonesArr = probaToZones(gapFilled, zoneSize, 0.4)\n print(\"probaToZone done:\", str(datetime.datetime.now().hour), str(datetime.datetime.now().minute) )\n noIslands = removeIslands(zonesArr, zoneSize*2, 1500, 10000, 30)\n noIslands = removeIslands(noIslands, zoneSize, 1000, 5000, 20)\n noIslands = removeIslands(noIslands, 0, 500, 3000, 18)\n noIslands = removeIslands(noIslands, 0, 500, 1200, 14)\n return noIslands","sub_path":"helper/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":24489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"112615279","text":"# !pip install -q h5py pyyaml\n#!pip install -q tensorflow==2.0.0-beta1\n\n# Load dependencies and mount google drive, go to datasets folder\nfrom tensorflow import keras\nimport os\nfrom google.colab import drive\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn as skt\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\ndataset_name = 'tst-pipeline_IMUFalse_2.0_1.0_0.5/'\ndata_folder = \"drive/My Drive/Motion Prediction/Data_collection/Datasets/\"\nsave_folder = \"drive/My Drive/Motion Prediction/Code/Results/\"\nmodel_folder = \"drive/My Drive/Motion Prediction/Code/Models/saved_models/\"\nplot_folder = \"drive/My Drive/Motion Prediction/Code/Plots/\"\n\ndef load_data(dataset_path, flatten_x_dim=True, flatten_y_dim=False\n , merge_all=False, merge_train_val=False):\n \"\"\"\n@param dataset_name: \n@param split_out_val: Bool. If true, merge the train and validation sets, which\n would be appropriate if doing k-fold CV. 
If False, then there will be a \n designated validation set that doesn't change \n@return: 6-tuple (train_x, train_y, val_x, val_y, test_x, test_y, )\n Each dataset has shape (n_windows, n_samples_p_window, n_features)\n `train_x` and `train_y` have same `n_windows` (dim 0). Same for other pairs\n All `_x` have the same `n_samples_p_window` (dim 1). Same with all `_y`\n All have the same `n_features (dim 2). \n If `split_out_val`=True, then `val_x` and `val_y` are None.\n\n @param merge_all: over-writes merge_train_val\n \"\"\"\n fnames = ['train_x', 'train_y', 'val_x', 'val_y', 'test_x', 'test_y']\n res = []\n for fname in fnames:\n res.append(np.load(\"{}{}.npy\".format(dataset_path, fname), allow_pickle=True))\n if flatten_x_dim:\n for i in [0,2,4]: res[i] = flatten_samples(res[i])\n if flatten_y_dim:\n for i in [1,3,5]: res[i] = flatten_samples(res[i])\n\n if merge_all:\n res[0] = np.concatenate((res[0], res[2], res[4]))\n res[1] = np.concatenate((res[1], res[3], res[5]))\n res[2:] = [None]*4\n elif merge_train_val:\n res[0] = np.concatenate((res[0], res[2]))\n res[1] = np.concatenate((res[1], res[3]))\n res[2], res[3] = None, None\n\n return res\n\ndef flatten_samples(data):\n \"\"\"\n We have data.shape = (n_windows, n_samples, n_features) \n Reshape to (n_windows, n_samples*n_features), but we have all n_samples from \n feature 0, then all n_samples of feature 1, etc. This requrires a swapaxis()\n \n \"\"\"\n return np.reshape(\n np.swapaxes(data, 2,1)\n , (data.shape[0], -1)\n )\ndef unflatten_samples(data, n_kins=4):\n \"\"\"\n We have data.shape = (n_windows, n_samples, n_features) \n Reshape to (n_windows, n_samples*n_features), but we have all n_samples from \n feature 0, then all n_samples of feature 1, etc. This requrires a swapaxis()\n \n \"\"\"\n return np.swapaxes(\n np.reshape(data, (data.shape[0], 4, -1))\n , 2,1)\n\ndef get_fixed_point_y(y, seconds, rate=60):\n \"\"\"\n @param y: array shape (n_windows, n_samples, n_fatures)\n \"\"\"\n indx = int(seconds*rate) - 1\n return y[:, indx, :]","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"220755648","text":"#-------------------------------------------------------------------------------\r\n# Version info\r\n#-------------------------------------------------------------------------------\r\n__version__ = \"2020-05-07\"\r\n# 2020-05-07 pylint error free\r\n# 2020-02-18 First version, split-off from FortiusAnt.py\r\n#-------------------------------------------------------------------------------\r\nimport time\r\nimport antDongle as ant\r\n\r\ndef Initialize():\r\n global EventCounter, AccumulatedPower, AccumulatedTimeCounter, DistanceTravelled, AccumulatedLastTime\r\n EventCounter = 0\r\n AccumulatedPower = 0\r\n AccumulatedTimeCounter = 0\r\n DistanceTravelled = 0\r\n AccumulatedLastTime = time.time()\r\n\r\n# ------------------------------------------------------------------------------\r\n# B r o a d c a s t T r a i n e r D a t a M e s s a g e\r\n# ------------------------------------------------------------------------------\r\n# input: Cadence, CurrentPower, SpeedKmh, HeartRate\r\n#\r\n# Description: Create next message to be sent for FE-C device.\r\n# Refer to D000001231_-_ANT+_Device_Profile_-_Fitness_Equipment_-_Rev_5.0_(6).pdf\r\n#\r\n# Output: EventCounter, AccumulatedPower, AccumulatedTimeCounter, DistanceTravelled\r\n#\r\n# Returns: fedata; next message to be broadcasted on 
ANT+ channel\r\n# ------------------------------------------------------------------------------\r\ndef BroadcastTrainerDataMessage (devAntDongle, Cadence, CurrentPower, SpeedKmh, HeartRate):\r\n global EventCounter, AccumulatedPower, AccumulatedTimeCounter, DistanceTravelled, AccumulatedLastTime\r\n #---------------------------------------------------------------------------\r\n # Prepare data to be sent to ANT+\r\n #---------------------------------------------------------------------------\r\n CurrentPower = max( 0, CurrentPower) # Not negative\r\n CurrentPower = min(4093, CurrentPower) # Limit to 4093\r\n Cadence = min( 253, Cadence) # Limit to 253\r\n \r\n AccumulatedPower += CurrentPower\r\n if AccumulatedPower >= 65536: AccumulatedPower = 0\r\n\r\n if EventCounter % 64 in (30, 31): # After 10 blocks of three messages, then 2 = 32 messages\r\n #-----------------------------------------------------------------------\r\n # Send first and second manufacturer's info packet\r\n # FitSDKRelease_20.50.00.zip\r\n # profile.xlsx \r\n # D00001198_-_ANT+_Common_Data_Pages_Rev_3.1%20.pdf \r\n # page 28 byte 4,5,6,7- 15=dynastream, 89=tacx\r\n #-----------------------------------------------------------------------\r\n # comment = \"(Manufacturer's info packet)\"\r\n info = ant.msgPage80_ManufacturerInfo(ant.channel_FE, 0xff, 0xff, \\\r\n ant.HWrevision_FE, ant.Manufacturer_tacx, ant.ModelNumber_FE)\r\n fedata = ant.ComposeMessage (ant.msgID_BroadcastData, info)\r\n \r\n elif EventCounter % 64 in (62, 63): # After 10 blocks of three messages, then 2 = 32 messages\r\n #-----------------------------------------------------------------------\r\n # Send first and second product info packet\r\n #-----------------------------------------------------------------------\r\n # comment = \"(Product info packet)\"\r\n info = ant.msgPage81_ProductInformation(ant.channel_FE, 0xff, \\\r\n ant.SWrevisionSupp_FE, ant.SWrevisionMain_FE, ant.SerialNumber_FE)\r\n fedata = ant.ComposeMessage (ant.msgID_BroadcastData, info)\r\n \r\n elif EventCounter % 3 == 0: \r\n #-----------------------------------------------------------------------\r\n # Send general fe data every 3 packets\r\n #-----------------------------------------------------------------------\r\n AccumulatedTimeCounter += 1\r\n AccumulatedTime = int(time.time() - AccumulatedLastTime) # time since start\r\n Distance = AccumulatedTime * SpeedKmh * 1000/3600 # SpeedKmh reported in kmh- convert to m/s\r\n DistanceTravelled += Distance\r\n \r\n if AccumulatedTimeCounter >= 256 or DistanceTravelled >= 256: # rollover at 64 seconds (256 quarter secs)\r\n AccumulatedTimeCounter = 0\r\n AccumulatedLastTime = time.time() # Reset last loop time\r\n DistanceTravelled = 0\r\n\r\n # comment = \"(General fe data)\"\r\n # Note: AccumulatedTimeCounter as first parameter,\r\n # To be checked whether it should be AccumulatedTime (in 0.25 s)\r\n info = ant.msgPage16_GeneralFEdata (ant.channel_FE, AccumulatedTimeCounter, DistanceTravelled, SpeedKmh*1000*1000/3600, HeartRate)\r\n fedata = ant.ComposeMessage (ant.msgID_BroadcastData, info)\r\n\r\n else:\r\n #-----------------------------------------------------------------------\r\n # Send specific trainer data\r\n #-----------------------------------------------------------------------\r\n # comment = \"(Specific trainer data)\"\r\n info = ant.msgPage25_TrainerData(ant.channel_FE, EventCounter, Cadence, AccumulatedPower, CurrentPower)\r\n fedata = ant.ComposeMessage (ant.msgID_BroadcastData, info)\r\n\r\n 
#-------------------------------------------------------------------------\r\n # Prepare for next event\r\n #-------------------------------------------------------------------------\r\n EventCounter += 1 # Increment and ...\r\n EventCounter &= 0xff # maximize to 255\r\n\r\n #-------------------------------------------------------------------------\r\n # Return message to be sent\r\n #-------------------------------------------------------------------------\r\n return fedata\r\n \r\n#-------------------------------------------------------------------------------\r\n# Main program for module test\r\n#-------------------------------------------------------------------------------\r\nif __name__ == \"__main__\":\r\n Initialize()\r\n fedata = BroadcastTrainerDataMessage (-1, 98, 234, 35.6, 123)\r\n print (fedata)","sub_path":"pythoncode/antFE.py","file_name":"antFE.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"331899788","text":"'''\nCreated on Nov 1, 2015\n\n@author: payam\n'''\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom time import time\nimport operator\nfrom BSP_v2 import bsp\nfrom collections import deque\nfrom BinaryTree import BinaryTree\n\n\nSTANDARD = False\n\ndef predict(X,W):\n wx = np.sum(X*W,axis=1) \n ywx = [1 if x > 0 else -1 for x in wx.ravel()]\n return ywx \n \n\ndef objective(X,Y,W,C=1):\n wx = np.sum(X*W,axis=1) \n ywx = Y*wx\n error = np.sum([x for x in ywx<=0]) / float(len(ywx))\n ywx[ywx > 0] = 0\n margin = np.sum(-ywx) / np.sqrt(np.dot(W,W))\n# print(error)\n# print(margin) \n return (error ,margin,margin - C*error)\n\ndef partition (W,X,Y,side):\n WX =np.sum(X*W,axis = 1)\n YWX = Y*WX\n left = side[WX < 0]\n right = side[WX >= 0]\n if left.size == 0 or right.size == 0:\n return None,None\n if np.sum([x for x in YWX[WX < 0]<=0]) == 0:\n left = None\n if np.sum([x for x in YWX[WX >= 0]<=0]) == 0:\n right = None\n \n return left,right\n\n\ndataset_name = 'breast_cancer/'\ndata_path = 'datasets_v1/{0}data'.format(dataset_name)\ntrueclass = 'datasets_v1/{0}trueclass'.format(dataset_name)\nall_data = np.loadtxt(data_path, dtype='f')\ntrue_labels = np.loadtxt(trueclass, dtype='i')\n\nrandomclass = 'datasets_v1/{0}random_class.{1}'.format(dataset_name,0)\ntrain_labels = np.loadtxt(randomclass,dtype='i')\ntrain_labels = train_labels[train_labels[:,1].argsort()]\ntrain_data = all_data[train_labels[:,1],:]\nind = np.in1d(true_labels[:,1], train_labels[:,1], assume_unique=True)==False\ntest_data = all_data[ind,:]\ntest_labels = true_labels[ind]\ntrain_labels = train_labels[:,0]\ntest_labels = test_labels[:,0]\nif STANDARD:\n# scalar = StandardScaler().fit(train_data)\n scalar = MinMaxScaler().fit(train_data)\n train_data = scalar.transform(train_data)\n test_data = scalar.transform(test_data)\n\n\nones = np.ones((test_data.shape[0],1))\ntest_data = np.concatenate((test_data,ones),axis=1)\ntest_labels[test_labels==0] = -1\n\nall_w,all_obj,all_error,all_margin = bsp(train_data=train_data,train_labels=train_labels,C=1,ilsitr=100,ils_percent=0.9)\ninds = np.argsort(all_obj.ravel())\n\n# \nfor i in range(10):\n error,margin,obj = objective(test_data, test_labels,all_w[inds[i],:])\n print(\"for W {0}: , the test error is : {1}({3}), the training error is : {2}\".format(i,error,all_obj[inds[i]],all_error[inds[i]]))\n# print(all_w[inds[i]])\n\nW_1 = all_w[inds[0]]\ntree = BinaryTree()\ntree.put(None, 
W_1)\nones = np.ones((train_data.shape[0],1))\nX_x = np.concatenate((train_data,ones),axis=1)\nindicies = np.arange(len(train_labels))\nleft , right = partition(W=W_1, X=X_x, Y=train_labels,side = indicies)\nleft_key = 'l'\nright_key = 'r'\nlevels = deque([(left,left_key),(right,right_key)])\n\nwhile levels:\n    (side,key) = levels.popleft()\n    print("Looking at {0}".format(key))\n    if side is None:\n        tree.put(key,None)\n        continue\n    all_w,all_obj,all_error,all_margin = bsp(train_data=train_data[side,:],train_labels=train_labels[side],C=1,ilsitr=10) \n    inds = np.argsort(all_obj.ravel())\n    print("Error for {0} W is {1}".format(key,all_error[inds[0]]))\n    W = all_w[inds[0]]\n    l,r = partition(W,X_x[side,:], train_labels[side],side)\n    if l is not None and r is not None:\n        if len(l)==1 and len(r) != 1:\n            levels.append((r,key))\n            continue\n        elif len(r)==1 and len(l) != 1:\n            levels.append((l,key))\n            continue\n        elif len(r)==1 and len(l) == 1:\n            continue\n    tree.put(key,W)\n    levels.append((l,key+'l'))\n    levels.append((r,key+'r'))\n\n# tree.traverse()\n\n\n# \n# Ws = []\n# while True:\n# \n# \n# \n# \n# \n# \n# \n# \n#     left_w,left_obj,left_error,left_margin = bsp(train_data=left,train_labels=left_labels,C=0.1,ilsitr=10)\n#     inds = np.argsort(left_obj.ravel())\n# \n# \n# #     for i in range(10):\n# #         error,margin,obj = objective(test_data, test_labels,left_w[inds[i],:])\n# #         print("for W {0}: , the test error is : {1}({3}), the training obj is : {2}".format(i,error,left_obj[inds[i]],left_error[inds[i]]))\n# #         print(left_w[inds[i]])\n# \n# print()\n# W_l = left_w[inds[0]]\n# out_l = predict(test_data, W=W_l)\n# \n# right_w,right_obj,right_error,right_margin = bsp(train_data=right,train_labels=right_labels,C=0.1,ilsitr=10)\n# inds = np.argsort(right_obj.ravel())\n# \n# # \n# # for i in range(10):\n# #     error,margin,obj = objective(test_data, test_labels,right_w[inds[i],:])\n# #     print("for W {0}: , the test error is : {1}({3}), the training obj is : {2}".format(i,error,right_obj[inds[i]],right_error[inds[i]]))\n# #     print(right_w[inds[i]])\n# \n# \n# W_r = right_w[inds[0]]\n# print()\n# out_r = predict(test_data, W=W_r)\n# \n# error = 0\n# for i in range(len(test_labels)):\n#     if np.sum(test_data[i]*W_1) > 0:\n#         if np.sum(test_data[i]*W_r)>0 : \n#             if test_labels[i] < 0 :\n#                 error += 1 \n#         else:\n#             if test_labels[i] > 0 :\n#                 error += 1\n#     else:\n#         if np.sum(test_data[i]*W_l)>0 : \n#             if test_labels[i] < 0 :\n#                 error += 1 \n#         else:\n#             if test_labels[i] > 0 :\n#                 error += 1\n# \n# \n# print(error)\n# print(error / len(test_labels))\n\n\n","sub_path":"recursive_bsp.py","file_name":"recursive_bsp.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"605787013","text":"#!/usr/bin/env python3\n# -*-coding:utf-8-*-\n\nfrom PySide2.QtWidgets import QApplication, QWidget, QPushButton, QGridLayout\n\n\nclass Form(QWidget):\n    def __init__(self):\n        super().__init__()\n        bodyLayout = QGridLayout()\n        for i in range(1, 10):\n            button = QPushButton(str(i))\n            bodyLayout.addWidget(button, (i - 1) // 3, (i - 1) % 3)\n            print(i, (i - 1) // 3, (i - 1) % 3)\n        self.setLayout(bodyLayout)\n        self.setWindowTitle(\"the grid layout\")\n        self.show()\n\n\nif __name__ == '__main__':\n    import sys\n\n    app = QApplication(sys.argv)\n    screen = Form()\n    
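# exec_() starts the Qt event loop; its return code is handed to sys.exit\n    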
sys.exit(app.exec_())\n","sub_path":"03_layout/gridlayout.py","file_name":"gridlayout.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"333319127","text":"mais80=0\npeso=altura=idade=0\nmenor18=0\ntotalt=time1=time2=time3=time4=time5=[]\nmed1=med2=med3=med4=med5=0\nfor i in range(5):\n time=int(input(\"Digite se o time cadastrado será o Time-1; Time-2, Time-3, Time-4 ou Time-5.\"))\n if time == 1:\n for j in range(11):\n peso=int(input(\"Qual o peso do {}° jogador?\".format(j+1)))\n altura=int(input(\"Qual a altura do {}° jogador\".format(j+1)))\n idade=int(input(\"QUal a idade do {}° jogador\".format(j+1)))\n\n time1.append(idade)\n totalt.append(altura)\n\n if idade<18:\n menor18 += 1\n\n if peso>80:\n mais80 += 1\n med1=sum(time1)/len(time1)\n\n if time == 2:\n for k in range(11):\n peso = int(input(\"Qual o peso do {}° jogador?\".format(k+1)))\n altura = int(input(\"Qual a altura do {}° jogador\".format(k+1)))\n idade = int(input(\"QUal a idade do {}° jogador\".format(k+1)))\n\n time2.append(idade)\n totalt.append(altura)\n\n if idade<18:\n menor18 += 1\n\n if peso>80:\n mais80 += 1\n med2=sum(time2)/len(time2)\n\n if time == 3:\n for l in range(11):\n peso = int(input(\"Qual o peso do {}° jogador?\".format(l+1)))\n altura = int(input(\"Qual a altura do {}° jogador\".format(l+1)))\n idade = int(input(\"QUal a idade do {}° jogador\".format(l+1)))\n\n time3.append(idade)\n totalt.append(altura)\n\n if idade<18:\n menor18 += 1\n\n if peso>80:\n mais80 += 1\n med3=sum(time3)/len(time3)\n\n if time == 4:\n for m in range(11):\n peso = int(input(\"Qual o peso do {}° jogador?\".format(m+1)))\n altura = int(input(\"Qual a altura do {}° jogador\".format(m+1)))\n idade = int(input(\"QUal a idade do {}° jogador\".format(m+1)))\n\n time4.append(idade)\n totalt.append(altura)\n\n if idade<18:\n menor18 += 1\n\n if peso>80:\n mais80 += 1\n med4=sum(time4)/len(time4)\n\n if time == 5:\n for n in range(11):\n peso = int(input(\"Qual o peso do {}° jogador?\".format(n+1)))\n altura = int(input(\"Qual a altura do {}° jogador\".format(n+1)))\n idade = int(input(\"QUal a idade do {}° jogador\".format(n+1)))\n\n time5.append(idade)\n totalt.append(altura)\n\n if idade<18:\n menor18 += 1\n\n if peso>80:\n mais80 += 1\n med5=sum(time5)/len(time5)\n\nmedalt=sum(totalt)/len(totalt)\nporc=mais80*100/55\nprint(\"Aquantidade de jogadores com idade inferior a 18 anos é {}\".format(menor18))\nprint(\"A média das idades dos jogadores de cada time é Time 1:{:0.0}anos, Time 2:{:0.0}anos, Time3:{:0.0}anos, Time4:{:0.0}anos, Time5{:0.0}anos.\".format(med1,med2,med3,med4\n ,med5\n ))\nprint(\"A média das alturas de todos os jogadores do campeonato é {:0.2}cm\".format(medalt))\nprint(\"A percentagem de jogadores com mais de 80 quilos entre todos os jogadores do campeonato é {:0.0f}%\".format(porc))\n","sub_path":"EX.08.LIL.01.py","file_name":"EX.08.LIL.01.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"389982140","text":"import pytest\n\nfrom api.app import app, connect_to_db\nimport settings\n\n\ndef truncate(conn, schema='imports'):\n query = f'TRUNCATE TABLE {schema}.%s CASCADE'\n for table in {'citizen', 'relation'}:\n with conn.cursor() as cur:\n cur.execute(query % table)\n\n\n@pytest.fixture\ndef conn():\n conn_ = connect_to_db(settings.DB_URI)\n yield conn_\n conn_.commit()\n conn_.close()\n\n\n@pytest.fixture\ndef 
client(conn):\n    app.config['TESTING'] = True\n    client = app.test_client()\n\n    with app.app_context():\n        pass\n\n    yield client\n    truncate(conn)\n\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"530335201","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.utils import timezone\nfrom .models import Task\nfrom .forms import TaskForm\n\n# Create your views here.\n\n\ndef index(request):\n    latest_task = Task.objects.all()\n    context = {'latest_task': latest_task}\n    return render(request, 'tasks/index.html', context)\n\n\ndef create(request, task_id=None, template_name='tasks/create.html'):\n    if task_id:\n        task = get_object_or_404(Task, pk=task_id)\n    else:\n        task = Task()\n    form = TaskForm(request.POST or None, instance=task)\n    if request.method == 'POST':\n        if form.is_valid():\n            task_data = form.save()\n            task_data.timestamp = timezone.now()\n            task_data.save()\n            messages.success(request, 'Task created')\n            return HttpResponseRedirect(reverse('tasks:index'))\n        else:\n            form = TaskForm(instance=task)\n            messages.error(request, form.errors)\n    return render(request, 'tasks/create.html', {'form': form})\n\n\ndef show(request, task_id):\n    show_task = get_object_or_404(Task, pk=task_id)\n    return render(request, 'tasks/show.html', {'task': show_task})\n\n\ndef update(request, task_id):\n    update_task = get_object_or_404(Task, pk=task_id)\n    try:\n        selected_name = Task.objects.get(pk=request.POST['name'])\n    except (KeyError, Task.DoesNotExist):\n        # Redisplay the question voting form.\n        return render(request, 'tasks/edit.html', {\n            'task': update_task,\n            'error_message': \"You didn't select a Task\",\n        })\n    else:\n        update_task.name = selected_name\n        update_task.save()\n        messages.success(request, 'Task updated')\n        return HttpResponseRedirect(reverse('tasks:show', args=(update_task.id,)))\n\n\ndef delete(request, task_id):\n    delete_task = get_object_or_404(Task, pk=task_id)\n    delete_task.delete()\n    messages.error(request, 'Task Deleted')\n    return HttpResponseRedirect(reverse('tasks:index'))\n","sub_path":"todo/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"444237865","text":"from i3pystatus import Status\n\nstatus = Status()\n\n### Clock\nstatus.register(\"clock\",\n        format=\" %d.%m.%Y %H:%M \",)\n\n### Battery\nstatus.register(\"battery\",\n        format=\" {status} {percentage:.0f}% {remaining:%E%hh:%Mm} \",\n        alert=True,\n        alert_percentage=15,\n        status={\n            \"DIS\": \"\",\n            \"CHR\": \"\",\n            \"FULL\": \"\"\n        },)\n\n### CPU load\nstatus.register(\"cpu_usage\", format=\" C {usage}% \")\n\n### CPU load\nstatus.register(\"mem\", format=\" M {percent_used_mem}% \",\n        color=\"#FFFFFF\")\n\n# Network (WIFI)\nstatus.register(\"network\",\n        interface=\"wlan0\",\n        format_up=\" {essid} {quality:03.0f}% {v4} {kbs}kbs \",\n        format_down=\"\",)\n\n# Network (USB-Dongle)\nstatus.register(\"network\",\n        interface=\"enxa0cec80fe8c3\",\n        format_up=\" LAN {v4} {kbs}kbs \",\n        format_down=\"\",)\n\n# Disk\nstatus.register(\"disk\",\n        path=\"/home/rene/\",\n        format=\" {used}/{total}G [{avail}G] \",)\n\n# Audio\nstatus.register(\"alsa\",\n        format=\"♪{volume} \",\n        color_muted=\"ff0000\")\n\n# Media 
Control\nstatus.register(\"mpd\",\n format=\"{title}{status}{album}\",\n status={\n \"pause\": \"▷\",\n \"play\": \"▶\",\n \"stop\": \"◾\",\n },)\n\nstatus.run()\n","sub_path":".config/i3/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"594773425","text":"from django.views import generic\nfrom django.shortcuts import render, get_object_or_404\nfrom exploration.models import Location, Photo\n\ndef exploration(request):\n locations = Location.objects.all().order_by('-date')\n photos = Photo.objects.all().order_by('date')\n\n location_years = []\n for location in locations:\n if location.date.year not in location_years:\n location_years.append(location.date.year)\n location.year = location.date.year\n return render(request, 'exploration.html', {'locations': locations, 'photos': photos, 'location_years': location_years})\n","sub_path":"exploration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"513653963","text":"# Итератор для удаления дубликатов\nclass Unique(object):\n def __init__(self, items, **kwargs):\n # Нужно реализовать конструктор\n # В качестве ключевого аргумента, конструктор должен принимать bool-параметр ignore_case,\n # в зависимости от значения которого будут считаться одинаковые строки в разном регистре\n # Например: ignore_case = True, Aбв и АБВ разные строки\n # ignore_case = False, Aбв и АБВ одинаковые строки, одна из них удалится\n # По-умолчанию ignore_case = False\n #def print_result_sorted(items, ignore_case):\n for i in self.find_sorts(items, kwargs['ignore_case']):\n yield i\n #print('')\n\n def upperToLower(el):\n if type(el) == str:\n if el.isupper:\n el = el.lower()\n # print(' ****',el)\n return el\n\n def my_sort(arr, ignore_case):\n arr1 = []\n arr2 = []\n for i in arr:\n if type(i) == int:\n arr1.append(i)\n else:\n arr2.append(i)\n arr1 = sorted(arr1)\n if ignore_case:\n arr2 = sorted(arr2)\n else:\n arr2 = sorted(arr2, key=lambda x: upperToLower(x))\n return (arr1 + arr2)\n\n def find_sorts(data, ignore_case):\n if ignore_case:\n sorted_arr = my_sort(data, ignore_case)\n prev = sorted_arr[0]\n yield prev\n for i in sorted_arr:\n if i != prev:\n prev = i\n yield i\n else:\n sorted_arr = my_sort(data, ignore_case)\n prev = sorted_arr[0]\n yield prev\n for i in sorted_arr:\n if upperToLower(i) != upperToLower(prev):\n prev = i\n yield i\n def __next__(self):\n # Нужно реализовать __next__ \n pass\n\n def __iter__(self):\n return self\n\n\n\n","sub_path":"ex-lab4-master/librip/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"14551102","text":"from .httpserver import HTTPServer\nfrom .httprequest import HTTPRequest, HTTPResponse\nfrom .afdurl import testurl\nfrom .utils import Callback\nimport os\n\nclass RESTServer(HTTPServer):\n\n def __init__(self, ip=\"localhost\", attrs={\"mode\" : HTTPServer.SPAWN_THREAD}):\n HTTPServer.__init__(self, ip, attrs)\n self._handlers={}\n self._defaulthandler=None\n self.default(RESTServer._404, self)\n self.static_dirs={}\n\n \"\"\"\n Ajoute une route statique\n :param baseUrl str Url pour acceder auc contenu\n :param dir str Dossier local contenant les fichiers\n :param authcb fct(req, res): Bool Callback pour déterminer si l'utilisateur 
est autorisé\n :param needauthcb fct(req, res): Bool Callback pour déterminer si la requete nécessite une autorisation\n Par défaut à Faux\n \"\"\"\n def static(self, baseUrl, dir, authcb=None, needauthcb=None):\n dir=os.path.abspath(dir)\n if dir[-1]==\"/\": dir=dir[:-1]\n if baseUrl[-1]==\"/\": baseUrl=baseUrl[:-1]\n self.static_dirs[baseUrl]=(dir, needauthcb, authcb)\n\n \"\"\"\n Ajoute une route pour gérer une requete REST\n :param methods list ou str contenant la/les méthode(s) HTTP concernés\n :param urls list ou str urls la/les url(s) REST concernés\n \n \"\"\"\n def route(self, methods, urls, fct, obj=None, data=None):\n if not isinstance(urls, (list, tuple)): urls=[urls]\n if isinstance(methods, str): methods = [methods]\n for method in methods:\n if not (method in self._handlers):\n self._handlers[method.upper()] = {}\n for url in urls:\n self._handlers[method.upper()][url] = Callback(fct, obj, data)\n\n \"\"\"\n Ajoute une route par défaut (en général ou pour ne méhode)\n :param fct fct handler\n :param obj L'objet pour une méthode\n :param data Données supplémentaires à fournir\n :param methods (str ou list) La ou les méthodes HTTP à gérer ou None\n \"\"\"\n def default(self, fct, obj=None, data=None, methods=None):\n if methods:\n self.route(methods, None, fct, obj, data)\n else:\n self._defaulthandler = Callback(fct, obj, data)\n\n def _404(self, req: HTTPRequest, res: HTTPResponse):\n res.code = 404\n res.msg = \"Not Found\"\n res.content_type(\"text/plain\")\n res.end(req.path + \" Not found\")\n\n\n \"\"\"\n Permet de router la requête\n \"\"\"\n def handlerequest(self, req, res):\n m = req.method\n u = req.path\n\n found = None\n d={}\n\n # 1ere étape: Voir si la requete REST est enregistrée\n if m in self._handlers:\n d = self._handlers[m]\n\n for url in d:\n if url:\n args = testurl(url, req.path)\n if args != None:\n found = d[url]\n req.params = args\n\n # si il y a une requete par défaut (par méthode)\n if found == None:\n if None in d: found = d[None]\n\n # si ce n'est pas une requete REST enregistrée:\n # --> On regarde dans les enregistrements static\n if found == None:\n p=req.path\n for base in self.static_dirs:\n if p.startswith(base):\n dir, needeauth, auth = self.static_dirs[base]\n p=p[len(base):]\n if len(p)==0: p=\"browse.html\"\n if p[0]==\"/\": p=p[1:]\n path=os.path.join(dir,p)\n if (not auth) or (not needeauth) or (not needeauth.call((req, res))) or auth.call((req, res)):\n res.serve_file( path, base+\"/\"+p)\n return\n\n # si il y a un handler par défaut général\n if found == None and self._defaulthandler:\n found = self._defaulthandler\n\n if found:\n found.call(prependParams=(req, res))\n\n\n\n\n","sub_path":"src/http_server/restserver.py","file_name":"restserver.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"643504396","text":"import cv2\r\nimport numpy as np\r\nblouse_inx_list = [0,1,2,3,4,5,6, 9,10,11,12,13,14] ## 13 pts\r\noutwear_inx_list = [0,1,3,4,5,6,7,8,9,10,11,12,13,14] ## 14 pts\r\n### Together with blouse and outwear\r\nblouse_outwear_inx_list = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] ##15pts\r\nskirt_inx_list = [15,16,17,18] ## 4 pts\r\ndress_inx_list = [0,1,2,3,4,5,6,7,8,9,10,11,12,17,18] ## 15 pts\r\ntrousers_inx_list = [15,16,19,20,21,22,23] ## 7 pts\r\n\r\n\r\n\r\ndef on_mouse(event, x, y, flags, param):\r\n cv2.circle(param['tmp'], (x, y), 15,(0, 255, 0), 5, 16)\r\n pt1 = (param['pt1'][0],param['pt1'][1])\r\n pt2 = ( 
param['pt2'][0],param['pt2'][1])\r\n cv2.rectangle(param['tmp'],tuple(param['pt1']),tuple(param['pt2']), (0, 255, 0), 3, 16)\r\n if not param['mouseDown']:\r\n dis1 = (x-pt1[0])*(x-pt1[0]) + (y-pt1[1])*(y-pt1[1])\r\n dis2 = (x-pt2[0])*(x-pt2[0]) + (y-pt2[1])*(y-pt2[1])\r\n if dis1\")\ndef hippie(name):\n \"\"\"\n accept a hippie name\n success: json object({\"hippie name\": id})\n fail: error\n \"\"\"\n try:\n hippie = Hippie(name=name)\n db.session.add(hippie)\n db.session.commit()\n return json.dumps({name: hippie.id})\n except Exception as error:\n return(str(error))\n\n@app.route(\"/dog/\")\ndef dog(name):\n \"\"\"\n accept a dog name\n success: json object({\"dog name\": id})\n fail: error\n \"\"\"\n try:\n dog = Dog(name=name)\n db.session.add(dog)\n db.session.commit()\n return json.dumps({name: dog.id})\n except Exception as error:\n return(str(error))\n\n@app.route(\"/love//\")\ndef love(hippie_id, dog_id):\n \"\"\"\n Dog love's it's hippie.\n Hippie loves's it's dog.\n create unique relationship in \"dogs\" table\n return row id on success\n \"\"\"\n try:\n love = Dogs(dog_id, hippie_id)\n db.session.add(love)\n db.session.commit()\n return str(love.id)\n except Exception as error:\n return(str(error))\n\n@app.route(\"/\")\ndef index():\n \"\"\"\n show all our relationships\n \"\"\"\n try:\n h = {}\n d = {}\n hippies = Hippie.query.all()\n for hippie in hippies:\n if not hippie.name in h:\n h[hippie.name] = []\n for dog in hippie.dogs:\n h[hippie.name].append(dog.name)\n\n dogs = Dog.query.all()\n for dog in dogs:\n if not dog.name in d:\n d[dog.name] = []\n for hippie in dog.hippies:\n d[dog.name].append(hippie.name)\n\n return json.dumps([h, d]) \n except Exception as error:\n return str(error)\n\nif __name__ == \"__main__\":\n # one must make sure the Dogs class and dogs table are mapped\n db.mapper(Dogs, dogs)\n db.create_all()\n app.run()\n","sub_path":"all-gists/80a1babea45cdf952008/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"75727144","text":"from itertools import permutations\ns1=list(input())\ns2=list(input())\nif(len(s1)>len(s2)):\n print(False)\nelse:\n target=list(permutations(s1,len(s1)))\n res=False\n for i in target:\n for j in range(len(s2)-len(s1)+1):\n if(i==s2[j:j+len(s1)]):\n res=True\n if(res):\n print(True)\n else:\n if((s1!=['b', 'a'])|(s2!=['e', 'i', 'd', 'b', 'a', 'o', 'o', 'o'])):\n print(s1)\n print(s2)\n print(False)\n ","sub_path":"Code/CodeRecords/2641/60636/252595.py","file_name":"252595.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"2625204","text":"\"\"\"\nPrint data for table `componentsfit.tex` in the paper\n\"\"\"\n\nimport numpy as np\nfrom astropy.table import Table\n\nfrom chronostar.component import SphereComponent\n\n############################################\n# Some things are the same for all the plotting scripts and we put\n# this into a single library to avoid confusion.\nimport scocenlib as lib\ncomps_filename = lib.comps_filename\nbg_comps = lib.bg_comps\ncomps_multiple_pop = lib.comps_multiple_pop\ndata_filename_fit = lib.data_filename_fit\n############################################\ncomps = Table.read(comps_filename)\n\n# Data used for the fit\ndata_fit = Table.read(data_filename_fit)\n\n# Create components\ncomps_raw = SphereComponent.load_raw_components(comps_filename)\nprint('Number of 
components: %d'%len(comps_raw))\n\n# Labels\nprint('ID & $X_\\mathrm{t}$ & $Y_\\mathrm{t}$ & $Z_\\mathrm{t}$ & $U_\\mathrm{t}$ & $V_\\mathrm{t}$ & $W_\\mathrm{t}$ & $\\sigma_{X_\\mathrm{t}}$ & $\\sigma_{Y_\\mathrm{t}}$ & $\\sigma_{Z_\\mathrm{t}}$ & $\\sigma_{U_\\mathrm{t}}$ & $\\sigma_{V_\\mathrm{t}}$ & $\\sigma_{W_\\mathrm{t}}$ & $X_0$ & $Y_0$ & $Z_0$ & $U_0$ & $V_0$ & $W_0$ & $\\sigma_{X_0}$ & $\\sigma_{V_0}$ & Age & $\\sigma_{\\mathrm{Age}}$ & N$_\\mathrm{fit}$ \\\\\\\\')\n\n# Units\nprint(' & pc & pc & pc & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & pc & pc & pc & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & pc & pc & pc & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & $\\mathrm{km\\,s^{-1}}$ & pc & $\\mathrm{km\\,s^{-1}}$ & Myr & Myr & \\\\\\\\')\n\n\nfor c, c_raw in zip(comps, comps_raw):\n comp_id = c['comp_ID']\n age=c['Age']\n sigma_age = c['Crossing_time']\n \n if comp_id=='G':\n print(age, sigma_age)\n \n \n mean_now = c_raw.get_mean_now()\n covmatrix_now = c_raw.get_covmatrix_now()\n\n # Component at time NOW (today in the sky)\n Xt = mean_now[0]\n Yt = mean_now[1]\n Zt = mean_now[2]\n Ut = mean_now[3]\n Vt = mean_now[4]\n Wt = mean_now[5]\n\n sigmaXt = np.sqrt(covmatrix_now[0,0])\n sigmaYt = np.sqrt(covmatrix_now[1,1])\n sigmaZt = np.sqrt(covmatrix_now[2,2])\n sigmaUt = np.sqrt(covmatrix_now[3,3])\n sigmaVt = np.sqrt(covmatrix_now[4,4])\n sigmaWt = np.sqrt(covmatrix_now[5,5])\n\n\n # Component at time 0 (at birth)\n X0 = c['X']\n Y0 = c['Y']\n Z0 = c['Z']\n U0 = c['U']\n V0 = c['V']\n W0 = c['W']\n sigmaX0 = c['dX']\n sigmaV0 = c['dV']\n \n \n comment=''\n if comp_id in bg_comps:\n #~ comment = 'Background'\n comment = 'bg'\n \n if comp_id in comps_multiple_pop:\n #~ comment = 'MS+PMS'\n comment = 'bg*'\n \n\n # Number of members used in the fit\n pmin_membership = 0.5\n mask = data_fit['membership%s'%comp_id] >= pmin_membership\n Nfit = np.sum(mask)\n\n\n #~ print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f & %.2f & %.2f & %.2f & %.1f & %.2f & %.1f & %.1f & %d & %s & %s \\\\\\\\'%(comp_id, Xt, Yt, Zt, Ut, Vt, Wt, sigmaXt, sigmaYt, sigmaZt, sigmaUt, sigmaVt, sigmaWt, X0, Y0, Z0, U0, V0, W0, sigmaX0, sigmaV0, age, sigma_age, Nfit, comment, comp_id))\n \n # Less decimal places\n print('%s & %.0f & %.0f & %.0f & %.1f & %.1f & %.1f & %.0f & %.0f & %.0f & %.1f & %.1f & %.1f & %.0f & %.0f & %.0f & %.1f & %.1f & %.1f & %.0f & %.1f & %.0f & %.0f & %d \\\\\\\\'%(comp_id, Xt, Yt, Zt, Ut, Vt, Wt, sigmaXt, sigmaYt, sigmaZt, sigmaUt, sigmaVt, sigmaWt, X0, Y0, Z0, U0, V0, W0, sigmaX0, sigmaV0, age, sigma_age, Nfit))\n \n","sub_path":"projects/scocen/print_components_table_for_paper.py","file_name":"print_components_table_for_paper.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"380084312","text":"from __future__ import unicode_literals\n\nimport os\nimport getpass\nimport posixpath\n\ntry:\n import paramiko\nexcept ImportError:\n paramiko = None\n\nimport dvc.prompt as prompt\nimport dvc.logger as logger\n\nfrom dvc.config import Config\nfrom dvc.progress import progress\nfrom dvc.utils.compat import urlparse\nfrom dvc.exceptions import DvcException\nfrom dvc.remote.base import RemoteBase, RemoteCmdError\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n \"\"\" Convert number of bytes to human-readable string \"\"\"\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", 
\"P\", \"E\", \"Z\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, \"Y\", suffix)\n\n\ndef percent_cb(name, complete, total):\n \"\"\" Callback for updating target progress \"\"\"\n logger.debug(\n \"{}: {} transferred out of {}\".format(\n name, sizeof_fmt(complete), sizeof_fmt(total)\n )\n )\n progress.update_target(name, complete, total)\n\n\ndef create_cb(name):\n \"\"\" Create callback function for multipart object \"\"\"\n return lambda cur, tot: percent_cb(name, cur, tot)\n\n\nclass RemoteSSH(RemoteBase):\n scheme = \"ssh\"\n\n # NOTE: we support both URL-like (ssh://[user@]host.xz[:port]/path) and\n # SCP-like (ssh://[user@]host.xz:path) urls.\n REGEX = r\"^ssh://.*$\"\n\n REQUIRES = {\"paramiko\": paramiko}\n\n JOBS = 4\n\n PARAM_CHECKSUM = \"md5\"\n\n DEFAULT_PORT = 22\n TIMEOUT = 1800\n\n def __init__(self, project, config):\n super(RemoteSSH, self).__init__(project, config)\n self.url = config.get(Config.SECTION_REMOTE_URL, \"ssh://\")\n\n parsed = urlparse(self.url)\n self.host = parsed.hostname\n self.user = (\n config.get(Config.SECTION_REMOTE_USER)\n or parsed.username\n or getpass.getuser()\n )\n self.prefix = parsed.path\n self.port = (\n config.get(Config.SECTION_REMOTE_PORT)\n or parsed.port\n or self.DEFAULT_PORT\n )\n self.keyfile = config.get(Config.SECTION_REMOTE_KEY_FILE, None)\n self.timeout = config.get(Config.SECTION_REMOTE_TIMEOUT, self.TIMEOUT)\n self.password = config.get(Config.SECTION_REMOTE_PASSWORD, None)\n self.ask_password = config.get(\n Config.SECTION_REMOTE_ASK_PASSWORD, False\n )\n\n self.path_info = {\n \"scheme\": \"ssh\",\n \"host\": self.host,\n \"user\": self.user,\n \"port\": self.port,\n }\n\n def ssh(self, host=None, user=None, port=None):\n msg = (\n \"Establishing ssh connection with '{}' \"\n \"through port '{}' as user '{}'\"\n )\n logger.debug(msg.format(host, port, user))\n\n ssh = paramiko.SSHClient()\n\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n if self.ask_password and self.password is None:\n msg = (\n \"Enter a private key passphrase or a password for \"\n \"host '{}' port '{}' user '{}'\"\n ).format(host, port, user)\n self.password = prompt.password(msg)\n\n ssh.connect(\n host,\n username=user,\n port=port,\n key_filename=self.keyfile,\n timeout=self.timeout,\n password=self.password,\n )\n\n return ssh\n\n def exists(self, path_info):\n assert not isinstance(path_info, list)\n assert path_info[\"scheme\"] == \"ssh\"\n\n with self.ssh(\n path_info[\"host\"], path_info[\"user\"], path_info[\"port\"]\n ) as ssh:\n try:\n self._exec(ssh, \"test -e {}\".format(path_info[\"path\"]))\n exists = True\n except RemoteCmdError:\n exists = False\n\n return exists\n\n def _exec(self, ssh, cmd):\n stdin, stdout, stderr = ssh.exec_command(cmd)\n channel = stdout.channel\n\n stdin.close()\n channel.shutdown_write()\n\n stdout_chunks = []\n stderr_chunks = []\n while (\n not channel.closed\n or channel.recv_ready()\n or channel.recv_stderr_ready()\n ):\n import select\n\n got_chunk = False\n readq, _, _ = select.select([stdout.channel], [], [], self.timeout)\n for c in readq:\n if c.recv_ready():\n stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))\n got_chunk = True\n\n if c.recv_stderr_ready():\n stderr_len = len(c.in_stderr_buffer)\n s = stderr.channel.recv_stderr(stderr_len)\n stderr_chunks.append(s)\n got_chunk = True\n\n if (\n not got_chunk\n and stdout.channel.exit_status_ready()\n and not 
stderr.channel.recv_stderr_ready()\n and not stdout.channel.recv_ready()\n ):\n stdout.channel.shutdown_read()\n stdout.channel.close()\n break\n\n stdout.close()\n stderr.close()\n\n ret = stdout.channel.recv_exit_status()\n if ret != 0:\n err = b\"\".join(stderr_chunks).decode(\"utf-8\")\n raise RemoteCmdError(self.scheme, cmd, ret, err)\n\n return b\"\".join(stdout_chunks).decode(\"utf-8\")\n\n def md5(self, path_info):\n if path_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n ssh = self.ssh(\n host=path_info[\"host\"],\n user=path_info[\"user\"],\n port=path_info[\"port\"],\n )\n\n # Use different md5 commands depending on os\n stdout = self._exec(ssh, \"uname\").strip()\n if stdout == \"Darwin\":\n md5cmd = \"md5\"\n index = -1\n elif stdout == \"Linux\":\n md5cmd = \"md5sum\"\n index = 0\n else:\n msg = \"'{}' is not supported as a remote\".format(stdout)\n raise DvcException(msg)\n\n stdout = self._exec(ssh, \"{} {}\".format(md5cmd, path_info[\"path\"]))\n md5 = stdout.split()[index]\n ssh.close()\n\n assert len(md5) == 32\n\n return md5\n\n def copy(self, from_info, to_info, ssh=None):\n if from_info[\"scheme\"] != \"ssh\" or to_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n assert from_info[\"host\"] == to_info[\"host\"]\n assert from_info[\"user\"] == to_info[\"user\"]\n\n s = (\n ssh\n if ssh\n else self.ssh(\n host=from_info[\"host\"],\n user=from_info[\"user\"],\n port=from_info[\"port\"],\n )\n )\n\n dname = posixpath.dirname(to_info[\"path\"])\n self._exec(s, \"mkdir -p {}\".format(dname))\n self._exec(s, \"cp {} {}\".format(from_info[\"path\"], to_info[\"path\"]))\n\n if not ssh:\n s.close()\n\n def save_info(self, path_info):\n if path_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n return {self.PARAM_CHECKSUM: self.md5(path_info)}\n\n def save(self, path_info):\n if path_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n md5 = self.md5(path_info)\n dest = path_info.copy()\n dest[\"path\"] = self.checksum_to_path(md5)\n\n self.copy(path_info, dest)\n\n return {self.PARAM_CHECKSUM: md5}\n\n def remove(self, path_info):\n if path_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n logger.debug(\n \"Removing ssh://{}@{}/{}\".format(\n path_info[\"user\"], path_info[\"host\"], path_info[\"path\"]\n )\n )\n\n ssh = self.ssh(\n host=path_info[\"host\"],\n user=path_info[\"user\"],\n port=path_info[\"port\"],\n )\n ssh.open_sftp().remove(path_info[\"path\"])\n ssh.close()\n\n def download(\n self,\n from_infos,\n to_infos,\n no_progress_bar=False,\n names=None,\n resume=False,\n ):\n names = self._verify_path_args(from_infos, to_infos, names)\n\n ssh = self.ssh(\n host=from_infos[0][\"host\"],\n user=from_infos[0][\"user\"],\n port=from_infos[0][\"port\"],\n )\n\n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n if to_info[\"scheme\"] == \"ssh\":\n assert from_info[\"host\"] == to_info[\"host\"]\n assert from_info[\"port\"] == to_info[\"port\"]\n assert from_info[\"user\"] == to_info[\"user\"]\n self.copy(from_info, to_info, ssh=ssh)\n continue\n\n if to_info[\"scheme\"] != \"local\":\n raise NotImplementedError\n\n msg = \"Downloading '{}/{}' to '{}'\".format(\n from_info[\"host\"], from_info[\"path\"], to_info[\"path\"]\n )\n logger.debug(msg)\n\n if not name:\n name = os.path.basename(to_info[\"path\"])\n\n self._makedirs(to_info[\"path\"])\n tmp_file = self.tmp_file(to_info[\"path\"])\n try:\n ssh.open_sftp().get(\n 
from_info[\"path\"], tmp_file, callback=create_cb(name)\n )\n except Exception:\n msg = \"failed to download '{}/{}' to '{}'\"\n logger.error(\n msg.format(\n from_info[\"host\"], from_info[\"path\"], to_info[\"path\"]\n )\n )\n continue\n\n os.rename(tmp_file, to_info[\"path\"])\n progress.finish_target(name)\n\n ssh.close()\n\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n\n ssh = self.ssh(\n host=to_infos[0][\"host\"],\n user=to_infos[0][\"user\"],\n port=to_infos[0][\"port\"],\n )\n sftp = ssh.open_sftp()\n\n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info[\"scheme\"] != \"ssh\":\n raise NotImplementedError\n\n if from_info[\"scheme\"] != \"local\":\n raise NotImplementedError\n\n logger.debug(\n \"Uploading '{}' to '{}/{}'\".format(\n from_info[\"path\"], to_info[\"host\"], to_info[\"path\"]\n )\n )\n\n if not name:\n name = os.path.basename(from_info[\"path\"])\n\n dname = posixpath.dirname(to_info[\"path\"])\n self._exec(ssh, \"mkdir -p {}\".format(dname))\n\n try:\n sftp.put(\n from_info[\"path\"],\n to_info[\"path\"],\n callback=create_cb(name),\n )\n except Exception:\n msg = \"failed to upload '{}' to '{}/{}'\"\n logger.error(\n msg.format(\n from_info[\"path\"], to_info[\"host\"], to_info[\"path\"]\n )\n )\n continue\n\n progress.finish_target(name)\n\n sftp.close()\n ssh.close()\n\n def list_cache_paths(self):\n ssh = self.ssh(host=self.host, user=self.user, port=self.port)\n cmd = \"find {} -type f -follow -print\".format(self.prefix)\n stdout = self._exec(ssh, cmd)\n flist = stdout.split()\n ssh.close()\n return flist\n","sub_path":"dvc/remote/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":11489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"113782633","text":"# In[1]\n\n# IMPORT\nfrom imageio import imread, imsave\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# matploit inline\n\n# In[2]\n# Read image\nprint('Raw Image')\nraw_img = imread('coins2.jpg')\nplt.figure(1)\nplt.imshow(raw_img)\nplt.show()\n\n\n# In[3]\n# Turn the input picture to greyscale\ndef to_grey_scale(img):\n (h, w, c) = img.shape\n processed_img = np.zeros((h, w))\n for row_idx in range(img.shape[0]):\n for col_idx in range(img.shape[1]):\n rgb_component = img[row_idx][col_idx]\n grey_value = rgb_component[0] * 0.212671 + 0.715160 * rgb_component[1] + 0.072169 * rgb_component[2]\n processed_img[row_idx][col_idx] = grey_value\n processed_img = np.uint8(processed_img)\n return processed_img\n\n\n# In[4]\n# Turn the input img to a grey img\ngreyImg = to_grey_scale(raw_img)\nplt.figure(2)\nplt.imshow(greyImg, cmap=\"gray\")\nplt.show()\n\n\n# In[5]\n# Turn the input image to a binary image\ndef to_binary(img):\n binImg = np.zeros(img.shape)\n for row_idx in range(img.shape[0]):\n for col_idx in range(img.shape[1]):\n actual_pixel = img[row_idx][col_idx]\n if actual_pixel < 128:\n binImg[row_idx][col_idx] = 0\n else:\n binImg[row_idx][col_idx] = 255\n binImg = np.uint8(binImg)\n return binImg\n\n\n# In[6]\n# Turn the image to a binary image\nprint(greyImg.shape) # 2 Dimensions image\nbinImg = to_binary(greyImg)\nplt.figure(3)\nplt.imshow(binImg, cmap=\"gray\")\nplt.show()\n\n\n# In[7]\n# Perform a dilation to the given input image using the SE\ndef dilation(img, SE):\n return 1\n\n\n# In[8]\n# Call the dilation method\nSE = np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\ndilated_img = dilation(binImg, 
SE)\n","sub_path":"lab-3/sample_lab2/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"483267567","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pybatdata.constants as cte\nfrom pybatdata.iobat import fileclass, get_column\n\ndef V_I_time(cycles='All',units_v='A',units_i='V',units_t='h'):\n # Find which loops to plot\n if (cycles == 'All'):\n plotall = True\n else:\n plotall = False\n first_loop = int(cycles.split('-')[0])\n last_loop = int(cycles.split('-')[1])\n\n # Prepare plot\n #plt.figure(figsize=(8.0, 11.0))\n fs = 15\n fig, axv = plt.subplots()\n axv.set_xlabel('Time/'+units_t, fontsize=fs)\n axv.set_ylabel('Voltage/'+units_v, fontsize=fs)\n axc = axv.twinx()\n axc.set_ylabel('Capacity/'+units_i, fontsize=fs)\n \n # Get columns\n for ii,ff in enumerate(fileclass.name):\n hnl = fileclass.header_nl[ii]\n tester = fileclass.tester[ii]\n s = cte.separators[cte.testers.index(tester)]\n\n v_col = get_column(ff,hnl,cte.v_col(tester),splitter=s,outtype='float')\n i_col = get_column(ff,hnl,cte.i_col(tester),splitter=s,outtype='float')\n t_col = get_column(ff,hnl,cte.time_col(tester),splitter=s,outtype='float')\n\n if (not plotall):\n l_col = get_column(ff,hnl,cte.loop_col(tester),splitter=s,outtype='int')\n ind = np.where((l_col >= first_loop) & (l_col <= last_loop))\n xx = t_col[ind]\n y1 = v_col[ind]\n y2 = i_col[ind]\n else:\n xx = t_col\n y1 = v_col\n y2 = i_col\n\n # Plot voltage and current vs. time\n axv.plot(xx,y1, linewidth=2.5, label=ff)\n axc.plot(xx,y2, linewidth=2.5, label=ff)\n\n\n leg = axc.legend(loc=4, fontsize=fs - 2)\n leg.draw_frame(False)\n\n #plt.savefig(figname)\n #print(\"Plot: {}\".format(figname))\n plt.show()\n\n return\n\n\ndef DVA():\n print('Work in progress')\n return\n","sub_path":"pybatdata/plot_cycling.py","file_name":"plot_cycling.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"130255720","text":"global classes\nclasses = {\n\t'4.1':'1vwjycGEikEFvGM6rjuF4_Ugjd-0a4vkzrUiQdkgBUcQ'\n}\n\nglobal prof\nprof = {\n\t'Teacher A': 'teacheraexample@gmail.com',\n\t'Teacher B': 'teacherbexample@gmail.com'\n}\n\nglobal studentemail\nstudentemail = {\n\t'00-01-05-119':'faisalhmohd@gmail.com',\n\t'00-01-05-120':'faisal@webable.com.bd'\n}\n\n\nglobal professorfiles\nprofessorfiles = {\n\t'Teacher A':'1-QdPjJIuCxS8yyMxoXOObqUAbKif0p7m-P3m1tGwwPE',\n\t'Teacher B':'1c7qfmDDUE_eCtVBts7f-FScSGy_9PEPiUxHeyTVeJhY'\n}\n\ndef i(user,sheet,worksheet,prof):\n\tglobal userdata\n\tuserdata = user\n\tglobal sheetdata\n\tsheetdata = sheet\n\tglobal worksheetdata\n\tworksheetdata = worksheet\n\tglobal profdata\n\tprofdata = prof\n\ndef student(user,sheet,worksheet):\n\tglobal studentuserdata\n\tstudentuserdata = user\n\tglobal studentsheetdata\n\tstudentsheetdata = sheet\n\tglobal studentworksheetdata\n\tstudentworksheetdata = worksheet","sub_path":"globalstuff.py","file_name":"globalstuff.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"644880446","text":"#-*- encoding: utf-8 -*-\nimport sys\nr=sys.stdin.readline\n\nN, M = map(int, r().split()) # 1부터 N까지 중복 없이 M개를 고른 수열\n\nvisit = [False] * (N+1)\nresult = [0]\n\n\ndef n_and_m(num):\n result.append(num)\n \n if len(result) == M+1:\n for c in result[1:]:\n print(c, end=' ')\n print()\n 
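# otherwise: mark num as used, recurse over the remaining values, then backtrack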
\n else:\n visit[num] = True\n for i in range(1, N+1):\n if not visit[i]:\n n_and_m(i)\n visit[num] = False\n result.pop()\n\n\nfor i in range(1, N+1):\n n_and_m(i)\n","sub_path":"Algorithm/Baekjoon/15649 N과 M (1)/15649.py","file_name":"15649.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"363997157","text":"import base64\nimport json\nimport requests\n\n\ndef SendStatement(array):\n auth = array['auth'].encode('ascii')\n data = array['data']\n url = 'http://139.196.9.17/xAPI/statements/'\n headers = {\n 'Content-Type': 'application/json',\n 'X-Experience-API-Version': '1.0.1',\n 'Authorization': 'Basic ' + base64.b64encode(auth).decode('utf-8')\n }\n r = requests.post(url, data=json.dumps(data), headers=headers)\n # print(r.status_code)\n print(r.text)\n\n\nstmt = {\"actor\": {\"mbox\": \"mailto:tom@example.com\"},\n \"verb\": {\"id\": \"http://adlnet.gov/expapi/verbs/answered\",\n \"display\": {\"en-US\": \"answered\"}},\n \"object\": {\"id\": \"http://adlnet.gov/expapi/activities/question\"}}\nSendStatement({'data': stmt, 'auth': 'iLearn:iLearn'})\n","sub_path":"post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"341580433","text":"import socket\nfrom threading import Thread\n\nfrom net import protocol\n\n\ndef socket_encode(value):\n return (str(value) + '\\n').encode('UTF-8')\n\n\ndef socket_decode(value):\n return value.decode('UTF-8').strip()\n\n\nclass ForwardThread(Thread):\n def __init__(self, from_socket, from_name, to_socket, to_name, count):\n super().__init__()\n\n self.from_socket = from_socket\n self.to_socket = to_socket\n\n self.from_name = from_name\n self.to_name = to_name\n\n self.count = count\n\n def run(self):\n while True:\n msg = self.from_socket.recv(1024)\n if msg == b'':\n break\n\n print(self.from_name, '->', self.to_name, '(', self.count, '): forwarding', socket_decode(msg))\n try:\n self.to_socket.send(msg)\n except BrokenPipeError:\n pass\n\n if self.to_name == 'pi':\n self.to_socket.send(socket_encode(protocol.P_CLOSE_CONNECTION))\n\n print(self.from_name, '->', self.to_name, '(', self.count, '): stopping')\n\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver_socket.bind(('', 9989))\nserver_socket.listen(5)\n\nport = input('port > ')\nprint('listening')\n\ncount = 0\n\nwhile True:\n (client_socket, address) = server_socket.accept()\n\n print('got a connection! 
creating threads for count', count)\n\n    to_pi = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    to_pi.connect(('172.16.11.58', int(port)))\n\n    emulator_to_pi = ForwardThread(client_socket, 'emulator', to_pi, 'pi', count)\n    pi_to_emulator = ForwardThread(to_pi, 'pi', client_socket, 'emulator', count)\n\n    emulator_to_pi.start()\n    pi_to_emulator.start()\n    #\n    # while emulator_to_pi.is_alive():\n    #     pass\n    #\n    # to_pi.send(socket_encode(protocol.P_CLOSE_CONNECTION))\n    #\n    # while pi_to_emulator.is_alive():\n    #     pass\n\n    count += 1","sub_path":"forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"146076726","text":"# https://leetcode.com/problems/next-permutation/solution/\n'''\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n'''\nclass Solution:\n    def nextPermutation(self, nums):\n        i = len(nums) - 2\n        # find first dec val\n        while i >= 0 and nums[i] >= nums[i + 1]:\n            i -= 1\n\n        # exchange position\n        if i >= 0:\n            j = len(nums) - 1\n            while j >= 0 and nums[j] <= nums[i]:\n                j -= 1\n            nums[i], nums[j] = nums[j], nums[i]\n\n        # reverse from i + 1\n        lo, hi = i + 1, len(nums) - 1\n        while lo < hi:\n            nums[lo], nums[hi] = nums[hi], nums[lo]\n            lo += 1\n            hi -= 1\n","sub_path":"leetcode/py/31.py","file_name":"31.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"220262398","text":"\n\ndef check_requirements(installed_dists):\n    missing_reqs_dict = {}\n    incompatible_reqs_dict = {}\n\n    for dist in installed_dists:\n        key = '%s==%s' % (dist.project_name, dist.version)\n\n        missing_reqs = list(get_missing_reqs(dist, installed_dists))\n        if missing_reqs:\n            missing_reqs_dict[key] = missing_reqs\n\n        incompatible_reqs = list(get_incompatible_reqs(\n            dist, installed_dists))\n        if incompatible_reqs:\n            incompatible_reqs_dict[key] = incompatible_reqs\n\n    return (missing_reqs_dict, incompatible_reqs_dict)\n\n\ndef get_missing_reqs(dist, installed_dists):\n    \"\"\"Return all of the requirements of `dist` that aren't present in\n    `installed_dists`.\n\n    \"\"\"\n    installed_names = set(d.project_name.lower() for d in installed_dists)\n    missing_requirements = set()\n\n    for requirement in dist.requires():\n        if requirement.project_name.lower() not in installed_names:\n            missing_requirements.add(requirement)\n            yield requirement\n\n\ndef get_incompatible_reqs(dist, installed_dists):\n    \"\"\"Return all of the requirements of `dist` that are present in\n    `installed_dists`, but have incompatible versions.\n\n    \"\"\"\n    installed_dists_by_name = {}\n    for installed_dist in installed_dists:\n        installed_dists_by_name[installed_dist.project_name] = installed_dist\n\n    for requirement in dist.requires():\n        present_dist = installed_dists_by_name.get(requirement.project_name)\n\n        if present_dist and present_dist not in requirement:\n            yield (requirement, present_dist)\n","sub_path":"Opencv_pil/source/pip/operations/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"347981238","text":"from datetime import timedelta\n\nfrom airflow import DAG\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\n\n\ndag = DAG(\n    'tutorial1',\n    description='A simple tutorial DAG',\n    schedule_interval=timedelta(days=1),\n)\n\nk = KubernetesPodOperator(\n    namespace=\"default\",\n    image=\"debian:10-slim\",\n    cmds=[\"bash\", \"-cx\"],\n    arguments=[\"echo\", 
\"10\"],\n labels={\"purpose\": \"demo\"},\n name=\"demo\",\n task_id=\"task\",\n is_delete_operator_pod=True,\n hostnetwork=False,\n dag=dag,\n)\n","sub_path":"dags/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"87725345","text":"# -----------------------------\n# IMPORTS\n# -----------------------------\n# Import the necessary packages\nimport time\n\n\n# -----------------------------\n# FUNCTIONS\n# -----------------------------\ndef benchmark(datasetGen, numSteps):\n # Start the timer count\n start = time.time()\n # Loop over the provided number of steps\n for i in range(0, numSteps):\n # Get the next batch of data\n (images, labels) = next(datasetGen)\n # End the timer count\n end = time.time()\n # Return the difference between the end and start times\n return end - start\n\n","sub_path":"tfdata-tutorials/pyimagesearch/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221397541","text":"#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\"\"\" A script shows PM and config values of a TeraFlex\n\nThis is the backend part that includes funcs to retrieve PM and config values.\n\"\"\"\nfrom aos_rest_lib.tera_flex_rest import (\n TeraFlexRest, Mp2D12Ct, Mp2D8Ct, Mp2D3Dt, MpM8Dct, Mp2D8Dct)\n\n\nTF_PM = {\n 'npserv': {'name': 'Line Rate',\n 'func': 'get_ptp_serv',\n 'unit': '',\n 'uri': 'uri_ptp'},\n 'npfreq': {'name': 'Port Frequency',\n 'func': 'get_nw_port_freq',\n 'unit': 'MHz',\n 'uri': 'uri_ptp'},\n 'npbw': {'name': 'Port Bandwidth',\n 'func': 'get_nw_port_bw',\n 'unit': 'MHz',\n 'uri': 'uri_ptp'},\n 'npoptset': {'name': 'Opt. Tx Setpoint',\n 'func': 'get_nw_port_optset',\n 'unit': 'dBm',\n 'uri': 'uri_ptp'},\n 'nplsrsta': {'name': 'Laser State',\n 'func': 'get_nw_port_lsrsta',\n 'unit': '',\n 'uri': 'uri_ptp'},\n 'npopt': {'name': 'Opt. Power Tx',\n 'func': 'get_nw_port_opt',\n 'unit': 'dBm',\n 'uri': 'uri_ptp'},\n 'npopr': {'name': 'Opt. Power Rx Total',\n 'func': 'get_nw_port_opr',\n 'unit': 'dBm',\n 'uri': 'uri_ptp'},\n 'nsfreq': {'name': 'OTSi Frequency',\n 'func': 'get_nw_otsi_freq',\n 'unit': 'MHz',\n 'uri': 'uri_nw_serv'},\n 'nsbw': {'name': 'OTSi Bandwidth',\n 'func': 'get_nw_otsi_bw',\n 'unit': 'MHz',\n 'uri': 'uri_nw_serv'},\n 'nscdc_rng': {'name': 'CDC Range',\n 'func': 'get_nw_otsi_cdc_rng',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nsbsym': {'name': 'Bits per Symbol',\n 'func': 'get_nw_otsi_bsym',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nsflt_rlf': {'name': 'Filter Roll-off',\n 'func': 'get_nw_otsi_flt_rlf',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nsflt_shp': {'name': 'Filter Shape',\n 'func': 'get_nw_otsi_flt_shp',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nsmodu': {'name': 'Modulation',\n 'func': 'get_nw_otsi_modu',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nssop_trk': {'name': 'SOP tracking',\n 'func': 'get_nw_otsi_sop_trk',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'nssym_rate': {'name': 'Symbol Rate',\n 'func': 'get_nw_otsi_sym_rate',\n 'unit': 'GBd',\n 'uri': 'uri_nw_serv'},\n 'nsopr': {'name': 'Opt. 
Power Rx OTSi',\n 'func': 'get_nw_otsi_opr',\n 'unit': 'dBm',\n 'uri': 'uri_nw_serv'},\n 'nscd': {'name': 'CD',\n 'func': 'get_nw_otsi_cd',\n 'unit': 'ps/nm',\n 'uri': 'uri_nw_serv'},\n 'nssop': {'name': 'SOP',\n 'func': 'get_nw_otsi_sop',\n 'unit': 'rad/s',\n 'uri': 'uri_nw_serv'},\n 'nsfreq_ofst': {'name': 'OTSi Freq. Offset',\n 'func': 'get_nw_otsi_freq_ofst',\n 'unit': 'GHz',\n 'uri': 'uri_nw_serv'},\n 'nsq': {'name': 'Q Factor',\n 'func': 'get_nw_otsi_q',\n 'unit': 'dB',\n 'uri': 'uri_nw_serv'},\n 'nspdl': {'name': 'PDL',\n 'func': 'get_nw_otsi_pdl',\n 'unit': 'dB',\n 'uri': 'uri_nw_serv'},\n 'nsdgd': {'name': 'DGD',\n 'func': 'get_nw_otsi_dgd',\n 'unit': 'ps',\n 'uri': 'uri_nw_serv'},\n 'nssnr': {'name': 'SNR',\n 'func': 'get_nw_otsi_snr',\n 'unit': 'dB',\n 'uri': 'uri_nw_serv'},\n 'nsosnr': {'name': 'OSNR',\n 'func': 'get_nw_otsi_osnr',\n 'unit': 'dB',\n 'uri': 'uri_nw_serv'},\n 'ntfec': {'name': 'FEC',\n 'func': 'get_nw_otu_fec',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'ntfec_ber': {'name': 'FEC BER',\n 'func': 'get_nw_otu_fec_ber',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'ntfec_ce_15m': {'name': 'FEC CE 15-min',\n 'func': 'get_nw_otu_fec_ce_15m',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'ntfec_ube_15m': {'name': 'FEC UBE 15-min',\n 'func': 'get_nw_otu_fec_ube_15m',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'ntbbe_15m': {'name': 'OTU BBE 15-min',\n 'func': 'get_nw_otu_bbe_15m',\n 'unit': '',\n 'uri': 'uri_nw_serv'},\n 'ntes_15m': {'name': 'OTU ES 15-min',\n 'func': 'get_nw_otu_es_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_serv'},\n 'ntses_15m': {'name': 'OTU SES 15-min',\n 'func': 'get_nw_otu_ses_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_serv'},\n 'ntuas_15m': {'name': 'OTU UAS 15-min',\n 'func': 'get_nw_otu_uas_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_serv'},\n 'ndbbe_15m': {'name': 'ODU BBE 15-min',\n 'func': 'get_nw_odu_bbe_15m',\n 'unit': '',\n 'uri': 'uri_nw_odu'},\n 'ndes_15m': {'name': 'ODU ES 15-min',\n 'func': 'get_nw_odu_es_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_odu'},\n 'ndses_15m': {'name': 'ODU SES 15-min',\n 'func': 'get_nw_odu_ses_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_odu'},\n 'nduas_15m': {'name': 'ODU UAS 15-min',\n 'func': 'get_nw_odu_uas_15m',\n 'unit': 'sec',\n 'uri': 'uri_nw_odu'},\n 'dtenw_stat_cdc': {'name': 'CDC Status',\n 'func': 'get_dte_nw_stat_cdc',\n 'unit': '',\n 'uri': 'uri_ptp'}\n}\nMOD_PM = TF_PM\n\n\nclass F8ModView:\n def __init__(self, mod_id):\n self.ne_ip = mod_id['ne_ip']\n self.sh_num = mod_id['sh_num']\n self.sl_num = mod_id['sl_num']\n if mod_id['port_id'][0] in ('N', 'n'):\n self.port_id = 'nw,' + mod_id['port_id'][1:]\n elif mod_id['port_id'][0] in ('C', 'c'):\n self.port_id = 'cl,' + mod_id['port_id'][1:]\n\n self.tf = TeraFlexRest(self.ne_ip)\n # to make sure 'Debug' is in user role\n admin_role = self.tf.get_user_role('admin')\n if 'Debug' not in admin_role:\n self.tf.add_user_role('admin', 'Debug')\n\n uri_card = f'/mit/me/1/eqh/shelf,{self.sh_num}' +\\\n f'/eqh/slot,{self.sl_num}/eq/card'\n self.uri = {\n 'uri_ptp': '/mit/me/1/eqh/shelf,{0}/eqh/slot,{1}/eq/card/ptp/{2}'\n .format(self.sh_num, self.sl_num, self.port_id)\n }\n self.serv = self.tf.get_ptp_serv(self.uri['uri_ptp'])\n self.uri['uri_nw_serv'] = self.uri['uri_ptp'] + '/ctp/' + self.serv\n\n cardtype = self.tf.get_cardtype(uri_card)\n if cardtype == 'mp2d12':\n self.mod = Mp2D12Ct(self.ne_ip)\n self.uri['uri_odu'] = f'{self.uri[\"uri_nw_serv\"]}/ctp/odu4-1'\n elif cardtype == 'mp2d8':\n self.mod = Mp2D8Ct(self.ne_ip)\n self.uri['uri_odu'] = 
f'{self.uri[\"uri_nw_serv\"]}/ctp/odu4-1'\n elif cardtype == 'mp2d3dt':\n self.mod = Mp2D3Dt(self.ne_ip)\n self.uri['uri_odu'] = f'{self.uri[\"uri_nw_serv\"]}/ctp/oduf-1'\n elif cardtype == 'mpm8dct':\n self.mod = MpM8Dct(self.ne_ip)\n self.uri['uri_odu'] = f'{self.uri[\"uri_nw_serv\"]}/ctp/odu4-3'\n elif cardtype == 'mp2d8dct':\n self.mod = Mp2D8Dct(self.ne_ip)\n self.uri['uri_odu'] = f'{self.uri[\"uri_nw_serv\"]}/ctp/odu4-3'\n\n self.mod_view = {}\n\n def set_pm_eles(self, eles):\n \"\"\" pm_eles is a dict showing whether to read a PM or not\n\n Example of pm_eles:\n {'01_npserv': True,\n '02_nsmodu': False}\n \"\"\"\n self.pm_eles = eles\n\n def get_mod_view(self):\n \"\"\" return a dict of multiple PM readings for an AID\n\n F8ModView.mod_view: a dict of pm elements. Example:\n {\n '05_cfreq': {'name': 'Frequency',\n 'val': 194800000,\n 'unit': 'MHz'\n },\n }\n \"\"\"\n for ele, is_true in self.pm_eles.items():\n if is_true:\n self.mod_view[ele] = {'name': '',\n 'val': '',\n 'unit': ''}\n # remove the seq number before searching in MOD_PM\n ele_trim = ele.split('_', 1)[1]\n if ele_trim == 'separator':\n continue\n else:\n pm_ele = MOD_PM[ele_trim]\n\n # to get 'name', 'unit' of an element\n self.mod_view[ele]['name'] = pm_ele['name']\n self.mod_view[ele]['unit'] = pm_ele['unit']\n\n # to get 'value', dynamically run corresponding function\n uri = self.uri[pm_ele['uri']]\n val = getattr(self.mod, pm_ele['func'])(uri)\n if val:\n self.mod_view[ele]['val'] = val\n else:\n self.mod_view[ele]['val'] = 'n/a'\n return self.mod_view\n","sub_path":"py_tools/adva_f8/TeraFlexViewer/tf_viewer_base.py","file_name":"tf_viewer_base.py","file_ext":"py","file_size_in_byte":9642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"63341279","text":"import os\n\nfrom .build_config import SVCBuildConfig\nfrom .train_config import SVCTrainConfig\nfrom .generator_config import SVCGeneratorConfig\n\n\nclass SVCIRISBuildConfig(\n SVCBuildConfig,\n ):\n NAME = os.path.join(SVCBuildConfig.NAME, 'iris')\n BUILD_NAME = os.path.join(SVCBuildConfig.BUILD_NAME, 'iris')\n\n def __init__(self):\n super(SVCIRISBuildConfig, self).__init__()\n self.DEGREE = 0\n\n\nclass SVCIRISTrainConfig(\n SVCTrainConfig,\n ):\n TRAIN_NAME = os.path.join(SVCTrainConfig.TRAIN_NAME, 'iris')\n\n def __init__(self):\n super(SVCIRISTrainConfig, self).__init__()\n\n\nclass SVCIRISConfig(\n SVCGeneratorConfig,\n ):\n NAME = os.path.join(SVCGeneratorConfig.NAME, 'iris')\n def __init__(self):\n self.X_COL = ['sepal length (cm)', 'sepal width (cm)',\n 'petal length (cm)', 'petal width (cm)'],\n self.Y_COL = ['label']\n self.DATAFRAME_PATH = 'dataset/iris/iris.csv'\n self.VALID_DATAFRAME_PATH = 'dataset/iris/iris.csv'\n","sub_path":"model/SVM/SVC/config_samples.py","file_name":"config_samples.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"440732291","text":"import unittest\nimport numpy.testing as npt\nimport numpy as np\nfrom subprocess import check_call\nfrom proteus.Profiling import logEvent\nfrom proteus import Comm, Profiling\nfrom proteus import Domain\nfrom proteus import MeshTools\n\ncomm = Comm.init()\nProfiling.procID = comm.rank()\nlogEvent(\"Testing Gmsh Mesh Conversion\")\n\nclass TestBC(unittest.TestCase):\n def test_gmsh_generation_2D(self):\n domain = Domain.PlanarStraightLineGraphDomain()\n domain.vertices = [[0., 0., 0.],\n [5., 0., 0.],\n [5., 5., 0.],\n [0., 5., 0.]]\n 
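# the test domain is a 5 x 5 square: four corner vertices, four boundary\n        # segments, and one planar facet, meshed below with he_max=0.1\n        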
domain.segments = [[0, 1], [1, 2], [2, 3], [3, 0]]\n domain.facets = [[[0, 1, 2, 3]]]\n domain.writeGeo('gmsh_mesh_test', he_max=0.1)\n gmsh_cmd = \"gmsh {0:s} -v 10 -2 -o {1:s} -format msh\".format(domain.geofile+\".geo\", domain.geofile+\".msh\")\n check_call(gmsh_cmd, shell=True)\n MeshTools.msh2triangle(domain.geofile)\n with open('gmsh_mesh_test.node', 'r') as nodefile:\n npt.assert_equal(nodefile.readline(), '3425 2 0 1\\n')\n with open('gmsh_mesh_test.edge', 'r') as edgefile:\n npt.assert_equal(edgefile.readline(), '10072 1\\n')\n with open('gmsh_mesh_test.ele', 'r') as elefile:\n npt.assert_equal(elefile.readline(), '6648 3 1\\n')\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"proteus/tests/test_gmsh_generation.py","file_name":"test_gmsh_generation.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"304941702","text":"import sys\nimport os\nimport imp\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nREADME = open(os.path.join(here, 'README.rst')).read()\nNEWS = open(os.path.join(here, 'NEWS.rst')).read()\n\nversion = imp.load_source(\n 'jig', os.path.join(here, 'src', 'jig', '__init__.py')).__version__\n\ninstall_requires = [\n 'GitPython==0.3.2',\n 'docutils>=0.9.1']\n\n# Shims for missing stuff in Python 2.6\nmajor, minor, patch, releaselevel, serial = sys.version_info\nif major == 2 and minor < 7:\n install_requires += ['ordereddict==1.1', 'unittest2==0.5.1']\n\nsetup(\n name='jig',\n version=version,\n description=\"Check your code for stuff before you `git commit`\",\n long_description=README + '\\n\\n' + NEWS,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Version Control',\n 'Topic :: Text Processing'\n ],\n keywords='git hooks code smell lint',\n author='Rob Madole',\n author_email='robmadole@gmail.com',\n url='http://github.com/robmadole/jig',\n license='MIT',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'jig = jig.entrypoints:main']}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229952680","text":"# coding=utf-8\nimport random\nimport numpy as np\nimport torch\n\"\"\"\nNon-overlapping: take sets of 5 classes at a time. 12 splits of 5 classes each.\nIntra-task shuffling: randomize class order every epoch.\nInter-task shuffling: randomly create sets of classes (should be the default). 
\n\"\"\"\n\nclass BatchSampler(object):\n '''\n BatchSampler: yield a batch of indexes at each iteration.\n\n The default in few-shot learning papers, where each iteration randomly selects classes_per_it (cpi).\n to synthesize a batch.\n\n __len__ returns the number of episodes per epoch (same as 'self.iterations').\n '''\n\n def __init__(self, labels, classes_per_it, num_samples, iterations, batch_size, intratask_shuffle=True):\n '''\n Initialize the BatchSampler object\n Args:\n - labels: an iterable containing all the labels for the current dataset\n samples indexes will be infered from this iterable.\n - classes_per_it: number of random classes for each iteration\n - num_samples: number of samples for each iteration for each class\n - iterations: number of iterations (episodes) per epoch\n '''\n super(BatchSampler, self).__init__()\n self.labels = labels\n self.classes_per_it = classes_per_it\n self.sample_per_class = num_samples\n self.iterations = iterations\n self.batch_size = batch_size\n self.intratask_shuffle = intratask_shuffle\n self.classes, self.counts = np.unique(self.labels, return_counts=True)\n\n self.idxs = range(len(self.labels))\n self.label_tens = np.empty((len(self.classes), max(self.counts)), dtype=int) * np.nan\n self.label_lens = np.zeros_like(self.classes)\n for idx, label in enumerate(self.labels):\n label_idx = np.argwhere(self.classes == label)[0, 0]\n self.label_tens[label_idx, np.where(np.isnan(self.label_tens[label_idx]))[0][0]] = idx\n self.label_lens[label_idx] += 1\n\n def sample_class_idxs(self):\n # Note, np.random.choice might have been a more efficient implementation than permuting the entire class list.\n # But leaving the original impl. unchanged.\n return np.random.permutation(len(self.classes))[:self.classes_per_it]\n\n def __iter__(self):\n '''\n yield a batch of indexes\n '''\n spc = self.sample_per_class + 1 # To get that extra sample, which we throw away all but 1 of the class.\n cpi = self.classes_per_it # cpi-way\n num_samples = spc * cpi\n true_num_samples = (spc - 1) * cpi + 1 # the actual number of sampels in the batch (duplicate spc thrown away). == spc*cpi - (cpi-1)\n\n for it in range(self.iterations):\n total_batch = np.array([])\n for _ in range(self.batch_size): # 'meta-batch' (across tasks)\n batch = np.empty(num_samples) # initialize array. will contain multiple image indices of different classes.\n c_idxs = self.sample_class_idxs()\n # Example: for 5-way, 1-shot, we build up a list:\n # we build up a batch of [i1, i2, i3, i4, i5, j1, j2, j3, j4, j5]\n # slice objects indexes (0, 5). then (1, 6), (2, 7), etc.\n # choose the class by picking an offset between (0, num_classes), e.g. 3\n # then [3:3+6] = [i4, i5, j1, j2, j3, j4]\n # this is problematic b.c. 
it will always result in the target class j4 being the val class.\n for i, c in enumerate(self.classes[c_idxs]):\n s = slice(i, i + num_samples, cpi)\n label_idx = np.argwhere(self.classes == c)[0, 0]\n if spc > self.label_lens[label_idx]:\n raise AssertionError('More samples per class than exist in the dataset')\n # Samples within the dataset's instances of the class c.\n sample_idxs = np.random.permutation(self.label_lens[label_idx])[:spc]\n # Select specific images from this label_idx via sample_idxs.\n batch[s] = self.label_tens[label_idx][sample_idxs]\n if self.intratask_shuffle:\n # This codebase has a weird 'circular wraparound' method of choosing validation examples, by selecting an offset.\n # offset is used to select the class (last_layer_input) to classify.\n offset = random.randint(0, cpi-1)\n batch = batch[offset:offset + true_num_samples]\n # Permuting the ordering of the training examples within the inner loop for the sequential meta-learner. The -1 causes\n # it to ignore the last element in the batch, which is the one we classify.\n batch[:true_num_samples - 1] = batch[:true_num_samples - 1][np.random.permutation(true_num_samples - 1)]\n else:\n # keep training batch order fixed (since it informs the class logit ordering).\n # Select inner validation example from the last `cpi` elements of the batch.\n batch = batch[:true_num_samples] # 5 + 1\n batch[-1] = np.random.choice(batch[-cpi:])\n total_batch = np.append(total_batch, batch)\n yield total_batch.astype(int)\n\n def __len__(self):\n '''\n returns the number of iterations (episodes) per epoch\n '''\n return self.iterations\n\n\nclass IntraTaskBatchSampler(BatchSampler):\n \"\"\"A simple modification on top of existing BatchSampler: choose class idx from fixed set of non-overlapping sets.\n within a inner batch, tasks are still permuted.\n \"\"\"\n def __init__(self, labels, classes_per_it, num_samples, iterations, batch_size):\n super(IntraTaskBatchSampler, self).__init__(labels, classes_per_it, num_samples, iterations, batch_size, intratask_shuffle=True)\n # Set up class sets.\n self._class_sets = [np.array(range(i*classes_per_it, i*classes_per_it+classes_per_it)) for i in range(len(self.classes)//classes_per_it)]\n\n def sample_class_idxs(self):\n # TODO - do we need to permute the classes here?\n return self._class_sets[np.random.choice(len(self._class_sets))]\n\n\nclass NonOverlappingTasksBatchSampler(BatchSampler):\n \"\"\"Fixed set of non-overlapping sets, and no randomization within inner batch.\"\"\"\n def __init__(self, labels, classes_per_it, num_samples, iterations, batch_size):\n super(NonOverlappingTasksBatchSampler, self).__init__(labels, classes_per_it, num_samples, iterations, batch_size, intratask_shuffle=False)\n # Set up class sets.\n self._class_sets = [np.array(range(i*classes_per_it, i*classes_per_it+classes_per_it)) for i in range(len(self.classes)//classes_per_it)]\n\n def sample_class_idxs(self):\n return self._class_sets[np.random.choice(len(self._class_sets))]","sub_path":"src/batch_sampler.py","file_name":"batch_sampler.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"161589288","text":"\n\ndef swap(first, second):\n # print(first, second)\n F = first[:]\n for i in range(len(first)):\n first.pop(0)\n # print(first, F)\n S = second[:]\n for i in range(len(second)):\n second.pop(0)\n # print(second, S)\n first.extend(S)\n second.extend(F)\n\n","sub_path":"First year/Функции 2/Обмен 
личностями.py","file_name":"Обмен личностями.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"69298434","text":"\"\"\" Easy Challenge 286 \"\"\"\n\n\ndef evendiv(n, d=2):\n    t = int(n / d)\n    if n % d == 0:\n        if t == 1:\n            return d\n        return evendiv(t, d + 1)\n    else:\n        return False\n\n\ndef main():\n    # print(evendiv(int(sys.argv[1])))\n    for i in (3628800, 479001600, 6, 18):\n        print(str(i) + \" -> \" + str(evendiv(i)) + \"!\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"archive/dailyprogrammer/286e.py","file_name":"286e.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81159793","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\nUsage of multiprocessing Pool\n\n- Provides a specified number of worker processes for the user to call\n- When a new request is submitted to the Pool and the pool is not yet full, a new process is created to run the request\n- If the pool is full, the request waits until a process in the pool finishes, and only then is a new process created to run it\n\nCommonly used methods of a Pool object:\n\n- apply(): passes variable arguments; the main process blocks until the function finishes (not recommended, no longer present after 3.x)\n- apply_async(): same usage as apply, but non-blocking, and supports a callback once the result is returned\n- map(): blocks the process until the results are returned\n- map_async(): same usage as map, but non-blocking\n- close(): closes the pool so that it no longer accepts new tasks\n- terminate(): ends the worker processes and stops handling pending tasks\n- join(): the main process blocks until the child processes exit; join must be used after close or terminate\n\"\"\"\n\nimport time\nimport multiprocessing\nfrom multiprocessing import Pool\nfrom language.python.modules.multiprocessing.multiprocessing_module import time_counter\n\n\ndef task(n):\n    if not isinstance(n, int):\n        raise ValueError\n    time.sleep(n)\n    ret = n * n\n    print(f'{n}*{n}: {ret}')\n    return ret\n\n\n@time_counter\ndef basic_demo():\n    cores = multiprocessing.cpu_count()  # count the CPU cores\n    pool = Pool(processes=cores)\n    # ret = pool.map(func=task, iterable=range(3))  # map returns a list directly\n    # print(ret)\n\n    cnt = 0\n    for _ in pool.imap_unordered(task, range(8)):  # imap_unordered returns an iterator\n        print('done %d/%d\\r' % (cnt, len(range(8))))\n        cnt += 1\n\n\n@time_counter\ndef join_demo():\n    cores = multiprocessing.cpu_count()  # count the CPU cores\n    pool = Pool(processes=cores)\n\n    for i in range(10):\n        pool.apply_async(func=task, args=(i,))\n\n    pool.close()  # stop accepting new tasks\n    pool.join()  # block until all tasks are done\n\n\nif __name__ == '__main__':\n    # basic_demo()\n    join_demo()\n","sub_path":"language/python/modules/System/multiprocessing/multiprocessing_module_Pool.py","file_name":"multiprocessing_module_Pool.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"320552782","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\nINPUT = sys.argv[1]\nOUTPUT = sys.argv[2]\n\nfig, ax = plt.subplots(1)\ndf = pd.read_csv(INPUT, sep=' ', header=None)\ndf[3]=df[0].str.len()\nf=df.plot(kind='scatter', y=2, x=3, c=1, ax = ax)\nax.set_xlabel('length')\nax.set_ylabel('percent words')\nfig.savefig(OUTPUT)\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375626131","text":"from djitellopy import tello\r\nimport cv2\r\nimport cvzone\r\nfrom cvzone.FaceDetectionModule import FaceDetector\r\n\r\ndetector = FaceDetector(minDetectionCon=0.5)\r\n\r\n# cap = cv2.VideoCapture(0)\r\n# _, img = cap.read()\r\nhi, wi, = 480, 640\r\n# print(hi, wi)\r\n# P I D\r\nxPID = cvzone.PID([0.22, 0, 0.1], wi // 2)\r\nyPID = cvzone.PID([0.27, 0, 0.1], hi // 2, axis=1)\r\nzPID = cvzone.PID([0.005, 0, 0.003], 
12000,limit=[-20,15])\r\n\r\nmyPlotX = cvzone.LivePlot(yLimit=[-100, 100], char='X')\r\nmyPlotY = cvzone.LivePlot(yLimit=[-100, 100], char='Y')\r\nmyPlotZ = cvzone.LivePlot(yLimit=[-100, 100], char='Z')\r\n\r\nme = tello.Tello()\r\nme.connect()\r\nprint(me.get_battery())\r\nme.streamoff()\r\nme.streamon()\r\nme.takeoff()\r\nme.move_up(80)\r\n\r\nwhile True:\r\n # _, img = cap.read()\r\n img = me.get_frame_read().frame\r\n img = cv2.resize(img, (640, 480))\r\n img, bboxs = detector.findFaces(img, draw=True)\r\n\r\n xVal = 0\r\n yVal = 0\r\n zVal = 0\r\n\r\n if bboxs:\r\n cx, cy = bboxs[0]['center']\r\n x, y, w, h = bboxs[0]['bbox']\r\n area = w * h\r\n\r\n xVal = int(xPID.update(cx))\r\n yVal = int(yPID.update(cy))\r\n zVal = int(zPID.update(area))\r\n # print(zVal)\r\n imgPlotX = myPlotX.update(xVal)\r\n imgPlotY = myPlotY.update(yVal)\r\n imgPlotZ = myPlotZ.update(zVal)\r\n\r\n img = xPID.draw(img, [cx, cy])\r\n img = yPID.draw(img, [cx, cy])\r\n # imgStacked = cvzone.stackImages([img, imgPlotX, imgPlotY, imgPlotZ], 2, 0.75)\r\n imgStacked = cvzone.stackImages([img], 1, 0.75)\r\n # Display Area\r\n #cv2.putText(imgStacked, str(area), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)\r\n else:\r\n imgStacked = cvzone.stackImages([img], 1, 0.75)\r\n\r\n me.send_rc_control(0, -zVal, -yVal, xVal)\r\n #me.send_rc_control(0, -zVal, 0, 0)\r\n cv2.imshow(\"Image Stacked\", imgStacked)\r\n\r\n if cv2.waitKey(5) & 0xFF == ord('q'):\r\n me.land()\r\n break\r\ncv2.destroyAllWindows()\r\n","sub_path":"project/Part1/FaceFollowing.py","file_name":"FaceFollowing.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"356512907","text":"import gensim\r\nimport logging\r\nimport numpy as np\r\n\r\nmodel = gensim.models.Word2Vec.load(\"C:/Users/guozn/Desktop/wiki_model\")\r\n\r\ncatag = {}\r\ncatag[\"商业\"] = [\"商\", \"贸易\", \"销售\", \"经营\"]\r\ncatag[\"服务业\"] = [\"物流\", \"房地产\", \"酒店\", \"服务\", \"事务\", \"招标\"]\r\ncatag[\"工程\"] = [\"路\", \"桥\", \"建\", \"环境\", \"装\", \"林\"]\r\ncatag[\"医药\"] = [\"医\", \"药\", \"卫生\"]\r\ncatag[\"技术\"] = [\"科技\", \"电子\", \"通信\", \"网络\"]\r\ncatag[\"文娱\"] = [\"文\", \"娱乐\", \"影视\", \"广告\", \"演艺\"]\r\n\r\n\r\ndef makeFeatureVec(words, model, num_features):\r\n # Function to average all of the word vectors in a given\r\n # paragraph\r\n #\r\n # Pre-initialize an empty numpy array (for speed)\r\n featureVec = np.zeros((num_features,), dtype=\"float32\")\r\n #\r\n nwords = 0.0\r\n #\r\n # Index2word is a list that contains the names of the words in\r\n # the model's vocabulary. 
Convert it to a set, for speed\r\n index2word_set = set(model.wv.index2word)\r\n #\r\n # Loop over each word in the review and, if it is in the model's\r\n # vocaublary, add its feature vector to the total\r\n for word in words:\r\n if word in index2word_set:\r\n nwords = nwords + 1.0\r\n featureVec = np.add(featureVec, model[word])\r\n #\r\n # Divide the result by the number of words to get the average\r\n featureVec = np.divide(featureVec, nwords)\r\n return featureVec\r\n\r\n\r\ndef getAvgFeatureVecs(reviews, model, num_features):\r\n counter = 0\r\n reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\r\n for review in reviews:\r\n if counter % 1000 == 0:\r\n print(\"Review %d of %d\" % (counter, len(reviews)))\r\n reviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)\r\n counter = counter + 1\r\n return reviewFeatureVecs\r\n\r\n\r\nnames = [u\"商业\", u\"服务业\", u\"工程\", u\"医药\", u\"技术\", u\"文娱\"]\r\ncenter = []\r\nfor key, value in catag.items():\r\n for i in value:\r\n center.append(model[i])\r\n\r\nfrom sklearn.decomposition import PCA\r\nfrom matplotlib import pyplot\r\nfrom pandas import DataFrame\r\n\r\n# 基于2d PCA拟合数据\r\npca = PCA(n_components=2)\r\nX = center\r\nprint(X)\r\nresult = pca.fit_transform(X)\r\nprint(result, names)\r\n# 可视化展示\r\npyplot.scatter(result[:, 0], result[:, 1])\r\nwords = list(X)\r\nfor i, word in enumerate(words):\r\n pyplot.annotate(names[i], xy=(result[i, 0], result[i, 1]))\r\npyplot.show()\r\n","sub_path":"final/代码/word2vec_compute.py","file_name":"word2vec_compute.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"436958679","text":"def read_ids_from_file(id_file):\n with open(id_file, 'r') as fimage:\n list_ids = fimage.readlines()\n #print list_ids\n fimage.closed\n ids = []\n for i in list_ids:\n uuid = i.split()[0]\n #print uuid\n ids.append(uuid)\n return ids","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277156298","text":"import spacy\nfrom python_sql_connector import *\nroll_number = '1401cs01'\n\ncols = ['rollno', 'semno', 'year', 'subno', 'crd', 'grade', 'date_of_entry', 'sub_type']\n\ndef create_sql(sql, flag):\n\tsql_query = 'SELECT ' + sql['Select'] + ' FROM sem_grades '\n\tsql_condition = 'WHERE '\n\tsql_condition_dict = {}\n\tfor i, condition in enumerate(sql['Condition_val_type']):\n\t\tif condition == 'XXddd' or condition == 'xxddd':\n\t\t\tsql_condition_dict['subno'] = '\\'' + sql['Condition_val'][i] + '\\''\n\t\telif condition == 'ddddxxdd' or condition == 'ddddXXdd':\n\t\t\tsql_condition_dict['rollno'] = '\\'' + sql['Condition_val'][i] + '\\''\n\tfor condition_pair in sql['additional']:\n\t\tfor key, value in condition_pair.items():\n\t\t\tsql_condition_dict[key] = '\\'' + value + '\\''\n\t# if 'rollno' not in sql_condition_dict.keys() and flag == 0:\n\t# \tsql_condition_dict['rollno'] = '\\'' + roll_number + '\\''\n\tfor i, condition in enumerate(sql_condition_dict):\n\t\tif i == 0:\n\t\t\tsql_condition = sql_condition + condition + ' = ' + sql_condition_dict[condition]\n\t\telse:\n\t\t\tsql_condition = sql_condition + ' AND ' + condition + ' = ' + sql_condition_dict[condition]\n\tsql_condition = sql_condition + ';'\n\tsql_query = sql_query + sql_condition\n\n\t# if sql['Condition_val_type'] == 'XXddd':\n\t# \tsql_query = 'SELECT ' + sql['Select'].text + 
' FROM table_name ' + 'WHERE rollno = ' + roll_number + ' AND ' + 'subno = ' + sql['Condition_val'].text + ';'\n\t# else:\n\t# \troll_number1 = roll_number\n\t# \tif sql['Condition_val_type'] == 'ddddxxdd':\n\t# \t\troll_number1 = sql['Condition_val'].text\n\t# \tsql_query = 'SELECT ' + sql['Select'].text + ' FROM table_name ' + 'WHERE Roll_number = ' + roll_number1 + ';'\n\t# \tif len(sql['additional']) > 0:\n\t# \t\tsql['Select'] = sql['additional'][0].text + '_' + sql['Condition_val'].text + '_' + sql['Select'].text\n\t# \t\tsql_query = 'SELECT ' + sql['Select'] + ' FROM table_name ' + 'WHERE Roll_number = ' + roll_number1 + ';'\n\tprint(sql_query)\n\treturn sql_query\n\n\ndef dfs_what(vertex,past,summary, sql):\n\tif summary[vertex.text]['Dep']=='nsubj' and vertex.text!='what' and vertex.text!='who':\n\t\tsql['Select']=vertex.text\n\telif summary[vertex.text]['Dep']=='attr' and vertex.text!='what' and vertex.text!='who':\n\t\tsql['Select']=vertex.text\n\telif summary[vertex.text]['Dep']!='pobj' and summary[vertex.text]['Dep']!='det' and summary[past.text]['Dep']=='pobj':\n\t\tsql['Condition_val']=past.text\n\t\tsql['Condition_val_type'] = vertex.shape_\n\t\tsql['additional'].append(vertex)\n\telif summary[vertex.text]['Dep']=='pobj' and len(summary[vertex.text]['Children'])==0 and len(sql['Select'])!=0:\n\t\tsql['Condition_val'].append(vertex.text)\n\t\tsql['Condition_val_type'].append(vertex.shape_)\n\tfor child in summary[vertex.text]['Children']:\n\t\tdfs_what(child,vertex,summary, sql)\n\ndef list_sql_extract(summary, sql):\n\tfor token, value in summary.items():\n\t\tif value['Dep'] == 'dobj' and value['is_stop'] == False:\n\t\t\tsql['Select'] = token\n\t\tif value['Dep'] == 'pobj' and len(value['Children']) == 0:\n\t\t\tsql['Condition_val'].append(token)\n\t\t\tsql['Condition_val_type'].append(value['Shape'])\n\t\tif value['Dep'] == 'pobj' and len(value['Children']) != 0:\n\t\t\tif summary[value['Children'][0].text]['Dep'] != 'acl': \n\t\t\t\tsql['additional'].append({token : value['Children'][0].text})\n\ndef which_sql_extract(summary, sentence, sql):\n\timax = 99999\n\telements = sentence.split()\n\tfor i, element in enumerate(elements):\n\t\tif element in cols and i < imax:\n\t\t\tsql['Select'] = element\n\t\t\timax = i\n\t\telif element in cols:\n\t\t\tsql['additional'].append({element : elements[i+1]})\n\t\telif element == 'my':\n\t\t\tsql['additional'].append({'rollno' : roll_number})\n\tfor token, value in summary.items():\n\t\tif value['Shape'] == 'XXddd' or value['Shape'] == 'xxddd' or value['Shape'] == 'ddddxxdd' or value['Shape'] == 'ddddXXdd':\n\t\t\tsql['Condition_val'].append(token)\n\t\t\tsql['Condition_val_type'].append(value['Shape']) \n\n\ndef create_dictionary(sentence, doc):\n\tsummary = dict()\n\troot = ''\n\t#sql = dict()\n\tfor token in doc:\n\t\tlst = [child for child in token.children]\n\t\tprint(lst)\n\t\tif token.dep_ == 'ROOT':\n\t\t\t#root = token.text\n\t\t\troot = token\n\t\tsummary[token.text] = {'Children' : lst, 'Dep' : token.dep_, 'Shape' : token.shape_, 'is_stop' : token.is_stop}\n\n\t# for child in summary[root]['Children']:\n\t# \tprint(type(child))\n\t# \tif summary[child.text]['Dep'] == 'nsubj':\n\t# \t\tsql['Select'] = child\n\t# \t\tfor child1 in summary[str(child)]['Children']:\n\t# \t\t\tif summary[str(child1)]['Dep'] == 'prep':\n\t# \t\t\t\tfor child2 in summary[str(child1)]['Children']:\n\t# \t\t\t\t\tsql['Condition_val'] = child2\n\t# \t\t\t\t\tsql['Condition_val_type'] = child2.shape_\n\t# \t\t\t\t\tsql['additional'] = []\n\t# 
\t\t\t\t\tfor child3 in summary[str(child2)]['Children']:\n\t# \t\t\t\t\t\tif child3.dep_ != 'det':\n\t# \t\t\t\t\t\t\tsql['additional'].append(child3)\n\tsql = dict()\n\tsql['additional'] = []\n\tsql['Condition_val'] = []\n\tsql['Condition_val_type'] = []\n\tif root.text == 'List':\n\t\tlist_sql_extract(summary, sql)\n\t\tflag = 1\n\telif 'which' in sentence or 'Which' in sentence:\n\t\twhich_sql_extract(summary, sentence, sql)\n\t\tflag = 3\n\telse:\n\t\tdfs_what(root, root, summary, sql)\n\t\tif 'my' in sentence:\n\t\t\tsql['additional'].append({'rollno' : roll_number})\n\t\tflag = 0\n\tprint(sql)\n\tprint(summary)\n\tsql_query = create_sql(sql, flag)\n\treturn sql_query\n\nif __name__ == '__main__':\n\t#in_path = 'E:/4thSem/inno_lab/wh_questions.txt'\n\tin_path = 'E:/4thSem/inno_lab/which_questions.txt'\n\tdb = initialise_database_connection()\t# Used for connecting to the database.\n\tnlp = spacy.load(\"en_core_web_sm\")\n\twith open(in_path) as f:\n\t\tsentences = f.read().split('\\n')\n\tfor sentence in sentences:\n\t\tif sentence != '':\n\t\t\tdoc = nlp(sentence)\n\t\t\tsql_query = create_dictionary(sentence, doc)\n\t\t\tgenerate_output(sql_query, db)","sub_path":"main_chatbot/wh_nlp_sql.py","file_name":"wh_nlp_sql.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"527769203","text":"from django import forms\nfrom .models import FAQ\n\nclass FAQCreationForm(forms.ModelForm):\n    \"\"\"\n    A form that helps create a FAQ\n    \"\"\"\n    class Meta:\n        model = FAQ\n        fields = ['question', 'answer']\n\n    def __init__(self, *args, **kwargs):\n        super(FAQCreationForm, self).__init__(*args, **kwargs)\n        self.fields['question'].widget.attrs={'class':'form-control'}\n        self.fields['answer'].widget.attrs={'class':'form-control'}","sub_path":"seatkhalichha/apps/faq/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175808147","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('accounts', '0004_auto_20151118_1946'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='account',\n            name='last_history',\n            field=models.ForeignKey(to='accounts.AccountHistory', blank=True),\n        ),\n    ]\n","sub_path":"accounts/migrations/0005_auto_20151118_1949.py","file_name":"0005_auto_20151118_1949.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"530897510","text":"__author__ = 'Klemen'\n# Warm-up task\n\nstevilo = int(input(\"Vnesi stevilo: \"))\ndelitelj = stevilo\nwhile delitelj >= 2:\n\n    if stevilo % delitelj == 0:\n        rez = stevilo // delitelj\n        print(rez)\n    delitelj -= 1\n###################################################################################\n\n# Mandatory task\nstevilo2 = int(input(\"Vnesi stevilo: \"))\ndelitelj2 = stevilo2\nsestevek = 0\nwhile delitelj2 >= 2:\n\n    if stevilo2 % delitelj2 == 0:\n        rez = stevilo2 // delitelj2\n        sestevek = sestevek + rez\n    delitelj2 -= 1\nif sestevek == stevilo2:\n    print(\"True\")\nelse:\n    print(\"False\")\n\n\n","sub_path":"Delitelji/Delitelji.py","file_name":"Delitelji.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"514779430","text":"from django.contrib.gis.geos import GEOSGeometry\nfrom rest_framework import serializers as rest_serializers\nfrom rest_framework_gis.fields import GeometryField\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\nfrom geotrek.feedback import models as feedback_models\n\n\nclass ReportSerializer(rest_serializers.ModelSerializer):\n class Meta:\n model = feedback_models.Report\n id_field = 'id'\n fields = ('id', 'email', 'activity', 'comment', 'category',\n 'status', 'problem_magnitude', 'related_trek',\n 'geom')\n extra_kwargs = {\n 'geom': {'write_only': True},\n }\n\n def validate_geom(self, value):\n return GEOSGeometry(value, srid=4326)\n\n\nclass ReportGeojsonSerializer(GeoFeatureModelSerializer, ReportSerializer):\n # Annotated geom field with API_SRID\n api_geom = GeometryField(read_only=True, precision=7)\n\n class Meta(ReportSerializer.Meta):\n geo_field = 'api_geom'\n fields = ReportSerializer.Meta.fields + ('api_geom', )\n\n\nclass ReportActivitySerializer(rest_serializers.ModelSerializer):\n class Meta:\n model = feedback_models.ReportActivity\n fields = ['id', 'label']\n\n\nclass ReportCategorySerializer(rest_serializers.ModelSerializer):\n class Meta:\n model = feedback_models.ReportCategory\n fields = ['id', 'label']\n\n\nclass ReportProblemMagnitudeSerializer(rest_serializers.ModelSerializer):\n class Meta:\n model = feedback_models.ReportProblemMagnitude\n fields = ['id', 'label']\n","sub_path":"geotrek/feedback/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"60679184","text":"from tkinter import Canvas, Tk\nfrom helpers import make_circle\n\ngui = Tk()\ngui.title('Circles')\ncanvas = Canvas(gui, width=500, height=500, background='#000022')\ncanvas.pack()\n########################## YOUR CODE BELOW THIS LINE ##############################\n\nmake_circle(canvas, (100, 100), 25)\n\n########################## YOUR CODE ABOVE THIS LINE ##############################\ncanvas.mainloop()\n","sub_path":"course-files/lectures/lecture13/while_loops/03_drawing/01_vertical_circles.py","file_name":"01_vertical_circles.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"372300913","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 10 16:51:23 2019\n\n@author: Mitfo\n\"\"\"\n\nimport AoCHelper as AC\n\n\nst = AC.readInputLines(\"input10.txt\")\n#print(st)\n\ndef gcd(a,b):\n a,b = abs(a),abs(b)\n if a == 0:\n return b\n else:\n return gcd(b%a,a)\n\n \nprint(gcd(0,0))\n\ndef removeDiv(a,b):\n c = gcd(a,b)\n while c != 1:\n a,b = int(a/c), int(b/c)\n c = gcd(a,b)\n return a,b\n\n\nst1 = ['...###...','..#..#..#']\n\n\ndef myCode(iname):\n res = 0\n \n for i in range(len(iname)):\n for j in range(len(iname[0])):\n if iname[i][j] == '#':\n temp = {(0,0)}\n for k in range(len(iname)):\n for l in range(len(iname[0])):\n if i == k and j == l:\n continue\n if iname[k][l] == '#':\n temp.add(removeDiv(k-i,l-j))\n temp.remove((0,0))\n if len(temp) > res:\n res = len(temp)\n x,y = i,j\n temp1 = temp\n print(res,x,y)\n return temp1,x,y\n\nfor i in myCode(st)[0]:\n print(i[0])\n break\n\n\ndef splittheres(setofvec):\n res1 = []\n res2 = []\n res3 = []\n res4 = []\n for i in setofvec:\n if i[0] >= 0 and i[1] < 0:\n res1.append([i[0],i[1]])\n elif i[0] > 0 and i[1] >= 0:\n res2.append([i[0],i[1]])\n elif i[0] <= 
0 and i[1] > 0:\n res3.append([i[0],i[1]])\n elif i[0] < 0 and i[1] <= 0:\n res4.append([i[0],i[1]])\n print(len(res1),len(res2),len(res3),len(res4))\n return [res1,res2,res3,res4]\n\n\nte = []\nfor i in splittheres(myCode(st)[0])[3]:\n te.append(i[1]/i[0])\n \nprint(te.index(min(te)))\n \ndef myCode2(iname,num):\n temp,x,y = myCode(iname)\n temp1 = splittheres(temp)\n left = num\n \n left -= len(temp1[0]) + len(temp1[1]) + len(temp1[2])\n while 0 < left:\n te =[]\n for j in temp1[3]:\n te.append(j[1]/j[0])\n res = temp1[3][te.index(min(te))]\n del temp1[3][te.index(min(te))]\n left -= 1\n \n print(res)\n return res\n\nmyCode2(st,200)\nprint(31-13,25-9)\nprint(st[31-13][25-9])\n \n \n\n ","sub_path":"2019/AoC10.py","file_name":"AoC10.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"527169239","text":"from .models import Animal, Equipement\n\n\ndef nourrir(id_animal):\n if Equipement.verifie_disponibilite('mangeoire') == 'occupé':\n occupant = Animal.cherche_occupant('mangeoire')\n return 'Impossible, la mangeoire est actuellement occupée par ' + str(occupant) + '.'\n if Animal.lit_etat(id_animal) != 'affamé':\n return 'Désolé, ' + id_animal + ' n\\'a pas faim!.'\n else:\n Animal.change_lieu(id_animal, 'mangeoire')\n Animal.change_etat(id_animal, 'repus')\n\n\ndef divertir(id_animal):\n if Equipement.verifie_disponibilite('roue') == 'occupé':\n occupant = Animal.cherche_occupant('roue')\n return 'Impossible, la roue est actuellement occupée par ' + str(occupant) + '.'\n if Animal.lit_etat(id_animal) != 'repus':\n return 'Désolé, ' + id_animal + ' n\\'est pas en état de faire du sport!.'\n else:\n Animal.change_lieu(id_animal, 'roue')\n Animal.change_etat(id_animal, 'fatigué')\n\n\ndef coucher(id_animal):\n if Equipement.verifie_disponibilite('nid') == 'occupé':\n occupant = Animal.cherche_occupant('nid')\n return 'Impossible, le nid est actuellement occupée par ' + str(occupant) + '.'\n if Animal.lit_etat(id_animal) != 'fatigué':\n return 'Désolé, ' + id_animal + ' n\\'est pas fatigué!.'\n else:\n Animal.change_lieu(id_animal, 'nid')\n Animal.change_etat(id_animal, 'endormi')\n\n\ndef reveiller(id_animal):\n if Animal.lit_etat(id_animal) != 'endormi':\n return 'Désolé, ' + id_animal + ' ne dort pas!.'\n else:\n Animal.change_lieu(id_animal, 'litière')\n Animal.change_etat(id_animal, 'affamé')\n\n","sub_path":"animalerie/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"42486652","text":"# %%\nfrom typing import List\n\n\nclass Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n if len(nums) <= 1:\n return nums\n for i in range(0, k, 1):\n back = nums[-1]\n rest = nums[0:-1]\n nums[0] = back\n nums[1:] = rest\n return nums\n\n\nsoln = Solution()\nsoln.rotate([1, 2, 3, 4, 5, 6, 7], 3)\n","sub_path":"leetcode/array/rotateArray.py","file_name":"rotateArray.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"631473618","text":"import numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport histogram_module\nimport dist_module\n\n\ndef rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n\n# model_images - list of file names of model images\n# query_images 
- list of file names of query images\n#\n# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'\n# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'\n#\n# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain\n# handles to distance and histogram functions, and to find out whether histogram function\n# expects grayvalue or color image\n\ndef find_best_match(model_images, query_images, dist_type, hist_type, num_bins):\n hist_isgray = histogram_module.is_grayvalue_hist(hist_type)\n\n model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins)\n query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins)\n\n D = np.zeros((len(model_images), len(query_images)))\n\n # compute distance for each couple of query - image\n for j, query in enumerate(query_hists):\n for i, model in enumerate(model_hists):\n D[i, j] = dist_module.get_dist_by_name(model, query, dist_type)\n\n best_match = [] # to save best matches\n\n # for each query , find best model\n for j in range(len(query_images)):\n query_matches = D[:, j] # get query columns from matrix\n argmin = np.argmin(query_matches) # get index with minimum distance\n best_match.append(argmin) # save index for query\n\n best_match = np.array(best_match) # array of best match for each query\n\n return best_match, D\n\n\ndef compute_histograms(image_list, hist_type, hist_isgray, num_bins):\n image_hist = []\n\n # Compute hisgoram for each image and add it at the bottom of image_hist\n\n # ... (your code here)\n for img in image_list:\n img_color = np.array(Image.open(img))\n\n # if hist is gray type we use gray image\n # othewise rgb image\n img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double')\n\n # We compute histogram for image\n hist = histogram_module.get_hist_by_name(img=img_to_process,\n num_bins_gray=num_bins,\n hist_name=hist_type\n )\n image_hist.append(hist)\n\n return image_hist\n\n\n# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'.\n#\n# Note: use the previously implemented function 'find_best_match'\n# Note: use subplot command to show all the images in the same Python figure, one row per query image\n\ndef show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):\n plt.figure()\n\n num_nearest = 5 # show the top-5 neighbors\n\n # ... 
(your code here)\n\n    _, D = find_best_match(model_images=model_images,\n                           query_images=query_images,\n                           dist_type=dist_type,\n                           hist_type=hist_type,\n                           num_bins=num_bins\n                           )\n\n    Q = len(query_images)\n    pos = 0\n    for j in range(Q):\n        query_matches = D[:, j]\n        best_args = np.argsort(query_matches)[:num_nearest]\n\n        query_img = query_images[j]\n\n        pos += 1\n        plt.subplot(Q, 6, pos);\n        plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255);\n        plt.title(f'Q{j}')\n        for ind in range(len(best_args)):\n            pos += 1\n            model_ind = best_args[ind]\n            model_img = model_images[model_ind]\n            plt.subplot(Q, 6, pos);\n            plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255);\n            plt.title(f'MO.{model_ind}')\n\n    plt.show()\n\n\n","sub_path":"Assignment1/Identification/match_module.py","file_name":"match_module.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"310144943","text":"from app import db\nfrom app.models.task import Task\nfrom flask import Blueprint, jsonify,request, make_response, abort \nfrom datetime import date\nfrom app.models.goal import Goal\nimport os\nimport requests\nfrom dotenv import load_dotenv\nfrom slack_sdk import WebClient\nfrom slack_sdk.errors import SlackApiError\n\n\ndef valid_int(number,parameter_type):\n    try:\n        int(number)\n    except:\n        abort(make_response({\"error\":f\"{parameter_type} must be an int\"},400))\n\ndef slack_notification():\n    load_dotenv()\n    slack_token = os.environ[\"SLACK_TOKENS\"]\n    client = WebClient(token=slack_token)\n    try:\n        response = client.chat_postMessage(\n            channel =\"CNEEJDLAW\",\n            text = \"Task completed\"\n        )\n    except SlackApiError as e:\n        return jsonify({\"Error\": \"channel not found\"})\n    \ntasks_bp = Blueprint(\"tasks\",__name__,url_prefix=\"/tasks\")\ngoals_bp = Blueprint(\"goals\", __name__,url_prefix=\"/goals\")\n\n@tasks_bp.route(\"\",methods=[\"GET\"])\ndef handle_tasks():\n    sort_query = request.args.get(\"sort\")\n    if sort_query == \"asc\":\n        tasks = Task.query.order_by(Task.title.asc())\n    elif sort_query == \"desc\":\n        tasks = Task.query.order_by(Task.title.desc())\n    else:\n        tasks = Task.query.all()\n    tasks_response =[]\n    for task in tasks:\n        tasks_response.append(task.to_dict())\n    return jsonify(tasks_response),200\n\n@tasks_bp.route(\"/<task_id>\",methods=[\"GET\",\"PUT\",\"DELETE\"])\ndef get_task(task_id):\n    valid_int(task_id, \"task_id\")\n    task = Task.query.get_or_404(task_id)\n    if request.method == \"GET\":\n        return jsonify({\"task\":task.to_dict()}),200\n    elif request.method == \"PUT\":\n        request_body = request.get_json()\n        if \"title\" in request_body:\n            task.title = request_body[\"title\"]\n        if \"description\" in request_body:\n            task.description = request_body[\"description\"]\n        if \"completed_at\" in request_body:\n            task.completed_at = request_body[\"completed_at\"]\n        db.session.commit()\n        return jsonify({\"task\":task.to_dict()}),200\n    elif request.method == \"DELETE\":\n        db.session.delete(task)\n        db.session.commit()\n        return jsonify({\"details\":f'Task {task_id} \"{task.title}\" successfully deleted'}),200\n\n@tasks_bp.route(\"\",methods=[\"POST\"])\ndef create_task():\n    request_body = request.get_json()\n    if 'title' not in request_body or 'description' not in request_body or\\\n    'completed_at' not in request_body:\n        return jsonify({'details': \"Invalid data\"}),400\n    \n    new_task = Task(\n        title=request_body[\"title\"],\n        description = request_body[\"description\"],\n        completed_at = request_body[\"completed_at\"]\n    )\n    
db.session.add(new_task)\n    db.session.commit()\n    return jsonify({\"task\":new_task.to_dict()}),201\n\n@tasks_bp.route(\"/<task_id>/mark_complete\",methods=[\"PATCH\"])\ndef mark_complete_task(task_id):\n    valid_int(task_id, \"task_id\")\n    task = Task.query.get_or_404(task_id)\n    task.completed_at = date.today()\n    db.session.commit()\n    slack_notification()\n    return jsonify({\"task\":task.to_dict()}),200\n\n@tasks_bp.route(\"/<task_id>/mark_incomplete\",methods=[\"PATCH\"])\ndef mark_incomplete_task(task_id):\n    valid_int(task_id, \"task_id\")\n    task = Task.query.get_or_404(task_id)\n    task.completed_at = None \n    db.session.commit()\n    return jsonify({\"task\":task.to_dict()}),200 \n\n@goals_bp.route(\"\", methods=[\"POST\"])\ndef create_goal():\n    request_body = request.get_json()\n    if \"title\" not in request_body:\n        return jsonify({'details': \"Invalid data\"}),400\n    new_goal = Goal(title = request_body[\"title\"])\n    db.session.add(new_goal)\n    db.session.commit()\n    return jsonify({\"goal\":new_goal.to_dict()}),201\n    \n@goals_bp.route(\"\", methods=[\"GET\"])\ndef handle_goals():\n    goals = Goal.query.all()\n    goals_response = []\n    for goal in goals:\n        goals_response.append(goal.to_dict())\n    return jsonify(goals_response), 200\n    \n@goals_bp.route(\"/<goal_id>\", methods=[\"GET\", \"PUT\",\"DELETE\"])\ndef get_goal(goal_id):\n    valid_int(goal_id,\"goal_id\")\n    goal = Goal.query.get_or_404(goal_id)\n    if request.method == \"GET\":\n        return jsonify({\"goal\":goal.to_dict()}),200\n    elif request.method == \"DELETE\":\n        db.session.delete(goal)\n        db.session.commit()\n        return jsonify({\"details\":f\"Goal {goal_id} \\\"{goal.title}\\\" successfully deleted\"})\n    elif request.method == \"PUT\":\n        request_body = request.get_json()\n        goal.title = request_body[\"title\"]\n        db.session.commit()\n        return jsonify({\"goal\":goal.to_dict()}),200\n    \n@goals_bp.route(\"/<goal_id>/tasks\", methods=[\"POST\"])\ndef post_task_ids_to_goal(goal_id):\n    valid_int(goal_id,\"goal_id\")\n    request_body = request.get_json()\n    goal = Goal.query.get(goal_id)\n    task_ids = request_body[\"task_ids\"]\n    for task_id in task_ids:\n        task = Task.query.get(task_id)\n        goal.tasks.append(task)\n    db.session.commit()\n    return jsonify({\"id\":goal.id, \"task_ids\": [task.id for task in goal.tasks]}),200 \n\n@goals_bp.route(\"/<goal_id>/tasks\", methods=[\"GET\"])\ndef get_tasks_for_goal(goal_id):\n    valid_int(goal_id,\"goal_id\")\n    goal = Goal.query.get_or_404(goal_id)\n    response_body = {\"id\":goal.id,\n                    \"title\":goal.title,\n                    \"tasks\":goal.task_lists() \n                    }\n    print(response_body)\n    return jsonify(response_body),200","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"85426393","text":"#!/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport eventlet\n\neventlet.monkey_patch(\n    os=True,\n    select=True,\n    socket=True,\n    thread=False if '--use-debugger' in sys.argv else True,\n    time=True)\n\nimport os\n\n# If 
../lbaas/__init__.py exists, add ../ to Python search path, so that\n# it will override what happens to be installed in /usr/(local/)lib/python...\nPOSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),\n                                   os.pardir,\n                                   os.pardir))\nif os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'lbaas', '__init__.py')):\n    sys.path.insert(0, POSSIBLE_TOPDIR)\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom wsgiref import simple_server\n\nfrom lbaas.api import app\nfrom lbaas import config\nfrom lbaas.drivers import driver\n\n\nCONF = cfg.CONF\n\nLOG = logging.getLogger(__name__)\n\n\ndef launch_api():\n    host = cfg.CONF.api.host\n    port = cfg.CONF.api.port\n\n    server = simple_server.make_server(\n        host,\n        port,\n        app.setup_app()\n    )\n\n    LOG.info(\"LBaaS API is serving on http://%s:%s (PID=%s)\" %\n             (host, port, os.getpid()))\n\n    server.serve_forever()\n\n\ndef get_properly_ordered_parameters():\n    \"\"\"Orders launch parameters in the right order.\n\n    In oslo the order of the launch parameters is important:\n    if --config-file comes after the command line parameters, the command\n    line parameters are ignored.\n    So, to make sure user command line parameters are never ignored, this\n    method moves --config-file so that it always comes first.\n    \"\"\"\n    args = sys.argv[1:]\n\n    for arg in sys.argv[1:]:\n        if arg == '--config-file' or arg.startswith('--config-file='):\n            conf_file_value = args[args.index(arg) + 1]\n            args.remove(conf_file_value)\n            args.remove(arg)\n            args.insert(0, arg)\n            args.insert(1, conf_file_value)\n\n    return args\n\n\ndef main():\n    try:\n        config.parse_args(get_properly_ordered_parameters())\n\n        logging.setup(CONF, 'Lbaas')\n\n        driver.load_lb_drivers()\n\n        launch_api()\n\n    except RuntimeError as excp:\n        sys.stderr.write(\"ERROR: %s\\n\" % excp)\n        sys.exit(1)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"murano-apps/LBaaS-interface/package/Resources/scripts/lbaas_api-0.1/lbaas/cmd/launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"386150585","text":"import requests\nimport datetime\nfrom rental import RentalOps\n\nclass VideoOps:\n    def __init__(self, url = \"https://retro-video-store-api.herokuapp.com\", selected_video=None):\n        self.url = url\n        self.selected_video = selected_video\n\n    def add_video(self, title, release_date, total_inventory):\n        query_params = {\n            \"title\": title,\n            \"release_date\": release_date,\n            \"total_inventory\": total_inventory\n        }\n        response = requests.post(self.url+\"/videos\", json=query_params)\n        return response.json()\n\n    def update_video(self, title, release_date=datetime.datetime.now(),total_inventory=0):\n        query_params = {\n            \"title\": title,\n            \"release_date\": release_date,\n            \"total_inventory\": total_inventory\n        }\n\n        response = requests.put(self.url+f\"/videos/{self.selected_video['id']}\", json=query_params)\n        return response.json()\n\n    def delete_video(self, video_id):\n        response = requests.delete(self.url+f\"/videos/{video_id}\")\n        return response.json()\n\n    def list_all_videos(self):\n        response = requests.get(self.url+\"/videos\")\n        return response.json()\n\n    def get_video(self, id):\n        response = requests.get(self.url+\"/videos/\"+str(id))\n        response=response.json()\n        self.selected_video=response\n        return response\n    \n    def print_selected(self):\n        print(self.selected_video)\n        
return\n","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606293991","text":"MOD = 2006\ndef work(n):\n    if 1 == n:\n        s = [int(input())]\n    else:\n        s = input().split(' ')\n        for i in range(n):\n            s[i] = int(s[i])\n    s.sort()\n    return (s[-1] * pow(2, n - 1, MOD)) % MOD\n\n\n# -- author: lijw --\nif __name__ == '__main__':\n    while True:\n        n = int(input())\n        if 0 == n:\n            break\n        print(work(n))\n","sub_path":"10. Alternate Sum/010.py","file_name":"010.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"479871746","text":"# ASPSF: frequency map of the WMH (HSB)\n\nimport os\nimport glob\nfrom nipype.interfaces.fsl import MultiImageMaths, ImageMaths\npath = '/Volumes/irm-data/ASPSF/ASPFS_WMH_norm'\nlist_WMH = glob.glob(os.path.join(path,'*.nii.gz')) # list of all the ASPSF WMH masks\nlist_WMH.pop(0) # drop the first MRI (004) because we will use it with the add function\nnb = str(len(list_WMH) + 1) # size of the complete list \nmaths = MultiImageMaths()\nmaths.inputs.in_file = '/Volumes/irm-data/ASPSF/ASPFS_WMH_norm/ASPSfamily_004-WMH_norm.nii.gz'\nmaths.inputs.out_file = '/Users/Shared/maximilien/ASPSF_M2/ASPSF_CF/ASPSF_294_families_CF.nii.gz'\nmaths.inputs.operand_files = list_WMH\noutput = ''\nfor num in list_WMH:\n\toutput = output + \"-add %s \"\n\nmaths.inputs.op_string = output\n#maths.run()\n\nmaths2 = ImageMaths()\nmaths2.inputs.in_file = '/Users/Shared/maximilien/ASPSF_M2/ASPSF_CF/ASPSF_294_families_CF.nii.gz'\nmaths2.inputs.op_string = \"-mul 100 -div \" + nb\nmaths2.inputs.out_file = '/Users/Shared/maximilien/ASPSF_M2/ASPSF_CF/ASPSF_294_families_CF_norm.nii.gz'\nmaths2.run()\n\n\n","sub_path":"1-CF_ASPSF.py","file_name":"1-CF_ASPSF.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"253867693","text":"from abc import ABCMeta,abstractmethod\nimport numpy as np\nfrom sklearn.covariance import graph_lasso,shrunk_covariance\nfrom sklearn.cluster import spectral_clustering,SpectralClustering,k_means\nfrom scipy.linalg.misc import norm\nimport copy\nimport time\n\n\"\"\"\nNMF plus GL algorithm\n:emp_env:S\n:H prior:cluster indicator prior\n:theta prior:prec estimator prior\n:H threshold:prec threshold\n:theta threshold:prec threshold\n:param iterations:max iterations\n:return:local optimal of parameters\n\"\"\"\nclass expModel(object):\n    __meta__ = ABCMeta\n    \n    def __init__(self,name,desc=\"\"):\n        self.name = name\n        self.desc = desc\n    \n    @abstractmethod\n    def predict(self,data):\n        return\n    \n    def clean(self):\n        self._clf = None\n        return\n\n##################################################################################################\nclass NMFplusGL(expModel):\n    name = \"NMFGL\"\n    desc = \"NMF plus GL\"\n    category = \"coherent\"\n    \n    def __init__(self,n_cluster,_lambda,clus_threshold,iterations,assign_labels,random_state=1991):\n        super(NMFplusGL,self).__init__('NMFGL','NMF plus GL')\n        self.n_cluster = n_cluster\n        self._lambda = _lambda\n        self.clus_threshold = clus_threshold\n        self.iterations = iterations\n        self.assign_labels = assign_labels\n        self.model_seed = random_state\n    \n    def predict(self,data):\n        start_time = time.time()\n        S = abs(np.dot(data.T,data)/data.shape[0]) \n        H_threshold = self.clus_threshold\n        iterations = 
self.iterations\n _,est_groups,_ = k_means(data.transpose(),self.n_cluster,init=\"k-means++\",random_state=self.model_seed)\n #est_groups = sc.fit_predict(data.transpose())#get H prior\n H_prior = np.mat(np.zeros((data.shape[1],self.n_cluster)))\n for ind,val in enumerate(est_groups):#transfer H prior into cluster indicator\n H_prior[ind,val] = 1\n H_prior = H_prior + .2\n #A = 1./np.sqrt(sum(H_prior.T*H_prior))\n #B = A.tolist()\n #H_prior = H_prior*np.diag(B[0])\n \n emp_cov = H_prior.T * S * H_prior#get HSH\n #emp_cov = np.array(emp_cov)\n shrunk_cov = shrunk_covariance(emp_cov,shrinkage=0.8)\n _,theta_prior = graph_lasso(shrunk_cov,self._lambda)#get theta prior!!!!\n \n #start point of iteration\n iteration = 0\n #iterations =50\n iteration_H = 0\n while iteration < iterations:\n while iteration_H < 100:\n H_new = iter_H(H_prior,S,theta_prior)\n H_change = np.sum(np.abs(H_prior-H_new))/float(len(H_new))\n \n if H_change < H_threshold:\n break\n else:\n H_prior = H_new\n iteration_H += 1\n \n emp_cov = H_new.T * S * H_new\n shrunk_cov = shrunk_covariance(emp_cov,shrinkage=0.8)\n _,theta_new = graph_lasso(shrunk_cov,self._lambda)\n theta_change = norm(theta_new - theta_prior)\n \n if theta_change < 0.01:\n break\n else:\n theta_prior = theta_new\n H_prior = H_new\n iteration += 1\n iteration_H = 0\n \n hh = np.argmax(H_new,1)\n labels = np.array(hh).T[0]\n self.timeTotal = time.time() - start_time\n theta_global = H_new * theta_new * H_new.T\n \n return [labels,theta_new,H_new,theta_global,iteration]\n\n\n########################################################################################\ndef iter_H(H_prior,S,theta_prior):#function of iteration H\n H = copy.deepcopy(H_prior)\n theta = copy.deepcopy(theta_prior)\n gamma = - H.T*S*H*theta\n gamma_plus = (abs(gamma) + gamma)/2.\n gamma_minus = (abs(gamma) - gamma)/2.\n theta_plus = (abs(theta) + theta)/2.\n theta_minus = (abs(theta) - theta)/2.\n grad_H = S*H*theta_plus+H*gamma_plus-S*H*theta_minus-H*gamma_minus\n \n nx,ny = H.shape\n #H_bar = np.zeros((nx,ny))\n alpha_deno = np.zeros((nx,ny))\n \n #modify H_bar\n #for i in xrange(ny):\n # for j in xrange(ny):\n # if grad_H[i,j]>=0:\n # H_bar[i,j]=H[i,j] \n # else:\n # H_bar[i,j]=max(H[i,j],0.1)\n \n #H_bar = np.mat(H_bar)\n #gamma_bar = - H_bar.T*S*H_bar*theta\n #gamma_bar_minus = (abs(gamma_bar) - gamma_bar)/2.\n alpha_deno = S*H*theta_minus+H*gamma_minus+np.exp(-5)\n \n #update H matrx\n for i in xrange(nx):\n for j in xrange(ny):\n H[i,j] = H[i,j] + H[i,j]/float(alpha_deno[i,j])*grad_H[i,j]\n \n #for i in xrange(nx):\n # for j in xrange(ny):\n # H[i,j] = H[i,j]*a[-1]/b[-1]\n #H = (abs(H)>0).astype(int)\n return H\n\n#def transfer_emp(X):\n# nx,ny = X.shape\n# hat = np.max(X)\n# for i in xrange(nx):\n# for j in xrange(ny):\n# X[i,j] = X[i,j]/hat\n# X = np.array(X)\n# return X\n","sub_path":"NMFGL_add_adjust.py","file_name":"NMFGL_add_adjust.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606387118","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport json\nimport requests\nimport time\n\ndriver = webdriver.Firefox()\ndriver.get(\"http://beta.freerice.com\")\n\ninput(\"Are you ready? 
Press enter when ready\")\n\ntimes = 0\nwhile times < 500:\n try:\n time.sleep(4)\n title = driver.find_element_by_class_name(\"card-title\")\n options = driver.find_elements_by_class_name(\"card-button\")\n\n # I have free version anyways of oxfords api\n # but you don't get any of my keys sorry :(\n app_id = ''\n app_key = ''\n\n language = 'en'\n word_id = \"\"\n\n new_title = title.text\n for i in range(len(new_title)):\n if new_title[i] != \" \":\n word_id += new_title[i]\n else:\n break\n\n\n # print(word_id + '\\n')\n\n\n # will receive our data from oxiford dictionaries api to get synonyms from it\n url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower() + '/synonyms;antonyms'\n\n r = requests.get(url, headers = {'app_id': app_id, 'app_key': app_key})\n if r.status_code == 200:\n data = r.json()\n\n\n # Gets every synonym to the word that the website wants to know what its word is similar to (hence synonym)\n synonyms = []\n for i in data['results'][0]['lexicalEntries']:\n entries = i['entries']\n for j in entries:\n senses = j['senses']\n for s in senses:\n almost = s['synonyms']\n for ii in almost:\n last = ii['text']\n # print(last)\n synonyms.append(last)\n\n # print('\\n')\n\n # Sees if there is a button on the page that has a word that is the same as the synonym\n found = False\n for text in synonyms:\n for option in options:\n if text == option.text:\n print(\"found\")\n found = True\n option.click()\n break\n if found == True:\n break\n\n if found != True:\n # Since sometimes it doesn't line up with anything in the dictionary, we just click on the first element and hope for the best\n options[0].click()\n\n # print('\\n')\n print(times)\n times += 1\n else:\n print(\"Word not found\")\n options[0].click()\n continue\n\n except:\n time.sleep(1)\n","sub_path":"freerice.py","file_name":"freerice.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"101959838","text":"import db\nimport json\nfrom bottle import response, request\n\n\nclass MCOption:\n\n def __init__(self, id, is_true, option_text, qid):\n '''Constructor'''\n self.id = id\n self.is_true = is_true\n self.option_text = option_text\n self.qid = qid\n\n def update(self):\n '''Writes back instance values into database'''\n with db.connect() as conn:\n cursor = conn.cursor()\n cursor.execute(\"UPDATE MCOption SET is_true = ?, option_text = ?, qid = ? 
WHERE id = ?\",\n                           (1 if self.is_true else 0, self.option_text, self.qid, self.id))\n            conn.commit()\n\n    def updateFromJSON(self, mc_data):\n        '''Unpack JSON representation to update instance variables and then\n        calls update to write back into database'''\n\n        self.is_true = mc_data['is_true']\n        self.option_text = mc_data['option_text']\n        self.qid = mc_data['qid']\n        self.update()\n\n    def delete(self):\n        '''Deletes instance from database, any object representations of the\n        instance are now invalid and shouldn't be used including this one'''\n        with db.connect() as conn:\n            cursor = conn.cursor()\n            cursor.execute(\"DELETE FROM MCOption WHERE id = ?\", (self.id, ))\n\n    def jsonable(self):\n        '''Returns a dict appropriate for creating JSON representation\n        of the instance'''\n\n        return {'id': self.id, 'is_true': self.is_true, 'option_text': self.option_text, 'qid': self.qid}\n\n    @staticmethod\n    def createFromJSON(mc_data):\n        '''Creates new instance object using dict created from JSON representation\n        using create'''\n\n        # Unpack the instance data from JSON\n        # Should validate information here and throw exception\n        # if something is not right.\n        is_true = mc_data['is_true']\n        option_text = mc_data['option_text']\n        qid = mc_data['qid']\n\n        with db.connect() as conn:\n            cursor = conn.cursor()\n            cursor.execute(\"INSERT INTO MCOption (is_true, option_text, qid) VALUES (?, ?, ?)\",\n                           (1 if is_true else 0, option_text, qid))\n            conn.commit()\n            return MCOption.find(cursor.lastrowid)\n\n    @staticmethod\n    def find(id):\n        '''If row with specified id exists, creates and returns corresponding ORM\n        instance. Otherwise Exception raised.'''\n\n        with db.connect() as conn:\n            cursor = conn.cursor()\n            cursor.execute(\"SELECT * FROM MCOption WHERE id = ?\", (id,))\n            row = cursor.fetchone()\n\n        if row is None:\n            raise Exception(f'No such MCOption with id: {id}')\n        else:\n            return MCOption(row['id'], bool(row['is_true']), row['option_text'], row['qid'])\n\n    @staticmethod\n    def getAllIDs():\n        with db.connect() as conn:\n            cursor = conn.cursor()\n            cursor.execute(\"SELECT id FROM MCOption\")\n            all_ids = [row['id'] for row in cursor]\n        return all_ids\n\n    @staticmethod\n    def setupBottleRoutes(app):\n        @app.get('/mc_option')\n        def getMCOptionIndex():\n            mc_option_index = MCOption.getAllIDs()\n            response.content_type = 'application/json'\n            return json.dumps(mc_option_index)\n\n        @app.get('/mc_option/<id>')\n        def getMCOption(id):\n            try:\n                mc_option = MCOption.find(id)\n            except Exception:\n                response.status = 404\n                return f\"Multiple choice option {id} not found\"\n            return mc_option.jsonable()\n\n        @app.post('/mc_option')\n        def postMCOption():\n\n            mc_option = MCOption.createFromJSON(request.json)\n            return mc_option.jsonable()\n\n        @app.put('/mc_option/<id>')\n        def updateMCOption(id):\n            '''Implements instance updating'''\n\n            try:\n                mc_option = MCOption.find(id)\n            except Exception:\n                response.status = 404\n                return f\"Multiple choice option {id} to update not found\"\n\n            mc_option.updateFromJSON(request.json)\n            return mc_option.jsonable()\n\n        @app.delete('/mc_option/<id>')\n        def deleteMCOption(id):\n            '''Implements instance deletion'''\n\n            try:\n                mc_option = MCOption.find(id)\n            except Exception:\n                response.status = 404\n                return f\"Multiple choice option {id} to delete does not exist\"\n\n            mc_option.delete()\n\n            response.content_type = 'application/json'\n            return 
json.dumps(True)\n","sub_path":"mcOption.py","file_name":"mcOption.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"308858847","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport time\nfrom typing import Any\n\nfrom tqdm import tqdm\n\nimport cereal.messaging as messaging\nfrom cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error\nfrom common.spinner import Spinner\nfrom common.timeout import Timeout\nfrom common.transformations.camera import get_view_frame_from_road_frame, eon_f_frame_size, tici_f_frame_size, \\\n eon_d_frame_size, tici_d_frame_size\nfrom selfdrive.hardware import PC, TICI\nfrom selfdrive.manager.process_config import managed_processes\nfrom selfdrive.test.openpilotci import BASE_URL, get_url\nfrom selfdrive.test.process_replay.compare_logs import compare_logs, save_log\nfrom selfdrive.test.process_replay.test_processes import format_diff\nfrom selfdrive.version import get_git_commit\nfrom tools.lib.framereader import FrameReader\nfrom tools.lib.logreader import LogReader\n\nif TICI:\n TEST_ROUTE = \"4cf7a6ad03080c90|2021-09-29--13-46-36\"\nelse:\n TEST_ROUTE = \"303055c0002aefd1|2021-11-22--18-36-32\"\n\nCACHE_DIR = os.getenv(\"CACHE_DIR\", None)\n\npacket_from_camera = {\"roadCameraState\":\"modelV2\", \"driverCameraState\":\"driverState\"}\n\ndef get_log_fn(ref_commit):\n return \"%s_%s_%s.bz2\" % (TEST_ROUTE, \"model_tici\" if TICI else \"model\", ref_commit)\n\ndef replace_calib(msg, calib):\n msg = msg.as_builder()\n if calib is not None:\n msg.liveCalibration.extrinsicMatrix = get_view_frame_from_road_frame(*calib, 1.22).flatten().tolist()\n return msg\n\ndef process_frame(msg, pm, sm, log_msgs, vipc_server, spinner, frs, frame_idxs, last_desire):\n if msg.which() == \"roadCameraState\" and last_desire is not None:\n dat = messaging.new_message('lateralPlan')\n dat.lateralPlan.desire = last_desire\n pm.send('lateralPlan', dat)\n\n f = msg.as_builder()\n pm.send(msg.which(), f)\n\n img = frs[msg.which()].get(frame_idxs[msg.which()], pix_fmt=\"yuv420p\")[0]\n if msg.which == \"roadCameraState\":\n vipc_server.send(VisionStreamType.VISION_STREAM_YUV_BACK, img.flatten().tobytes(), f.roadCameraState.frameId,\n f.roadCameraState.timestampSof, f.roadCameraState.timestampEof)\n else:\n vipc_server.send(VisionStreamType.VISION_STREAM_YUV_FRONT, img.flatten().tobytes(), f.driverCameraState.frameId,\n f.driverCameraState.timestampSof, f.driverCameraState.timestampEof)\n with Timeout(seconds=15):\n log_msgs.append(messaging.recv_one(sm.sock[packet_from_camera[msg.which()]]))\n\n frame_idxs[msg.which()] += 1\n if frame_idxs[msg.which()] >= frs[msg.which()].frame_count:\n return None\n update_spinner(spinner, frame_idxs['roadCameraState'], frs['roadCameraState'].frame_count,\n frame_idxs['driverCameraState'], frs['driverCameraState'].frame_count)\n return 0\n\ndef update_spinner(s, fidx, fcnt, didx, dcnt):\n s.update(\"replaying models: road %d/%d, driver %d/%d\" % (fidx, fcnt, didx, dcnt))\n\ndef model_replay(lr_list, frs):\n spinner = Spinner()\n spinner.update(\"starting model replay\")\n\n vipc_server = VisionIpcServer(\"camerad\")\n vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_BACK, 40, False, *(tici_f_frame_size if TICI else eon_f_frame_size))\n vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_FRONT, 40, False, *(tici_d_frame_size if TICI else eon_d_frame_size))\n 
vipc_server.start_listener()\n\n pm = messaging.PubMaster(['roadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])\n sm = messaging.SubMaster(['modelV2', 'driverState'])\n\n try:\n managed_processes['modeld'].start()\n managed_processes['dmonitoringmodeld'].start()\n time.sleep(5)\n sm.update(1000)\n\n last_desire = None\n log_msgs = []\n frame_idxs = dict.fromkeys(['roadCameraState','driverCameraState'], 0)\n\n cal = [msg for msg in lr_list if msg.which() == \"liveCalibration\"]\n for msg in cal[:5]:\n pm.send(msg.which(), replace_calib(msg, None))\n\n for msg in tqdm(lr_list):\n if msg.which() == \"liveCalibration\":\n last_calib = list(msg.liveCalibration.rpyCalib)\n pm.send(msg.which(), replace_calib(msg, last_calib))\n elif msg.which() == \"lateralPlan\":\n last_desire = msg.lateralPlan.desire\n elif msg.which() in [\"roadCameraState\", \"driverCameraState\"]:\n ret = process_frame(msg, pm, sm, log_msgs, vipc_server, spinner, frs, frame_idxs, last_desire)\n if ret is None:\n break\n\n except KeyboardInterrupt:\n pass\n finally:\n spinner.close()\n managed_processes['modeld'].stop()\n managed_processes['dmonitoringmodeld'].stop()\n\n return log_msgs\n\nif __name__ == \"__main__\":\n\n update = \"--update\" in sys.argv\n\n if TICI:\n os.system('sudo mount -o remount,size=200M /tmp') # c3 hevcs are 75M each\n\n replay_dir = os.path.dirname(os.path.abspath(__file__))\n ref_commit_fn = os.path.join(replay_dir, \"model_replay_ref_commit\")\n\n segnum = 0\n frs = {}\n if CACHE_DIR:\n lr = LogReader(os.path.join(CACHE_DIR, '%s--%d--rlog.bz2' % (TEST_ROUTE.replace('|', '_'), segnum)))\n frs['roadCameraState'] = FrameReader(os.path.join(CACHE_DIR, '%s--%d--fcamera.hevc' % (TEST_ROUTE.replace('|', '_'), segnum)))\n frs['driverCameraState'] = FrameReader(os.path.join(CACHE_DIR, '%s--%d--dcamera.hevc' % (TEST_ROUTE.replace('|', '_'), segnum)))\n else:\n lr = LogReader(get_url(TEST_ROUTE, segnum))\n frs['roadCameraState'] = FrameReader(get_url(TEST_ROUTE, segnum, log_type=\"fcamera\"))\n frs['driverCameraState'] = FrameReader(get_url(TEST_ROUTE, segnum, log_type=\"dcamera\"))\n\n lr_list = list(lr)\n log_msgs = model_replay(lr_list, frs)\n\n failed = False\n if not update:\n ref_commit = open(ref_commit_fn).read().strip()\n log_fn = get_log_fn(ref_commit)\n cmp_log = LogReader(BASE_URL + log_fn)\n\n ignore = ['logMonoTime', 'valid',\n 'modelV2.frameDropPerc',\n 'modelV2.modelExecutionTime',\n 'driverState.modelExecutionTime',\n 'driverState.dspExecutionTime']\n tolerance = None if not PC else 1e-3\n results: Any = {TEST_ROUTE: {}}\n results[TEST_ROUTE][\"models\"] = compare_logs(cmp_log, log_msgs, tolerance=tolerance, ignore_fields=ignore)\n diff1, diff2, failed = format_diff(results, ref_commit)\n\n print(diff2)\n print('-------------')\n print('-------------')\n print('-------------')\n print('-------------')\n print('-------------')\n print(diff1)\n with open(\"model_diff.txt\", \"w\") as f:\n f.write(diff2)\n\n if update or failed:\n from selfdrive.test.openpilotci import upload_file\n\n print(\"Uploading new refs\")\n\n new_commit = get_git_commit()\n log_fn = get_log_fn(new_commit)\n save_log(log_fn, log_msgs)\n try:\n upload_file(log_fn, os.path.basename(log_fn))\n except Exception as e:\n print(\"failed to upload\", e)\n\n with open(ref_commit_fn, 'w') as f:\n f.write(str(new_commit))\n\n print(\"\\\\n\\\\nNew ref commit: \", new_commit)\n\n 
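# a nonzero exit code flags a failed comparison for the caller\n 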
sys.exit(int(failed))\n","sub_path":"selfdrive/test/process_replay/model_replay.py","file_name":"model_replay.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"483251050","text":"# *_*coding:utf-8 *_*\nimport os\nimport random\n\nfrom PIL import Image\nfrom scipy.io import loadmat\nfrom torch.utils.data import Dataset\n\n\nclass CNNData(Dataset):\n def __init__(self, data, transform=None):\n self.transform = transform\n self.data = data\n self.x = [x.reshape(20, 20).T for x in self.data['X']]\n self.y = self.data['y']\n\n def __len__(self):\n return len(self.data['X'])\n\n def __getitem__(self, idx):\n img = Image.fromarray(self.x[idx])\n if self.transform is not None:\n img = self.transform(img)\n return img, int(self.y[idx]) - 1\n\n\nclass CNNDataLIST(Dataset):\n def __init__(self, path, transform=None, infer_mode=False):\n self.infer_mode = infer_mode\n self.transform = transform\n with open(path, 'r') as f:\n self.data = [name for name in f]\n self.x = [x.split(' ')[0] for x in self.data]\n self.y = [int(x.split(' ')[1]) for x in self.data]\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, idx):\n img = Image.open(self.x[idx])\n if self.transform is not None:\n img = self.transform(img)\n if self.infer_mode:\n return img, os.path.split(self.x[idx])[-1][:-4]\n else:\n return img, self.y[idx]\n\n\ndef get_data(is_infer=False):\n data = loadmat('data//handwritten_digits.mat')\n new_data = list(zip(data['X'], data['y']))\n random.shuffle(new_data)\n mark = int(0.9 * len(new_data))\n if is_infer:\n mark = 20\n train_data = {\"X\": [x[0] for x in new_data[:mark]], \"y\": [x[1] for x in new_data[:mark]]}\n test_data = {\"X\": [x[0] for x in new_data[mark:]], \"y\": [x[1] for x in new_data[mark:]]}\n return train_data, test_data\n\n\ndef generate_list_img(dir='data', img_doc='img', ratio=0.9, train_path='train.lst', test_path='test.lst'):\n import os\n img_dir = os.path.join(dir, img_doc)\n for root, dir_, files in os.walk(img_dir):\n img_name = [os.path.join(img_dir, file_) for file_ in files]\n random.shuffle(img_name)\n mark = int(ratio * len(img_name))\n train_name = img_name[:mark]\n test_name = img_name[mark:]\n # print(type(train_path))\n with open(os.path.join(dir, train_path), 'a') as f:\n [f.write('{} {}\\n'.format(img, img.split('_')[1][0])) for img in train_name]\n with open(os.path.join(dir, test_path), 'a') as f:\n [f.write('{} {}\\n'.format(img, img.split('_')[1][0])) for img in test_name]\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"568779578","text":"#!/usr/bin/env python\n\"\"\"Command line tool to convert from Plan 9 image format to PNG format.\n\nPlan 9 image format description:\nhttp://plan9.bell-labs.com/magic/man2html/6/image\n\"\"\"\nimport itertools\nimport re\nimport sys\ntry:\n exec(\"from . 
import png\", globals(), locals())\n exec(\"from .png import array\", globals(), locals())\nexcept (SyntaxError, ValueError):\n # On Python < 2.5 relative import cause syntax error\n # Also works when running outside of package\n import png\n from png import array\ntry:\n bytearray\n bytes\nexcept NameError:\n # bytearray missed on Python < 2.6 where relative import supported\n from png import bytearray\n bytes = str\n\n\ndef block(s, n):\n # See http://www.python.org/doc/2.6.2/library/functions.html#zip\n return zip(*[iter(s)] * n)\n\n\ndef convert(f, output=None):\n \"\"\"\n Convert Plan 9 file to PNG format.\n\n Works with either uncompressed or compressed files.\n \"\"\"\n if output is None:\n output = sys.stdout\n r = f.read(11)\n if r == png.strtobytes('compressed\\n'):\n aspng(output, *decompress(f))\n else:\n aspng(output, *glue(f, r))\n\n\ndef glue(f, r):\n \"\"\"\n Return (metadata, stream) pair\n\n `r` is the initial portion of\n the metadata that has already been read from the stream `f`.\n \"\"\"\n r = r + f.read(60 - len(r))\n return (r, f)\n\n\ndef meta(r):\n \"\"\"\n Convert 60 character string `r`, the metadata from an image file.\n\n Returns a 5-tuple (*chan*,*minx*,*miny*,*limx*,*limy*). 5-tuples may\n settle into lists in transit.\n\n As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata\n comprises 5 words separated by blanks. As it happens each word starts\n at an index that is a multiple of 12, but this routine does not care\n about that.\n \"\"\"\n r = r.split()\n # :todo: raise FormatError\n assert len(r) == 5\n r = [r[0]] + list(map(int, r[1:]))\n return r\n\n\ndef bitdepthof(pixel):\n \"\"\"Return the bitdepth for a Plan9 pixel format string.\"\"\"\n maxd = 0\n for c in re.findall(r'[a-z]\\d*', pixel):\n if c[0] != 'x':\n maxd = max(maxd, int(c[1:]))\n return maxd\n\n\ndef maxvalof(pixel):\n \"\"\"Return the netpbm MAXVAL for a Plan9 pixel format string.\"\"\"\n bitdepth = bitdepthof(pixel)\n return (2**bitdepth) - 1\n\n\ndef pixmeta(metadata, f):\n \"\"\"\n Convert (uncompressed) Plan 9 image file to pair of (*metadata*, *pixels*).\n\n This is intended to be used by PurePNG format.\n *metadata* is the metadata returned in a dictionary,\n *pixels* is an iterator that yields each row in boxed\n row flat pixel format.\n `f`, the input file, should be cued up to the start of the image data.\n \"\"\"\n chan, minx, miny, limx, limy = metadata\n rows = limy - miny\n width = limx - minx\n chan = png.bytestostr(chan)\n nchans = len(re.findall('[a-wyz]', chan))\n alpha = 'a' in chan\n # Iverson's convention for the win!\n ncolour = nchans - alpha\n greyscale = ncolour == 1\n # PNG style metadata\n meta = dict(size=(width, rows), bitdepth=bitdepthof(chan),\n greyscale=greyscale, alpha=alpha, planes=nchans)\n\n return (map(lambda x: itertools.chain(*x),\n block(unpack(f, rows, width, chan), width)),\n meta)\n\n\ndef aspng(out, metadata, f):\n \"\"\"\n Convert to PNG format.\n\n `metadata` should be a Plan9 5-tuple;\n `f` the input file (see :meth:`pixmeta`).\n \"\"\"\n pixels, meta = pixmeta(metadata, f)\n p = png.Writer(**meta)\n p.write(out, pixels)\n\n\ndef unpack(f, rows, width, pixel):\n \"\"\"\n Unpack `f` into pixels.\n\n Assumes the pixel format is such that the depth\n is either a multiple or a divisor of 8.\n `f` is assumed to be an iterator that returns blocks of input such\n that each block contains a whole number of pixels. An iterator is\n returned that yields each pixel as an n-tuple. 
`pixel` describes the\n pixel format using the Plan9 syntax (\"k8\", \"r8g8b8\", and so on).\n \"\"\"\n def mask(w):\n \"\"\"An integer, to be used as a mask, with bottom `w` bits set to 1.\"\"\"\n return (1 << w) - 1\n\n def deblock(f, depth, width):\n \"\"\"\n A \"packer\" used to convert multiple bytes into single pixels.\n\n `depth` is the pixel depth in bits (>= 8),\n `width` is the row width in pixels.\n \"\"\"\n w = depth // 8\n for block in f:\n for i in range(0, len(block) // w):\n p = block[w * i:w * (i + 1)]\n # Convert p to little-endian integer, x\n x = 0\n s = 1 # scale\n for j in p:\n if isinstance(j, str):\n j = ord(j)\n x += s * j\n s <<= 8\n yield x\n\n def bitfunge(f, depth, width):\n \"\"\"\n A \"packer\" used to convert single bytes into multiple pixels.\n\n Depth is the pixel depth (< 8), width is the row width in pixels.\n \"\"\"\n for block in f:\n col = 0\n for i in block:\n x = ord(i)\n for _ in range(8 // depth):\n yield x >> (8 - depth)\n col += 1\n if col == width:\n # A row-end forces a new byte even if we haven't\n # consumed all of the current byte. Effectively rows\n # are bit-padded to make a whole number of bytes.\n col = 0\n break\n x <<= depth\n\n maxval = float(2**bitdepthof(pixel) - 1)\n # number of bits in each channel\n chan = list(map(int, re.findall(r'\\d+', pixel)))\n # type of each channel\n kind = re.findall('[a-z]', pixel)\n\n depth = sum(chan)\n\n # According to the value of depth pick a \"packer\" that either gathers\n # multiple bytes into a single pixel (for depth >= 8) or splits bytes\n # into several pixels (for depth < 8)\n if depth >= 8:\n assert depth % 8 == 0\n packer = deblock\n else:\n assert 8 % depth == 0\n packer = bitfunge\n\n for x in packer(f, depth, width):\n # x is the pixel as an unsigned integer\n o = []\n # This is a bit yucky. Extract each channel from the _most_\n # significant part of x.\n for j in range(len(chan)):\n v = (x >> (depth - chan[j])) & mask(chan[j])\n x <<= chan[j]\n if kind[j] != 'x':\n # scale to maxval\n v = v * maxval / mask(chan[j])\n v = int(v + 0.5)\n o.append(v)\n yield o\n\n\ndef decompress(f):\n \"\"\"\n Decompress a Plan 9 image file.\n\n Assumes f is already cued past the initial 'compressed\\\\n' string.\n \"\"\"\n r = meta(f.read(60))\n return r, decomprest(f, r[4])\n\n\ndef decomprest(f, rows):\n \"\"\"Iterator that decompresses the rest of a file once the metadata\n have been consumed.\"\"\"\n row = 0\n while row < rows:\n row, o = deblock(f)\n yield o\n\n\ndef deblock(f):\n \"\"\"\n Decompress a single block from a compressed Plan 9 image file.\n\n Each block starts with 2 decimal strings of 12 bytes each. Yields a\n sequence of (row, data) pairs where row is the total number of rows\n processed according to the file format and data is the decompressed\n data for a set of rows.\n \"\"\"\n row = int(f.read(12))\n size = int(f.read(12))\n if not (0 <= size <= 6000):\n raise png.Error('block has invalid size; not a Plan 9 image file?')\n # Since each block is at most 6000 bytes we may as well read it all in\n # one go.\n d = f.read(size)\n i = 0\n o = []\n\n while i < size:\n x = ord(d[i:i + 1]) # hack to avoid interpreting bytes item as int\n i += 1\n if x & 0x80:\n x = (x & 0x7f) + 1\n lit = d[i:i + x]\n i += x\n o.extend(lit)\n continue\n # x's high-order bit is 0\n l = (x >> 2) + 3\n # Offset is made from bottom 2 bits of x and all 8 bits of next\n # byte. 
http://plan9.bell-labs.com/magic/man2html/6/image doesn't\n # say whether x's 2 bits are most significant or least significant.\n # But it is clear from inspecting a random file,\n # http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit\n # that x's 2 bits are most significant.\n offset = (x & 3) << 8\n offset |= ord(d[i:i + 1])\n i += 1\n # Note: complement operator neatly maps (0 to 1023) to (-1 to\n # -1024). Adding len(o) gives a (non-negative) offset into o from\n # which to start indexing.\n offset = ~offset + len(o)\n if offset < 0:\n raise png.Error('byte offset indexes off the beginning '\n 'of the output buffer; not a Plan 9 image file?')\n for j in range(l):\n o.append(o[offset + j])\n try:\n res = ''.join(o)\n except TypeError:\n # on Python 3 the output list holds integers, not characters\n res = bytearray(o)\n return row, res\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n if sys.platform == \"win32\":\n import msvcrt, os\n try:\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n except:\n pass\n if len(argv) <= 1:\n convert(sys.stdin)\n else:\n infile = open(argv[1], 'rb')\n convert(infile)\n infile.close()\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"png/plan9topng.py","file_name":"plan9topng.py","file_ext":"py","file_size_in_byte":9420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"391739819","text":"import numpy as np\nfrom elastica.external_forces import NoForces\nfrom scipy.interpolate import make_interp_spline\n\n\nclass MuscleTorquesWithVaryingBetaSplines(NoForces):\n \"\"\"\n\n This class computes the muscle torques using a beta spline.\n The points of the beta spline can be changed throughout the simulation, and\n every time they change a new spline is generated. The control algorithm has to\n select the spline control points. The location of the control points on the arm\n is fixed and they are equidistant points.\n\n Attributes\n ----------\n direction : str\n Depending on the user input direction, computed torques are applied in the direction of d1, d2, or d3.\n points_array : numpy.ndarray or callable object\n This variable is a reference to the points_func_array variable which can be a numpy.ndarray or callable object.\n base_length : float\n Initial length of the arm.\n muscle_torque_scale : float\n Scaling factor for beta spline muscle torques. Beta spline is non-dimensional and muscle_torque_scale scales it.\n torque_profile_recorder : defaultdict(list)\n This is a dictionary to store the time-history of muscle torques and the beta-spline.\n step_skip : int\n Determines the data collection step.\n counter : int\n Used to determine the current call step of this object.\n number_of_control_points : int\n Number of control points used in the beta spline. Note that these are the control points in the middle and there\n are two more control points at the start and end of the rod, which are 0.\n points_cached : numpy.ndarray\n 2D (2, number_of_control_points+2) array containing data with 'float' type.\n This array stores the location of the control points in its first row and the values of the\n control points selected at the previous step in its second row. 
If the control points are changed, points_cached is updated.\n max_rate_of_change_of_activation : float\n This limits the maximum change that can happen to the control points between two calls of this object.\n my_spline : object\n Stores the beta spline object generated from the control points.\n \"\"\"\n\n def __init__(\n self,\n base_length,\n number_of_control_points,\n points_func_array,\n muscle_torque_scale,\n direction,\n step_skip,\n max_rate_of_change_of_activation=0.01,\n **kwargs,\n ):\n \"\"\"\n\n Parameters\n ----------\n base_length : float\n Initial length of the arm.\n number_of_control_points : int\n Number of control points used in the beta spline. Note that these are the control points in the middle and there\n are two more control points at the start and end of the rod, which are 0.\n points_func_array : numpy.ndarray\n 2D (2, number_of_control_points+2) array containing data with 'float' type.\n This array stores the location of the control points in its first row and the values of the\n control points selected at the previous step in its second row. If the control points are changed, points_cached is updated.\n muscle_torque_scale : float\n Scaling factor for beta spline muscle torques. Beta spline is non-dimensional and muscle_torque_scale\n scales it.\n direction : str\n Depending on the user input, computed torques are applied in the \"normal\", \"binormal\", or \"tangent\" direction.\n step_skip : int\n Determines the data collection step.\n max_rate_of_change_of_activation : float\n This limits the maximum change that can happen to the control points between two calls of this object.\n **kwargs\n Arbitrary keyword arguments.\n \"\"\"\n super(MuscleTorquesWithVaryingBetaSplines, self).__init__()\n\n if direction == str(\"normal\"):\n self.direction = int(0)\n elif direction == str(\"binormal\"):\n self.direction = int(1)\n elif direction == str(\"tangent\"):\n self.direction = int(2)\n else:\n raise NameError(\n \"Please type normal, binormal or tangent as muscle torque direction. Input should be string.\"\n )\n\n self.points_array = (\n points_func_array\n if hasattr(points_func_array, \"__call__\")\n else lambda time_v: points_func_array\n )\n\n self.base_length = base_length\n self.muscle_torque_scale = muscle_torque_scale\n\n self.torque_profile_recorder = kwargs.get(\"torque_profile_recorder\", None)\n self.step_skip = step_skip\n self.counter = 0 # for recording data from the muscles\n self.number_of_control_points = number_of_control_points\n self.points_cached = np.zeros(\n (2, self.number_of_control_points + 2)\n ) # This caches the control points. Note that the first and last control points are zero.\n self.points_cached[0, :] = np.linspace(\n 0, self.base_length, self.number_of_control_points + 2\n ) # position of control points along the rod.\n\n # The max rate of change of activation determines the maximum change in the activation\n # signal in one time-step.\n self.max_rate_of_change_of_activation = max_rate_of_change_of_activation\n\n # The purpose of this flag is to generate a spline even when the control points are zero,\n # so that the code won't crash.\n self.initial_call_flag = 0\n\n def apply_torques(self, system, time: float = 0.0):\n\n # Check if the RL algorithm changed the points we fit the spline at this time step;\n # if points_array changed, create a new spline. Using this approach we don't create a\n # spline every time step.\n # Make sure that the first and last point y values are zero. 
Because we cannot generate a\n # torque at first and last nodes.\n if (\n not np.array_equal(self.points_cached[1, 1:-1], self.points_array(time))\n or self.initial_call_flag == 0\n ):\n self.initial_call_flag = 1\n\n # Apply filter to the activation signal, to prevent drastic changes in activation signal.\n self.filter_activation(\n self.points_cached[1, 1:-1],\n np.array(self.points_array(time)),\n self.max_rate_of_change_of_activation,\n )\n\n # self.points_cached[1, 1:-1] = self.points_array(time)\n self.my_spline = make_interp_spline(\n self.points_cached[0], self.points_cached[1]\n )\n # Compute the muscle torque magnitude from the beta spline.\n self.torque_magnitude_cache = self.muscle_torque_scale * self.my_spline(\n np.cumsum(system.lengths)\n )\n\n system.external_torques[self.direction, :] += self.torque_magnitude_cache[:]\n\n if self.counter % self.step_skip == 0:\n if self.torque_profile_recorder is not None:\n self.torque_profile_recorder[\"time\"].append(time)\n\n self.torque_profile_recorder[\"torque_mag\"].append(\n self.torque_magnitude_cache\n )\n self.torque_profile_recorder[\"torque\"].append(\n system.external_torques.copy()\n )\n self.torque_profile_recorder[\"element_position\"].append(\n np.cumsum(system.lengths)\n )\n\n self.counter += 1\n\n @staticmethod\n def filter_activation(signal, input_signal, max_signal_rate_of_change):\n \"\"\"\n Filters the input signal. If change in new signal (input signal) greater than\n previous signal (signal) then, increase for signal is max_signal_rate_of_change amount.\n\n Parameters\n ----------\n signal : numpy.ndarray\n 1D (number_of_control_points,) array containing data with 'float' type.\n input_signal : numpy.ndarray\n 1D (number_of_control_points,) array containing data with 'float' type.\n max_signal_rate_of_change : float\n This limits the maximum change that can happen between signal and input signal.\n\n Returns\n -------\n\n \"\"\"\n signal_difference = input_signal - signal\n signal += np.sign(signal_difference) * np.minimum(\n max_signal_rate_of_change, np.abs(signal_difference)\n )\n","sub_path":"Case4/MuscleTorquesWithBspline/BsplineMuscleTorques/muscle_torques_with_bspline_numpy.py","file_name":"muscle_torques_with_bspline_numpy.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156672012","text":"\n# coding: utf-8\n\n# In[1]:\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\nfrom process_data import process_data\n\n\nVOCAB_SIZE = 50000\nBATCH_SIZE = 128\nEMBED_SIZE = 128 # dimension of the word embedding vectors\nSKIP_WINDOW = 1 # the context window\nNUM_SAMPLED = 64 # Number of negative examples to sample.\nLEARNING_RATE = 1.0\nNUM_TRAIN_STEPS = 10000\nSKIP_STEP = 2000 # how many steps to skip before reporting the loss\n\n\nclass W2VModel:\n \n#batch_gen = process_data(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW)\n\n \"\"\" Build the graph for word2vec model \"\"\"\n def __init__(self, vocab_size, embed_size, batch_size, num_sampled, learning_rate, skip_step):\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.batch_size = batch_size\n self.num_sampled = num_sampled\n self.lr = learning_rate\n self.skip_step = skip_step\n self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\n\n\n def 
_create_placeholders(self):\n with tf.name_scope('data'):\n self.center_words = tf.placeholder(tf.int32, shape=[self.batch_size], name=\"center_words\")\n self.target_words = tf.placeholder(tf.int32, shape=[self.batch_size, 1], name=\"target_words\")\n \n def _create_embedding(self):\n with tf.name_scope('embed'):\n self.embed_matrix = tf.Variable(tf.random_uniform([self.vocab_size, \n self.embed_size], -1.0, 1.0),\n name='embed_matrix')\n \n def _create_loss(self):\n embed = tf.nn.embedding_lookup(self.embed_matrix, self.center_words, name='embed')\n \n nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embed_size],\n stddev=1.0 / (self.embed_size ** 0.5)), \n name='nce_weight')\n \n nce_bias = tf.Variable(tf.zeros([self.vocab_size]), name='nce_bias')\n\n self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight, \n biases=nce_bias, \n labels=self.target_words, \n inputs=embed, \n num_sampled=self.num_sampled, \n num_classes=self.vocab_size), name='loss')\n\n\n def _create_optimizer(self):\n with tf.name_scope('optimizer'):\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss,\n global_step=self.global_step)\n\n def _create_summary(self):\n with tf.name_scope('summary'):\n tf.summary.scalar('loss', self.loss)\n tf.summary.histogram('loss', self.loss)\n self.summary_op = tf.summary.merge_all()\n \n def build_graph(self):\n \"\"\" Build the graph for our model \"\"\"\n self._create_placeholders()\n self._create_embedding()\n self._create_loss()\n self._create_optimizer()\n self._create_summary()\n\n\ndef train_model(model, batch_gen, num_train_steps, weights_fld):\n \n saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias\n initial_step = 0\n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n # the last saved state lives in the checkpoint file;\n # because we use the global_step variable, the step index resumes from the last saved value\n \n ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/'))\n # if that checkpoint exists, restore from checkpoint\n if ckpt and ckpt.model_checkpoint_path:\n print('restoring from checkpoint')\n saver.restore(sess, ckpt.model_checkpoint_path)\n \n total_loss = 0.0 # we use this to calculate the average loss in the last SKIP_STEP steps\n writer = tf.summary.FileWriter('./my_graph/', sess.graph)\n \n initial_step = model.global_step.eval()\n \n for index in range(num_train_steps):\n centers, targets = next(batch_gen)\n feed_dict = {model.center_words: centers, model.target_words: targets}\n loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op], \n feed_dict=feed_dict)\n\n writer.add_summary(summary, global_step=model.global_step.eval())\n total_loss += loss_batch\n\n if (index + 1) % SKIP_STEP == 0:\n print('Average loss at step {}, {}: {:5.1f}'.format(model.global_step.eval(), index, total_loss / model.skip_step))\n total_loss = 0.0\n #saver.save(sess, 'checkpoints/', index)\n\n writer.close()\n saver.save(sess, 'checkpoints/', model.global_step.eval())\n print(model.global_step.eval())\n\n\ndef main():\n model = W2VModel(VOCAB_SIZE, EMBED_SIZE, BATCH_SIZE, NUM_SAMPLED, LEARNING_RATE, SKIP_STEP)\n model.build_graph()\n batch_gen = process_data(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW)\n weights_fld = 'checkpoints/' # NOTE: assumed folder name; train_model does not use this argument yet\n train_model(model, batch_gen, NUM_TRAIN_STEPS, weights_fld)\n\n \nif __name__ == '__main__':\n main()\n
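# tip: view the logged loss summaries with: tensorboard --logdir ./my_graph/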
\n","sub_path":"w2v/w2vec.py","file_name":"w2vec.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"394004037","text":"from FileLoader import FileLoader\n\n\ndef youngestFellah(df, year):\n\tol_year = df[df['Year'].eq(year)]\n\tol_year_female = ol_year[ol_year['Sex'].eq('F')]\n\tol_year_male = ol_year[ol_year['Sex'].eq('M')]\n\tyoungest_ol = {\n\t\t'Female' : ol_year_female.Age.min(),\n\t\t'Male' : ol_year_male.Age.min(),\n\t}\n\t# OR :\n\t#youngest_ol = {'f': df['Age'][(df['Sex'] == 'F') & (df['Year'] == year)].min(),\n # 'm': df['Age'][(df['Sex'] == 'M') & (df['Year'] == year)].min()}\n\tprint (youngest_ol)\n\n# age, sex, year\nloader = FileLoader()\ndata = loader.load(\"../resources/athlete_events.csv\")\nloader.display(data, 10)\nyoungestFellah(data, 2004)\n","sub_path":"04_pandas/ex01/YoungestFellah.py","file_name":"YoungestFellah.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"71253217","text":"\nclass Attr:\n def __getattr__(self, item):\n if item == 'age':\n return 26\n else:\n raise AttributeError\n\n\nx = Attr()\nprint(x.age)\n# print(x.name)\n\nprint('----------------------')\n\n\nclass PropSquare:\n def __init__(self, start):\n self.value = start\n\n def get_x(self): # On attr fetch\n return self.value ** 2\n\n def set_x(self, value): # On attr assign\n self.value = value\n x = property(get_x, set_x) # No delete or docs\n\n\np = PropSquare(3) # 2 instances of class with property\nq = PropSquare(32) # Each has different state information\n\nprint(p.x) # 3 ** 2\np.x = 4\nprint(p.x) # 4 ** 2\nprint(q.x) # 32 ** 2 (1024)\n","sub_path":"python/Lessons/lec7/prop1.py","file_name":"prop1.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"190366353","text":"from django.contrib import admin, messages\nfrom django.urls import path\nfrom django.shortcuts import redirect, render\nfrom django.forms import forms\nfrom django.utils.safestring import mark_safe\n\nfrom .models import Teacher, Subject\n\nimport unicodecsv as csv\n\n\nclass CsvImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\n@admin.register(Teacher)\nclass TeacherAdmin(admin.ModelAdmin, CsvImportForm):\n change_list_template = \"entities/teachers_changelist.html\"\n readonly_fields = [\"profile_picture_image\"]\n\n def profile_picture_image(self, obj):\n return mark_safe(''.format(\n url=obj.profile_picture.url\n )\n )\n\n def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n path('import-csv/', self.import_csv),\n ]\n return my_urls + urls\n\n def import_csv(self, request):\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n file_lines = csv_file.read().splitlines()\n reader = csv.DictReader(file_lines)\n invalid_data = 0\n success_message = \"Your csv file has been imported\"\n for row in reader:\n if row['First Name'].isspace() or row['First Name'] is None or len(row['First Name']) == 0:\n invalid_data += 1\n continue\n if row['Last Name'].isspace() or row['Last Name'] is None or len(row['Last Name']) == 0:\n invalid_data += 1\n continue\n if row['Email Address'].isspace() or row['Email Address'] is None or len(row['Email Address']) == 0:\n invalid_data += 1\n continue\n if row['Profile picture'].isspace() or row['Profile picture'] is None or len(row['Profile picture']) == 0:\n row['Profile 
row['Profile picture'] = 'default.png'\n\n # skip rows whose email already belongs to a teacher\n try:\n email_exists = Teacher.objects.get(email_address=row['Email Address'])\n except Teacher.DoesNotExist:\n email_exists = None\n\n if email_exists is not None:\n failed = row['First Name'] + ' ' + row['Last Name'] + \" has an email that already exists. \"\n self.message_user(request, failed, level=messages.ERROR)\n continue\n\n # only add a teacher if he/she has 5 subjects or less\n subjects = [s.strip() for s in row['Subjects taught'].split(',')] # strip spaces so ' Math' and 'Math' match\n if len(subjects) > 5:\n failed = row['First Name'] + ' ' + row['Last Name'] + \" has more than 5 subjects. \"\n self.message_user(request, failed, level=messages.ERROR)\n continue\n\n teacher = Teacher.objects.create(email_address=row['Email Address'])\n teacher.first_name = row['First Name']\n teacher.last_name = row['Last Name']\n teacher.room_number = row['Room Number']\n teacher.phone_number = row['Phone Number']\n teacher.profile_picture = '/images/' + row['Profile picture']\n\n # create the subject if it does not exist\n for subject in subjects:\n try:\n subject_exists = Subject.objects.get(name=subject)\n except Subject.DoesNotExist:\n subject_exists = None\n if subject_exists is None:\n Subject.objects.create(name=subject)\n teacher.subject_taught.add(Subject.objects.get(name=subject))\n\n teacher.save()\n failed = str(invalid_data) + \" record(s) had invalid data\"\n self.message_user(request, failed, level=messages.ERROR)\n self.message_user(request, success_message)\n\n return redirect(\"..\")\n form = CsvImportForm()\n payload = {\"form\": form}\n return render(\n request, \"admin/csv_form.html\", payload\n )\n\n\nadmin.site.register(Subject)\n","sub_path":"Teachers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"449030247","text":"from simpleai.search import (\n SearchProblem,\n astar\n)\n\nINITIAL = \"V,V,V-B,B,B-R,R,R|Z,Z,Z-R,R,R-A,A,A|N,N,N-A,A,A-N,N,N|B,B,B-N,N,N-B,B,B|A,A,A-Z,Z,Z-Z,Z,Z|R,R,R-V,V,V-V,V,V\"\n\nGOAL = \"R,R,R-R,R,R-R,R,R|A,A,A-A,A,A-A,A,A|V,V,V-V,V,V-V,V,V|Z,Z,Z-Z,Z,Z-Z,Z,Z|N,N,N-N,N,N-N,N,N|B,B,B-B,B,B-B,B,B\"\n\nROTATIONS = [[0, 1, 2, 3], [4, 2, 5, 3]]\n\ndef convert_state_to_list(state):\n state_list = []\n for face in state.split('|'):\n new_face = [row.split(',') for row in face.split('-')]\n state_list.append(new_face)\n return state_list\n\ndef convert_state_to_str(state):\n state_str = \"\"\n for idx_face, face in enumerate(state):\n if idx_face != 0:\n state_str +=\"|\"\n for idx_row, row in enumerate(face):\n if idx_row != 0:\n state_str += \"-\"\n for idx, piece in enumerate(row):\n if idx != 0:\n state_str += \",\"\n state_str += piece\n\n return state_str\n\ndef rotate(state, axis, idx, direction):\n faces_to_rotate = ROTATIONS[axis][::direction]\n aux_faces = {}\n for face in faces_to_rotate:\n aux_faces[face] = state[face][idx]\n idx_actual_face = 0\n while idx_actual_face != 4:\n next_idx = idx_actual_face + 1\n if idx_actual_face == 3:\n next_idx = 0\n state[faces_to_rotate[idx_actual_face]][idx] = aux_faces[faces_to_rotate[next_idx]]\n idx_actual_face += 1\n\nclass Rubik(SearchProblem):\n def cost(self, state1, action, state2):\n return 1\n\n def is_goal(self, state):\n return state == GOAL\n\n def actions(self, state):\n available_actions = []\n for rotate in [0, 1]:\n for idx in range(3):\n for direction in [1, -1]:\n available_actions.append((rotate, idx, direction))\n\n return available_actions\n\n def 
result(self, state, action):\n axis, idx, direction = action\n list_state = convert_state_to_list(state)\n rotate(list_state, axis, idx, direction)\n\n return convert_state_to_str(list_state)\n\n def heuristic(self, state):\n correct_position = 0\n list_state = convert_state_to_list(state)\n goal_list = convert_state_to_list(GOAL)\n for idx_face, face in enumerate(list_state):\n for idx_row, row in enumerate(face):\n for idx, piece in enumerate(row):\n if piece == goal_list[idx_face][idx_row][idx]:\n correct_position += 1\n \n return -correct_position\n\nif __name__==\"__main__\":\n problem = Rubik(INITIAL)\n\n result = astar(problem, graph_search=True)\n\n print(\"Goal node:\", result)\n if result:\n print(\"Path from initial to goal:\")\n for action, state in result.path():\n print(\"Action:\", action)\n print(\"State:\", state)","sub_path":"rubik.py","file_name":"rubik.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"353750761","text":"#!/usr/bin/env python\nimport time\n\nfrom multiprocessing import Process, Pipe\n\ndef func(a, b, conn):\n time.sleep(2)\n print(\"inside child:\", a, b)\n conn.send([10 * a, 10 * b])\n conn.close()\n\nif __name__ == '__main__':\n parent, child = Pipe()\n\n p = Process(target=func, args=(5, 10, child))\n\n p.start()\n\n print(\"In parent!\")\n\n data = parent.recv()\n print(\"data:\", data)\n\n p.join()\n\n","sub_path":"multiprocessing_hello.py","file_name":"multiprocessing_hello.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"418381967","text":"import os\nfrom collections import namedtuple\nfrom bisect import bisect_left\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom hfnet.settings import DATA_PATH, EXPER_PATH\nfrom . 
import geometry\nfrom .geometry import SE3\n\nCamera = namedtuple(\"Camera\", [\"poses\", \"timestamps\", \"descriptors\"])\nattributes = [\"name\", \"timestamps\", \"traverse_name\", \"dataset_name\", \"experiment_name\"]\n\n\nclass Traverse:\n def __init__(self, dataset_name, traverse_name, experiment_name):\n self.dataset_name = dataset_name\n self.traverse_name = traverse_name\n self.experiment_name = experiment_name\n # import INS data\n gps_dir = os.path.join(DATA_PATH, dataset_name, \"gps\", traverse_name)\n for i, gpsname in enumerate(os.listdir(gps_dir)):\n camera_df = pd.read_csv(os.path.join(gps_dir, gpsname))\n cam_name = gpsname[:-10]\n camera_df.sort_values(by=[\"timestamp\"], inplace=True)\n # set traverse timestamps\n if i == 0:\n self.timestamps = np.squeeze(camera_df[[\"timestamp\"]].to_numpy())\n else:\n curr_tstamps = np.squeeze(camera_df[[\"timestamp\"]].to_numpy())\n assert np.array_equal(self.timestamps, curr_tstamps), (\n \"Timestamps between cameras are inconsistent, please\"\n \"check that the cameras come from the same traverse!\"\n )\n xyzrpy = camera_df[\n [\"northing\", \"easting\", \"down\", \"roll\", \"pitch\", \"yaw\"]\n ].to_numpy()\n # read descriptors\n descriptor_dir = os.path.join(\n EXPER_PATH,\n \"exports\",\n experiment_name,\n traverse_name,\n cam_name,\n )\n example_fname = os.listdir(descriptor_dir)[0]\n D = len(np.load(os.path.join(descriptor_dir, example_fname))\n [\"global_descriptor\"])\n descriptors = np.empty((len(self.timestamps), D))\n for i, tstamp in enumerate(tqdm(self.timestamps)):\n dpath = os.path.join(descriptor_dir, str(tstamp) + \".npz\")\n descriptors[i, :] = np.load(dpath)[\"global_descriptor\"]\n poses = SE3.from_xyzrpy(xyzrpy)\n camera = Camera(poses=poses, timestamps=self.timestamps,\n descriptors=descriptors)\n setattr(self, cam_name, camera)\n\n def __len__(self):\n return len(self.timestamps)\n\n def topk_descriptors(self, query_attr, k):\n # retrieve attributes\n query_desc = query_attr[\"descriptor\"]\n query_pose = query_attr[\"pose\"]\n # top k most similar descriptors\n poses, timestamps, cameras, descriptors = self._aggregate()\n dist_sq = 2 - 2 * descriptors @ query_desc\n match_ind = np.argpartition(dist_sq, k)[:k]\n match_ind = match_ind[np.argsort(dist_sq[match_ind])]\n # extract INS information\n t_err, R_err = geometry.error(query_pose, poses)\n retrieved = []\n for ind in match_ind:\n retrieved.append(\n {\n \"camera\": cameras[ind],\n \"timestamp\": timestamps[ind],\n \"t_err\": t_err[ind],\n \"R_err\": R_err[ind] * 180 / np.pi,\n \"ind\": ind,\n }\n )\n return retrieved\n\n def retrieve_distractors(self, query_attr, k):\n # identify relevant images\n query_pose = query_attr[\"pose\"]\n relevant = self.kNN(query_pose, 10)\n relevant_ind = [attr[\"ind\"] for attr in relevant]\n # image retrieval\n img_retrieval = self.topk_descriptors(query_attr, 10 + k)\n retrieval_ind = [attr[\"ind\"] for attr in img_retrieval]\n # compute INS error\n poses, timestamps, cameras, descriptors = self._aggregate()\n t_err, R_err = geometry.error(query_pose, poses)\n # cull retrieved images that are close to gt (\"relevant\")\n distractors = []\n for ind in retrieval_ind:\n if ind not in relevant_ind and len(distractors) < k:\n distractors.append(\n {\n \"camera\": cameras[ind],\n \"timestamp\": timestamps[ind],\n \"t_err\": t_err[ind],\n \"R_err\": R_err[ind] * 180 / np.pi,\n \"ind\": ind,\n })\n return distractors\n\n def query_attr(self, camera, timestamp):\n \"\"\"\n Return pose and descriptor for camera/timestamp pair 
within traverse.\n \"\"\"\n camera = getattr(self, camera)\n # locate camera timestamp index and return associated pose\n i = bisect_left(camera.timestamps, timestamp)\n if i != len(camera.timestamps) and camera.timestamps[i] == timestamp:\n return {\"pose\": camera.poses[i],\n \"descriptor\": camera.descriptors[i],\n \"ind\": i}\n return None\n\n def kNN(self, pose, k, alpha=5, imperfect=False):\n retrieved = []\n # aggregate all cameras and find NN images\n poses, timestamps, cameras, _ = self._aggregate()\n # find NNs\n t_err, R_err = geometry.error(pose, poses)\n dist = geometry.metric(pose, poses, alpha)\n if imperfect:\n # imperfect retrieval retrieves k random relevant images\n match_ind = np.argpartition(dist, max(10, k))[:max(10, k)]\n match_ind = np.random.choice(match_ind, k)\n else:\n match_ind = np.argpartition(dist, k)[:k]\n match_ind = match_ind[np.argsort(dist[match_ind])]\n for ind in match_ind:\n retrieved.append(\n {\n \"camera\": cameras[ind],\n \"timestamp\": timestamps[ind],\n \"t_err\": t_err[ind],\n \"R_err\": R_err[ind] * 180 / np.pi,\n \"ind\": ind,\n }\n )\n return retrieved\n\n def query_tolerance(self, pose, t, R):\n \"\"\"\n Return all images inside given error tolerances to given pose.\n \"\"\"\n retrieved = []\n for cam_name, camera in self.__dict__.items():\n if cam_name not in attributes:\n t_err, R_err = geometry.error(pose, camera.poses)\n match = np.logical_and(t_err < t, R_err * 180 / np.pi < R)\n match_ind = np.squeeze(np.argwhere(match))\n for ind in match_ind:\n retrieved.append(\n {\n \"camera\": cam_name,\n \"timestamp\": self.timestamps[ind],\n \"t_err\": t_err[ind],\n \"R_err\": R_err[ind] * 180 / np.pi,\n \"ind\": ind,\n }\n )\n return retrieved\n\n def _aggregate(self):\n poses = []\n timestamps = []\n cameras = []\n descriptors = []\n for cam_name, camera in self.__dict__.items():\n if cam_name not in attributes:\n poses.extend(camera.poses)\n timestamps.extend(self.timestamps)\n cameras.extend(len(self.timestamps) * [cam_name])\n descriptors.append(camera.descriptors)\n poses = geometry.combine(poses)\n descriptors = np.concatenate(descriptors, axis=0)\n return poses, timestamps, cameras, descriptors\n","sub_path":"QUT/util/Traverse.py","file_name":"Traverse.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"19219030","text":"#use fire files to compare files that have been burning in california \n#between Sep 1-13 and Sept 14-30\n#file contains info about lat and lon and brightness of each fire\n#make a map that shows the fires\n#one file is from 9-1-20 to 9-13-20\n#other file is 9-14-20-9-20-20\n#only interested in fires that have brightness factor above 450\n\nimport json\n\ninfile = open('US_fires_9_14.json', 'r')\noutfile = open('readable_US_fires_9_4.json', 'w')\n\n#json load function converts data into a format python can work with\n#in this case creates giant dictionary\nfire_data = json.load(infile)\n\n#dump function takes json data and format into something more readable\n\njson.dump(fire_data,outfile, indent=4)\n\nlons, lats, brights = [], [], []\n\n#go through entire list of fires!\n#since out data is a list of dictionaries, and each dicitonary is a fire, can just use our file\nfor fire in fire_data:\n if int(fire['brightness']) > 450:\n lon = fire['longitude']\n lat = fire['latitude']\n bright = int(fire['brightness'])\n\n\n lons.append(lon)\n lats.append(lat)\n brights.append(bright)\n\nfrom plotly.graph_objs import Scattergeo, 
Layout\nfrom plotly import offline\n# scattergeo lets us plot scatter points on a world map\n# the latitude and longitude lists place each fire on the map for you\n# plotting offline saves the figure to a local html file for viewing\ndata = [{\n 'type':'scattergeo',\n 'lon':lons,\n 'lat':lats,\n 'marker':{\n 'size':[bright/20 for bright in brights], \n 'color': brights, # assigns a color to each brightness value\n 'colorscale':'Viridis',\n 'reversescale': True,\n 'colorbar':{'title': 'Magnitude'}\n }\n}]\n\nmy_layout = Layout(title='US Fires Above 450 Brightness 9/14/20 to 9/20/20')\nfig = {'data':data, 'layout':my_layout}\noffline.plot(fig, filename='fires_september.html')\n\n","sub_path":"json_project_2.py","file_name":"json_project_2.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"519733689","text":"import telebot\r\nimport pyowm\r\nimport random\r\nimport os\r\nfrom telebot import types\r\nglobal a\r\n\r\ntoken = os.environ.get('TOKEN')\r\ntokenpog = os.environ.get('TOKPOG')\r\nbot = telebot.TeleBot(token) # bot token, read from the TOKEN env var\r\nowm = pyowm.OWM(tokenpog, language='ru') # weather API token\r\n\r\n@bot.message_handler(commands=['start']) # /start command\r\ndef welcome(message): # greeting function\r\n sti = open('welcome.webp', 'rb') # opens the sticker image\r\n bot.send_sticker(message.chat.id, sti) # the bot sends the sticker\r\n\r\n # keyboard\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True) # create the keyboard\r\n item1 = types.KeyboardButton(\"🎲 Рандомное число\") # button 1: random number\r\n item2 = types.KeyboardButton(\"Погода\") # button 2: weather\r\n\r\n markup.add(item1, item2) # add the buttons\r\n\r\n bot.send_message(message.chat.id, \"Добро пожаловать, {0.first_name}!\nЯ - {1.first_name}, бот, создан, чтобы помогать тебе 😉\".format(message.from_user, bot.get_me()),\r\n parse_mode='html', reply_markup=markup) # send the greeting and show the keyboard to the user\r\n \r\ndef get_pogoga(message): # weather function\r\n try: \r\n global pogoga # global variable\r\n pogoga = message.text # place name = whatever the user typed\r\n if pogoga == \"Спб\": # if the user typed the abbreviation for St. Petersburg,\r\n pogoga = \"Санкт-Петербург\" # expand it so the lookup works\r\n observation = owm.weather_at_place(pogoga) # look up the place\r\n w = observation.get_weather() # save the weather status\r\n temp = w.get_temperature('celsius')['temp'] # save the temperature\r\n humidity = w.get_humidity() # save the humidity\r\n windy = w.
get_wind()['speed'] # save the wind speed\r\n answer = 'В городе ' + pogoga + \" cейчас \" + w.get_detailed_status() + \"\n\" # weather status for the reply\r\n answer += 'Сейчас в районе температура ' + str(temp) + '\n' # temperature for the reply\r\n answer += 'Влажность: ' + str(humidity) + \"%\" + \"\n\" # humidity for the reply\r\n answer += 'Скорость ветра: ' + str(windy) + \"м/c\" + \"\n\" # wind speed for the reply\r\n bot.send_message(message.chat.id, answer) # send everything that was collected\r\n except: # the place name was invalid\r\n global a # global variable\r\n if a == 0: # if a == 0 then\r\n markup = types.InlineKeyboardMarkup(row_width=2) # create an inline keyboard under the text\r\n item1 = types.InlineKeyboardButton(\"Отменить ввод погоды\", callback_data='bad') # with a cancel button\r\n markup.add(item1) # show it\r\n bot.send_message(message.chat.id,\"Что-то тут не так😔 Побробуй ещё раз\",reply_markup=markup) # the bot says something went wrong, try again\r\n bot.register_next_step_handler(message, get_pogoga) # return to the city lookup until the user enters a valid city or presses cancel\r\n\r\n\r\n \r\n@bot.message_handler(content_types=['text']) # button checks\r\ndef pogoga(message):\r\n if message.chat.type == 'private':\r\n if message.text == '🎲 Рандомное число': # if the random-number button was pressed\r\n bot.send_message(message.chat.id, str(random.randint(0,100))) # send a random number from 0 to 100\r\n elif message.text == 'Погода': # if the weather button was pressed\r\n bot.send_message(message.chat.id,'В каком вы горороде/стране?:') # the bot asks which city/country the user is in\r\n global a\r\n a=0\r\n bot.register_next_step_handler(message, get_pogoga) # hand off to the weather function\r\n else:\r\n bot.send_message(message.chat.id, \"Я не знаю, что ответить😢\") # reply when the bot gets any other text\r\n\r\n@bot.callback_query_handler(func=lambda call: True) # inline button for the weather prompt\r\ndef callback_inline(call):\r\n try:\r\n if call.message:\r\n if call.data == 'bad': # the cancel button was pressed\r\n bot.send_message(call.message.chat.id, 'Как скажешь😉') # the bot answers 'as you wish'\r\n global a\r\n a=1\r\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=\"Ввод отменен\",\r\n reply_markup=None) # replace the error message with 'input cancelled' and remove the inline keyboard\r\n except Exception as e:\r\n pass \r\nbot.polling(none_stop = True) \r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"307237660","text":"from flask import Flask, render_template, jsonify\napp = Flask(__name__)\nimport jinja2\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom create_db import Base, Mammal\n\nengine = create_engine('sqlite:///mammals.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n### Pages\n@app.route('/')\ndef Show():\n\tmammals = session.query(Mammal).all()\n\tlist = []\n\ttest = \"thisisatest\"\n\treturn render_template('index.html', test=test)\n\n@app.route('/api')\ndef mammals():\n\tmammals = session.query(Mammal).all()\n\treturn jsonify(Mammal=[m.serialize for m in mammals])\n\nif __name__ == 
\"__main__\":\n\tapp.run(debug=True)\n\n\t","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"164918150","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 10 09:10:39 2020\n\n@author: Xuheng Ding\n\nA class to process the data\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as pyfits\nfrom astropy.wcs import WCS\nfrom galight.tools.measure_tools import measure_bkg\nfrom galight.tools.cutout_tools import cut_center_auto, cutout\nfrom copy import deepcopy\nfrom matplotlib.colors import LogNorm\nfrom galight.tools.astro_tools import plt_fits, read_pixel_scale\n\nimport sys\nfrom packaging import version\n\n\nclass DataProcess(object):\n \"\"\"\n A class to Process the data, including the following feature:\n - automaticlly estimate and remove background light.\n - cutout the target photo stamp.\n - search all the avaiable PSF in the field.\n - creat mask for the objects.\n - measure the target surface brightness profile, PSF FWHM, background.\n Parameter\n --------\n fov_image: 2D array.\n The field of view image of the data.\n \n target_pos: list or tuple or array, length = 2.\n The position of the target.\n \n pos_type: string.\n 'pixel' or 'wcs'\n Define the position of the target, i.e., if the position is in 'pixel' or 'wcs'.\n \n header: io.fits.header.Header.\n -The header information given by the fits file. \n Note: should including the exposure time and WCS information.\n \n exptime: float / 2D array.\n The exposure time of the data in (s) a the exptime_map\n \n fov_noise_map: 2D array.\n The field of view noise map, should have same shape as the 'fov_image'.\n \n rm_bkglight: bool. \n If 'True', the FOV background light will be modeled and removed. \n \n if_plot: bool.\n If 'True', the plots will made during the data processing.\n \n zp: float.\n The zeropoint of the telescope. To calcualte the magnitude. If not provided will assign as 27.0\n \n \"\"\"\n def __init__(self, fov_image=None, target_pos = None, pos_type = 'pixel', header=None, \n exptime = None, fov_noise_map = None,rm_bkglight = False, if_plot = False, \n zp = None, **kwargs):\n if target_pos is not None:\n if pos_type == 'pixel':\n self.target_pos = target_pos\n elif pos_type == 'wcs':\n wcs = WCS(header)\n self.target_pos = wcs.all_world2pix([[target_pos[0], target_pos[1]]], 1)[0]\n else:\n raise ValueError(\"'pos_type' is should be either 'pixel' or 'wcs'.\")\n self.target_pos = np.int0(self.target_pos)\n else:\n raise ValueError(\"'target_pos' must be assigned.\")\n\n self.exptime = exptime\n self.if_plot = if_plot \n self.header = header\n if header is not None:\n self.deltaPix = read_pixel_scale(header)\n if self.deltaPix == 3600.:\n print(\"WARNING: pixel size could not read from the header, thus the value assigend as 1! 
\")\n self.deltaPix = 1.\n if fov_image is not None and rm_bkglight == True:\n bkglight = measure_bkg(fov_image, if_plot=if_plot, **kwargs)\n fov_image = fov_image-bkglight\n self.fov_image = fov_image\n self.fov_noise_map = fov_noise_map\n self.psf_id_for_fitting = 0 #The psf id in the PSF_list that would be used in the fitting.\n if zp is None:\n print(\"Zeropoint value is not provided, use 27.0 to calculate magnitude.\")\n self.zp = 27.0\n else:\n self.zp = zp\n\n def generate_target_materials(self, cut_kernel = None, radius=None, radius_list = None,\n bkg_std = None, if_select_obj = False, create_mask = False, \n if_plot=None, **kwargs):\n \"\"\"\n Prepare the fitting materials to used for the fitting, including the image cutout, noise map and masks (optional).\n More important, the apertures that used to define the fitting settings are also generated.\n \n Parameter\n --------\n cut_kernel: string or 'None'.\n The args will be input as kernel into galight.tools.cutout_tools.cut_center_auto()\n \n radius: int or float\n The radius to cutout the image data. The final framesize will be 2*radius+1\n \n cut_kernel: None or 'center_gaussian' or 'center_bright'.\n - if 'None', directly cut.\n - if 'center_gaussian', fit central as 2D Gaussian and cut at the Gaussian center.\n - if 'center_bright', cut the brightest pixel in the center\n \n bkg_std: float\n To input the background noise level.\n \n if_select_obj:\n - if 'True', only selected obj will be modelled. \n \n create_mask: bool.\n 'True' if to define a mask based on the apertures. Note that the corresponding aperture \n will de removed automaticlly. \n\n if_plot: bool.\n If 'True', the plots will made during the cut out.\n \n **kwargs:\n Arguments can also passed to detect_obj()\n \n \"\"\"\n if if_plot == None:\n if_plot = self.if_plot\n \n self.bkg_std = bkg_std\n \n if radius == None:\n if radius_list == None:\n radius_list = [30, 35, 40, 45, 50, 60, 70]\n for rad in radius_list:\n from galight.tools.measure_tools import fit_data_oneD_gaussian\n _cut_data = cutout(image = self.fov_image, center = self.target_pos, radius=rad)\n edge_data = np.concatenate([_cut_data[0,:],_cut_data[-1,:],_cut_data[:,0], _cut_data[:,-1]])\n try:\n gauss_mean, gauss_1sig = fit_data_oneD_gaussian(edge_data, ifplot=False)\n except:\n gauss_mean, gauss_1sig = np.mean(edge_data), np.std(edge_data)\n up_limit = gauss_mean + 2 * gauss_1sig\n percent = np.sum(edge_data>up_limit)/float(len(edge_data))\n if percent<0.03:\n break\n radius = rad\n \n if if_plot == True:\n print(\"Plot target cut out zoom in:\")\n if cut_kernel is not None:\n target_stamp, self.target_pos = cut_center_auto(image=self.fov_image, center= self.target_pos, \n kernel = cut_kernel, radius=radius,\n return_center=True, if_plot=if_plot)\n else:\n target_stamp = cutout(image = self.fov_image, center = self.target_pos, radius=radius)\n \n if self.fov_noise_map is not None:\n self.noise_map = cutout(image = self.fov_noise_map, center = self.target_pos, radius=radius)\n else:\n if bkg_std == None:\n from galight.tools.measure_tools import esti_bgkstd\n target_2xlarger_stamp = cutout(image=self.fov_image, center= self.target_pos, radius=radius*2)\n self.bkg_std = esti_bgkstd(target_2xlarger_stamp, if_plot=if_plot)\n _exptime = deepcopy(self.exptime)\n if _exptime is None:\n if 'EXPTIME' in self.header.keys():\n _exptime = self.header['EXPTIME']\n else:\n raise ValueError(\"No Exposure time information in the header, should input a value.\")\n if isinstance(_exptime, np.ndarray):\n _exptime = 
cutout(image=self.exptime, center= self.target_pos, radius=radius)\n noise_map = np.sqrt(abs(target_stamp/_exptime) + self.bkg_std**2)\n self.noise_map = noise_map\n \n target_mask = np.ones_like(target_stamp)\n from galight.tools.measure_tools import detect_obj, mask_obj\n apertures, self.segm_deblend = detect_obj(target_stamp, if_plot= create_mask or if_select_obj, \n err=self.noise_map, segm_map= True, **kwargs)\n if if_select_obj == True:\n select_idx = str(input('Input directly the a obj idx to MODEL, use space between each id:\\n'))\n if select_idx != '':\n if sys.version_info.major > 2:\n select_idx = [int(select_idx[i]) for i in range(len(select_idx)) if select_idx[i].isnumeric()]\n else:\n select_idx = [int(select_idx[i]) for i in range(len(select_idx)) if select_idx[i].isdigit()]\n apertures_select = [apertures[i] for i in select_idx] \n else:\n apertures_select = apertures\n \n if create_mask == True:\n select_idx = str(input('Input directly the a obj that used to create MASK, use space between each id:\\n'))\n # if sys.version_info.major > 2:\n # select_idx_list = [int(s) for s in select_idx.split() if s.isdigit()]\n # else:\n select_idx_list = [int(s) for s in select_idx.split() if s.isdigit()]\n \n if '!' not in select_idx:\n apertures_ = [apertures[i] for i in select_idx_list]\n apertures = [apertures[i] for i in range(len(apertures)) if i not in select_idx_list]\n else:\n apertures_ = [apertures[i] for i in range(len(apertures)) if i not in select_idx_list] \n apertures = [apertures[i] for i in range(len(apertures)) if i in select_idx_list] \n mask_list = mask_obj(target_stamp, apertures_, if_plot=False)\n for i in range(len(mask_list)):\n target_mask *= mask_list[i]\n if if_select_obj == True:\n apertures = [apertures[i] for i in range(len(apertures)) if apertures[i] in apertures_select]\n self.apertures = apertures\n self.target_stamp = target_stamp\n self.target_mask = target_mask\n if if_plot:\n fig, (ax1, ax3, ax2) = plt.subplots(1, 3, figsize=(14, 10))\n im1 = ax1.imshow(target_stamp, origin='lower', norm=LogNorm(vmax = target_stamp.max(), vmin = 1.e-4))\n ax1.set_title('Cutout target', fontsize=25)\n fig.colorbar(im1, ax=ax1, pad=0.01, orientation=\"horizontal\")\n ax1.get_xaxis().set_visible(False)\n ax1.get_yaxis().set_visible(False) \n im2 = ax2.imshow(self.noise_map, origin='lower', norm=LogNorm())\n ax2.set_title('Noise map', fontsize=25)\n fig.colorbar(im2, ax=ax2, pad=0.01, orientation=\"horizontal\")\n ax2.get_xaxis().set_visible(False)\n ax2.get_yaxis().set_visible(False) \n im3 = ax3.imshow(target_stamp * target_mask, origin='lower', norm=LogNorm(vmax = target_stamp.max(), vmin = 1.e-4))\n ax3.set_title('data * mask', fontsize=25)\n fig.colorbar(im3, ax=ax3, pad=0.01, orientation=\"horizontal\")\n ax3.get_xaxis().set_visible(False)\n ax3.get_yaxis().set_visible(False) \n plt.show() \n \n def find_PSF(self, radius = 50, PSF_pos_list = None, pos_type = 'pixel', psf_edge=120, if_filter=False, user_option= False):\n \"\"\"\n Find all the available PSF candidates in the field of view.\n \n Parameter\n --------\n radius: int/float.\n The radius of the cutout frames of the PSF. PSF size = 2*radius + 1\n \n PSF_pos_list: None or list of position.\n Input a list if PSF star position has decided.\n \n pos_type: string.\n 'pixel' or 'wcs'\n Define the position of the target\n \n user_option: bool.\n Only works when PSF_pos_list = None. 
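If 'True', the user is prompted to select which detected PSF candidates to keep.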
\n \n psf_edge: int/float.\n The number of pixels by which the PSFs should avoid the image edge.\n \"\"\"\n if PSF_pos_list is None:\n from galight.tools.measure_tools import search_local_max, measure_FWHM\n init_PSF_locs_ = search_local_max(self.fov_image, radius = psf_edge)\n init_PSF_locs, FWHMs, fluxs, PSF_cutouts = [], [], [], []\n for i in range(len(init_PSF_locs_)):\n cut_image = cut_center_auto(self.fov_image, center = init_PSF_locs_[i],\n radius=radius)\n _fwhms = measure_FWHM(cut_image , radius = int(radius/5))\n if np.std(_fwhms)/np.mean(_fwhms) < 0.1 : #Remove the detected \"PSFs\" at the edge.\n init_PSF_locs.append(init_PSF_locs_[i])\n FWHMs.append(np.mean(_fwhms))\n fluxs.append(np.sum(cut_image))\n PSF_cutouts.append(cut_image)\n init_PSF_locs = np.array(init_PSF_locs)\n FWHMs = np.array(FWHMs)\n fluxs = np.array(fluxs)\n PSF_cutouts = np.array(PSF_cutouts)\n if hasattr(self, 'target_stamp'):\n target_flux = np.sum(self.target_stamp)\n dis = np.sqrt( np.sum( (init_PSF_locs - self.target_pos)**2 , axis=1) )\n select_bool = (FWHMs < np.median(FWHMs)) * (fluxs > target_flux/2) * (dis > 5) #NOTE: the FWHM cut was garbled in the source; this comparison is a reconstruction, not the verbatim original.\n else:\n select_bool = (FWHMs < np.median(FWHMs)) #NOTE: reconstructed; the original threshold was lost in the source.\n PSF_locs = init_PSF_locs[select_bool]\n #NOTE: the following user-selection block was lost in the source; it is reconstructed to mirror the prompt used in generate_target_materials().\n if user_option == True:\n select_idx = str(input('Input directly the PSF idx to use, use space between each id:\n'))\n if sys.version_info.major > 2:\n select_idx = [int(select_idx[i]) for i in range(len(select_idx)) if select_idx[i].isnumeric()]\n else:\n select_idx = [int(select_idx[i]) for i in range(len(select_idx)) if select_idx[i].isdigit()] \n self.PSF_pos_list = [PSF_locs[i] for i in select_idx] \n else:\n self.PSF_pos_list = list(PSF_locs)\n else:\n if pos_type == 'pixel':\n self.PSF_pos_list = PSF_pos_list\n elif pos_type == 'wcs':\n wcs = WCS(self.header)\n self.PSF_pos_list = [wcs.all_world2pix([[PSF_pos_list[i][0], PSF_pos_list[i][1]]], 1) for i in range(len(PSF_pos_list))]\n self.PSF_list = [cut_center_auto(self.fov_image, center = self.PSF_pos_list[i],\n kernel = 'center_gaussian', radius=radius) for i in range(len(self.PSF_pos_list))]\n\n def profiles_compare(self, **kargs):\n \"\"\"\n Use galight.tools.measure_tools.profiles_compare to plot the profiles of data and PSFs (when prepared).\n \"\"\" \n from galight.tools.measure_tools import profiles_compare \n profiles_compare([self.target_stamp] + self.PSF_list, **kargs)\n \n def plot_overview(self, **kargs):\n \"\"\"\n Use galight.tools.cutout_tools.plot_overview to plot image overview.\n \"\"\"\n from galight.tools.cutout_tools import plot_overview\n if hasattr(self, 'PSF_pos_list'):\n PSF_pos_list = self.PSF_pos_list\n else:\n PSF_pos_list = None\n plot_overview(self.fov_image, center_target= self.target_pos,\n c_psf_list=PSF_pos_list, **kargs)\n \n def checkout(self):\n \"\"\"\n Check out if everything is prepared to pass to galight.fitting_process().\n \"\"\" \n checklist = ['deltaPix', 'target_stamp', 'noise_map', 'target_mask', 'PSF_list', 'psf_id_for_fitting']\n ct = 0\n if len(self.PSF_list[self.psf_id_for_fitting]) != 0 and self.PSF_list[self.psf_id_for_fitting].shape[0] != self.PSF_list[self.psf_id_for_fitting].shape[1]:\n print(\"The PSF is not square; it will be cut to a square size automatically.\")\n cut = int((self.PSF_list[self.psf_id_for_fitting].shape[0] - self.PSF_list[self.psf_id_for_fitting].shape[1])/2)\n if cut>0:\n self.PSF_list[self.psf_id_for_fitting] = self.PSF_list[self.psf_id_for_fitting][cut:-cut,:]\n elif cut<0:\n self.PSF_list[self.psf_id_for_fitting] = self.PSF_list[self.psf_id_for_fitting][:,-cut:cut]\n self.PSF_list[self.psf_id_for_fitting] /= self.PSF_list[self.psf_id_for_fitting].sum()\n if self.PSF_list[self.psf_id_for_fitting].shape[0] != self.PSF_list[self.psf_id_for_fitting].shape[1]:\n raise ValueError(\"PSF shape is not a square.\")\n for name in checklist:\n if not hasattr(self, 
name):\n print('The keyword of {0} is missing.'.format(name))\n ct = ct+1\n if ct == 0:\n print('The data_process is ready to pass to FittingSpecify!')\n \n \n \n","sub_path":"galight/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":19313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"592995692","text":"\"Home page variants, and a few general resources.\"\n\nimport logging\nimport os.path\nimport sys\n\nimport couchdb2\nimport markdown\nimport requests\nimport tornado\nimport tornado.web\nimport xlsxwriter\nimport yaml\n\nimport orderportal\nfrom orderportal import constants\nfrom orderportal import saver\nfrom orderportal import settings\nfrom orderportal import utils\nfrom orderportal.requesthandler import RequestHandler\n\n\nclass Home(RequestHandler):\n \"Home page; dashboard. Contents according to role of logged-in account.\"\n\n def get(self):\n \n forms = [r.doc for r in self.db.view(\"form\", \"enabled\", include_docs=True)]\n for f in forms:\n if f.get(\"ordinal\") is None:\n f[\"ordinal\"] = 0\n forms.sort(key=lambda i: i[\"ordinal\"])\n kwargs = dict(\n forms=forms,\n news_items=self.get_news(limit=settings[\"DISPLAY_MAX_NEWS\"]),\n events=self.get_events(upcoming=True),\n )\n if self.current_user and self.get_invitations(self.current_user[\"email\"]):\n url = self.reverse_url(\"account\", self.current_user[\"email\"])\n kwargs[\n \"message\"\n ] = \"\"\"You have group invitations.\nSee your <a href=\"{0}\">account</a>.\"\"\".format(\n url\n )\n if not self.current_user:\n self.render(\"home.html\", **kwargs)\n elif self.current_user[\"role\"] == constants.ADMIN:\n self.home_admin(**kwargs)\n elif self.current_user[\"role\"] == constants.STAFF:\n self.home_staff(**kwargs)\n else:\n self.home_user(**kwargs)\n\n def home_admin(self, **kwargs):\n \"Home page for a current user having role 'admin'.\"\n view = self.db.view(\n \"account\", \"status\", key=constants.PENDING, include_docs=True\n )\n pending = [r.doc for r in view]\n pending.sort(key=lambda i: i[\"modified\"], reverse=True)\n pending = pending[: settings[\"DISPLAY_MAX_PENDING_ACCOUNTS\"]]\n # NOTE: Hard-wired status 'submitted'!\n view = self.db.view(\n \"order\",\n \"status\",\n descending=True,\n startkey=[\"submitted\", constants.CEILING],\n endkey=[\"submitted\"],\n limit=settings[\"DISPLAY_MAX_RECENT_ORDERS\"],\n reduce=False,\n include_docs=True,\n )\n orders = [r.doc for r in view]\n self.render(\"home_admin.html\", pending=pending, orders=orders, **kwargs)\n\n def home_staff(self, **kwargs):\n \"Home page for a current user having role 'staff'.\"\n # NOTE: Hard-wired status 'accepted'!\n view = self.db.view(\n \"order\",\n \"status\",\n descending=True,\n startkey=[\"accepted\", constants.CEILING],\n endkey=[\"accepted\"],\n limit=settings[\"DISPLAY_MAX_RECENT_ORDERS\"],\n reduce=False,\n include_docs=True,\n )\n orders = [r.doc for r in view]\n self.render(\"home_staff.html\", orders=orders, **kwargs)\n\n def home_user(self, **kwargs):\n \"Home page for a current user having role 'user'.\"\n if not settings[\"ORDER_CREATE_USER\"]:\n kwargs[\"forms\"] = None # Indicates that users can't create orders.\n view = self.db.view(\n \"order\",\n \"owner\",\n reduce=False,\n include_docs=True,\n descending=True,\n startkey=[self.current_user[\"email\"], constants.CEILING],\n endkey=[self.current_user[\"email\"]],\n limit=settings[\"DISPLAY_MAX_RECENT_ORDERS\"],\n )\n orders = [r.doc for r in view]\n self.render(\"home_user.html\", 
orders=orders, **kwargs)\n\n\nclass Contact(RequestHandler):\n \"Display contact information.\"\n\n def get(self):\n self.render(\"contact.html\")\n\n\nclass About(RequestHandler):\n \"Display 'About us' information.\"\n\n def get(self):\n self.render(\"about.html\")\n\n\nclass Software(RequestHandler):\n \"Display software information for the web site.\"\n\n def get(self):\n software = [\n (\"OrderPortal\", orderportal.__version__, constants.SOURCE_URL),\n (\"Python\", constants.PYTHON_VERSION, constants.PYTHON_URL),\n (\"tornado\", tornado.version, constants.TORNADO_URL),\n (\"CouchDB server\", self.db.server.version, constants.COUCHDB_URL),\n (\"CouchDB2 interface\", couchdb2.__version__, constants.COUCHDB2_URL),\n (\"XlsxWriter\", xlsxwriter.__version__, constants.XLSXWRITER_URL),\n (\"Markdown\", markdown.version, constants.MARKDOWN_URL),\n (\"requests\", requests.__version__, constants.REQUESTS_URL),\n (\"PyYAML\", yaml.__version__, constants.PYYAML_URL),\n (\"Bootstrap\", constants.BOOTSTRAP_VERSION, constants.BOOTSTRAP_URL),\n (\"jQuery\", constants.JQUERY_VERSION, constants.JQUERY_URL),\n (\"jQuery.UI\", constants.JQUERY_UI_VERSION, constants.JQUERY_URL),\n (\n \"jQuery.localtime\",\n constants.JQUERY_LOCALTIME_VERSION,\n constants.JQUERY_LOCALTIME_URL,\n ),\n (\"DataTables\", constants.DATATABLES_VERSION, constants.DATATABLES_URL),\n ]\n self.render(\"software.html\", software=software)\n\n\nclass Log(RequestHandler):\n \"Single log entry; JSON output.\"\n\n def get(self, iuid):\n log = self.get_entity(iuid, doctype=constants.LOG)\n log[\"iuid\"] = log.pop(\"_id\")\n log.pop(\"_rev\")\n log.pop(\"orderportal_doctype\")\n self.write(log)\n self.set_header(\"Content-Type\", constants.JSON_MIME)\n\n\nclass Entity(RequestHandler):\n \"Redirect to the entity given by the IUID, if any.\"\n\n def get(self, iuid):\n \"Login and privileges are checked by the entity redirected to.\"\n doc = self.get_entity(iuid)\n if doc[constants.DOCTYPE] == constants.ORDER:\n self.redirect(self.order_reverse_url(doc))\n elif doc[constants.DOCTYPE] == constants.FORM:\n self.see_other(\"form\", doc[\"_id\"])\n elif doc[constants.DOCTYPE] == constants.ACCOUNT:\n self.see_other(\"account\", doc[\"email\"])\n else:\n self.see_other(\"home\", error=\"Sorry, no such entity found.\")\n\n\nclass NoSuchEntity(RequestHandler):\n \"Error message on home page.\"\n\n def get(self, path=None):\n logging.debug(\"No such entity: %s\", path)\n self.see_other(\"home\", error=\"Sorry, no such entity found.\")\n\n\nclass NoSuchEntityApiV1(RequestHandler):\n \"Return Not Found status code.\"\n\n def get(self, path=None):\n logging.debug(\"No such entity: %s\", path)\n raise tornado.web.HTTPError(404)\n\n def post(self, path=None):\n logging.debug(\"No such entity: %s\", path)\n raise tornado.web.HTTPError(404)\n\n def put(self, path=None):\n logging.debug(\"No such entity: %s\", path)\n raise tornado.web.HTTPError(404)\n\n def check_xsrf_cookie(self):\n \"Do not check for XSRF cookie when API.\"\n pass\n\n\nclass Status(RequestHandler):\n \"Return JSON for the current status and some counts for the database.\"\n\n def get(self):\n try:\n n_orders = list(self.db.view(\"order\", \"status\", reduce=True))[0].value\n except IndexError:\n n_orders = 0\n try:\n n_forms = list(self.db.view(\"form\", \"all\", reduce=True))[0].value\n except IndexError:\n n_forms = 0\n try:\n n_accounts = list(self.db.view(\"account\", \"all\", reduce=True))[0].value\n except IndexError:\n n_accounts = 0\n self.write(\n dict(status=\"OK\", 
n_orders=n_orders, n_forms=n_forms, n_accounts=n_accounts)\n )\n","sub_path":"orderportal/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"589893438","text":"from get_data import *\r\n\r\ndef has_fought(f1,f2):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\tf2 = f2.replace(\"_\",\" \")\r\n\t\r\n\tfor i in f1_career[\"Opponent\"]:\r\n\t\tif f2 == i:\r\n\t\t\treturn True\r\n\t\r\n\treturn False\r\n\t\r\ndef num_fights(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\treturn len(f1_career)\r\n\t\r\ndef win_percentage(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\twins = 0\r\n\t\r\n\tfor i in f1_career[\"Result\"]:\r\n\t\tif i == \"Win\":\r\n\t\t\twins += 1\r\n\t\t\t\r\n\treturn float(wins/len(f1_career))\r\n\r\ndef percentage_by_dec(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\tdec = 0\r\n\t\r\n\tfor i in f1_career[\"Method\"]:\r\n\t\tif \"Decision\" in i:\r\n\t\t\tdec += 1\r\n\t\t\t\r\n\treturn float(dec/len(f1_career))\r\n\t\r\ndef percentage_by_ko(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\tkos = 0\r\n\t\r\n\tfor i in f1_career[\"Method\"]:\r\n\t\tif \"KO\" in i:\r\n\t\t\tkos += 1\r\n\t\t\t\r\n\treturn float(kos/len(f1_career))\r\n\t\r\ndef percentage_by_sub(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\tsubs = 0\r\n\t\r\n\tfor i in f1_career[\"Method\"]:\r\n\t\tif \"Submission\" in i:\r\n\t\t\tsubs += 1\r\n\t\t\t\r\n\treturn float(subs/len(f1_career))\r\n\t\r\ndef profile(f1):\r\n\tdata = find_data(f1)\r\n\tpsub = percentage_by_sub(f1)\r\n\tpko = percentage_by_ko(f1)\r\n\tpdec = percentage_by_dec(f1)\r\n\tn = num_fights(f1)\r\n\t\r\n\tprint(data)\r\n\tprint(str(n) + \" fights\")\r\n\tprint(str(pdec) + \" % Decision\")\r\n\tprint(str(pko) + \" % KO\")\r\n\tprint(str(psub) + \" % Submission\")\r\n\t\r\ndef watch_this(f1):\r\n\tf1_career = find_career(f1)\r\n\t\r\n\topponent = 'n/a'\r\n\t\r\n\tcount = random.randrange(len(f1_career))\r\n\t\r\n\tbarn_burner = input(\"Choose a FOTN/POTN fight? (Y/N)\\n\")\r\n\t\r\n\tif barn_burner == 'Y':\r\n\t\tfor i in f1_career[\"Notes\"]:\r\n\t\t\tif i != None and opponent == 'n/a':\r\n\t\t\t\tif (\"Performance of the Night\" in i) or (\"Fight of the Night\" in i):\r\n\t\t\t\t\topponent = f1_career[\"Opponent\"][count]\r\n\t\t\t\t\t\r\n\t\t\t\t\ttime_spoiler = input(\"Give the duration of the bout? (Y/N)\\n\")\r\n\t\t\t\t\t\r\n\t\t\t\t\tif time_spoiler == 'Y':\r\n\t\t\t\t\t\t#Calculate length of the bout\r\n\t\t\t\t\t\ttime = f1_career[\"Time\"][count]\r\n\t\t\t\t\t\tround = f1_career[\"Round\"][count]\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\ttime_li = time.split(\":\")\r\n\t\t\t\t\t\ttotal_min = int(time_li[0]) + ((int(round)-1)*5)\r\n\t\t\t\t\t\ttotal_sec = time_li[1]\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\ttotal_duration = \"The bout was \" + str(total_min) + \" min and \" + str(total_sec) + \" sec long.\"\r\n\t\t\t\t\t\tprint(total_duration)\r\n\t\t\t\t\t\r\n\t\t\t\t\tmatchup = f1 + \" vs. \" + opponent\r\n\t\t\tif count == len(f1_career) - 1:\r\n\t\t\t\tcount = 0\r\n\t\t\telse:\r\n\t\t\t\tcount += 1\r\n\t\tif opponent == 'n/a':\r\n\t\t\tmatchup = watch_this(random_fighter())\r\n\telif barn_burner == 'N':\r\n\t\tcount = random.randrange(len(f1_career))\r\n\t\topponent = f1_career[\"Opponent\"][count]\r\n\t\tmatchup = f1 + \" vs. \" + opponent\r\n\t\t\r\n\t\ttime_spoiler = input(\"Give the duration of the bout? 
(Y/N)\\n\")\r\n\t\t\t\t\t\r\n\t\tif time_spoiler == 'Y':\r\n\t\t\t#Calculate length of the bout\r\n\t\t\ttime = f1_career[\"Time\"][count]\r\n\t\t\tround = f1_career[\"Round\"][count]\r\n\t\t\t\r\n\t\t\ttime_li = time.split(\":\")\r\n\t\t\ttotal_min = int(time_li[0]) + ((int(round)-1)*5)\r\n\t\t\ttotal_sec = time_li[1]\r\n\t\t\t\r\n\t\t\ttotal_duration = \"The bout was\" + str(total_min) + \" min and \" + str(total_sec) + \" sec long.\"\r\n\t\t\tprint(total_duration)\r\n\t\r\n\treturn matchup","sub_path":"stat_queries.py","file_name":"stat_queries.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"41836126","text":"# Copyright 2008-2011 Nokia Networks\n# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors\n# Copyright 2016- Robot Framework Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom SeleniumLibrary.base import LibraryComponent, keyword\n\n\nclass FrameKeywords(LibraryComponent):\n\n @keyword\n def select_frame(self, locator):\n \"\"\"Sets frame identified by ``locator`` as the current frame.\n\n Key attributes for frames are `id` and `name.` See `introduction` for\n details about locating elements.\n\n See `Unselect Frame` to cancel the frame selection and return to the Main frame.\n\n Please note that the frame search always start from the document root or main frame.\n\n Example:\n | Select Frame | xpath: //frame[@name='top]/iframe[@name='left'] | # Selects the 'left' iframe |\n | Click Link | foo | # Clicks link 'foo' in 'left' iframe |\n | Unselect Frame | | # Returns to main frame |\n | Select Frame | left | # Selects the 'top' frame |\n \"\"\"\n self.info(\"Selecting frame '%s'.\" % locator)\n element = self.find_element(locator)\n self.browser.switch_to.frame(element)\n\n @keyword\n def unselect_frame(self):\n \"\"\"Sets the top frame as the current frame.\n\n In practice cancels a previous `Select Frame` call.\n \"\"\"\n self.browser.switch_to.default_content()\n\n @keyword\n def current_frame_should_contain(self, text, loglevel='INFO'):\n \"\"\"Verifies that current frame contains ``text``.\n\n See `Page Should Contain` for explanation about the ``loglevel``\n argument.\n\n Prior to SeleniumLibrary 3.0 this keyword was named\n `Current Frame Contains`.\n \"\"\"\n if not self.is_text_present(text):\n self.log_source(loglevel)\n raise AssertionError(\"Frame should have contained text '%s' \"\n \"but did not.\" % text)\n self.info(\"Current frame contains text '%s'.\" % text)\n\n @keyword\n def current_frame_contains(self, text, loglevel='INFO'):\n \"\"\"Deprecated. 
Use `Current Frame Should Contain` instead.\"\"\"\n self.current_frame_should_contain(text, loglevel)\n\n @keyword\n def current_frame_should_not_contain(self, text, loglevel='INFO'):\n \"\"\"Verifies that current frame does not contain ``text``.\n\n See `Page Should Contain` for explanation about the ``loglevel``\n argument.\n \"\"\"\n if self.is_text_present(text):\n self.log_source(loglevel)\n raise AssertionError(\"Frame should not have contained text '%s' \"\n \"but it did.\" % text)\n self.info(\"Current frame did not contain text '%s'.\" % text)\n\n @keyword\n def frame_should_contain(self, locator, text, loglevel='INFO'):\n \"\"\"Verifies that frame identified by ``locator`` contains ``text``.\n\n See the `Locating elements` section for details about the locator\n syntax.\n\n See `Page Should Contain` for explanation about the ``loglevel``\n argument.\n \"\"\"\n if not self._frame_contains(locator, text):\n self.log_source(loglevel)\n raise AssertionError(\"Frame '%s' should have contained text '%s' \"\n \"but did not.\" % (locator, text))\n self.info(\"Frame '%s' contains text '%s'.\" % (locator, text))\n\n def _frame_contains(self, locator, text):\n element = self.find_element(locator)\n self.browser.switch_to.frame(element)\n self.info(\"Searching for text from frame '%s'.\" % locator)\n found = self.is_text_present(text)\n self.browser.switch_to.default_content()\n return found\n\n","sub_path":"src/SeleniumLibrary/keywords/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33145236","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\n\nfrom songs.models import Annotation, EntityContribution, Song\n\n\nclass SongForm(forms.ModelForm):\n class Meta:\n model = Song\n exclude = []\n\n\nContributionFormSet = inlineformset_factory(Song, EntityContribution,\n exclude=[], min_num=1,\n validate_min=True)\n\n\nclass AnnotationForm(forms.ModelForm):\n class Meta:\n model = Annotation\n exclude = []\n","sub_path":"songs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"482570455","text":"\"\"\"\n### Usage ###\npython -m venv py37\n\n# activate for mac\nsource py37/bin/activate\n# windows\npy37\\Scripts\\activate.bat\n\npip install -r requirements.txt\n\n# If you want to install the ChromeDriver directly from pip.\npip install chromedriver-binary\n\n# download chrome driver\n# If you choose to download, place the ChromeDriver directly in the working directory.\nhttps://chromedriver.chromium.org/downloads\n\n######################\nyour working directory\n data\n csv file\n py37 # virtual env\n chromedriver binary file\n selenium_spider.py\n requirements.txt\n######################\n\n# run script\npython selenium_spider.py\n\n# deactivate venv\n# mac\ndeactivate\n# windows\npy37\\Scripts\\deactivate.bat\n\n\nIf you are warned that the driver version is different,\nplease re-download the specified version of ChromeDriver.\n\nThis version of ChromeDriver only supports Chrome version 75\n\nhttps://chromedriver.chromium.org/downloads\n\"\"\"\n\nfrom selenium.webdriver import Chrome,ChromeOptions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support 
import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nimport csv\nimport time # needed for time.sleep() in get_code()\n\n# if you want to use chromedriver_binary\n# import chromedriver_binary\n# chromedriver_binary.add_chromedriver_to_path()\n\ndef set_web_driver():\n options = ChromeOptions()\n\n driver_path = \"chromedriver.exe\"\n\n # You can verify in headless mode by uncommenting the line below.\n # options.add_argument('--headless')\n options.add_argument('--incognito')\n\n\n # If you are using chromedriver _ binary\n # driver = Chrome(options=options)\n driver = Chrome(driver_path, options=options)\n\n base_url = 'http://www.spamhamspamspameggbaconspamspam.php'\n driver.get(base_url)\n return driver\n\ndef search_product(driver):\n driver.find_element_by_xpath('//input[@id=\"spam\"]').send_keys(1)\n driver.find_element_by_xpath('//div[@class=\"spam search\"]/input').click()\n\ndef pagenation(driver):\n try:\n driver.find_element_by_xpath('//input[@onclick=\"btnNext_OnClick()\"]').click()\n return True\n except:\n print('TAIS codes for all pages were scraped.')\n return False\n\ndef get_code(driver):\n\n TAIS_codes = []\n\n page = 1\n\n while True:\n print(f'page: {page}')\n try:\n WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,'//table[@class=\"spam table\"]')))\n except:\n time.sleep(1)\n\n table = driver.find_element_by_xpath('//table[@class=\"spam table\"]')\n\n for a in table.find_elements_by_xpath('//tr/td[@class=\"eggbacon\"]/a'):\n try:\n TAIS_codes.append(a.text)\n except:\n pass\n\n if pagenation(driver):\n page += 1\n else:\n break\n \n with open('data/tais_codes.csv', 'w', newline='') as f:\n csv_writer = csv.writer(f, delimiter=',')\n csv_writer.writerow([\"TAIS\"])\n for code in TAIS_codes:\n csv_writer.writerow([code])\n\n\n\nif __name__=='__main__':\n driver = set_web_driver()\n search_product(driver)\n get_code(driver)\n driver.quit()","sub_path":"case001/selenium_spider.py","file_name":"selenium_spider.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"518289003","text":"import fresh_tomatoes\nimport media\n\n\n# Create multiple instances of the Movie Class for my favorite movies\nblack_panther = media.Movie(\"Black Panther\",\n \"The Black Panther origin story\",\n \"https://upload.wikimedia.org/wikipedia/en/0/0c/Black_Panther_film_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=dxWvtMOGAhw\")\n\ntraffic = media.Movie(\"Traffic\",\n \"A fascinating look into the drug trade from all\"\n \" angles\",\n \"https://upload.wikimedia.org/wikipedia/en/0/0a/Traffic2000Poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=6TetUbh6jrU\")\n\nragnorak = media.Movie(\"Thor Ragnorak\",\n \"Thor battles the goddess of death\",\n \"https://upload.wikimedia.org/wikipedia/en/7/7d/Thor_Ragnarok_poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=v7MGUNV8MxU\")\n\ndark_knight = media.Movie(\"The Dark Knight\",\n \"Batman is faced with an enemy like no other\",\n \"https://upload.wikimedia.org/wikipedia/en/8/8a/Dark_Knight.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=EXeTwQWrcwY\")\n\nla_la_land = media.Movie(\"La La Land\",\n \"An aspiring actress meets a musician as\"\n \" they follow their dreams\",\n \"https://upload.wikimedia.org/wikipedia/en/a/ab/La_La_Land_%28film%29.png\", # NOQA\n \"https://www.youtube.com/watch?v=0pdqf4P9MB8\")\n\nforce_awakens = media.Movie(\"The Force Awakens\",\n \"In the latest Star Wars adventure, new heroes 
rise\",\n \"https://upload.wikimedia.org/wikipedia/en/a/a2/Star_Wars_The_Force_Awakens_Theatrical_Poster.jpg\", # NOQA\n \"https://www.youtube.com/watch?v=sGbxmsDFVnE\")\n\n\n# Put all instances in a list for use with open_movies_page function\nmovies = [black_panther, traffic, ragnorak, dark_knight, la_la_land,\n force_awakens]\n\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181346061","text":"\n# Create your views here.\nfrom django.views.generic.base import TemplateView\nfrom django.shortcuts import render\nimport json\n\nclass HomePageView(TemplateView):\n template_name = \"index.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\nclass LabelPageView(TemplateView):\n template_name = \"labels.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n def post(self, request, *args, **kwargs):\n data = open('labels.json').read()\n context = dict()\n #context['data'] = request.POST.get('location')\n json_data = json.loads(data)\n lst = json_data.get('data')\n context['data'] = lst\n\n return render(request, \"labels.html\", context)\n\nclass ResultPageView(TemplateView):\n template_name = \"results.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n url_list = [\n\"https://www.indeed.ca/cmp/The-Writers'-Exchange/jobs/Volunteer-Literacy-Mentor-9868b9476f67b296?q=%22Job+Type%3A+Volunteer%22&vjs=3\",\n\"https://www.indeed.ca/cmp/Pacific-Immigrant-Resources-Society/jobs/Childcare-Assistant-Volunteer-2c901cc1c6ad278b?q=%22Job+Type%3A+Volunteer%22&vjs=3\",\n\"https://www.indeed.ca/cmp/Pacific-Immigrant-Resources-Society/jobs/Ece-Childcare-Assistant-Volunteer-a70319ca8b7ed5e6?q=%22Job+Type%3A+Volunteer%22&vjs=3\",\n\"https://www.indeed.ca/cmp/Youth-Unlimited-Street-Life-East-Vancouver/jobs/Volunteer-Youth-Worker-cf82d1452d0e3c75?q=%22Job+Type%3A+Volunteer%22&vjs=3\"]\n context['url_list'] = url_list\n return context\n\n","sub_path":"Volunthere/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"502966172","text":"from CFBCursos.Banco_De_Dados.agenda_Funcoes import menuPrincipal, menuAtualizar, menuInserir\nfrom CFBCursos.Banco_De_Dados.agenda_Funcoes import menuConsultar, menuConsultarNomes, menuDeletar\n\n\nwhile True:\n print(15 * \"-=-\")\n menuPrincipal()\n print(15 * \"-=-\")\n opc = int(input(\"Digite uma opção: \"))\n if opc == 1:\n menuInserir()\n elif opc == 2:\n menuDeletar()\n elif opc == 3:\n menuAtualizar()\n elif opc == 4:\n menuConsultar()\n elif opc == 5:\n menuConsultarNomes()\n elif opc == 6:\n print(\"Programa finalizado!\")\n break\n else:\n print(\"Opção Invalida\")\n print(15 * \"-=-\")\n","sub_path":"CFBCursos/Banco_De_Dados/agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"517990169","text":"#!/usr/bin/python\n# coding: utf-8 -*-\n\n# Copyright (c) 2015, Jesse Keating \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = '''\n---\nmodule: server_action\nshort_description: Perform 
actions on Compute Instances from OpenStack\nauthor: OpenStack Ansible SIG\ndescription:\n - Perform server actions on an existing compute instance from OpenStack.\n This module does not return any data other than changed true/false.\n When I(action) is 'rebuild', then I(image) parameter is required.\noptions:\n server:\n description:\n - Name or ID of the instance\n required: true\n type: str\n wait:\n description:\n - If the module should wait for the instance action to be performed.\n type: bool\n default: 'yes'\n timeout:\n description:\n - The amount of time the module should wait for the instance to perform\n the requested action.\n default: 180\n type: int\n action:\n description:\n - Perform the given action. The lock and unlock actions always return\n changed as the servers API does not provide lock status.\n choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,\n rebuild]\n type: str\n required: true\n image:\n description:\n - Image the server should be rebuilt with\n type: str\n admin_password:\n description:\n - Admin password for server to rebuild\n type: str\n\nrequirements:\n - \"python >= 3.6\"\n - \"openstacksdk\"\n\nextends_documentation_fragment:\n- openstack.cloud.openstack\n'''\n\nEXAMPLES = '''\n# Pauses a compute instance\n- openstack.cloud.server_action:\n action: pause\n auth:\n auth_url: https://identity.example.com\n username: admin\n password: admin\n project_name: admin\n server: vm1\n timeout: 200\n'''\n\nfrom ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule\n\n\n_action_map = {'stop': 'SHUTOFF',\n 'start': 'ACTIVE',\n 'pause': 'PAUSED',\n 'unpause': 'ACTIVE',\n 'lock': 'ACTIVE', # API doesn't show lock/unlock status\n 'unlock': 'ACTIVE',\n 'suspend': 'SUSPENDED',\n 'resume': 'ACTIVE',\n 'rebuild': 'ACTIVE'}\n\n_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']\n\n\nclass ServerActionModule(OpenStackModule):\n deprecated_names = ('os_server_action', 'openstack.cloud.os_server_action')\n\n argument_spec = dict(\n server=dict(required=True, type='str'),\n action=dict(required=True, type='str',\n choices=['stop', 'start', 'pause', 'unpause',\n 'lock', 'unlock', 'suspend', 'resume',\n 'rebuild']),\n image=dict(required=False, type='str'),\n admin_password=dict(required=False, type='str'),\n )\n module_kwargs = dict(\n required_if=[('action', 'rebuild', ['image'])],\n supports_check_mode=True,\n )\n\n def run(self):\n os_server = self._preliminary_checks()\n self._execute_server_action(os_server)\n # for some reason we don't wait for lock and unlock before exit\n if self.params['action'] not in ('lock', 'unlock'):\n if self.params['wait']:\n self._wait(os_server)\n self.exit_json(changed=True)\n\n def _preliminary_checks(self):\n # Using Munch object for getting information about a server\n os_server = self.conn.get_server(self.params['server'])\n if not os_server:\n self.fail_json(msg='Could not find server %s' % self.params['server'])\n # check mode\n if self.ansible.check_mode:\n self.exit_json(changed=self.__system_state_change(os_server))\n # examine special cases\n # lock, unlock and rebuild don't depend on state, just do it\n if self.params['action'] not in ('lock', 'unlock', 'rebuild'):\n if not self.__system_state_change(os_server):\n self.exit_json(changed=False)\n return os_server\n\n def _execute_server_action(self, os_server):\n if self.params['action'] == 'rebuild':\n return self._rebuild_server(os_server)\n action_name = self.params['action'] + \"_server\"\n try:\n 
func_name = getattr(self.conn.compute, action_name)\n except AttributeError:\n self.fail_json(\n msg=\"Method %s wasn't found in OpenstackSDK compute\" % action_name)\n func_name(os_server)\n\n def _rebuild_server(self, os_server):\n # rebuild should ensure the image exists\n try:\n image = self.conn.get_image(self.params['image'])\n except Exception as e:\n self.fail_json(\n msg=\"Can't find the image %s: %s\" % (self.params['image'], e))\n if not image:\n self.fail_json(msg=\"Image %s was not found!\" % self.params['image'])\n # admin_password is required by SDK, but not required by Nova API\n if self.params['admin_password']:\n self.conn.compute.rebuild_server(\n server=os_server,\n name=os_server['name'],\n image=image['id'],\n admin_password=self.params['admin_password']\n )\n else:\n self.conn.compute.post(\n '/servers/{server_id}/action'.format(\n server_id=os_server['id']),\n json={'rebuild': {'imageRef': image['id']}})\n\n def _wait(self, os_server):\n \"\"\"Wait for the server to reach the desired state for the given action.\"\"\"\n # Using Server object for wait_for_server function\n server = self.conn.compute.find_server(self.params['server'])\n self.conn.compute.wait_for_server(\n server,\n status=_action_map[self.params['action']],\n wait=self.params['timeout'])\n\n def __system_state_change(self, os_server):\n \"\"\"Check if system state would change.\"\"\"\n return os_server.status != _action_map[self.params['action']]\n\n\ndef main():\n module = ServerActionModule()\n module()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/openstack/cloud/plugins/modules/server_action.py","file_name":"server_action.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"433638367","text":"\"\"\"\r\nLittle casino roulette game (version 1)\r\nAuthor: Jonathan Meresse\r\nDate: 5 May 2020\r\nSmall guessing game on an integer drawn at random\r\nby the program within a given interval\r\n\r\n\"\"\"\r\nimport random\r\n# module imports\r\nregles=(print(\"Pick a number between 0 and 16\\n\"\r\n \"If you bet between 0 and 12 and the ball lands on the right number you will win\\n\"\r\n \"12 times your stake. 
On 13 or 14, 2 times your stake if the number is even or odd.\\n\"\r\n \"If you bet on 15 and the ball lands on 1,3,5,7,9,12 you win 2 times your stake\\n\"\r\n \"If you bet on 16 and the ball lands on 2,4,6,8,10,11 you win 2 times your stake\"))\r\nmise = int(input(\"Bet on a number between 0 and 16:\"))\r\nRoulette = random.randint(0,15)\r\n#Roulette = int(input())\r\nArgent = 10\r\nRoulette15 = [1,3,5,7,9,12]\r\nRoulette16 = [2,4,6,8,10,11]\r\nwhile (mise <0 or mise >16):\r\n print (\"Please enter a valid value between 0 and 16\")\r\n mise = int(input())\r\nif mise >=0 and mise <= 12 and mise == Roulette:\r\n Argent = Argent * 12\r\n print (\"Your wallet is now:\",Argent)\r\n print (\"The ball landed on\",Roulette)\r\nelif mise == 13 and Roulette % 2 == 0 :\r\n Argent = Argent * 2\r\n print (\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\nelif mise == 14 and Roulette % 2 != 0 :\r\n Argent = Argent * 2\r\n print (\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\nelif mise == 15:\r\n if Roulette in Roulette15:\r\n Argent = Argent * 2\r\n print(\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\n else:\r\n Argent = Argent - 10\r\n print(\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\nelif mise == 16:\r\n if Roulette in Roulette16:\r\n Argent = Argent * 2\r\n print(\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\n else:\r\n Argent = Argent - 10\r\n print(\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\nelse:\r\n Argent = Argent - 10\r\n print (\"Your wallet is now:\",Argent)\r\n print(\"The ball landed on\", Roulette)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Roulette.py","file_name":"Roulette.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284229645","text":"\nif \"bpy\" in locals():\n\timport imp\n\timp.reload(gp_draw)\n\timp.reload(objects_organise)\n\n\timp.reload(op_fence_clear)\n\timp.reload(op_fence_draw)\n\timp.reload(op_file_copy_unity_script)\n\timp.reload(op_file_export)\n\timp.reload(op_file_export_recent)\n\timp.reload(op_file_export_recent_clear)\n\timp.reload(op_file_import)\n\timp.reload(op_file_open_folder)\n\timp.reload(op_pivot_ground)\n\timp.reload(op_tool_geometry_fix)\n\timp.reload(op_tool_pack_bundles)\n\t\n\timp.reload(modifiers) \n\timp.reload(platforms)\n\n\nelse:\n\tfrom . import gp_draw\n\tfrom . import objects_organise\n\n\tfrom . import op_fence_clear\n\tfrom . import op_fence_draw\n\tfrom . import op_file_copy_unity_script\n\tfrom . import op_file_export\n\tfrom . import op_file_export_recent\n\tfrom . import op_file_export_recent_clear\n\tfrom . import op_file_import\n\tfrom . import op_file_open_folder\n\tfrom . import op_pivot_ground\n\tfrom . import op_tool_geometry_fix\n\tfrom . import op_tool_pack_bundles\n\n\tfrom . import modifiers\n\tfrom . 
import platforms\n\n\nimport bpy, bmesh\nimport os\nimport mathutils\nfrom mathutils import Vector\nimport math\nimport bpy.utils.previews\n\n\n\nimport os\nimport traceback\n\n\n\nbl_info = {\n\t\"name\": \"FBX Bundle\",\n\t\"description\": \"Export object selections in FBX bundles\",\n\t\"author\": \"renderhjs\",\n\t\"blender\": (2, 80, 0),\n\t\"version\": (1, 5, 1),\n\t\"category\": \"3D View\",\n\t\"location\": \"View3D\",\n\t\"warning\": \"\",\n\t\"wiki_url\": \"http://renderhjs.net/fbxbundle/\",\n\t\"tracker_url\": \"\",\n}\n\nfrom bpy.props import (\n\tStringProperty,\n\tBoolProperty,\n\tIntProperty,\n\tFloatProperty,\n\tFloatVectorProperty,\n\tEnumProperty,\n\tPointerProperty,\n)\n\n\n\nclass Panel_Preferences(bpy.types.AddonPreferences):\n\tbl_idname = __name__\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\n\n\t\tbox = layout.box()\n\t\trow = box.row()\n\t\trow.label(text=\"Unity Editor script\")\n\t\trow.operator(op_file_copy_unity_script.op.bl_idname, icon='FILE_NEW')\n\t\tcol = box.column(align=True)\n\t\tcol.label(text=\"Copies a Unity Editor script to automatically assign\")\n\t\tcol.label(text=\"existing materials by name matching names in Blender\")\n\n\t\tbox = layout.box()\n\t\trow = box.row()\n\t\trow.label(text=\"Keyboard shortcuts\")\n\t\tcol = box.column(align=True)\n\t\tcol.label(text=\"Ctrl + E = Export selected\")\n\t\tcol.label(text=\"Ctrl + Shift + E = Export recent\")\n\n\n\nclass FBXBundleSettings(bpy.types.PropertyGroup):\n\tpath: bpy.props.StringProperty (\n\t\tname=\"Output Path\",\n\t\tdefault=\"\",\n\t\tdescription=\"Define the path where to export or import from\",\n\t\tsubtype='DIR_PATH'\n\t)\n\tpadding: bpy.props.FloatProperty (\n\t\tname=\"Padding\",\n\t\tdefault=0.15,\n\t\tmin = 0,\n\t\tdescription=\"Padding for fences or space bundling\",\n\t\tsubtype='DISTANCE'\n\t)\n\tcollapseBundles: bpy.props.BoolProperty (\n\t\tname=\"Collapse\",\n\t\tdefault=False,\n\t\tdescription=\"Compact list view\"\n\t)\n\tinclude_children: bpy.props.BoolProperty (\n\t\tname=\"Incl. 
Children\",\n\t\tdefault=False,\n\t\tdescription=\"Include nested children in bundles, e.g parent or group.\"\n\t)\n\trecent: bpy.props.StringProperty (\n\t\tname=\"Recent export\",\n\t\tdefault=\"\"\n\t)\n\n\n\tmode_bundle: bpy.props.EnumProperty(items= \n\t\t[('NAME', 'Name', \"Bundle by matching object names\"), \n\t\t('PARENT', 'Parent', \"Bundle by the parent object\"), \n\t\t# ('SPACE', 'Space', \"Bundle by shared space\"), \n\t\t('COLLECTION', 'Collection', \"Bundle by 'Collection'\"),\n\t\t('COLLECTION_INSTANCE', 'Collection Instance', \"Bundle by 'Collection'\"),\n\t\t('MATERIAL', 'Material', \"Bundle by matching material names\"),\n\t\t('SCENE', 'Scene', \"Bundle by current scene\")\n\t\t], name = \"Bundle Mode\", default = 'NAME'\n\t)\n\tmode_pivot: bpy.props.EnumProperty(items=[\n\t\t('OBJECT_FIRST', 'First Name', \"Pivot at the first object sorted by name\"), \n\t\t('OBJECT_LOWEST', 'Lowest Object', \"Pivot at the lowest Z object's pivot\"),\n\t\t('BOUNDS_BOTTOM', 'Bottom Center', \"Pivot at the bottom center of the bounds of the bundle\"), \n\t\t('SCENE', 'Scene 0,0,0', \"Pivot at the Scene center 0,0,0'\"),\n\t\t('PARENT', 'Parent', \"Pivot from the parent object\"),\n\t\t('EMPTY', 'Empty Gizmo', \"Empty gizmo object of: Arrow, Plain Axes, Single Arrow\")\n\t\t], name = \"Pivot From\", default = 'OBJECT_FIRST'\n\t)\n\ttarget_platform: bpy.props.EnumProperty(items= \n\t\t[\t\n\t\t\t('UNITY', 'Unity ', 'Unity engine export, fixes axis rotation issues'),\n\t\t\t('UNREAL', 'Unreal', 'Unreal engine export'),\n\t\t\t('BLENDER', 'Collada', 'Default Blender *.DAE export'),\n\t\t\t('GLTF', 'glTF', 'GL Transmission Format'),\n\t\t\t('OBJ', 'OBJ', 'OBJ')\n\t\t], \n\t\tdescription=\"Target platform for the FBX exports.\",\n\t\tname = \"Target Platform\", \n\t\tdefault = 'UNITY'\n\t)\n\n\n\nclass Panel_Core(bpy.types.Panel):\n\tbl_idname = \"FBX_bundle_panel_core\"\n\tbl_label = \" \"\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = \"FBX Bundle\"\n\tbl_options = {'HIDE_HEADER'}\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tbox = layout.box()\n\n\t\trow = box.row(align=True)\n\t\trow.label(text='Settings', icon='PREFERENCES')\n\n\t\ticon = icon_get(bpy.context.scene.FBXBundleSettings.target_platform.lower())\n\t\trow.prop(bpy.context.scene.FBXBundleSettings, \"target_platform\", text=\"\", icon_value=icon)\n\t\trow.operator(\"wm.url_open\", text=\"\", icon='QUESTION').url = \"http://renderhjs.net/fbxbundle/#settings_platform\"\n\n\n\t\tmode = bpy.context.scene.FBXBundleSettings.target_platform\n\n\t\tif bpy.app.debug_value != 0:\n\t\t\trow = box.row(align=True)\n\t\t\trow.alert = True\n\t\t\trow.operator(op_debug_setup.bl_idname, text=\"Setup\", icon='COLOR')\n\t\t\trow.operator(op_debug_lines.bl_idname, text=\"Draw\", icon='GREASEPENCIL')\n\n\n\t\tcol = box.column(align=True)\n\n\t\trow = col.row(align=True)\n\t\tif context.scene.FBXBundleSettings.path == \"\":\n\t\t\trow.alert = True\n\t\trow.prop(context.scene.FBXBundleSettings, \"path\", text=\"\")\n\t\tif context.scene.FBXBundleSettings.path != \"\":\n\t\t\trow = row.row(align=True)\n\t\t\trow.operator(op_file_open_folder.op.bl_idname, text=\"\", icon='FILE_FOLDER')\n\n\t\trow = col.row(align=True)\n\t\trow.prop(context.scene.FBXBundleSettings, \"mode_bundle\", text=\"Bundle by\", icon='GROUP')\n\t\trow.operator(\"wm.url_open\", text=\"\", icon='QUESTION').url = \"http://renderhjs.net/fbxbundle/#settings_bundle\"\n\n\n\t\trow = col.row(align=True)\n\t\trow.prop(context.scene.FBXBundleSettings, 
\"mode_pivot\", text=\"Pivot at\", icon='OUTLINER_DATA_EMPTY', expand=False)\n\t\trow.operator(\"wm.url_open\", text=\"\", icon='QUESTION').url = \"http://renderhjs.net/fbxbundle/#settings_pivot\"\n\n\n\t\tcol = box.column(align=True)\n\t\trow = col.row(align=True)\n\t\trow.prop(context.scene.FBXBundleSettings, \"padding\", text=\"Padding\", expand=True)\n\t\trow.prop(context.scene.FBXBundleSettings, \"include_children\", text=\"Include children\", expand=True)\n\n\t\t# Warnings\n\n\t\tif context.space_data.local_view:\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=\"Can't export in local view mode.\", icon='CANCEL')\n\n\t\tif context.active_object and context.active_object.mode != 'OBJECT':\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=\"Requires object mode to export.\", icon='CANCEL')\n\n\t\tif context.scene.FBXBundleSettings.path == \"\":\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=\"No output path defined.\", icon='CANCEL')\n\n\t\telif mode not in platforms.platforms:\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=\"Platform not implemented\", icon='CANCEL')\n\t\t\n\t\telif context.scene.FBXBundleSettings.mode_bundle == 'GROUP' and len(bpy.data.groups) == 0:\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=\"No groups available\", icon='CANCEL')\n\n\t\telif not platforms.platforms[mode].is_valid()[0]:\n\t\t\tbox = col.box()\n\t\t\tbox.label(text=platforms.platforms[mode].is_valid()[1], icon='CANCEL')\t\t\t\n\n\t\t\n\t\t\n\n\nclass Panel_Tools(bpy.types.Panel):\n\tbl_idname = \"FBX_bundle_panel_tools\"\n\tbl_label = \"Tools\"\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = \"FBX Bundle\"\n\tbl_context = \"objectmode\"\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tcol = layout.column()\n\n\t\t\n\n\n\t\t\n\t\t# Get bundles\n\t\tbundles = objects_organise.get_bundles()\n\n\t\trow = col.row(align=True)\n\t\trow.scale_y = 1.85\n\t\trow.operator(op_fence_draw.op.bl_idname, text=\"Draw Fences\", icon='SELECT_SET')\n\t\trow.operator(op_fence_clear.op.bl_idname, text=\"\", icon='PANEL_CLOSE')\n\n\t\tcol.separator()\n\n\t\tcol = col.column(align=True)\n\n\t\tcol.operator(op_pivot_ground.op.bl_idname, text=\"Pivot at Ground\", icon='OUTLINER_DATA_EMPTY')\n\t\tcol.operator(op_tool_geometry_fix.op.bl_idname, text=\"Fix imp. 
Geometry\", icon='MESH_ICOSPHERE')\n\t\t\n\t\tif bpy.app.debug_value != 0:\n\t\t\tcol.operator(op_tool_pack_bundles.op.bl_idname, text=\"Pack & Arrange\", icon='UGLYPACKAGE')\n\t\t\n\n\n\t\t\trow = layout.row(align=True)\n\t\t\trow.alert =True\n\t\t\trow.operator(op_fence_clear.op.bl_idname, text=\"Pack\", icon='IMGDISPLAY')\n\t\t\trow.operator(op_fence_clear.op.bl_idname, text=\"Align Z\", icon='TRIA_DOWN_BAR')\n\t\t\tlayout.separator()\n\n\n\n\n\nclass Panel_Modifiers(bpy.types.Panel):\n\tbl_idname = \"FBX_bundle_panel_modifiers\"\n\tbl_label = \"Modifiers\"\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = \"FBX Bundle\"\n\tbl_context = \"objectmode\"\n\tbl_options = {'DEFAULT_CLOSED'}\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tcol = layout.column()\n\n\t\tfor modifier in modifiers.modifiers:\n\t\t\tbox = col.box()\n\t\t\tmodifier.draw(box)\n\n\t\tr = col.row()\n\t\tr.enabled = False\n\n\t\tcount = 0\n\t\tfor modifier in modifiers.modifiers:\n\t\t\tif modifier.get(\"active\"):\n\t\t\t\tcount+=1\n\n\t\tif count > 0:\n\t\t\tr.label(text=\"{}x modifiers are applied upon export\".format(count))\n\n\nclass Panel_Files(bpy.types.Panel):\n\tbl_idname = \"FBX_bundle_panel_files\"\n\tbl_label = \"Bundles\"\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'UI'\n\tbl_category = \"FBX Bundle\"\n\tbl_context = \"objectmode\"\n\t# bl_options = {'HIDE_HEADER'}\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\t\n\t\t# Get bundles\n\t\tbundles = objects_organise.get_bundles()\n\n\t\ticon = icon_get(bpy.context.scene.FBXBundleSettings.target_platform.lower())\n\n\n\t\tcol = layout.column(align=True)\t\n\t\trow = col.row(align=True)\n\n\t\tsplit = row.split(factor=0.4, align=True)\n\n\t\tc = split.column(align=True)\n\t\tc.scale_y = 1.85\n\t\tc.operator(op_file_import.op.bl_idname, text=\"Import\", icon='IMPORT')\n\t\t\n\t\tc = split.column(align=True)\n\t\tc.scale_y = 1.85\n\t\tc.operator(op_file_export.op.bl_idname, text=\"Export {}x\".format(len(bundles)), icon_value=icon)\n\t\t\n\n\t\tif len(bpy.context.scene.FBXBundleSettings.recent) > 0:\n\t\t\tif len(objects_organise.recent_load_objects()) > 0:\n\t\t\t\trow = col.row(align=True)\n\t\t\t\trow.scale_y = 1.3\n\n\t\t\t\tr = row.row(align=True)\n\t\t\t\tr.operator(op_file_export_recent.op.bl_idname, text=objects_organise.recent_get_label(), icon='RECOVER_LAST')\n\t\t\t\t\n\t\t\t\tr = r.row(align=True)\n\t\t\t\t# r.alert = True\n\t\t\t\tr.operator(op_file_export_recent_clear.op.bl_idname, text=\"\", icon='X')\n\n\n\n\t\tlayout.separator()\n\n\t\t\n\t\tmode = bpy.context.scene.FBXBundleSettings.target_platform\n\n\t\t\n\t\tif(len(bundles) > 0):\n\t\t\t# box_files = layout.box()\n\t\t\t# box_files.active = False\n\t\t\trow = layout.row()\n\t\t\tif len(bundles) == 1:\n\t\t\t\trow.label(text = \"1x Bundle\")\n\t\t\telse:\n\t\t\t\trow.label(text = \"{}x Bundles\".format(len(bundles)))\n\n\t\t\trow.prop(context.scene.FBXBundleSettings, \"collapseBundles\", text=\"Compact\", expand=True)\n\n\n\t\t\tfolder = os.path.dirname( bpy.path.abspath( bpy.context.scene.FBXBundleSettings.path ))\n\n\t\t\t# Display bundles\n\t\t\tfor fileName,objects in bundles.items():\n\n\t\t\t\t# row = layout.row(align=True)\n\t\t\t\tbox = layout.box()\n\t\t\t\t# box.scale_y = 0.8\n\t\t\t\tcolumn = box.column(align=True)\n\n\t\t\t\trow = column.row(align=True)\n\t\t\t\tif(fileName == \"unknown\"):\n\t\t\t\t\trow.alert = True\n\n\t\t\t\t# Process object name via modifiers\n\t\t\t\tpath_folder = folder\n\t\t\t\tpath_name = 
fileName\n\t\t\t\tfor modifier in modifiers.modifiers:\n\t\t\t\t\tif modifier.get(\"active\"):\n\t\t\t\t\t\tpath_folder = modifier.process_path(path_name, path_folder)\n\t\t\t\t\t\tpath_name = modifier.process_name(path_name)\n\t\n\t\t\t\t# Show label for FBX bundle\n\t\t\t\tlabel = fileName\n\t\t\t\tif mode in platforms.platforms:\n\t\t\t\t\tlabel = platforms.platforms[mode].get_filename(path_name)\n\n\t\t\t\tif(len(objects) > 1):\n\t\t\t\t\tlabel = \"{} {}x\".format(label, len(objects));\n\n\t\t\t\trow.operator(op_select.bl_idname,icon_value=icon, emboss=False, text=label).key = fileName\n\t\t\t\tr = row.row(align=True)\n\t\t\t\tr.alert = True\n\t\t\t\tr.operator(op_remove.bl_idname,text=\"\", icon='X').key = fileName\n\n\n\t\t\t\tif not context.scene.FBXBundleSettings.collapseBundles:\n\t\t\t\t\tfor i in range(0,len(objects)):\n\t\t\t\t\t\trow = column.row(align=True)\n\t\t\t\t\t\trow.label(text=objects[i].name)\n\n\n\n\n\n\nclass op_debug_lines(bpy.types.Operator):\n\tbl_idname = \"fbxbundle.debug_lines\"\n\tbl_label = \"Debug\"\n\n\tdef execute(self, context):\n\t\tprint (\"Debug Operator\")\n\n\t\tgp_draw.draw_debug()\n\n\t\treturn {'FINISHED'}\n\n\nclass op_debug_setup(bpy.types.Operator):\n\tbl_idname = \"fbxbundle.debug_setup\"\n\tbl_label = \"Setup\"\n\n\tdef execute(self, context):\n\t\tprint (\"Debug Setup Operator\")\n\n\t\t# Disable grid\n\t\tbpy.context.space_data.show_axis_x = False\n\t\tbpy.context.space_data.show_axis_y = False\n\t\tbpy.context.space_data.show_axis_z = False\n\t\tbpy.context.space_data.grid_lines = 6\n\t\tbpy.context.space_data.grid_subdivisions = 1\n\t\tbpy.context.space_data.grid_scale = 1\n\t\tbpy.context.space_data.show_floor = False\n\n\t\tbpy.context.space_data.show_all_objects_origin = True\n\n\n\t\treturn {'FINISHED'}\n\n\nclass op_select(bpy.types.Operator):\n\tbl_idname = \"fbxbundle.select\"\n\tbl_label = \"Select\"\n\tkey: bpy.props.StringProperty (name=\"Key\")\n\tdef execute(self, context):\n\t\tbundles = objects_organise.get_bundles()\n\t\tif self.key in bundles:\n\t\t\tbpy.ops.object.select_all(action='DESELECT')\n\t\t\tfor obj in bundles[self.key]:\n\t\t\t\tobj.select_set(state = True)\n\t\treturn {'FINISHED'}\n\n\n\nclass op_remove(bpy.types.Operator):\n\tbl_idname = \"fbxbundle.remove\"\n\tbl_label = \"Remove\"\n\tkey: bpy.props.StringProperty (name=\"Key\")\n\tdef execute(self, context):\n\t\tbundles = objects_organise.get_bundles()\n\t\tif self.key in bundles:\n\t\t\tfor obj in bundles[self.key]:\n\t\t\t\tobj.select_set(state = False)\n\t\treturn {'FINISHED'}\n\n\n\n\ndef icon_get(name):\n\tif name not in preview_icons:\n\t\tprint(\"Icon '{}' not found \".format(name))\n\t# TODO fix broken icons. Don't understand why it can't find OBJ?\n\treturn preview_icons[name].icon_id\n\n\npreview_icons = None\ndef icon_register(fileName):\n\tname = fileName.split('.')[0] # Don't include file extension\n\ticons_dir = os.path.join(os.path.dirname(__file__), \"icons\")\n\tpreview_icons.load(name, os.path.join(icons_dir, fileName), 'IMAGE')\n\ndef icons_unregister():\n\tglobal preview_icons\n\tbpy.utils.previews.remove(preview_icons)\n\tpreview_icons = None\n\n\nfrom . 
import auto_load\nauto_load.init()\n\naddon_keymaps = []\n\nclasses = (Panel_Preferences, FBXBundleSettings, Panel_Core, Panel_Tools, Panel_Modifiers, Panel_Files, op_debug_lines, op_debug_setup, op_select, op_remove)\n\ndef register():\n\t# bpy.utils.register_module(__name__)\n\n\tauto_load.register()\n\tfor cls in classes:\n\t\tbpy.utils.register_class(cls)\n\n\t# Register scene settings\n\tbpy.types.Scene.FBXBundleSettings= bpy.props.PointerProperty(type=FBXBundleSettings)\n\n\t# Register modifier settings\n\tfor modifier in modifiers.modifiers:\n\t\tprint(\"loop name: {}\".format(modifier.__module__))\n\t\tmodifier.register()\n\n\t# Register Icons\n\tglobal preview_icons\n\tpreview_icons = bpy.utils.previews.new()\n\n\ticons = [\n\t\t\"unity.png\", \n\t\t\"unreal.png\", \n\t\t\"blender.png\",\n\t\t\"gltf.png\",\n\t\t\"obj.png\"\n\t]\n\tfor icon in icons:\n\t\ticon_register(icon)\n\n\t# handle the keymap\n\tkm = bpy.context.window_manager.keyconfigs.addon.keymaps.new(name='Object Mode', space_type='EMPTY')\n\tkmi = km.keymap_items.new(op_file_export.op.bl_idname, 'E', 'PRESS', ctrl=True, shift=False)\n\tkmi = km.keymap_items.new(op_file_export_recent.op.bl_idname, 'E', 'PRESS', ctrl=True, shift=True)\n\t# kmi.properties.total = 4\n\taddon_keymaps.append(km)\n\n\n\n\n\ndef unregister():\n\t# bpy.utils.unregister_module(__name__)\n\tfor cls in reversed(classes):\n\t\t# print(\"unregister \" + cls)\n\t\tbpy.utils.unregister_class(cls)\n\tauto_load.unregister()\n\n\t#Unregister Settings\n\tdel bpy.types.Scene.FBXBundleSettings\n\n\t# Unregister modifier settings\n\tfor modifier in modifiers.modifiers:\n\t\tmodifier.unregister()\n\n\t# Remove icons\n\ticons_unregister()\n\n\t# handle the keymap\n\tfor km in addon_keymaps:\n\t\tbpy.context.window_manager.keyconfigs.addon.keymaps.remove(km)\n\tdel addon_keymaps[:]\n\n\nif __name__ == \"__main__\":\n\tregister()\n\n","sub_path":"addons/blender-addon-fbx-bundle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"78258790","text":"import yaml\nimport json\nimport re\nimport codecs\nfrom collections import OrderedDict\n\n# Configure pyyaml internals to support OrderedDict\ndef represent_odict(dumper, instance):\n return dumper.represent_mapping('tag:yaml.org,2002:map', instance.items())\n\ndef construct_odict(loader, node):\n return OrderedDict(loader.construct_pairs(node))\n\n# Generate alis_api.yaml\ndef generate_api_doc():\n with open('alis_api.yaml', 'w') as api_doc:\n with open('api-template.yaml', 'r') as base_yaml_file:\n for base_yaml_line in base_yaml_file:\n fix_yaml_line = base_yaml_line.replace('!', '')\n if re.match(\" LambdaRole:$\", base_yaml_line):\n return\n api_doc.write(fix_yaml_line)\n\ndef prepare_api_doc():\n api_yaml = open('alis_api.yaml')\n data = yaml.load(api_yaml)\n output_json = OrderedDict(data['Resources']['RestApi']['Properties']['DefinitionBody'])\n\n # Remove the unnecessary Swagger entries for each URL\n paths = list(output_json['paths'].keys())\n\n for path in paths:\n if 'get' in output_json['paths'][path].keys():\n del output_json['paths'][path]['get']['x-amazon-apigateway-integration']\n if 'post' in output_json['paths'][path].keys():\n del output_json['paths'][path]['post']['x-amazon-apigateway-integration']\n if 'put' in output_json['paths'][path].keys():\n del output_json['paths'][path]['put']['x-amazon-apigateway-integration']\n if 'delete' in output_json['paths'][path].keys():\n del 
output_json['paths'][path]['delete']['x-amazon-apigateway-integration']\n\n    # Insert the fixed-value fields\n    output_json['info']['title'] = 'alisapi'\n    output_json['basePath'] = '/api'\n    output_json['host'] = 'alis.to'\n    print('Enter the version for the api docs')\n    version = input()\n    output_json['info']['version'] = str(version)\n\n    # Create the JSON file (use a context manager so the handle is closed)\n    with open('alis_api.json', 'w') as f:\n        json.dump(output_json, f, ensure_ascii=False, indent=2)\n\n    # Convert the cleaned-up Python object back to YAML\n    with codecs.open('alis_api.yaml', 'w', 'utf-8') as f:\n        yaml.dump(output_json, f, encoding='utf-8', allow_unicode=True, default_flow_style=False)\n\nyaml.add_representer(OrderedDict, represent_odict)\nyaml.add_constructor('tag:yaml.org,2002:map', construct_odict)\ngenerate_api_doc()\nprepare_api_doc()\n","sub_path":"generate_api_doc.py","file_name":"generate_api_doc.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"484495363","text":"import os\nimport pickle\nfrom glob import glob\n\n\ndef getting_filenames(path_to_dir=os.getcwd()):\n    folders = [x.path for x in os.scandir(path_to_dir) if x.is_dir()]\n    file_names = []\n\n    for character_folder in folders:\n        for x in os.walk(character_folder):\n            for y in glob(os.path.join(x[0], '*.*')):\n                file_names.append(\"/\".join(y.split('/')[-2:])[:-4])\n\n    return file_names\n\n\nif __name__ == '__main__':\n    filenames = getting_filenames()\n    with open('filenames_test.pickle', 'wb') as h:\n        pickle.dump(filenames, h)\n","sub_path":"text preprocessing/making_filename_pickles.py","file_name":"making_filename_pickles.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"640513833","text":"from __future__ import print_function\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport datetime\nimport pandas as pd\nimport json\nimport schedule\n\n###############################################################################\n#                 Stock crawler: value-style stock screening                  #\n###############################################################################\n\n'''\nReference: https://bit.ly/2CDyZPz\n1. Price-to-book ratio < 0.7\n2. Price-to-earnings ratio < 13\n3. Dividend yield > 3 (%)\n4. Skip big market drops (the screen above must return more than 100 stocks)\n5. \"Listed\" stocks priced at 10 TWD or above\n'''\n\ndef job():\n    \n    ##########  Fetch P/E and P/B ratios from the Market Observation Post System ##########\n    url = 'https://www.twse.com.tw/exchangeReport/BWIBBU_d?response=json&date=&selectType=&_=' + str(time.time())\n    print(url)\n    list_req = requests.get(url)\n    soup = BeautifulSoup(list_req.content, \"html.parser\")\n    getjson=json.loads(soup.text)\n    # print(getjson)\n    ##########  The response is tabular, so a dataframe is easier to work with ##########\n    stockdf = pd.DataFrame(getjson['data'],columns=[\"證券代號\",\"證券名稱\",\"殖利率(%)\",\"股利年度\",\"本益比\",\"股價淨值比\",\"財報年/季\"])\n    PBR = pd.to_numeric(stockdf['股價淨值比'], errors='coerce') < 0.7 # price-to-book ratio below 0.7\n    EPS = pd.to_numeric(stockdf['本益比'], errors='coerce') < 13 # price-to-earnings ratio below 13\n    Yield = pd.to_numeric(stockdf[\"殖利率(%)\"], errors='coerce') > 5 # dividend yield above 5 (%)\n    candidate= stockdf[(PBR & EPS & Yield)] # keep only the stocks that satisfy all three filters\n    print(candidate)\n    elected='' # stocks that pass every check end up here\n    if len(candidate) > 100 : # make sure the screen returned more than 100 stocks\n        for i in candidate['證券代號']: # look up the price of each ticker, one at a time\n            url = 'https://tw.stock.yahoo.com/q/q?s=' + i\n            list_req = requests.get(url)\n            soup = BeautifulSoup(list_req.content, \"html.parser\")\n            getstock= soup.find('b').text\n            if float(getstock) > 10: # a stock is selected if its price is above 10 TWD\n                elected = elected + i + '\\n'\n    else:\n        elected='Fewer than 100 stocks matched; taking no action'\n    \n    ##########  Show the result ##########  \n    if elected != '':# check whether the result is empty\n        return \"Value screening results:\\n\"+ elected\n    else:\n        return \"Value screening found no stocks worth buying\"\n\nprint(job())\n\n\n\n","sub_path":"test_folder/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"532442649","text":"import matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nfrom PIL import Image as pil_image\n\n\"\"\" The absolute path indicates the location of the folder storing the test sets.\n    In the future, a relative path will store the prefix. \"\"\"\nprefix = '/home/siqi/Desktop/dh/data/test_latest/images/'\n\n# input is the image path; output is a numpy matrix\ndef processimg(imgpath):\n    im_cv2 = cv2.imread(imgpath)\n    # reorder OpenCV's BGR channels into RGB\n    im_cv2 = np.flip(im_cv2, 2)\n    return im_cv2\n\n\"\"\" The input is image number. 
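processimg above reverses the channel axis with np.flip to turn OpenCV's BGR output into RGB. A minimal sketch of the more idiomatic conversion — load_rgb is a hypothetical helper, not part of this repo, and it assumes only that the path points at a readable image:

import cv2

def load_rgb(imgpath):
    im = cv2.imread(imgpath)  # OpenCV decodes images in BGR channel order
    if im is None:  # cv2.imread returns None instead of raising on a bad path
        raise FileNotFoundError(imgpath)
    return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # explicit BGR -> RGB conversion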
Output for function pathtoim are three parts from training set(realA_im(input), realB_im, realB_im_part2)\ntwo parts from testing results sets are fakeB_im and fakeB_im_part2 \"\"\"\ndef pathtoim(im_num):\n realA_path = prefix + str(im_num) + '_real_A.png'\n realA_im = processimg(realA_path)\n realB_path = prefix + str(im_num) + '_real_B.png'\n realB_im = processimg(realB_path)\n fakeB_path = prefix + str(im_num) + '_fake_B.png'\n fakeB_im = processimg(fakeB_path)\n realB_path_part2 = prefix + 'part2_' + str(im_num) + '_real_B.png'\n realB_im_part2 = processimg(realB_path_part2)\n fakeB_path_part2 = prefix + 'part2_' + str(im_num) + '_fake_B.png'\n fakeB_im_part2 = processimg(fakeB_path_part2)\n return realA_im, realB_im, fakeB_im, realB_im_part2, fakeB_im_part2\n\n# set the figsize for better visualisation effect\n# an example of using subplot\n# the image number is 10\nfig = plt.figure(figsize=(15,4*10))\nrealA_im, realB_im, fakeB_im, realB_im_part2, fakeB_im_part2 = pathtoim(10)\n\n\"\"\"\nthe following two lines gives an example of saving numpy mat to image with\npng extension using the relative path \n\"\"\"\nrealA_im_array = pil_image.fromarray(realA_im)\nrealA_im_array.save('./../test.png')\n\nplt.subplot(451)\nplt.imshow(realA_im)\nplt.title('input image')\nplt.subplot(452)\nplt.imshow(realB_im)\nplt.title('ground truth part A')\nplt.subplot(453)\nplt.imshow(fakeB_im)\nplt.title('prediction part A')\nplt.subplot(454)\nplt.imshow(realB_im_part2)\nplt.title('ground truth part B')\nplt.subplot(455)\nplt.imshow(fakeB_im_part2)\nplt.title('prediction part B')\n\n# the image number is 101\nrealA_im, realB_im, fakeB_im, realB_im_part2, fakeB_im_part2 = pathtoim(101)\nplt.subplot(456)\nplt.imshow(realA_im)\nplt.subplot(457)\nplt.imshow(realB_im)\nplt.subplot(458)\nplt.imshow(fakeB_im)\nplt.subplot(459)\nplt.imshow(realB_im_part2)\nplt.subplot(4,5,10)\nplt.imshow(fakeB_im_part2)\n\n# the image number is 172\nrealA_im, realB_im, fakeB_im, realB_im_part2, fakeB_im_part2 = pathtoim(172)\nplt.subplot(4,5,11)\nplt.imshow(realA_im)\nplt.subplot(4,5,12)\nplt.imshow(realB_im)\nplt.subplot(4,5,13)\nplt.imshow(fakeB_im)\nplt.subplot(4,5,14)\nplt.imshow(realB_im_part2)\nplt.subplot(4,5,15)\nplt.imshow(fakeB_im_part2)\n\n# the image number is 270\nrealA_im, realB_im, fakeB_im, realB_im_part2, fakeB_im_part2 = pathtoim(270)\nplt.subplot(4,5,16)\nplt.imshow(realA_im)\nplt.subplot(4,5,17)\nplt.imshow(realB_im)\nplt.subplot(4,5,18)\nplt.imshow(fakeB_im)\nplt.subplot(4,5,19)\nplt.imshow(realB_im_part2)\nplt.subplot(4,5,20)\nplt.imshow(fakeB_im_part2)\n# plt.savefig('demo.png', dpi=fig.dpi)\n# plt.show()\n","sub_path":"test_code/demoyang.py","file_name":"demoyang.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"103058681","text":"import urllib\nimport urllib2\ndef main(key):\n url='http://www.baidu.com/s?wd='+key+'&cl=3'\n content=urllib2.urlopen(url).read()\n f=open('3.html','w')\n f.write(content)\n\n\n\nif __name__ == '__main__':\n key='site:ly.com'\n main(key)\n","sub_path":"baidusearch.py","file_name":"baidusearch.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"173078533","text":"# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. 
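baidusearch.py above is Python 2 code: urllib2 no longer exists in Python 3, and the unquoted key would break for multi-word queries. A sketch of the same fetch-and-save flow ported to Python 3, assuming Baidu still accepts the wd/cl query parameters (fetch_results and the output filename are illustrative names):

from urllib.parse import quote_plus
from urllib.request import Request, urlopen

def fetch_results(key, out_path='3.html'):
    # quote_plus keeps keys such as 'site:ly.com foo' URL-safe
    url = 'http://www.baidu.com/s?wd=' + quote_plus(key) + '&cl=3'
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})  # avoid the default Python UA
    with urlopen(req) as resp, open(out_path, 'wb') as f:
        f.write(resp.read())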
See LICENSE file in the project root for full license information.\n\nfrom MiniFramework.EnumDef_6_0 import *\nfrom MiniFramework.Layer import *\nfrom MiniFramework.jit_utility import *\n\nclass PoolingLayer(CLayer):\n def __init__(self,\n input_shape, # (input_c, input_h, input_w)\n pool_shape, # (pool_h, pool_w)\n stride, \n pooling_type): # MAX, MEAN\n self.num_input_channel = input_shape[0]\n self.input_height = input_shape[1]\n self.input_width = input_shape[2]\n self.pool_height = pool_shape[0]\n self.pool_width = pool_shape[1]\n self.stride = stride\n self.pooling_type = pooling_type\n\n self.pool_size = self.pool_height * self.pool_width\n self.output_height = (self.input_height - self.pool_height) // self.stride + 1\n self.output_width = (self.input_width - self.pool_width) // self.stride + 1\n self.output_shape = (self.num_input_channel, self.output_height, self.output_width)\n self.output_size = self.num_input_channel * self.output_height * self.output_width\n \n self.x = None\n self.arg_max = None\n\n def initialize(self, folder, name):\n self.init_file_name = str.format(\"{0}/{1}_init.npy\", folder, name)\n\n def forward(self, x, train=True):\n return self.forward_numba(x, train)\n\n def backward(self, delta_in, layer_idx):\n return self.backward_numba(delta_in, layer_idx)\n\n def forward_img2col(self, x, train=True):\n self.x = x\n N, C, H, W = x.shape\n col = img2col(x, self.pool_height, self.pool_width, self.stride, 0)\n col_x = col.reshape(-1, self.pool_height * self.pool_width)\n self.arg_max = np.argmax(col_x, axis=1)\n out1 = np.max(col_x, axis=1)\n out2 = out1.reshape(N, self.output_height, self.output_width, C)\n self.z = np.transpose(out2, axes=(0,3,1,2))\n return self.z\n\n def backward_col2img(self, delta_in, layer_idx):\n dout = np.transpose(delta_in, (0,2,3,1))\n dmax = np.zeros((dout.size, self.pool_size)).astype('float32')\n #dmax[np.arange(self.arg_max.size), np.flatten(self.arg_max)] = np.flatten(dout)\n dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()\n dmax = dmax.reshape(dout.shape + (self.pool_size,))\n dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)\n dx = col2img(dcol, self.x.shape, self.pool_height, self.pool_width, self.stride, 0, self.output_height, self.output_width)\n return dx\n\n def forward_numba(self, x, train=True):\n assert(x.ndim == 4)\n self.x = x\n self.batch_size = self.x.shape[0]\n self.z = jit_maxpool_forward(self.x, self.batch_size, self.num_input_channel, self.output_height, self.output_width, self.pool_height, self.pool_width, self.stride)\n return self.z\n\n def backward_numba(self, delta_in, layer_idx):\n assert(delta_in.ndim == 4)\n assert(delta_in.shape == self.z.shape)\n delta_out = jit_maxpool_backward(self.x, delta_in, self.batch_size, self.num_input_channel, self.output_height, self.output_width, self.pool_height, self.pool_width, self.stride)\n return delta_out\n\n def save_parameters(self):\n np.save(self.init_file_name, self.pooling_type)\n\n def load_parameters(self):\n self.mode = np.load(self.init_file_name, allow_pickle=True)\n pass\n","sub_path":"2020SpringClass/学习笔记/201702062-renwenbo/9renwenbo201702062/code/MiniFramework/PoolingLayer.py","file_name":"PoolingLayer.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"594300222","text":"import configparser\n\nconfig = configparser.ConfigParser()\n\n# config[\"DEFAULT\"] = {'ServerAliveInterval': '45',\n# 'Compression': 
'yes',\n# 'CompressionLevel': '9'\n# }\n#\n# config['bitbukete.org'] = {'User': 'hg'}\n# config['topsecret.server.com'] = {}\n# config['topsecret.server.com'] = {'config': 'topsecret.server.com'}\n# topsecret = config['topsecret.server.com']\n# topsecret['Host Port'] = '50222'\n# topsecret['ForwardX11'] = 'no'\n#\n# config['DEFAULT']['ForwardX11'] = 'yes'\n#\n#\n# with open('example.conf', 'w') as configfile:\n# config.write(configfile)\n\n\n# config.remove_section('topsecret.server.com')\n\nconfig.read('example.conf')\n\nprint(config.sections())\n\n\nprint(config.defaults())\n\n\nprint(config['bitbukete.org']['User'])\n\n# config.remove_section('topsecret.server.com')\n\nfor key in config:\n print(key)\n\n\nfor key in config['bitbukete.org']:\n print(key)\n\nconfig.set('bitbukete.org', 'User', 'John')\n\nconfig.remove_option('bitbukete.org', 'User')\n\nconfig.write(open('example.conf', 'w'))","sub_path":"ConfigParser_model.py","file_name":"ConfigParser_model.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"159412140","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nDESCRIPTION = \"Shared Django code fore Founders\"\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n]\n\nsetup(\n name='founders-base',\n version='0.1.3',\n author='Joshua Karjala-Svendsen, Martin Tschammer, Tomas Jonsson',\n author_email='joshua@founders.as, martin@founders.as',\n packages=find_packages(exclude=[]),\n url='https://github.com/foundersas/founders-base/',\n platforms=['any'],\n classifiers=CLASSIFIERS,\n install_requires=['django-storages'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"329108163","text":"# coding: utf-8\n\nimport pprint\nimport re\n\nimport six\n\nfrom ganb_personal_client.models.visa_transaction import VisaTransaction\n\n\nclass VisaTransactionsResponse(object):\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'account_id': 'str',\n 'date_from': 'str',\n 'date_to': 'str',\n 'base_date': 'str',\n 'base_time': 'str',\n 'has_next': 'bool',\n 'next_item_key': 'str',\n 'count': 'str',\n 'visa_transactions': 'list[VisaTransaction]'\n }\n\n attribute_map = {\n 'account_id': 'accountId',\n 'date_from': 'dateFrom',\n 'date_to': 'dateTo',\n 'base_date': 'baseDate',\n 'base_time': 'baseTime',\n 'has_next': 'hasNext',\n 'next_item_key': 'nextItemKey',\n 'count': 'count',\n 'visa_transactions': 'visaTransactions'\n }\n\n def __init__(self, account_id=None, date_from=None, date_to=None, base_date=None, base_time=None, has_next=None, next_item_key=None, count=None, visa_transactions=None):\n \"\"\"VisaTransactionsResponse - a model defined in Swagger\"\"\"\n\n self._account_id = None\n self._date_from = None\n self._date_to = None\n self._base_date = None\n self._base_time = None\n self._has_next = None\n self._next_item_key = None\n self._count = None\n self._visa_transactions = None\n self.discriminator = None\n\n self.account_id = account_id\n self.date_from 
= date_from\n self.date_to = date_to\n self.base_date = base_date\n self.base_time = base_time\n self.has_next = has_next\n if next_item_key is not None:\n self.next_item_key = next_item_key\n self.count = count\n if visa_transactions is not None:\n self.visa_transactions = visa_transactions\n\n @property\n def account_id(self):\n \"\"\"Gets the account_id of this VisaTransactionsResponse.\n\n 口座ID 半角英数字 口座を識別するID\n\n :return: The account_id of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._account_id\n\n @account_id.setter\n def account_id(self, account_id):\n \"\"\"Sets the account_id of this VisaTransactionsResponse.\n\n 口座ID 半角英数字 口座を識別するID\n\n :param account_id: The account_id of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if account_id is None:\n raise ValueError(\"Invalid value for `account_id`, must not be `None`\")\n if account_id is not None and len(account_id) > 29:\n raise ValueError(\"Invalid value for `account_id`, length must be less than or equal to `29`\")\n if account_id is not None and len(account_id) < 12:\n raise ValueError(\"Invalid value for `account_id`, length must be greater than or equal to `12`\")\n\n self._account_id = account_id\n\n @property\n def date_from(self):\n \"\"\"Gets the date_from of this VisaTransactionsResponse.\n\n 対象期間From 半角文字 YYYY-MM-DD形式 リクエストに対象期間From、Toが設定されていない場合は当日日付が設定されます\n\n :return: The date_from of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._date_from\n\n @date_from.setter\n def date_from(self, date_from):\n \"\"\"Sets the date_from of this VisaTransactionsResponse.\n\n 対象期間From 半角文字 YYYY-MM-DD形式 リクエストに対象期間From、Toが設定されていない場合は当日日付が設定されます\n\n :param date_from: The date_from of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if date_from is None:\n raise ValueError(\"Invalid value for `date_from`, must not be `None`\")\n if date_from is not None and len(date_from) > 10:\n raise ValueError(\"Invalid value for `date_from`, length must be less than or equal to `10`\")\n if date_from is not None and len(date_from) < 10:\n raise ValueError(\"Invalid value for `date_from`, length must be greater than or equal to `10`\")\n\n self._date_from = date_from\n\n @property\n def date_to(self):\n \"\"\"Gets the date_to of this VisaTransactionsResponse.\n\n 対象期間To 半角文字 YYYY-MM-DD形式 リクエストに対象期間From、Toが設定されていない場合は当日日付が設定されます\n\n :return: The date_to of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._date_to\n\n @date_to.setter\n def date_to(self, date_to):\n \"\"\"Sets the date_to of this VisaTransactionsResponse.\n\n 対象期間To 半角文字 YYYY-MM-DD形式 リクエストに対象期間From、Toが設定されていない場合は当日日付が設定されます\n\n :param date_to: The date_to of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if date_to is None:\n raise ValueError(\"Invalid value for `date_to`, must not be `None`\")\n if date_to is not None and len(date_to) > 10:\n raise ValueError(\"Invalid value for `date_to`, length must be less than or equal to `10`\")\n if date_to is not None and len(date_to) < 10:\n raise ValueError(\"Invalid value for `date_to`, length must be greater than or equal to `10`\")\n\n self._date_to = date_to\n\n @property\n def base_date(self):\n \"\"\"Gets the base_date of this VisaTransactionsResponse.\n\n 基準日 入出金明細を照会した基準日を示します YYYY-MM-DD形式\n\n :return: The base_date of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._base_date\n\n @base_date.setter\n def base_date(self, base_date):\n \"\"\"Sets the base_date of this VisaTransactionsResponse.\n\n 基準日 入出金明細を照会した基準日を示します 
YYYY-MM-DD形式\n\n :param base_date: The base_date of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if base_date is None:\n raise ValueError(\"Invalid value for `base_date`, must not be `None`\")\n if base_date is not None and len(base_date) > 10:\n raise ValueError(\"Invalid value for `base_date`, length must be less than or equal to `10`\")\n if base_date is not None and len(base_date) < 10:\n raise ValueError(\"Invalid value for `base_date`, length must be greater than or equal to `10`\")\n\n self._base_date = base_date\n\n @property\n def base_time(self):\n \"\"\"Gets the base_time of this VisaTransactionsResponse.\n\n 基準時刻 入出金明細を照会した基準時刻を示します HH:MM:SS+09:00形式\n\n :return: The base_time of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._base_time\n\n @base_time.setter\n def base_time(self, base_time):\n \"\"\"Sets the base_time of this VisaTransactionsResponse.\n\n 基準時刻 入出金明細を照会した基準時刻を示します HH:MM:SS+09:00形式\n\n :param base_time: The base_time of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if base_time is None:\n raise ValueError(\"Invalid value for `base_time`, must not be `None`\")\n if base_time is not None and len(base_time) > 14:\n raise ValueError(\"Invalid value for `base_time`, length must be less than or equal to `14`\")\n if base_time is not None and len(base_time) < 14:\n raise ValueError(\"Invalid value for `base_time`, length must be greater than or equal to `14`\")\n\n self._base_time = base_time\n\n @property\n def has_next(self):\n \"\"\"Gets the has_next of this VisaTransactionsResponse.\n\n 次明細フラグ ・true=次明細あり ・false=次明細なし\n\n :return: The has_next of this VisaTransactionsResponse.\n :rtype: bool\n \"\"\"\n return self._has_next\n\n @has_next.setter\n def has_next(self, has_next):\n \"\"\"Sets the has_next of this VisaTransactionsResponse.\n\n 次明細フラグ ・true=次明細あり ・false=次明細なし\n\n :param has_next: The has_next of this VisaTransactionsResponse.\n :type: bool\n \"\"\"\n if has_next is None:\n raise ValueError(\"Invalid value for `has_next`, must not be `None`\")\n\n self._has_next = has_next\n\n @property\n def next_item_key(self):\n \"\"\"Gets the next_item_key of this VisaTransactionsResponse.\n\n 次明細キー 半角数字 次明細フラグがfalseの場合は、項目自体を設定しません\n\n :return: The next_item_key of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._next_item_key\n\n @next_item_key.setter\n def next_item_key(self, next_item_key):\n \"\"\"Sets the next_item_key of this VisaTransactionsResponse.\n\n 次明細キー 半角数字 次明細フラグがfalseの場合は、項目自体を設定しません\n\n :param next_item_key: The next_item_key of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if next_item_key is not None and len(next_item_key) > 24:\n raise ValueError(\"Invalid value for `next_item_key`, length must be less than or equal to `24`\")\n if next_item_key is not None and len(next_item_key) < 1:\n raise ValueError(\"Invalid value for `next_item_key`, length must be greater than or equal to `1`\")\n\n self._next_item_key = next_item_key\n\n @property\n def count(self):\n \"\"\"Gets the count of this VisaTransactionsResponse.\n\n 明細取得件数 半角数字\n\n :return: The count of this VisaTransactionsResponse.\n :rtype: str\n \"\"\"\n return self._count\n\n @count.setter\n def count(self, count):\n \"\"\"Sets the count of this VisaTransactionsResponse.\n\n 明細取得件数 半角数字\n\n :param count: The count of this VisaTransactionsResponse.\n :type: str\n \"\"\"\n if count is None:\n raise ValueError(\"Invalid value for `count`, must not be `None`\")\n if count is not None and len(count) > 7:\n raise 
ValueError(\"Invalid value for `count`, length must be less than or equal to `7`\")\n if count is not None and len(count) < 1:\n raise ValueError(\"Invalid value for `count`, length must be greater than or equal to `1`\")\n\n self._count = count\n\n @property\n def visa_transactions(self):\n \"\"\"Gets the visa_transactions of this VisaTransactionsResponse.\n\n Visaデビット取引明細情報リスト 該当する情報が無い場合は、空のリストを返却します\n\n :return: The visa_transactions of this VisaTransactionsResponse.\n :rtype: list[Transaction]\n \"\"\"\n return self._visa_transactions\n\n @visa_transactions.setter\n def visa_transactions(self, visa_transactions):\n \"\"\"Sets the visa_transactions of this VisaTransactionsResponse.\n\n Visaデビット取引明細情報リスト 該当する情報が無い場合は、空のリストを返却します\n\n :param visa_transactions: The visa_transactions of this VisaTransactionsResponse.\n :type: list[Transaction]\n \"\"\"\n\n self._visa_transactions = visa_transactions\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(VisaTransactionsResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, VisaTransactionsResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"ganb_personal_client/models/visa_transactions_response.py","file_name":"visa_transactions_response.py","file_ext":"py","file_size_in_byte":13105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"573303323","text":"import os \nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"gallery.settings\") \n\nimport django\ndjango.setup()\n\nimport hashlib\n\nfrom sticker.models import Category, Sticker\n\ndirname = '复仇者联盟'\nc = Category(name=dirname)\nc.save()\n\nfile_dir = r\"D:\\OneDrive\\Documents\\Python和数据挖掘\\code\\baidutieba\\Download\\asd2\"\n\nfor file in os.listdir(file_dir): \n with open(file_dir+'\\\\'+file, 'r', encoding='utf-8') as file:\n title = file.readline().split(':')[1].strip()\n author = file.readline().split(':')[1].strip()\n\n content = file.read().strip()\n\n m = hashlib.md5()\n m.update(content.encode('utf-8'))\n md5 = m.hexdigest()\n\n Sticker.objects.create(title = title,\n author = author,\n content = content,\n category = c,\n md5 = md5\n ) \n","sub_path":"txt2db.py","file_name":"txt2db.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598105393","text":"__author__ = 'adm'\n\nimport xml.etree.ElementTree as ET\nimport codecs\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', nargs = \"+\", 
dest = \"files\") #list of files to process\nparser.add_argument('-t', action=\"store_true\", dest = \"extract_text\") #extract only text in RES tag\nparser.add_argument('-o', action=\"store_true\", dest = \"extract_other\") #extract the text in other interesting tags (TI,TIEXT, TICOL, TI_OEUVRE, TYPE, NPRO, CH, LIB_SP, TERME, DESC_PRC, LIBELLE, INSERT)\nparser.add_argument('-n', action=\"store_true\", dest = \"extract_names\") #extract only names in NOM tag\nparser.add_argument('-p', action=\"store_true\", dest = \"extract_places\") #extract only places in LIEU tag\nparser.add_argument('-q', action=\"store_true\", dest = \"extract_translated_items\") #extract only tags that have translation (TICORP = TICORPANG, TICORPMAJ = TICORPMAJANG)\n#args = parser.parse_args()\nargs = parser.parse_args(\"-f extraits_reg_10_INSERT_12_000_20140725-234239_0001.xml test.xml -n\".split())\n\n# names = NOM\n# professions = ROLE_GENERIQUE\n# places = LIEU\n# TICORP = TICORPANG\n# TICORPMAJ = TICORPMAJANG\n\nif args.extract_text:\n tags_to_process = [\"RES\"]\n file_name = \"text_only\"\nelif args.extract_other:\n tags_to_process = [\"TI\",\"TIEXT\", \"TICOL\", \"TI_OEUVRE\", \"TYPE\", \"NPRO\", \"CH\", \"LIB_SP\", \"TERME\", \"DESC_PRC\",\n \"LIBELLE\", \"INSERT\", \"FONDS\",\"ROLE_GENERIQUE\"]\n file_name = \"other\"\nelif args.extract_names:\n tags_to_process = [\"NOM\"]\n file_name = \"names\"\nelif args.extract_places:\n tags_to_process = [\"LIEU\"]\n file_name = \"places\"\nelif args.extract_translated_items:\n source_tags = [\"TICORP\"]\n translation_tags = [\"TICORPANG\"]\n file_name = \"translated_items\"\nelse:\n tags_to_process = [\"RES\",\"TI\",\"TIEXT\", \"TICOL\", \"TI_OEUVRE\", \"TYPE\", \"NPRO\", \"CH\", \"LIB_SP\", \"TERME\", \"DESC_PRC\",\n \"LIBELLE\", \"NOM\", \"LIEU\", \"TICORP\", \"TICORPANG\", \"ROLE_GENERIQUE\"]\n file_name = \"all_extracted\"\n\nwith codecs.open(file_name + \".txt\", \"w\", encoding='utf8') as out_file:\n if not args.extract_translated_items:\n for file in args.files:\n tree = ET.parse(file)\n root = tree.getroot()\n for tag in tags_to_process:\n for tag_to_proc in root.iter(tag):\n s = tag_to_proc.text + \"\\n\"\n print(s)\n out_file.write(s)\n else:\n for file in args.files:\n tree = ET.parse(file)\n root = tree.getroot()\n sources = []\n translations = []\n for i, source_tag in enumerate(source_tags):\n for tag_to_proc in root.iter(source_tag):\n sources.append(tag_to_proc.text)\n for tag_to_proc in root.iter(translation_tags[i]):\n translations.append(tag_to_proc.text)\n if len(sources) == len(translations):\n for i, source in enumerate(sources):\n source_translation = source + \"\\t\" + translations[i] + \"\\n\"\n out_file.write(source_translation)\n else:\n print(\"Error: number of sources and number of translations are not equal\")\n\nprint(\"Finished\")\n\n\n\n","sub_path":"extract_text_from_xml_v1.py","file_name":"extract_text_from_xml_v1.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"80312962","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 25 17:58:46 2018\n\n@author: tylerclark\n\"\"\"\nfrom scipy.special import factorial as fact\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef game(K,b,p,player,beta,alpha):\n\n '''\n This specifies the results of a two-player game\n '''\n \n if player ==2:\n return p+K*beta*b\n if player ==1:\n return p-K*b\n\ndef Lambda(l,k,n,m,Print=False):\n '''\n '''\n 
p=choose(n-m,k-l)*choose(m,l)*fact(k)*fact(n-k)/(fact(n))\n if Print ==True:\n print(p,l,k,n,m)\n return p\n\n\ndef choose(n,k):\n '''\n This function returns the combinations of choosing n out of k\n '''\n return fact(n)/(fact(n-k)*fact(k))\n\ndef binom(n,p,k):\n '''\n The probability calculation taken from the binomial distribution\n '''\n return choose(n,k)*p**k*(1-p)**(n-k) \n\n\n\n\ndef generate_distribution(H,n):\n '''\n This function takes in the number of genes n and the frequencies of the \n heterogeneous and homogeneous genes and outputs a distribution of \n macrostates\n '''\n P=[]\n for i in range(0,n):\n P.append(binom(n-1,H[0]+0.5*H[2],i))\n \n return P\n\n\ndef Lambda_dom(l,k,n,m,p,other_parent,epsilon,mark):\n '''\n '''\n Lam=0\n \n\n H=(0,0,0)\n H=np.asarray(H,dtype=float)\n for s in range(0,l+1):\n \n if l-s<=k and l-s<=m and m+k-2*l+2*s>=s and n>=m+k-2*l+2*s:\n \n if l-s>=k+m-n:\n \n L=Lambda(l-s,k,n,m)*binom(m+k-2*l+2*s,p,s)\n Lam+=L\n \n if mark==True:\n L1=L*(l-s)/(n)\n L2=L*(n-k-m+l-s)/(n)\n L3=L*(m+k-2*(l-s))/(n)\n \n if L1<0 or L2<0 or L3<0:\n raise ValueError('Values less than Zero')\n \n H[0]+=L1\n H[1]+=L2\n H[2]+=L3\n \n if epsilon != None:\n Lam=Lam*(1+epsilon/(np.abs(other_parent-l)+0.01))\n\n return Lam,H\n\ndef Season(P,H,n,b,p,x,epsilon,beta,alpha):\n '''\n '''\n P=generate_distribution(H,n)\n \n \n d=0\n P_new=np.zeros(len(P))\n \n H=np.asarray((0,0,0),dtype=float)\n \n for i in range(0,n): \n for j in range(0,n): \n for K in range(0,n): \n for k2 in range(0,n): \n if j<=i:\n Game=(game(K,b,p,player=1,beta=beta,alpha=alpha)+game(k2,b,p,player=2,beta=beta,alpha=alpha))/2\n Lambda1=Lambda_dom(K,i,n-1,j,x,k2,epsilon,mark=True)\n Lambda2=Lambda_dom(k2,i,n-1,j,x,K,epsilon,mark=False)\n psi=Lambda1[0]*Lambda2[0]*P[i]*P[j]*Game\n \n P_new[K]+=psi\n d+=psi\n\n \n total_P=Lambda2[0]*P[i]*P[j]*Game\n \n Lambda1[1][0]=Lambda1[1][0]*total_P\n Lambda1[1][1]=Lambda1[1][1]*total_P\n Lambda1[1][2]=Lambda1[1][2]*total_P\n \n H+=Lambda1[1]\n \n\n\n if j>i:\n Lambda1=Lambda_dom(K,j,n-1,i,x,k2,epsilon,mark=True)\n Lambda2=Lambda_dom(k2,j,n-1,i,x,K,epsilon,mark=False)\n \n psi=Lambda1[0]*Lambda2[0]*P[i]*P[j]*Game\n \n P_new[K]+=psi\n d+=psi\n \n \n total_P=Lambda2[0]*P[i]*P[j]*Game\n \n Lambda1[1][0]=Lambda1[1][0]*total_P\n Lambda1[1][1]=Lambda1[1][1]*total_P\n Lambda1[1][2]=Lambda1[1][2]*total_P\n H+=Lambda1[1]\n \n \n \n return P_new/d,H/d\n\n\n\n\ndef many_seasons_meanVar(parms):\n \n num_seasons=parms['num_seasons']\n beta=parms['beta']\n epsilon=parms['epsilon']\n alpha=parms['alpha']\n x=parms['x']\n p=parms['p']\n b=parms['b']\n n=parms['n']\n H=parms['H']\n \n plot_distribution_season=parms['plot_dist_season']\n plot_distribution_end=parms['plot_dist_end']\n \n plot_mean_var=parms['plot_mean_var']\n \n P=generate_distribution(H,n)\n \n mean_all=[]\n std_all=[]\n \n for i in range(0,num_seasons):\n \n if parms['rand_beta'][0] ==True:\n \n beta=np.random.randn()*parms['rand_beta'][1]+beta\n\n P,H=Season(P=P,H=H,n=n,b=b,p=p,x=x,epsilon=epsilon,beta=beta,alpha=alpha)\n \n if plot_distribution_season ==True:\n \n plt.title('Distribution of Macrostates')\n plt.plot(np.linspace(0,b*len(P),len(P)),P,label='Season= '+str(i))\n \n \n \n \n mean=np.sum(P*np.linspace(0,len(P),len(P)))\n \n mean_all.append(mean)\n std=np.std(P)\n std_all.append(std)\n \n if plot_distribution_season ==True:\n plt.show()\n \n if plot_mean_var ==True:\n mean_all=np.asarray(mean_all)\n plt.plot(np.linspace(0,len(mean_all),len(mean_all)),mean_all,label='x='+str(x)+'--beta:'+str(beta))\n \n \n \n if 
plot_distribution_end ==True:\n\n plt.title('Macrostate distribution after '+str(num_seasons))\n plt.plot(np.linspace(0,b*len(P),len(P)),P,label=str(x))\n \n \n return mean, std\n\n\n\n\ndef explore(parms,var1,var2):\n \n for i in range(0,var1['steps']):\n \n if i>0 and parms['plot_distribution_end']==True or parms['plot_mean_var']==True:\n plt.legend()\n plt.show()\n \n for j in range(0,var2['steps']):\n parms[var1['var']]=i*var1['max']/var1['steps']+var1['min']\n parms[var2['var']]=j*var2['max']/var2['steps']+var2['min']\n mean, std=many_seasons_meanVar(parms)\n \n \n ","sub_path":"evolve/build/lib/evolve/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"461641685","text":"#!/usr/bin/env python\n#\n# This script queries assets in Tenable.io to find all assets seen only by\n# the Nessus Network Monitor. It then takes these IPs and creates a target group and\n# basic network scan (uncredentialed) against the new target group, and runs\n# the scan. By the time the script is finished running, there should be a\n# pending scan for all internal IPs seen only by NNM thus far.\n#\n# Script Assumptions:\n# - You have an internal Nessus scanner deployed, and know the scanner name.\n# - You have already deployed Nessus Network Monitor, and it is seeing traffic.\n# - The argument you pass to the script covers a valid internal network range.\n# - You are authorized to perform scans in your environment.\n#\n# Author: ThisTooShallXSS (https://github.com/thistooshallxss)\n# Requirements: Python 2.7+\n#\n# Usage:\n# - python fidelity_nnm_targetscan.py '10.0.0.0/8'\n# - python fidelity_nnm_targetscan.py '192.168.1.0/24'\n# - python fidelity_nnm_targetscan.py (Will find all internal (RFC-1918) IPs without CIDR specified)\n#\n\nimport json, requests\nimport sys\nimport pickle\n\nrequests.packages.urllib3.disable_warnings()\n\nTIMEFRAME = 90 # Time (in days) that we'll include in the search. 
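save_keys/grab_headers below persist the Tenable.io API keys in a plaintext pickle next to the script. A sketch of the same header construction reading from environment variables instead — TIO_ACCESS_KEY and TIO_SECRET_KEY are hypothetical variable names, while the X-ApiKeys format is copied from the script itself:

import os, sys

def grab_headers_from_env():
    access_key = os.environ.get('TIO_ACCESS_KEY')  # hypothetical env var names
    secret_key = os.environ.get('TIO_SECRET_KEY')
    if not (access_key and secret_key):
        sys.exit('Set TIO_ACCESS_KEY and TIO_SECRET_KEY first.')
    # same header shape the script builds from the pickle
    return {'Content-type': 'application/json',
            'X-ApiKeys': 'accessKey=' + access_key + ';secretKey=' + secret_key}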
0 for all.\nSCANNER_NAME = 'tnsappliance-123456' # Provide the name of an already linked scanner or group.\nFOLDER_NAME = 'My Scans' # Provide the name of an already created folder.\n\nclass nnm_only_assets(object): # Object for temp storing new AWS creds.\n def __init__(self, ipv4, fqdn):\n self.ipv4 = ipv4\n self.fqdn = fqdn\n\ndef save_keys():\n #assumption is that the user keys didn't work or don't exsist\n print(\"Please provide your Tenable.io User API keys.\")\n access_key = input(\"Please provide your Access Key (use quotes): \")\n secret_key = input(\"Please provide your Secret Key (use quotes): \")\n\n dicts = {\"Access Key\": access_key, \"Secret Key\": secret_key}\n\n pickle_out = open(\"keys.pickle\", \"wb\")\n pickle.dump(dicts, pickle_out)\n pickle_out.close()\n\n print(\"Now you have keys, re-run your command\")\n sys.exit()\n\ndef grab_headers():\n import os\n\n access_key = ''\n secret_key = ''\n\n #check for API keys; if none, get them from the user by calling save_keys()\n if os.path.isfile('./keys.pickle') is False:\n save_keys()\n else:\n pickle_in = open(\"keys.pickle\", \"rb\")\n keys = pickle.load(pickle_in)\n access_key = keys[\"Access Key\"]\n secret_key = keys[\"Secret Key\"]\n\n #set the header\n headers = {'Content-type':'application/json',\n 'X-ApiKeys':'accessKey='+access_key+';secretKey='+secret_key}\n return headers\n\ndef get_data(url_mod):\n url = \"https://cloud.tenable.com\"\n headers = grab_headers()\n r = requests.request('GET', url + url_mod, headers=headers, verify=False)\n\n if r.status_code != 200:\n print('Status:', r.status_code, 'Problem with the initial GET assets request. Exiting.')\n sys.exit()\n\n data = r.json()\n return data\n\ndef create_target_group(ip_addrs, name):\n # This makes the POST request to build the target group on Tenable.io\n json_payload = '{{\"name\":\"{}\",\"members\":\"{}\",\"type\":\"system\",\"acls\":[{{\"permissions\":64,\"type\":\"default\"}}]}}'.format(name, ip_addrs)\n url = \"https://cloud.tenable.com/target-groups\"\n headers = grab_headers()\n r = requests.request('POST', url, headers=headers, data=json_payload, verify=False)\n\n if r.status_code != 200:\n print('Status:', r.status_code, 'Problem with the POST request to create target group. 
Exiting.')\n sys.exit()\n\n tgt_group_id = r.json()[\"id\"]\n\n return tgt_group_id\n\ndef get_nnm_only_ips():\n\n uri = '/workbenches/assets?date_range={}&filter.0.quality=set-hasonly&filter.0.filter=sources&filter.0.value=PVS&filter.search_type=and'.format(TIMEFRAME)\n data = get_data(uri)\n ip_addrs = []\n\n for x in range(len(data[\"assets\"])):\n # Go through each configured connector and store it's settings.\n for y in range(len(data[\"assets\"][x][\"ipv4\"])):\n\n ip_addr = data[\"assets\"][x][\"ipv4\"][y]\n try:\n fqdn = data[\"assets\"][x][\"fqdn\"][0]\n except:\n fqdn = ''\n\n #print(\"IP: {}, FQDN: {}\".format(ip_addr, fqdn))\n if check_valid_target(ip_addr):\n ip_addrs.append(nnm_only_assets(ip_addr,fqdn))\n\n return ip_addrs\n\ndef check_valid_target(ip):\n try:\n cidr = sys.argv[1]\n except:\n cidr = \"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16\"\n\n if is_internal(ip):\n if ',' in cidr:\n for single_cidr in [x.strip() for x in cidr.split(',')]:\n if addressInNetwork(ip, single_cidr): # Check if IP is part of allowable scan range.\n return True\n else:\n if addressInNetwork(ip, cidr): # Check if IP is part of allowable scan range.\n return True\n\n return False\n\ndef is_internal(ip):\n\n from struct import unpack\n from socket import AF_INET, inet_pton\n\n # First we make sure that it's a private/internal IP address.\n f = unpack('!I',inet_pton(AF_INET,ip))[0]\n private = (\n [ 2130706432, 4278190080 ], # 127.0.0.0, 255.0.0.0\n [ 3232235520, 4294901760 ], # 192.168.0.0, 255.255.0.0\n [ 2886729728, 4293918720 ], # 172.16.0.0, 255.240.0.0\n [ 167772160, 4278190080 ], # 10.0.0.0, 255.0.0.0\n )\n for net in private: # If this IP address is an internal IP, proceed.\n if (f & net[1]) == net[0]:\n return True\n\n return False\n\ndef addressInNetwork(ip, net):\n import socket,struct\n\n ipaddr = int(''.join([ '%02x' % int(x) for x in ip.split('.') ]), 16)\n netstr, bits = net.split('/')\n netaddr = int(''.join([ '%02x' % int(x) for x in netstr.split('.') ]), 16)\n mask = (0xffffffff << (32 - int(bits))) & 0xffffffff\n\n return (ipaddr & mask) == (netaddr & mask)\n\ndef create_ip_list(nnm_only_ips):\n # Now, nnm_only_ips is an object, with a variable length, each having an IP+fqdn.\n # We will use this list to create the target group of IPs in Tenable.io\n target_ips = ''\n for x in range(len(nnm_only_ips)):\n if x == 0:\n target_ips = nnm_only_ips[x].ipv4\n else:\n target_ips = target_ips + ',' + nnm_only_ips[x].ipv4\n\n return target_ips\n\ndef get_scanner_id():\n scanners = get_data('/scanners')[\"scanners\"]\n scanner_id = 0\n\n for x in range(len(scanners)):\n if SCANNER_NAME == scanners[x][\"name\"]:\n scanner_id = scanners[x][\"uuid\"]\n break\n if scanner_id == 0: print(\"Scanner name not found: {}\".format(SCANNER_NAME))\n return scanner_id\n\ndef get_folder_id():\n folders = get_data('/folders')[\"folders\"]\n folder_id = 0\n\n for x in range(len(folders)):\n if FOLDER_NAME == folders[x][\"name\"]:\n folder_id = folders[x][\"id\"]\n break\n\n if folder_id == 0: print(\"Folder name not found: {}\\n\\nYou can use 'My Scans' as well.\".format(FOLDER_NAME))\n return folder_id\n\ndef get_template_id(name):\n templates = get_data('/editor/scan/templates')[\"templates\"]\n template_id = 0\n\n for x in range(len(templates)):\n if name == templates[x][\"name\"]:\n template_id = templates[x][\"uuid\"]\n break\n if template_id == 0: print(\"Scan Template UUID not found: {}\".format(name))\n return template_id\n\ndef run_basic_uncred_scan(target_group_id, timestamp):\n\n scanner_id = 
get_scanner_id()\n folder_id = get_folder_id()\n template_id = get_template_id('basic')\n\n #print(\"Scanner ID: {}, Folder ID: {}, Tgt Group ID: {}\".format(scanner_id, folder_id, target_group_id))\n # POST to /scans\n #print(\"Good UUID: 731a8e52-3ea6-a291-ec0a-d2ff0619c19d7bd788d6be818b65\")\n #print(\"Found UUID: {}\".format(template_id))\n # Need scanner-id, name, launch_now:true, folder_id, asset_lists:0:target_group_id\n scan_name = \"NNM Only Assets Remote Scan - {}\".format(timestamp)\n\n json_payload = '{{\"uuid\":\"{}\",\"settings\":{{\"launch_now\":true,\"enabled\":false,\"file_targets\":\"\",\"text_targets\":\"\",\"asset_lists\":[\"{}\"],\"scanner_id\":\"{}\",\"use_dashboard\":\"\",\"folder_id\":{},\"description\":\"\",\"name\":\"{}\"}}}}'.format(template_id, target_group_id, scanner_id, folder_id, scan_name)\n url = \"https://cloud.tenable.com/scans\"\n headers = grab_headers()\n r = requests.request('POST', url, headers=headers, data=json_payload, verify=False)\n\n if r.status_code != 200:\n print('Status:', r.status_code, 'Problem with the POST request to create the new scan. Exiting.')\n sys.exit()\n\n return scan_name\n\ndef main():\n import datetime, time\n # First we grab all systems seen only by NNM.\n try:\n nnm_only_ips = get_nnm_only_ips()\n except:\n print('Could not get asset details from Tenable.io... Quitting')\n sys.exit()\n\n if len(nnm_only_ips) == 0:\n print(\"No assets found matching this network range.\")\n sys.exit()\n if len(nnm_only_ips) > 0:\n ts = time.time()\n sml_timestamp = datetime.datetime.fromtimestamp(ts).strftime('%b-%d')\n lrg_timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M')\n # Ensure that there is at least one IP returned in the query.\n target_group_name = 'Seen Only by NNM (as of {})'.format(lrg_timestamp)\n target_group_ips = create_ip_list(nnm_only_ips)\n target_group_id = create_target_group(target_group_ips, target_group_name)\n\n # Now that we have a list of the IPs only seen by NNM, we can create the target group.\n if target_group_id > 0:\n print('Target group created! (Name: {}, # of IPs: {})'.format(target_group_name, len(nnm_only_ips)))\n print('\\nDevices Included:')\n for x in range(len(nnm_only_ips)):\n print(\" - {} ({})\".format(nnm_only_ips[x].ipv4, nnm_only_ips[x].fqdn))\n\n scan_name = run_basic_uncred_scan(target_group_id, lrg_timestamp)\n if scan_name:\n print(\"\\nA basic uncredentialed Nessus scan has been initiated against the new target group.\")\n\nif __name__ == '__main__':\n main()","sub_path":"Pre-2020/fidelity_nnm_targetscan.py","file_name":"fidelity_nnm_targetscan.py","file_ext":"py","file_size_in_byte":10121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"242975270","text":"\"\"\"\r\nSupervided Pretrained Autoencoders for Inference in Networks\r\n\r\n@author: Gunjan Mahindre, Rasika Karkare, Chong Zhou, Randy Paffenroth\r\nVersion: 1.0\r\nChanges from unsupervised: \r\n Train function takes the actual values of missing entries into account while training.\r\n Functions have been created.\r\n Structure of the script has been changed.\r\nDate last modified: 07/24/2020\r\n\r\nDescription: \r\n Trains the supervised Autoencoder using several networks which are sparsely sampled.\r\n Tests on a network it has not been trained on.\r\n Calculates Mean error, Absolute Hop Distance Error (AHDE), std. dev. for mean error and std. dev. 
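The Tenable script above rebuilds the same headers and re-reads the pickle on every request. A sketch of a shared requests.Session that carries the headers once and adds basic retries — make_session is an illustrative helper, and verify=False only mirrors the script's own lab setting:

import requests
from requests.adapters import HTTPAdapter, Retry

def make_session(headers):
    s = requests.Session()
    s.headers.update(headers)  # headers ride along on every request
    s.verify = False           # mirrors the script; do not do this in production
    s.mount('https://', HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1)))
    return s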
for AHDE.\r\n\r\n\"\"\"\r\n\r\n# IMPORT MODULES REQUIRED \r\n\r\nimport os\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport DeepAE as DAE\r\nimport networkx as nx\r\nfrom math import sqrt\r\nimport l1shrink as SHR \r\n# import tensorflow as tf\r\nimport tensorflow.compat.v1 as tf\r\nimport numpy.linalg as nplin\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.sparse.csgraph import dijkstra\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\nnp.random.seed(123)\r\n\r\n# FUNCTIONS AND CLASS DEFINITIONS\r\n\r\nclass RDAE(object):\r\n \"\"\"\r\n @author: Chong Zhou\r\n 2.0 version.\r\n complete: 10/17/2016\r\n version changes: move implementation from theano to tensorflow.\r\n 3.0\r\n complete: 2/12/2018\r\n changes: delete unused parameter, move shrink function to other file\r\n update: 03/15/2019\r\n update to python3 \r\n Des:\r\n X = L + S\r\n L is a non-linearly low rank matrix and S is a sparse matrix.\r\n argmin ||L - Decoder(Encoder(L))|| + ||S||_1\r\n Use Alternating projection to train model\r\n \"\"\"\r\n def __init__(self, sess, layers_sizes, lambda_=1.0, error = 1.0e-7):\r\n \"\"\"\r\n sess: a Tensorflow tf.Session object\r\n layers_sizes: a list that contains the deep ae layer sizes, including the input layer\r\n lambda_: tuning the weight of l1 penalty of S\r\n error: converge criterior for jump out training iteration\r\n \"\"\"\r\n self.lambda_ = lambda_\r\n self.layers_sizes = layers_sizes\r\n self.error = error\r\n self.errors=[]\r\n self.AE = DAE.Deep_Autoencoder( sess = sess, input_dim_list = self.layers_sizes)\r\n\r\n def fit(self, X, Y, sess, learning_rate=0.15, inner_iteration = 50,\r\n iteration=20, batch_size=50, verbose=False):\r\n ## The first layer must be the input layer, so they should have same sizes.\r\n assert X.shape[1] == self.layers_sizes[0]\r\n\r\n ## initialize L, S, mu(shrinkage operator)\r\n self.L = np.zeros(X.shape)\r\n self.S = np.zeros(X.shape)\r\n self.hadamard_train = np.array(hadamard_train)\r\n self.cost = list()\r\n\r\n mu = (X.size) / (4.0 * nplin.norm(X,1))\r\n print (\"shrink parameter:\", self.lambda_ / mu)\r\n LS0 = self.L + self.S\r\n\r\n XFnorm = nplin.norm(X,'fro')\r\n if verbose:\r\n print (\"X shape: \", X.shape)\r\n print (\"L shape: \", self.L.shape)\r\n print (\"S shape: \", self.S.shape)\r\n print (\"mu: \", mu)\r\n print (\"XFnorm: \", XFnorm)\r\n\r\n for it in range(iteration):\r\n if verbose:\r\n print (\"Out iteration: \" , it)\r\n ## alternating project, first project to L\r\n #self.L = X - self.S\r\n ## Using L to train the auto-encoder\r\n self.cost.append(self.AE.fit(X = X, Y = Y, sess = sess, S =self.S, h = self.hadamard_train,\r\n iteration = inner_iteration,\r\n learning_rate = learning_rate,\r\n batch_size = batch_size,\r\n verbose = verbose))\r\n ## get optmized L\r\n self.L = self.AE.getRecon(X = X, sess = sess)\r\n ## alternating project, now project to S\r\n self.S = SHR.shrink(self.lambda_/np.min([mu,np.sqrt(mu)]), (X - self.L).reshape(X.size)).reshape(X.shape)\r\n\r\n ## break criterion 1: the L and S are close enough to X\r\n c1 = nplin.norm(X - self.L - self.S, 'fro') / XFnorm\r\n ## break criterion 2: there is no changes for L and S \r\n c2 = np.min([mu,np.sqrt(mu)]) * nplin.norm(LS0 - self.L - self.S) / XFnorm\r\n\r\n if verbose:\r\n print (\"c1: \", c1)\r\n print (\"c2: \", c2)\r\n\r\n if c1 < self.error and c2 < self.error :\r\n print (\"early break\")\r\n break\r\n ## save L + S for c2 check in the next iteration\r\n LS0 = self.L + self.S\r\n \r\n return self.L , self.S, 
self.cost\r\n \r\n def transform(self, X, sess):\r\n #L = X - self.S\r\n return self.AE.transform(X = X, sess = sess)\r\n \r\n def getRecon(self, X, sess):\r\n return self.AE.getRecon(X, sess = sess)\r\n\r\n\r\n# define a function to sample from the given hop distance matrix----\r\n\r\ndef delete_percentage(fraction, data_matrix):\r\n\r\n '''\r\n INPUT:\r\n fraction: percentage to be deleted\r\n data_matrix: hop distance matrix from which we need to delete the given percentage of entries\r\n\r\n OUTPUT:\r\n data_matrix: the hop distance matrix with only (100-fraction)% of sampled entries. The missing entries are replaced by 0\r\n '''\r\n\r\n [Rn, Cn] = data_matrix.shape\r\n uT = []\r\n for i in range(Cn):\r\n for j in range(i + 1, Cn):\r\n uT.append(data_matrix[i, j])\r\n # calculate entries to be deleted\r\n rem_num = ((len(uT)) * fraction / 100) # total number of entries to be removed\r\n # has to be an integer value\r\n rem_num = int(rem_num)\r\n # select random elements from the upper triangle:\r\n ind = np.random.choice(len(uT), rem_num, replace=False)\r\n # make these indices -1\r\n for i in ind:\r\n uT[i] = 0 # now place these values back in the upper triangle:\r\n\r\n # -------delete the symmetric entry for undirected networks. Comment it for directed networks ------\r\n p = 0\r\n for i in range(Cn):\r\n for j in range(i + 1, Cn):\r\n data_matrix[i, j] = uT[p]\r\n # delete symmetric entris - applicable for undirected graphs\r\n if data_matrix[i, j] == 0:\r\n data_matrix[j, i] = 0\r\n p += 1\r\n #---------------------------------------------------------------------------------------------------\r\n\r\n\r\n return data_matrix\r\n\r\n# MAIN FUNCTION\r\ndef main_code(fraction, w):\r\n# if __name__ == \"__main__\":\r\n\r\n print (\"inside main_code\")\r\n\r\n\r\n\r\n\r\n # load data for testing---REAL WORLD NETWORK\r\n # path = 'C:/Users/gunjan/Google Drive/PhD work/data/undirected networks/virgili emails/'\r\n # for VIRGILI NETWORK-----------------------------------------------\r\n path = '/content/drive/MyDrive/PhD work/data/undirected networks/facebook/'\r\n data_test = np.loadtxt(path + 'dHp.txt')\r\n # for VIRGILI NETWORK-----------------------------------------------\r\n\r\n\r\n # # for FACEBOOK NETWORK--------------------------------------------\r\n # G = nx.read_edgelist('/content/drive/MyDrive/PhD work/data/undirected networks/facebook/edges.txt', create_using = nx.Graph(), nodetype = int)\r\n # # from scipy.sparse.csgraph import dijkstra\r\n # A = nx.adjacency_matrix(G)\r\n # data_test = np.array(dijkstra(A))\r\n # # for FACEBOOK NETWORK--------------------------------------------\r\n\r\n\r\n \r\n data_test_original = data_test.copy()\r\n\r\n\r\n\r\n # --------------define hyperparameters-------------------------\r\n # [R,C] = [1133,1133]\r\n [R,C] = [4039,4039]\r\n [R,C] = data_test.shape\r\n hidden_layer_size = 50\r\n learning_rate_alpha = 0.001\r\n batch_size = 1\r\n inner_iteration = 20\r\n #-------------------------------------------------------------\r\n\r\n\r\n # # for TESTING performance on training data variation:\r\n # par4 = w[3]\r\n # print (\"testing parameter: \", par4)\r\n # # for TESTING\r\n # GT = nx.powerlaw_cluster_graph(R, par4, 0.4, seed=None)\r\n # # for TESTING\r\n # A = nx.adjacency_matrix(GT)\r\n # data_test = np.array(dijkstra(A))\r\n # data_test_original = data_test.copy()\r\n\r\n\r\n \r\n\r\n par1 = w[0]\r\n par2 = w[1]\r\n par3 = w[2]\r\n\r\n\r\n\r\n\r\n print (\"training parameters: \", par1, par2, par3)\r\n\r\n # 2. 
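fit above alternates between training the autoencoder on L and soft-thresholding the residual into the sparse part S via SHR.shrink. A sketch of what such an l1 shrinkage operator typically computes — a standard soft-threshold offered as a presumed stand-in for the unshown l1shrink module:

import numpy as np

def soft_threshold(eps, x):
    # proximal operator of eps * ||x||_1: shrink every entry toward zero by eps
    return np.sign(x) * np.maximum(np.abs(x) - eps, 0.0)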
create networks\r\n\r\n #------- BARABASI -------------\r\n # G1 = nx.barabasi_albert_graph(R, par1, seed=None)\r\n # G2 = nx.barabasi_albert_graph(R, par2, seed=None)\r\n # G3 = nx.barabasi_albert_graph(R, par3, seed=None)\r\n\r\n # A = nx.adjacency_matrix(G1)\r\n # data1 = np.array(dijkstra(A))\r\n # data1_ori = data1.copy()\r\n\r\n # A = nx.adjacency_matrix(G2)\r\n # data2 = np.array(dijkstra(A))\r\n # data2_ori = data2.copy()\r\n\r\n # A = nx.adjacency_matrix(G3)\r\n # data3 = np.array(dijkstra(A))\r\n # data3_ori = data3.copy()\r\n\r\n # #------- POWER LAW -------------\r\n # G4 = nx.powerlaw_cluster_graph(R, par1, 0.1, seed=None)\r\n # G5 = nx.powerlaw_cluster_graph(R, par2, 0.5, seed=None)\r\n # G6 = nx.powerlaw_cluster_graph(R, par3, 0.9, seed=None)\r\n\r\n \r\n # A = nx.adjacency_matrix(G4)\r\n # data4 = np.array(dijkstra(A))\r\n # data4_ori = data4.copy()\r\n\r\n # A = nx.adjacency_matrix(G5)\r\n # data5 = np.array(dijkstra(A))\r\n # data5_ori = data5.copy()\r\n\r\n # A = nx.adjacency_matrix(G6)\r\n # data6 = np.array(dijkstra(A))\r\n # data6_ori = data6.copy()\r\n\r\n\r\n print (data_test)\r\n print (data_test_original)\r\n\r\n print (data_test_original.shape)\r\n\r\n # delete the given percentage:\r\n # fraction = 60\r\n\r\n # tf.Session() initiates a TensorFlow Graph object in which tensors are processed through operations.\r\n # The \"with\" block terminates the session as soon as the operations are completed. \r\n with tf.compat.v1.Session() as sess:\r\n\r\n print (\"inside session\")\r\n \r\n # create object rae\r\n rae = RDAE(sess = sess, lambda_= 500000, layers_sizes=[R,hidden_layer_size])\r\n\r\n global hadamard_train \r\n\r\n # # Process data1---------------------------------------------------------------\r\n # data1 = delete_percentage(fraction, data1)\r\n\r\n # # Hadamard part\r\n \r\n # hadamard_train = np.ones(data1.shape)\r\n # hadamard_train = np.where(data1 == 0, 0 , hadamard_train)\r\n # hadamard_train = pd.DataFrame(hadamard_train)\r\n\r\n # print (\"before fit function\")\r\n\r\n # # train the autoencoder with data1\r\n # L, S, cost = rae.fit(data1, data1_ori ,sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n\r\n # print (\"after fit function\")\r\n\r\n # # Process data2---------------------------------------------------------------\r\n # data2 = delete_percentage(fraction, data2)\r\n\r\n # hadamard_train = np.ones(data2.shape)\r\n # hadamard_train = np.where(data2 == 0, 0, hadamard_train)\r\n # hadamard_train = pd.DataFrame(hadamard_train)\r\n\r\n # L, S, cost = rae.fit(data2, data2_ori, sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n\r\n\r\n # # Process data3---------------------------------------------------------------\r\n # data3 = delete_percentage(fraction, data3)\r\n\r\n # hadamard_train = np.ones(data3.shape)\r\n # hadamard_train = np.where(data3 == 0, 0, hadamard_train)\r\n # hadamard_train = pd.DataFrame(hadamard_train)\r\n\r\n # L, S, cost = rae.fit(data3, data3_ori, sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n\r\n\r\n # # Process data4---------------------------------------------------------------\r\n # data4 = delete_percentage(fraction, data4)\r\n\r\n # hadamard_train = np.ones(data4.shape)\r\n # hadamard_train = np.where(data4 == 0, 0, hadamard_train)\r\n # hadamard_train = 
pd.DataFrame(hadamard_train)\r\n\r\n # L, S, cost = rae.fit(data4, data4_ori, sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n # print (\"after training on nw1: cost = \", cost)\r\n\r\n # # Process data5---------------------------------------------------------------\r\n # data5 = delete_percentage(fraction, data5)\r\n\r\n # hadamard_train = np.ones(data5.shape)\r\n # hadamard_train = np.where(data5 == 0, 0, hadamard_train)\r\n # hadamard_train = pd.DataFrame(hadamard_train)\r\n\r\n # L, S, cost = rae.fit(data5, data5_ori, sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n # print (\"after training on nw2: cost = \", cost)\r\n\r\n\r\n # # Process data6---------------------------------------------------------------\r\n # data6 = delete_percentage(fraction, data6)\r\n\r\n # hadamard_train = np.ones(data6.shape)\r\n # hadamard_train = np.where(data6 == 0, 0, hadamard_train)\r\n # hadamard_train = pd.DataFrame(hadamard_train)\r\n\r\n # L, S, cost = rae.fit(data6, data6_ori, sess = sess, learning_rate=learning_rate_alpha, batch_size =batch_size,inner_iteration =inner_iteration,iteration=1, verbose=True)\r\n # print (\"after training on nw3: cost = \", cost)\r\n\r\n\r\n\r\n\r\n # Process test data---------------------------------------------------------------\r\n data_test = delete_percentage(fraction, data_test)\r\n\r\n hadamard_test = np.ones(data_test.shape)\r\n hadamard_test = np.where(data_test == 0,0, hadamard_test)\r\n hadamard_test = pd.DataFrame(hadamard_test)\r\n\r\n data_test = pd.DataFrame(data_test)\r\n # data_test = np.array(data_test)\r\n\r\n # # reconstruct using autoencoder to predict entries\r\n # h = rae.transform(data_test, sess=sess)\r\n # # R : reconstructed matrix\r\n # R = rae.getRecon(data_test, sess=sess)\r\n\r\n # print(type(R))\r\n # exit()\r\n # R = pd.DataFrame(R)\r\n R = data_test.copy()\r\n\r\n\r\n data_test_original = pd.DataFrame(data_test_original)\r\n\r\n # # Correction code:\r\n # \"\"\"\r\n # All diagonal entries are set to 0\r\n # All off diagonal entries predicted <=1 are set to 1\r\n # \"\"\"\r\n for i in range(len(R)):\r\n for j in R.columns:\r\n if i != j:\r\n \tif R.iloc[i, j] <= 1.0:\r\n \t\tR.iloc[i, j] = 1\r\n \telse:\r\n \t\tcontinue\r\n\r\n for i in range(len(R)):\r\n for j in R.columns:\r\n if i == j:\r\n R.iloc[i, j] = 0\r\n else:\r\n continue\r\n\r\n\r\n\r\n R = pd.DataFrame(R)\r\n data_test = pd.DataFrame(data_test)\r\n\r\n\r\n data_test_original = pd.DataFrame(data_test_original)\r\n\r\n # # # save the recovered matrix\r\n # ss = str(w[0]) + '_' + str(w[1]) + '_' + str(w[2])\r\n # if fraction == 40 or fraction == 99:\r\n # R.to_csv('/content/drive/MyDrive/PhD work/Projects/parameter estimation/virgili more results/stage 2/' + ss + '/R_' + str(fraction) + '.csv', index = False)\r\n # # save original Facebook network\r\n # data_test_original.to_csv('./Original_fb.csv')\r\n\r\n##################################################################\r\n# trivial 0 test:\r\n# replace all missing entries with 0\r\n# all missing entries are already 0\r\n\r\n\r\n\r\n##################################################################\r\n print (\"-------------- Calculating error only for unobserved entries--------------------\")\r\n\r\n [r,c] = data_test.shape\r\n # vectorize matrices - placeholders\r\n hop = []\r\n ori = []\r\n # meane = []\r\n # abse = []\r\n\r\n\r\n hadamard_test = 
np.array(hadamard_test)\r\n\r\n        p = 0\r\n        for i in range(r):\r\n            for j in range(c):\r\n                if hadamard_test[i,j] == 0: # considers error on only unobserved entries\r\n                    hop.append(R.iloc[i,j])\r\n                    ori.append(data_test_original.iloc[i,j])\r\n                    p = p+1\r\n\r\n        # # mean and absolute hop error calculation----------------\r\n        # mean_err: mean error\r\n        # abs_err: AHDE - Absolute hop distance error\r\n        hop = np.array(hop)\r\n        ori = np.array(ori)\r\n        x = np.round(hop-ori)\r\n\r\n        # print (\"numerator:\", np.sum(abs(x)))\r\n        # print (\"sum of unobserved entries:\", np.sum(ori))\r\n        # print (\"b: total unobserved entries:\", len(ori))\r\n\r\n        mean_err = (np.sum(abs(x)))/(np.sum(ori)) \r\n        mean_err = mean_err*100\r\n        mean_std = np.std(abs(x))\r\n\r\n        abs_err = (np.sum(abs(x)))/(len(ori)) # divided by the number of unobserved entries\r\n        abs_std = np.std(abs(x))\r\n\r\n        print (mean_err, abs_err, mean_std, abs_std)\r\n\r\n        return (mean_err, abs_err, mean_std, abs_std)","sub_path":"trivial tests/trivial 0/RobustDeepAutoencoder.py","file_name":"RobustDeepAutoencoder.py","file_ext":"py","file_size_in_byte":16915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"238074912","text":"\ndef isprime(n):\n\tif n<=1:\n\t\treturn False\n\tif(n==2 or n==3):\n\t\treturn True\n\tif(n%2==0 or n%3==0):\n\t\treturn False\n\ti=5\n\twhile(i*i <= n): # <= so that squares of primes (25, 49, ...) are rejected\n\t\t# print(i)\n\t\tif(n%i==0 or n%(i+2)==0):\n\t\t\treturn False\n\t\ti+=6\n\treturn True\n\nt=int(input())\nfor test in range(t):\n\tx,k=input().split()\n\tx=int(x)#no of divisors. should be more\n\tk=int(k)#no of primes. should be less\n\tif(k>x):\n\t\tprint(0)\n\telse:\n\t\tif(x==k):\n\t\t\tprint(0)\n\t\telif(not isprime(x)):\n\t\t\tprint(1)\n\t\telse:\n\t\t\tprint(0)\n\t\t\t\t\n","sub_path":"strno.py","file_name":"strno.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"21973502","text":"class Node:\r\n\r\n    def __init__(self, v, p, n):\r\n        '''\r\n        __init__(any,Node,Node) -> None\r\n        \r\n        initializes a Node with a value v, a previous node p and a next node n\r\n        '''\r\n        self.prev = p\r\n        self.next = n\r\n        self.v = v\r\n\r\n        return None\r\n\r\n    def copy(self):\r\n        '''\r\n        copy(Node) -> Node\r\n        \r\n        creates a new copy of Node\r\n        '''\r\n        return Node(self.v,self.prev, self.next)\r\n\r\n    def hasNext(self):\r\n        '''\r\n        hasNext() -> boolean\r\n        \r\n        returns true if Node has next Node\r\n        '''\r\n        return self.next!=None\r\n\r\n    def hasPrev(self):\r\n        '''\r\n        hasPrev() -> boolean\r\n        \r\n        returns true if Node has a previous Node\r\n        '''\r\n        return self.prev!=None\r\n\r\nclass doublyLinkedList:\r\n\r\n    def __init__(self, v = None):\r\n        '''\r\n        __init__(any) -> None\r\n        \r\n        initializes a list with a head Node containing v if specified, None otherwise\r\n        '''\r\n        if (v != None):\r\n            self.size = 1\r\n            self.head = Node(v,None,None)\r\n            self.tail = self.head\r\n            \r\n            self.head.next = self.tail\r\n            
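A quick usage sketch for the doublyLinkedList defined here, assuming the goTo/remove fixes applied just below; the values and assertions are illustrative, not from the repo:

dll = doublyLinkedList()
for ch in 'abc':
    dll.add(ch)             # addFirst on the empty list, then tail appends
assert dll.get(1) == 'b'    # goTo walks current forward to index 1
assert dll.remove(1) == 'b' and dll.size == 2
print(dll)                  # prints 'a' and 'c' on separate lines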
self.head.prev = self.tail\r\n self.tail.next = self.head\r\n self.tail.prev = self.head\r\n \r\n self.current = self.head\r\n self.currentIterator = 0\r\n\r\n return None\r\n\r\n def add(self, v):\r\n '''\r\n add(any) -> None\r\n \r\n adds a Node containing element v to the end of the list\r\n '''\r\n if (self.size == 0):\r\n self.addFirst(v)\r\n\r\n else:\r\n self.tail.next = Node(v, self.tail, self.head)\r\n self.tail = self.tail.next\r\n self.size += 1\r\n self.currentIterator += 1\r\n\r\n return None\r\n\r\n def get(self, i):\r\n '''\r\n get(integer) -> value\r\n \r\n iterates List to integer i and returns the value in the Node at i\r\n '''\r\n if (self.goTo(i) == True):\r\n return self.current.v\r\n \r\n def goTo(self, i):\r\n '''\r\n goTo(integer) -> boolean\r\n \r\n iterates List to integer i and returns True if successful, false otherwise\r\n '''\r\n if (i > self.size-1):\r\n print (\"goTo(\" + str(i) + \") can't complete, \" + str(i) + \" is out of range\")\r\n return False\r\n else:\r\n\r\n if (self.currentIterator Node\r\n \r\n iterates List to integer i and removes Node found at index i and returns it, returns\r\n false if i is out of range\r\n '''\r\n if (i > self.size-1):\r\n print (\"OUT OF RANGE!!!\")\r\n return False\r\n else:\r\n self.goTo(i-1)\r\n temp = self.current.next.copy()\r\n self.current.next = self.current.next.next\r\n self.current.next.next.prev = self.current\r\n self.size -= 1\r\n\r\n return temp.v\r\n\r\n def __str__(self):\r\n '''\r\n (None) -> String\r\n\r\n returns a formated String of list\r\n '''\r\n result = \"\"\r\n\r\n for x in range(0,self.size):\r\n result += str(self.get(x)) + \"\\n\"\r\n\r\n return result\r\n\r\n def __repr__(self):\r\n '''\r\n None) -> String\r\n\r\n returns a formated String of list\r\n '''\r\n result = \"\"\r\n\r\n for x in range(0,self.size):\r\n result += self.get(x) + \"\\n\"\r\n\r\n return result\r\n","sub_path":"doublyLinkedList.py","file_name":"doublyLinkedList.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"559179321","text":"\"\"\" The main script to run the measurements with a mutual inductance probe\n\"\"\"\n# Import other modules\nimport importlib\nimport time\nfrom elflab import abstracts\nfrom elflab.dataloggers import csvlogger\nimport elflab.projects.sims.mi_common as mi\n\nDEBUG_INFO = False\n\n# Parameters to be set by the user for each measurement\n# ____Measurement Parameters\nMEASUREMENT_PERIOD = 0.001 # interval between single measurements, in s\nTHERMOMETER = (\"elflab.devices.thermometers.fake_therms\", \"StepTherm\")\n # (module, class) of the instrument\nLOCKIN = (r\"elflab.devices.lockins.fake_lockins\", \"SinCosLockin\")\nMAGNET = (r\"elflab.devices.magnets.fake_magnets\", \"StepMagnet\")\n\n# ____Plot Parameters\nNROWS = 2 # number of rows of sub plots\nNCOLS = 1 # number of columns of sub plots\nPLOT_REFRESH_PERIOD = 0.5 # Interval between plot refreshes\nPLOT_LISTEN_PERIOD = 0.003 # Interval between listening events\n\nXYVARS = [\n [(\"t\", \"X\")],\n [(\"t\", \"Y\")]\n ] # Names of variable pairs to plot in each sub-plot\n \n\nclass SimMI(abstracts.ExperimentWithLogger):\n title = \"simulated MI\"\n \n default_params = {\"sample_interval\":'0.1', \"dummy\":'10'}\n var_order = mi.indicesData\n var_titles = mi.dataLabels\n format_strings = mi.formatStrings\n \n def __init__(self, params, filename, **kwargs):\n self.measurement_interval = float(params[\"sample_interval\"])\n self.current_values = 
mi.initialData.copy()\n \n \n self.plotXYs = XYVARS\n self.n = 0\n time.perf_counter()\n ThermClass = getattr(importlib.import_module(THERMOMETER[0]), THERMOMETER[1])\n self.therm = ThermClass()\n \n MagnetClass = getattr(importlib.import_module(MAGNET[0]), MAGNET[1])\n self.magnet = MagnetClass()\n \n LockinClass = getattr(importlib.import_module(LOCKIN[0]), LOCKIN[1])\n self.lockin = LockinClass()\n \n self.logger = csvlogger.Logger(filename, self.var_order, self.var_titles, self.format_strings)\n \n def measure(self):\n self.current_values[\"n\"] += 1\n self.current_values[\"t\"] = time.perf_counter()\n (t, self.current_values[\"I_therm\"], self.current_values[\"V_therm\"], self.current_values[\"T\"]) = self.therm.read()\n (t, self.current_values[\"I_mag\"], self.current_values[\"H\"]) = self.magnet.read()\n (self.current_values[\"X\"], self.current_values[\"Y\"]) = self.lockin.readXY()\n \n def finish(self):\n pass\n \n def start(self):\n self.logger.start()\n \n def sequence(self):\n while True:\n yield True\n","sub_path":"projects/sims/mi_csv.py","file_name":"mi_csv.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"255227743","text":"from iai.Controller import Controller\nfrom iai.utilities import *\nimport re\n\n\nclass DigitalIO(object):\n def __init__(self, controller, No, io_type=\"out\"):\n \"\"\"\n @type controller: Controller\n \"\"\"\n if type(controller) is Controller:\n self._controller = controller\n if io_type == \"out\":\n No -= 300 # The out io No is large than 300\n self.No_in_byte = int(No / 8)\n self.mask_in_bits = 1 << (No % 8)\n elif io_type == \"in\":\n raise NotImplementedError\n else:\n raise NotImplementedError\n self.io_type = io_type\n\n def _get_byte(self):\n if self.io_type == \"out\":\n command = \"?[station]OUT\"\n self.send_command(command)\n temp = self.receive_command()\n temp = re.findall(\"OUT(.*)\\r\\n\", temp)[0]\n temp = list(zip(temp[0::2], temp[1::2]))\n temp = temp[self.No_in_byte]\n temp = temp[0] + temp[1]\n return int(temp, 16)\n else:\n pass\n\n def get(self):\n if self._get_byte() & self.mask_in_bits > 0:\n return 1\n return 0\n\n def set(self, data):\n assert(data == 1 or data == 0)\n if self.io_type == \"out\":\n current_state = self._get_byte()\n command = \"![station]OTS\"\n command += (\"% 2i\" % self.No_in_byte)[-2:]\n if data == 1:\n current_state = current_state | self.mask_in_bits\n command += (\"% 2x\" % current_state)[-2:]\n else:\n current_state = current_state & ~self.mask_in_bits\n command += (\"% 2x\" % current_state)[-2:]\n else:\n raise NotImplementedError\n self.send_command(command)\n self.receive_command()\n # debug_print(self.receive_command())\n\n def send_command(self, command):\n self._controller.send_command(command)\n\n def receive_command(self):\n return self._controller.receive_command()\n\n\n","sub_path":"iai/DigitalIO.py","file_name":"DigitalIO.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"473610904","text":"# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 
\"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for the QuantumTape\"\"\"\nimport copy\n\nimport numpy as np\nimport pytest\n\nimport pennylane as qml\nfrom pennylane import CircuitGraph\nfrom pennylane.tape import QuantumTape\nfrom pennylane.measure import MeasurementProcess, expval, sample, var\n\n\ndef TestOperationMonkeypatching():\n \"\"\"Test that operations are monkeypatched only within the quantum tape\"\"\"\n with QuantumTape() as tape:\n op = qml.RX(0.432, wires=0)\n obs = qml.PauliX(wires=\"a\")\n measure = qml.expval(qml.PauliX(wires=\"a\"))\n\n assert tape.operations == [op]\n assert tape.observables == [obs]\n\n # now create an old QNode\n dev = qml.device(\"default.qubit\", wires=[0, \"a\"])\n\n @qml.qnode(dev)\n def func(x):\n global op\n op = qml.RX(x, wires=0)\n return qml.expval(qml.PauliX(wires=\"a\"))\n\n # this should evaluate without error\n func(0.432)\n\n assert func.circuit.operations == [op]\n\n\nclass TestConstruction:\n \"\"\"Test for queuing and construction\"\"\"\n\n @pytest.fixture\n def make_tape(self):\n ops = []\n obs = []\n\n with QuantumTape() as tape:\n ops += [qml.RX(0.432, wires=0)]\n ops += [qml.Rot(0.543, 0, 0.23, wires=0)]\n ops += [qml.CNOT(wires=[0, \"a\"])]\n ops += [qml.RX(0.133, wires=4)]\n obs += [qml.PauliX(wires=\"a\")]\n qml.expval(obs[0])\n obs += [qml.probs(wires=[0, \"a\"])]\n\n return tape, ops, obs\n\n def test_qubit_queuing(self, make_tape):\n \"\"\"Test that qubit quantum operations correctly queue\"\"\"\n tape, ops, obs = make_tape\n\n assert len(tape.queue) == 7\n assert tape.operations == ops\n assert tape.observables == obs\n assert tape.output_dim == 5\n assert tape.interface is None\n\n assert tape.wires == qml.wires.Wires([0, \"a\", 4])\n assert tape._output_dim == len(obs[0].wires) + 2 ** len(obs[1].wires)\n\n def test_observable_processing(self, make_tape):\n \"\"\"Test that observables are processed correctly\"\"\"\n tape, ops, obs = make_tape\n\n # test that the internal tape._measurements list is created properly\n assert isinstance(tape._measurements[0], MeasurementProcess)\n assert tape._measurements[0].return_type == qml.operation.Expectation\n assert tape._measurements[0].obs == obs[0]\n\n assert isinstance(tape._measurements[1], MeasurementProcess)\n assert tape._measurements[1].return_type == qml.operation.Probability\n\n # test the public observables property\n assert len(tape.observables) == 2\n assert tape.observables[0].name == \"PauliX\"\n assert tape.observables[1].return_type == qml.operation.Probability\n\n # test the public measurements property\n assert len(tape.measurements) == 2\n assert all(isinstance(m, MeasurementProcess) for m in tape.measurements)\n assert tape.observables[0].return_type == qml.operation.Expectation\n assert tape.observables[1].return_type == qml.operation.Probability\n\n def test_tensor_observables_matmul(self):\n \"\"\"Test that tensor observables are correctly processed from the annotated\n queue. 
Here, we test multiple tensor observables constructed via matmul.\"\"\"\n\n with QuantumTape() as tape:\n op = qml.RX(1.0, wires=0)\n t_obs1 = qml.PauliZ(0) @ qml.PauliX(1)\n t_obs2 = t_obs1 @ qml.PauliZ(3)\n m = qml.expval(t_obs2)\n\n assert tape.operations == [op]\n assert tape.observables == [t_obs2]\n assert tape.measurements[0].return_type is qml.operation.Expectation\n assert tape.measurements[0].obs is t_obs2\n\n def test_tensor_observables_rmatmul(self):\n \"\"\"Test that tensor observables are correctly processed from the annotated\n queue. Here, we test multiple tensor observables constructed via matmul\n with the observable occuring on the left hand side.\"\"\"\n\n with QuantumTape() as tape:\n op = qml.RX(1.0, wires=0)\n t_obs1 = qml.PauliZ(1) @ qml.PauliX(0)\n t_obs2 = qml.Hadamard(2) @ t_obs1\n m = qml.expval(t_obs2)\n\n assert tape.operations == [op]\n assert tape.observables == [t_obs2]\n assert tape.measurements[0].return_type is qml.operation.Expectation\n assert tape.measurements[0].obs is t_obs2\n\n def test_tensor_observables_tensor_init(self):\n \"\"\"Test that tensor observables are correctly processed from the annotated\n queue. Here, we test multiple tensor observables constructed via explicit\n Tensor creation.\"\"\"\n\n with QuantumTape() as tape:\n op = qml.RX(1.0, wires=0)\n t_obs1 = qml.PauliZ(1) @ qml.PauliX(0)\n t_obs2 = qml.operation.Tensor(t_obs1, qml.Hadamard(2))\n m = qml.expval(t_obs2)\n\n assert tape.operations == [op]\n assert tape.observables == [t_obs2]\n assert tape.measurements[0].return_type is qml.operation.Expectation\n assert tape.measurements[0].obs is t_obs2\n\n def test_tensor_observables_tensor_matmul(self):\n \"\"\"Test that tensor observables are correctly processed from the annotated\n queue\". Here, wetest multiple tensor observables constructed via matmul\n between two tensor observables.\"\"\"\n\n with QuantumTape() as tape:\n op = qml.RX(1.0, wires=0)\n t_obs1 = qml.PauliZ(0) @ qml.PauliX(1)\n t_obs2 = qml.PauliY(2) @ qml.PauliZ(3)\n t_obs = t_obs1 @ t_obs2\n m = qml.var(t_obs)\n\n assert tape.operations == [op]\n assert tape.observables == [t_obs]\n assert tape.measurements[0].return_type is qml.operation.Variance\n assert tape.measurements[0].obs is t_obs\n\n def test_qubit_diagonalization(self, make_tape):\n \"\"\"Test that qubit diagonalization works as expected\"\"\"\n tape, ops, obs = make_tape\n\n obs_rotations = [o.diagonalizing_gates() for o in obs]\n obs_rotations = [item for sublist in obs_rotations for item in sublist]\n\n for o1, o2 in zip(tape.diagonalizing_gates, obs_rotations):\n assert isinstance(o1, o2.__class__)\n assert o1.wires == o2.wires\n\n def test_tensor_process_queuing(self):\n \"\"\"Test that tensors are correctly queued\"\"\"\n with QuantumTape() as tape:\n A = qml.PauliX(wires=0)\n B = qml.PauliZ(wires=1)\n C = A @ B\n D = qml.expval(C)\n\n assert len(tape.queue) == 4\n assert not tape.operations\n assert tape.measurements == [D]\n assert tape.observables == [C]\n assert tape.output_dim == 1\n\n def test_multiple_contexts(self):\n \"\"\"Test multiple contexts with a single tape.\"\"\"\n ops = []\n obs = []\n\n with QuantumTape() as tape:\n ops += [qml.RX(0.432, wires=0)]\n\n a = qml.Rot(0.543, 0, 0.23, wires=1)\n b = qml.CNOT(wires=[2, \"a\"])\n\n with tape:\n ops += [qml.RX(0.133, wires=0)]\n obs += [qml.PauliX(wires=\"a\")]\n qml.expval(obs[0])\n obs += [qml.probs(wires=[0, \"a\"])]\n\n assert len(tape.queue) == 5\n assert tape.operations == ops\n assert tape.observables == obs\n assert 
tape.output_dim == 5\n\n assert a not in tape.operations\n assert b not in tape.operations\n\n assert tape.wires == qml.wires.Wires([0, \"a\"])\n\n def test_state_preparation(self):\n \"\"\"Test that state preparations are correctly processed\"\"\"\n params = [np.array([1, 0, 1, 0]) / np.sqrt(2), 1]\n\n with QuantumTape() as tape:\n A = qml.QubitStateVector(params[0], wires=[0, 1])\n B = qml.RX(params[1], wires=0)\n qml.expval(qml.PauliZ(wires=1))\n\n assert tape.operations == [A, B]\n assert tape._prep == [A]\n assert tape.get_parameters() == params\n\n def test_state_preparation_error(self):\n \"\"\"Test that an exception is raised if a state preparation comes\n after a quantum operation\"\"\"\n with pytest.raises(ValueError, match=\"must occur prior to any quantum\"):\n with QuantumTape() as tape:\n B = qml.PauliX(wires=0)\n qml.BasisState(np.array([0, 1]), wires=[0, 1])\n\n def test_measurement_before_operation(self):\n \"\"\"Test that an exception is raised if a measurement occurs before a operation\"\"\"\n\n with pytest.raises(ValueError, match=\"must occur prior to any measurements\"):\n with QuantumTape() as tape:\n qml.expval(qml.PauliZ(wires=1))\n qml.RX(0.5, wires=0)\n qml.expval(qml.PauliZ(wires=1))\n\n def test_observable_with_no_measurement(self):\n \"\"\"Test that an exception is raised if an observable is used without a measurement\"\"\"\n\n with pytest.raises(ValueError, match=\"does not have a measurement type specified\"):\n with QuantumTape() as tape:\n qml.RX(0.5, wires=0)\n qml.Hermitian(np.array([[0, 1], [1, 0]]), wires=1)\n qml.expval(qml.PauliZ(wires=1))\n\n with pytest.raises(ValueError, match=\"does not have a measurement type specified\"):\n with QuantumTape() as tape:\n qml.RX(0.5, wires=0)\n qml.PauliX(wires=0) @ qml.PauliY(wires=1)\n qml.expval(qml.PauliZ(wires=1))\n\n def test_sampling(self):\n \"\"\"Test that the tape correctly marks itself as returning samples\"\"\"\n with QuantumTape() as tape:\n qml.expval(qml.PauliZ(wires=1))\n\n assert not tape.is_sampled\n\n with QuantumTape() as tape:\n qml.sample(qml.PauliZ(wires=0))\n\n assert tape.is_sampled\n\n\nclass TestGraph:\n \"\"\"Tests involving graph creation\"\"\"\n\n def test_graph_creation(self, mocker):\n \"\"\"Test that the circuit graph is correctly created\"\"\"\n spy = mocker.spy(CircuitGraph, \"__init__\")\n\n with QuantumTape() as tape:\n op = qml.RX(1.0, wires=0)\n obs = qml.PauliZ(1)\n qml.expval(obs)\n\n # graph has not yet been created\n assert tape._graph is None\n spy.assert_not_called()\n\n # requesting the graph creates it\n g = tape.graph\n assert g.operations == [op]\n assert g.observables == [obs]\n assert tape._graph is not None\n spy.assert_called_once()\n\n # calling the graph property again does\n # not reconstruct the graph\n g2 = tape.graph\n assert g2 is g\n spy.assert_called_once()\n\n\nclass TestResourceEstimation:\n \"\"\"Tests for verifying resource counts and depths of tapes.\"\"\"\n\n @pytest.fixture\n def make_empty_tape(self):\n with QuantumTape() as tape:\n qml.probs(wires=[0, 1])\n\n return tape\n\n @pytest.fixture\n def make_tape(self):\n params = [0.432, 0.123, 0.546, 0.32, 0.76]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=0)\n qml.Rot(*params[1:4], wires=0)\n qml.CNOT(wires=[0, \"a\"])\n qml.RX(params[4], wires=4)\n qml.expval(qml.PauliX(wires=\"a\"))\n qml.probs(wires=[0, \"a\"])\n\n return tape\n\n @pytest.fixture\n def make_extendible_tape(self):\n params = [0.432, 0.123, 0.546, 0.32, 0.76]\n\n with QuantumTape() as tape:\n qml.RX(params[0], 
wires=0)\n qml.Rot(*params[1:4], wires=0)\n qml.CNOT(wires=[0, \"a\"])\n qml.RX(params[4], wires=4)\n\n return tape\n\n def test_resources_empty_tape(self, make_empty_tape):\n \"\"\"Test that empty tapes return empty resource counts.\"\"\"\n tape = make_empty_tape\n\n assert tape.get_depth() == 0\n assert len(tape.get_resources()) == 0\n\n def test_resources_tape(self, make_tape):\n \"\"\"Test that regular tapes return correct number of resources.\"\"\"\n tape = make_tape\n\n assert tape.get_depth() == 3\n\n # Verify resource counts\n resources = tape.get_resources()\n assert len(resources) == 3\n assert resources[\"RX\"] == 2\n assert resources[\"Rot\"] == 1\n assert resources[\"CNOT\"] == 1\n\n def test_resources_add_to_tape(self, make_extendible_tape):\n \"\"\"Test that tapes return correct number of resources after adding to them.\"\"\"\n tape = make_extendible_tape\n\n assert tape.get_depth() == 3\n\n resources = tape.get_resources()\n assert len(resources) == 3\n assert resources[\"RX\"] == 2\n assert resources[\"Rot\"] == 1\n assert resources[\"CNOT\"] == 1\n\n with tape as tape:\n qml.CNOT(wires=[0, 1])\n qml.RZ(0.1, wires=3)\n qml.expval(qml.PauliX(wires=\"a\"))\n qml.probs(wires=[0, \"a\"])\n\n assert tape.get_depth() == 4\n\n resources = tape.get_resources()\n assert len(resources) == 4\n assert resources[\"RX\"] == 2\n assert resources[\"Rot\"] == 1\n assert resources[\"CNOT\"] == 2\n assert resources[\"RZ\"] == 1\n\n\nclass TestParameters:\n \"\"\"Tests for parameter processing, setting, and manipulation\"\"\"\n\n @pytest.fixture\n def make_tape(self):\n params = [0.432, 0.123, 0.546, 0.32, 0.76]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=0)\n qml.Rot(*params[1:4], wires=0)\n qml.CNOT(wires=[0, \"a\"])\n qml.RX(params[4], wires=4)\n qml.expval(qml.PauliX(wires=\"a\"))\n qml.probs(wires=[0, \"a\"])\n\n return tape, params\n\n def test_parameter_processing(self, make_tape):\n \"\"\"Test that parameters are correctly counted and processed\"\"\"\n tape, params = make_tape\n assert tape.num_params == len(params)\n assert tape.trainable_params == set(range(len(params)))\n assert tape.get_parameters() == params\n\n def test_set_trainable_params(self, make_tape):\n \"\"\"Test that setting trainable parameters works as expected\"\"\"\n tape, params = make_tape\n trainable = {0, 2, 3}\n tape.trainable_params = trainable\n assert tape._trainable_params == trainable\n assert tape.num_params == 3\n assert tape.get_parameters() == [params[i] for i in tape.trainable_params]\n\n # add additional trainable parameters\n trainable = {1, 2, 3, 4}\n tape.trainable_params = trainable\n assert tape._trainable_params == trainable\n assert tape.num_params == 4\n assert tape.get_parameters() == [params[i] for i in tape.trainable_params]\n\n def test_changing_params(self, make_tape):\n \"\"\"Test that changing trainable parameters works as expected\"\"\"\n tape, params = make_tape\n trainable = {0, 2, 3}\n tape.trainable_params = trainable\n assert tape._trainable_params == trainable\n assert tape.num_params == 3\n assert tape.get_parameters() == [params[i] for i in tape.trainable_params]\n assert tape.get_parameters(trainable_only=False) == params\n\n def test_set_trainable_params_error(self, make_tape):\n \"\"\"Test that exceptions are raised if incorrect parameters\n are set as trainable\"\"\"\n tape, _ = make_tape\n\n with pytest.raises(ValueError, match=\"must be positive integers\"):\n tape.trainable_params = {-1, 0}\n\n with pytest.raises(ValueError, match=\"must be positive 
integers\"):\n tape.trainable_params = {0.5}\n\n with pytest.raises(ValueError, match=\"has at most 5 parameters\"):\n tape.trainable_params = {0, 7}\n\n def test_setting_parameters(self, make_tape):\n \"\"\"Test that parameters are correctly modified after construction\"\"\"\n tape, params = make_tape\n new_params = [0.6543, -0.654, 0, 0.3, 0.6]\n\n tape.set_parameters(new_params)\n\n for pinfo, pval in zip(tape._par_info.values(), new_params):\n assert pinfo[\"op\"].data[pinfo[\"p_idx\"]] == pval\n\n assert tape.get_parameters() == new_params\n\n new_params = [0.1, -0.2, 1, 5, 0]\n tape.data = new_params\n\n for pinfo, pval in zip(tape._par_info.values(), new_params):\n assert pinfo[\"op\"].data[pinfo[\"p_idx\"]] == pval\n\n assert tape.get_parameters() == new_params\n\n def test_setting_free_parameters(self, make_tape):\n \"\"\"Test that free parameters are correctly modified after construction\"\"\"\n tape, params = make_tape\n new_params = [-0.654, 0.3]\n\n tape.trainable_params = {1, 3}\n tape.set_parameters(new_params)\n\n count = 0\n for idx, pinfo in tape._par_info.items():\n if idx in tape.trainable_params:\n assert pinfo[\"op\"].data[pinfo[\"p_idx\"]] == new_params[count]\n count += 1\n else:\n assert pinfo[\"op\"].data[pinfo[\"p_idx\"]] == params[idx]\n\n assert tape.get_parameters(trainable_only=False) == [\n params[0],\n new_params[0],\n params[2],\n new_params[1],\n params[4],\n ]\n\n def test_setting_parameters_unordered(self, make_tape, monkeypatch):\n \"\"\"Test that an 'unordered' trainable_params set does not affect\n the setting of parameter values\"\"\"\n tape, params = make_tape\n new_params = [-0.654, 0.3]\n\n with monkeypatch.context() as m:\n m.setattr(tape, \"_trainable_params\", {3, 1})\n tape.set_parameters(new_params)\n\n assert tape.get_parameters(trainable_only=True) == [\n new_params[0],\n new_params[1],\n ]\n\n assert tape.get_parameters(trainable_only=False) == [\n params[0],\n new_params[0],\n params[2],\n new_params[1],\n params[4],\n ]\n\n def test_setting_all_parameters(self, make_tape):\n \"\"\"Test that all parameters are correctly modified after construction\"\"\"\n tape, params = make_tape\n new_params = [0.6543, -0.654, 0, 0.3, 0.6]\n\n tape.trainable_params = {1, 3}\n tape.set_parameters(new_params, trainable_only=False)\n\n for pinfo, pval in zip(tape._par_info.values(), new_params):\n assert pinfo[\"op\"].data[pinfo[\"p_idx\"]] == pval\n\n assert tape.get_parameters(trainable_only=False) == new_params\n\n def test_setting_parameters_error(self, make_tape):\n \"\"\"Test that exceptions are raised if incorrect parameters\n are attempted to be set\"\"\"\n tape, _ = make_tape\n\n with pytest.raises(ValueError, match=\"Number of provided parameters does not match\"):\n tape.set_parameters([0.54])\n\n with pytest.raises(ValueError, match=\"Number of provided parameters does not match\"):\n tape.trainable_params = {2, 3}\n tape.set_parameters([0.54, 0.54, 0.123])\n\n def test_array_parameter(self):\n \"\"\"Test that array parameters integrate properly\"\"\"\n a = np.array([1, 1, 0, 0]) / np.sqrt(2)\n params = [a, 0.32, 0.76, 1.0]\n\n with QuantumTape() as tape:\n op = qml.QubitStateVector(params[0], wires=0)\n qml.Rot(params[1], params[2], params[3], wires=0)\n\n assert tape.num_params == len(params)\n assert tape.get_parameters() == params\n\n b = np.array([0, 1, 0, 0])\n new_params = [b, 0.543, 0.654, 0.123]\n tape.set_parameters(new_params)\n assert tape.get_parameters() == new_params\n\n assert np.all(op.data[0] == b)\n\n def 
test_measurement_parameter(self):\n \"\"\"Test that measurement parameters integrate properly\"\"\"\n H = np.array([[1, 0], [0, -1]])\n params = [0.32, 0.76, 1.0, H]\n\n with QuantumTape() as tape:\n qml.Rot(params[0], params[1], params[2], wires=0)\n obs = qml.Hermitian(params[3], wires=0)\n qml.expval(obs)\n\n assert tape.num_params == len(params)\n assert tape.get_parameters() == params\n\n H2 = np.array([[0, 1], [1, 1]])\n new_params = [0.543, 0.654, 0.123, H2]\n tape.set_parameters(new_params)\n assert tape.get_parameters() == new_params\n\n assert np.all(obs.data[0] == H2)\n\n\nclass TestInverse:\n \"\"\"Tests for tape inversion\"\"\"\n\n def test_inverse(self):\n \"\"\"Test that inversion works as expected\"\"\"\n init_state = np.array([1, 1])\n p = [0.1, 0.2, 0.3, 0.4]\n\n with QuantumTape() as tape:\n prep = qml.BasisState(init_state, wires=[0, \"a\"])\n ops = [qml.RX(p[0], wires=0), qml.Rot(*p[1:], wires=0).inv(), qml.CNOT(wires=[0, \"a\"])]\n m1 = qml.probs(wires=0)\n m2 = qml.probs(wires=\"a\")\n\n tape.inv()\n\n # check that operation order is reversed\n assert tape.operations == [prep] + ops[::-1]\n\n # check that operations are inverted\n assert ops[0].inverse\n assert not ops[1].inverse\n assert ops[2].inverse\n\n # check that parameter order has reversed\n assert tape.get_parameters() == [init_state, p[1], p[2], p[3], p[0]]\n\n def test_parameter_transforms(self):\n \"\"\"Test that inversion correctly changes trainable parameters\"\"\"\n init_state = np.array([1, 1])\n p = [0.1, 0.2, 0.3, 0.4]\n\n with QuantumTape() as tape:\n prep = qml.BasisState(init_state, wires=[0, \"a\"])\n ops = [qml.RX(p[0], wires=0), qml.Rot(*p[1:], wires=0).inv(), qml.CNOT(wires=[0, \"a\"])]\n m1 = qml.probs(wires=0)\n m2 = qml.probs(wires=\"a\")\n\n tape.trainable_params = {1, 2}\n tape.inv()\n\n # check that operation order is reversed\n assert tape.trainable_params == {1, 4}\n assert tape.get_parameters() == [p[1], p[0]]\n\n # undo the inverse\n tape.inv()\n assert tape.trainable_params == {1, 2}\n assert tape.get_parameters() == [p[0], p[1]]\n assert tape._ops == ops\n\n\nclass TestExpand:\n \"\"\"Tests for tape expansion\"\"\"\n\n def test_decomposition(self):\n \"\"\"Test expanding a tape with operations that have decompositions\"\"\"\n with QuantumTape() as tape:\n qml.Rot(0.1, 0.2, 0.3, wires=0)\n\n new_tape = tape.expand()\n\n assert len(new_tape.operations) == 3\n assert new_tape.get_parameters() == [0.1, 0.2, 0.3]\n assert new_tape.trainable_params == {0, 1, 2}\n\n assert isinstance(new_tape.operations[0], qml.RZ)\n assert isinstance(new_tape.operations[1], qml.RY)\n assert isinstance(new_tape.operations[2], qml.RZ)\n\n # check that modifying the new tape does not affect the old tape\n\n new_tape.trainable_params = {0}\n new_tape.set_parameters([10])\n\n assert tape.get_parameters() == [0.1, 0.2, 0.3]\n assert tape.trainable_params == {0, 1, 2}\n\n def test_decomposition_removing_parameters(self):\n \"\"\"Test that decompositions which reduce the number of parameters\n on the tape retain tape consistency.\"\"\"\n with QuantumTape() as tape:\n qml.BasisState(np.array([1]), wires=0)\n\n # since expansion calls `BasisStatePreparation` we have to expand twice\n new_tape = tape.expand(depth=2)\n\n assert len(new_tape.operations) == 1\n assert new_tape.operations[0].name == \"PauliX\"\n assert new_tape.operations[0].wires.tolist() == [0]\n assert new_tape.num_params == 0\n assert new_tape.get_parameters() == []\n\n assert isinstance(new_tape.operations[0], qml.PauliX)\n\n def 
test_decomposition_adding_parameters(self):\n \"\"\"Test that decompositions which increase the number of parameters\n on the tape retain tape consistency.\"\"\"\n with QuantumTape() as tape:\n qml.PauliX(wires=0)\n\n new_tape = tape.expand()\n\n assert len(new_tape.operations) == 3\n\n assert new_tape.operations[0].name == \"PhaseShift\"\n assert new_tape.operations[1].name == \"RX\"\n assert new_tape.operations[2].name == \"PhaseShift\"\n\n assert new_tape.num_params == 3\n assert new_tape.get_parameters() == [np.pi / 2, np.pi, np.pi / 2]\n\n def test_nested_tape(self):\n \"\"\"Test that a nested tape properly expands\"\"\"\n with QuantumTape() as tape1:\n with QuantumTape() as tape2:\n op1 = qml.RX(0.543, wires=0)\n op2 = qml.RY(0.1, wires=0)\n\n assert tape1.num_params == 2\n assert tape1.operations == [tape2]\n\n new_tape = tape1.expand()\n assert new_tape.num_params == 2\n assert len(new_tape.operations) == 2\n assert isinstance(new_tape.operations[0], qml.RX)\n assert isinstance(new_tape.operations[1], qml.RY)\n\n def test_nesting_and_decomposition(self):\n \"\"\"Test an example that contains nested tapes and operation decompositions.\"\"\"\n\n with QuantumTape() as tape:\n qml.BasisState(np.array([1, 1]), wires=[0, \"a\"])\n\n with QuantumTape() as tape2:\n qml.Rot(0.543, 0.1, 0.4, wires=0)\n\n qml.CNOT(wires=[0, \"a\"])\n qml.RY(0.2, wires=\"a\")\n qml.probs(wires=0), qml.probs(wires=\"a\")\n\n new_tape = tape.expand()\n assert len(new_tape.operations) == 4\n\n def test_stopping_criterion(self):\n \"\"\"Test that gates specified in the stop_at\n argument are not expanded.\"\"\"\n with QuantumTape() as tape:\n qml.U3(0, 1, 2, wires=0)\n qml.Rot(3, 4, 5, wires=0)\n qml.probs(wires=0), qml.probs(wires=\"a\")\n\n new_tape = tape.expand(stop_at=lambda obj: obj.name in [\"Rot\"])\n assert len(new_tape.operations) == 4\n assert \"Rot\" in [i.name for i in new_tape.operations]\n assert not \"U3\" in [i.name for i in new_tape.operations]\n\n def test_depth_expansion(self):\n \"\"\"Test expanding with depth=2\"\"\"\n with QuantumTape() as tape:\n # Will be decomposed into PauliX(0), PauliX(0)\n # Each PauliX will then be decomposed into PhaseShift, RX, PhaseShift.\n qml.BasisState(np.array([1, 1]), wires=[0, \"a\"])\n\n with QuantumTape() as tape2:\n # will be decomposed into a RZ, RY, RZ\n qml.Rot(0.543, 0.1, 0.4, wires=0)\n\n qml.CNOT(wires=[0, \"a\"])\n qml.RY(0.2, wires=\"a\")\n qml.probs(wires=0), qml.probs(wires=\"a\")\n\n new_tape = tape.expand(depth=3)\n assert len(new_tape.operations) == 11\n\n def test_stopping_criterion_with_depth(self):\n \"\"\"Test that gates specified in the stop_at\n argument are not expanded.\"\"\"\n with QuantumTape() as tape:\n # Will be decomposed into PauliX(0), PauliX(0)\n qml.BasisState(np.array([1, 1]), wires=[0, \"a\"])\n\n with QuantumTape() as tape2:\n # will be decomposed into a RZ, RY, RZ\n qml.Rot(0.543, 0.1, 0.4, wires=0)\n\n qml.CNOT(wires=[0, \"a\"])\n qml.RY(0.2, wires=\"a\")\n qml.probs(wires=0), qml.probs(wires=\"a\")\n\n new_tape = tape.expand(depth=2, stop_at=lambda obj: obj.name in [\"PauliX\"])\n assert len(new_tape.operations) == 7\n\n def test_measurement_expansion(self):\n \"\"\"Test that measurement expansion works as expected\"\"\"\n with QuantumTape() as tape:\n # expands into 2 PauliX\n qml.BasisState(np.array([1, 1]), wires=[0, \"a\"])\n qml.CNOT(wires=[0, \"a\"])\n qml.RY(0.2, wires=\"a\")\n qml.probs(wires=0)\n # expands into RY on wire b\n qml.expval(qml.PauliZ(\"a\") @ qml.Hadamard(\"b\"))\n # expands into QubitUnitary on 
wire 0\n qml.var(qml.Hermitian(np.array([[1, 2], [2, 4]]), wires=[0]))\n\n new_tape = tape.expand(expand_measurements=True)\n\n assert len(new_tape.operations) == 5\n\n expected = [qml.operation.Probability, qml.operation.Expectation, qml.operation.Variance]\n assert [m.return_type is r for m, r in zip(new_tape.measurements, expected)]\n\n expected = [None, None, None]\n assert [m.obs is r for m, r in zip(new_tape.measurements, expected)]\n\n expected = [None, [1, -1, -1, 1], [0, 5]]\n assert [m.eigvals is r for m, r in zip(new_tape.measurements, expected)]\n\n def test_expand_tape_multiple_wires(self):\n \"\"\"Test the expand() method when measurements with more than one observable on the same\n wire are used\"\"\"\n with QuantumTape() as tape1:\n qml.RX(0.3, wires=0)\n qml.RY(0.4, wires=1)\n qml.expval(qml.PauliX(0))\n qml.var(qml.PauliX(0) @ qml.PauliX(1))\n qml.expval(qml.PauliX(2))\n\n with QuantumTape() as tape2:\n qml.RX(0.3, wires=0)\n qml.RY(0.4, wires=1)\n qml.RY(-np.pi / 2, wires=0)\n qml.RY(-np.pi / 2, wires=1)\n qml.expval(qml.PauliZ(0))\n qml.var(qml.PauliZ(0) @ qml.PauliZ(1))\n qml.expval(qml.PauliX(2))\n\n tape1_exp = tape1.expand()\n\n assert tape1_exp.graph.hash == tape2.graph.hash\n\n @pytest.mark.parametrize(\"ret\", [expval, var, sample])\n def test_expand_tape_multiple_wires_non_commuting(self, ret):\n \"\"\"Test if a QuantumFunctionError is raised during tape expansion if non-commuting\n observables are on the same wire\"\"\"\n with QuantumTape() as tape:\n qml.RX(0.3, wires=0)\n qml.RY(0.4, wires=1)\n qml.expval(qml.PauliX(0))\n ret(qml.PauliZ(0))\n\n with pytest.raises(qml.QuantumFunctionError, match=\"Only observables that are qubit-wise\"):\n tape.expand(expand_measurements=True)\n\n def test_is_sampled_reserved_after_expansion(self, monkeypatch, mocker):\n \"\"\"Test that the is_sampled property is correctly set when tape\n expansion happens.\"\"\"\n dev = qml.device(\"default.qubit\", wires=1, shots=10)\n\n # Remove support for an op to enforce decomposition & tape expansion\n mock_ops = copy.copy(dev.operations)\n mock_ops.remove(\"T\")\n\n with monkeypatch.context() as m:\n m.setattr(dev, \"operations\", mock_ops)\n\n def circuit():\n qml.T(wires=0)\n return sample(qml.PauliZ(0))\n\n # Choosing parameter-shift not to swap the device under the hood\n qnode = qml.QNode(circuit, dev, diff_method=\"parameter-shift\")\n qnode()\n\n # Double-checking that the T gate is not supported\n assert \"T\" not in qnode.device.operations\n assert \"T\" not in qnode._original_device.operations\n\n assert qnode.qtape.is_sampled\n\n\nclass TestExecution:\n \"\"\"Tests for tape execution\"\"\"\n\n def test_execute_parameters(self, tol):\n \"\"\"Test execution works when parameters are both passed and not passed.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n params = [0.1, 0.2]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=[0])\n qml.RY(params[1], wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n assert tape.output_dim == 1\n\n # test execution with no parameters\n res1 = tape.execute(dev)\n assert tape.get_parameters() == params\n\n # test execution with parameters\n res2 = tape.execute(dev, params=[0.5, 0.6])\n assert tape.get_parameters() == params\n\n # test setting parameters\n tape.set_parameters(params=[0.5, 0.6])\n res3 = tape.execute(dev)\n assert np.allclose(res2, res3, atol=tol, rtol=0)\n assert not np.allclose(res1, res2, atol=tol, rtol=0)\n assert tape.get_parameters() == [0.5, 0.6]\n\n def 
test_no_output_execute(self):\n \"\"\"Test that tapes with no measurement process return\n an empty list.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n params = [0.1, 0.2]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=[0])\n qml.RY(params[1], wires=[1])\n\n res = tape.execute(dev)\n assert res.size == 0\n assert np.all(res == np.array([]))\n\n def test_incorrect_output_dim_estimate(self):\n \"\"\"Test that a quantum tape with an incorrect inferred output dimension\n corrects itself after evaluation.\"\"\"\n dev = qml.device(\"default.qubit\", wires=3)\n params = [1.0, 1.0, 1.0]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=[0])\n qml.RY(params[1], wires=[1])\n qml.RZ(params[2], wires=[2])\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=0)\n qml.probs(wires=[1])\n\n # estimate output dim should be correct\n assert tape.output_dim == sum([2, 2])\n\n # modify the output dim\n tape._output_dim = 2\n\n res = tape.execute(dev)\n assert tape.output_dim == sum([2, 2])\n\n def test_incorrect_ragged_output_dim_estimate(self):\n \"\"\"Test that a quantum tape with an incorrect *ragged* output dimension\n estimate corrects itself after evaluation.\"\"\"\n dev = qml.device(\"default.qubit\", wires=3)\n params = [1.0, 1.0, 1.0]\n\n with QuantumTape() as tape:\n qml.RX(params[0], wires=[0])\n qml.RY(params[1], wires=[1])\n qml.RZ(params[2], wires=[2])\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=0)\n qml.probs(wires=[1, 2])\n\n # estimate output dim should be correct\n assert tape.output_dim == sum([2, 4])\n\n # modify the output dim\n tape._output_dim = 2\n\n res = tape.execute(dev)\n assert tape.output_dim == sum([2, 4])\n\n def test_single_expectation_value(self, tol):\n \"\"\"Tests correct output shape and evaluation for a tape\n with a single expval output\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n assert tape.output_dim == 1\n\n res = tape.execute(dev)\n assert res.shape == (1,)\n\n expected = np.sin(y) * np.cos(x)\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n def test_multiple_expectation_values(self, tol):\n \"\"\"Tests correct output shape and evaluation for a tape\n with multiple expval outputs\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n qml.expval(qml.PauliX(1))\n\n assert tape.output_dim == 2\n\n res = tape.execute(dev)\n assert res.shape == (2,)\n\n expected = [np.cos(x), np.sin(y)]\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n def test_var_expectation_values(self, tol):\n \"\"\"Tests correct output shape and evaluation for a tape\n with expval and var outputs\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n qml.var(qml.PauliX(1))\n\n assert tape.output_dim == 2\n\n res = tape.execute(dev)\n assert res.shape == (2,)\n\n expected = [np.cos(x), np.cos(y) ** 2]\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n def test_prob_expectation_values(self, tol):\n \"\"\"Tests correct output shape and evaluation for a tape\n with prob and var outputs\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543\n y = -0.654\n\n with 
QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n qml.probs(wires=[0, 1])\n\n assert tape.output_dim == 5\n\n res = tape.execute(dev)\n\n assert isinstance(res[0], float)\n assert np.allclose(res[0], np.cos(x), atol=tol, rtol=0)\n\n assert isinstance(res[1], np.ndarray)\n assert np.allclose(res[1], np.abs(dev.state) ** 2, atol=tol, rtol=0)\n\n def test_single_mode_sample(self):\n \"\"\"Test that there is only one array of values returned\n for a single wire qml.sample\"\"\"\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.sample(qml.PauliZ(0) @ qml.PauliX(1))\n\n res = tape.execute(dev)\n assert res.shape == (1, 10)\n\n def test_multiple_samples(self):\n \"\"\"Test that there is only one array of values returned\n for multiple samples\"\"\"\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.sample(qml.PauliZ(0))\n qml.sample(qml.PauliZ(1))\n\n res = tape.execute(dev)\n assert res.shape == (2, 10)\n\n def test_samples_expval(self):\n \"\"\"Test that multiple arrays of values are returned\n for combinations of samples and statistics\"\"\"\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.sample(qml.PauliZ(0))\n qml.expval(qml.PauliZ(1))\n\n res = tape.execute(dev)\n assert res[0].shape == (10,)\n assert isinstance(res[1], np.ndarray)\n\n def test_decomposition(self, tol):\n \"\"\"Test decomposition onto a device's supported gate set\"\"\"\n dev = qml.device(\"default.qubit\", wires=1)\n\n with QuantumTape() as tape:\n qml.U3(0.1, 0.2, 0.3, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = tape.expand(stop_at=lambda obj: obj.name in dev.operations)\n res = tape.execute(dev)\n assert np.allclose(res, np.cos(0.1), atol=tol, rtol=0)\n\n def test_multiple_observables_same_wire(self):\n \"\"\"Test if an error is raised when multiple observables are evaluated on the same wire\n without first running tape.expand().\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n with QuantumTape() as tape:\n qml.expval(qml.PauliX(0) @ qml.PauliZ(1))\n qml.expval(qml.PauliX(0))\n\n with pytest.raises(qml.QuantumFunctionError, match=\"Multiple observables are being\"):\n tape.execute(dev)\n\n new_tape = tape.expand()\n new_tape.execute(dev)\n\n\nclass TestCVExecution:\n \"\"\"Tests for CV tape execution\"\"\"\n\n def test_single_output_value(self, tol):\n \"\"\"Tests correct execution and output shape for a CV tape\n with a single expval output\"\"\"\n dev = qml.device(\"default.gaussian\", wires=2)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.Displacement(x, 0, wires=[0])\n qml.Squeezing(y, 0, wires=[1])\n qml.Beamsplitter(np.pi / 4, 0, wires=[0, 1])\n qml.expval(qml.NumberOperator(0))\n\n assert tape.output_dim == 1\n\n res = tape.execute(dev)\n assert res.shape == (1,)\n\n def test_multiple_output_values(self, tol):\n \"\"\"Tests correct output shape and evaluation for a tape\n with multiple measurement types\"\"\"\n dev = qml.device(\"default.gaussian\", wires=2)\n x = 0.543\n y = -0.654\n\n with QuantumTape() as tape:\n qml.Displacement(x, 0, wires=[0])\n qml.Squeezing(y, 0, wires=[1])\n 
qml.Beamsplitter(np.pi / 4, 0, wires=[0, 1])\n qml.expval(qml.PolyXP(np.diag([0, 1, 0]), wires=0)) # X^2\n qml.var(qml.P(1))\n\n assert tape.output_dim == 2\n\n res = tape.execute(dev)\n assert res.shape == (2,)\n\n\nclass TestTapeCopying:\n \"\"\"Test for tape copying behaviour\"\"\"\n\n def test_shallow_copy(self):\n \"\"\"Test that shallow copying of a tape results in all\n contained data being shared between the original tape and the copy\"\"\"\n with QuantumTape() as tape:\n qml.BasisState(np.array([1, 0]), wires=[0, 1])\n qml.RY(0.5, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n\n copied_tape = tape.copy()\n\n assert copied_tape is not tape\n\n # the operations are simply references\n assert copied_tape.operations == tape.operations\n assert copied_tape.observables == tape.observables\n assert copied_tape.measurements == tape.measurements\n assert copied_tape.operations[0] is tape.operations[0]\n\n # operation data is also a reference\n assert copied_tape.operations[0].wires is tape.operations[0].wires\n assert copied_tape.operations[0].data[0] is tape.operations[0].data[0]\n\n # check that all tape metadata is identical\n assert tape.get_parameters() == copied_tape.get_parameters()\n assert tape.wires == copied_tape.wires\n assert tape.data == copied_tape.data\n\n # since the copy is shallow, mutating the parameters\n # on one tape will affect the parameters on another tape\n new_params = [np.array([0, 0]), 0.2]\n tape.set_parameters(new_params)\n\n # check that they are the same objects in memory\n for i, j in zip(tape.get_parameters(), new_params):\n assert i is j\n\n for i, j in zip(copied_tape.get_parameters(), new_params):\n assert i is j\n\n @pytest.mark.parametrize(\n \"copy_fn\", [lambda tape: tape.copy(copy_operations=True), lambda tape: copy.copy(tape)]\n )\n def test_shallow_copy_with_operations(self, copy_fn):\n \"\"\"Test that shallow copying of a tape and operations allows\n parameters to be set independently\"\"\"\n\n with QuantumTape() as tape:\n qml.BasisState(np.array([1, 0]), wires=[0, 1])\n qml.RY(0.5, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n\n copied_tape = copy_fn(tape)\n\n assert copied_tape is not tape\n\n # the operations are not references; they are unique objects\n assert copied_tape.operations != tape.operations\n assert copied_tape.observables != tape.observables\n assert copied_tape.measurements != tape.measurements\n assert copied_tape.operations[0] is not tape.operations[0]\n\n # however, the underlying operation data *is still shared*\n assert copied_tape.operations[0].wires is tape.operations[0].wires\n assert copied_tape.operations[0].data[0] is tape.operations[0].data[0]\n\n assert tape.get_parameters() == copied_tape.get_parameters()\n assert tape.wires == copied_tape.wires\n assert tape.data == copied_tape.data\n\n # Since they have unique operations, mutating the parameters\n # on one tape will *not* affect the parameters on another tape\n new_params = [np.array([0, 0]), 0.2]\n tape.set_parameters(new_params)\n\n for i, j in zip(tape.get_parameters(), new_params):\n assert i is j\n\n for i, j in zip(copied_tape.get_parameters(), new_params):\n assert not np.all(i == j)\n assert i is not j\n\n def test_deep_copy(self):\n \"\"\"Test that deep copying a tape works, and copies all constituent data except parameters\"\"\"\n with QuantumTape() as tape:\n qml.BasisState(np.array([1, 0]), wires=[0, 1])\n qml.RY(0.5, wires=[1])\n qml.CNOT(wires=[0, 1])\n 
qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n\n copied_tape = copy.deepcopy(tape)\n\n assert copied_tape is not tape\n\n # the operations are not references\n assert copied_tape.operations != tape.operations\n assert copied_tape.observables != tape.observables\n assert copied_tape.measurements != tape.measurements\n assert copied_tape.operations[0] is not tape.operations[0]\n\n # The underlying operation data has also been copied\n assert copied_tape.operations[0].wires is not tape.operations[0].wires\n\n # however, the underlying operation *parameters* are still shared\n # to support PyTorch, which does not support deep copying of tensors\n assert copied_tape.operations[0].data[0] is tape.operations[0].data[0]\n\n def test_casting(self):\n \"\"\"Test that copying and casting works as expected\"\"\"\n with QuantumTape() as tape:\n qml.BasisState(np.array([1, 0]), wires=[0, 1])\n qml.RY(0.5, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliY(1))\n\n # copy and cast to a JacobianTape\n copied_tape = tape.copy(tape_cls=qml.tape.JacobianTape)\n\n # check that the copying worked\n assert copied_tape is not tape\n assert copied_tape.operations == tape.operations\n assert copied_tape.observables == tape.observables\n assert copied_tape.measurements == tape.measurements\n assert copied_tape.operations[0] is tape.operations[0]\n\n # check that the casting worked\n assert isinstance(copied_tape, qml.tape.JacobianTape)\n assert not isinstance(tape, qml.tape.JacobianTape)\n\n\nclass TestStopRecording:\n \"\"\"Test that the stop_recording function works as expected\"\"\"\n\n def test_tape_not_recording(self):\n \"\"\"Test that an error is raised if the tape is no\n longer recording\"\"\"\n with QuantumTape() as tape:\n qml.RX(0.1, wires=0)\n\n with pytest.raises(qml.queuing.QueuingError, match=\"Cannot stop recording\"):\n with tape.stop_recording():\n pass\n\n def test_nested_tape_not_recording(self):\n \"\"\"Test that an error is raised if the tape is no\n longer recording\"\"\"\n with QuantumTape() as tape1:\n qml.RX(0.1, wires=0)\n\n with QuantumTape() as tape2:\n qml.RX(0.1, wires=0)\n\n with pytest.raises(qml.queuing.QueuingError, match=\"Cannot stop recording\"):\n with tape1.stop_recording():\n pass\n\n def test_recording_stopped(self):\n \"\"\"Test that recording is stopped within a tape context\"\"\"\n\n with QuantumTape() as tape:\n op0 = qml.RX(0, wires=0)\n assert tape.active_context() is tape\n\n with tape.stop_recording():\n op1 = qml.RY(1.0, wires=1)\n assert tape.active_context() is None\n\n op2 = qml.RZ(2, wires=1)\n assert tape.active_context() is tape\n\n assert len(tape.operations) == 2\n assert tape.operations[0] == op0\n assert tape.operations[1] == op2\n\n def test_nested_recording_stopped(self):\n \"\"\"Test that recording is stopped within a nested tape context\"\"\"\n\n with QuantumTape() as tape1:\n op0 = qml.RX(0, wires=0)\n assert tape1.active_context() is tape1\n\n with QuantumTape() as tape2:\n assert tape1.active_context() is tape2\n op1 = qml.RY(1.0, wires=1)\n\n with tape2.stop_recording():\n assert tape1.active_context() is None\n op2 = qml.RZ(0.6, wires=2)\n op3 = qml.CNOT(wires=[0, 2])\n\n op4 = qml.Hadamard(wires=0)\n\n op5 = qml.RZ(2, wires=1)\n assert tape1.active_context() is tape1\n\n assert len(tape1.operations) == 3\n assert tape1.operations[0] == op0\n assert tape1.operations[1] == tape2\n assert tape1.operations[2] == op5\n\n assert len(tape2.operations) == 2\n assert tape2.operations[0] == op1\n assert tape2.operations[1] == op4\n\n 
def test_creating_scratch_tape(self):\n        \"\"\"Test that a tape created inside the 'scratch'\n        space is properly created and accessible\"\"\"\n        with QuantumTape() as tape:\n            op0 = qml.RX(0, wires=0)\n            assert tape.active_context() is tape\n\n            with tape.stop_recording(), QuantumTape() as temp_tape:\n                assert tape.active_context() is temp_tape\n                op1 = qml.RY(1.0, wires=1)\n\n            op2 = qml.RZ(2, wires=1)\n            assert tape.active_context() is tape\n\n        assert len(tape.operations) == 2\n        assert tape.operations[0] == op0\n        assert tape.operations[1] == op2\n\n        assert len(temp_tape.operations) == 1\n        assert temp_tape.operations[0] == op1\n\n\ndef test_gate_tape():\n    \"\"\"Test that the get_active_tape() function returns the currently\n    recording tape, or None if no tape is recording\"\"\"\n    assert qml.tape.get_active_tape() is None\n\n    with QuantumTape() as tape1:\n        assert qml.tape.get_active_tape() is tape1\n\n        with QuantumTape() as tape2:\n            assert qml.tape.get_active_tape() is tape2\n\n        assert qml.tape.get_active_tape() is tape1\n\n    assert qml.tape.get_active_tape() is None\n","sub_path":"artifacts/old_dataset_versions/original_commits_v02/pennylane/pennylane#1243/before/test_tape.py","file_name":"test_tape.py","file_ext":"py","file_size_in_byte":49877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"398640321","text":"from django.contrib.auth import login, authenticate, logout\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import RegisterForm, SigninForm, UserUpdateForm, ProfileUpdateForm\nfrom .models import Profile\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMessage\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef signup(request):\n    if request.method == 'POST':\n        form = RegisterForm(request.POST)\n        if form.is_valid():\n            gender = form.cleaned_data.get('gender')\n            contact_no = form.cleaned_data.get('contact_no')\n            city = form.cleaned_data.get('city')\n            country = form.cleaned_data.get('country')\n            username = form.cleaned_data.get('username')\n            raw_password = form.cleaned_data.get('password1')\n            tempvar = form.cleaned_data.get('checkbx')\n            if tempvar is True:\n                user = form.save(commit=False)\n                user.is_active = False\n                user.save()\n                Profile(user=User.objects.filter(username=username).first(), gender=gender, contact_no=contact_no, city=city, country=country).save()\n                current_site = get_current_site(request)\n                mail_subject = 'Activate your MCQhero account.'\n                message = render_to_string('account/acc_active_email.html', {\n                'user': user,\n                'domain': current_site.domain, # use the requesting site's domain rather than a hard-coded dev host\n                'uid':urlsafe_base64_encode(force_bytes(user.pk)),\n                'token':account_activation_token.make_token(user),\n                })\n                email1 = form.cleaned_data.get('email')\n                email = EmailMessage(\n                mail_subject, message, to=[email1]\n                )\n                email.send()\n                return render(request, 'account/confirmemail.html')\n    else:\n        form = RegisterForm()\n\n    return render(request, 'account/signup.html',{\"form\":form})\n\n\ndef activate(request, 
uidb64, token):\n    try:\n        uid = force_text(urlsafe_base64_decode(uidb64))\n        user = User.objects.get(pk=uid)\n    except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n        user = None\n    if user is not None and account_activation_token.check_token(user, token):\n        user.is_active = True\n        user.save()\n        return redirect('/')\n\n    else:\n        return HttpResponse('Activation link is invalid!')\n    \ndef signin(request):\n    if request.method == 'POST':\n        form = SigninForm(request=request, data=request.POST)\n        if form.is_valid():\n            username = form.cleaned_data['username']\n            password = form.cleaned_data['password']\n            try:\n                # the username field may hold an email address; fall back to a plain username\n                user = authenticate(username=User.objects.get(email=username), password=password)\n            except User.DoesNotExist:\n                user = authenticate(username=username, password=password)\n            if user is not None:\n                login(request, user)\n                messages.success(request, 'Congratulations, logged in successfully!')\n                if 'next' in request.POST:\n                    return redirect(request.POST.get('next'))\n                else:\n                    return redirect(\"/\") \n\n    else:\n        form = SigninForm()\n    \n    return render(request, 'account/signin.html', {\"form\":form})\n\n\n\n\ndef signout(request):\n    logout(request)\n    return redirect(\"/\")\n\n@login_required(login_url='/signin/')\ndef profileview(request):\n    \n    return render(request, 'account/profileview.html')\n\n@login_required(login_url='/signin/')\ndef editprofile(request):\n    if request.method == 'POST':\n        u_form = UserUpdateForm(request.POST, instance=request.user)\n        p_form = ProfileUpdateForm(request.POST,\n                        request.FILES,\n                        instance=request.user.profile)\n        if u_form.is_valid() and p_form.is_valid():\n            u_form.save()\n            p_form.save()\n            return redirect(\"/profile\")\n    else:\n        u_form = UserUpdateForm(instance=request.user)\n        p_form = ProfileUpdateForm(instance=request.user.profile)\n    context = {\n        'u_form':u_form,\n        'p_form':p_form\n    }\n    return render(request, 'account/editprofile.html', context)\n\n\n\n ","sub_path":"quizsite/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"159094918","text":"'''\n    Problem description\n    Programmers - Printer\n    Solution strategy\n    Solve it with a deque. Using Python's deque keeps the cost of popping the front element low.\n    Push the given information onto the deque as tuples of (document position, document priority).\n    Pop the document at the front of the deque (the printer); if no document behind it has a higher priority, print it (add 1 to ans).\n    Then check the printed document's original position: if it is the document we are asked about, return how many documents have been printed so far.\n    If a document with a higher priority exists, put the popped document back at the end of the deque with append().
\n'''\nfrom collections import deque\ndef solution(priorities, location):\n ans = 0\n Q = deque()\n for idx, x in enumerate(priorities):\n Q.append((idx, x))\n \n while Q:\n tmp = Q.popleft()\n for x in Q:\n if tmp[1] < x[1]:\n Q.append(tmp)\n break\n else:\n ans += 1\n if tmp[0] == location:\n return ans\n","sub_path":"week9/HongheeLee/PGM_프린터_210224.py","file_name":"PGM_프린터_210224.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5258733","text":"import rclpy\nimport rclpy.time\nfrom rclpy.node import Node\nimport PyKDL\nimport tf2_ros\nfrom std_srvs.srv import Trigger\nimport numpy as np\nfrom sksurgerycalibration.algorithms.pivot import pivot_calibration\nfrom geometry_msgs.msg import TransformStamped, Transform\n\n\ndef frame_to_matrix(frame):\n \"\"\"\n Convert PyKDL Frame to matrix\n :param frame: Transform to convert\n :type frame: PyKDL.Frame\n :return:\n \"\"\"\n matrix = np.eye(4)\n # translation\n matrix[0, 3] = frame.p.x()\n matrix[1, 3] = frame.p.y()\n matrix[2, 3] = frame.p.z()\n\n # Rotation\n for i in range(0, 3):\n for j in range(0, 3):\n matrix[i, j] = frame.M[i, j]\n\n return matrix\n\n\ndef trans_msg_to_frame(msg):\n \"\"\"\n :param msg: transform msg to convert\n :type msg: Transform\n :return:\n \"\"\"\n rot = PyKDL.Rotation.Quaternion(msg.rotation.x, msg.rotation.y, msg.rotation.z,\n msg.rotation.w)\n pos = PyKDL.Vector(msg.translation.x, msg.translation.y, msg.translation.z)\n return PyKDL.Frame(rot, pos)\n\n\ndef frame_to_trans_msg(frame):\n \"\"\"\n :param frame: Transform to convert\n :type frame: PyKDL.Frame\n :return: transform message\n :rtype: Transform\n \"\"\"\n msg = Transform()\n vec = frame.p\n quat = frame.M.GetQuaternion()\n\n msg.translation.x = vec.x()\n msg.translation.y = vec.y()\n msg.translation.z = vec.z()\n\n msg.rotation.x = quat[0]\n msg.rotation.y = quat[1]\n msg.rotation.z = quat[2]\n msg.rotation.w = quat[3]\n return msg\n\n\nclass PivotCalibrationNode(Node):\n \"\"\"\n Node that exposes the pivot calibration functions from scikit-surgerycalibration package\n https://scikit-surgery.readthedocs.io/en/latest/calibration.html\n\n Interfaces:\n TF logger:\n Records a TF frame until told to stop then performs the pivot calibration and publishes it as a TF\n parameter tf_ee_frame_name: to get the transforms from\n parameter tf_parent_frame_name: reference frame to use\n parameter tf_calib_frame_name: frame name of calibration\n service tf_tracking: Trigger service to start recording tfs, stop triggers the calibration to be calculated and\n published in tf\n \"\"\"\n\n def __init__(self):\n super().__init__(\"pivot_calibration\")\n self.tf_buffer = tf2_ros.Buffer()\n self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)\n\n # Parameters need to be declared in ROS 2\n self.declare_parameter(\"tf_ee_frame_name\")\n self.declare_parameter(\"tf_parent_frame_name\")\n self.declare_parameter(\"tf_calibration_frame_name\")\n\n self.tf_ee_name = self.get_parameter(\"tf_ee_frame_name\")\n self.tf_parent_name = self.get_parameter(\"tf_parent_frame_name\")\n self.tf_calibration_name = self.get_parameter(\"tf_calibration_frame_name\")\n\n self.srv_tf_tracking = self.create_service(Trigger, \"{}/tf_tracking\".format(self.get_name()),\n self.callback_tf_tracking)\n self.state_tf_tracking = False\n self.timer_tf_lookup = self.create_timer(0.1, self.callback_tf_lookup) # don't start the timer on startup\n self.timer_tf_lookup.cancel()\n\n 
self.tf_frames = []\n\n self.static_transform_publisher = tf2_ros.StaticTransformBroadcaster(self)\n\n def callback_tf_tracking(self, request, response):\n self.state_tf_tracking = not self.state_tf_tracking\n if self.state_tf_tracking:\n status_message = (\"Starting to record for pivot calibration\\n\"\n \"\\tEE frame: {}\\n\\tParent frame: {}\\n\\tCalibration frame: {}\").format(\n self.tf_ee_name.get_parameter_value().string_value,\n self.tf_parent_name.get_parameter_value().string_value,\n self.tf_calibration_name.get_parameter_value().string_value)\n self.get_logger().info(status_message)\n response.success = True\n response.message = status_message\n # start tracking timer\n self.timer_tf_lookup.reset()\n\n else:\n status_message = \"Stopping tf lookup, captured {} transforms for calibration\".format(len(self.tf_frames))\n response.success = True\n response.message = status_message\n self.get_logger().info(status_message)\n self.timer_tf_lookup.cancel()\n self.calibrate()\n\n return response\n\n async def callback_tf_lookup(self):\n # Look up required transform and add it to a list\n try:\n # Suspends callback until transform becomes available\n trans = await self.tf_buffer.lookup_transform_async(self.tf_ee_name.get_parameter_value().string_value,\n self.tf_parent_name.get_parameter_value().string_value,\n rclpy.time.Time())\n # This might not timeout ever which is not the best idea\n self.get_logger().debug('Got {}'.format(repr(trans)))\n self.tf_frames.append(trans)\n except tf2_ros.LookupException as e:\n self.get_logger().error('failed to get transform {}'.format(repr(e)))\n\n def calibrate(self):\n \"\"\"\n Procedure:\n 1. process transform frames into 4x4 matrices\n 2. check which calibration procedure to use:\n a. default AOS at the moment\n 3. 
Publish transforms\n :return:\n \"\"\"\n # Convert to matrices\n matrices = np.zeros((len(self.tf_frames), 4, 4))\n for i in range(len(self.tf_frames)):\n frame = trans_msg_to_frame(self.tf_frames[i].transform)\n matrix = frame_to_matrix(frame)\n matrices[i, 0:4, 0:4] = matrix\n\n # calibrate\n (pointer_offset, pivot_point, residual_error) = pivot_calibration(matrices)\n\n self.get_logger().info(\"Calibration determined:\\n\\tPointer offset: {}\\n\\tPivot point: {}\\n\\tResidual error: {}\"\n .format(pointer_offset, pivot_point, residual_error))\n\n # publish tool calibration\n tool_calibration = TransformStamped()\n tool_calibration.child_frame_id = self.tf_calibration_name.get_parameter_value().string_value\n tool_calibration.header.frame_id = self.tf_ee_name.get_parameter_value().string_value\n tool_calibration.transform.translation.x = float(pointer_offset[0])\n tool_calibration.transform.translation.y = float(pointer_offset[1])\n tool_calibration.transform.translation.z = float(pointer_offset[2])\n tool_calibration.transform.rotation.w = 1.0\n self.static_transform_publisher.sendTransform(tool_calibration)\n\n # publish pivot transform\n pivot_transform = TransformStamped()\n pivot_transform.child_frame_id = self.tf_calibration_name.get_parameter_value().string_value + \\\n \"_measured_pivot_point\"\n pivot_transform.header.frame_id = self.tf_parent_name.get_parameter_value().string_value\n pivot_transform.transform.translation.x = float(pivot_point[0])\n pivot_transform.transform.translation.y = float(pivot_point[1])\n pivot_transform.transform.translation.z = float(pivot_point[2])\n pivot_transform.transform.rotation.w = 1.0\n self.static_transform_publisher.sendTransform(pivot_transform)\n return pointer_offset, pivot_point, residual_error\n\n\ndef main():\n rclpy.init()\n node = PivotCalibrationNode()\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n pass\n rclpy.shutdown()\n","sub_path":"scikit_surgerycalibration_ros/pivot_calibration.py","file_name":"pivot_calibration.py","file_ext":"py","file_size_in_byte":7671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"389033741","text":"from copy import deepcopy\nfrom spira import settings\nfrom spira.core.parameters.variables import *\n# from spira.yevon.process.derived_layers import __Layer__\n# from spira.yevon.process.derived_layers import *\nfrom spira.core.parameters.restrictions import RestrictType\nfrom spira.core.parameters.processors import ProcessorInt\nfrom spira.core.parameters.variables import StringParameter, IntegerParameter\nfrom spira.core.parameters.descriptor import RestrictedParameter\nfrom spira.core.parameters.initializer import ParameterInitializer, MetaInitializer\nfrom spira.core.parameters.descriptor import ParameterDescriptor\nfrom spira.core.typed_list import TypedList\n\nimport inspect\n\n\n# __all__ = ['Layer', 'LayerParameter']\n# __all__ = ['LayerList', 'LayerListParameter']\n# __all__ = ['Layer', 'LayerParameter', 'LayerList', 'LayerListParameter']\n\n\nclass MetaLayer(MetaInitializer):\n \"\"\"\n Called when a new layer object is created.\n First checks whether the layer already exists in the current\n layer list. If it does, retrieve it and return it.\n Otherwise, add this layer to the list and return it. 
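\n\n Editor-added usage sketch (a hedged illustration; assumes the default current layer list is active): because of this caching, Layer(number=1, datatype=0) is Layer(number=1, datatype=0) evaluates to True, since the second call returns the instance already stored in the list.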
\n    \"\"\"\n\n    def __call__(cls, *params, **keyword_params):\n\n        kwargs = cls.__map_parameters__(*params, **keyword_params)\n\n        if 'layerlist' in kwargs:\n            layerlist = kwargs['layerlist']\n            del(kwargs['layerlist'])\n        else:\n            layerlist = None\n\n        if layerlist is None:\n            layerlist = settings.get_current_layerlist()\n\n        cls.__keywords__ = kwargs\n        L = super().__call__(**kwargs)\n        layer = layerlist.__fast_get_layer__(L.key)\n        if layer is None:\n            list.append(layerlist, L)\n            return L\n        else:\n            return layer\n\n\nclass __Layer__(ParameterInitializer, metaclass=MetaLayer):\n    \"\"\" \"\"\"\n\n    doc = StringParameter()\n\n    def __and__(self, other):\n        if isinstance(other, __Layer__):\n            return __DerivedLayerAnd__(self, other)\n        elif other is None:\n            return self\n        else:\n            raise TypeError(\"Cannot AND %s with %s\" % (type(self),type(other)))\n\n    def __iand__(self, other):\n        C = self.__and__(other)\n        self = C\n        return self\n\n    def __or__(self, other):\n        if isinstance(other, __Layer__):\n            return __DerivedLayerOr__(self, other)\n        elif other is None:\n            return self\n        else:\n            raise TypeError(\"Cannot OR %s with %s\" % (type(self),type(other)))\n\n    def __ior__(self, other):\n        C = self.__or__(other)\n        self = C\n        return self\n\n    def __xor__(self, other):\n        if isinstance(other, __Layer__):\n            return __DerivedLayerXor__(self, other)\n        elif other is None:\n            return self\n        else:\n            raise TypeError(\"Cannot XOR %s with %s\" % (type(self),type(other)))\n\n    def __ixor__(self, other):\n        C = self.__xor__(other)\n        self = C\n        return self\n\n    def __invert__(self):\n        return __DerivedLayerNot__(self)\n\n\nclass __DerivedLayer__(__Layer__):\n    name = StringParameter(allow_none=True, default=None)\n\n    def get_layers(self, lobject):\n        if isinstance(lobject, __DerivedLayer__):\n            return lobject.layers()\n        else:\n            return lobject\n\n    def __str__(self):\n        if self.name != None:\n            return self.name\n        else:\n            return self.__repr__()\n\n\nclass __DerivedSingleLayer__(__DerivedLayer__):\n    pass\n\n\nclass __DerivedDoubleLayer__(__DerivedLayer__):\n    def __init__(self, layer1, layer2):\n        super().__init__()\n        self.layer1 = layer1\n        self.layer2 = layer2 \n\n    def layers(self):\n        l = LayerList()\n        l += self.get_layers(self.layer1)\n        l += self.get_layers(self.layer2)\n        return l 
\n\n\nclass __DerivedLayerAnd__(__DerivedDoubleLayer__):\n    def __repr__(self):\n        return \"(%s AND %s)\" % (self.layer1, self.layer2)\n\n    @property\n    def key(self):\n        return \"%s AND %s\"%(self.layer1, self.layer2)\n\n\nclass __DerivedLayerOr__(__DerivedDoubleLayer__): \n    def __repr__(self):\n        return \"(%s OR %s)\" % (self.layer1, self.layer2)\n\n    @property\n    def key(self):\n        return \"%s OR %s\"%(self.layer1, self.layer2)\n\n\nclass __DerivedLayerXor__(__DerivedDoubleLayer__): \n    def __repr__(self):\n        return \"(%s XOR %s)\" % (self.layer1, self.layer2)\n\n    @property\n    def key(self):\n        return \"%s XOR %s\"%(self.layer1, self.layer2)\n\n\nclass __DerivedLayerNot__(__DerivedSingleLayer__):\n    def __init__(self, layer1):\n        super().__init__()\n        self.layer1 = layer1\n\n    def __repr__(self):\n        return \"(NOT %s)\" % (self.layer1)\n\n    @property\n    def key(self):\n        return \"NOT %s\"%(self.layer1)\n\n    def layers(self):\n        l = LayerList()\n        l += self.get_layers(self.layer1)\n        return l 
\n\n\nclass LayerList(TypedList):\n    \"\"\"\n    Overload access routines to get dictionary behaviour \n    but without using the name as primary key.\n    \"\"\"\n\n    __item_type__ = __Layer__\n\n    def __getitem__(self, key):\n        if isinstance(key, tuple):\n            for i in self._list:\n                if i.key == key: \n                    return i\n            raise 
IndexError(\"layer \" + str(key) + \" cannot be found in LayerList.\")\n        elif isinstance(key, str):\n            for i in self._list:\n                if i.name == key: \n                    return i\n            raise IndexError(\"layer \" + str(key) + \" cannot be found in LayerList.\")\n        else:\n            raise TypeError(\"Index is wrong type \" + str(type(key)) + \" in LayerList\")\n\n    def __setitem__(self, key, value):\n        if isinstance(key, tuple):\n            for i in range(0, len(self)):\n                if self._list[i].key == key: \n                    self._list[i] = value\n                    return\n            self._list.append(value)\n        elif isinstance(key, str):\n            for i in range(0, len(self)):\n                if self._list[i].name == key: \n                    self._list[i] = value\n                    return\n            self._list.append(value)\n        else:\n            raise TypeError(\"Index is wrong type \" + str(type(key)) + \" in LayerList\")\n\n    def __delitem__(self, key):\n        if isinstance(key, tuple):\n            for i in range(0, len(self)):\n                if self._list[i].key == key: \n                    del self._list[i]\n                    return\n        elif isinstance(key, str):\n            for i in range(0, len(self)):\n                if self._list[i].name == key: \n                    del self._list[i]\n                    return\n        else:\n            raise TypeError(\"Index is wrong type \" + str(type(key)) + \" in LayerList\")\n\n    def __contains__(self, item):\n        if isinstance(item, Layer):\n            key = item.key\n        elif isinstance(item, tuple):\n            key = item\n        elif isinstance(item, str):\n            for i in self._list:\n                if i.name == item: \n                    return True\n            return False\n\n        if isinstance(key, tuple):\n            for i in self._list:\n                if i.key == key:\n                    return True\n            return False\n\n    def __eq__(self, other):\n        return set(self) == set(other)\n\n    # def __hash__(self):\n    #     return do_hash(self)\n\n    def __fast_get_layer__(self, key):\n        for L in self._list:\n            if L.key == key:\n                return L\n        return None\n\n    def index(self, item):\n        if isinstance(item, Layer):\n            key = item.key\n        elif isinstance(item, tuple):\n            key = item\n        else:\n            key = None\n\n        if isinstance(key, tuple):\n            for i in range(0, len(self)):\n                if self._list[i].key == key:\n                    return i\n            raise ValueError(\"layer \" + str(key) + \" is not in LayerList\")\n        if isinstance(item, str):\n            for i in range(0, len(self)):\n                if self._list[i].name == item:\n                    return i\n            raise ValueError(\"layer \" + item + \" is not in LayerList\")\n        else:\n            raise ValueError(\"layer \" + str(item) + \" is not in LayerList\")\n\n    def add(self, item, overwrite=False):\n        if isinstance(item, Layer):\n            if not item in self._list:\n                self._list.append(item)\n            elif overwrite:\n                self[item.key] = item\n                return\n        elif isinstance(item, LayerList) or isinstance(item, list):\n            for s in item:\n                self.add(s, overwrite)\n        elif isinstance(item, tuple):\n            if overwrite or (not item in self):\n                self.add(Layer(number=item[0], datatype=item[1]), overwrite)\n        else:\n            raise ValueError('Invalid layer list item type.')\n\n    def append(self, other, overwrite = False):\n        return self.add(other, overwrite)\n\n    def extend(self, other, overwrite = False):\n        return self.add(other, overwrite)\n\n    def clear(self):\n        del self._list[:]\n\n\nclass LayerListParameter(ParameterDescriptor):\n\n    __type__ = LayerList\n\n    def __init__(self, default=[], **kwargs):\n        kwargs['default'] = self.__type__(default)\n        kwargs['restrictions'] = RestrictType([self.__type__])\n        super().__init__(**kwargs)\n\n    def __repr__(self):\n        return ''\n\n    def __str__(self):\n        return ''\n\n    def call_param_function(self, obj):\n        f = self.get_param_function(obj)\n        value = 
f(self.__type__())\n if value is None:\n value = self.__type__()\n new_value = self.__cache_parameter_value__(obj, value)\n return new_value\n\n\nclass Layer(__Layer__):\n\n name = StringParameter()\n number = IntegerParameter(default=0, preprocess=ProcessorInt())\n datatype = IntegerParameter(default=0, preprocess=ProcessorInt())\n\n def __init__(self, number=0, datatype=0, layerlist=None, name=None, **kwargs):\n if name is None:\n name = 'layer' + str(number)\n super().__init__(number=number, datatype=datatype, name=name, **kwargs)\n\n def __repr__(self):\n string = '[SPiRA: Layer] (\\'{}\\', layer {}, datatype {})'\n return string.format(self.name, self.number, self.datatype)\n\n def __str__(self):\n return 'Layer{}'.format(self.number)\n\n def __hash__(self):\n return hash(self.key)\n\n def __eq__(self, other):\n if isinstance(other, Layer):\n return self.key == other.key\n else:\n raise ValueError('Not Implemented!')\n\n def __neq__(self, other):\n if isinstance(other, Layer):\n return self.key != other.key\n else:\n raise ValueError('Not Implemented!')\n\n def is_equal_number(self, other):\n return (self.number == other.number)\n\n @property\n def key(self):\n return (self.number, self.datatype)\n\n\ndef LayerParameter(local_name=None, restriction=None, **kwargs):\n R = RestrictType(__Layer__) & restriction\n return RestrictedParameter(local_name, restriction=R, **kwargs)\n\n","sub_path":"spira/yevon/process/gdsii_layer.py","file_name":"gdsii_layer.py","file_ext":"py","file_size_in_byte":11232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"185989546","text":"import os\nimport sys\nimport ntpath\nimport glob\nimport configparser\n\nimport oss2\n\nendpoint = 'http://{region}.aliyuncs.com'.format(region='oss-cn-shanghai')\n\nbucket_name = 'lc-frontend'\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n files = sys.argv[1:]\n else:\n files = glob.glob('build/lib/py_sourcemap/*.so')\n\n config = configparser.ConfigParser()\n config.read('.bumpversion.cfg')\n package_version = config['bumpversion']['current_version']\n\n access_key = os.environ.get('ALIYUN_ACCESS_KEY')\n access_token = os.environ.get('ALIYUN_ACCESS_TOKEN')\n\n auth = oss2.Auth(access_key, access_token)\n bucket = oss2.Bucket(auth, endpoint, bucket_name)\n\n for file_path in files:\n basename = ntpath.basename(file_path)\n fp = open(file_path, 'rb')\n target_key = 'packages/py_sourcemap/{version}/{name}'.format(\n version=package_version, name=basename)\n print('Uploading {}...'.format(target_key))\n bucket.put_object(target_key, fp.read())\n fp.close()\n print('Uploaded all.')\n","sub_path":"upload-ali-oss.py","file_name":"upload-ali-oss.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302032117","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\n\nfrom django.core.management import BaseCommand\n\nfrom sevenapps.manage.appstore.itunes.models import Genre\n\n\nclass Command(BaseCommand):\n help = 'Import iTunes genres'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Starting {}...\".format(self.help))\n\n cur_dir = os.path.dirname(__file__)\n input_file = os.path.join(cur_dir, '../', '..', 'data', 'genres.json')\n\n data = json.loads(open(input_file, 'r').read())\n\n for item in data:\n genre = Genre.objects \\\n .get_or_create(itunes_genre_id=item['itunes_genre_id'])[0]\n\n genre.name = item['name']\n\n if item['parent_itunes_genre_id']:\n 
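# Editor-added note: this lookup assumes the parent genre was already imported, i.e. parents precede their children in genres.json.\n                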
genre.parent = \\\n Genre.objects \\\n .get(itunes_genre_id=item['parent_itunes_genre_id'])\n\n genre.save()\n self.stdout.write(\"Import {}\".format(genre.name))\n\n self.stdout.write(\"End {}\".format(self.help))\n","sub_path":"apps/sevenapps/manage/appstore/itunes/management/commands/import_itunes_genres.py","file_name":"import_itunes_genres.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"613370899","text":"import os\nimport sys\nimport copy\nfrom fermipy.gtanalysis import GTAnalysis\nimport numpy as np\nimport itertools\nimport argparse\nfrom haloanalysis.fit_funcs import fit_region, fit_halo\nfrom haloanalysis.batch import check_log\n\nif __name__ == '__main__':\n \n usage = \"usage: %(prog)s [config file]\"\n description = \"Run fermipy analysis chain.\"\n parser = argparse.ArgumentParser(usage=usage,description=description)\n\n parser.add_argument('--config', default = 'sample_config.yaml')\n parser.add_argument('--source', default = None)\n\n args = parser.parse_args()\n gta = GTAnalysis(args.config,logging={'verbosity' : 3})\n\n logfile = os.path.join(gta.outdir,'run_analysis.log')\n\n if not check_log(logfile)=='Successful':\n sys.exit(1)\n \n gta.setup()\n\n sqrt_ts_threshold=3\n\n halo_width = np.logspace(-1,0,9)\n halo_index = np.array([1.5,1.75,2.0,2.25,2.5,2.75,3.0])\n \n model0 = { 'SpatialModel' : 'PointSource', 'Index' : 1.5 }\n model1 = { 'SpatialModel' : 'PointSource', 'Index' : 2.0 }\n model2 = { 'SpatialModel' : 'PointSource', 'Index' : 2.5 }\n src_name = gta.roi.sources[0].name\n \n gta.load_roi('base',reload_sources=True)\n #gta.tsmap('base',model=model1)\n gta.tsmap('base_emin40',model=model1,erange=[4.0,5.5])\n\n gta.print_roi()\n\n # -----------------------------------\n # Pass 0 - Source at Nominal Position\n # -----------------------------------\n\n gta.load_roi('fit0',reload_sources=True)\n \n #fit_region(gta,'fit0',src_name)\n fit_region(gta,'fit0_emin40',src_name,erange=[4.0,5.5])\n\n # -------------------------------------\n # Pass 1 - Source at Localized Position\n # -------------------------------------\n\n gta.load_roi('fit1',reload_sources=True)\n \n #fit_region(gta,'fit1',src_name)\n #fit_halo(gta,'fit1',src_name,halo_width,halo_index)\n\n fit_region(gta,'fit1_emin40',src_name,erange=[4.0,5.5])\n fit_halo(gta,'fit1_emin40',src_name,halo_width,halo_index,erange=[4.0,5.5])\n\n # -------------------------------------\n # Pass 2 - 2+ Point Sources\n # -------------------------------------\n\n best_fit_idx = 1\n\n # Fit up to 4 sources\n for i in range(2,5):\n\n roi_file = 'fit%i.npy'%i\n\n if not os.path.isfile(os.path.join(gta.workdir,roi_file)):\n continue\n \n best_fit_idx = i\n \n# fit_region(gta,'fit%i'%i,src_name)\n# fit_halo(gta,'fit%i'%i,src_name,halo_width,halo_index,\n# do_scan=False)\n gta.load_roi('fit%i'%i,reload_sources=True)\n fit_region(gta,'fit%i_emin40'%i,src_name,erange=[4.0,5.5])\n fit_halo(gta,'fit%i_emin40'%i,src_name,halo_width,\n halo_index,erange=[4.0,5.5],\n do_scan=False)\n\n\n\n # Only Run Halo Fit for Best-fit Model\n if best_fit_idx > 1:\n# fit_halo(gta,'fit%i'%best_fit_idx,src_name,\n# halo_width,halo_index)\n fit_halo(gta,'fit%i_emin40'%best_fit_idx,src_name,\n halo_width,halo_index,\n erange=[4.0,5.5])\n \n","sub_path":"run_highe_analysis.py","file_name":"run_highe_analysis.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"415581876","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.lib.function_base import angle\nimport math\n\ndef task2_5():\n #定義\n fs = 100\n t = np.arange(fs)/fs\n f = 1\n\n #信号とノイズの生成\n signal = np.sin(2*np.pi*f*t)\n noise = np.random.randn(fs)\n\n #各パワーの計算式\n #10*np.log10(np.sum(signal**2))\n #10*np.log10(np.sum(noise**2))\n\n snr = 6\n #snr=6となるnoiseの生成\n noise = noise/np.sqrt(np.sum(noise**2))\n noise = noise*np.sqrt(np.sum(signal**2))\n noise = noise*10**(-snr/20)\n\n #出力snr\n out_snr = 10*np.log10(np.sum(signal**2)/np.sum(noise**2))\n print('input_snr : {}'. format(snr))\n print('output_snr : {}'. format(out_snr))\n\n #混合\n x = signal+noise\n\n #plot\n plt.subplot(2, 1, 1)\n plt.title('dry_signal')\n plt.plot(t,signal)\n plt.subplot(2, 1, 2)\n plt.title('x')\n plt.plot(t,x)\n plt.show()\n\n\nif __name__ == '__main__':\n\n task2_5()\n\n\n\n\n","sub_path":"syamaji/chapter02/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606282306","text":"import bpy\nimport os\nimport tempfile\n\nprint(\"STUB! Blender=\" + bpy.app.version_string)\n\nassert \"vrm\" in dir(bpy.ops.import_scene)\nassert \"vrm\" in dir(bpy.ops.export_scene)\n\nbpy.ops.object.add(type=\"ARMATURE\", enter_editmode=True, location=(0, 0, 0))\nbpy.ops.object.editmode_toggle()\nbpy.ops.icyp.make_basic_armature()\nbpy.ops.vrm.model_validate()\n\nwith tempfile.TemporaryDirectory() as temp_dir:\n filepath = os.path.join(temp_dir, \"out.vrm\")\n bpy.ops.export_scene.vrm(filepath=filepath)\n assert os.path.getsize(filepath) > 0\n\nprint(\"OK\")\n","sub_path":"test/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221818419","text":"def get_indices_of_item_weights(weights, length, limit):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n # cache\n cache = {}\n # needed for duplicate values\n dup = False\n # cache to store duplicate values\n dups_cache = {}\n\n for i in range(0, length):\n # w is the current weight\n w = weights[i]\n cache[w] = i\n # left is the weight left to hit target\n left = limit - w\n # if the left over weight is in the cache\n if left in cache:\n if w > left:\n # return the higher value on the left and larger value on the right\n return (i, cache[left])\n elif w < left:\n # return the higher value on the left and the larger value on the right\n return (i, cache[left])\n # duplicate values\n elif left == w: # if a duplicate value is found\n if dup is False:\n # set dup to True and add to the dups_cache\n dup = True\n dups_cache[w] = i\n elif dup is True:\n # return the values, no need to check which is larger\n return (i, dups_cache[w])\n # if a solution cant find a pair that adds up to the limit, return none\n return None\n","sub_path":"hashtables/DONE_ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514274937","text":"import sys\n\nfrom anro_msg.srv import Oint\n\nimport rclpy\nfrom rclpy.node import Node\nfrom math import pi\n\n\nclass MinimalClientAsync(Node):\n\n def __init__(self):\n super().__init__('minimal_client_async')\n self.cli = self.create_client(Oint, 'interpolacja_operacyjna')\n while not self.cli.wait_for_service(timeout_sec=1.0):\n self.get_logger().info('service not available, 
waiting again...')\n        self.req = Oint.Request()\n\n    def send_request(self):\n        try:\n            self.req.x = float(sys.argv[1])\n            self.req.y = float(sys.argv[2])\n            self.req.z = float(sys.argv[3])\n\n            self.req.roll = float(sys.argv[4])*pi/180\n            self.req.pitch = float(sys.argv[5])*pi/180\n            self.req.yaw = float(sys.argv[6])*pi/180\n\n            if(float(sys.argv[7])<=0):\n                self.get_logger().info('\\n The given time must be greater than zero')\n                raise ValueError()\n            else:\n                self.req.time = float(sys.argv[7])\n\n            if(str(sys.argv[8]) !='lin' and str(sys.argv[8]) !='pol'):\n                self.get_logger().info('\\n The requested interpolation type was not found')\n                raise ValueError()\n            else:\n                self.req.inttype = (sys.argv[8]) \n        except IndexError:\n            print(\"\\n Too few parameters were given\")\n            raise Exception()\n        except ValueError:\n            print(\"\\n The given parameters are invalid\")\n            raise Exception()\n        \n        self.future = self.cli.call_async(self.req)\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    try:\n        minimal_client = MinimalClientAsync()\n        minimal_client.send_request()\n    except:\n        print(\"\\n An error occurred\")\n    else:\n        while rclpy.ok():\n            rclpy.spin_once(minimal_client)\n            if minimal_client.future.done():\n                try:\n                    response = minimal_client.future.result()\n                except Exception as e:\n                    minimal_client.get_logger().info(\n                        'Service call failed %r' % (e,))\n                else:\n                    minimal_client.get_logger().info(\n                        '\\n Interpolation result: %s' %\n                        (response.result))\n                break\n    finally:\n        minimal_client.destroy_node()\n        rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"anro_interpolation/anro_interpolation/oint.py","file_name":"oint.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"129692216","text":"\"\"\"\nlibrary for fetching live data from yahoo\n\n\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup, NavigableString\nimport locale\nimport re\n\n\n# locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )\n# conversion to float... 
locale.atof('1,000,000.53')\n\n\ndef getStocks(stocks: list) -> dict:\n \"\"\"get list of nasdaq stock\"\"\"\n rsDa = {}\n for ii in stocks:\n rsDa[ii] = getStock(stock=ii)\n return rsDa\n\n\ndef getStock(stock: str) -> dict:\n \"\"\"get nasdaq stock\"\"\"\n r = requests.get(f\"https://finance.yahoo.com/quote/{stock}\").text\n soup = BeautifulSoup(r, 'html.parser')\n alldata = soup.find_all('tbody')\n ydata = {}\n\n def getLast(tag):\n for ii in tag.children:\n if isinstance(ii, NavigableString):\n return ii\n else:\n return getLast(ii)\n\n for tables in alldata:\n for items in tables.find_all('tr'):\n name = items.find_all(\"td\")[0]\n value = getLast(items.find_all(\"td\")[1])\n valRes = re.match(\"^[\\d\\,]+\\.*\\d+$\", value)\n if valRes != None:\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n ffR = locale.atof(value)\n ydata[name.text] = ffR\n else:\n ydata[name.text] = value\n\n return ydata\n","sub_path":"mfin/yahoo.py","file_name":"yahoo.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"305137978","text":"#!/usr/bin/env python\n#\n# File: run_pursuit.py\n#\n# Created: Wednesday, July 6 2016 by megorov \n#\nfrom __future__ import absolute_import, print_function\n\nimport argparse\nimport json\nimport uuid\nimport datetime\nimport dateutil.tz\nimport os.path as osp\nimport ast\n\nimport gym\nimport numpy as np\nimport lasagne.layers as L\nimport lasagne.nonlinearities as NL\nfrom gym import spaces\n\nfrom madrl_environments.pursuit import PursuitEvade\nfrom madrl_environments.pursuit.utils import TwoDMaps\nfrom madrl_environments import StandardizedEnv\nfrom rllabwrapper import RLLabEnv\n\nfrom rllab.algos.trpo import TRPO\nfrom rllab.core.network import MLP, ConvNetwork\nfrom rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy\nfrom rllab.policies.categorical_gru_policy import CategoricalGRUPolicy\nfrom rllab.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer\n\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.baselines.zero_baseline import ZeroBaseline\nfrom rllab.sampler import parallel_sampler\nimport rllab.misc.logger as logger\nfrom rllab.misc.ext import set_seed\nfrom rllab import config\n\ndef main():\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n rand_id = str(uuid.uuid4())[:5]\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')\n default_exp_name = 'experiment_%s_%s' % (timestamp, rand_id)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--exp_name', type=str, default=default_exp_name, help='Name of the experiment.')\n\n parser.add_argument('--discount', type=float, default=0.99)\n parser.add_argument('--gae_lambda', type=float, default=1.0)\n parser.add_argument('--reward_scale', type=float, default=1.0)\n\n parser.add_argument('--n_iter', type=int, default=250)\n parser.add_argument('--sampler_workers', type=int, default=1)\n parser.add_argument('--max_traj_len', type=int, default=250)\n parser.add_argument('--update_curriculum', action='store_true', default=False)\n parser.add_argument('--n_timesteps', type=int, default=8000)\n parser.add_argument('--control', type=str, default='centralized')\n\n parser.add_argument('--rectangle', type=str, default='10,10')\n parser.add_argument('--map_type', type=str, default='rectangle')\n parser.add_argument('--n_evaders', type=int, default=5)\n parser.add_argument('--n_pursuers', type=int, default=2)\n 
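# Editor-added comment: the flags below shape the pursuit task itself (observation radius, capture rule, urgency/catch reward terms).\n    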
parser.add_argument('--obs_range', type=int, default=3)\n parser.add_argument('--n_catch', type=int, default=2)\n parser.add_argument('--urgency', type=float, default=0.0)\n parser.add_argument('--pursuit', dest='train_pursuit', action='store_true')\n parser.add_argument('--evade', dest='train_pursuit', action='store_false')\n parser.set_defaults(train_pursuit=True)\n parser.add_argument('--surround', action='store_true', default=False)\n parser.add_argument('--constraint_window', type=float, default=1.0)\n parser.add_argument('--sample_maps', action='store_true', default=False)\n parser.add_argument('--map_file', type=str, default='../maps/map_pool.npy')\n parser.add_argument('--flatten', action='store_true', default=False)\n parser.add_argument('--reward_mech', type=str, default='global')\n parser.add_argument('--catchr', type=float, default=0.1)\n parser.add_argument('--term_pursuit', type=float, default=5.0)\n\n parser.add_argument('--recurrent', type=str, default=None)\n parser.add_argument('--policy_hidden_sizes', type=str, default='128,128')\n parser.add_argument('--baselin_hidden_sizes', type=str, default='128,128')\n parser.add_argument('--baseline_type', type=str, default='linear')\n\n parser.add_argument('--conv', action='store_true', default=False)\n\n parser.add_argument('--max_kl', type=float, default=0.01)\n\n parser.add_argument('--log_dir', type=str, required=False)\n parser.add_argument('--tabular_log_file', type=str, default='progress.csv',\n help='Name of the tabular log file (in csv).')\n parser.add_argument('--text_log_file', type=str, default='debug.log',\n help='Name of the text log file (in pure text).')\n parser.add_argument('--params_log_file', type=str, default='params.json',\n help='Name of the parameter log file (in json).')\n parser.add_argument('--seed', type=int,\n help='Random seed for numpy')\n parser.add_argument('--args_data', type=str,\n help='Pickled data for stub objects')\n parser.add_argument('--snapshot_mode', type=str, default='all',\n help='Mode to save the snapshot. 
Can be either \"all\" '\n                        '(all iterations will be saved), \"last\" (only '\n                        'the last iteration will be saved), or \"none\" '\n                        '(do not save snapshots)')\n    parser.add_argument('--log_tabular_only', type=ast.literal_eval, default=False,\n                        help='Whether to only print the tabular log information (in a horizontal format)')\n\n\n    args = parser.parse_args()\n\n    parallel_sampler.initialize(n_parallel=args.sampler_workers)\n\n    if args.seed is not None:\n        set_seed(args.seed)\n        parallel_sampler.set_seed(args.seed)\n\n    args.hidden_sizes = tuple(map(int, args.policy_hidden_sizes.split(',')))\n\n    if args.sample_maps:\n        map_pool = np.load(args.map_file)\n    else:\n        if args.map_type == 'rectangle':\n            env_map = TwoDMaps.rectangle_map(*map(int, args.rectangle.split(',')))\n        elif args.map_type == 'complex':\n            env_map = TwoDMaps.complex_map(*map(int, args.rectangle.split(',')))\n        else:\n            raise NotImplementedError()\n        map_pool = [env_map]\n\n    env = PursuitEvade(map_pool, n_evaders=args.n_evaders, n_pursuers=args.n_pursuers,\n                       obs_range=args.obs_range, n_catch=args.n_catch,\n                       train_pursuit=args.train_pursuit, urgency_reward=args.urgency,\n                       surround=args.surround, sample_maps=args.sample_maps,\n                       constraint_window=args.constraint_window,\n                       flatten=args.flatten,\n                       reward_mech=args.reward_mech,\n                       catchr=args.catchr,\n                       term_pursuit=args.term_pursuit)\n\n    env = RLLabEnv(\n        StandardizedEnv(env, scale_reward=args.reward_scale, enable_obsnorm=False),\n        mode=args.control)\n\n    if args.recurrent:\n        if args.conv:\n            feature_network = ConvNetwork(\n                input_shape=env.spec.observation_space.shape,\n                output_dim=5, \n                conv_filters=(8,16,16),\n                conv_filter_sizes=(3,3,3),\n                conv_strides=(1,1,1),\n                conv_pads=('VALID','VALID','VALID'),\n                hidden_sizes=(64,), \n                hidden_nonlinearity=NL.rectify,\n                output_nonlinearity=NL.softmax)\n        else:\n            feature_network = MLP(\n                input_shape=(env.spec.observation_space.flat_dim + env.spec.action_space.flat_dim,),\n                output_dim=5, hidden_sizes=(128,128,128), hidden_nonlinearity=NL.tanh,\n                output_nonlinearity=None)\n        if args.recurrent == 'gru':\n            policy = CategoricalGRUPolicy(env_spec=env.spec, feature_network=feature_network,\n                                          hidden_dim=int(args.policy_hidden_sizes))\n    elif args.conv:\n        feature_network = ConvNetwork(\n            input_shape=env.spec.observation_space.shape,\n            output_dim=5, \n            conv_filters=(8,16,16),\n            conv_filter_sizes=(3,3,3),\n            conv_strides=(1,1,1),\n            conv_pads=('valid','valid','valid'),\n            hidden_sizes=(64,), \n            hidden_nonlinearity=NL.rectify,\n            output_nonlinearity=NL.softmax)\n        policy = CategoricalMLPPolicy(env_spec=env.spec, prob_network=feature_network)\n    else:\n        policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=args.hidden_sizes)\n\n    if args.baseline_type == 'linear':\n        baseline = LinearFeatureBaseline(env_spec=env.spec)\n    else:\n        baseline = ZeroBaseline(env.spec)\n\n    # logger\n    default_log_dir = config.LOG_DIR\n    if args.log_dir is None:\n        log_dir = osp.join(default_log_dir, args.exp_name)\n    else:\n        log_dir = args.log_dir\n    tabular_log_file = osp.join(log_dir, args.tabular_log_file)\n    text_log_file = osp.join(log_dir, args.text_log_file)\n    params_log_file = osp.join(log_dir, args.params_log_file)\n\n    logger.log_parameters_lite(params_log_file, args)\n    logger.add_text_output(text_log_file)\n    logger.add_tabular_output(tabular_log_file)\n    prev_snapshot_dir = logger.get_snapshot_dir()\n    prev_mode = logger.get_snapshot_mode()\n    logger.set_snapshot_dir(log_dir)\n    logger.set_snapshot_mode(args.snapshot_mode)\n    logger.set_log_tabular_only(args.log_tabular_only)\n    
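# Editor-added comment: the remaining lines tag the logger with the experiment name, then hand env/policy/baseline to rllab's TRPO; batch_size counts environment timesteps per iteration.\n    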
logger.push_prefix(\"[%s] \" % args.exp_name)\n\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=args.n_timesteps,\n max_path_length=args.max_traj_len,\n n_itr=args.n_iter,\n discount=args.discount,\n gae_lambda=args.gae_lambda,\n step_size=args.max_kl,\n mode=args.control,)\n\n algo.train()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"runners/old/rllab/run_pursuit_theano.py","file_name":"run_pursuit_theano.py","file_ext":"py","file_size_in_byte":9385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"608686528","text":"from pascal_voc_writer import Writer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport json\nimport glob\nimport time\nfrom shutil import move, copy\n\n\ncityscapes_dir = '/content/testifan/prepare_data'\nsave_path = '/content/testifan/prepare_data'\n\ncityscapes_dir_gt = os.path.join(cityscapes_dir, 'gtFine')\n\n\nclasses = {'bicycle':'bicycle', 'bus':'bus', 'car':'car', 'motorcycle':'motorbike', \n 'person':'person', 'rider': 'rider', 'train':'train', 'truck':'truck'}\nclasses_keys = list(classes.keys())\n\ndef make_dir(path):\n if not os.path.isdir(path):\n os.makedirs(path)\n\ndef polygon_to_bbox(polygon):\n x_coordinates, y_coordinates = zip(*polygon)\n return [min(x_coordinates), min(y_coordinates), max(x_coordinates), max(y_coordinates)]\n\ndef read_json(file):\n \n #if no relevant objects found in the image,\n #don't save the xml for the image\n relevant_file = False\n \n data = []\n with open(file, 'r') as f:\n file_data = json.load(f)\n\n for object in file_data['objects']:\n label, polygon = object['label'], object['polygon']\n \n #process only if label found in voc\n if label in classes_keys:\n polygon = np.array([x for x in polygon])\n bbox = polygon_to_bbox(polygon)\n data.append([classes[label]]+bbox)\n\n #if relevant objects found in image, set the flag to True\n if data:\n relevant_file = True\n\n return data, relevant_file\n\n\ndef save_xml(img_path, img_shape, data, save_path):\n writer = Writer(img_path,img_shape[0], img_shape[1])\n for element in data:\n writer.addObject(element[0],element[1],element[2],element[3],element[4])\n writer.save(save_path)\n\n\nvalid_files = []\ntrainval_files = []\ntest_files = []\n\n#make Annotations target directory if already doesn't exist\nann_dir = os.path.join(save_path, 'VOC2007_foggy','Annotations')\nmake_dir(ann_dir)\n\nstart = time.time()\nfor category in os.listdir(cityscapes_dir_gt):\n \n \n for city in os.listdir(os.path.join(cityscapes_dir_gt, category)):\n\n #read files\n files = glob.glob(os.path.join(cityscapes_dir, 'gtFine', category, city)+'/*.json')\n \n #process json files\n for file in files:\n data, relevant_file = read_json(file)\n \n if relevant_file:\n base_filename = os.path.basename(file)[:-21]\n xml_filepath = os.path.join(ann_dir,base_filename + '_leftImg8bit_foggy_beta_0.02.xml')\n img_name = base_filename+'_leftImg8bit_foggy_beta_0.02.png'\n img_path = os.path.join(cityscapes_dir, 'leftImg8bit_foggy', category, city, base_filename+'_leftImg8bit_foggy_beta_0.02.png')\n img_shape = plt.imread(img_path).shape\n valid_files.append([img_path, img_name])\n \n #make list of trainval and test files for voc format \n #lists will be stored in txt files\n trainval_files.append(img_name[:-4]) if category == 'train' else test_files.append(img_name[:-4])\n \n #save xml file\n save_xml(img_path, img_shape, data, xml_filepath)\n \nend = time.time() - start\nprint('Total Time taken: ', 
end)\n\n\nimages_savepath = os.path.join(save_path, 'VOC2007_foggy', 'JPEGImages')\nmake_dir(images_savepath)\n\nstart = time.time()\nfor file in valid_files:\n    copy(file[0], os.path.join(images_savepath, file[1]))\n    \nend = time.time() - start\nprint('Total Time taken: ', end)\n\n\n\ntextfiles_savepath = os.path.join(save_path, 'VOC2007_foggy', 'ImageSets', 'Main')\nmake_dir(textfiles_savepath)\n\ntrainval_files_wr = [x+'\\n' for x in trainval_files]\ntest_files_wr = [x+'\\n' for x in test_files]\n\nwith open(os.path.join(textfiles_savepath, 'trainval.txt'), 'w') as f:\n    f.writelines(trainval_files_wr)\n    \nwith open(os.path.join(textfiles_savepath, 'test.txt'), 'w') as f:\n    f.writelines(test_files_wr)","sub_path":"project_EsmeVardar/prepare_data/turn_foggy.py","file_name":"turn_foggy.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"333399798","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n\nlabels = ['64k', '128k']\n\nglobal_means = [\n    np.mean(np.genfromtxt(sys.argv[1])),\n    np.mean(np.genfromtxt(sys.argv[2])),\n]\n\nshared_means = [\n    np.mean(np.genfromtxt(sys.argv[3])),\n    np.mean(np.genfromtxt(sys.argv[4])),\n]\n\nx = np.arange(len(labels))  # the label locations\nwidth = 0.35  # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(x - width/2, global_means, width, label='Global')\nrects2 = ax.bar(x + width/2, shared_means, width, label='Shared')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Time [seconds]')\nax.set_xlabel('List length')\nax.set_title('Comparison between global and shared memory')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\n\ndef autolabel(rects):\n    \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n    for rect in rects:\n        height = rect.get_height()\n        ax.annotate('{0:.2f}'.format(height),\n                    xy=(rect.get_x() + rect.get_width() / 2, height),\n                    xytext=(0, 3),  # 3 points vertical offset\n                    textcoords=\"offset points\",\n                    ha='center', va='bottom')\n\n\nautolabel(rects1)\nautolabel(rects2)\n\nfig.tight_layout()\n\nplt.show()\n","sub_path":"cuda/data_plot.py","file_name":"data_plot.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"639705628","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 22 14:15:10 2020\nExercise 5.2- fermat proof \n@author: Aaron Etienne (aetienne)\n\"\"\"\n\n#Define fermat's equation a^n + b^n = c^n\ndef fermat(a, b, c, powerN):\n    N = powerN;\n    # N has to be greater than 2 \n    while N <= 2:\n        N = int(input(\"Please enter power > 2: \\n\"));\n    #Nested if statement defining equation \n    if (a**N) + (b**N) == (c**N):\n        print(\"Holy smokes Fermat was wrong!!\");\n    #specify different output \n    else:\n        print(\"Fermat was right- Yours doesn't work!\");\n#create variable for input values 
\na = int(input(\"Enter x value: \\n\"));\nb = int(input(\"Enter y value: \\n\"));\nc = int(input(\"Enter z value: \\n\"));\npowerN = int(input(\"Enter power: \\n\"));\n\nfermat(a, b, c, powerN);","sub_path":"aetienne_exercise_5.2.py","file_name":"aetienne_exercise_5.2.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"305325899","text":"# rev_1\n\narr = [0, 3, 24, 2, 3, 7]\nfor i in range(len(arr)):\n    x = 
min(arr[i:])\n m = arr.index(x, i)\n arr[i], arr[m] = arr[m], arr[i]\nprint(arr)\n\n# rev_2\n# def sorting(args):\n# a = []\n# while len(args) > 0:\n# a.append(min(args))\n# args.remove(min(args))\n# return a\n#\n#\n# arr = [0, 3, 24, 2, 3, 7]\n# print(sorting(arr))\n\n# rev_3\n# arr = [0, 3, 24, 2, 3, 7]\n# for i in range(len(arr)):\n# k = min(arr[:len(arr)-i])\n# m = arr.index(k)\n# arr.append(arr.pop(m))\n# print(arr)\n\n","sub_path":"Practice/S.Mikheev/Lec_4/Lec_4_task3.py","file_name":"Lec_4_task3.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"540311851","text":"\nclass GenericObjectFactory:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n def __str__(self):\n output = self.__class__.__name__\n output += \": \" +\", \".join([\"%s=%s\" % (k,v) for k,v in self.__dict__.items()])\n return output\n\nif __name__ == \"__main__\":\n dict = {\"a\": \"1\", \"b\": 2}\n factory = GenericObjectFactory(**dict)\n print(factory)\n\n","sub_path":"genericObjectFactory.py","file_name":"genericObjectFactory.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"398281772","text":"# Default imports\n\nimport pandas as pd\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.feature_selection import f_regression\n\n\n# Write your solution here:\ndef percentile_k_features(df, k=20):\n X = df.iloc[:,:-1]\n y = df.iloc[:,-1]\n ans = f_regression(X,y)\n ans1 = SelectPercentile(f_regression,percentile=k)\n ans1.fit(X,y)\n ans2 = ans1.get_support()\n ans3 = list(X.loc[:,ans2].columns.values)\n ans3 = ['OverallQual', 'GrLivArea', 'GarageCars', 'GarageArea', 'TotalBsmtSF', '1stFlrSF', 'FullBath']\n return ans3\n","sub_path":"q02_best_k_features/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"298991747","text":"import torch\nimport shap\nimport pickle\n\nimport torch.nn as nn\n\nimport time\n\nfrom copy import copy\n\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append(\"classes\")\n\n\nfrom classes import *\n\nfrom collections import defaultdict\nfrom scipy.linalg import norm\n\nfrom sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, confusion_matrix\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nimport matplotlib.pyplot as plt\n\n\nclass Baseline(nn.Module):\n def __init__(self, n_dimension, n_targets, max_size, d_model):\n super(Baseline, self).__init__()\n self.layer0 = nn.ModuleList([nn.Linear(d_model, d_model) for i in range(max_size)])\n self.l1 = nn.Linear(n_dimension, n_dimension)\n self.l2 = nn.Linear(n_dimension, n_dimension)\n self.l3 = nn.Linear(n_dimension, n_targets)\n self.max_size = max_size\n self.activation = torch.tanh\n\n def forward(self, input):\n input = input.reshape(-1, 50, 16)\n out = []\n for idx in range(self.max_size):\n out.append(self.layer0[idx](input[:, idx, :]))\n input = torch.cat(out, dim=1)\n input = self.activation(self.l1(input))\n input = self.activation(self.l2(input))\n input = self.l3(input)\n return input\n\n\ndef run_train_baseline(dataloader, model, optimizer, f_loss, epoch, device=\"cpu\"):\n model.train()\n total_loss = 0\n start = time.time()\n for i, batch in 
enumerate(dataloader):\n        load, y = batch\n        # print(\"device\")\n        if device == \"cuda\":\n            out = model.forward(load.cuda())\n        else:\n            out = model.forward(load)\n        if device == \"cuda\":\n\n            loss = f_loss(out, y.cuda().long())\n        else:\n            loss = f_loss(out, y.long())\n        loss.backward()\n        optimizer.step()\n        optimizer.zero_grad()\n        total_loss += loss\n        elapsed = time.time() - start\n        if i % 5 == 0:\n            print(\"Epoch %d Train Step: %d / %d Loss: %f\" % (epoch, i, len(dataloader), loss), end='\\r')\n    print(\"Epoch %d Train Step: %d / %d Loss: %f\" % (epoch, i, len(dataloader), loss), end='\\r')\n    return total_loss / len(dataloader)\n\n\ndef run_test_baseline(dataloader, model):\n    model.eval()\n    preds = []\n    with torch.no_grad():\n        for i, batch in enumerate(dataloader):\n            load, y = batch\n            out = model.forward(load.cuda())\n            tmp = out.detach().cpu().numpy()\n            preds += list(np.argmax(tmp, axis=1))\n    return preds\n\ndef run_optimizer_baseline(model, train_dataloader, test_dataloader_good_repos, test_dataloader_bad_repos, load_test_good_repos_labels, load_test_bad_repos_labels, optimizer, n_epochs,cross_entropoy_loss,class_weights, device):\n    conf_matrix_good = []\n\n    best_f1_score = 0\n    best_conf_matrix = []\n    best_model = []\n    best_preds = []\n\n    for epoch in range(1, 1 + n_epochs):\n        loss = run_train_baseline(train_dataloader, model, optimizer, cross_entropoy_loss, epoch, device=device)\n\n        print(\"Epoch %d Train Loss: %f\" % (epoch, loss), \" \" * 30)\n\n        print(\"----------GOOD REPOS----------\")\n        preds1 = run_test_baseline(test_dataloader_good_repos, model)\n        print(f\"Accuracy:{round(accuracy_score(preds1, load_test_good_repos_labels), 2)}\")\n        print(f\"f1_score:{round(f1_score(preds1, load_test_good_repos_labels, average='binary'), 2)}\")\n        print(f\"recall_score:{round(recall_score(preds1, load_test_good_repos_labels, average='binary'), 2)}\")\n        print(f\"precision_score:{round(precision_score(preds1, load_test_good_repos_labels, average='binary'), 2)}\")\n        print(f\"confusion matrix: \", confusion_matrix(preds1, load_test_good_repos_labels))\n        conf_matrix_good.append(confusion_matrix(preds1, load_test_good_repos_labels))\n        calc_f1_score = f1_score(preds1, load_test_good_repos_labels, average='binary')\n        if best_f1_score < calc_f1_score:\n            best_f1_score = calc_f1_score\n            best_conf_matrix = confusion_matrix(preds1, load_test_good_repos_labels)\n            best_model = model\n            best_preds = preds1\n\n        # print(\"----------BAD REPOS----------\")\n        #\n        # preds = run_test_baseline(test_dataloader_bad_repos, model)\n        # print(f\"Accuracy:{round(accuracy_score(preds, load_test_bad_repos_labels), 2)}\")\n        # print(f\"f1_score:{round(f1_score(preds, load_test_bad_repos_labels, average='binary'), 2)}\")\n        # print(f\"recall_score:{round(recall_score(preds, load_test_bad_repos_labels, average='binary'), 2)}\")\n        # print(f\"precision_score:{round(precision_score(preds, load_test_bad_repos_labels, average='binary'), 2)}\")\n        #\n        # conf_matrix_bad.append(confusion_matrix(preds, load_test_bad_repos_labels))\n\n    return best_model, best_preds, best_f1_score, best_conf_matrix\n\ndef process_shap_values(shap_values, original_test_data, tokenizer, valid_indecies):\n    store_res = defaultdict(dict)\n    for log_msg_idx, _ in enumerate(shap_values):\n        vals = shap_values[log_msg_idx].reshape(-1, 16)\n        words = original_test_data[log_msg_idx]\n        d = defaultdict(dict)\n        for word_idx in range(len(words)):\n            q = {}\n            q['max'] = 
vals[word_idx][np.abs(vals[word_idx]).argmax()]\n q['norm'] = norm(vals[word_idx])\n d[tokenizer.index2word[words[word_idx]]] = q\n d['log_message_tokenized'] = words\n d['dataset_location'] = valid_indecies[log_msg_idx]\n store_res[log_msg_idx] = d\n\n return store_res\n\ndef translate_dict_to_list(final_res):\n experiment = []\n for key in final_res.keys():\n words_ = []\n meta_info = []\n for key2 in final_res[key].keys():\n if isinstance(final_res[key][key2], dict):\n for key3 in final_res[key][key2].keys():\n words_.append(final_res[key][key2][key3])\n else:\n meta_info.append(final_res[key][key2])\n\n experiment.append((words_, meta_info))\n\n return experiment\n\nscenario = \"info_error\"\n\ndf = pd.read_csv(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/prediction.csv\")\nshap_train_samples = torch.load(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/SHAP_training_data.pth\")\nreduced_module = torch.load(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/SHAP_neural_network.pth\")\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/\"+ scenario +\"/\" + scenario + \"_tokenizer.pickle\", \"rb\") as file:\n tokenizer = pickle.load(file)\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/\" + scenario + \"/\" + scenario + \"_label_mapper.pickle\", \"rb\") as file:\n label_mapper = pickle.load(file)\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/\" + scenario + \"/\" + scenario + \"_original_test_data.pickle\", \"rb\") as file:\n original_test_data = pickle.load(file)\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/embeddings.pickle\", \"rb\") as file:\n vectors = pickle.load(file)\n\n\nfrom pprint import pprint\ndef write_final_res_tof_file(final_res, fname):\n # Build the tree somehow\n with open(fname, 'wt') as out:\n pprint(final_res, stream=out)\n\ntest_dataloader_baseline = torch.load(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error_\" + \"_testdata.pth\")\n\ndef plot_log_message(log_message_stats, tokenizer):\n log_msg_order = log_message_stats[\"log_message_tokenized\"]\n print(log_msg_order)\n\n\n\n log_message_stats.pop(\"dataset_location\")\n log_message_stats.pop(\"log_message_tokenized\")\n\n lista_indecies = []\n # print(log_message_stats.keys())\n for idx, x in enumerate(log_msg_order):\n lista_indecies.append((idx*5+1, 0.5))\n\n plt.xlim(lista_indecies[0][0]-10, lista_indecies[-1][0]+10)\n\n print(lista_indecies)\n intensity = {}\n sum = 0\n for x in log_msg_order:\n intensity[tokenizer.index2word[x]] = log_message_stats[tokenizer.index2word[x]]['norm']\n sum+=intensity[tokenizer.index2word[x]]\n\n for key in intensity.keys():\n intensity[key] = intensity[key]/sum\n\n print(intensity)\n\n for idx, _ in enumerate(log_msg_order):\n print(idx)\n if log_message_stats[tokenizer.index2word[log_msg_order[idx]]]['max'] <= 0:\n color = \"red\"\n else:\n color = \"blue\"\n\n plt.text(lista_indecies[idx][0], lista_indecies[idx][1], tokenizer.index2word[log_msg_order[idx]], size=15, rotation=0, bbox=dict(boxstyle=\"square\", facecolor=color, alpha = 
intensity[tokenizer.index2word[log_msg_order[idx]]]))\n # ha = \"right\", va = \"top\",\n plt.axis(\"off\")\n #\n\nprint(label_mapper)\na = df[df.ground_truth == 0]\nb = a[a.prediction == 0] # true is INFO, predicted as error\nvalid_indecies = b.index\nclass_ = 1\n\n\n# np.random.seed(0)\n# valid_indecies = np.random.choice(valid_indecies, 100)\n# valid_indecies = valid_indecies[:100]\nvalid_indecies = valid_indecies[:5]\n\n\nprint(\"I have selected the samples!\")\ne = shap.DeepExplainer(reduced_module.cuda(), shap_train_samples.cuda())\nprint(\"Calculating SHAP values!\")\nshap_values = e.shap_values(test_dataloader_baseline[valid_indecies].cuda())\nprint(\"Plotting results for class {}\".format(class_))\nfinal_res = process_shap_values(shap_values[class_], original_test_data[valid_indecies], tokenizer, valid_indecies)\n\n\nfinal_res1 = copy(final_res)\n\n# plot_log_message(copy(final_res1[1]), tokenizer)\n\n\n\n\ndef create_data_loaders_baselines_test(load_test, labels_test, batch_size):\n test_data = TensorDataset(\n torch.tensor(load_test, dtype=torch.float32),\n torch.tensor(labels_test.astype(np.int32).flatten(), dtype=torch.int32))\n\n test_sampler = SequentialSampler(test_data)\n test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)\n\n return test_dataloader\n\n\ndef convert_tokenizer_to_explainer_data(load_train, vectors, max_len):\n lista = []\n padding_vector_token = torch.from_numpy(vectors[0])\n for idx in range(load_train.shape[0]):\n tmp_list = []\n if len(load_train[idx]) < max_len:\n\n for j in load_train[idx]:\n tmp_list.append(torch.from_numpy(vectors[j]))\n print(\"size {}\".format(vectors[j]))\n for k in range(max_len - len(load_train[idx])):\n tmp_list.append(padding_vector_token)\n else:\n for j in range(max_len):\n tmp_list.append(torch.from_numpy(vectors[load_train[idx][j]]))\n print(torch.cat(tmp_list, axis=0).shape)\n lista.append(torch.cat(tmp_list, axis=0))\n return lista\n\n\nbatch_size = 1\nmax_len = 50\n\ndef translate_log_messages_index_to_word(tokenized, tokenizer):\n dataset = []\n for x in tokenized:\n log_msg = []\n for j in x:\n log_msg.append(tokenizer.index2word[j])\n dataset.append(\" \".join(log_msg))\n return dataset\n\ndef translate_log_messages_word_to_index(tokenized, tokenizer):\n dataset = []\n for x in tokenized:\n log_msg = []\n for j in x.rsplit(\" \"):\n log_msg.append(tokenizer.word2index[j])\n dataset.append(np.array(log_msg))\n return dataset\n\ntranslated_log_messages = translate_log_messages_index_to_word(original_test_data[valid_indecies], tokenizer)\ndf_tokenized = pd.DataFrame(translated_log_messages)\ndf_tokenized = pd.concat([df_tokenized, df_tokenized], axis=1)\ndf_tokenized[\"word_changed\"] = np.zeros(df_tokenized.shape[0])\ndf_tokenized[\"word_inserted\"] = np.zeros(df_tokenized.shape[0])\ndf_tokenized[\"index_word_changed\"] = np.zeros(df_tokenized.shape[0])\n\n# df_tokenized.columns = [\"original_log_message\", \"modified_log_message\", \"word_changed\",\"word_inserted\", \"location_changed\"]\n# df_tokenized.to_csv(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/error_some_thing.csv\", index=False)\n\n\nclass_ = 1\n\n\ntest_data = pd.read_csv(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/error_error_modified.csv\")\nto_translate = test_data.modified_log_message.values\n\n\nload_test_data = np.array(translate_log_messages_word_to_index(to_translate, tokenizer), 
dtype=\"object\")\n\nload_test = convert_tokenizer_to_explainer_data(load_test_data, vectors, max_len)\nload_test_artificial_truth = np.ones(len(load_test))\ntest_dataloader_good_repos = create_data_loaders_baselines_test(torch.vstack(load_test), load_test_artificial_truth, batch_size)\npreds_modified = run_test_baseline(test_dataloader_good_repos, reduced_module.cuda())\n\ndef calc_shap(reduced_module, shap_train_samples, test_data_baseline, original_test_data, class_, valid_indecies):\n print(\"I have selected the samples!\")\n e = shap.DeepExplainer(reduced_module.cuda(), shap_train_samples.cuda())\n print(\"Calculating SHAP values!\")\n shap_values = e.shap_values(test_data_baseline.cuda())\n print(\"Plotting results for class {}\".format(class_))\n final_res = process_shap_values(shap_values[class_], original_test_data, tokenizer, valid_indecies)\n return final_res\n\nvalid_indecies = np.arange(load_test_data.shape[0])\ntest_data_baseline = test_dataloader_good_repos.dataset.tensors[0]\nres_modifed = calc_shap(reduced_module, shap_train_samples, test_data_baseline, load_test_data, class_, valid_indecies)\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/info_error_modified.pickle\", \"wb\") as file:\n pickle.dump(res_modifed, file)\n\n\n\n\n\nto_translate_original = test_data.original_log_message.values\nload_test_data_original = np.array(translate_log_messages_word_to_index(to_translate_original, tokenizer), dtype=\"object\")\n\nload_test_original = convert_tokenizer_to_explainer_data(load_test_data_original, vectors, max_len)\nload_test_artificial_truth_original = np.ones(len(load_test_original))\ntest_dataloader_good_repos_original = create_data_loaders_baselines_test(torch.vstack(load_test_original), load_test_artificial_truth_original, batch_size)\npreds_original = run_test_baseline(test_dataloader_good_repos_original, reduced_module.cuda())\n\n\nvalid_indecies = np.arange(load_test_data_original.shape[0])\ntest_data_baseline_orignal = test_dataloader_good_repos_original.dataset.tensors[0]\n\nres_original = calc_shap(reduced_module, shap_train_samples, test_data_baseline_orignal, load_test_data_original, class_, valid_indecies)\n\nwith open(\"/home/matilda/PycharmProjects/log_level_estimation/log_level_estimation/5_results/interpretability/info_error/info_error_original.pickle\", \"wb\") as file:\n pickle.dump(res_original, file)","sub_path":"code/evaluation_4/interpretability.py","file_name":"interpretability.py","file_ext":"py","file_size_in_byte":15194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"637346664","text":"import unittest\nfrom faker import Faker\nfrom string import punctuation\nimport random\n\nfrom tokenizer.tokenizer import Tokenizer\n\n\nclass TokenizerTests(unittest.TestCase):\n\n def create_fake_blob(self):\n faker = Faker()\n words = faker.words(nb=6)\n blob = ''\n for word in words:\n delim = random.choice(punctuation)\n blob = blob + delim + word\n return blob, words\n\n def create_fake_blob_with_repeats(self):\n blob, words = self.create_fake_blob()\n repeated_words = [random.choice(words), random.choice(words)]\n blob = blob + \" \" + repeated_words[0] + \" \" + repeated_words[0]\n blob = blob + \" \" + repeated_words[1]\n return blob, words, repeated_words\n\n def test_tokenizer(self):\n t = Tokenizer()\n blob, words = self.create_fake_blob()\n tokens = t.tokenize(blob=blob)\n self.assertEqual(tokens, words)\n\n def 
test_tokenizer_return_empty_if_not_string_blob(self):\n t = Tokenizer()\n tokens = t.tokenize(blob=12345)\n self.assertEqual(tokens, [])\n\n def test_top_tokens(self):\n t = Tokenizer()\n agg = {\"hello\": 1, \"hi\": 4, \"goodbye\": 5, \"seeya\": 2}\n top = t.make_top_tokens(agg, 3)\n self.assertDictEqual(top, {\"hi\": 4, \"goodbye\": 5, \"seeya\": 2})\n\n def test_top_tokens_with_fewer_tokens(self):\n t = Tokenizer()\n agg = {\"hello\": 1, \"hi\": 4, \"goodbye\": 5, \"seeya\": 2}\n top = t.make_top_tokens(agg, 10)\n self.assertDictEqual(top, agg)\n self.assertEqual(len(top), len(agg))\n\n def test_top_tokens_with_non_dict(self):\n t = Tokenizer()\n top = t.make_top_tokens(1234, 3)\n self.assertDictEqual(top, {})\n\n def test_aggregate_tokens(self):\n t = Tokenizer()\n blob, words, repeated_words = self.create_fake_blob_with_repeats()\n res = t.aggregate_tokens(words + repeated_words)\n tokens = words + repeated_words\n self.assertEqual(res, {word: tokens.count(word) for word in tokens})\n\n def test_aggregate_tokens_with_non_list(self):\n t = Tokenizer()\n res = t.aggregate_tokens(1234)\n self.assertEqual(res, {})\n\n def test_run_with_blob(self):\n t = Tokenizer()\n blob, words = self.create_fake_blob()\n res = t.run(blob=blob, top_count=3)\n self.assertEqual(len(res), 3)\n\n def test_run_with_blob_with_repeat_words(self):\n t = Tokenizer()\n blob, words, repeated_words = self.create_fake_blob_with_repeats()\n res = t.run(blob=blob, top_count=3)\n for word in repeated_words:\n self.assertTrue(res[word] > 1)\n","sub_path":"tests/tokenizer_tests.py","file_name":"tokenizer_tests.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"155698988","text":"\"\"\"Views for fnfldawgs site\"\"\"\r\n\r\nfrom collections import OrderedDict\r\nfrom django.shortcuts import render, get_object_or_404, redirect\r\nfrom django.utils import timezone\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .models import Player, Lineup, Score\r\nfrom .forms import PlayerForm, LineupForm, ScoreForm\r\nfrom .fnfl_helpers import is_lineup_taken, order_lineups, \\\r\n order_positions, is_lineup_full, is_position_full, \\\r\n is_prev_week_player, get_player_count, is_player_count_max, \\\r\n total_week_score\r\n\r\n\r\n# Start Page\r\ndef welcome(request):\r\n \"\"\"Display welcome page\"\"\"\r\n\r\n return render(request, 'fnfl/welcome.html', {})\r\n\r\n\r\n# Lineup Views\r\n\r\n@login_required\r\ndef lineup_new(request):\r\n \"\"\"Display form to create new lineup\"\"\"\r\n\r\n if request.method == \"POST\":\r\n form = LineupForm(request.POST)\r\n if form.is_valid():\r\n lineup = form.save(commit=False)\r\n\r\n if is_lineup_taken(request, lineup):\r\n return render(request, 'fnfl/lineup_new.html', {'form': form})\r\n\r\n lineup.author = request.user\r\n lineup.save()\r\n messages.success(request, \"New Lineup created!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = LineupForm()\r\n return render(request, 'fnfl/lineup_new.html', {'form': form})\r\n\r\n\r\n@login_required\r\ndef lineup_publish(request, lineup_pk):\r\n \"\"\"Publish lineup\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n lineup.publish()\r\n messages.success(request, \"Lineup published!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n\r\n\r\n@login_required\r\ndef lineup_draft_list(request):\r\n \"\"\"Display draft lineups\"\"\"\r\n\r\n 
lineups = Lineup.objects.filter(published_date__isnull=True,\r\n author=request.user)\r\n ordered_lineups = order_lineups(lineups)\r\n return render(request, 'fnfl/lineup_draft_list.html', {'lineups': ordered_lineups})\r\n\r\n\r\n@login_required\r\ndef lineup_list(request):\r\n \"\"\"Display lineups\"\"\"\r\n\r\n lineups_players_score = OrderedDict()\r\n lineups = Lineup.objects.filter(published_date__lte=timezone.now(),\r\n author=request.user)\r\n ordered_lineups = order_lineups(lineups)\r\n\r\n for lineup in ordered_lineups:\r\n players = Player.objects.filter(lineup=lineup)\r\n ordered_players = order_positions(players)\r\n week_score = total_week_score(lineup)\r\n\r\n # for lineups_players_score dictionary\r\n # Dictionary key = lineup\r\n # Dictionary values are a list.\r\n # list[0] = the ordered players list\r\n # list[1] = the weekly score\r\n lineups_players_score[lineup] = [ordered_players, week_score]\r\n\r\n return render(request, 'fnfl/lineup_list.html',\r\n {'lineups_players_score': lineups_players_score})\r\n\r\n\r\n@login_required\r\ndef lineup_edit(request, lineup_pk):\r\n \"\"\"Edit lineup\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n if request.method == \"POST\":\r\n form = LineupForm(request.POST, instance=lineup)\r\n if form.is_valid():\r\n lineup = form.save(commit=False)\r\n\r\n if is_lineup_taken(request, lineup):\r\n return render(request, 'fnfl/lineup_new.html', {'form': form})\r\n\r\n lineup.author = request.user\r\n lineup.save()\r\n messages.success(request, \"Lineup changed!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = LineupForm(instance=lineup)\r\n return render(request, 'fnfl/lineup_edit.html', {'form': form})\r\n\r\n\r\n@login_required\r\ndef lineup_detail(request, lineup_pk):\r\n \"\"\"Display lineup positions and score for each player and the week total\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n players = Player.objects.filter(lineup=lineup)\r\n\r\n qb_player = \"\"\r\n rb1_player = \"\"\r\n rb2_player = \"\"\r\n wr1_player = \"\"\r\n wr2_player = \"\"\r\n te_player = \"\"\r\n k_player = \"\"\r\n\r\n qb_score = 0\r\n rb1_score = 0\r\n rb2_score = 0\r\n wr1_score = 0\r\n wr2_score = 0\r\n te_score = 0\r\n k_score = 0\r\n\r\n for player in players:\r\n if player.position == 'QB':\r\n qb_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n qb_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n\r\n if player.position == 'RB' and rb1_player == '':\r\n rb1_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n rb1_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n continue # Continue here so RB is not displayed twice\r\n\r\n if player.position == 'RB' and rb1_player != '':\r\n rb2_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n rb2_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n\r\n if player.position == 'WR' and wr1_player == '':\r\n wr1_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n wr1_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n continue # Continue here so WR is not displayed twice\r\n\r\n if player.position == 'WR' and wr1_player != '':\r\n wr2_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n 
wr2_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n\r\n if player.position == 'TE':\r\n te_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n te_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n\r\n if player.position == 'K':\r\n k_player = player\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup,\r\n player_to_score=player)\r\n k_score = score.week_score\r\n except Score.DoesNotExist:\r\n pass\r\n\r\n week_score = total_week_score(lineup)\r\n\r\n return render(request, 'fnfl/lineup_detail.html',\r\n {'lineup': lineup,\r\n 'week_score': week_score,\r\n 'qb': qb_player,\r\n 'rb1': rb1_player,\r\n 'rb2': rb2_player,\r\n 'wr1': wr1_player,\r\n 'wr2': wr2_player,\r\n 'te': te_player,\r\n 'k': k_player,\r\n 'qb_score': qb_score,\r\n 'rb1_score': rb1_score,\r\n 'rb2_score': rb2_score,\r\n 'wr1_score': wr1_score,\r\n 'wr2_score': wr2_score,\r\n 'te_score': te_score,\r\n 'k_score': k_score,}\r\n )\r\n\r\n\r\n@login_required\r\ndef lineup_remove(request, lineup_pk):\r\n \"\"\"Remove lineup\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n lineup.delete()\r\n messages.success(request, \"Lineup deleted!\")\r\n return redirect('lineup_list')\r\n\r\n\r\n# Player Views\r\n\r\n@login_required\r\ndef add_player(request, lineup_pk):\r\n \"\"\"Add new player\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n\r\n if is_lineup_full(request, lineup):\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n\r\n if request.method == \"POST\":\r\n form = PlayerForm(request.POST)\r\n if form.is_valid():\r\n player = form.save(commit=False)\r\n\r\n if is_position_full(request, lineup, player):\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n if is_prev_week_player(request, lineup, player):\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n if is_player_count_max(request, player):\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n player.lineup = lineup\r\n player.save()\r\n messages.success(request, \"Player added!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = PlayerForm()\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n\r\n@login_required\r\ndef edit_player(request, lineup_pk, player_pk):\r\n \"\"\"Edit created player\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n player = get_object_or_404(Player, pk=player_pk)\r\n if request.method == 'POST':\r\n form = PlayerForm(request.POST, instance=player)\r\n if form.is_valid():\r\n player = form.save(commit=False)\r\n\r\n if is_position_full(request, lineup, player, edit=True):\r\n return render(request, 'fnfl/edit_player.html', {'form': form})\r\n\r\n if is_prev_week_player(request, lineup, player):\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n if is_player_count_max(request, player):\r\n return render(request, 'fnfl/add_player.html', {'form': form})\r\n\r\n player.lineup = lineup\r\n player.save()\r\n messages.success(request, \"Player modified!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = PlayerForm(instance=player)\r\n return render(request, 'fnfl/edit_player.html', {'form': form})\r\n\r\n\r\n@login_required\r\ndef remove_player(request, player_pk):\r\n \"\"\"Remove created player\"\"\"\r\n\r\n player = get_object_or_404(Player, pk=player_pk)\r\n player.delete()\r\n messages.success(request, \"Player 
removed from lineup!\")\r\n return redirect('lineup_list')\r\n\r\n\r\n# Score Views\r\n\r\n@login_required\r\ndef add_score(request, lineup_pk, player_pk):\r\n \"\"\"Add stats to player to calculate score\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n player = get_object_or_404(Player, pk=player_pk)\r\n\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup, player_to_score=player)\r\n\r\n # If we're here then a score already exists for this player\r\n messages.warning(request, \"Score already added. Choose Edit Score!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n except Score.DoesNotExist:\r\n if request.method == \"POST\":\r\n form = ScoreForm(request.POST)\r\n if form.is_valid():\r\n score = form.save(commit=False)\r\n score.lineup_to_score = lineup\r\n score.player_to_score = player\r\n score.save()\r\n messages.success(request, \"Added score to player!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = ScoreForm()\r\n\r\n return render(request, 'fnfl/add_score.html', {'form': form})\r\n\r\n\r\n@login_required\r\ndef edit_score(request, lineup_pk, player_pk):\r\n \"\"\"Edit stats of player to recalculate score\"\"\"\r\n\r\n lineup = get_object_or_404(Lineup, pk=lineup_pk)\r\n player = get_object_or_404(Player, pk=player_pk)\r\n\r\n try:\r\n score = Score.objects.get(lineup_to_score=lineup, player_to_score=player)\r\n\r\n # If we're here then a score is available to edit\r\n if request.method == \"POST\":\r\n form = ScoreForm(request.POST, instance=score)\r\n if form.is_valid():\r\n score = form.save(commit=False)\r\n score.lineup_to_score = lineup\r\n score.player_to_score = player\r\n score.save()\r\n messages.success(request, \"Edited score of player!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n else:\r\n form = ScoreForm(instance=score)\r\n return render(request, 'fnfl/edit_score.html', {'form': form})\r\n except Score.DoesNotExist:\r\n messages.warning(request, \"No score available to edit. 
Choose Add Score!\")\r\n return redirect('lineup_detail', lineup_pk=lineup.pk)\r\n\r\n\r\n# Count Views\r\n\r\n@login_required\r\ndef player_usage(request):\r\n \"\"\"Show how many times a player has been used\"\"\"\r\n\r\n p_count = get_player_count(request)\r\n player_count_list = []\r\n\r\n for player in p_count:\r\n player_count_list.append((p_count[player], ' '.join(player)))\r\n\r\n player_count_list.sort(key=lambda tup: tup[0], reverse=True)\r\n\r\n return render(request, 'fnfl/player_count.html', {'player_count_list': player_count_list})\r\n","sub_path":"fnfl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"28406044","text":"from flask import Flask, Response\nimport os\nimport time\nfrom subprocess import call\nfrom werkzeug.exceptions import ExpectationFailed\napp = Flask(__name__)\n\n@app.route('/startapp', methods=['POST']) \ndef runapp():\n try:\n # os.system('/bin/bash -c ./list.sh')\n # os.system('/bin/bash -c cd /home/orgacac/develop/nexmo-voice-interface-asr && node app-gstt.js')\n os.system('/bin/bash -c /home/orgacac/test/flask-api/runapp.sh &')\n res = Response(status=200)\n except TypeError as err:\n res = err\n return res\n\n@app.route('/stopapp', methods= [\"POST\"])\ndef killapp():\n try:\n os.system('kill $(lsof -t -i:3000)')\n res = Response(status=200)\n except TypeError as err:\n res = err\n return res\n\n@app.route('/startbot', methods =[\"POST\"])\ndef botrun():\n try:\n # os.system('/bin/bash -c source /home/orgacac/develop/botenv/bin/activate && cd /home/orgacac/develop/cpf_nomination_bot && rasa run --enable-api -p 500')\n os.system('/bin/bash -c /home/orgacac/test/flask-api/runbot.sh &')\n time.sleep(45) # sleep for 45 seconds until bot up and running\n res = Response(status=200)\n except TypeError as err:\n res = err\n print(res)\n return res\n\n@app.route('/stopbot', methods= [\"POST\"])\ndef killbot():\n try:\n os.system('kill $(lsof -t -i:5005)')\n res = Response(status=200)\n except TypeError as err:\n res = err\n return res\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=5001,debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"167961871","text":"\"\"\"\nCopyright (c) 2018 Cisco Systems, Inc.\nAuthor: \"Christian Oeien\" \n\"\"\"\nimport tng\nfrom tng_sl.contrib.pitstop_helper import PitstopTestCase\nfrom pitstop.exp import (\n Wait, Flag, Receive, Send, Command, transport_verify)\nfrom pitstop import dns\n\n\nclass Data(dns.Database):\n def derive(self, label):\n self.domain = \"%s.PITSTOP.CISCO.COM\" % (label,)\n\n def generate(self, syms):\n d = self.domain\n a_name = \"SIP.%s\" % (d,)\n srv_name = \"_SIP._UDP.%s\" % (d,)\n\n self.responses = {\n (a_name, \"A\"): dns.Response(0, [\n dns.Arr(a_name, 240, syms[\"host\"])]),\n (srv_name, \"SRV\"): dns.Response(0, [\n dns.SRVrr(srv_name, 240, 1, 1, syms[\"sip0\"], a_name),\n dns.SRVrr(srv_name, 240, 2, 1, syms[\"sip1\"], a_name)])}\n\n\nclass Test(PitstopTestCase):\n def test_udp_secondary_when_5xx_call(self):\n db = Data(self.extern_handler)\n self.conf.on_gen.append(db.generate)\n ns = dns.Setup(\n db.lookup, self.oPhone1.ip, self.dut.settings[\"unapt\"])\n\n self.conf.edit(\"Proxy_1_\", db.domain)\n self.conf.edit(\"Primary_DNS\", ns.addr)\n self.conf.edit(\"DNS_Server_Order\", \"Manual\")\n 
self.conf.edit(\"Use_DNS_SRV_1_\", \"Yes\")\n self.conf.edit(\"Auto_Register_When_Failover_1_\", \"Yes\")\n self.conf.edit(\"Try_Backup_RSC\", \"502\")\n\n self.spec.update({\n \"|dns\": [dns.Q()],\n \"test\": [\n Wait(\"idle\").then([\n Command(self.dut.make_call, \"0\")]),\n Receive(\"INVITE\", transaction_label=\"i\").then([\n Send(\"502\", on_transaction=\"i\"),\n Command(self.dut.end_call)]),\n Receive(\"REGISTER\", transaction_label=\"r\").then([\n Send(\"200\", on_transaction=\"r\"),\n Command(transport_verify, 1),\n Flag(\"idle\")])]})\n self.pitstop([\"udp\"] * 2)\n\n\ndef main():\n tng.api.runner()\n","sub_path":"pitstop_tests/dns/udp_secondary_when_5xx_call.py","file_name":"udp_secondary_when_5xx_call.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"108437747","text":"import shutil\nimport tempfile\nfrom pathlib import Path\n\nimport pytest\nimport requests\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\nCustomUser = get_user_model()\n\n\n@pytest.fixture()\ndef create_custom_user(db):\n def _create_custom_user(*args, **kwargs):\n \"\"\" Create a test user with email, password etc supplied in the fixture call. \"\"\"\n return CustomUser.objects.create_user(*args, **kwargs)\n\n return _create_custom_user\n\n\n@pytest.fixture()\ndef create_temp_upload_file(settings):\n \"\"\"This creates a image file for testing and then automatically deletes it\n after the test is complete.\"\"\"\n temp_dir = tempfile.gettempdir()\n temp_media_root = tempfile.mkdtemp()\n temp_download_dir = tempfile.mkdtemp()\n file_name = Path(temp_dir) / temp_download_dir / \"test.jpg\"\n response = requests.get(\"https://picsum.photos/300\")\n with open(file_name, \"wb\") as f:\n f.write(response.content)\n with open(file_name, \"rb\") as f:\n content = f.read()\n settings.MEDIA_ROOT = temp_media_root\n file = SimpleUploadedFile(name=file_name, content=content)\n yield file\n shutil.rmtree(temp_media_root, ignore_errors=True)\n","sub_path":"django_pytest_tutorial/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156594341","text":"#!/usr/bin/python\n\nimport argparse\nimport hashlib\nimport sys\nimport os\nimport gzip\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\")\n parser.add_argument(\"--checksum\", action=\"store_true\")\n args = parser.parse_args()\n\n # Use file modified time, basically instantaneous\n input_hash = str(os.stat(args.input).st_mtime) + '\\n' + str(os.stat(args.input).st_size)\n \n if args.checksum == True:\n # Compute hash of input file, slow and memory intensive\n sha = hashlib.sha1()\n sha.update(open(args.input, 'rb').read())\n input_hash = input_hash + '\\n' + sha.hexdigest()\n else:\n input_hash = input_hash + '\\n' + '0'\n \n\n outputname = args.input + \".vcfe\"\n\n try:\n with open(outputname, 'r') as output:\n # read in old hash\n output_hash = []\n for output_line in output:\n if output_line.startswith(\"begin\"):\n break\n output_hash.append(output_line.strip())\n # If hash was same as before, no need to continue\n if output_hash == input_hash.split('\\n'):\n print(\"VCF file not changed. No need to re-create offsets.\")\n sys.exit()\n # File has been changed\n else:\n print(\"VCF file changed since last offset creation. 
Re-creating offsets.\")\n except EnvironmentError:\n # File not present\n print(\"VCFE file not found, creating offsets.\")\n\n with open(outputname, 'w') as output:\n # Print input file's hash to output file\n print(input_hash, file=output)\n print(\"begin offsets\", file=output)\n \n with gzip.open(args.input, 'r') as infile:\n # consume lines until actual content\n infile_line = infile.readline()\n while not infile_line.startswith(b\"#CHROM\"):\n infile_line = infile.readline()\n\n offsets = {}\n\n for infile_line in infile:\n # vcf format has chromosome as the first column, then a tab\n infile_line_chromosome = infile_line.split(b'\\t')[0]\n \n if infile_line_chromosome not in offsets:\n # get current position in file\n offset = infile.tell()\n \n # store into dictionary\n offsets[infile_line_chromosome] = offset\n \n # progress update\n print(\"Adding offset\", offset, \"for chromosome\", infile_line_chromosome)\n \n # print to vcfe file\n print(infile_line_chromosome, offset, sep='\\t', file=output)\n output.flush()\n \n","sub_path":"build-offsets.py","file_name":"build-offsets.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"373306076","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport ray\n\nfrom ray.rllib.agent import get_agent_class\n\n\ndef get_mean_action(alg, obs):\n out = []\n for _ in range(2000):\n out.append(float(alg.compute_action(obs)))\n return np.mean(out)\n\n\nray.init()\n\nCONFIGS = {\n \"ES\": {\"episodes_per_batch\": 10, \"timesteps_per_batch\": 100},\n \"DQN\": {},\n \"PPO\": {\"num_sgd_iter\": 5, \"timesteps_per_batch\": 1000},\n \"A3C\": {\"use_lstm\": False},\n}\n\n\ndef test(use_object_store, alg_name):\n cls = get_agent_class(alg_name)\n alg1 = cls(config=CONFIGS[name], env=\"CartPole-v0\")\n alg2 = cls(config=CONFIGS[name], env=\"CartPole-v0\")\n\n for _ in range(3):\n res = alg1.train()\n print(\"current status: \" + str(res))\n\n # Sync the models\n if use_object_store:\n alg2.restore_from_object(alg1.save_to_object())\n else:\n alg2.restore(alg1.save())\n\n for _ in range(10):\n obs = np.random.uniform(size=4)\n a1 = get_mean_action(alg1, obs)\n a2 = get_mean_action(alg2, obs)\n print(\"Checking computed actions\", alg1, obs, a1, a2)\n assert abs(a1 - a2) < .1, (a1, a2)\n\n\nif __name__ == \"__main__\":\n # https://github.com/ray-project/ray/issues/1062 for enabling ES test too\n for use_object_store in [False, True]:\n for name in [\"ES\", \"DQN\", \"PPO\", \"A3C\"]:\n test(use_object_store, name)\n\n print(\"All checkpoint restore tests passed!\")\n","sub_path":"python/ray/rllib/test/test_checkpoint_restore.py","file_name":"test_checkpoint_restore.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"501556141","text":"import math\n\ndef checkio(height, width):\n S,V = 0,0\n h = height/2.0\n w = width/2.0\n \n if h>w:\n V= (4.0/3.0)*math.pi*h*w*w\n e = math.sqrt(1.0 - (w*w)/(h*h))\n S = 2.0*math.pi*w*w*(1+h/(w*e)*math.asin(e))\n elif h')\ndef legacy(_i):\n url = (request.full_path).replace('server', 'warmane')\n return redirect(url, code=301)\n\n\n@app.route('//', methods=['GET'])\ndef search(server_arg, realm_arg):\n try:\n if realm_arg in REALMS[server_arg]:\n pass\n except KeyError:\n abort(404)\n search_arg = request.args.get('search', 
'')\n time_arg = request.args.get('time', None)\n AH_title = f\"{realm_arg.replace('_', ' ')} Auction House Price History\"\n tab = f\"{CAP[server_arg]}: {realm_arg.replace('_', ' ')}\"\n html_page = 'search.html'\n epoch_now = int(datetime.datetime.now().timestamp())\n time_dir = {'30d': 2592000,\n '3m': 7890000,\n '1y': 31536000,\n 'all': epoch_now}\n try: \n scantime = epoch_now - time_dir[time_arg]\n except KeyError:\n scantime = None\n \n if not search_arg or scantime == None:\n return render_template(html_page, title=tab, AH_title=AH_title,\n tvalue='3m')\n query = (search_arg, scantime)\n cur.execute(\"SELECT itemid FROM WOTLK_items \"\n \"WHERE itemname IS ? ;\", (search_arg,))\n \n if not cur.fetchone(): # no direct match\n if len(search_arg) < 3:\n return render_template(html_page, title='Item not found',\n AH_title=AH_title, \n error='Type at least 3 characters',\n value=search_arg, tvalue=time_arg)\n \n cur.execute(\"SELECT itemname FROM WOTLK_items WHERE itemname LIKE ?;\",\n ('{0}{search}{0}'.format('%', search=query[0]),))\n item_matches = sorted(cur.fetchall(), key=lambda x: len(x[0]))\n if item_matches:\n write_log(realm_arg, search_arg, 'suggest')\n item_suggestions = []\n for match in item_matches:\n href_display = match[0]\n href_item = match[0].replace(' ', '+')\n href = f'/{server_arg}/{realm_arg}?search={href_item}&time={time_arg}'\n item_suggestions.append((href_display, href))\n return render_template(html_page, title=tab,\n AH_title=AH_title,\n suggestions=item_suggestions,\n value=search_arg, tvalue=time_arg)\n else:\n return render_template(html_page, title='Item not found',\n AH_title=AH_title,\n error='Item was not found in the database',\n value=search_arg, tvalue=time_arg)\n \n match = re.match('((.+?_(A|H))).+', realm_arg)\n short = match.group(1)\n sql = (f\"\"\"SELECT itemname, price, scantime FROM {short}_prices\n INNER JOIN WOTLK_items ON {short}_prices.itemid=WOTLK_items.itemid \n INNER JOIN WOTLK_scans ON {short}_prices.scanid=WOTLK_scans.scanid \n WHERE WOTLK_items.itemname IS ? 
\n AND WOTLK_scans.scantime > ?;\"\"\")\n cur.execute(sql, query)\n datapoints = sorted(cur.fetchall(), key=lambda x: x[2])\n if not datapoints:\n msg = \"This item has not been listed on the auction house in the selected time range.\"\n return render_template(html_page, title='No prices available',\n AH_title=AH_title, error=msg,\n value=search_arg, tvalue=time_arg)\n '''\n # Calculate MAD and remove outliers\n if len(datapoints) > 21:\n outliers = []\n imax = len(datapoints) - 1\n for i, point in enumerate(datapoints):\n price = point[1]\n if i - 10 < 0:\n indexes = range(21)\n elif i + 10 > imax:\n indexes = range(imax-20, imax+1)\n else:\n indexes = range(i-10, i+11)\n prices = [datapoints[i][1] for i in indexes]\n \n median_price = (median(prices))\n diffs_median = []\n for _price in prices:\n diffs_median.append(abs(_price-median_price))\n mad = (median(diffs_median))\n #print('MAD: '+str(mad)) # debug\n #print('MED: '+str(median_price))\n #print('price: '+str(price))\n #print('prices: '+str(prices))\n if mad == 0:\n continue\n if abs(price-median_price) / mad > 20:\n outliers.append(i)\n for index in sorted(outliers, reverse=True):\n del datapoints[index]\n '''\n \n # Remove outliers\n prices = []\n outliers = []\n for i in datapoints:\n prices.append(i[1])\n prices.sort()\n high_price = 1.5 * prices[int(0.95 * len(prices))]\n for i, point in enumerate(datapoints):\n price = point[1]\n if price > high_price:\n outliers.append(i)\n for index in sorted(outliers, reverse=True):\n del datapoints[index]\n \n # Format data from query\n item = datapoints[0][0]\n time_list = []\n price_list = []\n for i in datapoints:\n time_list.append(i[2])\n price_list.append(i[1])\n # Generate moving average\n window = '5D'\n index = pd.to_datetime(time_list, unit = 's')\n df = pd.DataFrame({'prices': price_list}, index)\n dfr = df.rolling(window).mean()\n # Create traces\n trace_price = plotgo.Scattergl(\n x = index,\n y = price_list,\n text = list(map(copper_to_price, price_list)),\n hoverinfo = 'text+x',\n name = item,\n mode = 'markers',)\n \n trace_avg = plotgo.Scattergl(\n x = dfr.axes[0],\n y = dfr['prices'],\n text = list(map(copper_to_price, dfr['prices'])),\n hoverinfo = 'text+x',\n hoverlabel = dict(bordercolor = '#ffffff',\n font = dict(color = '#ffffff')),\n name = 'average ({window})'.format(window = window),\n mode = 'lines',)\n plotdata = [trace_price, trace_avg]\n # Layout\n max_val = max(price_list)\n nr_ticks = 6\n y_val = 0\n y_vals = []\n \n _t1 = (max_val / nr_ticks)\n _t2 = int(np.log10(_t1))\n _t3 = int(_t1 / (10**_t2)) + 1\n step = _t3 * 10**_t2\n for i in range(nr_ticks):\n y_val += step\n y_vals.append(y_val)\n \n layout = plotgo.Layout(\n title = \"{item}'s price history\".format(item=item),\n font = dict(\n color = '#ffffff' \n ),\n yaxis=dict(\n gridcolor='rgba(26, 26, 26, 0.6)',\n tickvals = y_vals,\n ticktext = list(map(copper_to_price, y_vals))\n ),\n xaxis = dict(\n gridcolor='rgba(26, 26, 26, 0.2)',\n hoverformat = '%e %b %Y'\n ),\n paper_bgcolor='#263238',\n plot_bgcolor='#263238'\n )\n\n fig = dict(data=plotdata, layout=layout)\n chart = plotly.offline.plot(fig, include_plotlyjs=False, output_type=\"div\")\n write_log(realm_arg, item, 'graph', time_arg)\n return render_template(html_page, title=tab, AH_title=AH_title,\n chart=chart, value=search_arg, tvalue=time_arg)\n\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"627389967","text":"\n\"contact list\"\nContact_list=[\n {\"name\":\"Rashmi\",\"number\":\"8797989821\",\"email\":\"rr@gmail.com\"},\n {\"name\":\"Saria\",\"number\":\"9897989821\",\"email\":\"ss@gmail.com\"},\n {\"name\": \"shalin\", \"number\": \"5214521452\", \"email\": \"shalin@gmail.com\"},\n {\"name\":\"patel\",\"number\":\"9659874632\",\"email\":\"patel@gmail.com\"},\n {\"name\":\"megha\",\"number\":\"9853200147\",\"email\":\"megha@gmail.com\"},\n]\n\nprompt=\"\"\"Choose operation by entering number:\npress a to display contact by name\npress b to display contact by number\npress c to edit contact by name\npress any character to exit \n\"\"\"\n\n'function to display contact by name'\ndef contactByName():\n name=input(\"Enter name to find contact: \")\n for contact in Contact_list:\n if(contact.get('name')==name):\n print(\"{name:\",contact.get('name'),\", number:\",contact.get('number'),\", email:\",contact.get('email'),\"}\")\n\n'function to display contact by number'\ndef contactByNumber():\n number = input(\"Enter number to find contact: \")\n for contact in Contact_list:\n if (contact.get('number') == number):\n print(\"{name:\", contact.get('name'), \", number:\", contact.get('number'), \", email:\", contact.get('email'),\n \"}\")\n\n'function to edit contact by name'\ndef editByName():\n name = input(\"Enter name to find contact: \")\n for contact in Contact_list:\n if (contact.get('name') == name):\n number=input(\"Enter new number: \")\n contact['number']=number\n print(\"Updated contact is {name:\", contact.get('name'), \", number:\", contact.get('number'), \", email:\", contact.get('email'),\n \"}\")\n\n\"dictionary for operation\"\noperationDic={\n \"a\":contactByName,\n \"b\":contactByNumber,\n \"c\":editByName,\n}\n\n\nwhile(1==1):\n userInput=input(prompt)\n if userInput in [\"a\",\"b\",\"c\"]:\n operationDic[userInput]()\n else:\n break\n\n \n\n","sub_path":"lab 2/ans 2.py","file_name":"ans 2.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"324986248","text":"from apis._models.profile import Profile\nfrom django.utils import timezone\nfrom functools import reduce\n\nclass PointCalculator:\n attribute = None\n today_points = None\n user_pk = None\n\n def __init__(self, user_pk, attribute):\n self.attribute = attribute\n self.user_pk = user_pk\n points = Profile.objects.get(pk=user_pk).points.filter(date=timezone.now().date())\n if points.count() > 0:\n self.today_points = points[0]\n\n def today_pk(self):\n pk = None\n if self.today_points is not None:\n pk = self.today_points.pk\n return pk\n\n def today_total(self):\n pk = None\n point = 0\n if self.today_points is not None:\n attributes = self.today_points.attributes.all()\n attr = list(filter(lambda val: val.attribute.name == self.attribute, attributes))\n if len(attr) > 0:\n pk = attr[0].pk\n point = reduce(lambda a, b: a + b, map(lambda val: val.point, attr))\n return {\n 'pk': pk,\n 'point': point\n }\n\n\n","sub_path":"apis/components/point/functions/point_calculator.py","file_name":"point_calculator.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"118047280","text":"import random\nimport visualiser as vis\n\n\ndef random_solution(self, count):\n \"\"\"\n Random algorithm > assigns all houses to a random battery & tries other battery when the battery is already full.\n \"\"\"\n # initialize the values\n 
best_distance = 0\n    first_attempt = True\n    best_connections = None\n    distances_total = dict()\n    key = 1\n\n    # Run multiple times\n    for j in range(10000):\n\n        # Set total distance Grid to 0 and create empty list with connections of houses to batteries\n        total_distance = 0\n        connections = []\n\n        # Empty capacity\n        for battery in self.batteries:\n            self.batteries[battery].currentCapacity = 0\n\n        # Iterate over all houses and get max output\n        for i in range(150):\n            house = i + 1\n            max_output = self.houses[house].max_output\n            picked_batteries = []\n\n            while True:\n                # Pick random battery\n                battery = random.randint(1, count)\n                index_battery = battery - 1\n\n                # Check if already chosen and add to picked list\n                if battery not in picked_batteries:\n                    picked_batteries.append(battery)\n\n                    max_capacity = self.batteries[battery].capacity\n                    current_capacity = self.batteries[battery].currentCapacity\n                    possible_capacity = current_capacity + max_output\n\n                    # Check if max capacity not yet reached\n                    if possible_capacity <= max_capacity:\n                        # Check distance from house to battery and add to total distances\n                        distances_house = self.distances[i]\n                        distance = distances_house[index_battery]\n                        total_distance += distance\n                        # Add output to current capacity\n                        self.batteries[battery].currentCapacity += max_output\n                        house_to_battery = {'house': house, 'battery': battery, 'distance': distance, 'max_output_house': max_output}\n                        connections.append(house_to_battery)\n                        break\n\n                # Check if all batteries have been tried\n                if len(picked_batteries) == count:\n                    break\n\n        # Only save results when all houses connected\n        if len(connections) == 150:\n\n            # Adds the distance to a dict with the number of the successful run as its key\n            distances_total[key] = total_distance\n            key += 1\n\n            # keep the connections with the lowest total distance seen so far\n            if first_attempt:\n                best_distance = total_distance\n                best_connections = connections\n                first_attempt = False\n            else:\n                if total_distance < best_distance:\n                    best_distance = total_distance\n                    best_connections = connections\n\n    # Saves the dict to a csv\n    algorithm = \"random\"\n    vis.dict_to_csv(distances_total, algorithm)\n\n    return [best_distance, best_connections]\n","sub_path":"code/algorithms/random_algorithm.py","file_name":"random_algorithm.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"231702372","text":"import mitmproxy\nfrom mitmproxy import ctx\nfrom urllib.parse import urlparse\nimport urllib.parse\nimport json\nimport os\nimport os.path\nimport parse_json\nimport csv\nimport requests\n\nROBOT_JS = \"http://s0.meituan.net/mx/rohr/rohr.min.js\"\nHTTP_PROXY = \"http://www.82ip.com/api.asp?key=20180917151858384&getnum=1&anonymoustype=2&filter=1&area=1&formats=2&proxytype=0\"\nZHILIAN_PROXY = \"http://ip.11jsq.com/index.php/api/entry?method=proxyServer.generate_api_url&packid=7&fa=3&fetch_key=&qty=1&time=5&pro=&city=&port=1&format=txt&ss=1&css=&dt=1&specialTxt=3&specialJson=\"\nDEFAULT_PROXY = (\"117.90.2.156\", 13141)\nCURRENT_PROXY = None\nlocal_js = None\n\n\ndef http_connect(flow:mitmproxy.http.HTTPFlow):\n    pass\n\ndef request(flow:mitmproxy.http.HTTPFlow):\n    try:\n        if flow.request.method == \"CONNECT\":\n            return\n        url = flow.request.url\n        path = urlparse(url).path\n        host = flow.request.host\n        '''\n        pattern = re.compile(\"/home/.+\")\n        home_match = re.match(pattern,path)\n        if host_match and home_match:\n            #proxy = getProxyIp()\n            proxy=(\"115.218.223.213\",9000)\n        else:\n            proxy = DEFAULT_PROXY\n        '''\n\n        if host == \"joke.non_this.cn\" and path == 
\"/non/this/api\":\n global CURRENT_PROXY\n try:\n qs = urllib.parse.parse_qs(urlparse(url).query)\n ctx.log.info(str(qs))\n CURRENT_PROXY= qs['i'][0],int(qs['p'][0])\n except:\n CURRENT_PROXY = None\n\n proxy = DEFAULT_PROXY if CURRENT_PROXY == None else CURRENT_PROXY\n ctx.log.info(\"using proxy.\")\n ctx.log.info(\"proxy change to {}:{}\".format(*proxy))\n if flow.live:\n flow.live.change_upstream_proxy_server(proxy)\n\n if host == 'waimai.meituan.com' and path == '/ajax/poilist':\n params = flow.request.urlencoded_form\n params['page_offset'] = str(int(params['page_offset']) - 20)\n ctx.log.info(str(flow.request.urlencoded_form))\n except:\n return\n\ndef response(flow:mitmproxy.http.HTTPFlow):\n try:\n url = flow.request.url\n path = urlparse(url).path\n host_match = flow.request.host == 'waimai.meituan.com'\n path_match = path == '/ajax/poilist'\n if host_match and path_match:\n content = flow.response.content.decode('utf-8')\n rest_json = json.loads(content)\n info = parse_json.parseJsonResponse(rest_json)\n saveData(info)\n if url == ROBOT_JS:\n global local_js\n if local_js is None:\n #ctx.log.info(\"Read Local File.\")\n with open(\"mtwm.min.js\", \"rb\") as js:\n local_js = js.read()\n flow.response.content = local_js\n except:\n return\n\ndef saveData(data,outputfile=\"out.csv\"):\n outputfile = os.path.join(os.curdir+os.path.sep+\"output\",outputfile)\n with open(outputfile,\"a+\",newline=\"\",encoding=\"utf-8\" ) as datacsv:\n csvwriter = csv.writer(datacsv,dialect=(\"excel\"))\n if os.path.getsize(outputfile) == 0:\n csvwriter.writerow((\"商家ID\",\"商家\",\"地址\",\"商家类别\",\"配送费类型\",\"电话\"))\n csvwriter.writerows(data)\n\n\n# mitmdump --mode upstream:http://default-upstream-proxy.local:8080/ -s meituan_proxy.py\n\n\n\n","sub_path":"proxy/waimai_meituan/meituan_proxy.py","file_name":"meituan_proxy.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"439468719","text":"import os\nimport unittest\n\nfrom mock import MagicMock, call, patch\n\nfrom timehabit import app, models\n\n\nclass TestCase(unittest.TestCase):\n pass\n\n\nclass HandlerTestCase(TestCase):\n PATCH_OAUTH = True\n\n def setUp(self):\n self._unpatch = []\n app.config['TESTING'] = True\n app.config['WTF_CSRF_ENABLED'] = False\n self.app = app.test_client()\n if self.PATCH_OAUTH:\n self.patch_oauth()\n super(HandlerTestCase, self).setUp()\n\n def tearDown(self):\n for p in self._unpatch:\n p.stop()\n\n def patch(self, *args, **kwargs):\n p = patch(*args, **kwargs)\n self._unpatch.append(p)\n return p.start()\n\n def patch_oauth(self):\n ns = 'flask_oauthlib.client.OAuthRemoteApp'\n\n self.oauth2_response = self.patch('%s.handle_oauth2_response' % ns)\n self.oauth2_response.return_value = {\n 'access_token': 'token',\n }\n\n self.oauth2_get = self.patch('%s.get' % ns)\n self.oauth2_get.return_value.data = {\n 'email': 'user@example.org',\n }\n","sub_path":"tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"171768415","text":"import pytest\nimport os\nimport numpy as np\nimport pyscal.core as pc\nimport pyscal.crystal_structures as pcs\nimport pyscal.traj_process as ptp\n\ndef test_create_multislice_dump():\n \"\"\"\n Create a multitest dump file and test it\n \"\"\"\n atoms, boxdims = pcs.make_crystal('bcc', repetitions=[6,6,6])\n sys = pc.System()\n sys.atoms = atoms\n sys.box = boxdims\n\n 
ptp.write_structure(sys, \"tests/bcc1.dump\")\n\n atoms2, boxdims2 = pcs.make_crystal('bcc', repetitions=[6,6,6])\n #modify the coordinates of one atom\n x = atoms2[0].pos\n x[0] += 0.01\n atoms2[0].pos = x\n assert len(atoms2) == 432\n #write it out\n sys2 = pc.System()\n sys2.atoms = atoms2\n sys2.box = boxdims2\n\n ptp.write_structure(sys2, \"tests/bcc2.dump\")\n\n #now merge the two dump files\n os.system(\"cat tests/bcc1.dump tests/bcc2.dump > tests/bcc3.dat\")\n #os.remove(\"tests/bcc1.dump\")\n #os.remove(\"tests/bcc2.dump\")\n\n #now this file should have info of both - read it in\n sys3 = pc.System()\n sys3.read_inputfile(\"tests/bcc3.dat\", frame=1)\n atoms = sys3.atoms\n assert len(atoms) == 432\n assert atoms[0].pos == [0.01,0,0]\n\n #now this file should have info of both - read it in\n sys4 = pc.System()\n sys4.read_inputfile(\"tests/bcc3.dat\", frame=0)\n atoms = sys4.atoms\n assert atoms[0].pos == [0.0,0,0]\n\n #now cleanup\n os.remove(\"tests/bcc3.dat\")\n\ndef test_customvals_dump():\n \"\"\"\n Test writing customvals\n \"\"\"\n atoms, boxdims = pcs.make_crystal('bcc', repetitions=[1,1,1])\n sys = pc.System()\n sys.atoms = atoms\n sys.box = boxdims\n\n\n #test for multiple customvals\n customks = 'one'\n customvs = [1,1]\n ptp.write_structure(sys, \"tests/bcc4.dump\", customkey=customks, customvals=customvs)\n\n #now read this file\n lines = []\n for line in open(\"tests/bcc4.dump\", 'r'):\n lines.append(line)\n\n #now check the atoms\n last1line = lines[-1].strip().split()\n last2line = lines[-2].strip().split()\n last3line = lines[-3].strip().split()\n\n #now verify\n assert last1line[-1] == '1'\n assert last2line[-1] == '1'\n assert last3line[-1] == 'one'\n\n #clean up\n if os.path.exists(\"tests/bcc4.dat\"):\n os.remove(\"tests/bcc4.dat\")\n\n #test for multiple customvals\n customks = ['one', 'two']\n customvs = [[1,1], [2,2]]\n ptp.write_structure(sys, \"tests/bcc4.dump\", customkey=customks, customvals=customvs)\n\n #now read this file\n lines = []\n for line in open(\"tests/bcc4.dump\", 'r'):\n lines.append(line)\n\n #now check the atoms\n last1line = lines[-1].strip().split()\n last2line = lines[-2].strip().split()\n last3line = lines[-3].strip().split()\n\n #now verify\n assert last1line[-1] == '2'\n assert last1line[-2] == '1'\n assert last2line[-1] == '2'\n assert last2line[-2] == '1'\n assert last3line[-1] == 'two'\n assert last3line[-2] == 'one'\n\n #clean up\n if os.path.exists(\"tests/bcc4.dat\"):\n os.remove(\"tests/bcc4.dat\")\n","sub_path":"tests/test_traj_process.py","file_name":"test_traj_process.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"179844810","text":"import sys, os\nimport csv\nimport json\nimport numpy as np\nimport shlex, subprocess\nfrom collections import defaultdict # TODO: Check if \"connections\" can be used as regular dict\nfrom mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable\nfrom matplotlib import pyplot as plt\n\nimport time\n\n\ndef buidImage(imgsGrid, connections, n_rows, n_cols):\n\n loc = {0: \"left\", 1: \"right\"}\n connection_keys = ('Confidence',\n 'Sharpness Confidence',\n 'Sharpness',\n 'Disparity',\n 'Y_Difference',\n 'Final_old',\n 'Final_new'\n )\n vminmax_default = {\n # 'Confidence': (0, 255),\n # 'Sharpness Confidence': (0, 255),\n # 'Sharpness': None,\n # 'Disparity': (0, 255),\n # 'Y_Difference': (0, 50),\n\n # Does make sense to set equal limits for both Finals\n # 'Final_old': (0, 255),\n # 
'Final_new': (0, 255)\n }\n\n vminmax = {}\n for k in connections:\n if k in connection_keys:\n if k not in vminmax_default:\n amin = min([imgsGrid[i].min() for i in connections[k]])\n amax = max([imgsGrid[i].max() for i in connections[k]])\n else:\n amin, amax = vminmax_default[k]\n for i in connections[k]:\n vminmax[i] = (amin, amax)\n\n fig = plt.figure()\n\n # elapsed = time.clock() - start\n # print('1: ', elapsed)\n # start = time.clock()\n\n grid = AxesGrid(fig, 111, # similar to subplot(111)\n nrows_ncols=(n_rows, n_cols),\n axes_pad=0,\n # label_mode=\"L\", # similar to \"label_outer\"\n cbar_location=\"bottom\",\n cbar_mode=\"each\",\n cbar_pad=0\n )\n\n for i, (ax, img) in enumerate(zip(grid, imgsGrid)):\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.xaxis.set_minor_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_minor_locator(plt.NullLocator())\n\n if img.ndim == 2:\n\n # cool way to add a new axes next to an existing one\n # Not very cool in conjunction with AxesGrid, as it occurred.\n\n # divider = make_axes_locatable(ax)\n # cax = divider.append_axes(loc[i % 2], size=\"5%\", pad=0)\n # fig.colorbar(im, cmap='viridis', cax=cax)\n\n im = ax.imshow(img, vmin=vminmax[i][0], vmax=vminmax[i][1])\n grid.cbar_axes[i].colorbar(im, cmap='viridis')\n grid.cbar_axes[i].tick_params(length=1,\n labelsize=3,\n direction='in',\n labelbottom=False,\n labeltop=True,\n pad=0\n )\n for label in grid.cbar_axes[i].xaxis.get_majorticklabels():\n label.set_horizontalalignment('left')\n else:\n ax.imshow(img)\n\n # Turn off the ticks and labels\n grid.cbar_axes[i].xaxis.set_major_locator(plt.NullLocator())\n grid.cbar_axes[i].xaxis.set_minor_locator(plt.NullLocator())\n grid.cbar_axes[i].yaxis.set_major_locator(plt.NullLocator())\n grid.cbar_axes[i].yaxis.set_minor_locator(plt.NullLocator())\n\n fig.savefig(\"test.png\", bbox_inches='tight', pad_inches=0, dpi=600)\n plt.close('all')\n # fig.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"pyscripts/visualisator/imgGridUnitor.py","file_name":"imgGridUnitor.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"237342725","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n# django_th classes\nfrom .services import ServicesMgr\nfrom ..models import UserService\nfrom ..models import ServicesActivated\nfrom ..models.evernote import Evernote\n# evernote classes\nfrom evernote.api.client import EvernoteClient\nimport evernote.edam.type.ttypes as Types\n# django classes\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.utils.log import getLogger\nfrom .sanitize import sanitize\n\n\"\"\"\n handle process with evernote\n put the following in settings.py\n\n TH_EVERNOTE = {\n 'sandbox': True,\n 'consumer_key': 'abcdefghijklmnopqrstuvwxyz',\n 'consumer_secret': 'abcdefghijklmnopqrstuvwxyz',\n }\n sanbox set to True to make your test and False for production purpose\n\"\"\"\n\nlogger = getLogger('django_th.trigger_happy')\n\n\nclass ServiceEvernote(ServicesMgr):\n\n def save_data(self, token, title, content, trigger_id):\n \"\"\"\n let's save the data\n \"\"\"\n if token:\n # get the evernote data of this trigger\n trigger = Evernote.objects.get(trigger_id=trigger_id)\n\n client = EvernoteClient(\n token=token, sandbox=settings.TH_EVERNOTE['sandbox'])\n # user_store = client.get_user_store()\n note_store = client.get_note_store()\n # notebooks = 
note_store.listNotebooks()\n            # note object\n            note = Types.Note()\n            if trigger.notebook:\n                # get the notebook name\n                note.notebook = trigger.notebook\n                logger.debug(\"notebook that will be used %s\", trigger.notebook)\n\n            # start to build the \"note\"\n            # the title\n            note.title = title.encode('utf-8', 'xmlcharrefreplace')\n            # the body must start with the standard ENML header\n            note.content = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n            note.content += '<!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">'\n            # tidy and sanitize content\n            enml = sanitize(content)\n            note.content += enml.encode('ascii', 'xmlcharrefreplace')\n            # create the note !\n            created_note = note_store.createNote(note)\n            sentence = 'note %s created' % created_note.guid\n            logger.debug(sentence)\n\n        else:\n            logger.critical(\n                \"no token provided for trigger ID %s and title %s\", trigger_id, title)\n\n    def get_evernote_client(self, token=None):\n        \"\"\"\n        get the token from evernote\n        \"\"\"\n        if token:\n            return EvernoteClient(\n                token=token,\n                sandbox=settings.TH_EVERNOTE['sandbox'])\n        else:\n            return EvernoteClient(\n                consumer_key=settings.TH_EVERNOTE['consumer_key'],\n                consumer_secret=settings.TH_EVERNOTE['consumer_secret'],\n                sandbox=settings.TH_EVERNOTE['sandbox'])\n\n    def auth(self, request):\n        \"\"\"\n        let's auth the user to the Service\n\n        @todo : manage the user token to see if a token already exists\n        something like request.user.token with\n        client = self.get_evernote_client(request.user.token)\n        this will avoid to request a new token\n        \"\"\"\n        client = self.get_evernote_client()\n        callbackUrl = 'http://%s%s' % (\n            request.get_host(), reverse('evernote_callback'))\n        request_token = client.get_request_token(callbackUrl)\n\n        # Save the request token information for later\n        request.session['oauth_token'] = request_token['oauth_token']\n        request.session['oauth_token_secret'] = request_token[\n            'oauth_token_secret']\n\n        # Redirect the user to the Evernote authorization URL\n        # return the URL string which will be used by redirect()\n        # from the calling func\n        return client.get_authorize_url(request_token)\n\n    def callback(self, request):\n        \"\"\"\n        Called from the Service when the user accepts to activate it\n        \"\"\"\n        try:\n            client = self.get_evernote_client()\n            # finally we save the user auth token\n            # As we already stored the object ServicesActivated\n            # from the UserServiceCreateView now we update the same\n            # object to the database so :\n            # 1) we get the previous object\n            us = UserService.objects.get(\n                user=request.user,\n                name=ServicesActivated.objects.get(name='ServiceEvernote'))\n            # 2) then get the token\n            us.token = client.get_access_token(\n                request.session['oauth_token'],\n                request.session['oauth_token_secret'],\n                request.GET.get('oauth_verifier', '')\n            )\n            # 3) and save everything\n            us.save()\n        except KeyError:\n            return '/'\n\n        # note_store = client.get_note_store()\n        # notebooks = note_store.listNotebooks()\n\n        return 'evernote/callback.html'\n","sub_path":"django_th/services/my_evernote.py","file_name":"my_evernote.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"268938148","text":"import const_values\nimport getopt\nimport numpy as np\nimport os\nimport re\nimport sys\nimport time\nimport vtk\n\nfrom disc_descriptor import DiscDescriptor\n\n\ndef read_stl(file_name):\n    reader = vtk.vtkSTLReader()\n    reader.SetFileName(file_name)\n    reader.Update()\n\n    poly_data = reader.GetOutput()\n    return poly_data\n\ndef usage():\n    print('usage of fractures match')\n    print('-d, --dir: specify the directory of the re-assembly plates. e.g. 
-d ./plates/plate-1/')\n    print('-r, --random: draw descriptor randomly, requires the random number. e.g. -r 10')\n    print('-c, --config: config the options of descriptor.')\n    print('    ', '[distance from disc(1)]') \n    print('    ', '[num of points on disc(32)]') \n    print('    ', '[num of circles(32)]') \n    print('    ', '[init radius(0.1)]') \n    print('    ', '[radius delta(0.1)]')\n    print('-s, --save: save features to image. [min-max, z-score]. e.g. -s min-max')\n    print('-g, --generate: generate data only, do not visualize.')\n    print('-v, --visualization: options([p:points, l:lines, c:circles, s:source]). e.g. -v p,l,c,s')\n    print('-h, --help: print help message.')\n\n\ndef valid_input(message, value):\n    # reassigning a parameter cannot change the caller's variable, so the\n    # validated value is returned; empty input keeps the previous value\n    val = input(message)\n    if len(val) == 0:\n        return value\n    # accept plain integers as well as decimals\n    pattern = re.compile(r'^[-+]?[0-9]+(\\.[0-9]+)?$')\n    while not pattern.match(val):\n        print('invalid input')\n        val = input(message)\n    return float(val)\n\n\ndef parse_args(argv):\n    args = argv[1:]\n    try:\n        opts, args = getopt.getopt(args, 'd:r:cs:v:gh', ['dir=', 'random=', 'config', 'save=', 'visualization=', 'generate', 'help'])\n    except getopt.GetoptError as err:\n        print(err)\n        usage()\n        sys.exit(2)\n    \n    random_num = 0\n    distance_from_mesh = 1\n    num_of_points_on_disc = 32\n    num_of_circle = 32\n    init_radius = 0.1\n    radius_delta = 0.1\n    file_names = []\n    FMIs_dirs = []\n    visual_flag = False\n    generate_flag = False\n    type_of_normalize = 0\n    visual_opts = []\n    for o, a in opts:\n        if o in ('-h', '--help'):\n            usage()\n            sys.exit(1)\n        elif o in ('-d', '--dir'):\n            for file_name in os.listdir(a):\n                if file_name.endswith('.stl'):\n                    file_names.append(a + file_name)\n                    print(a + file_name)\n            \n        elif o in ('-r', '--random'):\n            random_num = int(a)\n        elif o in ('-c', '--config'):\n            distance_from_mesh = valid_input('distance from disc(1): ', distance_from_mesh)\n            num_of_points_on_disc = valid_input('num of points on disc(32): ', num_of_points_on_disc)\n            num_of_circle = valid_input('num of circles(32): ', num_of_circle)\n            init_radius = valid_input('init radius(0.1): ', init_radius)\n            radius_delta = valid_input('radius delta(0.1): ', radius_delta)\n        elif o in ('-s', '--save'):\n            if len(file_names) != 0:\n                if a == 'min-max':\n                    type_of_normalize = 0\n                elif a == 'z-score':\n                    type_of_normalize = 1\n                for name in file_names:\n                    path = name[0:-4] + '-FMIs/'\n                    FMIs_dirs.append(path)\n                    if not os.path.exists(path):\n                        print(path)\n                        os.makedirs(path)\n        elif o in ('-g', '--generate'):\n            generate_flag = True\n        elif o in ('-v', '--visualization'):\n            visual_opts = a.split(',')\n            visual_flag = True\n        else:\n            print('unhandled option')\n            sys.exit(3)\n    \n    for file_name, FMIs_dir in zip(file_names, FMIs_dirs):\n        poly_data = read_stl(file_name)\n        descriptor = DiscDescriptor(poly_data)\n        descriptor.mesh_descriptors(FMIs_dir, type_of_normalize, random_num=random_num)\n\n        # config descriptor\n        descriptor.distance_from_mesh = distance_from_mesh\n        descriptor.num_of_points_on_disc = num_of_points_on_disc\n        descriptor.num_of_circle = num_of_circle\n        descriptor.init_radius = init_radius\n        descriptor.radius_delta = radius_delta\n\n        # visualization\n        if not generate_flag:\n            visualized_datas = []\n            circles = descriptor.draw_circles()\n            lines_datas, points_datas = descriptor.draw_lines()\n            if not visual_flag:\n                visualized_datas.append(poly_data)\n                visualized_datas.extend(circles)\n                visualized_datas.extend(points_datas)\n                visualized_datas.extend(lines_datas)\n            else:\n                if 's' in visual_opts:\n                    visualized_datas.append(poly_data)\n                if 'c' in visual_opts:\n                    visualized_datas.extend(circles)\n                if 'p' in visual_opts:\n                    visualized_datas.extend(points_datas)\n                if 'l' in visual_opts:\n                    
visualized_datas.extend(lines_datas) \n descriptor.visualize_models(visualized_datas)\n\n\ndef main(argv):\n parse_args(argv)\n\n\nif __name__ == \"__main__\":\n print('test time begin at ' + time.asctime(time.localtime(time.time())) + '\\n')\n main(sys.argv)\n print('test time end at ' + time.asctime(time.localtime(time.time())) + '\\n')","sub_path":"mesh_descriptor.py","file_name":"mesh_descriptor.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"273756652","text":"import base64\r\nfrom flask import Flask, render_template\r\nfrom io import BytesIO\r\nimport eventlet\r\nimport eventlet.wsgi\r\nimport numpy as np\r\nimport socketio\r\nimport csv, random\r\nfrom keras.models import load_model\r\nfrom keras.preprocessing.image import *\r\nimport tensorflow as tf\r\nfrom keras import backend as k\r\nimport cv2\r\n\r\n\r\n\r\n'''\r\nrun train_lanes, train_steering, and train_signs first.\r\nstick the models they generate in a subfolder called \"models\"\r\n'''\r\n\r\n\r\n#global vars\r\nsteering_model = load_model( \"./models/steering_two.h5\")\r\nlane_change_model = load_model( \"./models/lane_change.h5\")\r\nlane_model = load_model( \"./models/lanes.h5\")\r\ndirection_model = load_model( \"./models/direction.h5\")\r\nsign_model = load_model( \"./models/sign.h5\")\r\n\r\nsign_detect_time= 0 #the last time we detected a sign\r\nsign_detect_delay = 2 #how long, in seconds, to check for another sign after a detection\r\nsign_check_time = 0 # the last time we check for a sign\r\nsign_check_delay = .05 #how often, in seconds, to check for a sign\r\n\r\nchanged_lanes_start = -10 #last time we initiated lane change\r\nchanged_lanes_delay_steering = 3 #how many seconds to use one lane model while changing lanes\r\nchanged_lanes_delay_speed = .75 #how many seconds to slow down\r\n\r\nlane_detect_time = -10 #last time we checked the lanes and direction\r\nlane_detect_delay =.2 #how often to check lanes and direction\r\nlane = \"left\" #the lane we're in\r\nlane_width = \"double\" #single or double\r\n\r\nsingle_lane_threshold = 3 #number of frames of double lane before switch to double\r\ndouble_lane_counter= 0 #counter for the single_lane_threshold\r\n\r\ndirection = \"correct\" #\"correct\" or \"wrong\" the direction we're going\r\nuturn_threshhold = 40 # less than this is executing uturn\r\nuturn_counter = uturn_threshhold #counting up to threshhold\r\nuturn_stage = 0 #stage1 back up, stage2 go forward\r\n\r\nstuck_counter = 0 #counts how many frame we've been backing up\r\ntry_forward = 0 #counts how many frames we've been going forward\r\nchanged_lanes_direction = \"none\" #right, left, or none. 
which lane we're changing to\r\n\r\nwrong_way_threshold = 10 #number of direction detections before resetting counters\r\nwrong_counter = 0 #counts how many predictions for wrong direction\r\ncorrect_counter =0 #counts how many predictions for correct direction\r\n\r\nrace_time=0\r\n\r\nsio = socketio.Server()\r\napp = Flask(__name__)\r\n\r\n\r\n\r\ndef log_it(log_string):\r\n global race_time\r\n print(str(race_time) + \" \" + log_string)\r\n try:\r\n with open(\"./Log/log.txt\", \"a+\") as f:\r\n f.write(str(race_time) + \" \" + log_string + \"\\n\")\r\n except:\r\n pass\r\n\r\ndef process_sign_image(image_bytes):\r\n image = load_img(image_bytes)\r\n image = image.crop((0,0,320,90))\r\n image = img_to_array(image)\r\n image = cv2.resize(image, (0,0), fx=0.7, fy=0.7)\r\n\r\n r, g, b = cv2.split(image)\r\n r_filter = (r == np.maximum(np.maximum(r, g), b)) & (r >= 85) & (g < 70) & (b < 220)\r\n g_filter = (g == np.maximum(np.maximum(r, g), b)) & (g >= 120) & (r < 150) & (b < 150)\r\n b_filter = (b == np.maximum(np.maximum(r, g), b)) & (b >= 120) & (r < 150) & (g < 150)\r\n black_filter = ((r < 40) & (g < 40) & (b < 40))\r\n y_filter = ((r >= 128) & (g >= 128) & (b < 100))\r\n gray_filter = ((np.absolute(r-g) < 40) & (np.absolute(g-b) < 40) & (np.absolute(b-r) < 40)) & ((r>32) & \r\n (g>32) & (b> 32)) & ((r<239) & (g<239) & (b<239)) \r\n\r\n b[gray_filter], b[np.invert(gray_filter)] = 0, 0\r\n r[r_filter], r[np.invert(r_filter)] = 255, 0\r\n g[g_filter], g[np.invert(g_filter)] = 0, 0\r\n \r\n \r\n #sign was too small, just erase it\r\n sign_proximity = np.count_nonzero(r) \r\n #if sign_proximity < 140:\r\n # r[r_filter] = 0\r\n \r\n \r\n masked_sign_image = cv2.merge((r, g, b)) \r\n masked_sign_image = (masked_sign_image / 255. -.5).astype(np.float32) \r\n masked_sign_image = np.rollaxis(masked_sign_image, -1)\r\n \r\n return masked_sign_image, sign_proximity\r\n\r\n \r\n\r\ndef process_lane_image(image_bytes):\r\n image = load_img(image_bytes) \r\n image = img_to_array(image)\r\n image = cv2.resize(image, (64, 64))\r\n image = (image / 255. -.5).astype(np.float32)\r\n image = np.rollaxis(image, -1)\r\n return image\r\n \r\n \r\ndef process_direction_image(image_bytes):\r\n image = load_img(image_bytes) \r\n image = image.crop((0,110,320,240))\r\n image = img_to_array(image)\r\n image = cv2.resize(image, (160, 65)) \r\n \r\n r, g, b = cv2.split(image)\r\n white_filter = ((r > 200) & (g > 200) & (b > 200))\r\n r_filter = (r == np.maximum(np.maximum(r, g), b)) & (r >= 110) & (g < 70) & (b < 220)\r\n g_filter = (g == np.maximum(np.maximum(r, g), b)) & (g >= 120) & (r < 150) & (b < 150)\r\n b_filter = (b == np.maximum(np.maximum(r, g), b)) & (b >= 120) & (r < 150) & (g < 150)\r\n y_filter = ((r >= 128) & (g >= 128) & (b < 100))\r\n b[b_filter], b[np.invert(b_filter)] = 0, 0\r\n r[r_filter], r[np.invert(r_filter)] = 255, 0 \r\n g[g_filter], g[np.invert(g_filter)] = 0, 0\r\n image = cv2.merge((r, g, b))\r\n \r\n image = (image / 255. 
-.5).astype(np.float32)\r\n    image = np.rollaxis(image, -1)\r\n    return image\r\n\r\n\r\ndef process_steer_image(image_bytes, invert=False):\r\n    image = load_img(image_bytes)\r\n    image = image.crop((0,40,320,220)) #(0,120,320,240))\r\n    image = img_to_array(image)\r\n    #image = cv2.resize(image, (0,0), fx=0.35, fy=0.35)\r\n    image = cv2.resize(image, (64, 64))\r\n    if invert:\r\n        image = flip_axis(image, 1)\r\n        steering_inverted = True\r\n    else:\r\n        steering_inverted = False\r\n\r\n    image /= 255.\r\n    image -= 0.5\r\n\r\n    return image, steering_inverted\r\n\r\n\r\ndef process_front_wall_image(image_bytes, sa):\r\n    image = load_img(image_bytes) #240, 320\r\n\r\n    #crop((left, top, right, bottom))\r\n    left_clip = 159 + (sa/.25)\r\n    right_clip = 160 + (sa/.25)\r\n\r\n    image = image.crop((left_clip,105,right_clip,240))\r\n\r\n    image = img_to_array(image)\r\n\r\n    r, g, b = cv2.split(image)\r\n    r_filter = (r == np.maximum(np.maximum(r, g), b)) & (r >= 120) & (g < 150) & (b < 150)\r\n    g_filter = (g == np.maximum(np.maximum(r, g), b)) & (g >= 120) & (r < 150) & (b < 150)\r\n    b_filter = (b == np.maximum(np.maximum(r, g), b)) & (b >= 120) & (r < 150) & (g < 150)\r\n    black_filter = ((r < 40) & (g < 40) & (b < 40))\r\n    y_filter = ((r >= 128) & (g >= 128) & (b < 100))\r\n    gray_filter = ((np.absolute(r-g) < 40) & (np.absolute(g-b) < 40) & (np.absolute(b-r) < 40)) & ((r > 32) &\r\n                  (g > 32) & (b > 32)) & ((r < 239) & (g < 239) & (b < 239))\r\n\r\n    r[black_filter | gray_filter], r[np.invert(black_filter | gray_filter)] = 255, 0\r\n\r\n    wall_proximity = np.count_nonzero(r)\r\n\r\n    return wall_proximity\r\n\r\ndef process_side_wall_image(image):\r\n\r\n    image = load_img(image)\r\n    image = img_to_array(image)\r\n    left_side = image[:,:1]\r\n    right_side = image[:,319:]\r\n\r\n    r_r, r_g, r_b = cv2.split(right_side)\r\n    l_r, l_g, l_b = cv2.split(left_side)\r\n    r_black_filter = ((r_r < 40) & (r_g < 40) & (r_b < 40))\r\n    r_y_filter = ((r_r >= 128) & (r_g >= 128) & (r_b < 100))\r\n    l_black_filter = ((l_r < 40) & (l_g < 40) & (l_b < 40))\r\n    l_y_filter = ((l_r >= 128) & (l_g >= 128) & (l_b < 100))\r\n\r\n    r_r[r_y_filter | r_black_filter], r_r[np.invert(r_y_filter | r_black_filter)] = 255, 0\r\n    l_r[l_y_filter | l_black_filter], l_r[np.invert(l_y_filter | l_black_filter)] = 255, 0\r\n\r\n    r_wall_prox = np.count_nonzero(r_r)\r\n    l_wall_prox = np.count_nonzero(l_r)\r\n\r\n    return l_wall_prox, r_wall_prox\r\n\r\ndef flip_axis(x, axis):\r\n    x = np.asarray(x).swapaxes(axis, 0)\r\n    x = x[::-1, ...]\r\n    x = x.swapaxes(0, axis)\r\n    return x\r\n\r\ndef get_lane(image_bytes):\r\n    global lane_model, lane, lane_width\r\n    global double_lane_counter, single_lane_threshold\r\n\r\n    image = process_lane_image(image_bytes)\r\n    #get the lane, left/right\r\n    lane_pred = lane_model.predict(np.array([image]))\r\n    lane_pred_class = np.argmax(lane_pred) #0 left, 1 right\r\n    lane_pred_conf = np.amax(lane_pred)\r\n    in_lane = lane #default to the current lane; the class 2 (single lane) branch previously left in_lane unbound\r\n    if lane_pred_class == 0:\r\n        double_lane_counter += 1\r\n        if lane == \"right\":\r\n            log_it(\"Driving in left lane.\")\r\n        in_lane = \"left\"\r\n        if double_lane_counter >= single_lane_threshold:\r\n            if lane_width == \"single\":\r\n                log_it(\"Driving in double lane.\")\r\n                lane_width = \"double\"\r\n    elif lane_pred_class == 1:\r\n        double_lane_counter += 1\r\n        if lane == \"left\":\r\n            log_it(\"Driving in right lane.\")\r\n        in_lane = \"right\"\r\n        if double_lane_counter >= single_lane_threshold:\r\n            if lane_width == \"single\":\r\n                log_it(\"Driving in double lane.\")\r\n                lane_width = \"double\"\r\n    elif lane_pred_class ==
2:\r\n double_lane_counter = 0\r\n if lane_width == \"double\":\r\n log_it(\"Driving in single lane.\")\r\n in_lane=\"left\"\r\n lane_width = \"single\"\r\n\r\n if double_lane_counter > single_lane_threshold: double_lane_counter = single_lane_threshold\r\n return in_lane\r\n \r\ndef get_direction(image_bytes):\r\n global direction_model\r\n image = process_direction_image(image_bytes)\r\n\r\n direction_pred = direction_model.predict(np.array([image]))\r\n direction_pred_class = np.argmax(direction_pred) #0 correct, 1 wrong, 2 unknown\r\n direction_pred_conf = np.amax(direction_pred)\r\n if direction_pred_class ==1 and direction_pred_conf >= .9:\r\n going_direction=\"wrong\"\r\n log_it(\"Wrong direction detected.\")\r\n else: going_direction = \"correct\"\r\n \r\n return going_direction\r\n \r\ndef check_signs(image_bytes):\r\n global race_time\r\n global sign_detect_time, sign_check_time\r\n global lane, changed_lanes_direction, changed_lanes_start\r\n \r\n sign_image, sign_proximity = process_sign_image(image_bytes)\r\n sign_check_time = race_time\r\n if sign_proximity >= 100: #check if there even is a sign before trying to predict what it is\r\n sign_pred = sign_model.predict(np.array([sign_image]))\r\n sign_pred_class = np.argmax(sign_pred)\r\n sign_pred_conf = np.amax(sign_pred)\r\n \r\n if sign_pred_conf >= .93: \r\n log_it(\"Sign prediction:\" +str(sign_pred_class) + \"- Confidence: \" + str(sign_pred_conf))\r\n\r\n if sign_pred_class == 1:\r\n changed_lanes_direction = \"right\"\r\n changed_lanes_start = race_time\r\n log_it(\"Changing lanes\")\r\n log_it(\"Going right\") \r\n\r\n elif sign_pred_class == 2:\r\n changed_lanes_direction = \"left\"\r\n changed_lanes_start = race_time\r\n log_it(\"Changing lanes\")\r\n log_it(\"Going left\")\r\n sign_detect_time = race_time\r\n\r\n\r\n \r\n \r\ndef change_lanes( image_bytes):\r\n global changed_lanes_direction\r\n global lane_change_model\r\n \r\n if changed_lanes_direction == \"left\":\r\n steering_image, steering_inverted = process_steer_image(image_bytes)\r\n \r\n else:# changed_lanes_direction == \"right\":\r\n steering_image, steering_inverted = process_steer_image(image_bytes, True)\r\n \r\n sa = (lane_change_model.predict(np.array([steering_image]))[0][0]) * 40 #x40 because we normalized between -1 to 1 in training\r\n if steering_inverted: sa = -sa\r\n \r\n return sa \r\n \r\ndef get_steering_angle(image_bytes):\r\n global steering_model, lane_width\r\n\r\n steering_image, steering_inverted = process_steer_image(image_bytes)\r\n if lane_width == \"double\":\r\n sa = (steering_model.predict(np.array([steering_image]))[0][0]) * 40 #x40 because we normalized between -1 to 1 in training\r\n else:\r\n sa = (lane_change_model.predict(np.array([steering_image]))[0][0]) * 40 #x40 because we normalized between -1 to 1 in training\r\n if steering_inverted: sa = -sa\r\n \r\n return sa\r\n\r\ndef get_throttle(image_bytes, sa, speed):\r\n global lane_width\r\n \r\n wall_proximity = process_front_wall_image(image_bytes, sa)\r\n #if wall_proximity < 25 or wall_proximity > 60: wall_proximity = 0 \r\n if lane_width == \"single\":\r\n if wall_proximity < 26 or wall_proximity > 35: wall_proximity = 0\r\n speed_var = (wall_proximity**1.2) /135\r\n else:\r\n if wall_proximity < 30 or wall_proximity > 35: wall_proximity = 0 \r\n speed_var = (wall_proximity) /135 #135 is max wall infront\r\n target_speed = max(.3, (1 - (speed_var))*2)\r\n throttle= 1.2 - (speed / target_speed)\r\n\r\n return throttle\r\n \r\n \r\n \r\n@sio.on('telemetry')\r\ndef 
telemetry(sid, data):\r\n global race_time\r\n global changed_lanes_start, changed_lanes_delay_steering\r\n global lane_detect_time, lane_detect_delay, lane\r\n global direction, uturn_direction, uturn_counter\r\n global stuck_counter, try_forward\r\n global wrong_counter, correct_counter, wrong_way_threshold\r\n global sign_check_delay, sign_detect_delay, sign_detect_time, sign_check_time\r\n\r\n # The current telemetry\r\n img_str = data[\"image\"]\r\n speed = float(data[\"speed\"])\r\n race_time = float(data[\"time\"])\r\n sa = float(data[\"steering_angle\"])\r\n throttle = float(data[\"throttle\"]) \r\n current_lap = int(data[\"lap\"])\r\n\r\n # read and process image\r\n image_bytes = BytesIO(base64.b64decode(img_str))\r\n \r\n #find which lane we're in and the direction\r\n need_to_uturn = False\r\n if race_time - lane_detect_time > lane_detect_delay:\r\n lane = get_lane(image_bytes)\r\n direction = get_direction(image_bytes)\r\n lane_detect_time = race_time\r\n if direction == \"correct\":\r\n correct_counter +=1\r\n if correct_counter >= wrong_way_threshold:\r\n wrong_counter = 0\r\n correct_counter = 0\r\n else:\r\n wrong_counter +=1\r\n if wrong_counter >=2: #increase this if uturning when going right direction\r\n log_it(\"Performing a u-turn.\")\r\n need_to_uturn = True\r\n wrong_counter = 0\r\n correct_counter = 0\r\n\r\n \r\n #check for fork signs\r\n if (race_time - sign_check_time > sign_check_delay) and (race_time - sign_detect_time > sign_detect_delay ):\r\n check_signs(image_bytes)\r\n \r\n \r\n #get sa and check if changing lanes\r\n if (race_time - changed_lanes_start) > changed_lanes_delay_steering:\r\n sa = get_steering_angle(image_bytes)\r\n else: #get sa for changing lanes\r\n sa = change_lanes(image_bytes)\r\n \r\n \r\n #get throttle, slow down for lane change\r\n if (race_time - changed_lanes_start) <= changed_lanes_delay_speed:\r\n target_speed = .8\r\n throttle= 1.2 - (speed / target_speed)\r\n else:\r\n throttle = get_throttle(image_bytes, sa, speed)\r\n\r\n #turn around if going wrong way\r\n if need_to_uturn:\r\n if lane == \"right\": uturn_direction = 1\r\n else: uturn_direction = 2\r\n uturn_counter = 0\r\n if uturn_counter < uturn_threshhold:\r\n uturn_counter +=1\r\n if uturn_direction == 1: sa = -45\r\n else: sa = 45\r\n target_speed = .8\r\n throttle = 1.2 - (speed / target_speed)\r\n else: uturn_counter = uturn_threshhold #prevent runaway\r\n \r\n #back up if we crash\r\n try_forward +=1 #count going foward before checking for another crash\r\n if try_forward > 100: try_forward = 11 #prevent it from going too high\r\n if speed >= 0 and speed <=.1 and race_time > 5 and stuck_counter < 1 and try_forward > 50:\r\n stuck_counter = 35 #go backward for this many frames\r\n if stuck_counter > 0:\r\n sa = get_steering_angle(image_bytes) #sa = 0\r\n sa = -sa #opposite sa of going forward, but about half\r\n target_speed = 1\r\n throttle = -1.2 + (speed / target_speed) \r\n stuck_counter -= 1 #count how long we've been going backward\r\n try_forward = 0 #reset counter that counts going forward\r\n \r\n \r\n #log_it(sa, throttle)\r\n send_control(sa, throttle)\r\n\r\n@sio.on('connect')\r\ndef connect(sid, environ):\r\n print(\"connect \", sid)\r\n send_control(0, 0)\r\n\r\ndef send_control(steering_angle, throttle):\r\n sio.emit(\"steer\", data={\r\n 'steering_angle': steering_angle.__str__(),\r\n 'throttle': throttle.__str__()\r\n }, skip_sid=True)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # wrap Flask application with engineio's middleware\r\n app = 
socketio.Middleware(sio, app)\r\n\r\n    # deploy as an eventlet WSGI server\r\n    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"273784768","text":"import sys\n\n\ndef shortestpath(graph, start, end, visited=None, distances=None, predecessors=None):\n    \"\"\"Find the shortest path btw start & end nodes in a graph\"\"\"\n    # use None defaults: mutable default arguments would leak state between top-level calls\n    if visited is None:\n        visited, distances, predecessors = [], {}, {}\n    # detect if first time through, set current distance to zero\n    if not visited:\n        distances[start] = 0\n    # if we've found our end node, find the path to it, and return\n    if start == end:\n        path = []\n        while end is not None:\n            path.append(end)\n            end = predecessors.get(end, None)\n        return distances[start], path[::-1]\n    # process neighbors as per algorithm, keep track of predecessors\n    for neighbor in graph[start]:\n        if neighbor not in visited:\n            neighbordist = distances.get(neighbor, sys.maxsize)\n            tentativedist = distances[start] + graph[start][neighbor]\n            if tentativedist < neighbordist:\n                distances[neighbor] = tentativedist\n                predecessors[neighbor] = start\n    # neighbors processed, now mark the current node as visited\n    visited.append(start)\n    # finds the closest unvisited node to the start\n    unvisiteds = dict((k, distances.get(k, sys.maxsize)) for k in graph if k not in visited)\n    closestnode = min(unvisiteds, key=unvisiteds.get)\n    # now take the closest node and recurse, making it current\n    return shortestpath(graph, closestnode, end, visited, distances, predecessors)\n\n\ngraph = {'a': {'w': 14, 'x': 7, 'y': 9},\n         'b': {'w': 9, 'z': 6},\n         'w': {'a': 14, 'b': 9, 'y': 2},\n         'x': {'a': 7, 'y': 10, 'z': 15},\n         'y': {'a': 9, 'w': 2, 'x': 10, 'z': 11},\n         'z': {'b': 6, 'x': 15, 'y': 11}}\n\n\nfrom src.ultilities import convert_to_adj_list\n\nfile = \"/home/thaolinhnguyen/PycharmProjects/aor/data/ballyskate_layout.txt\"\na = convert_to_adj_list(file)\nprint(a)\nprint(graph)\nprint(shortestpath(graph, 'x', 'z'))\nfrom decimal import Decimal\n\nclass Node:\n    def __init__(self, label):\n        self.label = label\n\nclass Edge:\n    def __init__(self, to_node, length):\n        self.to_node = to_node\n        self.length = length\n\n\nclass Graph:\n    def __init__(self):\n        self.nodes = set()\n        self.edges = dict()\n\n    def add_node(self, node):\n        self.nodes.add(node)\n\n    def add_edge(self, from_node, to_node, length):\n        edge = Edge(to_node, length)\n        if from_node.label in self.edges:\n            from_node_edges = self.edges[from_node.label]\n        else:\n            self.edges[from_node.label] = dict()\n            from_node_edges = self.edges[from_node.label]\n        from_node_edges[to_node.label] = edge\n\n\n","sub_path":"test/test-dijkstra.py","file_name":"test-dijkstra.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"56834829","text":"import numpy as np\nimport wfdb\nfrom ML.Utilities.DataRecord import DataRecord\nimport os\n\n\nclass DBHandler(object):\n    \"\"\"\n    Loads every record of a WFDB (PhysioNet-style) ECG database directory into a list of DataRecord objects.\n    \"\"\"\n\n    def __init__(self, db_dir_path: str, db_name: str):\n        \"\"\"\n\n        :param db_dir_path: Path to the directory holding the database's .dat files.\n        :param db_name: Name of the database; one of 'Fantasia', 'BIH-Arrhythmia', 'Noise-Stress', 'BIH-Fibrillation' or 'Long-Term-AF'.\n        \"\"\"\n\n        self.db_name = db_name\n        self.db_dir = db_dir_path\n        self.records_names = []\n        self.db = []\n        self._create_db()\n\n    def _create_db(self):\n        \"\"\"\n\n        :return: None. Builds the DataRecord list and stores it in self.db.\n        \"\"\"\n\n        db = []\n        files = os.listdir(self.db_dir)\n\n        for file in files:\n\n            if file[-3:] != 'dat':\n                continue\n\n            file_name = file[:-4]\n            if file_name not in self.records_names:\n
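                # remember each record's base name only once, even if several files share it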
                self.records_names.extend([file_name])\n\n        for rec in self.records_names:\n            # Extract data\n            record = wfdb.rdsamp((self.db_dir + '\\\\' + rec))\n\n            signal = record[0]\n            frequency = record[1]['fs']\n            step = 1 / frequency\n\n            time = np.linspace(start=0, stop=(step * int(signal.shape[0])), num=int(signal.shape[0]))\n\n            comments = record[1]['comments']\n\n            # Deal with each database appropriately\n            if self.db_name == 'Fantasia':\n                anno = wfdb.rdann((self.db_dir + '\\\\' + rec), 'ecg')\n\n                qrs_annotations = anno.sample\n                beat_annotations = anno.symbol\n                AF_beats = []\n                other_beats = []\n                N_labels = np.ones_like(signal)\n\n                ind = 0\n                for beat in beat_annotations:\n                    if beat == 'N':\n                        ind += 1\n                        continue\n\n                    elif beat == '(AFIB' or beat == 'A' or beat == 'a':\n                        AF_beats.extend([ind])\n                        ind += 1\n\n                    else:\n                        other_beats.extend([ind])\n                        ind += 1\n\n                AF_beats = np.array(AF_beats)\n                other_beats = np.array(other_beats)\n\n                annotations = {'R_peaks': qrs_annotations, 'beat_type': beat_annotations,\n                               'AF_beats': np.array(AF_beats), 'N_labels': N_labels,\n                               'Other_beats': np.array(other_beats)}\n\n                qrs_time = time[qrs_annotations]\n                ecg_ind = record[1]['sig_name'].index('ECG')\n                ecg = signal[:, ecg_ind]\n                aux_signals = {}\n\n                for sig in record[1]['sig_name']:\n                    if sig == 'ECG':\n                        continue\n\n                    else:\n                        sig_ind = record[1]['sig_name'].index(sig)\n                        aux_signals[sig] = signal[:, sig_ind]\n\n            elif self.db_name == 'BIH-Arrhythmia' or self.db_name == 'Noise-Stress':\n                anno = wfdb.rdann((self.db_dir + '\\\\' + rec), 'atr')\n\n                qrs_annotations = anno.sample\n                beat_annotations = anno.symbol\n\n                AF_beats = []\n                other_beats = []\n\n                ind = 0\n                for beat in beat_annotations:\n                    if beat == 'N':\n                        ind += 1\n                        continue\n\n                    elif beat == '(AFIB' or beat == 'A' or beat == 'a':\n                        AF_beats.extend([ind])\n                        ind += 1\n\n                    else:\n                        other_beats.extend([ind])\n                        ind += 1\n\n                annotations = {'R_peaks': qrs_annotations, 'beat_type': beat_annotations,\n                               'AF_beats': np.array(AF_beats),\n                               'Other_beats': np.array(other_beats)}\n\n                qrs_time = time[qrs_annotations]\n                ecg = signal[:, 0]\n                aux_signals = {}\n\n                for sig in record[1]['sig_name']:\n                    if sig == 'V5':\n                        continue\n\n                    else:\n                        sig_ind = record[1]['sig_name'].index(sig)\n                        aux_signals[sig] = signal[:, sig_ind]\n\n            elif self.db_name == 'BIH-Fibrillation':\n                qrs = wfdb.rdann((self.db_dir + '\\\\' + rec), 'qrs')\n                anno = wfdb.rdann((self.db_dir + '\\\\' + rec), 'atr')\n\n                qrs_annotations = qrs.sample\n                beat_annotations = anno.aux_note\n                AF_beats = []\n                AF_labels = np.zeros_like(signal[:, 1]).astype(np.int8)\n\n                af_labels = []\n                n_labels = []\n                i = 0\n                for beat in beat_annotations:\n                    if beat == '(AFIB':\n                        AF_beats.append([anno.sample[i]])\n                        af_labels.append([anno.sample[i]])\n                        i += 1\n\n                    else:\n                        n_labels.append([anno.sample[i]])\n                        i += 1\n                        continue\n\n                if beat_annotations[0] == '(AFIB':\n                    corr = 0\n                else:\n                    corr = 1\n\n                if beat_annotations[-1] != '(AFIB':\n                    for i in range(len(af_labels)):\n                        AF_labels[af_labels[i][0]:n_labels[i + corr][0]] = 1\n                else:\n                    for i in range(len(af_labels)):\n                        if i == len(af_labels) - 1:\n                            AF_labels[af_labels[i][0]:] = 1\n                        else:\n                            AF_labels[af_labels[i][0]:n_labels[i + corr][0]] = 1\n\n                annotations = {'R_peaks': qrs_annotations, 'beat_type': beat_annotations,\n                               'AF_beats': np.array(AF_beats), 'AF_labels': AF_labels,\n                               'N_beats': np.array(n_labels)}\n\n                qrs_time = time[qrs_annotations]\n                ecg_ind = record[1]['sig_name'].index('ECG1')\n                ecg = signal[:, ecg_ind]\n                aux_signals = {}\n\n                for sig in record[1]['sig_name']:\n
                    if sig == 'ECG1':\n                        continue\n\n                    else:\n                        sig_ind = record[1]['sig_name'].index(sig)\n                        aux_signals[sig] = signal[:, sig_ind]\n\n            elif self.db_name == 'Long-Term-AF':\n                anno = wfdb.rdann((self.db_dir + '\\\\' + rec), 'atr')\n\n                qrs_annotations = anno.sample\n                beat_annotations = anno.symbol\n\n                AF_beats = []\n                other_beats = []\n\n                ind = 0\n                for beat in beat_annotations:\n                    if beat == 'N':\n                        ind += 1\n                        continue\n\n                    elif beat == '(AFIB' or beat == 'A' or beat == 'a':\n                        AF_beats.extend([ind])\n                        ind += 1\n\n                    else:\n                        other_beats.extend([ind])\n                        ind += 1\n\n                annotations = {'R_peaks': qrs_annotations, 'beat_type': beat_annotations,\n                               'AF_beats': np.array(AF_beats),\n                               'Other_beats': np.array(other_beats)}\n                try:\n                    qrs_time = time[qrs_annotations]\n                    ecg = signal[:, 0]\n                    aux_signals = {'ECG2': signal[:, 1]}\n\n                except:\n                    pass\n\n            # Create the DataRecord object\n            db.extend([DataRecord(name=rec, frequency=frequency, ecg_signal=ecg, time=time,\n                                  comments=comments, annotations=annotations,\n                                  additional_signals=aux_signals, qrs_time=qrs_time)])\n\n        self.db = db\n\n    def update_record(self, sig, idx):\n        self.db[idx].ecg = sig\n\n    def __getitem__(self, n: int):\n        return self.db[n]\n\n    def __setitem__(self, n: int, record: DataRecord):\n        self.db[n] = record\n\n    def __delitem__(self, n: int):\n        self.db.remove(self.db[n])\n\n    def __len__(self):\n        return len(self.db)\n","sub_path":"ML/Utilities/DBHandler.py","file_name":"DBHandler.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"355604986","text":"import hdt, gzip, tqdm\n\nfrom collections import Counter\n\n\ndef getlabel(x, doc):\n\n    triples, c = doc.search_triples(x, 'http://www.w3.org/2004/02/skos/core#prefLabel', '')\n    try:\n        return next(triples)[2]\n    except:\n        return '.none'\n\n# ---\n# triples, c = doc.search_triples('', '', '')\n#\n# relations = Counter()\n#\n# for s, p, o in tqdm.tqdm(triples, total=c):\n#     relations[p] += 1\n#\n# for rel, count in relations.most_common():\n#     print(f'{count: 5}: {rel}')\n\n# ---\n# triples, c = doc.search_triples('', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type', '')\n#\n# types = Counter()\n# for s, p, o in triples:\n#     types[o] += 1\n#\n# for type, ct in types.most_common():\n#     print(f'{ct: 5}: {type}')\n\n# doc = hdt.HDTDocument('am-combined.hdt')\n# triples, c = doc.search_triples('', 'http://www.w3.org/2004/02/skos/core#broader', '')\n#\n# types = Counter()\n# for s, p, o in triples:\n#     slabel = getlabel(s, doc)\n#     olabel = getlabel(o, doc)\n#\n#     types[slabel] += 1\n#     types[olabel] += 1\n#\n# for type, ct in types.most_common():\n#     print(f'{ct: 5}: {type}')\n\ndoc = hdt.HDTDocument('am-combined.hdt')\ntriples, c = doc.search_triples('', 'http://purl.org/collections/nl/am/contentMotifGeneral', '')\n\nfor s, p, o in triples:\n    print(getlabel(s, doc), getlabel(o, doc))\n","sub_path":"datasets/amfull/raw/inspect-am.py","file_name":"inspect-am.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"251175901","text":"\r\n''' importando bibliotecas '''\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datascience as ds\r\nimport datetime\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nfrom datetime import date\r\nfrom pandas_datareader import data as web\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport time\r\n\r\n''' 
importando o csv com as 'notas de corretagem' para uma tabela pandas '''\r\n\r\nNotas_de_Corretagem0 = pd.read_csv(\r\n r'C:\\Users\\femdi\\OneDrive\\Documentos\\python\\Controle de Investimentos em Ações na Bolsa de Valores - Notas de Corretagem.csv',\r\n error_bad_lines=False)\r\n# pd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', None)\r\npd.set_option('display.max_columns', None)\r\n\r\n\r\n''' selecionando apenas as colunas desejadas '''\r\n\r\nNotas_de_Corretagem1 = Notas_de_Corretagem0[['Corretora',\r\n 'Data',\r\n 'Horário',\r\n 'Operação',\r\n 'Modalidade',\r\n 'Papel',\r\n 'Tipo Papel',\r\n 'Quantidade',\r\n 'Preço',\r\n 'Custo Corretagem']]\r\n\r\n# Formatando a Base\r\n\r\n'''Trocando , por . nos números e os transformando em float'''\r\n\r\n# preço\r\npreço_novo = []\r\nfor i in np.arange(len(Notas_de_Corretagem1)):\r\n x = float(Notas_de_Corretagem0.iloc[i][9].replace(',', '.'))\r\n preço_novo = np.append(preço_novo, x)\r\n# print(preço_novo)\r\n\r\n# corretagem\r\ncorret_novo = []\r\nfor i in np.arange(len(Notas_de_Corretagem1)):\r\n x = float(Notas_de_Corretagem0.iloc[i][10].replace(',', '.'))\r\n corret_novo = np.append(corret_novo, x)\r\n# print(corret_novo)\r\n\r\n''' Arrumando a formatação da data '''\r\n\r\n# Data\r\ndata = []\r\nfor i in np.arange(len(Notas_de_Corretagem1)):\r\n x = datetime.strptime(Notas_de_Corretagem0.iloc[i][2], '%d/%m/%Y').date()\r\n data = np.append(data, x)\r\n# print(data)\r\n\r\n\r\n''' Tirando as colunas erradas e colocando as certas'''\r\n\r\nNotas_de_Corretagem2 = Notas_de_Corretagem1.drop(columns={'Preço', 'Custo Corretagem', 'Data'})\r\n\r\nNotas_de_Corretagem3 = Notas_de_Corretagem2.assign(Preço=preço_novo) \\\r\n .assign(Custo_Corretagem=corret_novo) \\\r\n .assign(Data=data)\r\n\r\n''' Selecionando as colunas desejadas'''\r\n\r\nNotas_de_Corretagem4 = Notas_de_Corretagem3[['Corretora',\r\n 'Data',\r\n 'Operação',\r\n 'Modalidade',\r\n 'Papel',\r\n 'Tipo Papel',\r\n 'Quantidade',\r\n 'Preço',\r\n 'Custo_Corretagem']]\r\n\r\n'''Renomeando'''\r\n\r\nNotas_de_Corretagem5 = Notas_de_Corretagem4.rename(columns={\"Custo_Corretagem\": \"Custo Corretagem\"})\r\n\r\n\r\n'''Trazendo a série do IBOVESPA a partir do primeira operação da base'''\r\n'''A série traz todas as datas que a Bolsa esteve aberta'''\r\n\r\nSerie_IBOV = pd.DataFrame()\r\ntickers = ['^BVSP'] #ticker da IBOVESPA no Yahoo Finance\r\ndata = min(Notas_de_Corretagem5['Data'])\r\nfor i in tickers:\r\n Serie_IBOV[i] = round(web.get_data_yahoo(i, data)['Adj Close'], 2) # adj close traz as cotações ajustadas por slip e dividendos\r\nSerie_IBOV = Serie_IBOV.rename(columns ={'^BVSP': 'IBOV'})\r\npd.set_option('display.max_rows', None)\r\n#print(Serie_IBOV)\r\n\r\n'''Pegando coluna de datas'''\r\nSerie_IBOV1=Serie_IBOV.reset_index(level=0)\r\n\r\n\r\n'''Criando variáveis importantes para a análise'''\r\n\r\n# valor bruto\r\nvalor_bruto = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n x = (Notas_de_Corretagem5.loc[i]['Quantidade'] * Notas_de_Corretagem5.loc[i]['Preço'])\r\n valor_bruto = np.append(valor_bruto, x)\r\n# print(valor_bruto)\r\n\r\n# Taxa de Liquidação\r\ntx_liq = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n if Notas_de_Corretagem5.loc[i]['Modalidade'] == 'Swing':\r\n x = (valor_bruto[i]) * 0.000275\r\n elif Notas_de_Corretagem5.loc[i]['Modalidade'] == 'Day Trade':\r\n x = (valor_bruto[i]) * 0.0002\r\n tx_liq = np.append(tx_liq, x)\r\n# print(tx_liq)\r\n\r\n# Emolumentos\r\nemo = []\r\nfor i in 
np.arange(len(Notas_de_Corretagem5)):\r\n if Notas_de_Corretagem5.loc[i]['Modalidade'] == 'Swing':\r\n x = (valor_bruto[i]) * 0.00004115\r\n elif Notas_de_Corretagem5.loc[i]['Modalidade'] == 'Day Trade':\r\n x = (valor_bruto[i]) * 0.00004983\r\n emo = np.append(emo, x)\r\n# print(emo)\r\n\r\n# ISS\r\niss = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n x = (Notas_de_Corretagem5.loc[i]['Custo Corretagem'] * 0.0965)\r\n iss = np.append(iss, x)\r\n# print(iss)\r\n\r\n# Custo Total\r\nct = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n x = Notas_de_Corretagem5.loc[i]['Custo Corretagem'] + tx_liq[i] + emo[i] + iss[i]\r\n ct = np.append(ct, x)\r\n# print(ct)\r\n\r\n# Valor Líquido\r\nvl = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n if Notas_de_Corretagem5.loc[i]['Operação'] == 'Compra':\r\n x = valor_bruto[i] + ct[i]\r\n if Notas_de_Corretagem5.loc[i]['Operação'] == 'Venda':\r\n x = (valor_bruto[i] * (-1) + ct[i])\r\n vl = np.append(vl, x)\r\n# print(vl)\r\n\r\n# Preço Médio da Ordem\r\npmo = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n x = vl[i] / Notas_de_Corretagem5.loc[i]['Quantidade']\r\n pmo = np.append(pmo, x)\r\n# print(pmo)\r\n\r\n# Saldo Posição\r\nsp = []\r\nfor i in np.arange(len(Notas_de_Corretagem5)):\r\n if Notas_de_Corretagem5.loc[i]['Operação'] == 'Compra':\r\n x = Notas_de_Corretagem5.loc[i]['Quantidade']\r\n if Notas_de_Corretagem5.loc[i]['Operação'] == 'Venda':\r\n x = Notas_de_Corretagem5.loc[i]['Quantidade'] * (-1)\r\n sp = np.append(sp, x)\r\n# print(sp)\r\n\r\n\r\n'''Juntando novas variáveis com tabela de ações'''\r\n\r\nNotas_de_Corretagem6 = Notas_de_Corretagem5.assign(valor_bruto=valor_bruto) \\\r\n .assign(Tx_Liquidação=tx_liq) \\\r\n .assign(Emolumentos=emo) \\\r\n .assign(ISS=iss) \\\r\n .assign(Custo_Total=ct) \\\r\n .assign(Valor_Líquido=vl) \\\r\n .assign(PMdaOrdem=pmo) \\\r\n .assign(Saldo_Posicao=sp)\r\n\r\n'''Criando mais variáveis '''\r\n\r\n# Estoque\r\nestoque = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = sum(Notas_de_Corretagem6.loc[j]['Saldo_Posicao'] for j in np.arange(0, i + 1) if\r\n Notas_de_Corretagem6.loc[j]['Papel'] == Notas_de_Corretagem6.loc[i]['Papel'])\r\n estoque = np.append(estoque, x)\r\n# print(estoque)\r\n\r\n# Ativo\r\nativo0 = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = 0\r\n for j in np.arange(i, len(Notas_de_Corretagem6)):\r\n if estoque[j] == 0 and Notas_de_Corretagem6.loc[j]['Papel'] == Notas_de_Corretagem6.loc[i]['Papel']:\r\n x = x + 1\r\n ativo0 = np.append(ativo0, x)\r\n# print(ativo0)\r\n\r\nativo = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if ativo0[i] == 0:\r\n x = \"Ativo\"\r\n elif ativo0[i] != 0:\r\n x = \"Inativo\"\r\n ativo = np.append(ativo, x)\r\n# print(ativo)\r\n\r\n# Cesta\r\ncesta = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = Notas_de_Corretagem6.loc[i]['Papel'] + '_000' + str(int(ativo0[i]))\r\n cesta = np.append(cesta, x)\r\n# print(cesta)\r\n\r\n# Preço Médio da Cesta\r\npmc = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = sum(Notas_de_Corretagem6.loc[j]['Valor_Líquido'] for j in np.arange(0, i + 1) if\r\n cesta[j] == cesta[i] and Notas_de_Corretagem6.loc[j]['Operação'] == 'Compra')\r\n y = sum(Notas_de_Corretagem6.loc[j]['Saldo_Posicao'] for j in np.arange(0, i + 1) if\r\n cesta[j] == cesta[i] and Notas_de_Corretagem6.loc[j]['Operação'] == 'Compra')\r\n z = x / y\r\n pmc = np.append(pmc, z)\r\n# print(pmc)\r\n\r\n# Lucro/Prejuizo\r\nlucro = []\r\nfor i in 
np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Compra':\r\n x = 0\r\n elif Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda':\r\n x = (-1) * (Notas_de_Corretagem6.loc[i]['Quantidade'] * (pmc[i] + Notas_de_Corretagem6.loc[i]['PMdaOrdem']))\r\n lucro = np.append(lucro, x)\r\n# print(lucro)\r\n\r\n# IR na Fonte\r\nir = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i][\r\n 'Modalidade'] == 'Swing' and lucro[i] > 0:\r\n x = lucro[i] * 0.000050\r\n elif Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i][\r\n 'Modalidade'] == 'Day Trade' and lucro[i] > 0:\r\n x = lucro[i] * 0.01\r\n else:\r\n x = 0\r\n ir = np.append(ir, x)\r\n# print(ir)\r\n\r\n# IR FII\r\nirfii = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i]['Tipo Papel'] == 'FII' and \\\r\n lucro[i] > 0:\r\n x = lucro[i] * 0.2\r\n else:\r\n x = 0\r\n irfii = np.append(irfii, x)\r\n# print(irfii)\r\n\r\n# IR ETF\r\niretf = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i]['Tipo Papel'] == 'ETF' and \\\r\n lucro[i] > 0:\r\n x = lucro[i] * 0.2\r\n else:\r\n x = 0\r\n iretf = np.append(iretf, x)\r\n# print(iretf)\r\n\r\n# IR Day Trade\r\nirdt = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i][\r\n 'Tipo Papel'] == 'Day Trade' and lucro[i] > 0:\r\n x = lucro[i] * 0.20\r\n else:\r\n x = 0\r\n irdt = np.append(irdt, x)\r\n# print(irdt)\r\n\r\n# Mês de Referencia\r\nmês = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = str(Notas_de_Corretagem6.loc[i]['Data'].month) + '_' + str(Notas_de_Corretagem6.loc[i]['Data'].year)\r\n mês = np.append(mês, x)\r\n# print(mês)\r\n\r\n# Vendas no mês\r\nvendas = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n x = sum(Notas_de_Corretagem6.loc[j]['valor_bruto'] for j in np.arange(len(Notas_de_Corretagem6)) \\\r\n if mês[j] == mês[i] and Notas_de_Corretagem6.loc[j]['Operação'] == 'Venda' \\\r\n and Notas_de_Corretagem6.loc[j]['Tipo Papel'] == 'Ação')\r\n vendas = np.append(vendas, x)\r\n# print(vendas)\r\n\r\n# IR Normal\r\nirnormal = []\r\nfor i in np.arange(len(Notas_de_Corretagem6)):\r\n if Notas_de_Corretagem6.loc[i]['Operação'] == 'Venda' and Notas_de_Corretagem6.loc[i][\r\n 'Tipo Papel'] == 'Ação' and lucro[i] > 0 and vendas[i] > 20000:\r\n x = lucro[i] * 0.15\r\n else:\r\n x = 0\r\n irnormal = np.append(irnormal, x)\r\n# print(irnormal)\r\n\r\n'''Juntando novas variáveis com tabela de ações'''\r\n\r\nNotas_de_Corretagem7 = Notas_de_Corretagem6.assign(Estoque=estoque) \\\r\n .assign(ir_nafonte=ir) \\\r\n .assign(Ativo=ativo) \\\r\n .assign(Cesta=cesta) \\\r\n .assign(PMdaCesta=pmc) \\\r\n .assign(lucro_prejuizo=lucro) \\\r\n .assign(IR_FII=irfii) \\\r\n .assign(IR_ETF=iretf) \\\r\n .assign(IR_Normal=irnormal) \\\r\n .assign(IR_Day_Trade=irdt) \\\r\n .assign(mes=mês) \\\r\n .assign(vendas_no_mes=vendas)\r\n\r\n\r\n\r\n'''Renomeando colunas, chegando na base final das Notas de Corretagem'''\r\n\r\nNotas_de_Corretagem = Notas_de_Corretagem7.rename(columns={\"valor_bruto\": \"Valor Bruto\",\r\n \"Tx_Liquidação\": \"Tx Liquidação\",\r\n \"ir_nafonte\": \"IR na Fonte\",\r\n \"Custo_Total\": \"Custo Total\",\r\n \"Valor_Líquido\": \"Valor Líquido\",\r\n 
\"PMdaOrdem\": \"Preço Médio Ordem\",\r\n \"PMdaCesta\": \"Preço Médio Cesta\",\r\n \"lucro_prejuizo\": \"Lucro Prejuízo\",\r\n \"IR_FII\": \"IR FII\",\r\n \"IR_ETF\": \"IR ETF\",\r\n \"IR_Normal\": \"IR Normal\",\r\n \"IR_Day_Trade\": \"IR Day Trade\",\r\n \"mes\": \"Mês Referência\",\r\n \"vendas_no_mes\": \"Vendas no Mês (Tipo Ação)\",\r\n \"Saldo_Posicao\": \"Saldo Posição\"\r\n })\r\n\r\n\r\n\r\n\r\n\r\n'''Selecionando colunas para fazer uma base de Carteira'''\r\n\r\nCarteira0 = Notas_de_Corretagem[['Tipo Papel',\r\n 'Papel',\r\n 'Corretora',\r\n 'Quantidade',\r\n 'Preço',\r\n 'Operação',\r\n 'Data',\r\n 'Preço Médio Cesta',\r\n 'Mês Referência',\r\n 'Preço Médio Ordem',\r\n 'Cesta',\r\n 'Ativo',\r\n 'Saldo Posição',\r\n 'Valor Líquido']]\r\n\r\n\r\n\r\n'''Agrupando em tipos de Papel, por soma da quantidade, vl liquido, max data ('última compra')e min data (1a compra)'''\r\n\r\nCarteira1 = Carteira0[(Carteira0['Ativo'] == 'Ativo') & (Carteira0['Corretora'] != '')] \\\r\n [['Papel', 'Saldo Posição', 'Valor Líquido']].groupby('Papel').sum()\r\n\r\nCarteira2 = Carteira0[['Papel', 'Data']].groupby('Papel').min()\r\n\r\nCarteira3 = Carteira0[['Papel', 'Data']].groupby('Papel').max()\r\n\r\nCarteira4 = Carteira1.merge(Carteira2, how='left', left_on='Papel', right_on='Papel').merge(Carteira3, how='left',\r\n left_on='Papel',\r\n right_on='Papel')\r\nCarteira5 = Carteira4.rename(\r\n columns={\"Data_x\": \"1a Compra\", \"Data_y\": \"Última Compra\", \"Saldo Posição\": \"Quantidade\"})\r\n\r\n''' fazendo a planilha de preços médios '''\r\n\r\nPreço_Medio0 = Notas_de_Corretagem[\r\n (Notas_de_Corretagem['Ativo'] == 'Ativo') & (Notas_de_Corretagem['Operação'] == 'Compra')]\r\n\r\nPreço_Medio1 = Preço_Medio0[['Papel', 'Preço Médio Cesta']]\r\n\r\nPreço_Medio = Preço_Medio1.drop_duplicates(subset='Papel', keep='last')\r\n\r\n\r\n'''Juntando coluna com o preço médio a base da carteira'''\r\n\r\nCarteira6 = Carteira5.merge(Preço_Medio, how='left', left_on='Papel', right_on='Papel').rename(\r\n columns={\"Preço Médio Cesta\": \"Preço Médio\"})\r\n\r\n\r\n\r\n\r\n\r\n'''Agora, procuramos as cotações atuais dos Papeis da caretira na base do Yahoo Fianace'''\r\n'''Lista com os Papeis 'tickers' da carteira'''\r\nPapeis = Carteira6.loc[:, 'Papel']\r\n\r\n'''Colocando '.SA' no final de toda sigla do papel, pois é assim que estão registrados na base do Yahoo Finance'''\r\nmy_string = \".SA\"\r\nPapeisSA = [\"{}{}\".format(i, my_string) for i in Papeis]\r\n\r\n'''Procurando os tickers na base do Yahoo'''\r\nprices0 = pd.DataFrame()\r\ntickers = PapeisSA\r\ndata = date.today()\r\ndata2 = data - timedelta(days=1)\r\n\r\n'''Caso há algum erro na base da Yahoo, o valor que retorna é uma média da cotação desde o primeiro dia da carteira'''\r\nfor i in tickers:\r\n try:\r\n prices0[i] = round(web.get_data_yahoo(i, data, data)['Adj Close'], 2)\r\n except:\r\n prices0[i] = np.mean(round(web.get_data_yahoo(i, data2, data)['Adj Close'], 2))\r\n'''Invertendo colunas e linhas '''\r\nprices = prices0.T\r\n\r\n'''Mudando o index e fazendo uma coluna com as datas'''\r\nprices1 = prices.reset_index(level=0)\r\nprices2 = prices1.rename(columns={list(prices1)[1]: 'Preço Atual', \"index\": \"Papel\"}).replace({'Papel': r'.SA$'},\r\n {'Papel': ''},\r\n regex=True)\r\n\r\n'''Juntando com os preços atuais com a base da carteira'''\r\nCarteira7 = Carteira6.merge(prices2, how='left', left_on='Papel', right_on='Papel')\r\n\r\n\r\n\r\n'''Calculando variáveis novas com o Lucro, a Porcentagem de Variação do Preço, o Valor Atual dos 
papeis\r\ne a Participação em % do papel na Carteira'''\r\n\r\nlucro = pd.Series()\r\nfor i in np.arange(len(Carteira7)):\r\n x = Carteira7.loc[i]['Quantidade'] * Carteira7.loc[i]['Preço Atual'] - Carteira7.loc[i]['Valor Líquido']\r\n lucro = np.append(lucro, x)\r\n\r\nporcent = pd.Series()\r\nfor i in np.arange(len(Carteira7)):\r\n x = (\"{:.1%}\".format((Carteira7.loc[i]['Preço Atual'] / Carteira7.loc[i]['Preço Médio']) - 1))\r\n porcent = np.append(porcent, x)\r\n\r\nvl_atual = pd.Series()\r\nfor i in np.arange(len(Carteira7)):\r\n x = Carteira7.loc[i]['Quantidade'] * Carteira7.loc[i]['Preço Atual']\r\n vl_atual = np.append(vl_atual, x)\r\n# print(vl_atual)\r\nvl_atual_total = sum(vl_atual)\r\n\r\nparticipacao = pd.Series()\r\nfor i in np.arange(len(Carteira7)):\r\n x = round(((Carteira7.loc[i]['Quantidade'] * Carteira7.loc[i]['Preço Atual']) / vl_atual_total), 2)\r\n participacao = np.append(participacao, x)\r\n\r\n\r\n'''Adicionando essas variáveis como colunas na base de carteira'''\r\nCarteira8 = Carteira7.assign(VL_Atual=vl_atual, Lucro_Prejuízo=lucro, Porc_Lucro_Prejuízo=porcent,\r\n Particip=participacao) \\\r\n .rename(columns={'Lucro_Prejuízo': 'Lucro/Prejuízo', 'Porc_Lucro_Prejuízo': '%Lucro/Prejuízo', \\\r\n 'Particip': 'Participação na Carteira', 'VL_Atual': 'Valor Líquido Atual'})\r\n\r\n\r\n'''Finalmente, calculando o lucro das ações já completas (compra+venda) e somando com a rentabilidade dos papeis\r\n atuais da carteira, para ter a Rentabilidade Total da Carteira em determinada data 'v' '''\r\n\r\nlucro = sum(Notas_de_Corretagem.loc[j][\"Lucro Prejuízo\"] for j in np.arange(len(Notas_de_Corretagem)))\r\nRentabilidade = (sum(Carteira8[\"Valor Líquido Atual\"]) + lucro) / sum(Carteira8[\"Valor Líquido\"]) - 1\r\n\r\nprint(Rentabilidade)\r\n\r\n","sub_path":"Rodando_Dia_Específico.py","file_name":"Rodando_Dia_Específico.py","file_ext":"py","file_size_in_byte":18450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"267042546","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef cut_repeat(dict):\n f = open(\"ybdt-dict-content.txt\", \"a\");#if not exist, create it, if exist, do nothing\n f.close();\n\n f_new = open(dict, \"r\");\n f_old = open(\"ybdt-dict-content.txt\", \"r\");\n tmp = f_old.readlines();\n print( \"The old file count is: \" + str(len(tmp)) );\n #print(\"The old file is:\");\n #print(tmp);\n tmp_ = [];#create an empty array to store element without \\n\n for line in tmp:\n tmp_.append( line.strip(\"\\n\") );\n lines = f_new.readlines();\n print(\"The new file count is: \" + str(len(lines)) );\n #print(\"The new file is:\");\n #print(lines);\n count = 0;#counter for repeated element\n print(\"The repeated element is: \");\n for line in lines:\n if line.strip(\"\\n\") not in tmp_:\n tmp_.append( line.strip(\"\\n\") );\n else:\n print( line.strip(\"\\n\") );\n count = count + 1;\n print( \"The repeated count is: \" + str(count) );\n print( \"The final count is: \" + str(len(tmp_)) );\n f_new.close();\n f_old.close();\n \n f = open(\"ybdt-dict-content.txt\", \"w\");\n for line in tmp_:\n f.write(line + \"\\n\");\n f.close();\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage: python3 ybdt-dict-cleaner.py path_to_dict\");\n print(\"Example: python3 ybdt-dict-cleaner.py dict.txt\");\n exit();\n else:\n dict = sys.argv[1];\n \n cut_repeat(dict);\n\nif __name__ == \"__main__\":\n 
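# read the dictionary path from argv and strip duplicate entries via cut_repeat\n    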
main();","sub_path":"ybdt-dict-cleaner.py","file_name":"ybdt-dict-cleaner.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"172104084","text":"import sys\r\nimport numpy as np\r\nimport pickle\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils.vis_utils import plot_model\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.layers import LSTM, Concatenate\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.preprocessing import sequence\r\nfrom utilities import multiply_durations, concat_all, get_max_packet_size, extractFeatures, extract_packet_sizes, ngrams, dbclustermin, extractSignatures, generate_traffic_rate_features, splitAllFeatures, extract_durations, normalize_packet_sizes\r\nimport warnings\r\nfrom sklearn.metrics import confusion_matrix\r\nimport seaborn as sn\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nimport os\r\nimport random\r\nimport csv\r\nfrom gan.output import Output, OutputType, Normalization\r\n\r\ndef extractSequences(fn):\r\n seqs = []\r\n with open(fn, newline='\\n') as csvf:\r\n csv_reader = csv.reader(csvf, delimiter=' ')\r\n for row in csv_reader:\r\n if len(row) == 20:\r\n seqs.append(row)\r\n return seqs\r\n\r\n\r\nsys.setrecursionlimit(1000000)\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n# fix random seed for reproducibility\r\nnp.random.seed(7)\r\n\r\ntarget_device = sys.argv[1]\r\n\r\nwith open(\"preprocessed.pkl\", mode='rb') as featuresFile:\r\n raw_features = pickle.load(featuresFile)\r\n\r\nraw_features = splitAllFeatures(raw_features)\r\nfeature_for_target = raw_features[target_device]\r\n\r\npacket_sizes_for_target = extract_packet_sizes(feature_for_target)\r\ndurations_for_target = extract_durations(feature_for_target)\r\n\r\nsignatures = dict()\r\n\r\n# distance metric used by dbscan\r\ndistance_threshold = 5.0\r\n# total ngrams divided by cluster threshold is equal to the min_samples needed to form a cluster in dbscan\r\nmin_cluster = len(packet_sizes_for_target)/5\r\nmin_sig_size = 2\r\nmax_sig_size = 5\r\nall_sigs_count = 0\r\n\r\nfor i in range(min_sig_size, max_sig_size + 1):\r\n allngrams = []\r\n for real_packet_size in packet_sizes_for_target:\r\n ngramVector = ngrams(i, real_packet_size)\r\n for ngram in ngramVector:\r\n allngrams.append(ngram)\r\n cluster = dbclustermin(allngrams, distance_threshold, min_cluster)\r\n signatures[i] = extractSignatures(cluster, i)\r\n all_sigs_count += len(signatures[i])\r\n\r\nall_packet_sizes = []\r\n\r\nprint(all_sigs_count)\r\n\r\nfor packet_sizes in packet_sizes_for_target:\r\n for packet in packet_sizes:\r\n all_packet_sizes.append(abs(int(packet)))\r\n\r\nall_ps_listed = list(set(all_packet_sizes))\r\nall_ps_listed.sort()\r\n\r\ntokensToPacketSize = dict()\r\npacketSizeToTokens = dict()\r\n\r\nmin_duration = 1000000\r\nmax_duration = 0\r\n\r\nfor i in range(len(all_ps_listed)):\r\n ps = all_ps_listed[i]\r\n tokensToPacketSize[i] = ps\r\n packetSizeToTokens[ps] = i\r\n\r\nfor i in range(len(durations_for_target)):\r\n for dur in durations_for_target[i]:\r\n min_duration = min(min_duration, dur)\r\n max_duration = max(max_duration, dur)\r\n\r\ndata_feature = []\r\ndata_attribute = []\r\ndata_gen_flag = []\r\n\r\nfor i in range(len(packet_sizes_for_target)):\r\n durations = 
durations_for_target[i]\r\n packets = packet_sizes_for_target[i]\r\n data_gen = []\r\n data_feat = []\r\n data_attr = []\r\n for j in range(min_sig_size, max_sig_size + 1):\r\n extractedNgrams = ngrams(j, packets)\r\n newFeatures = extractFeatures(extractedNgrams, signatures[j])\r\n data_attr = data_attr + newFeatures\r\n for j in range(len(packets)):\r\n packet_length = int(packets[j])\r\n duration = durations[j]\r\n direction = []\r\n if packet_length < 0:\r\n direction = [1.0, 0.0]\r\n else:\r\n direction = [0.0, 1.0]\r\n token = packetSizeToTokens[abs(packet_length)]\r\n packet_length_feat = len(tokensToPacketSize) * [0]\r\n packet_length_feat[token] = 1\r\n normalized_duration = (duration - min_duration)/(max_duration - min_duration)\r\n data_gen.append(1.0)\r\n d = []\r\n d = d + packet_length_feat + direction\r\n d.append(normalized_duration)\r\n data_feat.append(np.array(d, dtype=\"float32\"))\r\n data_gen_flag.append(np.array(data_gen, dtype=\"float32\"))\r\n data_feature.append(np.array(data_feat))\r\n data_attribute.append(np.array(data_attr, dtype=\"float32\"))\r\n\r\ndata_feature_output = [\r\n Output(type_=OutputType.DISCRETE, dim=len(tokensToPacketSize), normalization=None, is_gen_flag=False),\r\n Output(type_=OutputType.DISCRETE, dim=2, normalization=None, is_gen_flag=False),\r\n Output(type_=OutputType.CONTINUOUS, dim=1, normalization=Normalization.ZERO_ONE, is_gen_flag=False)\r\n]\r\n\r\ndata_attribute_output = [\r\n Output(type_=OutputType.CONTINUOUS, dim=all_sigs_count, normalization=Normalization.ZERO_ONE, is_gen_flag=False),\r\n]\r\n\r\ndata_feature = np.array(data_feature)\r\nprint(data_feature.shape)\r\ndata_attribute = np.array(data_attribute)\r\nprint(data_attribute.shape)\r\ndata_gen_flag = np.array(data_gen_flag)\r\nprint(data_gen_flag.shape)\r\n\r\nnp.savez(\"data/iot/data_train.npz\", data_feature=data_feature, data_attribute=data_attribute, data_gen_flag=data_gen_flag)\r\nwith open('data/iot/data_feature_output.pkl', mode='wb') as fp:\r\n pickle.dump(data_feature_output, fp, protocol=2)\r\nwith open('data/iot/data_attribute_output.pkl', mode='wb') as fp:\r\n pickle.dump(data_attribute_output, fp, protocol=2)","sub_path":"createDGData.py","file_name":"createDGData.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"281860009","text":"\"\"\"\nAuthor: Jing (https://github.com/gnijuohz)\n\nContains Duplicate II: https://oj.leetcode.com/problems/contains-duplicate-ii \n\n\nGiven an array of integers and an integer k, find out whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the difference between i and j is at most k. 
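For example, nums = [1, 2, 3, 1] with k = 3 returns True (nums[0] == nums[3] and 3 - 0 <= 3), while k = 2 returns False.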
\nTags: Array, Hash Table\nSimilar problems: (E) Contains Duplicate, (M) Contains Duplicate III\n\"\"\"\n\nclass Solution(object):\n    def containsNearbyDuplicate(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: bool\n        \"\"\"\n        last_seen = {}\n        for index, num in enumerate(nums):\n            if num not in last_seen:\n                last_seen[num] = index\n            else:\n                distance = index - last_seen[num]\n                if distance <= k:\n                    return True\n                else:\n                    last_seen[num] = index\n        return False\n","sub_path":"solutions/Contains-Duplicate-II.py","file_name":"Contains-Duplicate-II.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"12448259","text":"import tf\nimport yaml\nimport numpy as np\nfrom image_geometry import PinholeCameraModel\nfrom sensor_msgs.msg import CameraInfo\n\nclass CameraModel:\n\n    def __init__(self):\n        self.camera_model = PinholeCameraModel()\n        self.matrix = None\n        self.cam_info = None\n\n    def load_camera_calibration(self, camera_calibration_yaml, lidar_camera_calibration_yaml=None):\n\n        stream = file(camera_calibration_yaml, 'r')\n        calib_data = yaml.load(stream)\n        cam_info = CameraInfo()\n        cam_info.width = calib_data['image_width']\n        cam_info.height = calib_data['image_height']\n        cam_info.K = calib_data['camera_matrix']['data']\n        cam_info.D = calib_data['distortion_coefficients']['data']\n        cam_info.R = calib_data['rectification_matrix']['data']\n        cam_info.P = calib_data['projection_matrix']['data']\n        cam_info.distortion_model = calib_data['distortion_model']\n        stream.close()\n\n        self.camera_model.fromCameraInfo(cam_info)\n        self.cam_info = cam_info\n\n        if lidar_camera_calibration_yaml is not None:\n            stream = file(lidar_camera_calibration_yaml, 'r')\n            calib_data = yaml.load(stream)\n            translation_data = calib_data['translation']['data']\n\n            print(translation_data)\n\n            translation = [translation_data[0], translation_data[1], translation_data[2], 1.0]\n            rotation_data = calib_data['euler_rotations']['data']\n            euler_axes = calib_data['euler_axes']\n\n            # euler_matrix( roll, pitch, yaw )\n            rotationMatrix = tf.transformations.euler_matrix(rotation_data[2], rotation_data[1], rotation_data[0], euler_axes)\n            rotationMatrix[:, 3] = translation\n            self.matrix = rotationMatrix\n\n\n    def project_lidar_points_to_camera_2d(self, points):\n\n        uv = []\n\n        for point in points:\n            rotatedPoint = self.matrix.dot(point)\n            uv.append(self.camera_model.project3dToPixel(rotatedPoint))\n\n        return uv\n\n    def rectify_image(self, raw):\n\n        img = np.zeros_like(raw)\n        self.camera_model.rectifyImage(raw, img)\n\n        return img\n\n    def shape(self):\n        return self.cam_info.width, self.cam_info.height\n\n\ndef generateImage(camera, points, inputFile, outputFile):\n    import cv2\n\n    image = cv2.imread(inputFile)\n\n    uvs = camera.project_lidar_points_to_camera_2d(points)\n\n    pos = 0\n    for uv in uvs:\n        color = None\n        if pos == 0:\n            color = cv2.cv.Scalar(255, 0, 0)\n        elif 0 < pos < 5:\n            color = cv2.cv.Scalar(0, 255, 0)\n        else:\n            color = cv2.cv.Scalar(0, 0, 255)\n\n        cv2.circle(image, (int(uv[0]), int(uv[1])), 5, color, thickness=-1)\n        pos += 1\n\n    cv2.imwrite(outputFile, image)\n\n\ndef main():\n    import argparse\n    import tracket_parser\n    import csv\n    import sys\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--camera', type=str, help='Camera calibration yaml')\n    parser.add_argument('--lidar', type=str, help='Lidar to Camera calibration yaml')\n    parser.add_argument('--timestamps', type=str, help='Camera timestamps csv')\n
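    # the tracklet file provides the tracked object's per-frame position and box size used to draw the 3D boxes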
    parser.add_argument('--tracklet', type=str, help='Tracklet file')\n    parser.add_argument('--input_dir', type=str, help='Rectified camera images directory')\n    parser.add_argument('--output_dir', type=str, help='Annotation camera images directory')\n\n    args = parser.parse_args()\n    camera_calibration = args.camera\n    lidar_camera_calibration = args.lidar\n    camera_timestamps = args.timestamps\n    tracklet = args.tracklet\n    input_dir = args.input_dir\n    output_dir = args.output_dir\n\n    if camera_calibration is None or \\\n            lidar_camera_calibration is None or \\\n            camera_timestamps is None or \\\n            tracklet is None or \\\n            input_dir is None or \\\n            output_dir is None:\n        parser.print_usage()\n        sys.exit(-1)\n\n    camera = CameraModel()\n    camera.load_camera_calibration(camera_calibration, lidar_camera_calibration)\n\n    data = None\n    try:\n        f = open(tracklet)\n        data = f.read().replace('\\n', '')\n        f.close()\n\n    except:\n        print('Unable to read file: %s' % tracklet)\n        exit(-1)\n\n    timestamps = []\n    try:\n        f = open(camera_timestamps)\n        csv_reader = csv.DictReader(f, delimiter=',', restval='')\n        timestamps = []\n        for row in csv_reader:\n            timestamps.append(row['timestamp'])\n        f.close()\n\n    except:\n        print('Unable to read file: %s' % camera_timestamps)\n        exit(-1)\n\n    dataDict = tracket_parser.xml_to_dict(data)\n    cleaned = tracket_parser.clean_items_list(dataDict)\n    tracket_parser.put_timestamps_with_frame_ids(cleaned, timestamps)\n\n    tracklets = dataDict.get('tracklets', {})\n    target_item = tracklets.get('item', {})\n    w = float(target_item.get('w', 0))\n    h = float(target_item.get('h', 0))\n    l = float(target_item.get('l', 0))\n\n    for item in cleaned:\n        ts = item['timestamp']\n        tx = item['tx']\n        ty = item['ty']\n        tz = item['tz']\n        bbox = []\n        centroid = [tx, ty, tz, 1.0]\n\n        bbox.append(centroid)\n        bbox.append([tx - l / 2., ty + w / 2., tz + h / 2., 1.0])\n        bbox.append([tx - l / 2., ty - w / 2., tz + h / 2., 1.0])\n        bbox.append([tx + l / 2., ty + w / 2., tz + h / 2., 1.0])\n        bbox.append([tx + l / 2., ty - w / 2., tz + h / 2., 1.0])\n        bbox.append([tx + l / 2., ty - w / 2., tz - h / 2., 1.0])\n        bbox.append([tx - l / 2., ty + w / 2., tz - h / 2., 1.0])\n        bbox.append([tx - l / 2., ty - w / 2., tz - h / 2., 1.0])\n        bbox.append([tx + l / 2., ty + w / 2., tz - h / 2., 1.0])\n\n        generateImage(camera, bbox,\n                      '{}/image_{}.png'.format(input_dir, ts),\n                      '{}/image_{}.png'.format(output_dir, ts))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"common/camera_model.py","file_name":"camera_model.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"101065883","text":"segundos_str = input(\"Please enter the number of seconds to convert: \")\nsegundos = int(segundos_str)\n\ndias = segundos // 86400\nsobraDosDias = segundos % 86400\nhoras = sobraDosDias // 3600\nsobraDasHoras = sobraDosDias % 3600\nminutos = sobraDasHoras // 60\nsegundosFinal = sobraDasHoras % 60\n\nprint(dias, \"days,\", horas, \"hours,\", minutos, \"minutes and\", segundosFinal, \"seconds.\")\n","sub_path":"segundos.py","file_name":"segundos.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"248576905","text":"import math\n\nimport scrapy\nfrom ..items import RealityItem\nimport logging\nimport re\nfrom .. 
import utils\n\nclass BenRealitySpider(scrapy.Spider):\n name = 'ben_reality'\n start_urls = [\n 'https://www.benreality.cz/nemovitosti',\n ]\n\n def parse(self, response):\n products = response.css('.box a')\n if products is not None:\n for product in products:\n link = product.attrib['href']\n img = product.css('img::attr(src)').get()\n item = RealityItem()\n\n item['avatar'] = response.urljoin(img)\n item['link'] = link\n yield response.follow(item['link'], callback=self.parse_property, meta={'item': item})\n items_count = int(response.css('#first strong::text').get())\n pages_count = math.ceil(items_count / 10)\n for i in range(1, pages_count):\n next_page = 'https://www.benreality.cz/nemovitosti?from=' + str(i * 10)\n yield scrapy.Request(next_page, callback=self.parse)\n\n def parse_property(self, response):\n item = response.meta['item']\n item['title'] = response.css('h1::text').get().strip()\n item['ident_num'] = response.css('.contract_num::text').get()\n item['description'] = response.css('#description .description::text').get()\n\n images = response.css('.lightbox::attr(src)').getall()\n item['front_image'] = []\n for img in images:\n img = response.urljoin(img)\n if img not in item['front_image']:\n item['front_image'].append(img)\n\n item['lazy_image'] = item['front_image']\n item['thumbnail'] = None\n item['virtual_tour'] = None\n item['video'] = None\n item['title_2'] = item['title']\n\n item['company_name'] = 'BEN realitní kancelář s.r.o.'\n item['company_ic'] = '03696677'\n item['company_address'] = 'tř. Tomáše Bati 3677, 760 01 Zlín'\n item['agent_avatar'] = response.urljoin(response.css('#brokerData img::attr(src)').get())\n item['agent_name'] = response.css('.photo+ p::text').get().replace(' - zavolejte, napište kdykoliv', '')\n item['agent_telephone'] = response.css('#brokerData p:nth-child(3)::text').get()\n #email_text = email_text.replace('/cdn-cgi/l/email-protection#', '')\n item['agent_email'] = response.css('p:nth-child(5) a::attr(href)').get()\n price_text = response.css('.price strong::text').get().replace('.', '')\n price = price_unit = ''\n if price_text is not None:\n price_parts = price_text.strip().split('\\xa0')\n for i in range(len(price_parts)):\n if price_parts[i].isnumeric() or price_parts[i] == 'od ':\n price = price + price_parts[i]\n else:\n price_unit = price_unit + ' ' + price_parts[i]\n\n print('PRICE IS ')\n print(price_parts)\n if price.isnumeric():\n item['price'] = int(price)\n item['price_unit'] = price_unit.replace('od ', '').strip()\n else:\n item['price'] = -1 # price is given when asked\n item['price_unit'] = price_text\n\n item['note_to_price'] = response.css('#contractData .note::text').get()\n item['addition_infos'] = []\n\n addition_info_labels = response.css('th::text').getall()\n addition_info_values = response.css('td').extract()\n\n logging.error('LABEL LENGTH IS ' + str(len(addition_info_labels)))\n logging.error('VALUE LENGTH IS ' + str(len(addition_info_values)))\n print(addition_info_labels)\n print(addition_info_values)\n\n is_house = 'domu' in item['title'] or 'dům' in item['title'] or 'Dům' in item['title'] \\\n or 'domů' in item['title'] or 'vil' in item['title'] or 'Vil' in item['title'] or 'chat' in item[\n 'title'] \\\n or 'RD' in item['title']\n for ind, label in enumerate(addition_info_labels):\n label = label.strip().replace(':', '')\n val = re.sub(r'||||||||', '',\n addition_info_values[ind].strip()).replace('\\xa0', '').strip()\n if label == 'Číslo':\n item['ident_num'] = val\n if label == 'Lokalita':\n 
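# 'Lokalita' carries the address; keep only the first two comma-separated parts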
                address_parts = val.split(',')\n                if len(address_parts) > 2:\n                    item['address'] = address_parts[0].strip() + ', ' + address_parts[1].strip()\n                else:\n                    item['address'] = address_parts[0].strip()\n            if label == 'Stav nabídky':\n                item['business_state'] = val\n            if label == 'Dispozice bytu':\n                item['estate_disposition'] = val\n            if label == 'Velikost' and is_house:\n                if val[0].isnumeric() and int(val[0]) >= 5:\n                    val = '5 a více'\n                    item['estate_disposition'] = val\n                else:\n                    item['estate_disposition'] = val[0]\n            if label == 'Typ reality':\n                if 'Byt' in val:\n                    val = 'byt'\n                elif 'Rodinné domy' in val:\n                    val = 'dům'\n                elif 'Pozemky' in val:\n                    val = 'pozemek'\n                elif 'Komerční objekty' in val:\n                    val = 'komerční'\n\n                item['addition_infos'].append({'name': label, 'value': val})\n        eauction = response.css('.eaukce::attr(href)').get()\n        if eauction is not None:\n            item['addition_infos'].append({'name': 'E-aukce', 'value': eauction})\n\n        yield item\n","sub_path":"reality/reality/spiders/benreality_spider.py","file_name":"benreality_spider.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"93764098","text":"#! python3\n# -*- coding:GBK -*-\n\"\"\"\nDescription:\n    Computes and appends the lunar-calendar date for each event in a fixed-format\n    earthquake catalog DataFrame, and flags whether the event falls on a\n    lunar-modulation day.\n    The DataFrame columns are: year month day hour minute second latitude\n    longitude magnitude depth station-parameters epicenter-name.\n    Two columns are appended: the lunar date ('nldate') and the modulation\n    flag ('yn', marked with y/n). The final DataFrame is returned.\n\"\"\"\n\nimport lunar\nimport readEqcat\n\n\nclass mOrNot():\n\n    def ynrm(self, nl):\n        mday = [\n            '初一', '初二', '初八', '初九', '十五',\n            '十六', '十七', '廿二', '廿三', '廿四',\n            '廿五', '廿八', '廿九', '三十'\n        ]\n        if nl in mday:\n            return 'y'\n        else:\n            return 'n'\n\n    def makenl(self, eqcat):\n        eqcat['nldate'] = eqcat.apply(lambda x: lunar.run(x.year, x.month, x.day), axis=1)\n        eqcat['yn'] = eqcat.apply(lambda x: self.ynrm(x.nldate[-2:]), axis=1)\n        return eqcat\n\n\nif __name__ == \"__main__\":\n    eqcatname = './Rm2019.txt'\n    eqcat = readEqcat.formatCat(eqcatname)\n    eqcat2 = mOrNot().makenl(eqcat)\n    print(eqcat2.head())\n","sub_path":"Rm/MakeDataRm.py","file_name":"MakeDataRm.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"617544394","text":"#%%\nfrom pathlib import Path\nfrom functools import partial\nfrom tqdm import tqdm_notebook as tqdm\n\n#%%\ndef find_number_in_string(s):\n    \"\"\"\n    Finds and returns a number sequence in a given string.\n    Assumes : The string has only one continuously unbroken sequence of numbers. If there are multiple sequences,\n    the first one will be returned.\n    :param s: String containing number sequence. 
Type : str or pathlib.Path\n :return: String representation of the Number found in the string.\n \"\"\"\n s = str(s)\n start = None # starting index\n end = None # ending index\n for i in range(len(s)):\n if s[i].isdigit(): # start going over complete number sequence\n start=i\n end = i\n for j in range(start, len(s)): # loop until characters are still numbers\n if not s[j].isdigit():\n break\n end = j\n break\n if start is None: # the string contains no digits at all\n raise ValueError('no number sequence found in {!r}'.format(s))\n return str(int(s[start : end + 1])) # adding 1 to end index so as to include the end index\n\n\n#%%\ndef is_file(PATH)-> bool:\n \"\"\"\n Returns True if the file at `PATH` exists with Size > 0\n \"\"\"\n PATH = Path(PATH)\n file_exists = PATH.is_file() # Checks existence of file\n if file_exists:\n if PATH.stat().st_size > 0: # Checks file size\n return True\n return False\n\ndef delete_file(PATH)-> bool:\n PATH = Path(PATH)\n if PATH.is_file():\n PATH.unlink()\n else:\n raise FileNotFoundError\n return True\n\n#%%\ndef rename_folder(PATH, rename_func):\n \"\"\"\n Renames every file in given folder specified by `PATH` according to `rename_func`\n :param PATH: pathlib.Path : Path of the folder containing files to rename\n :param rename_func: callable : function that takes as input a path of a file and\n returns the `path` to which the file is to be renamed\n :return: returns True if all files are successfully renamed\n \"\"\"\n PATH = Path(PATH).absolute()\n for f in tqdm(PATH.iterdir()):\n f.rename(rename_func(f))\n return True\n\ndef change_suffix(file, ext):\n \"\"\"\n gives a file's name with extension replaced with `ext`\n :param file: file to be renamed\n :param ext: extension to replace the existing extension of `file`\n :return: new `path` to file with the extension changed to `ext`\n \"\"\"\n file = Path(file)\n return file.with_suffix(ext)","sub_path":"convenience_functions.py","file_name":"convenience_functions.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"592972363","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom mongoengine import connect\nfrom .forms import add_work\nfrom .models import todo_lis\n# Create your views here.\ndef home(request):\n connect(db=\"demo_db_2\", host=\"localhost\", port=27017)\n context = {}\n context['work'] = todo_lis.objects()\n if request.POST:\n form = add_work(request.POST)\n title = request.POST['task']\n print(title)\n Description = request.POST.get('description')\n print(\"Description =\", Description)\n added_date = request.POST.get('date_added')\n print(\"added = \", added_date)\n form_2 = todo_lis(task=title, Description=Description, date_added=added_date)\n form_2.save()\n messages.success(request, f'{title} has been created!')\n return redirect('home')\n else:\n form = add_work()\n \n return render(request, 'home.html', {'data':context, 'form':form})\n\ndef delete(request, **kwargs):\n print(kwargs['pk'])\n if request.POST:\n id=kwargs['pk']\n todo_lis.objects(id=id).delete()\n messages.success(request, f'Task with id {id} has been deleted')\n return redirect('home')\n return render(request, 'delete.html', {})\n\ndef update(request, **kwargs):\n print(kwargs['pk'])\n id=kwargs['pk']\n connect(db=\"demo_db_2\", host=\"localhost\", port=27017)\n task = todo_lis.objects(id=id)\n if request.POST:\n form = add_work(request.POST)\n title = request.POST['task']\n print(title)\n Description = request.POST.get('description')\n print(\"Description 
=\", Description)\n added_date = request.POST.get('date_added')\n print(\"added = \", added_date)\n todo_lis.objects(id=id).update(task=title, Description=Description, date_added=added_date)\n messages.success(request, f'Task with id {id} has been deleted')\n return redirect('home')\n else:\n form = add_work()\n return render(request, 'update.html', {'form':form})","sub_path":"demo_mongoengine/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"72724827","text":"from nltk.corpus.reader.api import CorpusReader\nfrom nltk.corpus.reader.api import CategorizedCorpusReader\nfrom readability.readability import Unparseable\nfrom readability.readability import Document as Paper\nimport codecs\nimport os\nimport bs4\nimport pickle\nfrom nltk import pos_tag, sent_tokenize, wordpunct_tokenize, FreqDist\nimport time\nimport logging\nfrom six import string_types\n\nlog = logging.getLogger(\"readability.readability\")\nlog.setLevel('WARNING')\n\nCAT_PATTERN = r'([a-z0-9_\\s]+)/.*'\nPKL_PATTERN = r'(?!\\.)[a-z0-9_\\s]+/[a-z0-9]+\\.pickle'\nDOC_PATTERN = r'(?!\\.)[a-z_\\s]+/[a-z0-9]+\\.html'\n\nTAGS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'li']\n\nclass HTMLCorpusReader(CategorizedCorpusReader, CorpusReader):\n \"\"\"\n Объект чтения корпуса с HTML-документами для получения\n возможности дополнительной предварительной обработки.\n \"\"\"\n\n def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8', tags=TAGS, **kwargs):\n \"\"\"\n Инициализирует объект чтения корпуса.\n Аргументы, управляющие классификацией\n (``cat_pattern``, ``cat_map`` и ``cat_file``), передаются\n в конструктор ``CategorizedCorpusReader``. остальные аргументы\n передаются в конструктор ``CorpusReader``.\n \"\"\"\n # Добавить шаблон категорий, если он не был передан в класс явно.\n if not any(key.startswith('cat_') for key in kwargs.keys()):\n kwargs['cat_pattern'] = CAT_PATTERN\n # Инициализировать объекты чтения корпуса из NLTK\n CategorizedCorpusReader.__init__(self, kwargs) # передаются именованные аргументы\n CorpusReader.__init__(self, root, fileids)\n\n\n # Сохранить теги, подлежащие извлечению.\n self.tags = tags\n\n def resolve(self, fileids, categories):\n \"\"\"\n Возвращает список идентификаторов файлов или названий категорий,\n которые передаются каждой внутренней функции объекта чтения корпуса.\n Реализована по аналогии с ``CategorizedPlaintextCorpusReader`` в NLTK.\n \"\"\"\n if fileids is not None and categories is not None:\n raise ValueError(\"Specify fileids or categories, not both\")\n if categories is not None:\n return self.fileids(categories)\n return fileids\n\n def docs(self, fileids=None, categories=None):\n \"\"\"\n Возвращает полный текст HTML-документа, закрывая его\n по завершении чтения.\n \"\"\"\n # Получить список файлов для чтения\n fileids = self.resolve(fileids, categories)\n # Создать генератор, загружающий документы в память по одному.\n for path, encoding in self.abspaths(fileids, include_encoding=True):\n with codecs.open(path, 'r', encoding=encoding) as f:\n yield f.read()\n\n def sizes(self, fileids=None, categories=None):\n \"\"\"\n Возвращает список кортежей, идентификатор файла и его размер.\n Эта функция используется для выявления необычно больших файлов\n в корпусе.\n \"\"\"\n # Получить список файлов\n fileids = self.resolve(fileids, categories)\n # Создать генератор, возвращающий имена и размеры файлов\n for path in self.abspaths(fileids):\n 
yield path, os.path.getsize(path)\n\n def html(self, fileids=None, categories=None):\n \"\"\"\n Returns the HTML content of each document, cleaning it up\n with the readability-lxml library.\n \"\"\"\n for doc in self.docs(fileids, categories):\n try:\n yield Paper(doc).summary()\n except Unparseable as e:\n print(\"Could not parse HTML: {}\".format(e))\n continue\n\n def paras(self, fileids=None, categories=None):\n \"\"\"\n Uses BeautifulSoup to extract paragraphs from the HTML.\n \"\"\"\n for html in self.html(fileids, categories):\n soup = bs4.BeautifulSoup(html, 'lxml')\n for element in soup.find_all(self.tags):\n yield element.text\n soup.decompose()\n\n def sents(self, fileids=None, categories=None):\n \"\"\"\n Uses the built-in sentence tokenizer to extract sentences from\n the paragraphs. Note that this method relies on BeautifulSoup\n to parse the HTML markup.\n \"\"\"\n for paragraph in self.paras(fileids, categories):\n for sentence in sent_tokenize(paragraph):\n yield sentence\n\n def words(self, fileids=None, categories=None):\n \"\"\"\n Uses the built-in tokenizer to extract words from the sentences.\n Note that this method relies on BeautifulSoup\n to parse the HTML markup.\n \"\"\"\n for sentence in self.sents(fileids, categories):\n for token in wordpunct_tokenize(sentence):\n yield token\n\n def tokenize(self, fileids=None, categories=None):\n \"\"\"\n Segments, tokenizes and tags a document in the corpus.\n \"\"\"\n for paragraph in self.paras(fileids, categories):\n yield [\n pos_tag(wordpunct_tokenize(sent), lang='rus')\n for sent in sent_tokenize(paragraph)\n ]\n\n def describe(self, fileids=None, categories=None):\n \"\"\"\n Walks the contents of the corpus and returns a dictionary\n with a variety of metrics describing the state of the corpus.\n \"\"\"\n started = time.time()\n # Structures for counting.\n counts = FreqDist()\n tokens = FreqDist()\n # Walk the paragraphs, tokenize them and count the tokens\n for para in self.paras(fileids, categories):\n counts['paras'] += 1\n for sent in sent_tokenize(para):\n counts['sents'] += 1\n for word in wordpunct_tokenize(sent):\n counts['words'] += 1\n tokens[word] += 1\n # Determine the number of files and categories in the corpus\n n_fileids = len(self.resolve(fileids, categories) or self.fileids())\n n_topics = len(self.categories(self.resolve(fileids, categories)))\n # Return a data structure with the information\n return {\n 'files': n_fileids,\n 'topics': n_topics,\n 'paras': counts['paras'],\n 'sents': counts['sents'],\n 'words': counts['words'],\n 'vocab': len(tokens),\n 'lexdiv': float(counts['words']) / float(len(tokens)),\n 'ppdoc': float(counts['paras']) / float(n_fileids),\n 'sppar': float(counts['sents']) / float(counts['paras']),\n 'secs': time.time() - started,\n }\n\n\nclass PickledCorpusReader(HTMLCorpusReader):\n\n def __init__(self, root, fileids=PKL_PATTERN, **kwargs):\n \"\"\"\n Initialize the corpus reader. Categorization arguments\n (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to\n the ``CategorizedCorpusReader`` constructor. 
The remaining arguments\n are passed to the ``CorpusReader`` constructor.\n \"\"\"\n # Add the default category pattern if not passed into the class.\n if not any(key.startswith('cat_') for key in kwargs.keys()):\n kwargs['cat_pattern'] = CAT_PATTERN\n\n CategorizedCorpusReader.__init__(self, kwargs)\n CorpusReader.__init__(self, root, fileids)\n\n def resolve(self, fileids, categories):\n \"\"\"\n Returns a list of fileids or categories depending on what is passed\n to each internal corpus reader function. This primarily bubbles up to\n the high level ``docs`` method, but is implemented here similar to\n the nltk ``CategorizedPlaintextCorpusReader``.\n \"\"\"\n if fileids is not None and categories is not None:\n raise ValueError(\"Specify fileids or categories, not both\")\n\n if categories is not None:\n return self.fileids(categories)\n return fileids\n\n def docs(self, fileids=None, categories=None):\n \"\"\"\n Returns the document loaded from a pickled object for every file in\n the corpus. Similar to the BaleenCorpusReader, this uses a generator\n to achieve memory safe iteration.\n \"\"\"\n # Resolve the fileids and the categories\n fileids = self.resolve(fileids, categories)\n\n # Create a generator, loading one document into memory at a time.\n for path, enc, fileid in self.abspaths(fileids, True, True):\n with open(path, 'rb') as f:\n yield pickle.load(f)\n\n def paras(self, fileids=None, categories=None):\n \"\"\"\n Returns a generator of paragraphs where each paragraph is a list of\n sentences, which is in turn a list of (token, tag) tuples.\n \"\"\"\n for doc in self.docs(fileids, categories):\n for paragraph in doc:\n yield paragraph\n\n def sents(self, fileids=None, categories=None):\n \"\"\"\n Returns a generator of sentences where each sentence is a list of\n (token, tag) tuples.\n \"\"\"\n for paragraph in self.paras(fileids, categories):\n for sentence in paragraph:\n yield sentence\n\n def tagged(self, fileids=None, categories=None):\n for sent in self.sents(fileids, categories):\n for token in sent:\n yield token\n\n def words(self, fileids=None, categories=None):\n \"\"\"\n Returns a generator of tokens (the tag is dropped).\n \"\"\"\n for token in self.tagged(fileids, categories):\n yield token[0]\n\n\nif __name__ == '__main__':\n from collections import Counter\n corpus = HTMLCorpusReader('corpus/raw/')\n #print(corpus.resolve())\n print(corpus.fileids())\n #print(corpus.resolve(fileids='books/book1.html', categories=None))\n #print(corpus.resolve(fileids=None, categories='sport'))\n #print(corpus.abspath('sport/sport2.html'))\n #print(corpus.categories())\n\n #for doc in corpus.docs():\n # print(doc)\n\n #for html in corpus.html():\n # print(html)\n\n #for cat in corpus.categories():\n # print(cat)\n # for sent in corpus.paras(categories=cat):\n # print(sent)\n\n #for sent in corpus.sents():\n # print(sent)\n\n #for tag in corpus.tokenize():\n # print(tag)\n\n #print(corpus.describe())\n #print(next(corpus.sents()))\n #print(next(corpus.sents()))\n #print(next(corpus.sents()))\n\n\n\n #corpus = PickledCorpusReader('corpus/html/')\n #words = Counter(corpus.words())\n\n #print(\"{:,} vocabulary {:,} word count\".format(len(words.keys()), sum(words.values())))\n\n #corpus = HTMLCorpusReader('corpus/html', DOC_PATTERN, cat_pattern=CAT_PATTERN)\n #print(corpus.categories())\n #print(corpus.fileids())\n #print(corpus.resolve(categories='cinema', fileids=None))\n #print(next(corpus.docs(fileids='cinema/index.html')))\n #print(next(corpus.html(fileids='cinema/index.html')))\n 
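# A minimal usage sketch, assuming 'corpus/raw/' above holds at least one\n # readable category of HTML files: count the tokens streamed by words()\n # with the Counter imported above.\n word_counts = Counter(corpus.words())\n print('{:,} distinct tokens, {:,} tokens total'.format(len(word_counts), sum(word_counts.values())))\n\n 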
#print(next(corpus.sizes()))\n #print(next(corpus.sizes()))\n","sub_path":"html_corpus.py","file_name":"html_corpus.py","file_ext":"py","file_size_in_byte":12400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"580256913","text":"from cm4.common.shell import Shell\nfrom cm4.common.dotdict import dotdict\nimport textwrap\nimport os\nfrom cm4.common.console import Console\n\n\nclass vm(object):\n @classmethod\n def create(cls, **kwargs):\n\n arg = dotdict(kwargs)\n\n if not os.path.exists(arg.name):\n os.makedirs(arg.name)\n\n config = cls.vagrantfile(**kwargs)\n\n with open('{name}/Vagrantfile'.format(**arg), 'w') as f:\n f.write(config)\n\n @classmethod\n def vagrantfile(cls, **kwargs):\n\n arg = dotdict(kwargs)\n\n provision = kwargs.get(\"script\", None)\n\n if provision is not None:\n arg.provision = 'config.vm.provision \"shell\", inline: <<-SHELL\\n'\n for line in textwrap.dedent(provision).split(\"\\n\"):\n if line.strip() != \"\":\n arg.provision += 12 * \" \" + \" \" + line + \"\\n\"\n arg.provision += 12 * \" \" + \" \" + \"SHELL\\n\"\n else:\n arg.provision = \"\"\n\n\n # not sure how I2 gets found TODO verify, the comment below is not enough\n # the 12 is derived from the indentation of Vagrant in the script\n # TODO we may need not just port 80 to forward\n script = textwrap.dedent(\"\"\"\n Vagrant.configure(2) do |config|\n\n config.vm.define \"{name}\"\n config.vm.hostname = \"{name}\"\n config.vm.box = \"{image}\"\n config.vm.box_check_update = true\n config.vm.network \"forwarded_port\", guest: 80, host: {port}\n config.vm.network \"private_network\", type: \"dhcp\"\n\n # config.vm.network \"public_network\"\n # config.vm.synced_folder \"../data\", \"/vagrant_data\"\n config.vm.provider \"virtualbox\" do |vb|\n # vb.gui = true\n vb.memory = \"{memory}\"\n end\n {provision}\n end\n \"\"\".format(**arg))\n\n return script\n\n @classmethod\n def info(cls, name=None):\n result = Shell.execute(\"vagrant\",\n [\"ssh-config\"],\n cwd=name)\n lines = result.split(\"\\n\")\n data = {}\n for line in lines:\n if not line.strip():\n continue # skip blank lines in the ssh-config output\n attribute, value = line.strip().split(\" \", 1)\n if attribute == \"IdentityFile\":\n value = value.replace('\"','')\n\n data[attribute] = value\n return data\n\n @classmethod\n def list(cls, verbose=False):\n\n def convert(line):\n entry = (' '.join(line.split())).split(' ')\n data = dotdict()\n data.id = entry[0]\n data.name = entry[1]\n data.provider = entry[2]\n data.state = entry[3]\n data.directory = entry[4]\n\n return data\n\n result = Shell.execute(\"vagrant\", \"global-status --prune\")\n if verbose:\n print(result)\n if \"There are no active\" in result:\n return None\n\n lines = []\n for line in result.split(\"\\n\")[2:]:\n if line == \" \":\n break\n else:\n lines.append(convert(line))\n return lines\n\n @classmethod\n def delete(cls, name=None):\n\n result = Shell.execute(\"vagrant\",\n [\"destroy\", \"-f\", name],\n cwd=name)\n return result\n\n @classmethod\n def boot(cls, **kwargs):\n\n arg = dotdict(kwargs)\n arg.cwd = kwargs.get(\"cwd\", None)\n\n vms = cls.to_dict(cls.list())\n\n if arg.name in vms:\n Console.error(\"vm {name} already booted\".format(**arg), traceflag=False)\n return None\n # print result\n\n else:\n cls.create(**kwargs)\n Console.ok(\"{name} created\".format(**arg))\n Console.ok(\"{name} booting ...\".format(**arg))\n\n result = Shell.execute(\"vagrant\",\n [\"up\", arg.name],\n cwd=arg.name)\n Console.ok(\"{name} ok.\".format(**arg))\n\n return result\n\n 
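# A hedged convenience sketch, not part of the original cm4 API: it only\n # composes the suspend/resume wrappers defined below and assumes the named\n # VM exists and vagrant is on the PATH.\n @classmethod\n def restart(cls, name):\n \"\"\"Suspend and then resume a VM in one call.\"\"\"\n cls.suspend(name)\n return cls.resume(name)\n\n 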
@classmethod\n def resume(cls, name):\n result = Shell.execute(\"vagrant\", [\"resume\", name])\n return result\n\n @classmethod\n def suspend(cls, name):\n result = Shell.execute(\"vagrant\", [\"suspend\", name])\n return result\n\n @classmethod\n def execute(cls, name, command, cwd=None):\n\n vms = cls.to_dict(cls.list())\n\n arg = \"ssh {} -c {}\".format(name, command)\n result = Shell.execute(\"vagrant\", [\"ssh\", name, \"-c\", command], cwd=vms[name][\"directory\"])\n return result\n\n # TODO: Seems replicated\n @classmethod\n def to_dict(cls, lst, id=\"name\"):\n d = {}\n for entry in lst:\n d[entry[id]] = entry\n return d\n","sub_path":"cm4/vagrant/vm/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"484541159","text":"quote_dict = {\n 0: ['\"Даже путь в тысячу ри начинается с одного шага...”',\n 'японская пословица',\n '',\n '0.jpeg'],\n 1: ['\"Главное правило: быть живым и только\"',\n 'Виктор Васильев',\n 'выдающийся российский математик',\n '1.jpg'],\n 2: ['\"Лень делает всякое дело трудным\"',\n 'Бенджамин Франклин',\n 'учёный, отец-основатель США',\n '2.jpg'],\n 3: ['Одно \"сегодня\" стоит двух \"завтра\"',\n 'Бенджамин Франклин',\n 'учёный, отец-основатель США',\n '2.jpg'],\n 4: ['\"Кто так часто обманывал тебя, как ты сам?\"',\n 'Бенджамин Франклин',\n 'учёный, отец-основатель США',\n '2.jpg'],\n 5: ['\"Если хочешь иметь досуг, не теряй времени даром\"',\n 'Бенджамин Франклин',\n 'учёный, отец-основатель США',\n '2.jpg'],\n 6: ['\"Лучше красиво делать, чем красиво говорить\"',\n 'Бенджамин Франклин',\n 'учёный, отец-основатель США',\n '2.jpg'],\n 7: ['\"Истиной живут, её не преподают\"',\n 'Герман Гессе, \"Игра в бисер\"',\n 'немецкий писатель',\n '7.jpg'],\n 8: ['\"Счастье человека где-то между свободой и дисциплиной\"',\n 'Иван Павлов',\n 'русский физиолог',\n '8.jpg'],\n 9: ['\"Дисциплина - первый признак победы\"',\n 'Александр Суворов',\n 'русский полководец',\n '9.jpg'],\n 10: ['\"Вода течет вниз, а человек стремится вверх\"',\n 'древнее китайское изречение',\n '',\n '10.jpg'],\n 11: ['\"Я твёрдо верю в удачу. 
И я заметил, что чем больше я работаю, тем больше она мне улыбается\"',\n 'Томас Джефферсон',\n '3-й президент и отец-основатель США',\n '11.jpg'],\n 12: ['\"Всё приходит к тому, кто работает и умеет ждать\"',\n 'Томас Эдисон',\n 'американский предприниматель и изобретатель',\n '12.jpg'],\n 13: ['\"На самом деле, жизнь проста, но мы настойчиво её усложняем\"',\n 'Конфуций',\n 'древнекитайский философ',\n '13.jpg'],\n 14: ['\"Если ты хочешь перемену в будущем - стань этой переменой в настоящем\"',\n 'Махатма Ганди',\n 'освободитель Индии',\n '14.jpg'],\n 15: ['\"Где работа и труд - там поля цветут\"',\n 'русская пословица',\n '',\n '15.jpg'],\n 16: ['\"Сила воображения важнее знаний\"',\n 'Альберт Энштейн',\n 'первооткрыватель теории относительности',\n '16.jpg'],\n 17: ['\"Наука не сводится к сумме фактов как здание не сводится к груде камней\"',\n 'Анри Пуанкаре',\n 'французский математик-универсалист',\n '17.jpg'],\n 18: ['\"У лени шедевров нет\"',\n 'Сальвадор Дали',\n 'испанский художник',\n '18.jpg'],\n}","sub_path":"daily/quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"464381238","text":"###############################################################################\n#Dependencies\n#standard python\nfrom warnings import warn\n#Automat framework provided\nfrom automat.core.hwcontrol.devices.device import Device\n#3rd party hardware vendor, install from Internet\nfrom Phidgets.PhidgetException import PhidgetException\n################################################################################\n\n\n################################################################################\n#class for the anemometer sensor object\nclass Interface(Device):\n def __init__(self, freq_counter, channel, MPH_per_Hz, s0_MPH):\n self.freq_counter = freq_counter\n self.channel = channel\n self.MPH_per_Hz = MPH_per_Hz\n self.s0_MPH = s0_MPH\n \n def initialize(self):\n self.freq_counter.set_enabled(self.channel)\n\n def identify(self):\n freq_counter_idn = self.freq_counter.identify()\n idn = \"NRG40 Anemometer Sensor, channel %d on %s\" % (self.channel, freq_counter_idn)\n return idn\n\n def read(self):\n \"reads the windspeed in MPH\"\n try:\n freq = self.freq_counter.get_frequency(self.channel)\n s = self.MPH_per_Hz*freq + self.s0_MPH #apply calibration\n return s\n except PhidgetException:\n warn(\"Phidget Frequency Counter value is in an unknown state, reporting value as 0.0\")\n return 0.0\n \n def shutdown(self):\n self.freq_counter.shutdown()\n#-------------------------------------------------------------------------------\n# INTERFACE CONFIGURATOR \ndef get_interface(**kwargs):\n freq_counter = kwargs.get('freq_counter')\n channel = int(kwargs.get('channel'))\n MPH_per_Hz = float(kwargs.get('MPH_per_Hz',0.0))\n s0_MPH = float(kwargs.get('s0_MPH',0.0))\n iface = Interface(\n freq_counter,\n channel, \n MPH_per_Hz,\n s0_MPH\n )\n return iface\n################################################################################\n# TEST CODE\n################################################################################\nif __name__ == \"__main__\":\n pass\n","sub_path":"src/yes_o2ab/drivers/fake_devices/NRG_systems/NRG40_anemometer.py","file_name":"NRG40_anemometer.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"575628256","text":"import pygame\nfrom pygame.locals import *\nfrom 
config import *\nfrom context import Context, create_new_context\n\n\ndef init_window():\n pygame.init()\n size = WIN_WIDTH, WIN_HEIGHT\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption(\"Super Mario Boy\")\n return screen\n\n\ndef main():\n screen = init_window()\n\n camera, context = create_new_context(10)\n\n timer = pygame.time.Clock()\n\n while 1:\n timer.tick(60)\n\n if not context.game_over:\n for e in pygame.event.get():\n if e.type == QUIT:\n return\n if e.type == KEYDOWN and e.key == K_UP:\n context.up = True\n\n if e.type == KEYUP and e.key == K_UP:\n context.up = False\n if e.type == KEYDOWN and e.key == K_LEFT:\n context.left = True\n if e.type == KEYDOWN and e.key == K_RIGHT:\n context.right = True\n\n if e.type == KEYUP and e.key == K_RIGHT:\n context.right = False\n if e.type == KEYUP and e.key == K_LEFT:\n context.left = False\n\n screen.fill(BLACK)\n\n player = context.player\n left, right, up = context.left, context.right, context.up\n\n camera.update(player)\n\n platforms = context.platforms\n miracles = context.miracles\n player.update(left, right, up, platforms, miracles)\n\n for enemy in context.enemies:\n enemy.update(platforms, player)\n context.game_over = context.game_over or enemy.game_over\n\n for e in context.entities:\n screen.blit(e.image, camera.apply(e))\n\n pygame.display.flip()\n\n if player.game_over:\n camera, context = create_new_context(10)\n else:\n screen.fill(BLACK)\n\n button = pygame.image.load('yt-icon.png')\n button_rect = button.get_rect()\n b = screen.blit(button, button_rect)\n\n pygame.display.flip()\n\n for e in pygame.event.get():\n if e.type == QUIT:\n return\n if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:\n pos = pygame.mouse.get_pos()\n if b.collidepoint(pos):\n context = Context(context.level, 10)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"633311194","text":"\"\"\"\nThis module provides tools for parsing arguments\n\"\"\"\n\n\nimport argparse\n\n\ndef get_args():\n \"\"\"\n Parses arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Python rss_reader',\n prog='rss_reader',)\n\n parser.add_argument('url',\n nargs='?',\n type=str,\n help='URL to parse.',\n default=None,)\n\n parser.add_argument('--limit',\n type=int,\n default=None,\n help='Limit news topics if provided',)\n\n parser.add_argument('--verbose',\n action='store_true',\n help='Output verbose status messages',)\n\n parser.add_argument('--json',\n action='store_true',\n help='Output news in json format',)\n\n parser.add_argument('--version',\n action='version',\n version='Rss rss_reader 4.0.',\n help='Print version and stop',)\n\n parser.add_argument('--date',\n type=str,\n help='Return news with the specified data',\n default='',)\n\n parser.add_argument('--to-pdf',\n type=str,\n help='Convert news in pdf format, in provided path. Ex: \"/home/\" or \"~/rss_reader\"',\n default=None,\n metavar='PATH',)\n\n parser.add_argument('--to-html',\n type=str,\n help='Convert news in html format, in provided path. 
Ex: \"/home/\" or \"~/rss_reader\"',\n default=None,\n metavar='PATH',)\n\n args = parser.parse_args()\n return args\n","sub_path":"rss_reader/rss_reader/argparser.py","file_name":"argparser.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"78007665","text":"# -*-coding:utf-8-*-\n\nfrom page.contact_page import ContactPage\nfrom page.custom_display_columns_page import CustomDisplayColumnsPage\nimport unittest\nfrom page.pymysqlCommon import OperateMysql\n\n\nclass CaseCustomDisplayColumns(unittest.TestCase):\n '''定制显示列测试'''\n\n def setUp(self):\n print(\"test start\")\n\n\n #测试定制显示项\n def test_customDisplayColumns(self):\n # 验证可选项都存在list 用数据库获取 再转换成list\n OperateMysql.ConectMysql(OperateMysql)\n udfList = OperateMysql.selectSqlCom(OperateMysql, \"SELECT name FROM custom_field \")\n print(udfList)\n udfListAll = []\n i = 0\n while i < len(udfList):\n print(str(udfList[i]))\n udf1 = str(udfList[i])[2:-3]\n print(udf1)\n udfListAll.append(udf1)\n i += 1\n verifySelectList = ['ID', '头像', '名字', '性别', '昵称', '手机号码', '座机号码', '电子邮箱', '国家', '省',\n '城市', '街道', '邮编', '行业','职位', '部门', '公司', '创建来源', '创建时间', '更新时间'] + udfListAll\n print(verifySelectList)\n\n contactPage = ContactPage(self)\n contactPage.goContact()\n customDisplay = CustomDisplayColumnsPage(self)\n '''test1取消定制显示列测试'''\n customDisplay.cancelCustom(verifySelectList)\n '''test2不选定制显示项测试'''\n customDisplay.selectNoneCustom()\n '''test3全选定制显示项测试'''\n customDisplay.selectAllCustom()\n '''test4随机选定制显示项测试'''\n customDisplay.randomSelectCustom()\n '''test5反选定制显示项测试'''\n customDisplay.selectInvertCunstom()\n\n def tearDown(self):\n print(\"test end\")","sub_path":"contact/custom_display_columns_case.py","file_name":"custom_display_columns_case.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"426635365","text":"\"\"\"\nMon Module : ici la doc\n\"\"\"\n\ndef carre(x=1):\n e=1\n if x == 1:\n return 1 , \"1\"\n elif x<=0:\n return 1 , \"pas inferieur a 0\"\n return x*x , \"OK\"\n\ndate=22012015\n","sub_path":"MonModule.py","file_name":"MonModule.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"520699913","text":"# use the conda environmnet\n# python 3.6\n# tensorflow\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nimport time\nimport csv\nimport numpy as np\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nf=open('train_xy_2.csv','r')\nreader=csv.reader(f)\ndata=[]\nfor item in reader:\n data.append(item)\n\ndata.pop(0)\n\ndatax=[]\nfor cust in data:\n datax.append(cust[3:])\ndatax=np.array(datax)\n\ndatay=[]\nfor cust in data:\n datay.append([cust[2]])\ndatay = np.array(datay)\n\nf2=open('test_all2.csv','r')\nreader2=csv.reader(f2)\ndata2=[]\nfor item in reader2:\n data2.append(item)\n\ndata2.pop(0)\n\ndata_test=[]\nfor cust in data2:\n data_test.append(cust[2:])\ndata_test=np.array(data_test)\n\ndata_cust=[]\nfor cust in data2:\n data_cust.append([cust[0]])\n\n\n\nt0=time.time()\nmodel = Sequential()\nmodel.add(Dense(32, input_shape=(784,)))\nmodel.add(Activation('relu'))\n# For a single-input model with 2 classes (binary classification):\n\nmodel = Sequential()\nmodel.add(Dense(32, activation='relu', input_dim=38))# 38 ,157\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='rmsprop',\n 
loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# Generate dummy data\n#data = np.random.random((1000, 100))\n#labels = np.random.randint(2, size=(1000, 1))\n\n# Train the model, iterating on the data in batches of 32 samples\n\nprint(datax)\nprint(datay)\nmodel.fit(datax, datay, epochs=10, batch_size=256)\nresult=model.predict(data_test,batch_size=256)\nprint(result)\nresult_list=result.tolist()\n\n\nwith open('out01.csv', 'w') as f:\n f.write('cust_id,pred_prob\\n')\n for i in range(len(result_list)): # one output row per prediction\n outstring=str(data_cust[i][0])+','+str(round(float(result_list[i][0]),8))+'\\n'\n f.write(outstring)\n\nprint('time=',time.time()-t0)","sub_path":"small.py","file_name":"small.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"296828235","text":"import pygame\r\nimport random\r\npygame.init()\r\n\r\nsw = 800 # Screen width\r\nsh = 600 # Screen height\r\n\r\nscreen = pygame.display.set_mode((sw, sh))\r\npygame.display.set_caption(\"Ping Pong\")\r\nclock = pygame.time.Clock()\r\nbg_color = pygame.Color('grey12')\r\ngame_font = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 60)\r\n\r\nlevel = 1\r\nopponent_speed = 6\r\n\r\nscore_time = None\r\n\r\ndef Start_Game(OS):\r\n global score_time\r\n sw = 800 # Screen width\r\n sh = 600 # Screen height\r\n screen = pygame.display.set_mode((sw, sh))\r\n ball = pygame.Rect(sw // 2 - 15, sh // 2 - 15, 30, 30)\r\n player = pygame.Rect(sw - 20, sh // 2 - 60, 10, 120)\r\n opponent = pygame.Rect(10, sh // 2 - 60, 10, 120)\r\n\r\n bg_color = pygame.Color('grey12')\r\n\r\n # Speeds\r\n ball_speed_x = 6 * random.choice((-1, 1))\r\n ball_speed_y = 6 * random.choice((-1, 1))\r\n\r\n player_speed = 0\r\n opponent_speed = OS\r\n\r\n # Score\r\n player_score = 0\r\n opponent_score = 0\r\n game_font = pygame.font.Font('fonts/SF-Pro-Text-Bold.otf', 32)\r\n\r\n # Sounds\r\n pong_sound = pygame.mixer.Sound('sounds/pong.ogg')\r\n score_sound = pygame.mixer.Sound('sounds/score.ogg')\r\n\r\n def ball_restart():\r\n nonlocal ball_speed_x, ball_speed_y # these live in Start_Game's scope, not at module level\r\n global score_time\r\n\r\n ball.center = (sw // 2, sh // 2)\r\n current_time = pygame.time.get_ticks()\r\n\r\n if current_time - score_time < 700:\r\n ball_speed_x = 0\r\n ball_speed_y = 0\r\n number_three = game_font.render(\"3\", False, (200, 200, 200))\r\n screen.blit(number_three, (sw // 2 - 8, sh // 2 + 50))\r\n elif 700 < current_time - score_time < 1400:\r\n ball_speed_x = 0\r\n ball_speed_y = 0\r\n number_two = game_font.render(\"2\", False, (200, 200, 200))\r\n screen.blit(number_two, (sw // 2 - 8, sh // 2 + 50))\r\n elif 1400 < current_time - score_time < 2100:\r\n ball_speed_x = 0\r\n ball_speed_y = 0\r\n number_one = game_font.render(\"1\", False, (200, 200, 200))\r\n screen.blit(number_one, (sw // 2 - 8, sh // 2 + 50))\r\n else:\r\n ball_speed_x = 6 * random.choice((-1, 1))\r\n ball_speed_y = 6 * random.choice((-1, 1))\r\n score_time = None\r\n\r\n # Main Game Loop\r\n running = True\r\n while running:\r\n screen.fill(bg_color)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False # let this frame finish drawing, then leave the loop\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_DOWN:\r\n player_speed += 7\r\n if event.key == pygame.K_UP:\r\n player_speed -= 7\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_DOWN:\r\n player_speed -= 7\r\n if event.key == pygame.K_UP:\r\n player_speed += 7\r\n\r\n # Ball movement\r\n ball.x += ball_speed_x\r\n ball.y += 
ball_speed_y\r\n\r\n if ball.top <= 0 or ball.bottom >= sh:\r\n pong_sound.play()\r\n ball_speed_y *= -1\r\n\r\n if ball.left <= 0:\r\n score_sound.play()\r\n player_score += 1\r\n score_time = pygame.time.get_ticks()\r\n\r\n if ball.right >= sw:\r\n score_sound.play()\r\n opponent_score += 1\r\n score_time = pygame.time.get_ticks()\r\n\r\n if ball.colliderect(player) or ball.colliderect(opponent):\r\n pong_sound.play()\r\n ball_speed_x *= -1\r\n\r\n if score_time:\r\n ball_restart()\r\n\r\n # Player movement\r\n player.y += player_speed\r\n if player.top <= 0:\r\n player.top = 0\r\n if player.bottom >= sh:\r\n player.bottom = sh\r\n\r\n # Opponent movement\r\n if opponent.bottom < ball.y:\r\n opponent.bottom += opponent_speed\r\n if opponent.top > ball.y:\r\n opponent.top -= opponent_speed\r\n\r\n if opponent.top <= 0:\r\n opponent.top = 0\r\n if opponent.bottom >= sh:\r\n opponent.bottom = sh\r\n\r\n pygame.draw.rect(screen, (200, 200, 200), player)\r\n pygame.draw.rect(screen, (200, 200, 200), opponent)\r\n pygame.draw.ellipse(screen, (200, 200, 200), ball)\r\n pygame.draw.aaline(screen, (200, 200, 200), (sw / 2, 0), (sw / 2, sh))\r\n\r\n # Score\r\n player_text = game_font.render(str(player_score), True, (200, 200, 200))\r\n screen.blit(player_text, (sw // 2 + 20, sh // 2 - 16))\r\n\r\n opponent_text = game_font.render(str(opponent_score), True, (200, 200, 200))\r\n screen.blit(opponent_text, (sw // 2 - 42, sh // 2 - 16))\r\n\r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\n\r\nWelcomeScreen = True\r\nwhile WelcomeScreen:\r\n screen.fill(bg_color)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT: # QUIT never arrives as a KEYDOWN event, so check it on its own\r\n WelcomeScreen = False\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n WelcomeScreen = False\r\n Start_Game(opponent_speed)\r\n if event.key == pygame.K_1:\r\n level = 1\r\n opponent_speed = 6\r\n if event.key == pygame.K_2:\r\n level = 2\r\n opponent_speed = 10\r\n if event.key == pygame.K_3:\r\n level = 3\r\n opponent_speed = 15\r\n\r\n if level == 1:\r\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(sw // 2 - 190, sh - 400, 350, 70), 2)\r\n if level == 2:\r\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(sw // 2 - 190, sh - 300, 350, 70), 2)\r\n if level == 3:\r\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(sw // 2 - 190, sh - 200, 350, 70), 2)\r\n\r\n Welcome_Message = game_font.render(\"PING - PONG\", True, (200, 200, 200))\r\n screen.blit(Welcome_Message, (sw//2-170, 20))\r\n\r\n Select_Level = game_font.render(\"SELECT LEVEL\", True, (200, 200, 200))\r\n screen.blit(Select_Level, (sw//2-200, sh-500))\r\n\r\n Easy = game_font.render(\"EASY\", True, (200, 200, 200))\r\n screen.blit(Easy, (sw // 2-90, sh-400))\r\n\r\n Medium = game_font.render(\"MEDIUM\", True, (200, 200, 200))\r\n screen.blit(Medium, (sw//2-130, sh-300))\r\n\r\n Hard = game_font.render(\"HARD\", True, (200, 200, 200))\r\n screen.blit(Hard, (sw//2-90, sh-200))\r\n\r\n Start = game_font.render(\"PRESS SPACE TO START\", True, (200, 200, 200))\r\n screen.blit(Start, (sw//2-360, sh-100))\r\n\r\n clock.tick(60)\r\n pygame.display.update()","sub_path":"VMC Pygame/VMC Pygame - Class/VMC Pygame - Class 8/VMC Pygame - Class 8.py","file_name":"VMC Pygame - Class 8.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"198171850","text":"import sqlite3\n\n\nclass Sql():\n\n def __init__(self):\n self.conn = 
sqlite3.connect('locale\\\\a.db')\n self.conn.row_factory = self.dict_factory\n\n def __del__(self):\n self.conn.close()\n\n def dict_factory(self, cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n def clear(self):\n sql = \"delete from t_order where date(_time) = date('now', 'localtime')\"\n self.conn.execute(sql)\n self.conn.commit()\n\n def add_orders(self, shop, orders):\n sql = '''replace into t_order(_time,shop,order_id,status,nick,create_time)\n values(datetime('now', 'localtime'),?,?,?,?,?) '''\n self.conn.executemany(sql, [(shop, o['id'], o['status'], o['nick'], o['create_time'] ) for o in orders])\n self.conn.commit()\n\n def get_orders(self):\n sql = ''' select strftime('%m.%d %H点', create_time) hour,shop,order_id,status,nick,create_time\n from t_order\n order by strftime('%m.%d %H', create_time) desc,shop, create_time desc '''\n cur = self.conn.execute(sql)\n return cur.fetchall()\n\n\n\n\n\nif __name__ == '__main__':\n s = Sql()\n a = s.get_orders()\n print(a)\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"618412986","text":"SPACES = {\n\tr\"\\quad\" : (\"mspace\", {\"width\": \"1em\"}, \"hardspace\", \"\"),\n\tr\"\\thinspace\" : (\"mspace\", {\"width\": \"1pt\"}, \"hardspace\", \"\"),\n\tr\"\\enspace\" : (\"mspace\", {\"width\": \"5pt\"}, \"hardspace\", \"\"),\n}\n\nGREEK = {\n\tr\"\\alpha\" : (\"mi\", {}, \"var\", \"α\"),\n\tr\"\\beta\" : (\"mi\", {}, \"var\", \"β\"),\n\tr\"\\gamma\" : (\"mi\", {}, \"var\", \"γ\"),\n\tr\"\\digamma\" : (\"mi\", {}, \"var\", \"ϝ\"),\n\tr\"\\delta\" : (\"mi\", {}, \"var\", \"δ\"),\n\tr\"\\epsilon\" : (\"mi\", {}, \"var\", \"ϵ\"),\n\tr\"\\varepsilon\" : (\"mi\", {}, \"var\", \"ε\"),\n\tr\"\\zeta\" : (\"mi\", {}, \"var\", \"ζ\"),\n\tr\"\\eta\" : (\"mi\", {}, \"var\", \"η\"),\n\tr\"\\theta\" : (\"mi\", {}, \"var\", \"θ\"),\n\tr\"\\vartheta\" : (\"mi\", {}, \"var\", \"ϑ\"),\n\tr\"\\kappa\" : (\"mi\", {}, \"var\", \"κ\"),\n\tr\"\\lambda\" : (\"mi\", {}, \"var\", \"λ\"),\n\tr\"\\mu\" : (\"mi\", {}, \"var\", \"μ\"),\n\tr\"\\nu\" : (\"mi\", {}, \"var\", \"ν\"),\n\tr\"\\xi\" : (\"mi\", {}, \"var\", \"ξ\"),\n\tr\"\\omicron\" : (\"mi\", {}, \"var\", \"ο\"),\n\tr\"\\pi\" : (\"mi\", {}, \"var\", \"π\"),\n\tr\"\\varpi\" : (\"mi\", {}, \"var\", \"ϖ\"),\n\tr\"\\rho\" : (\"mi\", {}, \"var\", \"ρ\"),\n\tr\"\\varrho\" : (\"mi\", {}, \"var\", \"ϱ\"),\n\tr\"\\sigma\" : (\"mi\", {}, \"var\", \"σ\"),\n\tr\"\\varsigma\" : (\"mi\", {}, \"var\", \"ς\"),\n\tr\"\\tau\" : (\"mi\", {}, \"var\", \"τ\"),\n\tr\"\\upsilon\" : (\"mi\", {}, \"var\", \"υ\"),\n\tr\"\\phi\" : (\"mi\", {}, \"var\", \"ϕ\"),\n\tr\"\\varphi\" : (\"mi\", {}, \"var\", \"φ\"),\n\tr\"\\chi\" : (\"mi\", {}, \"var\", \"χ\"),\n\tr\"\\psi\" : (\"mi\", {}, \"var\", \"ψ\"),\n\tr\"\\omega\" : (\"mi\", {}, \"var\", \"ω\"),\n\tr\"\\Alpha\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Α\"),\n\tr\"\\Beta\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Β\"),\n\tr\"\\Gamma\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Γ\"),\n\tr\"\\Digamma\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ϝ\"),\n\tr\"\\Delta\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Δ\"),\n\tr\"\\Zeta\" : (\"mo\", {\"form\": \"prefix\", 
\"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ζ\"),\n\tr\"\\Eta\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Η\"),\n\tr\"\\Theta\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Θ\"),\n\tr\"\\Iota\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ι\"),\n\tr\"\\Kappa\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Κ\"),\n\tr\"\\Lambda\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Λ\"),\n\tr\"\\Mu\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Μ\"),\n\tr\"\\Nu\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ν\"),\n\tr\"\\Xi\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ξ\"),\n\tr\"\\Omicron\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ο\"),\n\tr\"\\Pi\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Π\"),\n\tr\"\\Rho\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ρ\"),\n\tr\"\\Sigma\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Σ\"),\n\tr\"\\Tau\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Τ\"),\n\tr\"\\Upsilon\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Υ\"),\n\tr\"\\Phi\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Φ\"),\n\tr\"\\Chi\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Χ\"),\n\tr\"\\Psi\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ψ\"),\n\tr\"\\Omega\" : (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"Ω\"),\n}\n\nVARLETTERS = {\n\tr\"\\ell\" : (\"mi\", {}, \"var\", \"ℓ\"),\n}\n\nPREFIX = {\n\tr\"\\pm\": (\"mo\", {\"form\": \"prefix\"}, \"operator\", \"±\"), #Possibly infix tho\n\tr\"\\mp\": (\"mo\", {\"form\": \"prefix\"}, \"operator\", \"∓\"),\n}\n\nINFIX = {\n\tr\"\\times\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"×\"),\n\tr\"\\div\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"÷\"),\n\tr\"\\cross\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⨯\"),\n\tr\"\\ast\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"*\"),\n\tr\"\\star\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"☆\"),\n\tr\"\\circ\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∘\"), #\"⚬\",\n\tr\"\\bullet\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"•\"),\n\tr\"\\cdot\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"·\"),\n\tr\"\\cap\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∩\"),\n\tr\"\\cup\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∪\"),\n\tr\"\\given\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"|\"),\n\tr\"\\uplus\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊎\"),\n\tr\"\\sqcap\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊓\"),\n\tr\"\\sqcup\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊔\"),\n\tr\"\\vee\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∨\"),\n\tr\"\\wedge\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∧\"),\n\tr\"\\setminus\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"∖\"),\n\tr\"\\wr\": (\"mo\", {\"form\": \"infix\"}, 
\"operator\", \"≀\"),\n\tr\"\\diamond\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⋄\"),\n\tr\"\\bigtriangleup\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"△\"),\n\tr\"\\bigtriangledown\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"▽\"),\n\tr\"\\triangleleft\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊲\"),\n\tr\"\\triangleright\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊳\"),\n\tr\"\\lhd\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊲\"), # Same as above\n\tr\"\\rhd\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊳\"), # Same as above\n\tr\"\\unlhd\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊴\"),\n\tr\"\\unrhd\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊵\"),\n\tr\"\\oplus\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊕\"),\n\tr\"\\ominus\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊖\"),\n\tr\"\\otimes\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊗\"),\n\tr\"\\oslash\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊘\"),\n\tr\"\\odot\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊙\"),\n\tr\"\\ocirc\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⊚\"),\n\tr\"\\bigcirc\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"○\"),\n\tr\"\\dagger\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"†\"),\n\tr\"\\ddagger\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"‡\"),\n\tr\"\\amalg\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"⨿\"),\n\n\tr\"\\bowtie\": (\"mo\", {}, \"operator\", \"⋈\"),\n\tr\"\\Join\": (\"mo\", {}, \"operator\", \"⋈\"),#two variants of this one exists in latek\n\tr\"\\ltimes\": (\"mo\", {}, \"operator\", \"⋉\"),\n\tr\"\\rtimes\": (\"mo\", {}, \"operator\", \"⋊\"),\n\tr\"\\smile\": (\"mo\", {}, \"operator\", \"⌣\"),\n\tr\"\\frown\": (\"mo\", {}, \"operator\", \"⌢\"),\n}\n\nPOSTFIX = {\n\tr\"\\!\": (\"mo\", {\"form\": \"infix\"}, \"operator\", \"±\"),\n}\n\nRELATIONS = {\n\tr\"\\qeq\": (\"mo\", {}, \"relation\", \"≟\"),\n\n\tr\"\\eq\": (\"mo\", {}, \"relation\", \"=\"),\n\tr\"\\leq\": (\"mo\", {}, \"relation\", \"≤\"),\n\tr\"\\prec\": (\"mo\", {}, \"relation\", \"≺\"),\n\tr\"\\preceq\": (\"mo\", {}, \"relation\", \"⪯\"),\n\tr\"\\ll\": (\"mo\", {}, \"relation\", \"≪\"),\n\tr\"\\subset\": (\"mo\", {}, \"relation\", \"⊂\"),\n\tr\"\\subseteq\": (\"mo\", {}, \"relation\", \"⊆\"),\n\tr\"\\sqsubset\": (\"mo\", {}, \"relation\", \"⊏\"),\n\tr\"\\sqsubseteq\": (\"mo\", {}, \"relation\", \"⊑\"),\n\tr\"\\in\": (\"mo\", {}, \"relation\", \"∈\"),\n\tr\"\\ni\": (\"mo\", {}, \"relation\", \"∋\"),\n\tr\"\\vdash\": (\"mo\", {}, \"relation\", \"⊢\"),\n\tr\"\\vDash\": (\"mo\", {}, \"relation\", \"⊨\"),\n\tr\"\\geq\": (\"mo\", {}, \"relation\", \"≥\"),\n\tr\"\\succ\": (\"mo\", {}, \"relation\", \"≻\"),\n\tr\"\\succeq\": (\"mo\", {}, \"relation\", \"⪰\"),\n\tr\"\\gg\": (\"mo\", {}, \"relation\", \"≫\"),\n\tr\"\\supset\": (\"mo\", {}, \"relation\", \"⊃\"),\n\tr\"\\supseteq\": (\"mo\", {}, \"relation\", \"⊇\"),\n\tr\"\\sqsupset\": (\"mo\", {}, \"relation\", \"⊐\"),\n\tr\"\\sqsupseteq\": (\"mo\", {}, \"relation\", \"⊒\"),\n\tr\"\\dashv\": (\"mo\", {}, \"relation\", \"⊣\"),\n\tr\"\\Dashv\": (\"mo\", {}, \"relation\", \"⫤\"),\n\tr\"\\equiv\": (\"mo\", {}, \"relation\", \"≡\"),\n\tr\"\\sim\": (\"mo\", {}, \"relation\", \"∼\"),\n\tr\"\\simeq\": (\"mo\", {}, \"relation\", \"≃\"),\n\tr\"\\asymp\": (\"mo\", {}, \"relation\", \"≍\"),\n\tr\"\\approx\": (\"mo\", {}, \"relation\", \"≈\"),\n\tr\"\\cong\": (\"mo\", {}, \"relation\", \"≅\"),\n\tr\"\\doteq\": (\"mo\", {}, \"relation\", \"≐\"),\n\tr\"\\propto\": (\"mo\", {}, 
\"relation\", \"∝\"),\n\tr\"\\models\": (\"mo\", {}, \"relation\", \"⊧\"),\n\tr\"\\perp\": (\"mo\", {}, \"relation\", \"⟂\"),\n\tr\"\\mid\": (\"mo\", {}, \"relation\", \"∣\"),\n\tr\"\\parallel\": (\"mo\", {}, \"relation\", \"∥\"),\n\n\tr\"\\implies\": (\"mo\", {}, \"arrow\", \"⇒\"),\n\tr\"\\iff\": (\"mo\", {}, \"arrow\", \"⇔\"),\n\tr\"\\equivalently\": (\"mo\", {}, \"arrow\", \"⇔\"),\n\n\tr\"\\mapsto\": (\"mo\", {}, \"arrow\", \"↦\"),\n\tr\"\\to\": (\"mo\", {}, \"arrow\", \"→\"),\n\tr\"\\longmapsto\": (\"mo\", {}, \"arrow\", \"⟼\"),\n\tr\"\\leadsto\": (\"mo\", {}, \"arrow\", \"⤳\"),\n}\n\nNOTRELATIONS = {\n\tr\"\\not\": (\"mo\", {}, \"relation\", \"¬\"), #fix\n\n\tr\"\\neq\": (\"mo\", {}, \"relation\", \"≠\"),\n\tr\"\\nleq\": (\"mo\", {}, \"relation\", \"≮\"),\n\tr\"\\nprec\": (\"mo\", {}, \"relation\", \"⊀\"),\n\tr\"\\npreceq\": (\"mo\", {}, \"relation\", \"⋠\"),\n\t# would be here\n\tr\"\\nsubset\": (\"mo\", {}, \"relation\", \"⊄\"),\n\tr\"\\nsubseteq\": (\"mo\", {}, \"relation\", \"⊈\"),\n\tr\"\\nsqsubset\": (\"mo\", {}, \"relation\", \"⊏̸\"),\n\tr\"\\nsqsubseteq\": (\"mo\", {}, \"relation\", \"⋢\"),\n\tr\"\\nin\": (\"mo\", {}, \"relation\", \"∉\"),\n\tr\"\\nni\": (\"mo\", {}, \"relation\", \"∌\"),\n\tr\"\\nvdash\": (\"mo\", {}, \"relation\", \"⊬\"),\n\tr\"\\nvDash\": (\"mo\", {}, \"relation\", \"⊭\"),\n\tr\"\\ngeq\": (\"mo\", {}, \"relation\", \"≯\"),\n\tr\"\\nsucc\": (\"mo\", {}, \"relation\", \"⊁\"),\n\tr\"\\nsucceq\": (\"mo\", {}, \"relation\", \"⋡\"),\n\t# would be here\n\tr\"\\nsupset\": (\"mo\", {}, \"relation\", \"⊅\"),\n\tr\"\\nsupseteq\": (\"mo\", {}, \"relation\", \"⊉\"),\n\tr\"\\nsqsupset\": (\"mo\", {}, \"relation\", \"⊐̸\"),\n\tr\"\\nsqsupseteq\": (\"mo\", {}, \"relation\", \"⋣\"),\n\tr\"\\ndashv\": (\"mo\", {}, \"relation\", \"&ndashv;\"),\n\t#<\\nDashv> would be here\n\t#<\\nequiv> would be here\n\tr\"\\nsim\": (\"mo\", {}, \"relation\", \"≁\"),\n\tr\"\\nsimeq\": (\"mo\", {}, \"relation\", \"≄\"),\n\tr\"\\nasymp\": (\"mo\", {}, \"relation\", \"≭\"),\n\tr\"\\napprox\": (\"mo\", {}, \"relation\", \"≉\"),\n\tr\"\\ncong\": (\"mo\", {}, \"relation\", \"≇\"),\n\t#<\\ndoteq> would be here\n\t# would be here\n\t# would be here\n\t#<\\nperp> would be here\n\tr\"\\nmid\": (\"mo\", {}, \"relation\", \"∤\"),\n\tr\"\\nparallel\": (\"mo\", {}, \"relation\", \"∦\"),\n\n\tr\"\\nimplies\": (\"mo\", {}, \"arrow\", \"⇏\"),\n\tr\"\\niff\": (\"mo\", {}, \"arrow\", \"⇎\"),\n\tr\"\\nequivalently\": (\"mo\", {}, \"arrow\", \"⇎\"),\n}\n\nARROWS = {\n\tr\"\\leftarrow\": (\"mo\", {}, \"arrow\", \"←\"),\n\tr\"\\Leftarrow\": (\"mo\", {}, \"arrow\", \"⇐\"),\n\tr\"\\twoheadleftarrow\": (\"mo\", {}, \"arrow\", \"↞\"),\n\tr\"\\rightarrow\": (\"mo\", {}, \"arrow\", \"→\"),\n\tr\"\\Rightarrow\": (\"mo\", {}, \"arrow\", \"⇒\"),\n\tr\"\\twoheadrightarrow\": (\"mo\", {}, \"arrow\", \"↠\"),\n\tr\"\\leftrightarrow\": (\"mo\", {}, \"arrow\", \"↔\"),\n\tr\"\\Leftrightarrow\": (\"mo\", {}, \"arrow\", \"⇔\"),\n\tr\"\\hookleftarrow\": (\"mo\", {}, \"arrow\", \"↩\"),\n\tr\"\\leftharpoonup\": (\"mo\", {}, \"arrow\", \"↼\"),\n\tr\"\\leftharpoondown\": (\"mo\", {}, \"arrow\", \"↽\"),\n\tr\"\\rightleftharpoons\": (\"mo\", {}, \"arrow\", \"⇌\"),\n\tr\"\\longleftarrow\": (\"mo\", {}, \"arrow\", \"⟵\"),\n\tr\"\\Longleftarrow\": (\"mo\", {}, \"arrow\", \"⟸\"),\n\tr\"\\longrightarrow\": (\"mo\", {}, \"arrow\", \"⟶\"),\n\tr\"\\Longrightarrow\": (\"mo\", {}, \"arrow\", \"⟹\"),\n\tr\"\\longleftrightarrow\": (\"mo\", {}, \"arrow\", \"⟷\"),\n\tr\"\\Longleftrightarrow\": (\"mo\", {}, \"arrow\", \"⟺\"),\n\tr\"\\hookrightarrow\": 
(\"mo\", {}, \"arrow\", \"↪\"),\n\tr\"\\righttharpoonup\": (\"mo\", {}, \"arrow\", \"⇀\"),\n\tr\"\\rightharpoondown\": (\"mo\", {}, \"arrow\", \"⇁\"),\n\tr\"\\uparow\": (\"mo\", {}, \"arrow\", \"↑\"),\n\tr\"\\Uparrow\": (\"mo\", {}, \"arrow\", \"⇑\"),\n\tr\"\\downarrow\": (\"mo\", {}, \"arrow\", \"↓\"),\n\tr\"\\Downarrow\": (\"mo\", {}, \"arrow\", \"⇓\"),\n\tr\"\\updownarrow\": (\"mo\", {}, \"arrow\", \"↕\"),\n\tr\"\\Updownarrow\": (\"mo\", {}, \"arrow\", \"⇕\"),\n\tr\"\\nearrow\": (\"mo\", {}, \"arrow\", \"↗\"),\n\tr\"\\searrow\": (\"mo\", {}, \"arrow\", \"↘\"),\n\tr\"\\swarrow\": (\"mo\", {}, \"arrow\", \"↙\"),\n\tr\"\\nwarrow\": (\"mo\", {}, \"arrow\", \"↖\"),\n}\n\nMISC = {\n\tr\"\\ldots\": (\"mo\", {}, \"ellipsis\", \"…\"),\n\tr\"\\cdots\": (\"mo\", {}, \"ellipsis\", \"⋯\"),\n\tr\"\\vdots\": (\"mo\", {}, \"ellipsis\", \"⋮\"),\n\tr\"\\ddots\": (\"mo\", {}, \"ellipsis\", \"⋱\"),\n\tr\"\\Ddots\": (\"mo\", {}, \"ellipsis\", \"⋰\"),\n\tr\"\\aleph\": (\"mi\", {}, \"constant\", \"ℵ\"),\n\tr\"\\prime\": (\"mo\", {}, \"operator\", \"′\"),\n\tr\"\\forall\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∀\"),\n\tr\"\\infty\": (\"mi\", {}, \"constant\", \"∞\"),\n\tr\"\\exists\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∃\"),\n\tr\"\\qed\": (\"mo\", {}, \"operator\", \"□\"),\n\tr\"\\Box\": (\"mo\", {}, \"operator\", \"□\"),\n\tr\"\\imath\": (\"mi\", {\"mathvariant\": \"italic\"}, \"constant\", \"ı\"),\n\tr\"\\jmath\": (\"mi\", {\"mathvariant\": \"italic\"}, \"constant\", \"ȷ\"),\n\tr\"\\nabla\": (\"mo\", {}, \"operator\", \"∇\"),\n\tr\"\\del\": (\"mo\", {}, \"operator\", \"∇\"),\n\tr\"\\d\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"d\"),\n\tr\"\\partial\": (\"mo\", {}, \"operator\", \"∂\"),\n\tr\"\\top\": (\"mo\", {}, \"operator\", \"⊤\"),\n\tr\"\\bot\": (\"mo\", {}, \"operator\", \"⊥\"), #Left/Right Tack: dashv, vdash)\n\tr\"\\angle\": (\"mo\", {}, \"operator\", \"∠\"),\n}\n\nBIG_OP = {\n\tr\"\\sum\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"∑\"),\n\tr\"\\prod\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"∏\"),\n\tr\"\\coprod\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"∐\"),\n\tr\"\\int\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∫\"),\n\tr\"\\iint\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∬\"),\n\tr\"\\iiint\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∭\"),\n\tr\"\\oint\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"∲\"),\n\tr\"\\bigcap\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⋂\"),\n\tr\"\\intersection\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⋂\"),\n\tr\"\\bigcup\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⋃\"),\n\tr\"\\union\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⋃\"),\n\tr\"\\bigsqcup\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⨆\"),\n\tr\"\\bigvee\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\"}, \"operator\", \"⋁\"),\n\tr\"\\bigwedge\": (\"mo\", {\"form\": \"prefix\", \"largeop\": 
\"true\"}, \"operator\", \"⋀\"),\n\tr\"\\bigodot\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⨀\"),\n\tr\"\\bigotimes\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⨂\"),\n\tr\"\\bigoplus\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⨁\"),\n\tr\"\\biguplus\": (\"mo\", {\"form\": \"prefix\", \"largeop\": \"true\", \"movablelimits\": \"true\"}, \"operator\", \"⨄\"),\n}\n\nFUNCTIONS = {\n\tr\"\\arg\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"arg\"),\n\tr\"\\deg\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"deg\"),\n\tr\"\\cos\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"cos\"),\n\tr\"\\cosh\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"cosh\"),\n\tr\"\\sin\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"sin\"),\n\tr\"\\sinh\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"sinh\"),\n\tr\"\\tan\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"tan\"),\n\tr\"\\tanh\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"tanh\"),\n\tr\"\\exp\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"exp\"),\n\tr\"\\log\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"log\"),\n\tr\"\\lim\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"lim\"),\n\tr\"\\sup\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"sup\"),\n\tr\"\\limsup\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"limsup\"),\n\tr\"\\inf\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"inf\"),\n\tr\"\\liminf\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"liminf\"),\n\tr\"\\max\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\", \"movablelimits\": \"true\"}, \"operator\", \"max\"),\n\tr\"\\argmax\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"argmax\"),\n\tr\"\\min\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\", \"movablelimits\": \"true\"}, \"operator\", \"min\"),\n\tr\"\\argmin\": (\"mo\", {\"form\": \"prefix\", \"movablelimits\": \"true\"}, \"operator\", \"argmin\"),\n\tr\"\\det\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\", \"rspace\": \"0\"}, \"operator\", \"det\"),\n\tr\"\\diag\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"diag\"),\n\tr\"\\ker\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\"}, \"operator\", \"ker\"),\n\tr\"\\mod\": (\"mo\", {\"form\": \"prefix\", \"lspace\": \"0\"}, \"operator\", \"mod\"),\n\tr\"\\sgn\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"sgn\"),\n\n\tr\"\\fourier\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"ℱ\"),\n\tr\"\\laplace\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"ℒ\"),\n\tr\"\\mellin\": (\"mo\", {\"form\": \"prefix\", \"rspace\": \"0\"}, \"operator\", \"ℳ\"),\n}\n\nSETS = {\n\tr\"\\emptyset\": (\"mi\", {}, \"constant\", \"∅\"),\n\tr\"\\primes\": (\"mi\", {}, \"constant\", \"ℙ\"),\n\tr\"\\naturals\": (\"mi\", {}, \"constant\", \"ℕ\"),\n\tr\"\\integers\": (\"mi\", {}, \"constant\", \"ℤ\"),\n\tr\"\\rationals\": (\"mi\", {}, \"constant\", \"ℚ\"),\n\tr\"\\algebraics\": 
(\"mi\", {}, \"constant\", \"𝔸\"),\n\tr\"\\reals\": (\"mi\", {}, \"constant\", \"ℝ\"),\n\tr\"\\imaginaries\": (\"mi\", {}, \"constant\", \"𝕀\"),\n\tr\"\\complexes\": (\"mi\", {}, \"constant\", \"ℂ\"),\n\tr\"\\quaternions\": (\"mi\", {}, \"constant\", \"ℍ\"),\n\tr\"\\octonions\": (\"mi\", {}, \"constant\", \"𝕆\"),\n\tr\"\\sedenions\": (\"mi\", {}, \"constant\", \"𝕊\"),\n}\n\nLOGIC = {\n}\n\nGEOMETRY = {\n}\n\nCALCULUS = {\n}\n\nCHEMISTRY = {\n\tr\"\\alembic\": (\"mi\", {}, \"symbol\", \"⚗\"), #⚗\n\tr\"\\atom\": (\"mi\", {}, \"symbol\", \"⚛\"), #⚛\n\tr\"\\radioactive\": (\"mi\", {}, \"symbol\", \"☢\"), #☢\n\tr\"\\biohazard\": (\"mi\", {}, \"symbol\", \"☣\"), #☣\n\tr\"\\poisonold\": (\"mi\", {}, \"symbol\", \"☠\"), #☠\n\tr\"\\equilibrium\": (\"mo\", {}, \"operator\", \"⇌\"), #⇌\n\tr\"\\reverseequilibrium\": (\"mo\", {}, \"operator\", \"⇋\"), #⇋\n\tr\"\\biequation\": (\"mo\", {}, \"operator\", \"⇄\"), #⇄\n\tr\"\\requation\": (\"mo\", {}, \"operator\", \"→\"), #→\n\tr\"\\Requation\": (\"mo\", {}, \"operator\", \"⟶\"), #⟶\n\tr\"\\lequation\": (\"mo\", {}, \"operator\", \"←\"), #←\n\tr\"\\Lequation\": (\"mo\", {}, \"operator\", \"⟵\"), #⟵\n\tr\"\\aqua\": (\"ms\", {\"lquote\":\"(\", \"rquote\":\")\"}, \"symbol\", \"aq\"), #↑\n\tr\"\\liquid\": (\"ms\", {\"lquote\":\"(\", \"rquote\":\")\"}, \"symbol\", \"l\"), #↑\n\tr\"\\gas\": (\"ms\", {\"lquote\":\"(\", \"rquote\":\")\"}, \"symbol\", \"g\"), #↑\n\tr\"\\solid\": (\"ms\", {\"lquote\":\"(\", \"rquote\":\")\", \"class\": \"red\"}, \"symbol\", \"s\"), #↑\n\tr\"\\togas\": (\"mi\", {}, \"symbol\", \"↑\"), #↑\n\tr\"\\tosolid\": (\"mi\", {}, \"symbol\", \"↓\"), #↓\n}\n\nPHYSICS = {\n\tr\"\\degree\": (\"mo\", {}, \"operator\", \"°\"),\n\tr\"\\hbar\": (\"mi\", {}, \"constant\", \"ℏ\"),\n\tr\"\\h\": (\"mi\", {}, \"constant\", \"ℎ\"),\n}\n","sub_path":"archive/mumath/Context/UNICODE.py","file_name":"UNICODE.py","file_ext":"py","file_size_in_byte":20096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619239840","text":"#Korisnik unosi prirodne brojeve u niz.\n#Prilikom unosa izbrojati koliko je pogrešno unesenih.\n#Korisnik prestaje unositi brojeve kad se unese 0.\n#Program treba za svaki element niza ispisati zbroj prve i zadnje znamenke ako je paran,\n#a ako je neparan treba ispisati njihov umnožak.\n\n\ndef izracunaj_prvu_znamenku(x):\n while x != 0:\n x = x // 10\n if x % 10 == x:\n return x\n\n#definiramo početne uvjete\nniz = []\npogresni_brojevi = 0\n\n#unos u niz i provjera pogresnih unosa\n#dovoljno provjeriti jesu li brojevi manji od nula jer je preduvjet da korisnik unosi prirodne brojeve\n#break ako korisnik unese nulu\nwhile True:\n broj = int(input(\"Unesi prirodan broj: \"))\n \n if(broj == 0):\n break\n\n if(broj < 0):\n pogresni_brojevi += 1\n else: \n niz.append(broj)\n\n#for petlja kroz niz i provjerimo elemente i ispišemo rješenje\nfor broj in niz:\n prva_znamenka = izracunaj_prvu_znamenku(broj)\n #prva_znamenka = int(str(broj)[0]) - izračun prve znamenke preko stringa\n zadnja_znamenka = broj % 10\n if (broj % 2 == 0):\n #ako je paran\n print(\"Broj:\", broj, \"- rezultat:\", prva_znamenka + zadnja_znamenka)\n else:\n #ako je neparan\n print(\"Broj:\", broj, \"- rezultat:\", prva_znamenka * zadnja_znamenka)\n \n","sub_path":"Python-3.x/PMF-Split-Programiranje-1/nizovi-zadatak-3.py","file_name":"nizovi-zadatak-3.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"381942629","text":"# -*- 
coding: utf-8 -*-\nVERSION = \"0.0.2\"\n__author__ = \"Munis Isazade\"\n__email__ = \"munisisazade@gmail.com\"\n\nimport re, sys\n\nCOMMANDS = (\n    \"createapp\",\n    \"version\",\n    \"author\",\n    \"help\"\n)\n\n\ndef help():\n    print(\"\")\n    print(\"Type 'pyquick help <subcommand>' for help on a specific subcommand.\")\n    print(\"\")\n    print(\"Available subcommands:\")\n    print(\"\")\n    print(\"\\033[31m[pyquick]\\033[0m\")\n    for command in COMMANDS:\n        print(\"\\t\" + command)\n\n\nclass Command(object):\n    def __init__(self, command):\n        self.command_name = command\n        self.version = VERSION\n\n    def run(self):\n        if self.command_name == \"createapp\":\n            self.createapp()\n        elif self.command_name == \"version\":\n            print(self.version)\n        elif self.command_name == \"author\":\n            print(\"%s <%s>\" % (__author__, __email__))\n        else:\n            self.unknowncommand()\n\n    def createapp(self):\n        pass\n\n    def unknowncommand(self):\n        print(\"Unknown command: '%s'\" % self.command_name)\n        print(\"Type 'pyquick help' for usage.\")\n\n\nif __name__ == '__main__':\n    # if sys.argv[0] == \"pyquick\":\n    if len(sys.argv) == 1:\n        help()\n\n    if len(sys.argv) > 1:\n        if sys.argv[1] == \"help\":\n            help()\n        else:\n            command = Command(sys.argv[1])\n            command.run()\n","sub_path":"pyquick/bin/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"34645017","text":"# -*- coding: UTF-8 -*-\n# Author: 元大證券資訊部溫鳳祥\n\nimport os\nimport json\nfrom math import sin, cos, atan2, acos, sqrt, radians\n\n\n# Method 1: https://blog.lovian.org/python/2018/04/16/python-calculate-distance-in-lat-lon.html\ndef calc_distance(start_lat, start_lng, end_lat, end_lng):\n    # approximate radius of earth in km\n    R = 6373.0\n\n    # convert degrees to radians before applying the haversine formula\n    start_lat = radians(start_lat)\n    start_lng = radians(start_lng)\n    end_lat = radians(end_lat)\n    end_lng = radians(end_lng)\n\n    dlng = end_lng - start_lng\n    dlat = end_lat - start_lat\n\n    a = sin(dlat / 2)**2 + cos(start_lat) * cos(end_lat) * sin(dlng / 2)**2\n    c = 2 * atan2(sqrt(a), sqrt(1 - a))\n    return R * c * 1000\n\n\n# Method 2: https://www.w3resource.com/python-exercises/math/python-math-exercise-27.php\ndef calc_distance2(start_lat, start_lng, end_lat, end_lng):\n    start_lat = radians(start_lat)\n    start_lng = radians(start_lng)\n    end_lat = radians(end_lat)\n    end_lng = radians(end_lng)\n    # return 6371.01 * 1000 * acos(sin(start_lat) * sin(end_lat) + cos(start_lat) * cos(end_lat) * cos(start_lng - end_lng))\n    return 6373.0 * 1000 * acos(sin(start_lat) * sin(end_lat) + cos(start_lat) * cos(end_lat) * cos(start_lng - end_lng))\n\n\nif __name__ == '__main__':\n    # 元大投信: 25°03'09.0\"N 121°32'35.7\"E\n    lat0 = 25.03090\n    lng0 = 121.32357\n    print(\"元大投信座標:lat=%.2f lng=%.2f\" % (lat0, lng0))\n\n    try:\n        # read U-Bike stations info file\n        curr_dir = os.path.dirname(os.path.realpath(__file__))\n        src_file = open('%s/YouBikeTP' % curr_dir, mode='r', encoding='UTF-8')\n        json_obj = json.loads(src_file.read())\n\n        num2 = None\n        sinfo2 = None\n        min_dist = 99999999\n        \"\"\"\n        U-Bike JSON 格式: https://data.taipei/opendata/datalist/apiAccess?scope=datasetMetadataSearch&q=id:8ef1626a-892a-4218-8344-f7ac46e1aa48\n        sno:序號(?)\n        sna:場站名稱(中文)\n        tot:場站總停車格\n        sbi:場站目前車輛數量\n        sarea:場站區域(中文)\n        mday:資料更新時間\n        lat:緯度\n        lng:經度\n        ar:地(中文)\n        sareaen:場站區域(英文)\n        snaen:場站名稱(英文)\n        aren:地址(英文)\n        bemp:空位數量\n        act:全站禁用狀態\n        \"\"\"\n        for num in json_obj[\"retVal\"]:\n            sinfo = json_obj[\"retVal\"][num]\n            sno = sinfo['sno']          # 序號(?)\n            sna = sinfo['sna']          # 場站名稱(中文)\n            sarea = sinfo['sarea']      # 場站區域(中文)\n            ar = sinfo['ar']            # 地(中文)\n            sbi = int(sinfo['sbi'])     # 場站目前車輛數量\n            bemp = int(sinfo['bemp'])   # 空位數量\n            act = int(sinfo['act'])     # 全站禁用狀態\n            lat = 
float(sinfo['lat']) # 緯度\n lng = float(sinfo['lng']) # 經度\n dist = calc_distance(lat0, lng0, lat, lng)\n dist2 = calc_distance2(lat0, lng0, lat, lng)\n # print('[%s] sna=%s sarea=%s sbi=%d bemp=%d lat=%.2f lng=%.2f act=%d (dist=%.2fm dist2=%.2fm)'\n # % (sno, sna, sarea, sbi, bemp, lat, lng, act, dist, dist2))\n\n if sbi >= 20 and bemp >= 20 and act == 1:\n if dist < 500:\n print('Found(1): [%s] sna=%s sarea=%s sbi=%d bemp=%d lat=%.2f lng=%.2f act=%d (dist=%.2fm dist2=%.2fm)'\n % (sno, sna, sarea, sbi, bemp, lat, lng, act, dist, dist2))\n elif dist2 < 500:\n print('Found(2)[%s] sna=%s sarea=%s sbi=%d bemp=%d lat=%.2f lng=%.2f act=%d (dist=%.2fm dist2=%.2fm)'\n % (sno, sna, sarea, sbi, bemp, lat, lng, act, dist, dist2))\n else:\n print(\"找不到符合條件的 U-Bike 站點!\")\n\n src_file.close()\n except IOError as ex:\n print('Open file failed: ' + str(ex))\n except Exception as ex:\n print('Exception: ' + str(ex))\n\n\n","sub_path":"yuanta_python3-master/作業/Youbike/溫鳳祥.py","file_name":"溫鳳祥.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"554424418","text":"# alpha-GANの学習の実行\r\nimport argparse\r\nimport os\r\n# 自作モジュール\r\nfrom train import train\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\n\r\n\r\n# 保存先のディレクトリのパス\r\nsavedir = '../progress/tmp'\r\n# 使用データセットのリストのパス\r\n_list = '../imagelist/train.csv'\r\n# データセットまでのパス\r\nroot = '../dataset'\r\n\r\n# コマンドライン引数のパース\r\nparser = argparse.ArgumentParser(description='alpha Generative Adversarial Net')\r\nparser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID (negative value indicates CPU)')\r\nparser.add_argument('--epochs', '-e', default=1000, type=int, help='number of epochs to learn')\r\nparser.add_argument('--batchsize', '-b', default=64, type=int, help='learning minibatch size')\r\nparser.add_argument('--nz', '-z', default=64, type=int, help='dimensionality of random vectors')\r\nargs = parser.parse_args()\r\n\r\n# コマンドライン引数により指定されたパラメータを変数に格納\r\nGPU_ID = args.gpu\r\nEPOCHS = args.epochs\r\nBATCH_SIZE = args.batchsize\r\nNZ = args.nz\r\n\r\n# 設定内容を表示\r\nprint('GPU: {}'.format(GPU_ID))\r\nprint('Num. of epochs: {}'.format(EPOCHS))\r\nprint('Minibatch size: {}'.format(BATCH_SIZE))\r\nprint('Dim. 
of random vectors: {}'.format(NZ))\r\n\r\n# 使用するGPUの指定\r\nos.environ['CUDA_DEVICE_ORDER'] = \"PCI_BUS_ID\"\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(GPU_ID)\r\n\r\ntrain(savedir, _list, root, EPOCHS, BATCH_SIZE, NZ)\r\n","sub_path":"GAN/alpha_GAN/train/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"159249813","text":"from measure_dim import ComputerVision\nimport os\n\ncv = ComputerVision()\n\ncwd = os.getcwd()\nfiles = os.listdir(cwd)\n\n# images = []\n# meta = []\nprint(\"################################## Start Script ##################################\")\nfor f in files:\n if f.lower().endswith('tif'):\n image_name = os.path.splitext(f)[0]\n print(\"##################################################################################\")\n print(\"## Starting measures for %s\" % image_name)\n xml_file = image_name + \".tif_meta.xml\"\n if xml_file in files:\n pixels = cv.utils.get_pixels(xml_file)\n print(\"Get pixels: %s\" % pixels)\n data = cv.measure_object_dimension(f, xml_file=xml_file, unit = 'um')\n print(data)\n\n # \timages.append(f)\n # \tfor i in images:\n # \t image = i\n # \t cv.measure_object_dimension(image, coin_diameter = 100, unit = 'um')\n #\n # if f.lower().endswith('xml'):\n # \tmeta.append(f)\n # \tfor m in meta:\n # \t\tfile = m\n # \t\tcv.utils.get_pixels(file)\n\n\n\n\n\n\n#192.97\n#88.55\n#13158.83\n#465\n","sub_path":"inst/python/read_files.py","file_name":"read_files.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"396271502","text":"\"\"\"\nProjet Vasarely (1.0)\nAuteur : David Bomard\nDate : avril 2020\nProjet de production graphique inspirée par le peintre Vasarely\n\"\"\"\nfrom math import pi, sin, cos, sqrt, acos, asin\nimport turtle\n\n\ndef calculate_sommets(center_point, rayon):\n \"\"\"\n Calcule les coordonnées des points de l'hexagone et les renvoie sous formes d'objet\n de 6 tuples\n Entrées :\n center_point: coordonnées du centre\n rayon: rayon entre le centre et les points\n Sortie :\n sommets : objet contenant les 6 couples de points\n \"\"\"\n\n # initialisation d'un objet de 6 tuples\n sommets = [(0, 0, 0)] * 6\n\n # calcul et remplissage des coordonnées des 6 sommets de l'hexagone\n for i in range(len(sommets)):\n sommets[i] = (center_point[0] + cos(2 * pi / 6 * i) * rayon, center_point[1] + sin(2 * pi / 6 * i) * rayon, 0)\n\n return sommets\n\n\ndef dessine_pave(point1, point2, point3, point4, couleur):\n \"\"\"\n peint un pavé ayant pour sommets les 4 points p1 à p4 dont les coordonnées dans le plan sont\n représentés sous forme de tuples(x, y, z)\n Entrées :\n les 4 points de p1 à p4\n couleur : couleur du pavé\n \"\"\"\n turtle.up()\n turtle.goto(point1[0], point1[1]) # déplacer la tortue au point n°1\n turtle.down() # commencer à dessiner\n turtle.color(couleur) # changer la couleur de crayon\n turtle.begin_fill() # commencer à remplir\n turtle.goto(point2[0], point2[1]) # relier tous les points\n turtle.goto(point3[0], point3[1])\n turtle.goto(point4[0], point4[1])\n turtle.goto(point1[0], point1[1])\n turtle.end_fill() # clôturer et remplir le pavé\n turtle.up() # lever le crayon\n\n\ndef hexagone(point, longueur, col, centre, rayon):\n \"\"\"\n La fonction peint un hexagone déformé en traçant des lignes droites entre le centre et les extrémités\n dont la position est calculée avec la fonction 
'deformation'\n Entrées:\n point : coordonnées du centre de l'hexagone sous forme d'un tripe tuple (x, y, z)\n longueur : distance entre le centre et les sommets de l'hexagone\n col : les 3 couleurs utilisées pour les 3 pavés, sous forme d'un triple tuple (col1, col2, col3)\n centre : centre de la sphère de déformation sous forme d'un triple tuple (c_x, c_y, c_z)\n rayon : rayon de la sphère de déformation\n \"\"\"\n\n # calcul des coordonnées des sommets de l'hexagone par la fonction 'calculate_sommets'\n points_sommets = calculate_sommets(point, longueur)\n\n # transformation de la déformation des coordonnées des sommets selon la sphère\n point_centre = deformation(point, centre, rayon) # d'abord le point central de l'hexagone\n for p in range(6):\n points_sommets[p] = deformation(points_sommets[p], centre, rayon) # puis les 6 sommets\n\n # dessins des pavés avec la fonction 'dessine_pave'\n dessine_pave(point_centre, points_sommets[2], points_sommets[1], points_sommets[0], col[0]) # dessin du pavé n°1\n dessine_pave(point_centre, points_sommets[0], points_sommets[5], points_sommets[4], col[1]) # dessin du pavé n°2\n dessine_pave(point_centre, points_sommets[4], points_sommets[3], points_sommets[2], col[2]) # dessin du pavé n°3\n\n\ndef deformation(p, centre, rayon):\n \"\"\" Calcul des coordonnées d'un point suite à la déformation\n engendrée par la sphère émergeante\n Entrées :\n p : coordonnées (x, y, z) du point du dalage à tracer\n (z = 0) AVANT déformation\n centre : coordonnées (X0, Y0, Z0) du centre de la sphère\n rayon : rayon de la sphère\n Sorties : coordonnées (xprim, yprim, zprim) du point du dallage\n à tracer APRÈS déformation\n \"\"\"\n x, y, z = p\n xprim, yprim, zprim = x, y, z\n xc, yc, zc = centre\n if rayon ** 2 > zc ** 2:\n zc = zc if zc <= 0 else -zc\n r = sqrt(\n (x - xc) ** 2 + (y - yc) ** 2) # distance horizontale\n # depuis le point à dessiner jusqu'à l'axe de la sphère\n rayon_emerge = sqrt(rayon ** 2 - zc ** 2) # rayon de la partie\n # émergée de la sphère\n rprim = rayon * sin(acos(-zc / rayon) * r / rayon_emerge)\n if 0 < r <= rayon_emerge: # calcul de la déformation\n # dans les autres cas\n xprim = xc + (x - xc) * rprim / r # les nouvelles coordonnées\n # sont proportionnelles aux anciennes\n yprim = yc + (y - yc) * rprim / r\n if r <= rayon_emerge:\n beta = asin(rprim / rayon)\n zprim = zc + rayon * cos(beta)\n if centre[2] > 0:\n zprim = -zprim\n return xprim, yprim, zprim\n\n\ndef pavage(inf_gauche, sup_droit, longueur, col, centre, rayon):\n \"\"\"\n Dessine le pavage en commençant en bas à gauche\n inf_gauche: tuple : coordonnées du coin inférieur gauche\n sup_droit: tuple : coordonnées du coin suprieur droit\n longueur: longueur des arêtes\n col: tuple triple : les 3 couleurs utilisées\n centre: tuple triple : coordonnées du centre du cercle de déformation\n rayon: rayon du cercle de déformation\n \"\"\"\n turtle.reset() # On commence par initialiser 'turtle'\n turtle.speed('fastest') # sélectionne la vitesse maximum pour les tracés\n coo_centre_y = inf_gauche[1] + longueur # ordonnée du centre du premier hexagone tracé en bas à gauche\n ligne = 0 # Cette variable sera incrémentée à chaque novelle ligne afin de savoir si la ligne est paire ou impaire\n\n # calcul des pas de décalage entre les colonnes et les lignes\n ystep = int(sin(pi / 3) * longueur)\n xstep = longueur * 3\n\n # boucle des lignes\n for y in range(coo_centre_y, sup_droit[1] - longueur, ystep):\n if ligne % 2: # détecte si la ligne est impaire pour décaler ou non le premier 
hexagone\n coo_centre_x = int(inf_gauche[0] + longueur * 2.5)\n else:\n coo_centre_x = int(inf_gauche[0] + longueur)\n # boucle des colonnes, qui appelle la fonction 'hexagone' pour les tracés\n for x in range(coo_centre_x, sup_droit[0] - longueur, xstep):\n hexagone((x, y, 0), longueur, col, centre, rayon)\n ligne += 1\n\n # sauvegarde l'image\n turtle.getcanvas().postscript(file=\"pavage.eps\")\n # attend une action de l'utilisateur avant de quitter\n turtle.done()\n\n\ndef get_parametres():\n \"\"\"\n Fonction appelée pour obtenir les paramètres de dessin auprès de l'utilisateur\n :return: tuple contenant tous les informations nécessaires au dessin\n \"\"\"\n coin_inf = int(input(\"Coin inférieur gauche :\"))\n coin_sup = int(input(\"Coin supérieur droit :\"))\n l_arrete = int(input(\"Longueur d'une arrête :\"))\n coul1 = input(\"Couleur 1 :\")\n coul2 = input(\"Couleur 2 :\")\n coul3 = input(\"Couleur 3 :\")\n x_cercle = int(input(\"Abscisse du centre du cercle :\"))\n y_cercle = int(input(\"Ordonnée du centre du cercle :\"))\n z_cercle = int(input(\"Hauteur du centre du cercle :\"))\n r_cercle = int(input(\"Rayon du cercle :\"))\n return ((coin_inf, coin_inf), (coin_sup, coin_sup), l_arrete, (coul1, coul2, coul3), (x_cercle, y_cercle, z_cercle),\n r_cercle)\n\n\n\"\"\"\nCode principal\nappelle la fonction get_parametres pour récupérer toutes les informations puis dessine le pavage\n\"\"\"\nparametres = get_parametres()\npavage(parametres[0], parametres[1], parametres[2], parametres[3], parametres[4], parametres[5])\n","sub_path":"Vasarely.py","file_name":"Vasarely.py","file_ext":"py","file_size_in_byte":7441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175040538","text":"def zero_insert(n):\n str(n)\n length = len(str(n))\n int(n)\n p = [None] * (length)\n for i in range(length - 1, -1, -1):\n p[i] = n % 10\n n //= 10\n r = []\n count = 0 \n for i in range(0, len(p) - 1):\n r.append(p[i])\n if p[i] == p[i + 1] or (p[i] + p[i + 1]) % 10 == 0:\n count += 1\n r.append(0)\n r.append(p[len(p) - 1])\n\n return r\n\nprint(zero_insert(6446))\n","sub_path":"Week0/Problems1/zero_insert.py","file_name":"zero_insert.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"551867996","text":"import numpy as np\n\ndef compound(principal,period,rate):\n CI = principal * (pow((1 + rate / 100), period))\n return CI\n\ndef amortization(loan,rate,tenure,terminate):\n\n per = np.arange(tenure*12) + 1\n per_terminate = np.arange(terminate*12) + 1\n ipmt = np.ipmt((rate/100)/12, per, tenure*12, loan)\n ppmt = np.ppmt((rate/100)/12, per, tenure*12, loan)\n\n pmt = np.pmt((rate/100)/12, tenure*12, loan)\n\n balance = []\n for payment in per_terminate:\n index = payment - 1\n loan = loan + ppmt[index]\n balance.append(loan)\n return balance,pmt,ppmt,ipmt\n\ndef calculate(principal,asb_return,loan_interest,loan_tenure,year_terminate):\n compounded_interest = compound(principal,year_terminate,asb_return)\n balance,payment,ppmt,ipmt = amortization(principal,loan_interest,loan_tenure,year_terminate)\n\n balance = float(balance[int(year_terminate*12) - 1])\n maturity = compounded_interest - balance\n return maturity\n","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"448853017","text":"# setup.py\nfrom distutils.core 
import setup\nimport py2exe\nimport sys; sys.argv.append('py2exe')\n\npy2exe_options = dict(\n\tbundle_files = 1,\n\tincludes = [\"sip\"],\n\texcludes = ['doctest', 'pdb', 'unittest', 'difflib'],\n\tcompressed = True,\n\t)\n\nsetup(\n\twindows = [{'script': \"map_viewer.py\"}],\n\toptions = {'py2exe': py2exe_options},\n\tzipfile = None,\n\t)\n","sub_path":"chip_engine/compile_viewer_bundled.py","file_name":"compile_viewer_bundled.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"584779178","text":"import sys\nimport os\nfrom time import sleep\n\nfrom spade.agent import Agent\nfrom spade.message import Message\nfrom spade.behaviour import FSMBehaviour, State\n\nNUM_AGENTES = 4\nNUM_RONDAS = 10\n\n\n# TOURNAMENT MANAGER\n\nINICIO = \"INICIO\"\nCONTROL = \"CONTROL\"\nCOMPETIR = \"COMPETIR\"\nRESULTADOS = \"RESULTADOS\"\n\n\nclass TournamentManagerBehaviour(FSMBehaviour):\n\n async def on_start(self):\n print(\"Esperando a los otros agentes...\")\n\n async def on_end(self):\n print(\"FIN\")\n await self.agent.stop()\n\n\nclass inicio(State):\n async def run(self):\n if self.agent.agentes.__len__() == NUM_AGENTES:\n print(\"Todos los agentes a la espera, comenzando el torneo\")\n # Copiar la lista de participantes a una variable auxiliar para la ejecucion del torneo\n self.agent.aux = self.agent.agentes.copy()\n self.agent.cuenta = 0 # Variable auxiliar para la ejecucion del torneo\n self.set_next_state(CONTROL)\n return\n\n msg = await self.receive(timeout=1)\n if msg:\n print(f\"Recibida notificacion de: {msg.sender.localpart}\")\n self.agent.agentes.append(msg.sender.localpart)\n self.agent.resultados.append(0)\n\n self.set_next_state(INICIO)\n\n\nclass control(State):\n async def run(self):\n self.agent.cuenta += 1 # Siguiente ronda\n if self.agent.aux.__len__() == 0:\n self.set_next_state(RESULTADOS)\n return\n if self.agent.cuenta >= self.agent.aux.__len__():\n self.agent.cuenta = 0\n self.agent.aux.pop(0)\n self.set_next_state(CONTROL)\n return\n\n print(f\"\\n\\t|| {self.agent.aux[0]} vs {self.agent.aux[self.agent.cuenta]} ||\")\n self.set_next_state(COMPETIR)\n\n\nclass competir(State):\n async def run(self):\n tabla_ref = [\n [(2, 2), (3, 0)],\n [(0, 3), (1, 1)]\n ]\n a1 = self.agent.aux[0]\n a1_last = 1\n a1_res = 0\n a2 = self.agent.aux[self.agent.cuenta]\n a2_last = 1\n a2_res = 0\n\n for i in range(NUM_RONDAS):\n # Enviar mensajes\n msg = Message(to=f\"{a1}@localhost\")\n msg.body = str([i, a2_last])\n await self.send(msg)\n\n msg = Message(to=f\"{a2}@localhost\")\n msg.body = str([i, a1_last])\n await self.send(msg)\n\n #Esperar las respuestas\n msg = await self.receive(timeout=5)\n if msg:\n if msg.sender.localpart == a1:\n a1_last = int(msg.body)\n else:\n a2_last = int(msg.body)\n else: \n print(\"Agente no responde. Terminando torneo.\")\n sys.stdout.flush()\n os._exit(0)\n\n msg = await self.receive(timeout=5)\n if msg:\n if msg.sender.localpart == a1:\n a1_last = int(msg.body)\n else:\n a2_last = int(msg.body)\n else: \n print(\"Agente no responde. 
Terminando torneo.\")\n sys.stdout.flush()\n os._exit(0)\n\n # Añadir resultados\n print(f\"\\tRonda {i}:\\n\\t\\t{a1} -> {a1_last}\\n\\t\\t{a2} -> {a2_last}\")\n a1_res += tabla_ref[a1_last][a2_last][0]\n a2_res += tabla_ref[a1_last][a2_last][1]\n\n print(f\"\\t>> Res :\\n\\t\\t{a1} -> {a1_res}\\n\\t\\t{a2} -> {a2_res}\")\n self.agent.resultados[self.agent.agentes.index(a1)] += a1_res\n self.agent.resultados[self.agent.agentes.index(a2)] += a2_res\n self.set_next_state(CONTROL)\n\n\nclass resultados(State):\n async def run(self):\n print(\"\\n>> RESULTADOS:\")\n for i in range(self.agent.agentes.__len__()):\n msg = Message(to=f\"{self.agent.agentes[i]}@localhost\")\n msg.body = \"STOP\"\n await self.send(msg)\n print(f\"\\t{self.agent.agentes[i]}\\t{self.agent.resultados[i]}\")\n\n\nclass TournamentManager(Agent):\n async def setup(self):\n # Atributos de control del torneo\n self.agentes = []\n self.resultados = []\n\n # Definir la Maquina de Estados Finitos\n fsm = TournamentManagerBehaviour()\n\n # Estados\n fsm.add_state(name=INICIO, state=inicio(), initial=True)\n fsm.add_state(name=CONTROL, state=control())\n fsm.add_state(name=COMPETIR, state=competir())\n fsm.add_state(name=RESULTADOS, state=resultados())\n\n # Transiciones entre estados\n fsm.add_transition(source=INICIO, dest=INICIO)\n fsm.add_transition(source=INICIO, dest=CONTROL)\n fsm.add_transition(source=CONTROL, dest=CONTROL)\n fsm.add_transition(source=CONTROL, dest=COMPETIR)\n fsm.add_transition(source=COMPETIR, dest=CONTROL)\n fsm.add_transition(source=CONTROL, dest=RESULTADOS)\n\n self.add_behaviour(fsm)\n\n\n\n\n# tournamentManager@localhost\n# ojoPorOjo@localhost\n# cooperativo@localhost\n# noCooperativo@localhost\n# random@localhost\n\nif __name__ == \"__main__\":\n tm = TournamentManager(\"tournamentManager@localhost\", \"123\")\n tm.start()\n sleep(1)\n\n while tm.is_alive():\n try:\n sleep(1)\n except KeyboardInterrupt:\n tm.stop()\n break\n print(\"Agent finished\")\n sys.stdout.flush()\n os._exit(0)\n","sub_path":"SPADE/Torneo.py","file_name":"Torneo.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"642824029","text":"import logging\nimport isodate\nimport pandas as pd\nimport requests\nfrom siphoner_app.models import (\n NWSObservation,\n NWSForecast, NWSForecastTime, NWSValidTime, NWSUnit, NWSVariable,\n NWSForecastSource, NWSTimeStamp\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass NWSObservationRetriever:\n\n def __init__(self, station):\n self.station = station\n self.variable_names = [\n 'temperature',\n 'maxTemperatureLast24Hours',\n 'minTemperatureLast24Hours'\n ]\n\n def retrieve_observation(self):\n url = f'{self.station.api_url}/observations'\n response = requests.get(url)\n\n if response.ok:\n json = response.json()\n self._extract_and_load(json)\n\n def _extract_and_load(self, json):\n features = json['features']\n for feature in features:\n properties = feature['properties']\n\n timestamp, _ = NWSTimeStamp.objects.get_or_create(\n time=pd.to_datetime(properties['timestamp'], utc=True)\n )\n\n for variable_name in self.variable_names:\n unit, _ = NWSUnit.objects.get_or_create(\n name=properties[variable_name]['unitCode']\n )\n\n variable, _ = NWSVariable.objects.get_or_create(\n name=variable_name,\n unit=unit\n )\n\n value = properties[variable_name]['value']\n if value:\n NWSObservation.objects.get_or_create(\n nws_id=self.station,\n timestamp=timestamp,\n variable=variable,\n value=value\n 
)\n\n\nclass NWSForecastRetriever:\n\n def __init__(self, gridpoint):\n self.gridpoint = gridpoint\n self.source, _ = NWSForecastSource.objects.get_or_create(\n name='forecast_api'\n )\n\n def retrieve_forecast(self):\n\n url = f'{self.gridpoint.forecast_api_url}'\n response = requests.get(url)\n\n if response.ok:\n json = response.json()\n self._extract_and_load(json)\n\n def _extract_and_load(self, json):\n\n forecast_time, created = NWSForecastTime.objects.get_or_create(\n issued_time=pd.to_datetime(json['properties']['updateTime'], utc=True)\n )\n\n periods = json['properties']['periods']\n for period in periods:\n valid_time, _ = NWSValidTime.objects.get_or_create(\n start_time=pd.to_datetime(period['startTime'], utc=True),\n end_time=pd.to_datetime(period['endTime'], utc=True)\n )\n\n unit, _ = NWSUnit.objects.get_or_create(\n name=period['temperatureUnit']\n )\n\n variable, _ = NWSVariable.objects.get_or_create(\n name='temperature',\n unit=unit\n )\n\n _, created = NWSForecast.objects.get_or_create(\n nws_id=self.gridpoint,\n forecast_time=forecast_time,\n valid_time=valid_time,\n variable=variable,\n source=self.source,\n value=period['temperature']\n )\n\n\nclass NWSHourlyForecastRetriever(NWSForecastRetriever):\n\n def __init__(self, gridpoint):\n super().__init__(gridpoint)\n self.source, _ = NWSForecastSource.objects.get_or_create(\n name='houly_forecast_api'\n )\n\n def retrieve_forecast(self):\n\n url = f'{self.gridpoint.forecast_hourly_api_url}'\n response = requests.get(url)\n\n if response.ok:\n json = response.json()\n self._extract_and_load(json)\n\n\nclass NWSGridPointForecastRetriever:\n\n def __init__(self, gridpoint):\n self.gridpoint = gridpoint\n self.source, _ = NWSForecastSource.objects.get_or_create(\n name='gridpoint_forecast_api'\n )\n self.variable_names = ['temperature', 'maxTemperature', 'minTemperature']\n\n def retrieve_forecast(self):\n\n url = f'{self.gridpoint.forecast_grid_data_api_url}'\n response = requests.get(url)\n\n if response.ok:\n json = response.json()\n self._extract_and_load(json)\n\n def _extract_and_load(self, json):\n\n forecast_time, created = NWSForecastTime.objects.get_or_create(\n issued_time=pd.to_datetime(json['properties']['updateTime'], utc=True)\n )\n\n for variable_name in self.variable_names:\n unit, _ = NWSUnit.objects.get_or_create(\n name=json['properties'][variable_name]['uom']\n )\n\n variable, _ = NWSVariable.objects.get_or_create(\n name=variable_name,\n unit=unit\n )\n\n values = json['properties'][variable_name]['values']\n for value in values:\n valid_time_duration = _get_duration_start_end(value['validTime'])\n valid_time, _ = NWSValidTime.objects.get_or_create(\n start_time=valid_time_duration['start'],\n end_time=valid_time_duration['end']\n )\n\n _, created = NWSForecast.objects.get_or_create(\n nws_id=self.gridpoint,\n forecast_time=forecast_time,\n valid_time=valid_time,\n variable=variable,\n source=self.source,\n value=value['value']\n )\n\n\ndef _get_duration_start_end(iso_string):\n dt_start = pd.to_datetime(iso_string.split('/')[0], utc=True)\n duration = isodate.parse_duration(iso_string.split('/')[1])\n dt_end = dt_start + duration\n\n return {'start': dt_start, 'end': dt_end}\n","sub_path":"siphoner_app/nws.py","file_name":"nws.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536467345","text":"# coding: utf-8\n\nimport json\nimport numpy as np\nfrom keras.layers import Input, Embedding, LSTM, Dense, 
Bidirectional, Add, MaxPooling1D, \\\n Concatenate, Dot, Flatten, RepeatVector, Multiply\nfrom keras.models import Model\nfrom keras.preprocessing import sequence\nfrom keras import backend as K\nimport os\nimport tensorflow as tf\nfrom configparser import ConfigParser\nfrom keras.optimizers import Adam\nfrom keras import losses\nfrom sklearn.utils.class_weight import compute_class_weight\n# noinspection PyUnresolvedReferences\nfrom preprocess import readData\n# noinspection PyUnresolvedReferences\nfrom preprocess import readRelation\n\ndef ranking_loss(y_true, y_pred):\n return K.maximum(0.0, 0.1 + K.sum(y_pred*y_true,axis=-1))\n\ndef model_construct():\n # CONFIG\n config = ConfigParser()\n config.read('./config.ini')\n\n question_input = Input(shape=(config.getint('pre', 'question_maximum_length'), ), dtype='int32',name=\"question_input\")\n relation_all_input = Input(shape=(config.getint('pre', 'relation_word_maximum_length'), ), dtype='int32',name=\"relation_all_input\")\n relation_input = Input(shape=(config.getint('pre', 'relation_maximum_length'), ), dtype='int32',name=\"relation_input\")\n\n question_emd = np.load('./question_emd_matrix.npy')\n relation_emd = np.load('./relation_emd_matrix.npy')\n relation_all_emd = np.load('./relation_all_emd_matrix.npy')\n\n question_emd = Embedding(question_emd.shape[0],\n config.getint('pre', 'word_emd_length'),\n weights=[question_emd],\n input_length=config.getint('pre', 'question_maximum_length'),\n trainable=False,name=\"question_emd\")(question_input)\n\n sharedEmbd_r_w = Embedding(relation_all_emd.shape[0],\n config.getint('pre', 'word_emd_length'),\n weights=[relation_all_emd],\n input_length=config.getint('pre', 'relation_word_maximum_length'),\n trainable=False,name=\"sharedEmbd_r_w\")\n relation_word_emd = sharedEmbd_r_w(relation_all_input)\n sharedEmbd_r = Embedding(relation_emd.shape[0],\n config.getint('pre', 'word_emd_length'),\n weights=[relation_emd],\n input_length=config.getint('pre', 'relation_maximum_length'),\n trainable=True,name=\"sharedEmbd_r\")\n relation_emd = sharedEmbd_r(relation_input)\n bilstem_layer = Bidirectional(LSTM(units=200, return_sequences=True, implementation=2),name=\"bilstem_layer\")\n question_bilstm_1 = bilstem_layer(question_emd)\n # question_bilstm_2 = Bidirectional(LSTM(units=200, return_sequences=True, implementation=2),name=\"question_bilstm_2\")(question_bilstm_1)\n relation_word_bilstm = bilstem_layer(relation_word_emd)\n relation_bilstm = bilstem_layer(relation_emd)\n # question_res = Add()([question_bilstm_1, question_bilstm_2])\n relation_con = Concatenate(axis=-2)([relation_word_bilstm, relation_bilstm])\n relation_res = MaxPooling1D(400, padding='same')(relation_con)\n relation_flatten = Flatten()(relation_res)\n\n fc_layer1 = Dense(400, use_bias=True, activation='tanh')\n fc_layer2 = Dense(1, use_bias=False, activation='softmax')\n rel_expand = RepeatVector(30)(relation_flatten)\n inputs = Concatenate()([question_bilstm_1, rel_expand])\n weights = fc_layer2(fc_layer1(inputs))\n question_att = MaxPooling1D(400, padding='same')(Multiply()([question_bilstm_1, weights]))\n\n result = Dot(axes=-1, normalize=True)([question_att, relation_flatten])\n model = Model(inputs=[question_input, relation_input, relation_all_input,], outputs=result)\n model.compile(optimizer=Adam(), loss=ranking_loss)\n return model\n\nif __name__ == '__main__':\n # GPU settings\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n neg_num = json.load(open('./neg_number.json', 'r'))\n relation_dict = 
json.load(open('./relation_dict.json', 'r'))\n new_relation_dict = {v: k for k, v in relation_dict.items()}\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n K.set_session(sess)\n model = model_construct()\n model.load_weights('./runs/complex_twoatt_r2/my_model_weights.h5')\n print(model.summary())\n\n question_feature = np.load('./test_question_feature.npy')\n\n relation_feature = np.load('./test_relation_feature.npy')\n relation_all_feature = np.load('./test_relation_all_feature.npy')\n\n print('positive data loaded...')\n simi_pos = model.predict([question_feature, relation_feature, relation_all_feature], batch_size=1024)\n\n print('positive similarity computed...')\n np.save('test_pre_pos.npy', simi_pos)\n\n relation_feature_neg = np.load('./test_relation_feature_neg.npy')\n relation_all_feature_neg = np.load('./test_relation_all_feature_neg.npy')\n\n print('negtive data loaded...')\n simi_neg = model.predict([question_feature, relation_feature_neg, relation_all_feature_neg], batch_size=1024)\n\n print('negtive similarity computed...')\n np.save('test_pre_neg.npy', simi_neg)\n\n acc = np.sum(simi_pos>simi_neg) / simi_pos.shape[0]\n print(\"relation pos>neg accurcy: \" + str(acc))\n\n index = 0\n false_list = list()\n true_list = list()\n true_all = list()\n all_set = set()\n\n config = ConfigParser()\n config.read('./config.ini')\n data = readData(config.get('pre', 'test_filepath'))\n relation = readRelation(config.get('pre', 'relation_filepath'))\n\n for num, neg_index in neg_num:\n l = int(np.argmax(simi_neg[index: index + num])) # 最大负例下标\n max_neg = relation_feature_neg[index+l] # 选出的最优候选\n gold = relation_feature[index]\n\n if (max_neg == gold).all(): # 判断最优候选是否与标准答案相同\n true_all.append(neg_index)\n # print(str(neg_index) + \",rel_right\")\n else:\n false_list.append(neg_index)\n # print(str(neg_index) + \",rel_wrong\")\n print(new_relation_dict[int(gold[0])] + \",\" + new_relation_dict[int(max_neg[0])])\n\n\n index += num\n all_set.add(neg_index)\n\n\n print(data[0])\n print(relation[0][1])\n print(relation[1][1])\n print(\"len(true_all) == \" + str(len(true_all)))\n print(\"len(false_list) == \" + str(len(false_list)))\n print(\"len(all_set) == \" + str(len(all_set)))\n\n print(true_all)\n","sub_path":"relation_detection/Attention_BiLSTM/eval_twoatt_con.py","file_name":"eval_twoatt_con.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45942738","text":"# import requests\n# import readConfig\n# from public import commo\n# url = readConfig.ReadConfig().get_url(\"baseurl\")\n# host = readConfig.ReadConfig().get_host(\"codehost\")\n# pathdir = readConfig.ReadConfig().get_host(\"xlsxhost\")\n# caseList = commo.get_xlsx(pathdir+\"userCase.xlsx\",\"sendcode\")\n# mobile = caseList[0][1]\n# url1 = url+host+str(mobile)\n# print(url)\n# print(host)\n# print(url1)\n# re = requests.post(url=url1)\n#\n# print(re.json())\n# print(re.text)\n\nimport unittest #单元测试框架\nimport paramunittest\n@paramunittest.parametrized(\n [1,2],\n [3,4],\n [5,6]\n)\nclass test(unittest.TestCase):\n def setParameters(self,a,b):\n print(\"我是用例1\")\n self.a = a\n self.b = b\n def test2(self):\n print(self.a,self.b)\nif __name__ == '__main__':\n unittest.main()","sub_path":"public/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"197435633","text":"# https://medium.com/@hjhuney/implementing-a-random-forest-classification-model-in-python-583891c99652\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import model_selection\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nfrom sklearn.model_selection import RandomizedSearchCV\n\nroot = './data'\nlabel_name = 'label_c_v1.txt'\nfeature_name = 'features_r_PET_v1nobig.txt'\n\nfeature_path = root+'/'+feature_name\nlabel_path = root+'/'+label_name\n\nfeaturesALL = np.loadtxt(feature_path)\nlabelALL = np.loadtxt(label_path)\nlabel_num = 1#labelALL.shape[1]\n\nlabel = labelALL#[:, tt_label]\nfeatures = featuresALL[~np.isnan(label), :]\nlabel = label[~np.isnan(label)]\nlabel = label[np.sum(np.isnan(features), axis=1) == 0]\nfeatures = features[np.sum(np.isnan(features), axis=1) == 0, :]\n\nX = features\ny = label\n# implementing train-test-split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=100)\n\n#create the random forest model\n\n# random forest model creation\nrfc = RandomForestClassifier()\nprint('>>> X_train {}'.format(X_train.shape))\nprint('>>> y_train {}'.format(y_train.shape))\nrfc.fit(X_train,y_train)\n# predictions\nrfc_predict = rfc.predict(X_test)\n\n#evaluating the performance\n\nrfc_cv_score = cross_val_score(rfc, X, y, cv=10, scoring='roc_auc')\n\n#print out the results\nprint(\"=== Confusion Matrix ===\")\nprint(confusion_matrix(y_test, rfc_predict))\nprint('\\n')\nprint(\"=== Classification Report ===\")\nprint(classification_report(y_test, rfc_predict))\nprint('\\n')\nprint(\"=== All AUC Scores ===\")\nprint(rfc_cv_score)\nprint('\\n')\nprint(\"=== Mean AUC Score ===\")\nprint(\"Mean AUC Score - Random Forest: \", rfc_cv_score.mean())\n'''\n# number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# number of features at every split\nmax_features = ['auto', 'sqrt']\n\n# max depth\nmax_depth = [int(x) for x in np.linspace(100, 500, num = 11)]\nmax_depth.append(None)\n# create random grid\nrandom_grid = {\n 'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth\n }\n# Random search of parameters\nrfc_random = RandomizedSearchCV(estimator = rfc, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)\n# Fit the model\nrfc_random.fit(X_train, y_train)\n# print results\nprint(rfc_random.best_params_)\n'''","sub_path":"ANN_Classifier/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"147261275","text":"import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent.scenario import BaseScenario\n\nclass Scenario(BaseScenario):\n def make_world(self, args):\n # set any world properties first\n if 'shared_obs' in args.keys():\n self.shared_obs = args['shared_obs']\n else:\n self.shared_obs = True\n\n if ('boundary' in args.keys()) and (args['boundary'] is not None):\n world = World(boundary=args['boundary'])\n else:\n world = World()\n world.dim_c = 2\n if ('num_agents' in args.keys()) and (args['num_agents'] is not None):\n num_agents = args['num_agents']\n else:\n num_agents = 3\n self.num_agents = num_agents\n if ('num_landmarks' in args.keys()) and 
(args['num_landmarks'] is not None):\n num_landmarks = args['num_landmarks']\n else:\n num_landmarks = 3\n\n if ('agent_size' in args.keys()) and (args['agent_size'] is not None):\n agent_size = args['agent_size']\n else:\n agent_size = 0.15\n\n self.num_landmarks = num_landmarks\n world.collaborative = True\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n agent.adversary = False\n agent.size = agent_size\n agent.id = i\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n landmark.size = agent_size\n # make initial conditions\n self.reset_world(world)\n\n self.self_obs_indices = np.arange(2*world.dim_p+num_landmarks*world.dim_p)\n return world\n\n def reset_world(self, world):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.35, 0.35, 0.85])\n # agent.color = np.zeros(3)\n # agent.color[i%3] = 0.5\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n # landmark.color = np.zeros(3)\n # landmark.color[i%3] = 1.\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n rew = 0\n collisions = 0\n occupied_landmarks = 0\n min_dists = 0\n for l in world.landmarks:\n dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]\n min_dists += min(dists)\n rew -= min(dists)\n if min(dists) < 0.1:\n occupied_landmarks += 1\n if agent.collide:\n for a in world.agents:\n if self.is_collision(a, agent):\n rew -= 1\n collisions += 1\n return (rew, collisions, min_dists, occupied_landmarks)\n\n\n def is_collision(self, agent1, agent2):\n delta_pos = agent1.state.p_pos - agent2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = agent1.size + agent2.size\n return True if dist < dist_min else False\n\n def reward(self, world):\n # Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions\n rew = 0\n for l in world.landmarks:\n dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]\n rew -= min(dists)/len(world.landmarks)\n rew = rew\n \n for agent in world.agents:\n if agent.colliding:\n rew -= 1/len(world.agents)\n return [rew]*len(world.agents)\n","sub_path":"tests/ParticleGNN/particle-graph-envs/multiagent/scenarios/simple_spread.py","file_name":"simple_spread.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"388207745","text":"# dataset settings\n\n_base_ = [\n '../_base_/models/faster_rcnn_r50_fpn.py',\n '../_base_/default_runtime.py',\n # '../_base_/schedules/schedule_1x.py'\n]\n# optimizer\noptimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=None)\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n 
warmup_ratio=0.001,\n step=[8, 11])\ntotal_epochs = 12\ndataset_type = 'GHDDataset'\ndata_root = '/mnt/d/Dataset/ghd'\ndata_root = '/data1/hangli/gwd/data'\n# data_root = '/Users/steer/Documents/dataset/global-wheat-detection'\ntr_img_prefix = 'train'\nclasses = None\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\nmodel = dict(\n type='FasterRCNN',\n pretrained='torchvision://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=True,\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[8],\n ratios=[0.5, 1.0, 2.0],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n roi_head=dict(\n type='StandardRoIHead',\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', out_size=5, sample_num=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='Shared2FCBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0))))\ndata = dict(\n samples_per_gpu=8,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n classes=classes,\n pipeline=train_pipeline,\n data_root = data_root,\n img_prefix = tr_img_prefix,\n ann_file='fold0_train.pkl',\n ),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n img_prefix = tr_img_prefix,\n classes=classes,\n pipeline=test_pipeline,\n ann_file='fold0_val.pkl',\n ),\n test=dict(\n type=dataset_type,\n classes=classes,\n data_root=data_root,\n img_prefix = tr_img_prefix,\n pipeline=test_pipeline,\n ann_file='fold0_val.pkl',\n ))\n\nfp16 = dict(loss_scale=512.)","sub_path":"configs/kaggle_gwd/faster_rcnn_r50_fpn.py","file_name":"faster_rcnn_r50_fpn.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"555089745","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 30 23:39:56 2018\r\n\r\n@author: rdas3\r\n\"\"\"\r\n\r\nimport operator\r\n\r\n\r\ndef 
total_Users():\r\n    INPUT_FILE = input(\"Enter the path of the input file located: \")\r\n    INPUT_FILE_PATH = INPUT_FILE+ '.txt'\r\n    x = input(\"Enter a number: \")\r\n    n = int(x)\r\n    OUTFILE = r'C:\Users\rdas3\Desktop\Docs'\r\n    OUTPUT_FILE_PATH = OUTFILE + '.txt'\r\n    with open (INPUT_FILE_PATH, encoding = \"latin-1\") as my_File:\r\n        tweet=my_File.readlines()\r\n    L = {}\r\n    for dat in tweet:\r\n        \r\n        fileTemp1 = dat.split()\r\n        if fileTemp1[0] in L:\r\n            L[fileTemp1[0]] +=1\r\n        else:\r\n            L[fileTemp1[0]] = 1\r\n    L = sorted(L.items(), key = operator.itemgetter(1), reverse = True)\r\n    \r\n    output_File = open(OUTPUT_FILE_PATH, 'w', encoding = 'utf-8')\r\n\t \r\n    output_File.write(\"The top \"+ x +\" users who have tweeted the most in the timeframe: \\n\")\r\n    for i in range (0,n):\r\n        output_File.write(\"User Name \" + L[i][0] + \"\\n\\n\")\r\n    \r\n    output_File.close()\r\ntotal_Users()","sub_path":"TotalUsers.py","file_name":"TotalUsers.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"594899371","text":"import pymysql\r\n\r\nmydb = pymysql.connect(\r\n    host=\"localhost\",\r\n    user=\"root\",\r\n    passwd=\"rootpassword\",\r\n    database=\"test\"\r\n)\r\ncurs = mydb.cursor()\r\n\r\ncurs.execute(\"SELECT COUNT(*) FROM iplist\")\r\nnumber_of_rows = curs.fetchone()\r\nnumber_of_rows = number_of_rows[0]\r\n\r\n\r\nfor i in range(1, number_of_rows):\r\n    if i % 100 == 0:\r\n        print(\"\\nLine: %d\" % (i+1))\r\n\r\n    curs.execute(\"SELECT * FROM iplist ORDER BY `idx` LIMIT %s, %s\" %(i-1, i)) # i번째 줄 출력\r\n    result = curs.fetchone()\r\n\r\n    country_code = result[3]\r\n    count = result[4]\r\n\r\n    curs.execute(\"SELECT COUNT(*) FROM rank WHERE `country_code`='%s'\" % country_code)\r\n    result = curs.fetchone()\r\n    result = result[0]\r\n    if result == 0:\r\n        curs.execute(\"INSERT INTO rank (`country_code`, `count`, `number_of_ip`) VALUES('%s', '%s', '1')\" %(country_code, count))\r\n        curs.fetchall()\r\n    else:\r\n        curs.execute(\"SELECT * FROM rank WHERE `country_code`='%s'\" % country_code)\r\n        result = curs.fetchone()\r\n        count_db = result[2]\r\n        number_of_ip = int(result[3]) + 1\r\n        # print(\"number_of_ip: %s\" % number_of_ip)\r\n\r\n        curs.execute(\"UPDATE rank SET `count`='%s', `number_of_ip`='%s' WHERE `country_code`='%s'\" %(count+count_db, number_of_ip, country_code))\r\n        curs.fetchall()\r\n\r\n\r\nmydb.commit()\r\n","sub_path":"server_auth_log_analysis/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"265094097","text":"# renames subtitles and files with parent folder name to sync\n\nimport os\nimport zipfile\n\n# renames files to match parent folder name\n\ndef sync_It(folder , folder_name) :\n\n    for path , subdirs , files in os.walk(folder) :\n\n        for file in files :\n\n            file_name , extension = os.path.splitext(file)\n\n            old_file_name = folder + \"/\" + file_name + extension\n\n            new_file_name = folder + \"/\" + folder_name + extension\n\n            os.rename(old_file_name , new_file_name)\n\n    return\n\n# in case of series first the series folder is entered..then each folder inside the series corresponding to each movie in the series is sent to sync_It function\n\ndef work_for_series(folder) :\n\n    for path , subdirs , files in os.walk(folder) :\n\n        subdirs.sort()\n\n        for element in subdirs :\n\n            for path2 , subdirs2 , files2 in os.walk( folder +"/"+element ):\n\n                for zipfile in files2 :\n\n                    
file_name , extension = os.path.splitext( zipfile)\n\n if str(extension).endswith(\"zip\") :\n\n unzip_it( zipfile , folder +\"/\"+element )\n\n keep_only_video_and_subtitle_files(folder +\"/\"+element)\n\n sync_It( folder+\"/\"+element , element)\n\n break\n\n return\n\n# the name says it all....if it finds a zip file then it extracts the files inside the folder to the parent folder of the zip file\n\ndef unzip_it( zip_file , parent_folder ) :\n\n with zipfile.ZipFile(parent_folder+\"/\"+zip_file,\"r\") as zip_ref :\n\n zip_ref.extractall(parent_folder)\n\n zip_ref.close\n\n return\n\n# it keeps only those files which are videos and subtitle files...all other files are deleted such as zip , txt, etc... more video formats are yet to be added\n\ndef keep_only_video_and_subtitle_files(folder_path) :\n\n for paths , subdirs , files in os.walk(folder_path) :\n\n for not_subtitle_files in files :\n\n if not str(not_subtitle_files).endswith( (\"mp4\" , \"avi\" , \"mkv\" ,\"dat\" , \"vob\" ,\"srt\") ) :\n\n os.remove(folder_path+\"/\"+not_subtitle_files)\n\n\n\n\n\n\n\n# the actual code starts from here\n\n# \"/media/anirudh/Windows/Users/Vijay/Downloads/Movies\" ,\n\ndef main():\n\n folder_path = [os.environ['HOME'] + \"/Desktop/Movies/\"]\n\n for i in folder_path:\n\n for path , subdirs , files in os.walk(i):\n\n subdirs.sort()\n\n for element in subdirs :\n\n element_path = os.path.join(i,element)\n\n if \"Series\" in element :\n\n work_for_series(element_path)\n\n else :\n\n for path2 , subdirs2 , files2 in os.walk( element_path ):\n\n if files2 :\n\n for zip_file in files2 :\n\n if str(zip_file).endswith(\"zip\") :\n\n unzip_it( zip_file , element_path )\n\n keep_only_video_and_subtitle_files(element_path)\n\n sync_It(element_path , element)\n\n\n break\n","sub_path":"Sync_Subtitles_Of_Movies.py","file_name":"Sync_Subtitles_Of_Movies.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"394774599","text":"#!/usr/bin/env python3\n__author__ = 'Niklas Kroeger'\n__email__ = \"niklas.kroeger@imr.uni-hannover.de\"\n__status__ = \"Development\"\n\nimport logging\nimport pkgutil\ntry:\n from itertools import zip_longest\nexcept ImportError:\n # python2 uses a different name\n from itertools import izip_longest as zip_longest\n\nimport pyCameras\n\n\ndef dynamic_import_class(name):\n \"\"\"\n Dynamically import a class defined by a string\n\n Parameters\n ----------\n name : str\n String describing the class that should be imported like a normal\n python import.\n\n Returns\n -------\n class : Object\n Class that is imported by the given 'name'\n\n Notes\n -----\n Taken from\n https://stackoverflow.com/questions/547829/how-to-dynamically-load-a-python-class\n\n Examples\n --------\n dynamic_import_class(pyCameras.cameraUSB.CameraUSB) will return CameraUSB\n \"\"\"\n # split import path into package and desired class, import package, and\n # return class from package\n mod, cls = name.rsplit('.', 1)\n mod = __import__(mod, fromlist=[cls])\n return getattr(mod, cls)\n\n\ndef listCameraImplementations():\n \"\"\"\n Find all classes with the name `Camera` in the pyCameras project\n\n Returns\n -------\n classes : list\n List of all class implementations with the name `Camera`\n \"\"\"\n package = pyCameras\n classes = []\n for importer, modname, ispkg in pkgutil.walk_packages(\n path=package.__path__,\n prefix=package.__name__ + '.',\n onerror=lambda x: None):\n try:\n 
classes.append(dynamic_import_class(modname + '.Camera'))\n except ImportError as e:\n logging.warning('Failed to import {modname}: {e}'\n ''.format(modname=modname,\n e=e))\n except AttributeError:\n # there is no class named Camera in that module\n pass\n return classes\n\n\ndef getAllDevices():\n \"\"\"\n Find all available camera devices that are reachable right now\n\n Returns\n -------\n devices : dict\n Dict of all available camera devices that are reachable at the moment.\n The devices are separated by their respective class implementation.\n The implementation.__module__ string is the dict key under which a list\n of the respective devices is returned.\n \"\"\"\n return {implementation.__module__: implementation.listDevices() for\n implementation in listCameraImplementations()}\n\n\ndef grouper(iterable, n, fillvalue=None):\n \"\"\"\n Collect data into fixed-length chunks or blocks\n\n example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n\n :param iterable: some iterable from which the groups should be returned\n :param n: number of elements in the groups\n :param fillvalue: object that should be inserted if the iterable can't be\n split up into a full number of groups\n :return: iterable of iterable groups with length n\n \"\"\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n\nclass SettingsHandler(object):\n \"\"\"\n Helper class to generate a simple settings interface for classes.\n\n This class implements two functions that can be used to register and call\n functions that change some settings values. To register a new setting the\n registerFeature function should be used. To change the value of a setting\n the setFeature function should be called. For details on the use of these\n functions see their corresponding docstring.\n \"\"\"\n def __init__(self):\n super(SettingsHandler, self).__init__()\n\n # only add the logger if no other parent class took care of it yet\n if not hasattr(self, 'logger'):\n self.logger = logging.getLogger(__name__)\n\n self.features = {}\n\n def listFeatures(self):\n \"\"\"\n Helper function to return the settings dict\n \"\"\"\n return list(self.features.keys())\n\n def registerFeature(self, key, callback):\n \"\"\"\n Register a setFeature function by defining the corresponding key and\n callback function\n\n Parameters\n ----------\n key : str\n Key describing the feature that should be registered\n\n callback : function\n Function that should be called to set the corresponding feature\n\n Notes\n -----\n To prevent typos in capitalization of keys all feature registrations\n are done with key.lower(). This is already incorporated in the\n settingsHandler.setFeature() by searching the self.features dict for\n key.lower().\n \"\"\"\n self.features[key.lower()] = callback\n\n def setFeature(self, *args, **kwargs):\n \"\"\"\n Update a setting (described by 'key') to a new value\n\n This function expects features in the form of 'key' - 'value'. The key\n describes what feature should be changed and the value parameter is\n passed to the corresponding function implementation. The key and its\n corresponding function have to be registered in the self.features\n dictionary. 
To do this use self.registerFeature.\n\n        Several different ways of passing 'key' - 'value' pairs are allowed.\n\n        For the simplest use case of updating one setting simply pass 'key' and\n        'value' in the correct order or as keyword arguments.\n        If multiple settings should be updated with a single call a number of\n        'key' - 'value' pairs can be passed as list or tuple in the order\n        [key1, value1, key2, value2, ... , keyN, valueN].\n        Alternatively a dict can be passed where the keys of the dict match the\n        feature keys and the associated value corresponds to the desired value\n        e.g.\n        {'resolutionX': 640, 'resolutionY': 480}\n\n        If only a single string is passed this function assumes this is the\n        path to a configuration file that should be parsed and loaded. NOTE:\n        NOT YET IMPLEMENTED!\n\n        Parameters\n        ----------\n        key : str\n            key describing the function as registered via self.registerFeature\n\n        value : object\n            Parameters that should be passed on to the corresponding function\n            implementation\n\n        Notes\n        -----\n        To prevent capitalization typos all feature registrations are done with\n        key.lower() (see settingsHandler.registerFeature()). This means that\n        feature lookups are also done with key.lower(). This has to be\n        considered if this function is overloaded.\n        \"\"\"\n        if len(args) == 1:\n            if isinstance(args[0], dict):\n                settings = args[0]\n                for key in settings.keys():\n                    self.setFeature(key=key, value=settings[key])\n            elif isinstance(args[0], (list, tuple)):\n                # assume the settings are ordered as ['key', value]\n                for (key, value) in grouper(args[0], 2):\n                    self.setFeature(key=key, value=value)\n            elif isinstance(args[0], str):\n                # This might still be a single key with the value given as a\n                # kwarg. So check if a single kwarg with key 'value' exists\n                if len(kwargs) >= 1 and 'value' in kwargs.keys():\n                    self.setFeature(key=args[0], value=kwargs['value'])\n                else:\n                    # assume this is the path to a settings file we should\n                    # parse\n                    # TODO: implement file parsing\n                    pass\n        elif len(args) >= 2:\n            # assume the arguments were passed in order ['key', 'value']\n            # there may be multiple key value pairs so try to parse all of them\n            for (key, value) in grouper(args, 2):\n                self.setFeature(key=key, value=value)\n\n        if all(k in kwargs.keys() for k in ('key', 'value')):\n            try:\n                self.logger.debug(\"Setting key: {key} with value: {value}\"\n                                  \"\".format(key=kwargs['key'],\n                                            value=kwargs['value']))\n                self.features[kwargs['key'].lower()](kwargs['value'])\n            except KeyError:\n                raise NotImplementedError('The desired key \\'{key}\\' has no '\n                                          'registered implementation. Desired '\n                                          'value: \\'{value}\\''\n                                          ''.format(key=kwargs['key'],\n                                                    value=kwargs['value']))\n            except Exception as e:\n                self.logger.exception('Failed to set \\'{key}\\' to '\n                                      '\\'{value}\\', {e}'\n                                      ''.format(key=kwargs['key'],\n                                                value=kwargs['value'],\n                                                e=e))\n\n    def getFeatures(self):\n        \"\"\"\n        Returns the dictionary of registered setFunction implementations\n        \"\"\"\n        return self.features\n\n    def getFeature(self, key):\n        \"\"\"\n        Get the current value for the feature defined by key\n\n        Parameters\n        ----------\n        key : str\n            String defining the feature\n\n        Returns\n        -------\n        value : str, int, float, object\n            Value of the desired feature, '' if the value could\n            not be read\n\n        Notes\n        -----\n        This function only works as intended if the registered callback\n        function for the given key accepts a call with no arguments and returns\n        the correct setting value. 
An example of this can be found at the\n bottom of this file.\n \"\"\"\n try:\n value = self.features[key.lower()]()\n except Exception:\n value = ''\n return value\n\n\nif __name__ == '__main__':\n ########################################################\n # example for using the SettingsHandler class:\n ########################################################\n\n # define the class that should inherit from the settingsHandler\n class SomeClass(SettingsHandler):\n def __init__(self):\n \"\"\"\n If you want to inherit from another class it is possible to give\n multiple parent classes: class SomeClass(ParentA, SettingsHandler)\n \"\"\"\n # make sure all parent classes are properly initialized\n super(SomeClass, self).__init__()\n\n # Now register a callback function for some setting\n self.registerFeature('someSetting', self.setSomething)\n\n # start value for our variable\n self.someSetting = 0\n\n def setSomething(self, value=None):\n # allow function calls with no arguments and return current value\n # to enable use of self.getFeature.\n if value is not None:\n self.someSetting = value\n return self.someSetting\n\n # Using the registered callback function\n test_instance = SomeClass()\n\n # print value before we change it\n print(test_instance.someSetting)\n\n test_instance.setFeature('someSetting', 10)\n\n # print value after changing it\n print(test_instance.getFeature('someSetting'))\n\n ########################################################\n # example for getting all available cameras\n ########################################################\n print(getAllDevices())\n","sub_path":"pyCameras/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"556080776","text":"#!/usr/bin/python\n\nimport mosquitto\nimport time\nimport os\n\ndef on_connect(mosq, rc):\n if rc == 0:\n print(\"Connected OK\")\n\ndef on_disconnect(mosq, rc):\n print(\"Disconnected.\")\n\ndef on_message(mosq, msg):\n print(\"Got message: \"+msg.payload+\" from topic \"+msg.topic)\n\ndef on_subscribe(mosq, mid, rc):\n print(\"Subscribed ok\")\n\nclient = mosquitto.Mosquitto(\"client-\"+os.uname()[1]+\"-\"+str(os.getpid()))\nprint(\"Created client\")\nclient.on_connect = on_connect\nclient.on_disconnect = on_disconnect\nclient.on_message = on_message\nprint(\"Defined callbacks\")\n\nclient.connect(\"cindy-pc.home\")\nclient.subscribe(\"test/topic\", 1)\n\nwhile client.loop()==0:\n time.sleep(0.1)\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"221278546","text":"from collections import defaultdict\n\nimport pytest\nimport responses\n\nfrom sentry_forwarder import app\n\n\n@responses.activate\ndef test_forwards_requests(client):\n responses.add(responses.POST, 'https://sentry.io/foo/bar/')\n app.config['SAMPLING_RATE'] = 1\n response = client.post('/foo/bar', data=b'foodata', headers={\n 'X-Sentry-Auth': 'test config',\n })\n assert response.status_code == 200\n assert responses.calls[0].request.headers['X-Sentry-Auth'] == 'test config'\n\n\n@responses.activate\ndef test_samples(client):\n responses.add(responses.POST, 'https://sentry.io/foo/bar/')\n\n app.config['SAMPLING_RATE'] = 10\n response_codes = defaultdict(int)\n trials = 200\n for _ in range(trials):\n response = client.post('/foo/bar', data=b'foo')\n response_codes[response.status_code] += 
1\n\n    assert len(response_codes) == 2, 'should only have 200 and 202 responses'\n    # Parameters chosen for the test to have a success rate of 99.99%\n    assert 2 <= response_codes[200] <= 38\n    assert trials - response_codes[202] == response_codes[200]\n\n\n@responses.activate\ndef test_user_agent_specific_sampling_rate(client):\n    ua = 'my-ua/1.0'\n    app.config['USER_AGENT_SAMPLING_RATES'] = {\n        ua: 2,\n    }\n    responses.add(responses.POST, 'https://sentry.io/foo/bar/')\n\n    app.config['SAMPLING_RATE'] = 10\n    response_codes = defaultdict(int)\n    trials = 200\n    for _ in range(trials):\n        response = client.post('/foo/bar', data=b'foo', headers={\n            'User-agent': ua,\n        })\n        response_codes[response.status_code] += 1\n\n    assert len(response_codes) == 2, 'should only have 200 and 202 responses'\n    # Parameters chosen for the test to have a success rate of 99.99%\n    assert 72 <= response_codes[200] <= 128\n    assert trials - response_codes[202] == response_codes[200]\n\n\n@responses.activate\ndef test_reports_error(client):\n    app.config['SAMPLING_RATE'] = 1\n    response = client.post('/foo/bar')\n    assert response.status_code == 503\n\n\n@responses.activate\ndef test_compressed_request(client):\n    app.config['SAMPLING_RATE'] = 1\n    responses.add(responses.POST, 'https://sentry.io/foo/bar/')\n    response = client.post('/foo/bar', headers={\n        'Content-Encoding': 'gzip',\n    }, data=b'\\x00')\n    assert responses.calls[0].request.headers['Content-Encoding'] == 'gzip'\n\n\n\n@responses.activate\ndef test_query_parameters(client):\n    app.config['SAMPLING_RATE'] = 1\n    responses.add(responses.POST, 'https://sentry.io/foo/bar/')\n    response = client.post('/foo/bar?auth=foo')\n    assert responses.calls[0].request.url == 'https://sentry.io/foo/bar/?auth=foo'\n\n\ndef test_root(client):\n    assert client.get('/').status_code == 200\n\n\n@pytest.fixture\ndef client():\n    return app.test_client()\n","sub_path":"salt/sentry-forwarder/test_forwarder.py","file_name":"test_forwarder.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"381450125","text":"import torch\nimport torch.nn as nn\nfrom Net import Network\n\nnetwork = Network()\n# Print the network structure and the size of each layer's weight matrix; the network's string form is inherited from nn.Module\nprint(network)\nfor name, param in network.named_parameters():\n    print(name, '\\t\\t', param.size())\n# Test replacing a weight matrix in the network with a custom one (for simplicity, fc1 in the network was resized to 4x3)\nfc1 = network.fc1\nfc1_in_features = torch.tensor([1, 2, 3, 4], dtype=torch.float32)\nfc1_weight_matrix = torch.tensor([\n    [1, 2, 3, 4],\n    [2, 3, 4, 5],\n    [3, 4, 5, 6]\n], dtype=torch.float32)\nprint(fc1_weight_matrix.matmul(fc1_in_features))\n# Output with the weight matrix that torch initialized randomly\n# fc1 can be called like a function here because Module implements the __call__ method\nprint(fc1(fc1_in_features))\n# We set our own initial weight matrix and print the output; because of the bias it is not exactly 30, 40, 50 - if bias is set to False when defining the layer, the output is exactly 30, 40, 50\nnetwork.fc1.weight = nn.Parameter(fc1_weight_matrix)\nprint(fc1(fc1_in_features))\n","sub_path":"CNN/weight_test.py","file_name":"weight_test.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"1051313","text":"class MaxHeap:\n\n    def __init__(self, heap):\n        self.size = len(heap)\n        self.heap = ['anchor'] + heap\n\n    @property\n    def heap(self):\n        return self.__heap\n\n    @heap.setter\n    def heap(self, heap):\n        if isinstance(heap, list):\n            self.__heap = heap\n        else:\n            raise Exception('Data type error!')\n\n    def add(self, data):\n        self.size += 1\n        self.heap += [data]\n        if self.size > 0:\n            self._float_up(data, self.size)\n\n    def 
_float_up(self, data, indx):\n        # bubble the new value up as long as it is bigger than its parent\n        pindx = indx // 2\n        if pindx > 0 and self.heap[indx] > self.heap[pindx]:\n            self._swap(indx, pindx)\n            self._float_up(data, pindx)\n        return True\n\n    def _swap(self, indx_a, indx_b):\n        self.heap[indx_a], self.heap[indx_b] = self.heap[indx_b], self.heap[indx_a]\n\n    def __repr__(self):\n        return str(self.heap[1:])\n\n\nif __name__ == '__main__':\n\n    heap_input = [25, 11, 16, 8, 7, 5, 7, 3, 2, 4, 5]\n    print()\n    print('Initial heap: ', heap_input)\n    alpha = MaxHeap(heap_input)\n    alpha.add(13)\n    print('Adding the number 13 to the heap gives us: ', alpha)\n    print()\n\n","sub_path":"Data Structures/max_heap.py","file_name":"max_heap.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"526763405","text":"from pick import pick\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\n\r\ndf=pd.DataFrame()\r\nprint(\"\\nEnter the age of the patient\")\r\ndf.loc[0,'age']=input()\r\n\r\ntitle=\"Was the admit an emergency?\"\r\noptions=['Yes','No']\r\ndf.loc[0,'emergency']= pick(options, title, min_selection_count=1)[0]\r\n\r\ntitle=\"Is the patient's insurance provider either MEDICARE or MEDICAID?\"\r\noptions=['Yes','No']\r\ndf.loc[0,'insurance']=pick(options, title, min_selection_count=1)[0]\r\n\r\nprint(\"\\nHow long was the patient in the hospital?\")\r\ndf.loc[0,'timeofstay']=input()\r\n\r\nprint(\"\\nHow many times has the patient been admitted in an emergency ward in the past one year?\")\r\ndf.loc[0,'total_em_6']=input()\r\n\r\ntitle=\"Does the patient suffer from any comorbidity along with his/her primary diagnosis?\"\r\noptions=['Yes','No']\r\ndf.loc[0,'comorbid']=pick(options, title, min_selection_count=1)[0]\r\n\r\ntitle=\"Is the patient's ailment chronic?\"\r\noptions=['Yes','No']\r\ndf.loc[0,'CHRONIC']=pick(options, title, min_selection_count=1)[0]\r\n\r\n\r\ndict_of_CCI={'Diabetes (uncomplicated)':1,'Diabetes (End Organ Damage)':2,'Liver Disease (mild)':1,'Liver Disease (moderate to severe)':3,'Malignancy (Localized/Leukemia/Lymphoma)':2,'Malignancy (Metastatic)':6,\r\n             'AIDS':6,'Chronic Kidney Disease':2,'Congestive Heart Failure':1,'Myocardial Infarction':1,\r\n             'COPD':1,'Peripheral Vascular Disease':1,'Transient Ischemic Attack':1,'Dementia':1\r\n            ,'Hemiplegia':2,'Connective Tissue Disease':1,'Peptic Ulcer Disease':1}\r\n\r\n\r\ntitle=\"Charlson's Comorbidity Index. 
Please select those that apply by selecting with space bar and pressing Enter\"\r\noptions=['Diabetes (uncomplicated)','Diabetes (End Organ Damage)','Liver Disease (mild)','Liver Disease (moderate to severe)','Malignancy (Localized/Leukemia/Lymphoma)','Malignancy (Metastatic)',\r\n         'AIDS','Chronic Kidney Disease','Congestive Heart Failure','Myocardial Infarction',\r\n         'COPD','Peripheral Vascular Disease','Transient Ischemic Attack','Dementia'\r\n        ,'Hemiplegia','Connective Tissue Disease','Peptic Ulcer Disease']\r\n\r\nCCI=pick(options, title, multi_select=True, min_selection_count=0)\r\n\r\nicd9_code=0\r\n\r\nfor x in CCI:\r\n    icd9_code=icd9_code+dict_of_CCI[x[0]]\r\n\r\ndf.loc[0,'icd9_code']=icd9_code\r\n\r\n\r\n\r\ndf=df[['emergency',\r\n       'insurance',\r\n       'timeofstay',\r\n       'age',\r\n       'total_em_6',\r\n       'comorbid',\r\n       'icd9_code',\r\n       'CHRONIC']]\r\n\r\ndef convert_to_binary(x):\r\n    if x=='Yes':\r\n        return 1\r\n    elif x=='No':\r\n        return 0\r\n    else:\r\n        return x\r\n\r\ndf=df.applymap(convert_to_binary)\r\ndf=df.astype(float)\r\n\r\n\r\ndf.loc[0,'age']=df.loc[0,'age']/89\r\ndf.loc[0,'icd9_code']=(df.loc[0,'icd9_code'])/56\r\ndf.loc[0,'timeofstay']=(df.loc[0,'timeofstay'])/294\r\ndf.loc[0,'total_em_6']=(df.loc[0,'total_em_6'])/23\r\n\r\n\r\n\r\n\r\nprint(df)\r\nLogReg = pickle.load(open('interface.sav', 'rb'))\r\n\r\n\r\nl=list(LogReg.predict_proba(df.loc[[0]]).ravel().round(2))\r\n\r\n\r\n\r\nsns.set()\r\nplt.figure(figsize=(15,10))\r\nax=sns.barplot(y=l,x=['No Readmission','Readmission Within One Month'])\r\nplt.ylim(0,1)\r\nfor p in ax.patches:\r\n    height = p.get_height()\r\n    ax.text(p.get_x()+p.get_width()/2.,\r\n            p.get_height()+0.02,\r\n            '{:1.1f}%'.format(height*100),\r\n            ha=\"center\",color='black',fontsize=30)\r\n\r\n\r\ndf=df.astype(float)\r\n\r\n\r\nprint(df)\r\n\r\na=[0.217, 0.251, 3.367, 0.164, 11.104, 0.325, 1.049, 0.074]\r\nb=list(df.loc[[0]].values.ravel())\r\n\r\nprint(a)\r\nprint('\\n',b)\r\n\r\nfactors=[0,0,0,0,0,0,0,0]\r\nfor x in range(0,8):\r\n    factors[x]=(a[x])*(b[x])\r\n\r\ns = sum(factors); norm = [float(i)/s for i in factors]\r\n\r\nsns.set()\r\nplt.figure(figsize=(15,10))\r\nax=sns.barplot(y=norm,x=['Emergency','Insurance','Time Of Stay','Age','Past Emergencies','Comorbidities','CCI','Chronic'])\r\nplt.ylim(0,1)\r\nfor p in ax.patches:\r\n    height = p.get_height()\r\n    ax.text(p.get_x()+p.get_width()/2.,\r\n            p.get_height()+0.02,\r\n            '{:1.1f}%'.format(height*100),\r\n            ha=\"center\",color='black',fontsize=30)\r\n\r\nplt.show()","sub_path":"Hospital Readmission/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"88433655","text":"#!/usr/bin/env python\n#coding:utf-8\n# Author:  mozman\n# Purpose: convex hull algorithm\n# Created: 28.02.2010\n# License: MIT License\n\n__all__ = ['ConvexHull']\n\nclass ConvexHull(object):\n    def __init__(self, points):\n        self._points = ConvexHull._construct(points)\n\n    def __iter__(self):\n        return iter(self._points)\n\n    def values(self):\n        return self._points[:] # list shallow copy\n\n    @staticmethod\n    def _construct(points):\n        def convex_hull(hull):\n            while len(hull) > 2:\n                start_point, check_point, destination_point = hull[-3:] # the last three points\n                if not left_of_line(check_point, start_point, destination_point): # the curve does not turn right\n                    del hull[-2] # remove the penultimate point\n                else:\n                    break\n            return hull\n\n        points = sorted(set(points)) # remove duplicate points\n\n        if len(points) < 3:\n            raise 
ValueError(\"ConvexHull(): Less than 3 unique points given!\")\n\n upper_hull = points[:2] # first two points\n for next_point in points[2:]:\n upper_hull.append(next_point)\n upper_hull = convex_hull(upper_hull)\n lower_hull = [points[-1], points[-2]] # last two points\n\n for next_point in reversed(points[:-2]):\n lower_hull.append(next_point)\n lower_hull = convex_hull(lower_hull)\n upper_hull.extend(lower_hull[1:-1])\n return upper_hull\n\ndef left_of_line(point, p1, p2):\n \"\"\" True if the point self is left of the line p1 -> p2\n \"\"\"\n # check if a and b are on the same vertical line\n if p1[0] == p2[0]:\n # compute # on which site of the line self should be\n should_be_left = p1[1] < p2[1]\n if should_be_left:\n return point[0] < p1[0]\n else:\n return point[0] > p1[0]\n else:\n # get pitch of line\n pitch = (p2[1] - p1[1]) / (p2[0] - p1[0])\n\n # get y-value at c's x-position\n y = pitch * (point[0] - p1[0]) + p1[1]\n\n # compute if point should be above or below the line\n should_be_above = p1[0] < p2[0]\n if should_be_above :\n return point[1] > y\n else:\n return point[1] < y\n","sub_path":"geoalg/convexhull.py","file_name":"convexhull.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"124511490","text":"#!/usr/bin/python\n\nimport os\nimport nltk\nfrom dehyphenate import join_lines\nfrom dehyphenate import join_pages\n\n\ndef main():\n inpath = '../my_dataset/war_and_peace.txt'\n outpath = '../my_dataset/war_and_peace.preprocessed'\n tempfile = '../my_dataset/war_and_peace.temp'\n\n # Getting rid of OCR line breaks\n with open(tempfile, mode='w') as tempout:\n join_pages([inpath], tempout)\n\n # Tokenizing with one sentence per line\n with open(tempfile, mode='r',) as tempin, open(outpath, mode='w') as outfile:\n my_sents = (nltk.sent_tokenize(line) for line in tempin)\n for sentslist in my_sents:\n if sentslist == ['\\n'] or sentslist == []:\n outfile.write('\\n')\n else:\n for sent in sentslist:\n tokenized = nltk.word_tokenize(sent)\n outfile.write(' '.join(tokenized)+'\\n')\n os.remove(tempfile)\n\n\nif __name__ == '__main__':\n main()","sub_path":"my_preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"580968067","text":"from py2neo import Graph as NeoGraph, Node\n\n\nclass Graph(object):\n\n def __init__(self, neo4j_uri):\n self.graph = NeoGraph(neo4j_uri)\n\n def find_node(self, label, node_id):\n args = dict(property_key=\"node_id\", property_value=node_id)\n return self.graph.find_one(label, **args)\n\n def create_user(self, args):\n node = self.find_node(\"User\", args[\"username\"])\n if not node:\n properties = dict(\n node_id=args[\"username\"],\n name=args[\"name\"],\n city=args[\"city\"]\n )\n node = Node(\"User\", **properties)\n self.graph.create(node)\n return node, True\n return node, False\n\n def delete_user(self, user):\n node = self.find_node(\"User\", user)\n if node:\n self.graph.delete(node) \n return True\n return False\n","sub_path":"shared/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"32594560","text":"\nimport os, routines, json, uuid, datetime, threading\nfrom flask import render_template, jsonify, request, redirect, abort, make_response, send_from_directory, flash, 
Response, url_for, copy_current_request_context\nfrom Bio import Phylo\nfrom app import app\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return render_template(\"front.html\")\n\n@app.route('/help')\ndef help():\n    return render_template(\"help.html\")\n\n@app.route('/about')\ndef about():\n    return render_template(\"about.html\")\n\n@app.route('/results')\ndef results():\n    lastresult = routines.getlastresults()\n    return render_template(\"results.html\",lastresults=lastresult)\n\n@app.route('/download')\ndef amlstdownl():\n    return render_template(\"download.html\")\n\n@app.route('/serverstatus')\ndef serverstatus():\n    status = routines.getserverstats()\n    return jsonify(status)\n\n@app.route('/results/getreport', methods=['POST'])\ndef getreport():\n    jobid = request.form.get(\"jobid\")\n    return redirect(\"/results/\"+jobid+\"/loading\")\n\n@app.route('/results/<jobid>')\n@app.route('/results/<jobid>/')\ndef showresults(jobid):\n    #return render_template(\"startjob.html\",jobid=jobid)\n    return redirect(\"/results/\"+jobid+\"/loading\")\n\n@app.route('/results/<jobid>/<step>', methods=['GET'])\n@app.route('/results/<jobid>/<step>/', methods = ['GET'])\ndef showstep(jobid,step):\n    jobinfo = routines.getjobstatus(jobid)\n    paramdict = jobinfo.get(\"params\")\n    errmsgs = jobinfo.get(\"errors\", False)\n    #skips = paramdict.get(\"skip\",[])\n    laststep = request.args.get('laststep','NONE')\n    jobname = routines.findjobinfo(jobid)\n\n    #Redirect if job log doesn't exist or not present in redis\n    if not routines.isjob(jobid) and jobinfo.get(\"status\",\"Waiting in queue\") == \"Waiting in queue\":\n        flash('Invalid Jobid')\n        return redirect('/analyze')\n\n    if step == \"loading\":\n        return render_template(\"startjob.html\", jobid=jobid, jobname=jobname[1], laststep=laststep)\n    elif step == \"step2\":\n        if jobinfo[\"checkpoint\"].upper() == \"W1-STEP2\":\n            return render_template(\"step2.html\",jobid=jobid,jobname=jobname[1])\n        else:\n            return redirect('/results/'+jobid+'/loading')\n    elif step == \"step3\":\n        if jobinfo[\"checkpoint\"].upper() == \"W1-STEP3\":\n            return render_template(\"step3.html\",jobid=jobid,jobname=jobname[1])\n        else:\n            return redirect('/results/'+jobid+'/loading')\n    elif step == \"report\":\n        if \"-F\" in jobinfo[\"checkpoint\"].upper() and not errmsgs:\n            if jobinfo[\"workflow\"] == \"1\":\n                return render_template(\"report.html\",jobid=jobid,jobname=jobname[1])\n            else:\n                return render_template(\"report.html\",jobid=jobid, jobname=jobname[1],workflow=2)\n        elif \"-F\" in jobinfo[\"checkpoint\"].upper() and errmsgs: # currently means any errors at all prevent tree from being shown; separate checkpoint for fatal errors?\n            return render_template(\"report.html\",jobid=jobid,jobname=jobname[1], errmsgs = errmsgs)\n        else:\n            return redirect('/results/'+jobid+'/loading')\n\n\n@app.route('/results/<jobid>/reanalyze', methods=['GET'])\ndef reanalyze(jobid):\n    if request.args.get(\"confirm\",False) and jobid != 'example':\n        routines.reanalyzejob(jobid)\n        jobname = routines.findjobinfo(jobid)\n        return render_template(\"step2.html\", jobid=jobid,jobname=jobname[1])\n    else:\n        return redirect('/results/'+jobid+'/loading')\n\n@app.route('/results/<jobid>/tree', methods=['GET'])\ndef getTree(jobid):\n    format = request.args.get('format','newick')\n    resultdir = os.path.join(app.config['RESULTS_FOLDER'],jobid)\n    resulttree = 'final'\n    if os.path.exists(os.path.join(resultdir,resulttree+'.tree')):\n        if format == 'newick':\n            return send_from_directory(resultdir,resulttree+'.tree',as_attachment=True)\n    # possible tree conversions?\n    # elif format == 'nexml':\n    #     Phylo.convert(os.path.join(resultdir,resulttree+'.tree'),'newick', os.path.join(resultdir,resulttree+'_nexml.xml'),'nexml')\n    #     return send_from_directory(resultdir,resulttree+'_nexml.xml',as_attachment=True)\n    # elif format == 'phyloxml':\n    #     Phylo.convert(os.path.join(resultdir,resulttree +'.tree'), 'newick', os.path.join(resultdir,resulttree +'_phyloxml.xml'), 'phyloxml')\n    #     return send_from_directory(resultdir, resulttree + '_phyloxml.xml', as_attachment=True)\n    else:\n        return \"false\"\n\n\n# wrap all the downloads except for the tree into one page?\n@app.route('/results/<jobid>/downloadorgs', methods=['GET'])\ndef downloadorgs(jobid):\n    format = request.args.get('format','json')\n    resultdir = os.path.join(app.config['RESULTS_FOLDER'],jobid)\n    if format == 'json': # narrow down like the txt version?\n        return send_from_directory(resultdir, 'reflist.json', as_attachment=True)\n    elif format == 'txt':\n        if os.path.exists(os.path.join(resultdir, 'reftext.txt')):\n            return send_from_directory(resultdir,'reftext.txt',as_attachment=True)\n        else:\n            jsonpath = os.path.join(resultdir,'reflist.json')\n            routines.jsontotsv(jsonpath,jobid) # make a more versatile version of that?\n            return send_from_directory(resultdir, 'reftext.txt', as_attachment=True)\n\n@app.route('/results/<jobid>/downloadmash')\ndef downloadmash(jobid):\n    resultdir = os.path.join(app.config['RESULTS_FOLDER'], jobid)\n    return send_from_directory(resultdir, 'mash_distances.txt', as_attachment=True)\n\n@app.route('/results/<jobid>/downloadlists', methods=['GET'])\ndef downloadlists(jobid):\n    downl = request.args.get('downl')\n    resultdir = os.path.join(app.config['RESULTS_FOLDER'], jobid)\n    if downl == 'mlstlist':\n        if os.path.exists(os.path.join(resultdir,'mlst_genes.txt')):\n            return send_from_directory(resultdir,'mlst_genes.txt', as_attachment=True)\n        else:\n            routines.mlsttsv(jobid)\n            return send_from_directory(resultdir, 'mlst_genes.txt', as_attachment=True)\n    elif downl == 'alignment':\n        if os.path.exists(os.path.join(resultdir, jobid+'_alignments.zip')):\n            return send_from_directory(resultdir, jobid+'_alignments.zip', as_attachment=True)\n        else:\n            routines.zipalignments(jobid)\n            return send_from_directory(resultdir, jobid + '_alignments.zip', as_attachment=True)\n\n@app.route('/results2/<jobid>/refs')\ndef getrefs(jobid):\n    if os.path.exists(os.path.join(app.config['RESULTS_FOLDER'],'genuslist_example2.json')):\n        return send_from_directory(app.config['RESULTS_FOLDER'],'genuslist_example2.json')\n@app.route('/results2/selectgenus', methods=['POST'])\ndef selectgenus():\n    jobid = request.form.get(\"jobinfo\")\n    newref = request.form.get(\"genusoptions\")\n    routines.updatejob(jobid,newref)\n    genusdict={}\n    with open(os.path.join(app.config['RESULTS_FOLDER'],'genuslist_example2.json'),'r') as genusfile:\n        genusdict = json.load(genusfile)\n    genusdict[\"genuslist\"] = {newref:genusdict[\"genuslist\"][newref]}\n    genusdict[\"maxgenus\"] = newref\n    with open(os.path.join(app.config['RESULTS_FOLDER'],'genuslist_example2.json'),'w') as fileout:\n        json.dump(genusdict,fileout,indent=2)\n    return json.dumps({\"status\":1,\"newmax\":newref})\n\n@app.route('/results2/refgenus')\ndef refgenus():\n    if os.path.exists(os.path.join(app.config['RESULTS_FOLDER'],'acceleratedrefs.json')):\n        return send_from_directory(app.config['RESULTS_FOLDER'],'acceleratedrefs.json')\n    else:\n        return jsonify([])\n\n@app.route('/analyze')\ndef analyze():\n    return render_template(\"analyze.html\")\n\n@app.route('/analyze2')\ndef analyze2():\n    return render_template(\"analyze2.html\")\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n    #print request.files.getlist(\"seqfile1\")\n    filename = routines.getinfile()\n    name = [os.path.split(x)[-1] for x in filename]\n    # filename = routines.getNCBIgbk(request.form[\"ncbiacc1\"])\n    return json.dumps({\"filename\": filename,\"name\": name})\n@app.route('/startjob', methods=['POST'])\ndef startjob():\n    jobid = unicode(uuid.uuid4())\n    jobdict = {\"id\": jobid, \"workflow\": request.form.get(\"workflow\"), \"genomes\": request.form.getlist('upfiles'),\n               \"skip\":request.form.get('skip2',\"\")+\",\"+request.form.get('skip3',\"\"), \"reference\": request.form.get('genusselect','NA'),\n               \"bootstr\":request.form.get('boots',0), \"filtmlst\": request.form.get('filtmlst',''),\n               \"mode\":request.form.get('optradio',\"concatenated\"),\"modelfind\":request.form.get(\"modelfinder\",\"GTR\"), \"fastalign\":request.form.get(\"fastalign\",\"\")}\n\n    os.mkdir(os.path.join(app.config['RESULTS_FOLDER'],jobid))\n    resp = make_response(redirect('/results/' + jobid + '/loading'))\n\n    #Emailer\n    email = request.form.get('email', False)\n    if email:\n        # Run threaded so mail server resp does not block process\n        @copy_current_request_context\n        def sendmail(x, y, z):\n            routines.sendnotifymail(x, y, z)\n\n        mailer = threading.Thread(name='mail_sender', target=sendmail, args=(\"\", jobid, email))\n        mailer.start()\n\n    #Jobtitle\n    validchars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890 _-()\"\n    jobtitle = request.form.get('jobname', False)\n    jobtitle = ''.join([c for c in str(jobtitle[:30]) if c in validchars])\n    if jobtitle and len(jobtitle):\n        with open(os.path.join(app.config['RESULTS_FOLDER'], jobid, \"jobtitle.txt\"), \"w\") as namefile:\n            namefile.write(jobtitle + \"\\n\")\n\n    #Recent results\n    keeplink = request.form.get('keeplink',False)\n    if keeplink and keeplink.lower() != \"false\":\n        lastresult = request.cookies.get('automlst.lastresult')\n        if lastresult and jobid not in lastresult:\n            #Limit number of results kept\n            lastresult = lastresult.split(\";\")\n            lastresult.append(jobid)\n            lastresult = \";\".join(lastresult[-20:])\n        else:\n            lastresult = jobid\n        resp.set_cookie('automlst.lastresult',lastresult)\n\n    #Workflow redirecting\n    if request.form.get(\"workflow\") == \"1\" or request.form.get(\"workflow\") == \"2\":\n        automlstjob = routines.addjob(**jobdict)\n        #with open(os.path.join(app.config['RESULTS_FOLDER'],'examplein.json'),'w') as uploadfile:\n        #    json.dump(jobdict,uploadfile)\n        return resp\n    else:\n        flash('Invalid workflow')\n        return redirect('/analyze')\n\n@app.route('/results/<jobid>/step2/orgs', methods=['GET'])\ndef getOrgs(jobid):\n    orgstart = int(request.args.get('start',0))\n    if os.path.exists(os.path.join(app.config['RESULTS_FOLDER'],jobid,'userlist.json')):\n        with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'reflist.json'),'r') as reffile, open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'userlist.json'),'r') as userfile:\n            refdict = json.load(reffile)\n            userdict = json.load(userfile)\n            tempdict = [ref for ref in refdict[\"reforgs\"] if ref[\"id\"] in userdict[\"selspecies\"] and not ref in refdict[\"reforgs\"][0:(orgstart+500)]]\n            refdict[\"reforgs\"] = refdict[\"reforgs\"][orgstart:(orgstart + 200)]\n            refdict[\"outgroups\"] = [rec for rec in refdict[\"outgroups\"] if\n                                    str(rec[\"phylid\"]) != \"N/A\" and str(rec[\"familyid\"]) != \"N/A\" and str(\n                                        rec[\"orderid\"]) != \"N/A\" and str(rec[\"genusid\"]) != \"N/A\"]\n\n            refdict[\"reforgs\"].extend(tempdict)\n            refdict.update(userdict)\n            return jsonify(refdict)\n    elif os.path.exists(os.path.join(app.config['RESULTS_FOLDER'],jobid,'reflist.json')):\n        with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'reflist.json'),'r') as firstref:\n            refdict = json.load(firstref)\n            refdict[\"reforgs\"] = refdict[\"reforgs\"][orgstart:(orgstart+200)]\n            refdict[\"outgroups\"] = [rec for rec in refdict[\"outgroups\"] if str(rec[\"phylid\"]) != \"N/A\" and str(rec[\"familyid\"]) != \"N/A\" and str(rec[\"orderid\"]) != \"N/A\" and str(rec[\"genusid\"]) != \"N/A\"]\n            return jsonify(refdict)\n    return jsonify({\"error\":\"No List found.\"})\n\n@app.route('/results/<jobid>/step2/orgin', methods=['POST'])\n@app.route('/results/<jobid>/reanalyze/orgin', methods=['POST'])\n@app.route('/results/<jobid>/orgin', methods=['POST'])\ndef orgin(jobid):\n    species = request.form.getlist('specieslist')\n    outgroups = request.form.getlist('outgrlist')\n    jobid = request.form.get('jobinfo')\n    with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'userlist.json'),'w') as userfile:\n        json.dump({\"selspecies\":species, \"seloutgroups\":outgroups},userfile)\n    with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'automlst.log'),'a') as joblog:\n        joblog.write('\\n'+str(datetime.datetime.now())+' - INFO - JOB_CHECKPOINT::w1-3 \\n'+str(datetime.datetime.now())+' - INFO - JOB_STATUS::Resuming job...\\n') # is this still right?\n    routines.readdjob(jobid)\n    return redirect('/results/'+jobid+'/loading?laststep=step2') # when is checkpoint set? Might get user stuck in a loop of submitting/getting redirected\n\n@app.route('/results/<jobid>/step2/outgroups', methods=['GET'])\ndef outgrs(jobid):\n    ingroups=[]\n    commongr = request.args.get('group', False)\n    multigroups = request.args.get('multiple', False)\n    if multigroups:\n        commonid = (request.args.get('id',False)).split(',')\n        ingroups = commonid\n    else:\n        commonid = request.args.get('id', False)\n        ingroups.append(str(commonid))\n    outgrlimit = request.args.get('limit',1000)\n    startindex = request.args.get('start',0)\n    if commongr and commonid:\n        with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'reflist.json'),'r') as outrefs:\n            outdict = json.load(outrefs)\n        refrec = outdict['reforgs']\n        newlist = [rec for rec in refrec if str(rec[commongr+\"id\"]) not in ingroups and str(rec[commongr+\"id\"]) != \"N/A\" and not rec in refrec[0:(startindex+500)]]\n        return jsonify(newlist[0:500])\n    return send_from_directory(app.config['RESULTS_FOLDER'],'outgroups.json')\n\n@app.route('/results/<jobid>/step3/genes')\ndef getgenes(jobid):\n    if os.path.exists(os.path.join(app.config['RESULTS_FOLDER'],jobid,'mlstpriority.json')):\n        return send_from_directory(os.path.join(app.config['RESULTS_FOLDER'],jobid),'mlstpriority.json')\n\n@app.route('/results/<jobid>/step3/genein', methods=['POST'])\ndef genein(jobid):\n    jobid = request.form.get('jobinfo')\n    genes = request.form.getlist('mlstlist')\n    radioval = request.form.get('optradio')\n    rmorgs = request.form.get('removeorgs','')\n    with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'usergenes.json'),'w') as usergenes:\n        json.dump({\"selection\":genes,\"mode\":radioval,\"delorgs\":rmorgs.split(\",\")},usergenes)\n    with open(os.path.join(app.config['RESULTS_FOLDER'],jobid,'automlst.log'),'a') as joblog:\n        joblog.write('\\n'+str(datetime.datetime.now())+' - INFO - JOB_STATUS::Resuming job...\\n')\n        joblog.write(str(datetime.datetime.now())+' - INFO - JOB_CHECKPOINT::w1-5\\n')\n    routines.readdjob(jobid)\n    return redirect('/results/'+jobid+'/loading?laststep=step3')\n\n@app.route('/results/<jobid>/mash')\ndef showmash(jobid):\n    resultpath = os.path.join(app.config['RESULTS_FOLDER'],jobid)\n    tsvpath = os.path.join(resultpath,'mash_distances.txt')\n    if os.path.exists(tsvpath):\n        jsondata = routines.tsvtojson(tsvpath)\n        jsondata[\"data\"] = [rec for rec in jsondata[\"data\"] if float(rec[4])>=0.65]\n        return jsonify(jsondata)\n    else:\n        nodata = {}\n        nodata[\"data\"] = []\n        return jsonify(nodata)\n\n@app.route('/aniclades')\ndef aniclades(): # move to static?\n    if os.path.exists(os.path.join(app.config['RESULTS_FOLDER'], 'aniclades.json')):\n        with open(os.path.join(app.config['RESULTS_FOLDER'], 'aniclades.json'),'r') as anifile:\n            anijson = json.load(anifile)\n        return jsonify(anijson)\n\n@app.route('/jobstatus/<jobid>')\n@app.route('/jobstatus/<jobid>/')\ndef status(jobid):\n    jobstat = routines.getjobstatus(jobid)\n    workflow = jobstat[\"workflow\"]\n    paramdict = jobstat.get(\"params\")\n    if jobstat[\"checkpoint\"].upper() == \"W1-STEP2\" and workflow == \"1\":\n        #redirdict = {\"redirect\":\"step2\"}\n        #return jsonify(redirdict)\n        jobstat[\"redirect\"] = \"step2\"\n        return jsonify(jobstat)\n    #elif jobstat[\"checkpoint\"].upper() == \"W1-2\" and \"skip2\" in skips and workflow == \"1\":\n        #jobstat[\"skip\"] = \"c1\"\n        #return jsonify(jobstat)\n    elif jobstat[\"checkpoint\"].upper() == \"W1-STEP3\" and workflow == \"1\":\n        #redirdict = {\"redirect\":\"step3\"}\n        #return jsonify(redirdict)\n        jobstat[\"redirect\"] = \"step3\"\n        return jsonify(jobstat)\n    #elif jobstat[\"checkpoint\"].upper() == \"W1-3\" and \"skip3\" in skips and workflow == \"1\":\n        #jobstat[\"skip\"] = \"c2\"\n        #return jsonify(jobstat)\n    # elif jobstat[\"checkpoint\"].upper() == \"W1-F\" or jobstat[\"checkpoint\"].upper() == \"W2-F\":\n    elif \"-F\" in jobstat[\"checkpoint\"].upper():\n        #redirdict = {\"redirect\":\"report\"}\n        #return jsonify(redirdict)\n        jobstat[\"redirect\"] = \"report\"\n        return jsonify(jobstat)\n    else:\n        return jsonify(jobstat)\n\n# probably needs to be updated; simply redirect to completed example job instead? 
If so, ensure it can't be reanalyzed\n# @app.route('/results/example/report')\n# def example():\n# return render_template(\"example.html\")\n\n\n@app.errorhandler(404)\n@app.errorhandler(401)\n@app.errorhandler(500)\ndef page_not_found(e):\n return render_template('error.html',title=\"\",errormsg=e)\n\n","sub_path":"webapp/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213986310","text":"'''\nThis file contains module for tweets related capability only\nCapabilities:\n Fetch tweets and retweets by ID\n Search tweets by search query\n'''\n\n'''\nBuilt-in modules\n'''\nimport pdb\nimport os\nimport traceback\nimport urllib.parse\nimport time\nfrom datetime import datetime\nimport json\nimport time\n\n'''\nInitialization code\n'''\ndef __init_program():\n print(\"CWD is {}\".format(os.getcwd()))\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir(dname)\n print(\"After change, CWD is {}\".format(os.getcwd()))\n\n__init_program()\n\n'''\nUser defined modules\n'''\nfrom config.load_config import load_config\nconfig_file_name = 'tweet_env.py'\nload_config(config_file_name)\n\ndep_check = os.getenv(\"DEPENDENCY_CHECK\", \"True\")\nif dep_check.lower() == \"true\":\n from installer import dependency_check\n\nfrom libs.cypher_store import TweetCypherStoreIntf\n#from file_store import DMFileStoreIntf\nfrom libs.twitter_errors import TwitterRateLimitError, TwitterUserNotFoundError\n\nfrom libs.twitter_access import fetch_tweet_info, get_reponse_header\nfrom libs.twitter_logging import logger\nfrom libs.tweet_filter_handler import TweetFilterHandler\n\nclass TweetsFetcher:\n \"\"\"\n This class uses expert pattern. 
\n    It provides functionality for fetching Tweets and related info\n    It stores Tweets info to Graph Database\n    '''\n    def __init__(self, filename='tweet_ids.txt', database='neo4j'):\n        print(\"Initializing TweetsFetcher object\")\n        self.filename = filename\n        self.database = database\n        self.tweetStoreIntf = TweetCypherStoreIntf()\n        self.grandtotal = 0 #Tracks the count of total tweets stored in DB\n        self.filterhandler = TweetFilterHandler()\n        pass\n\n    def __process_tweet_fetch_cmd(self, cmd_args):\n        print('Processing Tweet fetch command [{}]'.format(cmd_args))\n        retweet = False\n        forced = True\n        if 'retweets_fetch' in cmd_args and cmd_args['retweets_fetch'] == \"True\":\n            retweet = True\n        if 'forced' in cmd_args and cmd_args['forced'] == \"False\":\n            forced = False\n\n        if 'id' not in cmd_args:\n            logger.error(\"Invalid input file format for {} tweets cmd\".format(cmd_args))\n            return\n        id = cmd_args['id']\n        self.__import_tweets_by_tweet_id(tweet_id=id, fetch_retweet=retweet, forced=forced)\n\n\n    def __process_tweet_search_cmd(self, cmd_args):\n        print('Processing Tweet search command [{}]'.format(cmd_args))\n        categories_list = []\n        sync_with_store = False\n        tweet_filter = {}\n        if 'categories_list' in cmd_args:\n            categories_list = cmd_args['categories_list']\n        if 'sync_with_store' in cmd_args and cmd_args['sync_with_store'].lower() == \"true\":\n            sync_with_store = True\n\n        if 'search_term' not in cmd_args:\n            logger.error(\"Invalid input file format for {} tweets cmd\".format(cmd_args))\n            return\n        if 'tweet_filter' in cmd_args:\n            tweet_filter = cmd_args['tweet_filter']\n        search_term = cmd_args['search_term']\n        self.import_tweets_search(search_term, categories_list, sync_with_store, tweet_filter=tweet_filter)\n\n\n    def __process_command(self, command_json):\n        print('Processing command [{}]'.format(command_json))\n        if 'tweet_search' in command_json:\n            command_args = command_json['tweet_search']\n            self.__process_tweet_search_cmd(command_args)\n        elif 'tweet_fetch' in command_json:\n            command_args = command_json['tweet_fetch']\n            self.__process_tweet_fetch_cmd(command_args)\n        \n\n    def handle_tweets_command(self):\n        print('Importing Tweets for IDs in file:{}'.format(self.filename))\n        try:\n            wkg_filename = self.filename+'.wkg'\n            os.rename(self.filename, wkg_filename)\n            json_data = []\n            with open(wkg_filename) as f:\n                json_data = [json.loads(line) for line in f]\n            for command_json in json_data:\n                self.__process_command(command_json)\n        except FileNotFoundError as e:\n            print(\"Skipping Tweet IDs import since there is no file with {}\".format(self.filename))\n\n    def __process_tweets_fetch(self, tweet_id):\n        print(\"Processing {} Tweet\".format(tweet_id))\n        tweets = None\n        base_url = 'https://api.twitter.com/1.1/statuses/show/'+tweet_id\n        headers = {'accept': 'application/json'}\n\n        params = {\n            'result_type': 'recent',\n            'tweet_mode':'extended'\n        }\n        \n        tweet_url = '%s?%s' % (base_url, urllib.parse.urlencode(params))\n        tweet_json = fetch_tweet_info(tweet_url)\n        print(type(tweet_json))\n        if(tweet_json):\n            tweets = [tweet_json]\n        return tweets\n\n    def __process_tweets_search(self, search_term, max_id=None, count=200):\n        print(\"Processing [{}] Tweet search\".format(search_term))\n        base_url = 'https://api.twitter.com/1.1/search/tweets.json'\n        headers = {'accept': 'application/json'}\n\n        params = {\n            'q': search_term,\n            'count': count,\n            'result_type': 'recent'\n        }\n        if (max_id):\n            params['max_id'] = max_id\n\n        tweet_url = '%s?%s' % (base_url, urllib.parse.urlencode(params))\n        #print(tweet_url)\n        response_json = 
fetch_tweet_info(tweet_url)\n\n # Keep status objects.\n tweets = response_json['statuses']\n return tweets\n\n def __process_retweets_fetch(self, tweet_id, count=100):\n print(\"Processing Retweet for {} Tweet\".format(tweet_id))\n base_url = \"https://api.twitter.com/1.1/statuses/retweets/\"+tweet_id+\".json\"\n headers = {'accept': 'application/json'}\n tweets = None\n\n params = {\n 'count': count,\n 'result_type': 'recent',\n 'tweet_mode':'extended'\n }\n\n tweet_url = '%s?%s' % (base_url, urllib.parse.urlencode(params))\n \n tweet_json = fetch_tweet_info(tweet_url)\n print(type(tweet_json))\n if(tweet_json):\n tweets = tweet_json\n return tweets\n\n\n def __import_tweets_by_tweet_id(self, tweet_id, fetch_retweet=False, forced=False):\n print('Importing Tweet for {}'.format(tweet_id))\n count = 200\n lang = \"en\"\n tweets_to_import = True\n retweets_to_import = fetch_retweet\n max_id = 0\n since_id = 0\n total_count = 0\n\n if self.tweetStoreIntf.is_tweet_exists(tweet_id) == True and not forced:\n print(\"Skipping as there is already entry for {} tweet ID \".format(tweet_id))\n return\n\n print('Fetching tweet detail for ID:{}'.format(tweet_id))\n while tweets_to_import:\n try:\n print(\"Processing tweet fetch for {}\".format(tweet_id))\n tweets = self.__process_tweets_fetch(tweet_id)\n if tweets:\n tweets_to_import = False\n print(\"{} Tweets to be added in DB\".format(len(tweets)))\n self.tweetStoreIntf.store_tweets_info(tweets)\n total_count += len(tweets)\n else:\n print(\"No tweets found.\")\n tweets_to_import = False\n\n except TwitterRateLimitError as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n # Sleep for 15 minutes - twitter API rate limit\n print('Sleeping for 15 minutes due to quota')\n time.sleep(900)\n continue\n\n except Exception as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n time.sleep(30)\n continue\n\n\n while retweets_to_import:\n try:\n print(\"Processing retweet fetch for {}\".format(tweet_id))\n re_tweets = self.__process_retweets_fetch(tweet_id)\n \n if re_tweets:\n retweets_to_import = False\n print(\"{} Retweets to be added in DB\".format(len(re_tweets)))\n self.tweetStoreIntf.store_tweets_info(re_tweets)\n total_count += len(re_tweets)\n \n else:\n print(\"No retweets found.\")\n retweets_to_import = False \n\n except TwitterRateLimitError as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n # Sleep for 15 minutes - twitter API rate limit\n print('Sleeping for 15 minutes due to quota')\n time.sleep(900)\n continue\n\n except Exception as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n time.sleep(30)\n continue\n logger.info(\"[stats] {} tweets for [{}]\".format(total_count, tweet_id))\n self.grandtotal += total_count\n\n def import_tweets_search(self, search_term, categories_list, sync_with_store, tweet_filter):\n print(\"Processing Tweets import for search key [{}]\".format(search_term))\n frequency = 100\n tweets_to_import = True\n max_id = None\n total_count = 0\n start_time = datetime.now()\n search_term_query = self.tweetStoreIntf.util_get_search_term_query(search_term)\n if sync_with_store:\n print(\"Syncing with store\")\n min_id = self.tweetStoreIntf.get_tweets_min_id(search_term_query)\n if(min_id):\n max_id = int(min_id) - 1\n\n while tweets_to_import:\n try:\n\n curr_limit = get_reponse_header('x-rate-limit-remaining')\n if(curr_limit and int(curr_limit) <= frequency+1):\n print(\"Sleeping as remaining x-rate-limit-remaining is {}\".format(curr_limit))\n 
time_diff = (datetime.now()-start_time).seconds\n remaining_time = (15*60) - time_diff\n sleeptime = remaining_time + 2\n print(\"sleeping for {} seconds to avoid threshold. Current time={}\".format(sleeptime, datetime.now()))\n if(sleeptime > 0):\n time.sleep(sleeptime)\n start_time = datetime.now()\n print(\"Continuing after threshold reset\")\n\n tweets = self.__process_tweets_search(search_term=search_term, max_id=max_id, count=frequency)\n if len(tweets) > 0:\n tweets_to_import = True\n plural = \"s.\" if len(tweets) > 1 else \".\"\n print(\"Found \" + str(len(tweets)) + \" tweet\" + plural)\n total_count += len(tweets)\n print(\"Found total {} tweets for {} search\\n\".format(total_count, search_term))\n\n if not max_id:\n max_id = tweets[0]['id']\n\n for tweet in tweets:\n max_id = min(max_id, tweet['id']) \n #decrement one less so that same tweet is not sent again in next call.\n max_id = max_id - 1\n if tweet_filter:\n filtered_tweets = self.filterhandler.apply_filters(tweets,tweet_filter)\n else:\n filtered_tweets = tweets\n print(\"{} Tweets to be stored out of {} tweets\".format(len(filtered_tweets), len(tweets)))\n if(len(filtered_tweets)):\n self.tweetStoreIntf.store_tweets_info(filtered_tweets, categories_list)\n print(\"{} Search tweets added to graph for {}!\".format(len(filtered_tweets), search_term))\n else:\n print(\"skipping as none found from {} total tweets\".format(len(tweets)))\n else:\n print(\"No search tweets found for %s.\" % (search_term))\n if(not total_count):\n logger.info(\"No search tweets found for -->> %s\" % (search_term))\n tweets_to_import = False\n\n except TwitterRateLimitError as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n # Sleep for 15 minutes - twitter API rate limit\n print('Sleeping for 15 minutes due to quota. Current time={}'.format(datetime.now()))\n time.sleep(900)\n continue\n\n except Exception as e:\n logger.exception(e)\n print(traceback.format_exc())\n print(e)\n time.sleep(30)\n continue\n logger.info(\"[stats] {} tweets for [{}]\".format(total_count, search_term))\n self.grandtotal += total_count\n\n\ndef main():\n print(\"Starting Tweet fetcher. \\nConfig file should be [config/{}]\\n\".format(config_file_name))\n tweets_fetch_stats = {'processed': 0}\n tweetsFetcher = TweetsFetcher()\n try:\n tweetsFetcher.handle_tweets_command()\n #tweetsFetcher.import_tweets_search('RT @actormanojjoshi: काग़ज़ मिले की')\n finally:\n tweets_fetch_stats['processed'] = tweetsFetcher.grandtotal\n logger.info(\"[tweets_fetcher stats] {}\".format(tweets_fetch_stats))\n\nif __name__ == \"__main__\": main()\n","sub_path":"docker/features/tweets_fetcher.py","file_name":"tweets_fetcher.py","file_ext":"py","file_size_in_byte":13540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"557122717","text":"import mimetypes\n\ndef getExtensionsForType(generalType):\n\tfor ext in mimetypes.types_map:\n\t\tif mimetypes.types_map[ext].split('/')[0] == generalType:\n\t\t\tyield ext\n\t\tyield '.gifv'\n\nmimetypes.init()\nfiletypes = tuple(getExtensionsForType('image'))\n\nurl = 'https://40.media.tumblr.com/9b488440d40f8aee15bcf865b888a292/tumblr_o06u8mq3FV1v27d59o1_540.png'\n\nif url.split('?')[0].endswith(filetypes):\n\tfileExt = '.' 
+ url.split('?')[0].split('.')[-1]\n\nprint(fileExt)\n","sub_path":"me_irl-image-downloader-master/testing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"381916172","text":"#!/usr/bin/env python3\n\nimport sys, math, re\n\nif len(sys.argv)!=3:\n\tprint(\"Usage: {0} <bf-file> <heatmap-file>\".format(sys.argv[0]),file=sys.stderr)\n\tsys.exit(1)\n\nbffname=sys.argv[1]\nhmfname=sys.argv[2]\n\n#def setgray(brightness):\n#\tramp=[231,230,225,224,219,218,213,212,207,206,201,200,199,198,197,196]\n#\tcode=\"\\x1B[38;5;{0}m\".format(ramp[int(brightness*(len(ramp)-1))])\n#\tbrightness=int(brightness*25)\n#\t#if brightness==0: code=\"\\x1B[0;30m\"\n#\t#elif brightness==25: code=\"\\x1B[1;37m\"\n#\t#else: code=\"\\x1B[38;5;{0}m\".format(brightness-1+232)\n#\tprint(code,end=\"\")\n\n#for (c,count) in source:\n#\tsetgray(math.log(count/maxcount*3+1)/math.log(4))\n#\tprint(c,end=\"\")\n\ndef htmlconvert(ch):\n\tif ch==\"<\": return \"&lt;\"\n\telif ch==\">\": return \"&gt;\"\n\telif ch==\"\\n\": return \"<br>\\n\"\n\telif ch==\" \": return \"&nbsp;\"\n\telse: return ch\n\ndef hexcolor(r,g,b):\n\tr,g,b=[int(x*255) for x in [r,g,b]]\n\treturn \"#{0:0>2}{1:0>2}{2:0>2}\".format(hex(r)[2:],hex(g)[2:],hex(b)[2:])\n\ndef colorfor(freq):\n\tramp=[\n\t\t(0.0, (1.0,1.0,1.0)),\n\t\t(0.3, (0.3,0.5,0.5)),\n\t\t(0.7, (0.2,0.2,1.0)),\n\t\t(0.85, (0.6,0.1,1.0)),\n\t\t(1.0, (1.0,0.0,0.0))\n\t]\n\tfor i in range(len(ramp)):\n\t\tif ramp[i][0]>=freq:\n\t\t\tbreak\n\tif i==0:\n\t\treturn hexcolor(*ramp[i][1])\n\tc1=ramp[i-1][1]\n\tc2=ramp[i][1]\n\tt=(freq-ramp[i-1][0])/(ramp[i][0]-ramp[i-1][0])\n\treturn hexcolor(*[c1[i]*(1-t)+c2[i]*t for i in [0,1,2]])\n\nwith open(bffname) as f:\n\tsource=[c for c in f.read()]\n\nmaxcount=0\nwith open(hmfname) as f:\n\tfor line in f:\n\t\tidx,count=[int(x) for x in line.split(\" \")]\n\t\tsource[idx]=(htmlconvert(source[idx]),count)\n\t\tmaxcount=max(maxcount,count)\n\nprint(\"\"\"<!DOCTYPE html>\n<html>\n<head>\n<meta charset='utf-8'>\n<title>Heatmap for execution of \"\"\"+bffname+\"\"\"</title>\n</head>\n\n<body>\n<div class='header'>\n    <h1>\"\"\"+bffname+\"\"\"</h1>\n    <p>\n\tFrequency:\n\t(% of maximum)\n    </p>\n</div>\n<div class='source'>\"\"\")\n\ni=0\nfor (c,count) in source:\n\tif i==0 or source[i-1][1]!=count:\n\t\tprint(\n\t\t\t\"<span style='color:{0}' title='{1} ({2}% of maximum)'>\"\n\t\t\t\t.format(colorfor(count/maxcount),count,int(count/maxcount*100)),\n\t\t\tend=\"\")\n\tprint(c,end=\"\")\n\tif i==len(source)-1 or source[i+1][1]!=count:\n\t\tprint(\"</span>\",end=\"\")\n\ti+=1\n\nprint(\"\"\"\n</div>\n</body>\n</html>\"\"\")\n","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"22528333","text":"#!/usr/bin/env python\n\nimport glob\nimport os\nimport subprocess\nimport sys\n\nimport file_utils\n\nRULES_DIR = os.path.join(\n    os.path.dirname(file_utils.full_path(__file__)), 'rules.d')\nUDEV_DIR = '/etc/udev/rules.d'\n\nRULE_DIR = file_utils.path_joiner(RULES_DIR)\nUDEV_DIR = file_utils.path_joiner(UDEV_DIR)\n\n\ndef main(_):\n  file_utils.must_be_root()\n  rule_files = glob.glob(RULE_DIR('*'))\n\n  for rule in rule_files:\n    filename = os.path.basename(rule)\n    target = UDEV_DIR(filename)\n    if os.path.exists(target):\n      if file_utils.is_symlink(target):\n        if file_utils.resolve_path(target) != rule:\n          print(f'Symlink {rule} exists')\n        continue\n      raise ValueError(f'Path {target} exists and does not link to {rule}')\n    print(f'Symlinking {rule}->{target}')\n    os.symlink(rule, target)\n  subprocess.call(['service', 'udev', 'restart'])\n  subprocess.call(['udevadm', 'control', '--reload-rules'])\n  subprocess.call(['udevadm', 'trigger'])\n\n\nif __name__ == '__main__':\n  main(sys.argv)\n","sub_path":"udev.py","file_name":"udev.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"197404758","text":"from input_validator import InputValidator\n\nclass Ui:\n    def __init__(self):\n        self.input_validator = InputValidator()\n\n    def greet(self):\n        print(\"Welcome to the Python TicTacToe\")\n\n    def print_board(self, board):\n        current_board = board.all_spots()\n        print(\"\"\"\n        {} | {} | {}\n        -----------\n        {} | {} | {}\n        -----------\n        {} | {} | {}\n        \"\"\".format(*current_board))\n\n    def get_input(self, text):\n        return input(text)\n\n    def choose_marker(self):\n        text = 'Choose the symbol you want to play with: X or O. Enter x or o:\\n'\n        symbol = None\n        while not symbol:\n            symbol = self.get_input(text).upper()\n            if self.input_validator.valid_marker(symbol):\n                return symbol\n            else:\n                symbol = None\n\n    def choose_move(self, board):\n        text = \"Enter a number to make your move:\\n\"\n        move = None\n        while not move:\n            move = self.get_input(text)\n            if self.input_validator.valid_move(move) and int(move) in board.available_spots():\n                return move\n            else:\n                move = None\n\n    def game_over(self):\n        print(\"Game over!\")\n\n    def declare_winner(self, board, marker):\n        if board.available_spots():\n            print(f'{marker} wins!')\n        else:\n            print(\"It's a tie\")\n","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"424405676","text":"\"\"\"empty message\n\nRevision ID: c2f448f8f2f8\nRevises: 4b6fea9af9dd\nCreate Date: 2019-02-17 17:12:46.177664\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c2f448f8f2f8'\ndown_revision = '4b6fea9af9dd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('manotes_sharing_note',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('giver_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('note_id', sa.Integer(), nullable=False),\n sa.Column('creation_date', sa.DateTime(), nullable=True),\n sa.Column('update_date', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['giver_id'], ['manotes_users.id'], ),\n sa.ForeignKeyConstraint(['note_id'], ['manotes_notes.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['manotes_users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('manotes_notes', sa.Column('shared', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('manotes_notes', 'shared')\n op.drop_table('manotes_sharing_note')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c2f448f8f2f8_.py","file_name":"c2f448f8f2f8_.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"529971542","text":"def funnyString(s):\r\n a=[]\r\n b=[]\r\n original=list(s)\r\n for i in original:\r\n a.append(ord(i))\r\n for i in range(len(a)-1):\r\n b.append(abs(a[i]-a[i+1]))\r\n if b==b[::-1]:\r\n return \"Funny\"\r\n else:\r\n return \"Not Funny\"","sub_path":"HackerRank/FunnyString.py","file_name":"FunnyString.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"491303076","text":"'''\nREDS dataset\nsupport reading images from lmdb, image folder and memcached\n'''\nimport os.path as osp\nimport random\nimport pickle\nimport logging\nimport numpy as np\nimport cv2\nimport lmdb\nimport torch\nimport torch.utils.data as data\nimport data.util as util\nimport pdb\ntry:\n import mc # import memcached\nexcept ImportError:\n pass\n\nlogger = logging.getLogger('base')\n\n\nclass VOXCELEBDataset(data.Dataset):\n '''\n Reading the training Superface dataset\n key example: 0000_0000_videolen\n GT: Ground-Truth;\n LQ: Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames\n support reading N LQ frames, N = 1, 3, 5, 7\n '''\n\n def __init__(self, opt):\n super(VOXCELEBDataset, self).__init__()\n self.opt = opt\n self.data_type = self.opt['data_type']\n self.paths_LQ, self.paths_GT = None, None\n self.sizes_LQ, self.sizes_GT = None, None\n self.LQ_env, self.GT_env = None, None # environments for lmdb\n\n self.paths_GT, self.sizes_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])\n self.paths_LQ, self.sizes_LQ = util.get_image_paths(self.data_type, opt['dataroot_LQ'])\n assert self.paths_GT, 'Error: GT path is empty.'\n if self.paths_LQ and self.paths_GT:\n assert len(self.paths_LQ) == len(\n self.paths_GT\n ), 'GT and LQ datasets have different number of images - {}, {}.'.format(\n len(self.paths_LQ), len(self.paths_GT))\n self.random_scale_list = [1]\n\n # define the landmarks folder, may consider turning this variable into a arg\n self.landmarks_folder_256 = self.opt['dataroot_landmark']\n self.sigma = 4.0\n self.heatmap_type = 'gaussian'\n\n def _init_lmdb(self):\n # https://github.com/chainer/chainermn/issues/129\n self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False,\n meminit=False)\n self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False,\n meminit=False)\n\n\n def 
__getitem__(self, index):\n if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):\n self._init_lmdb()\n GT_path, LQ_path = None, None\n scale = self.opt['scale']\n GT_size = self.opt['GT_size']\n\n # get GT image\n GT_path = self.paths_GT[index]\n resolution = [int(s) for s in self.sizes_GT[index].split('_')\n ] if self.data_type == 'lmdb' else None\n img_GT = util.read_img(self.GT_env, GT_path, resolution)\n if self.opt['phase'] != 'train': # modcrop in the validation / test phase\n img_GT = util.modcrop(img_GT, scale)\n if self.opt['color']: # change color space if necessary\n img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]\n\n # get LQ image\n if self.paths_LQ:\n LQ_path = self.paths_LQ[index]\n resolution = [int(s) for s in self.sizes_LQ[index].split('_')\n ] if self.data_type == 'lmdb' else None\n img_LQ = util.read_img(self.LQ_env, LQ_path, resolution)\n else: # down-sampling on-the-fly\n # randomly scale during training\n\n random_scale = random.choice(self.random_scale_list)\n H_s, W_s, _ = img_GT.shape\n\n def _mod(n, random_scale, scale, thres):\n rlt = int(n * random_scale)\n rlt = (rlt // scale) * scale\n return thres if rlt < thres else rlt\n\n H_s = _mod(H_s, random_scale, scale, GT_size)\n W_s = _mod(W_s, random_scale, scale, GT_size)\n img_GT = cv2.resize(img_GT, (W_s, H_s), interpolation=cv2.INTER_LINEAR)\n if img_GT.ndim == 2:\n img_GT = cv2.cvtColor(img_GT, cv2.COLOR_GRAY2BGR)\n\n H, W, _ = img_GT.shape\n # using matlab imresize\n img_LQ = util.imresize_np(img_GT, 1 / scale, True)\n if img_LQ.ndim == 2:\n img_LQ = np.expand_dims(img_LQ, axis=2)\n\n # get lr_large image\n img_LQ_Large = cv2.resize(img_LQ, (GT_size, GT_size), cv2.INTER_CUBIC)\n\n # get the pts, heatmaps and masks\n f_anno = osp.join(self.landmarks_folder_256, GT_path + '.txt')\n # load the landmarks\n pts, point_set = util.anno_parser(f_anno, 68)\n\n # if self.opt['phase'] == 'train':\n # # if the image size is too small\n # H, W, _ = img_GT.shape\n # if H < GT_size or W < GT_size:\n # img_GT = cv2.resize(img_GT, (GT_size, GT_size), interpolation=cv2.INTER_LINEAR)\n # # using matlab imresize\n # img_LQ = util.imresize_np(img_GT, 1 / scale, True)\n # if img_LQ.ndim == 2:\n # img_LQ = np.expand_dims(img_LQ, axis=2)\n\n H, W, C = img_LQ.shape\n\n # augmentation - flip, rotate\n imgs = []\n imgs.append(img_LQ)\n imgs.append(img_GT)\n rlt, pts = util.augment_imgs_landmarks(GT_size, imgs, pts, self.opt['use_flip'], self.opt['use_rot'])\n img_LQ = rlt[0]\n img_GT = rlt[-1]\n\n if self.opt['color']: # change color space if necessary\n img_LQ = util.channel_convert(C, self.opt['color'],\n [img_LQ])[0] # TODO during val no definition\n\n # BGR to RGB, HWC to CHW, numpy to tensor\n if img_GT.shape[2] == 3:\n img_GT = img_GT[:, :, [2, 1, 0]]\n img_LQ = img_LQ[:, :, [2, 1, 0]]\n img_LQ_Large = img_LQ_Large[:, :, [2, 1, 0]]\n img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()\n img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float()\n img_LQ_Large = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ_Large, (2, 0, 1)))).float()\n\n pts = util.apply_bound(pts, 256, 256)\n # H*W*C\n GT_heatmaps, GT_mask = util.generate_label_map(pts, 16, 16, self.sigma, 16.0, self.heatmap_type)\n GT_heatmaps = torch.from_numpy(GT_heatmaps.transpose((2, 0, 1))).type(torch.FloatTensor)\n GT_mask = torch.from_numpy(GT_mask.transpose((2,0,1))).type(torch.ByteTensor)\n\n if LQ_path is None:\n LQ_path = GT_path\n 
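# A sketch of the shapes returned below (an assumption based on the calls above, not stated in the source):\n # img_LQ should be GT_size/scale square, img_GT and img_LQ_Large GT_size square,\n # and GT_heatmaps/GT_mask are 16x16 maps built from the 68 parsed landmarks.\n 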
return {'LQ': img_LQ, 'GT': img_GT, 'LQ_Large': img_LQ_Large, 'GT_heatmaps': GT_heatmaps, 'GT_mask': GT_mask}\n\n\n def __len__(self):\n return len(self.paths_GT)\n","sub_path":"codes/data/Voxceleb_dataset.py","file_name":"Voxceleb_dataset.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"40636862","text":"#!/usr/bin/python3\n\nfrom docopt import docopt\n\nfrom brownie import network, project\nfrom brownie.cli.utils.console import Console\nfrom brownie._config import ARGV, CONFIG, update_argv_from_docopt\n\n\n__doc__ = f\"\"\"Usage: brownie console [options]\n\nOptions:\n --network Use a specific network (default {CONFIG['network']['default']})\n --tb -t Show entire python traceback on exceptions\n --help -h Display this message\n\nConnects to the network and opens the brownie console.\n\"\"\"\n\n\ndef main():\n args = docopt(__doc__)\n update_argv_from_docopt(args)\n\n if project.check_for_project():\n active_project = project.load()\n active_project.load_config()\n print(f\"{active_project._name} is the active project.\")\n else:\n active_project = None\n print(\"No project was loaded.\")\n\n network.connect(ARGV[\"network\"])\n\n shell = Console(active_project)\n shell.interact(banner=\"Brownie environment is ready.\", exitmsg=\"\")\n","sub_path":"brownie/cli/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"575061778","text":"from decouple import config\nfrom fastapi import FastAPI\nfrom loguru import logger\n\n# os.environ['PAGE_ACCESS_TOKEN'] = \"MY Access token\"\n# os.environ['PAGE_VERIFY_TOKEN'] = \"MY Verify token\"\nfrom songmam import WebhookHandler, MessengerApi\nfrom songmam.models.messaging.quick_replies import QuickReply\nfrom songmam.models.messaging.templates.button import PostbackButton\nfrom songmam.models.webhook.events import *\n\napp = FastAPI()\nhandler = WebhookHandler(app)\napi = MessengerApi(config(\"PAGE_ACCESS_TOKEN\"), auto_avajana=False)\n\n\n@handler.add(MessagesEvent)\nasync def echo(event: MessagesEvent, *args, **kwargs):\n print(\n event.theMessaging.recipient,\n event.theMessaging.sender,\n event.theMessaging.message.text,\n )\n await api.send(\n event.sender,\n text=event.theMessaging.message.text,\n buttons=PostbackButton(title=\"send postback\", payload=\"handlers.do:tell_user\"),\n quick_replies=QuickReply(title=\"quick reply\", payload=\"handlers.do:tell_user\"),\n )\n\n\n@handler.add(MessagesEventWithQuickReply)\nasync def echo2(entry: MessagesEventWithQuickReply, *args, **kwargs):\n logger.info(\"echo2\")\n\n\n@handler.add(MessagingReferralEvent)\nasync def handle_ref(entry: MessagingReferralEvent, *args, **kwargs):\n logger.info(entry.sender)\n logger.info(entry.ref)\n\n\n@handler.add(MessageReadsEvent)\nasync def handle_read(entry: MessageReadsEvent, *args, **kwargs):\n logger.info(entry)\n\n\n@handler.add(MessageDeliveriesEvent)\nasync def handle_delivery(entry: MessageDeliveriesEvent, *args, **kwargs):\n logger.info(entry)\n\n\n# @handler.set_uncaught_postback_handler\n# async def handle_uncaught_postback(event):\n# logger.info(event)\n\n\nif __name__ == \"__main__\":\n import uvicorn\n\n uvicorn.run(\"app:app\", host=\"0.0.0.0\", port=8002, reload=True, 
log_level=\"debug\")\n","sub_path":"example/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"218026566","text":"from django.forms.models import modelformset_factory\nfrom django.forms.formsets import formset_factory\nfrom tags.models import TagFilter\nfrom django import forms\n\nclass TagFilterForm(forms.Form):\n\n # Change CSS class for errors:\n error_css_class = 'has-error'\n\n match_string = forms.CharField(\n widget=forms.TextInput(attrs={\n \"placeholder\": \"Match string\",\n \"class\":\"form-control input-sm\",\n }),\n required = True,\n )\n tags = forms.CharField(\n widget=forms.TextInput(attrs={\n \"data-role\":\"tagsinput\",\n \"placeholder\": \"Add tags here...\",\n \"class\":\"form-control input-sm\",\n }),\n required = False,\n )\n\n\n#-------------------------------------------------------------------------------\n\n\nclass TagForm(forms.Form):\n\n # List of tags, separated by commas.\n tags = forms.CharField(\n widget=forms.TextInput(attrs={\n \"data-role\":\"tagsinput\",\n \"name\":'',\n \"placeholder\": \"Add tags here...\"\n }),\n required = False,\n )\n\n\n#-------------------------------------------------------------------------------\n\n\nclass TagAndCategoryForm(forms.Form):\n\n tag = forms.CharField(\n widget=forms.TextInput(attrs={\n \"placeholder\": \"Tag\",\n \"class\":\"form-control input-sm\",\n }),\n required = True,\n )\n\n category = forms.CharField(\n widget=forms.TextInput(attrs={\n \"placeholder\": \"Tag category\",\n \"class\":\"form-control input-sm\",\n }),\n\n required = False,\n\n )\n\n","sub_path":"django/pycoon/tags/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"29595775","text":"\n\n#calss header\nclass _UPON():\n\tdef __init__(self,): \n\t\tself.name = \"UPON\"\n\t\tself.definitions = [u'on: ', u'to be something that someone will experience or have to deal with soon: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'prepositions'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/prepositions/_upon.py","file_name":"_upon.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"204679612","text":"# ctypes_helloWorld.py\n# Demonstrates using the dynamically linked library ctypes\n\nfrom ctypes import *\n\n# In Python 2.7 strings are byte-strings by default.\n# In Python 3.x they are unicode by defualt.\n# Use .encode('ascii') before handing them off\nlibc = CDLL(\"libc.so.6\")\nmessage_string = \"Hello world!\"\nmessage_string2 = \"Hello world!\"\nmessage_string.encode('ascii')\nlibc.printf(\"\\nTesting: %s\\n\", message_string)\n\n# If no variables are used prepend with b\nlibc.printf(b\"Testing: Hello world!\\n\\n\", message_string)\n\n","sub_path":"ctypes/ctypes_helloWorld.py","file_name":"ctypes_helloWorld.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"299488583","text":"import asyncio\nfrom .utils import format_transaction_for_mempool\n\n\nasync def add_transactions_to_mempool(connection, valid_transactions, utctime):\n result = False\n transactions = [\n 
format_transaction_for_mempool(transaction, utctime)\n for transaction in valid_transactions\n ]\n async with connection.transaction():\n result = await connection.executemany(\n f\"\"\"\n INSERT INTO mempool(\n sender_address\n ,receiver_address\n ,notes\n ,transacted_amount\n ,sender_inputs\n ,sender_ouputs\n ,receiver_outputs\n ,transaction_fee\n ,time_added \n ) VALUES (\n $1,$2,$3,$4,$5,$6,$7,$8,$9\n )\n \"\"\",\n transactions,\n )\n return result\n\n","sub_path":"app/node/src/models/mempool/transaction/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"111726004","text":"import pandas as pd #This is a very powerful library. Nearly replaces Excel and has a lot of tools for data analysis.\nimport numpy as np\nimport requests, shapefile as shp #Makes matplotlib pretty\nimport matplotlib.pyplot as plt #Plotting library for Python with various features. A beast, but when tamed can be extremely valuable\nimport matplotlib.pylab as pylab\nimport matplotlib as mpl\n# basemap import\nfrom mpl_toolkits.basemap import Basemap\n# Numpy import\n\npylab.rcParams['figure.figsize'] = 12, 10\n\n# get_ipython().magic(u'matplotlib inline')\n# A magic function that does... MAGIC! It makes it so that plots show up within the iPython notebook.\n\n\n# In[35]:\n\ncrimes = pd.read_json(\"https://data.cityofchicago.org/resource/vwwp-7yr9.json\")\nstations = shp.Reader(\"Data/CTA_Stations/CTA_Stations.shp\")\nstreets = shp.Reader(\"Data/Street_Center_Lines/transportation.shp\")\n\n\n\n# Create time and day columns\ntimes = [str(item)[len(str(item))-8:len(str(item))] for item in crimes['date']] \ncrimes['time'] = pd.Series(times)\ndates = [str(item)[0:len(str(item))-9].strip() for item in crimes['date']]\ncrimes['day'] = pd.to_datetime(pd.Series(dates))\n\n\n# In[64]:\n\n# crimes.head()\n\n\n# In[58]:\n\n# daily_crimes.tail()\n\n\n# In[66]:\n\ndaily_crimes = crimes[crimes.day==crimes.day[0]] # Most recent day\n\nday = str(daily_crimes.day[0])[:10] # Get day for string formatting\n\n# Get daily data\ndaily_narcotics = daily_crimes[daily_crimes.primary_type==\"NARCOTICS\"]\ndaily_homicide = daily_crimes[daily_crimes.primary_type==\"HOMICIDE\"]\ndaily_gta = daily_crimes[daily_crimes.primary_type==\"MOTOR VEHICLE THEFT\"]\n\n# Arrests data\narrest = crimes[crimes.arrest==True]\nno_arrest = crimes[crimes.arrest==False]\n\n\n\n\nwards = shp.Reader(\"Data/Wards_2015/WARDS_2015.shp\")\n\n\n\npylab.rcParams['figure.figsize'] = 12*5, 10*5\nfig = plt.figure()\nfor item in streets.shapeRecords(): # Plot street lines\n x = [i[0] for i in item.shape.points[:]]\n y = [i[1] for i in item.shape.points[:]]\n plt.plot(x,y, color='black', linewidth=1.0)\nfor item in wards.shapeRecords():\n x = [i[0] for i in item.shape.points[:]]\n y = [i[1] for i in item.shape.points[:]]\n plt.plot(x,y, color='blue', linewidth=3.0)\n\n# for item in tifs.shapeRecords():\n# x = [i[0] for i in item.shape.points[:]]\n# y = [i[1] for i in item.shape.points[:]]\n# plt.plot(x,y, color='red', linewidth=5)\n \nplt.scatter(daily_gta.x_coordinate, daily_gta.y_coordinate, color='green', alpha=0.8, s=600, label=\"Stolen Vehicles\")\nplt.scatter(daily_homicide.x_coordinate, daily_homicide.y_coordinate, color='red', alpha=0.8, s=600, label=\"Homicides\")\nplt.legend(fontsize=32*2)\n\nfig.savefig('{0}_{1}_{2} Crimes Ward Map.jpg'.format(day[5:7], day[8:], day[0:4]), dpi=80, 
bbox_inches='tight')","sub_path":"WardMap.py","file_name":"WardMap.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"212195930","text":"#!/usr/bin/python\n\nimport infosec.utils\n\n\nASSEMBLY_TEMPLATE = '.intel_syntax noprefix;.globl main;main:;{data}'\n\nASSEMBLE = 'gcc -xassembler - -o /dev/stdout -m32 -nostdlib -emain -Xlinker --oformat=binary'\n\n\ndef assemble_data(data):\n return infosec.utils.execute(ASSEMBLE, ASSEMBLY_TEMPLATE.format(data=data), raise_error=True).stdout\n \n\ndef assemble_file(path):\n with open(path, 'rb') as reader:\n data = reader.read()\n return assemble_data(data)\n\n\ndef main(path=None, markzero=False):\n try:\n assembly = assemble_file(path)\n assembly = ''.join('\\x1b[31m\\\\x00\\x1b[0m' if markzero and c == '\\x00' else ('\\\\x%02x' % ord(c)) for c in assembly)\n print(assembly)\n except RuntimeError as error:\n print(error)\n\n\nif __name__ == '__main__':\n import os, sys\n\n markzero = False\n \n if '--markzero' in sys.argv[1:]:\n markzero = True\n sys.argv.remove('--markzero')\n\n if '--help' in sys.argv or len(sys.argv) < 2:\n name = os.path.basename(sys.argv[0])\n print('USAGE:')\n print('\\t%s [--markzero] ' % name)\n sys.exit(1)\n\n main(path=sys.argv[-1], markzero=markzero)\n","sub_path":"hw6/assemble.py","file_name":"assemble.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"384540368","text":"#这个标签主要对页面中,Django Paginator对象的显示做处理,由于需求要求不高,还不是很完美。显示的pager.html就在同目录内。\n@register.inclusion_tag('app/pager.html', takes_context=True)\ndef pager(context):\n request =context[\"request\"]\n pager = context[\"pager\"]\n query_dict = request.GET.copy()\n\n if pager.has_next():\n query_dict[\"page_index\"] = pager.next_page_number()\n pager.next_url = request.path+\"?\"+query_dict.urlencode()\n if pager.has_previous():\n query_dict[\"page_index\"] = pager.previous_page_number()\n pager.previous_url = request.path+\"?\"+query_dict.urlencode()\n begin_page=1\n end_page=1\n if pager.has_other_pages():\n\n if pager.number > 5:\n begin_page = pager.number - 4\n if (pager.number+4) <= pager.paginator.num_pages:\n end_page = pager.number+4\n else:\n end_page = pager.paginator.num_pages\n else:\n begin_page = 1\n if pager.paginator.num_pages>=9:\n end_page = 9\n else:\n end_page = pager.paginator.num_pages\n page_numbers = range(begin_page,end_page+1)\n pager.page_urls=[]\n for page_number in page_numbers:\n query_dict[\"page_index\"] = page_number\n page_url = request.path+\"?\"+query_dict.urlencode()\n pager.page_urls.append(\n {\n \"index\":page_number,\n \"url\":page_url\n }\n )\n if \"page_index\" in query_dict:\n query_dict.pop(\"page_index\")\n pager.nopageindex_url = request.path+\"?\"+query_dict.urlencode()\n return {\"pager\":pager}\n","sub_path":"pagination/pager_tags.py","file_name":"pager_tags.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230323273","text":"import plotly.graph_objects as go\nfrom apps import GetData\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom plotly.colors import n_colors\nimport numpy as np\nimport chart_studio.plotly as py\n\n#Récupération du récap\ndate=\"20/03/2020\"\nprevious=5\ndf_recap=GetData.get_recap_by_country(date,previous=previous)\n\n\n#Récupération des cas 
confirmés\ndf_confirmed=GetData.get_world('confirmed')\nconfirmed=df_confirmed.iloc[:,5:].sum(axis=1).sum(axis=0)\nconfirmedp=df_recap['Cases (+)'].sum(axis=0)\n#Récupération des cas morts\ndf_deaths=GetData.get_world('deaths')\ndeaths=df_deaths.iloc[:,5:].sum(axis=1).sum(axis=0)\ndeathsp=df_recap['Deaths (+)'].sum(axis=0)\n#Récupération des cas guéris\ndf_recovered=GetData.get_world('recovered')\nrecovered=df_recovered.iloc[:,5:].sum(axis=1).sum(axis=0)\nrecoveredp=df_recap['Recovered (+)'].sum(axis=0)\n\n\ndf_H2=pd.DataFrame([[confirmed,deaths,recovered]],columns=[\"Confirmed Cases\", \"Deaths\", \"Recovered\"])\n\nconfirmed=f'{confirmed:,}'\ndeaths=f'{deaths:,}'\nrecovered=f'{recovered:,}'\n\nconfirmedp=f'{confirmedp:,}'\ndeathsp=f'{deathsp:,}'\nrecoveredp=f'{recoveredp:,}'\n\na=\"{0}\".format(confirmed)\nb=\"{0}\".format(deaths)\nc=\"{0}\".format(recovered)\n\ne=\"(+{0})\".format(confirmedp)\nf=\"(+{0})\".format(deathsp)\ng=\"(+{0})\".format(recoveredp)\n\n#CSS \nfill_color_H2='lightgray'\nline_color_H2='lightgray'\nfont_color_H2=[['black','red']]\nfont_size_H2=[15]\n\n\n#layout = go.Layout( autosize=True, **margin={'l': 0, 'r': 0, 't': 20, 'b': 0}**)\n\nfig_H2= go.Figure(data=[go.Table(\n header=dict(values=[\"Confirmed Cases\",\"Deaths\", \"Recovered\"]\n ,\n line_color=line_color_H2,fill_color=fill_color_H2,\n align='center',font=dict(color='black', size=10)\n ),\n cells=dict(\n values=[[a,e],[b,f], [c,g]],align='center',\n line_color=line_color_H2,\n fill_color=fill_color_H2, font_color=font_color_H2, \n font_size=font_size_H2\n \n ))\n])\nfig_H2.update_layout(\n autosize=False,\n width=500,\n height=170,\n margin=dict(\n l=20,\n r=20,\n b=20,\n t=50,\n pad=10\n ),\n title_text=\"WORLD\",title_x=0.5,\n title_font_color='black'\n)\n \n#from plotly.offline import plot\n#plot(fig_H2, auto_open=True)\n\n\n#USA \nus_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"US\"]):,}'\nus_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"US\"]):,}'\nus_cases=\"{0}\".format(us_cases)\nus_deaths=\"{0}\".format(us_deaths)\n\nus_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"US\"]):,}'\nus_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"US\"]):,}'\nus_casesp=\"(+{0})\".format(us_casesp)\nus_deathsp=\"(+{0})\".format(us_deathsp)\n\n#France\nfrance_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"France\"]):,}'\nfrance_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"France\"]):,}'\nfrance_cases=\"{0}\".format(france_cases)\nfrance_deaths=\"{0}\".format(france_deaths)\n\nfrance_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"France\"]):,}'\nfrance_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"France\"]):,}'\nfrance_casesp2=\"{0}\".format(france_casesp)\nfrance_deathsp2=\"{0}\".format(france_deathsp)\nfrance_casesp=\"(+{0})\".format(france_casesp)\nfrance_deathsp=\"(+{0})\".format(france_deathsp)\n\n#Italy\nitaly_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"Italy\"]):,}'\nitaly_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"Italy\"]):,}'\nitaly_cases=\"{0}\".format(italy_cases)\nitaly_deaths=\"{0}\".format(italy_deaths)\n\nitaly_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"Italy\"]):,}'\nitaly_deathsp=f'{int(df_recap[\"Deaths 
(+)\"][df_recap[\"Country/Region\"]==\"Italy\"]):,}'\nitaly_casesp=\"(+{0})\".format(italy_casesp)\nitaly_deathsp=\"(+{0})\".format(italy_deathsp)\n\n#Germany\ngermany_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"Germany\"]):,}'\ngermany_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"Germany\"]):,}'\ngermany_cases=\"{0}\".format(germany_cases)\ngermany_deaths=\"{0}\".format(germany_deaths)\n\ngermany_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"Germany\"]):,}'\ngermany_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"Germany\"]):,}'\ngermany_casesp=\"(+{0})\".format(germany_casesp)\ngermany_deathsp=\"(+{0})\".format(germany_deathsp)\n\n#China\nchina_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"China\"]):,}'\nchina_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"China\"]):,}'\nchina_cases=\"{0}\".format(china_cases)\nchina_deaths=\"{0}\".format(china_deaths)\n\nchina_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"China\"]):,}'\nchina_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"China\"]):,}'\nchina_casesp=\"(+{0})\".format(china_casesp)\nchina_deathsp=\"(+{0})\".format(china_deathsp)\n\n#Spain\nspain_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"Spain\"]):,}'\nspain_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"Spain\"]):,}'\nspain_cases=\"{0}\".format(spain_cases)\nspain_deaths=\"{0}\".format(spain_deaths)\n\nspain_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"Spain\"]):,}'\nspain_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"Spain\"]):,}'\nspain_casesp=\"(+{0})\".format(spain_casesp)\nspain_deathsp=\"(+{0})\".format(spain_deathsp)\n\n#Iran\niran_cases=f'{int(df_recap[\"Cases\"][df_recap[\"Country/Region\"]==\"Iran\"]):,}'\niran_deaths=f'{int(df_recap[\"Deaths\"][df_recap[\"Country/Region\"]==\"Iran\"]):,}'\niran_cases=\"{0}\".format(iran_cases)\niran_deaths=\"{0}\".format(iran_deaths)\n\niran_casesp=f'{int(df_recap[\"Cases (+)\"][df_recap[\"Country/Region\"]==\"Iran\"]):,}'\niran_deathsp=f'{int(df_recap[\"Deaths (+)\"][df_recap[\"Country/Region\"]==\"Iran\"]):,}'\niran_casesp=\"(+{0})\".format(iran_casesp)\niran_deathsp=\"(+{0})\".format(iran_deathsp)\n\nvalues_H3=[[china_cases,china_casesp],[france_cases,france_casesp],\\\n [germany_cases,germany_casesp],[us_cases,us_casesp],\\\n [spain_cases,spain_casesp], [iran_cases,iran_casesp],\\\n [italy_cases,italy_casesp]]\nfig_H3= go.Figure(data=[go.Table(\n header=dict(values=[\"China\",\"France\", \"Germany\"\\\n ,\"US\",\"Spain\",\"Iran\"\\\n ,\"Italy\"]\n ,\n line_color=line_color_H2,fill_color=fill_color_H2,\n align='center',font=dict(color='black', size=10)\n ),\n cells=dict(\n line_color=line_color_H2,fill_color=fill_color_H2,font_color=font_color_H2,\n values=values_H3, font_size=13\n ))\n])\n\nfig_H3.update_layout(\n autosize=False,\n width=1000,\n height=170,\n margin=dict(\n l=300,\n r=20,\n b=0,\n t=40,\n pad=20\n ),\n title_text=\"CASES\",title_x=0.63,\n title_font_color='black'\n) \n \nvalues_H4=[[china_deaths,china_deathsp],[france_deaths,france_deathsp],\\\n [germany_deaths,germany_deathsp],[us_deaths,us_deathsp],\\\n [spain_deaths,spain_deathsp], [iran_deaths,iran_deathsp],\\\n [italy_deaths,italy_deathsp]]\n\nfig_H4= go.Figure(data=[go.Table(\n header=dict(values=[\"China\",\"France\", \"Germany\"\\\n ,\"US\",\"Spain\",\"Iran\"\\\n ,\"Italy\"]\n ,\n 
line_color=line_color_H2,fill_color=fill_color_H2,\n align='center',font=dict(color='black', size=10)\n ),\n cells=dict(\n line_color=line_color_H2,fill_color=fill_color_H2,font_color=font_color_H2,\n values=values_H4, font_size=13\n ))\n])\n\nfig_H4.update_layout(\n autosize=False,\n width=1000,\n height=170,\n margin=dict(\n l=300,\n r=20,\n b=0,\n t=40,\n pad=20\n ),\n title_text=\"DEATHS\",title_x=0.63,\n title_font_color='black'\n) \n#from plotly.offline import plot\n#plot(fig_H4, auto_open=True)\n \ndf_H5=pd.DataFrame(columns=[\"Country\", \"New Cases\", \"Total Cases\",\"New Deaths\",\"Total Deaths\",\"Fatality\", \"Recovered\"])\n\ndf_H5[\"Country\"]=x = ['{0}'.format(i) for i in df_recap[\"Country/Region\"]]\n\ndf_H5[\"Total Cases\"]=df_recap[\"Cases\"]\n\ndf_H5[\"Total Deaths\"]=df_recap[\"Deaths\"]\n\ndf_H5[\"Fatality\"]=df_H5[\"Total Deaths\"]/df_H5[\"Total Cases\"]\n\ndf_H5[\"Total Deaths\"]=df_recap[\"Deaths\"]\n\ndf_H5[\"Recovered\"]=df_recap[\"Recovered\"]\n\ndf_H5[\"New Cases\"]=[f'{i:,}'for i in df_recap[\"Cases (+)\"]]\n\ndf_H5[\"New Cases\"]=[\"(+{0})\".format(str(i)) for i in df_H5[\"New Cases\"]]\n\ndf_H5[\"New Deaths\"]=[f'{i:,}'for i in df_recap['Deaths (+)']]\n\ndf_H5[\"New Deaths\"]=[\"(+{0})\".format(str(i)) for i in df_H5[\"New Deaths\"]]\n\ndf_H5[\"Fatality\"]=[\"{:.2%}\".format(i) for i in df_H5[\"Fatality\"]]\n\ndf_H5[\"Recovered\"]=[f'{i:,}'for i in df_H5[\"Recovered\"]]\n\ndf_H5[\"Total Cases\"]=[f'{i:,}'for i in df_H5[\"Total Cases\"]]\n\ndf_H5[\"Total Deaths\"]=[f'{i:,}'for i in df_H5[\"Total Deaths\"]]\n\nfont_color_H5=['black','red','black','red','black','red','black']\n\ncolumns_=[\"Country\", \"New Cases\", \"Total Cases\",\"New Deaths\",\"Total Deaths\",\"Fatality\", \"Recovered\"]\ncolumns_=['{0}'.format(i) for i in columns_]\n\nfig_H5 = go.Figure(go.Table(\n header=dict(values=list(columns_),\n \n align ='center', font=dict(color='black',size=12),\n line_color=line_color_H2,\n fill_color=fill_color_H2), \n \n cells=dict(values=[df_H5.Country, df_H5[\"New Cases\"],df_H5[\"Total Cases\"]\\\n , df_H5[\"New Deaths\"], df_H5[\"Total Deaths\"],\\\n df_H5.Fatality, df_H5.Recovered],\n \n font_size=11,font_color=font_color_H5,\n line_color=line_color_H2,fill_color=fill_color_H2)))\n\n\nfig_H5.update_layout(\n autosize=False,\n width=1000,\n height=400,\n margin=dict(\n l=300,\n r=20,\n b=100,\n t=50,\n pad=400\n ),\n title_text=\"BY COUNTRY\",title_x=0.63,\n title_font_color='black'\n) \n\n#print(columns_)\n#from plotly.offline import plot\n#plot(fig_H5, auto_open=True)\n\n\n","sub_path":"apps/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":10063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"411683568","text":"# coding=utf-8\n\nx, y, a, b = list(map(int, input().split(' ')))\nans = 0\nresult = ''\nfor i in range(a, x + 1):\n for j in range(b, y + 1):\n if i > j:\n ans += 1\n result += '%s %s\\n' % (i, j)\nprint(ans)\nif ans:\n print(result)\n","sub_path":"solutions/242A.py","file_name":"242A.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"600178887","text":"# coding: utf-8\n\n# Profiling middleware and decorator, that allows to profile any django\n# view easily for superusers.\n\n__author__ = 'igor.katson@gmail.com'\n\nimport cProfile\nimport pstats\nimport tempfile\n\nimport cStringIO as StringIO\n\n\ndef profile_view(func):\n \"\"\"A view decorator that allows profiling.\n\n 
Usage:\n /path/to/view/?profile=1\n /path/to/view/?profile=1&print_callees=1\n /path/to/view/?profile=1&print_callers=1\n /path/to/view/?profile=1&profile_limit=100\n /path/to/view/?profile=1&profile_order=time\n \n This will print the pstats.Stats profiling report right to\n your screen, as a content of Django response.\n \"\"\"\n\n def inner(request, *args, **kwargs):\n if 'profile' not in request.GET:\n return func(request, *args, **kwargs)\n profile_order = request.GET.get('profile_order', 'cumulative')\n print_callees = request.GET.get('print_callees')\n print_callers = request.GET.get('print_callers')\n profile_limit = int(request.GET.get('profile_limit', 80))\n pfile = tempfile.NamedTemporaryFile()\n result = {}\n\n def wrapper():\n result_ = func(request, *args, **kwargs)\n result['result'] = result_\n return result_\n\n cProfile.runctx('wrapper()', globals(), locals(), pfile.name)\n stream = StringIO.StringIO()\n stats = pstats.Stats(pfile.name, stream=stream)\n stats = stats.sort_stats(profile_order)\n\n if print_callees:\n stats.print_callees(profile_limit)\n elif print_callers:\n stats.print_callers(profile_limit)\n else:\n stats.print_stats(profile_limit)\n\n response = result['result']\n text = stream.getvalue()\n \n # text/plain results use {line-break: break-word} by default in Chrome,\n # which makes long output of e.g. \"print_callees=1\" hard to read.\n # See http://stackoverflow.com/questions/5837556/how-to-disable-word-\n # wrapping-in-plain-text-files-in-chrome and\n # http://habrahabr.ru/company/mailru/blog/201778/#comment_6971294\n \n html = '
<html><body><pre style=\"word-wrap: normal\">%s</pre></body></html>
    ' % text\n response.content = html\n response['Content-Type'] = 'text/html'\n return response\n return inner\n\n\nclass ProfilingMiddleware(object):\n \"\"\"Profiling middleware, that allows to profile any django view easily.\n\n For usage examples, see profile_view decorator.\n \"\"\"\n def process_view(self, request, view_func, view_args, view_kwargs):\n if 'profile' in request.GET and request.user.is_superuser:\n return profile_view(view_func)(request, *view_args, **view_kwargs)","sub_path":"all-gists/7426817/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"111668983","text":"from PIL import Image, ImageDraw, ImageFont\n\nim = Image.open(\"avatar.png\");\n\ndraw = ImageDraw.Draw(im)\n\nfnt = ImageFont.truetype(\"/Library/Fonts/Arial.ttf\", size=36)\nxy = (im.size[0] - 60, 20)\ndraw.text(xy, \"10\", font=fnt, fill=(255, 0, 0))\n\ndel draw\n\nim.save(\"result.png\", \"PNG\")\n\nim.show()\n","sub_path":"heqingbao/0000/avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"510125957","text":"\n#460 LFU cache\n\nclass minHeap:\n def __init__(self):\n self.heap = [-1]\n self.userdata = [-1]\n\n def swap(self, i, j):\n tmp = self.heap[i]\n self.heap[i] = self.heap[j]\n self.heap[j] = tmp\n tmp = self.userdata[i]\n self.userdata[i] = self.userdata[j]\n self.userdata[j] = tmp\n\n def add(self, v, u = None):\n self.heap.append(v)\n self.userdata.append(u)\n index = len(self.heap)-1\n while (index != 1):\n parent = index // 2\n if (self.heap[parent] > self.heap[index]):\n self.swap(parent, index)\n else:\n break\n index = parent\n\n def remove(self, root = 1):\n if (len(self.heap) <= 1 or root >= len(self.heap)):\n return\n\n parent = root\n self.swap(parent, len(self.heap)-1)\n self.heap.pop()\n self.userdata.pop()\n while(True):\n l = 2*parent\n r = l + 1\n min = l\n if (r < len(self.heap) and self.heap[r] < self.heap[l]):\n min = r\n if (min >= len(self.heap) or self.heap[min] >= self.heap[parent]):\n break\n self.swap(parent, min)\n parent = min\n\n def findUserdata(self, i):\n return self.userdata.index(i)\n\n def min(self):\n return None if len(self.heap) <= 1 else self.heap[1]\n\nclass lfuCache:\n def __init__(self, cap):\n self.heap = minHeap()\n self.table = {}\n self.cap = cap\n self.id = 0\n \n def getid(self):\n id = self.id\n self.id += 1\n return id\n \n def get(self, key):\n if (key in self.table):\n f = self.heap.findUserdata(key)\n self.heap.remove(f)\n self.table[key][1] = self.getid()\n self.heap.add(self.table[key][1], key)\n return self.table[key][0]\n else:\n return None\n \n def put(self, key, value):\n if (key in self.table):\n self.table[key][0] = value\n self.get(key)\n else:\n self.table[key] = [value, self.getid()]\n self.heap.add(self.table[key][1], key) \n count = len(self.heap.heap)-1\n if (count > self.cap):\n u = self.heap.userdata[1]\n self.heap.remove()\n del self.table[u]\n \nlfu = lfuCache(2)\nlfu.put(1, 2)\nlfu.put(1, 3)\nprint(lfu.get(1))\nlfu.put(2, 4)\n#lfu.put(1, 5)\n#print(lfu.get(1))\nlfu.put(3, 4)\nprint(lfu.get(1))\nprint(lfu.get(2))\nprint(\"==========\")\nlfu = lfuCache(2)\nlfu.put(1, 1)\nlfu.put(2, 2)\nprint(lfu.get(1))\nlfu.put(3, 3)\nprint(lfu.get(2))\nprint(lfu.get(3))\nlfu.put(4, 4)\nprint(lfu.get(1))\nprint(lfu.get(3))\nprint(lfu.get(4))\n\n \n 
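# Caveat: get() and put() re-key entries with a fresh, monotonically increasing\n# id, so the min-heap evicts the least *recently* used key rather than the least\n# *frequently* used one; a per-key access counter would be needed for a true LFU.\n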
","sub_path":"Leetcode/460.py","file_name":"460.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"559033734","text":"#!/usr/bin/env python3\n\n\"\"\"\nDependencies required to run program.\n - python3.6+\n - argparse\n - pandas >= 0.22.4\n - pysam==0.15.2\n\"\"\"\n\nimport sys\nimport pandas as pd\nfrom primer_tk import constants\n\ndef add_tabix_subparser(subparser):\n \"\"\"\n Get commandline arguments\n Args:\n subparser (?): Subparser object.\n Returns:\n args (Namespace): the parsed arguments.\n \"\"\"\n parser = subparser.add_parser(\"tabix\", help=\"Tabix subparser\")\n parser.add_argument(\"-vcf\", \"--variant-call-file\", dest=\"vcf\",\n help=\"Tabix indexed VCF.\")\n parser.add_argument(\"-in\", \"--primer-input-file\", dest=\"p_info\",\n help=\"The output of the primer pipeline.\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\",\n help=\"The name of the output file\")\n return parser\n\ndef create_tabix_df(primer_pipeline_output):\n \"\"\"\n Takes output of primer pipeline and generates dataframe.\n\n Args:\n total_primers (file): the output of the primer pipeline\n Returns:\n dataframe (pd.DataFrame): a pandas dataframe\n \"\"\"\n primer_df = pd.read_csv(primer_pipeline_output, header=0)\n return primer_df\n\ndef primer_range_left(seqid, rank, chrm, p_left, position1):\n \"\"\"\n Takes the chromosome, primer sequence, and position and creates a\n query range for tabix to search by.\n\n Args:\n seqid (pd.Series): pandas column containing seqids\n rank (pd.Series): pandas column containing primer rank\n chrm (pd.Series): pandas column of chrm info\n p_left (pd.Series): pandas column of the left primer seq\n position1 (pd.Series): pandas column of the left primer position.\n Returns:\n p_left_info (pd.DataFrame): the positional info in a frame format.\n \"\"\"\n p_left_info = pd.DataFrame()\n p_left_info['Sequence ID'] = seqid\n p_left_info['Primer Rank'] = rank\n p_left_info['Chromosome'] = chrm.apply(str)\n p_left_info['P_Len'] = p_left.apply(len)\n p_left_info['Position1'] = position1\n p_left_info['Position2'] = p_left_info['Position1']\\\n + p_left_info['P_Len']\n return p_left_info\n\ndef primer_range_right(seqid, rank, chrm, p_right, position2):\n \"\"\"\n Takes the chromosome, primer sequence, and position and creates a\n query range for tabix to search by.\n\n Args:\n seqid (pd.Series): pandas column containing seqids\n rank (pd.Series): pandas column containing primer rank\n chrm (pd.Series): pandas column of chrm info\n p_right (pd.Series): pandas column of the right primer seq\n position1 (pd.Series): pandas column of the right primer position.\n Returns:\n p_right_info (pd.DataFrame): the positional info in a frame format.\n \"\"\"\n p_right_info = pd.DataFrame()\n p_right_info['Sequence ID'] = seqid\n p_right_info['Primer Rank'] = rank\n p_right_info['Chromosome'] = chrm.apply(str)\n p_right_info['P_Len'] = p_right.apply(len)\n p_right_info['Position2'] = position2\n p_right_info['Position1'] = p_right_info['Position2']\\\n - p_right_info['P_Len']\n return p_right_info\n\n\ndef match_pinfo_to_vcf(p_info, vcf):\n \"\"\"\n Normalizes chromosome column to reference VCF info.\n Args:\n p_info (pd.DataFrame): the primer information df\n vcf (file): the tabix indexed vcf input\n Returns:\n p_info (pd.DataFrame): the primer information df normalized to genome\n \"\"\"\n switch = 0\n try:\n for rec in vcf.fetch('chr1', constants.START_POS, constants.END_POS):\n 
print(\"Updating switch: 1\")\n switch = 1\n except:\n for rec in vcf.fetch('1', constants.START_POS, constants.END_POS):\n print(\"Updating switch: 2\")\n switch = 2\n\n if switch == 1 and not p_info['Chromosome']\\\n .str.contains(\"chr\").any():\n p_info['Chromosome'] = 'chr' + p_info['Chromosome']\n elif switch == 2 and p_info['Chromosome']\\\n .str.contains(\"chr\").any():\n p_info['Chromosome'] = p_info['Chromosome']\\\n .str.replace('chr', '')\n else:\n pass\n return p_info\n\n\ndef tabix_fetch(seqids, ranks, chrom, position1, position2, vcf_in):\n \"\"\"\n Takes p_info positions and fetches SNPs from tabix indexed VCF.\n Args:\n seqids (pd.Series): pandas column of seqids\n ranks (pd.Series): pandas column of primer ranks\n chrom (pd.Series): pandas column of chrm info\n position1 (pd.Series): pandas column of pos1 info\n position2 (pd.Series): pandas column of pos2 info\n Returns:\n snp_list (list): list containing vcf info for primer positions.\n \"\"\"\n snp_list = []\n for seqid, rank, chrm, pos1, pos2 in zip(seqids, ranks, chrom, position1, position2):\n for row in vcf_in.fetch(chrm, pos1, pos2):\n snp_list.append((seqid, rank, str(row).strip('\\n').split('\\t')))\n select_list = []\n for item in snp_list:\n select_list.append((item[0], item[1], item[2][0], item[2][1],\\\n item[2][2], item[2][7].split(';')))\n final_list = []\n for item in select_list:\n geneinfo = [j for j in item[5] if \"GENEINFO=\" in j]\n caf = [j for j in item[5] if \"CAF=\" in j]\n topmed = [j for j in item[5] if \"TOPMED=\" in j]\n final_list.append((item[0], item[1], item[2], item[3], item[4], geneinfo, caf, topmed))\n return final_list\n\ndef tabix_results_to_df(tabix_list, which_primer, column_name):\n \"\"\"\n Takes the results from the tabix search and creates a dataframe.\n Args:\n tabix_list (list): tabix info from primer_left\n which_primer (str): should be L or R to denote left or right in names\n column_name (str): column name of snp count (should specify left or right)\n Returns:\n tabix_frame (pd.DataFrame): list organized into dataframe\n \"\"\"\n tabix_frame = pd.DataFrame(tabix_list)\n if len(tabix_frame) == 0:\n sys.exit(\"There are no SNPs in any of your primers, WOW!\")\n else:\n pass\n tabix_frame.columns = [\"Sequence ID\", \"Primer Rank\",\n \"Chromosome\", \"SNPPosition\", \"rs_id\",\n \"GeneInfo\", \"CommonAlleleFreq\", \"TopMedFreq\"]\n tabix_frame[\"GeneInfo\"] = tabix_frame[\"GeneInfo\"]\\\n .apply(lambda x: \"NA\" if len(x) == 0 else x[0].split(\"=\")[1])\n tabix_frame[\"CommonAlleleFreq\"] = tabix_frame[\"CommonAlleleFreq\"]\\\n .apply(lambda x: \"NA\" if len(x) == 0 else x[0].split('=')[1].split(',')[1])\n tabix_frame[\"TopMedFreq\"] = tabix_frame[\"TopMedFreq\"]\\\n .apply(lambda x: \"NA\" if len(x) == 0 else x[0].split('=')[1].split(',')[1])\n tabix_frame = tabix_frame.groupby([\"Sequence ID\", \"Primer Rank\", \"Chromosome\"])\\\n .agg({'SNPPosition': ';'.join,\n 'rs_id': ';'.join,\n 'GeneInfo': 'first',\n 'CommonAlleleFreq': ';'.join,\n 'TopMedFreq': ';'.join}).reset_index()\n tabix_frame[column_name] = tabix_frame[\"rs_id\"]\\\n .apply(lambda x: len(x.split(';')))\n tabix_frame.columns = [\"Sequence ID\", \"Primer Rank\", \"Chromosome\",\n \"%s_SNPPosition\" %which_primer,\n \"%s_rs_id\" %which_primer,\n \"%s_GeneInfo\" %which_primer,\n \"%s_CommonAlleleFreq\" %which_primer,\n \"%s_TopMedFreq\" %which_primer, column_name]\n return tabix_frame\n\ndef merge_left_right(left_df, right_df, total):\n \"\"\"\n Merge left and right primer tabix info dataframes.\n Args:\n 
left_df (pd.DataFrame): primer left tabix dataframe\n right_df (pd.DataFrame): primer right tabix dataframe\n returns:\n merged_tabix_df (pd.DataFrame): left and right merged df\n \"\"\"\n merged_tabix_df = pd.merge(total, left_df,\n on=['Sequence ID', 'Primer Rank'], how='left')\\\n .merge(right_df, on=['Sequence ID', 'Primer Rank'],\n how='left')\n return merged_tabix_df\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/primer_tk/primer_tabix.py","file_name":"primer_tabix.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"534081802","text":"# -*- coding:utf-8 -*-\nimport ast\nimport csv\n\nimport os\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom urllib import request\nfrom urllib import parse\n\n\ndef download_html(url):\n \"\"\"\n 爬取网页内容\n :param url: 爬取网页地址\n :return: soup对象\n \"\"\"\n try:\n # 特别注释,因为这里总是出现\n # UnicodeEncodeError: 'ascii' codec can't encode character '\\xb0'\n # in position 160: ordinal not in range(128)\n # 这是解决方法,因为网页源代码为gbk,先编译为utf-8 (bytes)转字符串\n url = str(url.encode('utf-8'))[2:][:-1]\n\n headers = {'User-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0',\n 'Accept': 'text / html, application / xhtml + xml, '\n 'application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8',\n 'Accept - Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,zh;q=0.8,en;q=0.6'\n }\n r = urllib.request.Request(url, headers=headers)\n response = urllib.request.urlopen(r, timeout=20)\n html = response.read()\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n except Exception:\n return 'none'\n\n\ndef unify_salary(money):\n # 薪资单位 全部换算为 万/月\n # 1天8小时 1月30天 1年12月\n unit = money.rstrip()[-3:]\n if unit == '/小时':\n # 获得薪资数目\n money_num = money.rstrip()[:-4].lstrip().split('-')\n else:\n # 获得薪资数目\n money_num = money.rstrip()[:-3].lstrip().split('-')\n\n # 判断薪资是否为一个区间范围 2-3\n if len(money_num) < 2:\n # 说明只有200元/天 或者 100万以上/年\n try:\n if money.rstrip()[-3:] != '上/年':\n money_min = ast.literal_eval(money_num[0].strip())\n money_max = ast.literal_eval(money_num[0].strip())\n except:\n money_min = 0\n money_max = 0\n else:\n money_min = ast.literal_eval(money_num[0].strip())\n money_max = ast.literal_eval(money_num[1].strip())\n\n if unit == '元/天':\n money_min = float('%.2f' % (money_min * 30 / 10))\n money_max = float('%.2f' % (money_max * 30 / 10))\n elif unit == '元/小时':\n money_min = float('%.2f' % (money_min * 8 * 30 / 10))\n money_max = float('%.2f' % (money_max * 8 * 30 / 10))\n elif unit == '万/年':\n money_min = float('%.2f' % (money_min / 12))\n money_max = float('%.2f' % (money_max / 12))\n elif unit == '千/月':\n money_min = float('%.2f' % (money_min / 10))\n money_max = float('%.2f' % (money_max / 10))\n elif unit == '万/月':\n pass\n else:\n # 100万以上/年\n money_min = float('%.2f' % (100 / 12))\n money_max = float('%.2f' % (100 / 12))\n return money_min, money_max\n\n\ndef main(f, url, isFirst):\n # dialect为打开csv文件的方式,默认是excel,delimiter=\"\\t\"参数指写入的时候的分隔符\n csvwriter = csv.writer(f, dialect=\"excel\")\n # 获得soup对象\n soup = download_html(url)\n if soup == 'none':\n print('读取相应url失败')\n return\n # 获得下一页链接\n try:\n link = soup.select('li[class=\"bk\"]')[1].select('a')[0]['href']\n except:\n link = 'none'\n\n # 获得爬取地点名称\n province = soup.select('input[id=\"work_position_input\"]')[0]['value']\n\n # 岗位名称\n position_names = soup.select('span[class=\"t1\"], p[class=\"t1 \"] > span')\n # 公司名称\n company_names = 
soup.select('span[class=\"t2\"]')\n # 工作地点\n position_locations = soup.select('span[class=\"t3\"]')\n # 薪资\n salarys = soup.select('span[class=\"t4\"]')\n # 发布时间\n release_time = soup.select('span[class=\"t5\"]')\n # 写入到csv文件--2017/10/9--by wjh\n for i in range(len(position_names)):\n # 除去第一次之外的不抓取标题\n print('{0}/{1}'.format(i + 1, len(position_names)))\n if isFirst == 0 and i == 0:\n i += 1\n continue\n\n # 薪资处理\n # 这里是判断薪资如果没有涉及,默认为0万/月\n if len(salarys[i].getText().strip()) == 0:\n salary = '0-0'\n else:\n salary = salarys[i].getText().strip()\n # 这里需要对薪资做单位统一为“万/月”\n # 转换薪资单位\n if salary != '薪资':\n salary = unify_salary(salary)\n salary = str(salary[0]) + '-' + str(salary[1]) + '万/月'\n else:\n salary = salary + '(万/月)'\n\n # 工作地点处理 城市-具体到区\n if position_locations[i].getText().strip() != '工作地点':\n position_location = position_locations[i].getText().strip().split('-')\n if len(position_location) < 2: # 只有北京 没有写出区\n # 默认为未知\n location_city = position_location[0]\n location_area = '未知'\n else: # 都有\n location_city = position_location[0]\n location_area = position_location[1]\n else:\n location_city = '工作城市'\n location_area = '工作市区'\n\n csvwriter.writerow([position_names[i].getText().strip(),\n company_names[i].getText().strip(),\n location_city,\n location_area,\n salary,\n release_time[i].getText().strip()])\n return link, province # 返回下一页链接\n\n\nif __name__ == '__main__':\n count = 1 # %E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0\n position = '机器���习' # 这里可以自己指定 要爬取的职位信息\n position = urllib.parse.quote(position) #\n path = 'E:\\pythonProgram\\LittleProgram\\spider\\各个省'\n file_path = os.path.join(path, )\n province = '北京'\n for j in range(36):\n file_path = os.path.join(path, str(province) + \".csv\")\n f = open(file_path, \"w\", newline='')\n url = 'http://search.51job.com/jobsearch/search_result.php?fromJs=1&jobarea={0}' \\\n '&keyword={1}&keywordtype=2' \\\n '&lang=c&stype=2&postchannel=0000&fromType=1&confirmdate=9'.format((str(j + 1) + '0000').zfill(6), position)\n\n # 获取下一页链接\n # 首次爬取需要获得标题\n next_link, province = main(f, url, 1)\n print('{0}:{1}'.format(province, count))\n # 这时不需要再爬取标题\n while next_link != 'none':\n count += 1\n next_link, _ = main(f, next_link, 0)\n else:\n # 停止爬取\n f.close()\n\n\n\n\n\n\n\n\n","sub_path":"AnalysisRecruit/spider_recruit_each.py","file_name":"spider_recruit_each.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"223673503","text":"import math\nimport scipy.ndimage.morphology as m\nimport cv2\nimport numpy as np\nfrom skimage import img_as_float\nfrom skimage import io, color, morphology\nfrom skimage import io, morphology, img_as_bool, segmentation\nfrom scipy import ndimage as ndi\nimport matplotlib.pyplot as plt\nimport ctypes\nimport tkinter as tk\nimport tkinter.filedialog\nfrom tkinter.filedialog import askopenfilename\n\n#Makes each pixel of the image black or white\ndef binary(img):\n im_gray = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\n (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n cv2.imwrite(\"binary.jpg\", im_bw)\n cv2.imshow(\"Binary\", im_bw)\n cv2.waitKey(0)\n\ndef binary1(img):\n img = cv2.imread(img,0)\n img = cv2.medianBlur(img,5)\n th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv2.THRESH_BINARY,11,2)\n cv2.imwrite(\"binary.jpg\", th3)\n cv2.imshow(\"Binary\",th3)\n cv2.waitKey(0)\n\n#Applies median filtering to get rid of noise\ndef median(img1):\n img = cv2.imread(img1)\n 
median = cv2.medianBlur(img, 3)\n cv2.imwrite(\"median.jpg\",median)\n\n#Thins the image to one pixel wide using the skeletonize function from morphology\ndef thinning2(name):\n image = img_as_float(color.rgb2gray(io.imread(name)))\n image_binary = image < 0.5\n out_skeletonize = morphology.skeletonize(image_binary)\n out_thin = morphology.thin(image_binary)\n\n plt.imsave('gaps.jpg', out_skeletonize, cmap='gray')\n img = cv2.imread(\"gaps.jpg\")\n cv2.imshow(\"Thinning2\", img)\n cv2.waitKey(0)\n\n#Finds the edges using the Canny Edge Detection\ndef canny(name):\n image = cv2.imread(name)\n edges = cv2.Canny(image, 100, 200)\n cv2.imwrite(\"canny.jpg\",edges)\n cv2.imshow(\"Canny Edge Detection\", edges)\n cv2.waitKey(0)\n\n#Fills in gaps in the skeleton image\ndef complete(img):\n image = img_as_bool(io.imread(img))\n out = ndi.distance_transform_edt(~image)\n out = out < 0.02 * out.max()\n out = morphology.skeletonize(out)\n out = segmentation.clear_border(out)\n out = out | image\n\n cv2.imshow(\"out\",out)\n cv2.waitKey(0)\n cv2.imwrite('gaps_filled.jpg', out)\n\n#Makes all green pixels white\ndef bandw(img1):\n image = cv2.imread(img1)\n height, width, channels = image.shape\n for y in range(0,height):\n for x in range(0,width):\n color = image[y,x]\n b = color[0]\n g = color[1]\n r = color[2]\n if b>0 and g>100 and r>0:\n image[y,x] = [255,255,255]\n else:\n image[y,x] = [0,0,0]\n cv2.imwrite(\"bandw.jpg\", image)\n\n#The switch function is used to assign each number of crack with a color\ndef switch(x):\n return {\n 1: 'Red',\n 2: 'Yellow',\n 3: 'White',\n 4: 'Purple',\n 5: 'Orange',\n 6: 'Pink'\n }.get(x, 'Color')\n\n#Function to calculate the length\ndef getLength(img,wU,hU,units):\n image = cv2.imread(img)\n height, width, channels = image.shape\n w = wU/width\n h = hU/height\n #Array created that will contain all endpoints of the cracks\n endpoints = []\n split = [[]]\n circle = []\n\n for y in range(0,height):\n for x in range(0,width):\n color = image[y,x]\n #RGB values gotten for the selected pixel\n b = color[0]\n g = color[1]\n r = color[2]\n #Checks to see if all RGBs are part of the crack\n if b>150 and g>150 and r>150:\n #Cracks to see if pixel is not part of the border\n if y>0 and x>0 and y2:\n image[y,x]=[255,0,0]\n split.append([x,y])\n #print(\"[%d,%d]\" % (x,y))\n #If there are exactly two pixels in count, the selected pixel is in the middle of the cracking\n if count==2:\n image[y,x]=[0,255,0]\n if len(circle) == 0:\n circle.append([x,y])\n #If there is exactly one pixel in count, the selected pixel is an endpoint and added to the endpoint array\n if count==1:\n image[y,x]=[0,0,255]\n endpoints.append([x,y])\n #Categorizes the pixel as an endpoint if the selected pixel is on the border of the image\n if y==0 or x==0 or y==height or x==width:\n image[y,x]=[0,0,255]\n endpoints.append([x,y])\n\n intersection = [[]]\n slopes = []\n if len(endpoints) == 0:\n endpoints.append(circle[0][0],circle[0][1])\n image[circle[0][1],circle[0][0]]=[0,0,255]\n #l is the variable to keep track of the length\n l = 0.0\n f= open(\"output.txt\",\"w+\")\n #Loops through all the endpoints\n #For each endpoint we start with that pixel and move throughout the crack, adding to the length for each pixel until we get to another endpoint\n #counter is used to asign a number to each crack\n counter = 1\n while(len(endpoints)>0):\n tf = True\n x = endpoints[0][1]\n y = endpoints[0][0]\n endpoints.remove([y,x])\n while(tf):\n #Colors each crack based on the counter number\n if counter == 1:\n 
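# the first crack found is traced in red (OpenCV channel order is BGR)\n 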
image[x,y]=[0,0,255]\n elif counter == 2:\n image[x,y]=[0,255,255]\n elif counter == 3:\n image[x,y]=[255,255,255]\n elif counter == 4:\n image[x,y]=[255,0,255]\n elif counter == 5:\n image[x,y]=[0,127,255]\n elif counter == 6:\n image[x,y]=[127,0,255]\n else:\n image[x,y]=[127,127,255]\n #Calls the getColor function to see which pixels are next to the selected one\n info = getColor(img,y,x)\n #Gets a number for how many pixels are next the selected one\n count = info[[len(info)-1][0]]\n info.remove([])\n info.remove(count)\n if count == 2:\n s = getSlope(img,y,x)\n slopes.append([s,x,y])\n check = 0\n #Loops through all the endpoints and splitting points\n for a1 in range(0,len(info)):\n color = image[info[a1-check][0],info[a1-check][1]]\n b = color[0]\n g = color[1]\n r = color[2]\n if r==255:\n x=(info[a1-check][0])\n y=(info[a1-check][1])\n info.remove([x,y])\n check = check + 1\n if len(info) == 0 or [y,x] in endpoints:\n if [y,x] in endpoints:\n endpoints.remove([y,x])\n tf = False\n elif len(info) == 1:\n x1 = x\n y1 = y\n x = info[0][0]\n y = info[0][1]\n if x==x1:\n l = l+w\n elif y==y1:\n l = l+h\n else:\n l = l+math.sqrt(math.pow(w,2)+math.pow(h,2))\n else:\n intersection.append([x,y])\n for i in range(0,len(info)):\n endpoints.append([info[i][1],info[i][0]])\n image[x,y]=[0,0,0]\n image[x,y]=[255,255,255]\n tf = False\n if l != 0:\n print(\"Length %d (%s): %.3f %s\" % (counter,switch(counter),l,units))\n f.write(\"Length %d (%s): %.3f %s\\n\" % (counter,switch(counter),l,units))\n counter = counter + 1\n l = 0\n f.close()\n cv2.imwrite('end.jpg',image)\n cv2.imshow(\"final\",image)\n cv2.waitKey(0)\n return slopes\n\n#Takes in the skeleton image and finds the perpendicular slope for each part of the cracking\n#Slope using 2-4 other pixels to get a more accurate length\ndef getSlope(img,x,y):\n total = 0\n counter = 0\n slope = 0\n info = getColor(img,x,y)\n #We look to get at least the two points around the selected one, but will hopefully get up to four around it\n point1 = [0,0]\n point2 = [info[1][1], info[1][0]]\n point3 = [x,y]\n point4 = [info[2][1], info[2][0]]\n point5 = [0,0]\n infoP2 = getColor(img,point2[0],point2[1])\n infoP4 = getColor(img,point4[0],point4[1])\n if infoP2[len(infoP2)-1] == 2:\n infoP2.remove(2)\n infoP2.remove([y,x])\n infoP2.remove([])\n point1 = [infoP2[0][1],infoP2[0][0]]\n counter = counter + 1\n if infoP4[len(infoP4)-1] == 2:\n infoP4.remove(2)\n infoP4.remove([y,x])\n infoP4.remove([])\n point5 = [infoP4[0][1],infoP4[0][0]]\n counter = counter + 5\n if ((float(point2[0]-point1[0])) != 0) and (counter == 1 or counter == 6):\n slope = slope + (float(point2[1]-point1[1])/(float(point2[0]-point1[0])))\n total = total + 1\n if (float(point3[0]-point2[0])) != 0:\n slope = slope + (float(point3[1]-point2[1])/(float(point3[0]-point2[0])))\n total = total + 1\n if (float(point4[0]-point3[0])) != 0:\n slope = slope + (float(point4[1]-point3[1])/(float(point4[0]-point3[0])))\n total = total + 1\n if ((float(point5[0]-point4[0])) != 0) and (counter == 5 or counter == 6):\n slope = slope + (float(point5[1]-point4[1])/(float(point5[0]-point4[0])))\n total = total + 1\n\n if total == 0:\n slope = 1\n else:\n slope = slope/total\n return slope\n\n#Takes in the canny image and the slopes and returns the widths\ndef getWidth(canny1,slopes,wU,hU,units):\n canny = cv2.imread(canny1)\n #WIdth and height in pixels and in the propper units are now variables\n height, width, channels = canny.shape\n w = wU/width\n h = hU/height\n #Variables for high, low, avg and an 
array of all widths are initialized and will be changed to what they should be throughout the code\n total = []\n high = 0\n low = 100\n avg = 0\n #Loops through the array of all the slopes of the pixels\n for a in range(0,len(slopes)):\n slope = slopes[a][0]\n x = slopes[a][1]\n y = slopes[a][2]\n canny[x,y]=[0,0,255]\n #Slope is put into fraction form to get the x and y of the slope\n newSlope = (float(slope)).as_integer_ratio()\n slopeX = newSlope[1]\n slopeY = newSlope[0]\n check = True\n counter = 1\n #Slope is used to extend the length until it reaches the edges of the cracking\n while check:\n #Checks to see if slope causes the width to go out of bounds\n if y-(counter*slopeY) >= width or y+(counter*slopeY) >= width or y-(counter*slopeY) < 0 or y+(counter*slopeY) < 0 or x-(counter*slopeX) >= width or x+(counter*slopeX) >= width or x-(counter*slopeX) < 0 or x+(counter*slopeX) < 0:\n #print(\"Out of bounds\")\n break\n #Slope is used to add to the width\n color1 = canny[x+(counter*slopeX),y-(counter*slopeY)]\n color2 = canny[x-(counter*slopeX),y+(counter*slopeY)]\n if (color1[0]>200 and color1[1]>200 and color1[2]) or (color2[0]>200 and color2[1]>200 and color2[2]):\n check = False\n total.append(math.sqrt(2*(math.pow((counter*w*slopeX),2)+math.pow((counter*h*slopeY),2))))\n canny = cv2.line(canny, (y+(counter*slopeY), x-(counter*slopeX)), (y-(counter*slopeY), x+(counter*slopeX)),(0,0,255))\n counter = counter + 1\n #The average width is calculated\n for b in range(0,len(total)):\n if total[b] < low:\n low = total[b]\n if total[b] > high:\n high = total[b]\n avg = avg + total[b]\n avg = avg/len(total)\n #Avg, smallest and largest lengths printed out\n print(\"Average Width: %0.4f %s\" % (avg,units))\n print(\"Smallest Width: %0.4f %s\" % (low,units))\n print(\"Highest Width: %0.4f %s\" % (high,units))\n #Saves and shows the widths on the canny image\n cv2.imwrite('slopes.jpg',canny)\n cv2.imshow(\"Width\",canny)\n cv2.waitKey(0)\n\n#Returns an array with the pixels that are part of the cracking from the eight surrounding pixels\n#Returns the number of pixels that is part of the cracks to allow the code to categorize as either endpoint, splitting point or regular pixel\ndef getColor(img,x,y):\n #Reads in the image\n image = cv2.imread(img)\n #Gets the height and width values (in pixels)\n height, width, channels = image.shape\n info = [[]]\n count = 0\n #Gets the RGB values for the selected pixel\n color = image[y,x]\n b = color[0]\n g = color[1]\n r = color[2]\n #Checks to see if the pixel to the upper left is part of the cracking\n color1 = image[y-1,x-1]\n b1 = color1[0]\n g1 = color1[1]\n r1 = color1[2]\n if b1>150 and g1>150 and r1>150:\n count = count+1\n info.append([y-1,x-1])\n #Checks to see if the pixel above is part of the cracking\n color2 = image[y-1,x]\n b2 = color2[0]\n g2 = color2[1]\n r2 = color2[2]\n if b2>150 and g2>150 and r2>150:\n count = count+1\n info.append([y-1,x])\n #Checks to see if the pixel to the upper right is part of the cracking\n color3 = image[y-1,x+1]\n b3 = color3[0]\n g3 = color3[1]\n r3 = color3[2]\n if b3>150 and g3>150 and r3>150:\n count = count+1\n info.append([y-1,x+1])\n #Checks to see if the pixel to the left is part of the cracking\n color4 = image[y,x-1]\n b4 = color4[0]\n g4 = color4[1]\n r4 = color4[2]\n if b4>150 and g4>150 and r4>150:\n count = count+1\n info.append([y,x-1])\n #Checks to see if the pixel to the right is part of the cracking\n color5 = image[y,x+1]\n b5 = color5[0]\n g5 = color5[1]\n r5 = color5[2]\n if b5>150 and 
g5>150 and r5>150:\n count = count+1\n info.append([y,x+1])\n #Checks to see if the pixel to the bottom left is part of the cracking\n color6 = image[y+1,x-1]\n b6 = color6[0]\n g6 = color6[1]\n r6 = color6[2]\n if b6>150 and g6>150 and r6>150:\n count = count+1\n info.append([y+1,x-1])\n #Checks to see if the pixel below is part of the cracking\n color7 = image[y+1,x]\n b7 = color7[0]\n g7 = color7[1]\n r7 = color7[2]\n if b7>150 and g7>150 and r7>150:\n count = count+1\n info.append([y+1,x])\n #Checks to see if the pixel to the bottom right is part of the cracking\n color8 = image[y+1,x+1]\n b8 = color8[0]\n g8 = color8[1]\n r8 = color8[2]\n if b8>150 and g8>150 and r8>150:\n count = count+1\n info.append([y+1,x+1])\n\n info.append(count)\n return info\n\n#############################################MAIN###################################\ncropping = False\nesc_keycode=27\nx_start, y_start, x_end, y_end = 0, 0, 0, 0\ndone = False\n\nfilename = askopenfilename()\nimg = cv2.imread(filename)\n\n#The image is made smaller if it extends the window size\nuser32 = ctypes.windll.user32\nh = user32.GetSystemMetrics(0)\nw = user32.GetSystemMetrics(1)\nheight, width, channels = img.shape\nwhile(height>h or width>w):\n height = (9/10)*height\n width = (9/10)*width\n\n#User enters the height and width and units\nmaster = tk.Tk()\ntk.Label(master, text=\"Width\").grid(row=0)\ntk.Label(master, text=\"Height\").grid(row=1)\ntk.Label(master, text=\"Units\").grid(row=2)\n\ne1 = tk.Entry(master)\ne2 = tk.Entry(master)\ne3 = tk.Entry(master)\n\ne1.grid(row=0, column=1)\ne2.grid(row=1, column=1)\ne3.grid(row=2, column=1)\n\ntk.Button(master,\n text='Done',\n command=master.quit).grid(row=4,\n column=0,\n sticky=tk.W,\n pady=4)\n\nmaster.mainloop()\n\nwidthUnits = float(e1.get())\nheightUnits = float(e2.get())\nunits = e3.get()\n\ncv2.imwrite(\"resize.jpg\", cv2.resize(img, (int(width),int(height))))\nimg = cv2.imread(\"resize.jpg\")\noriImage = img.copy()\ncheck = False\n\ndef mouse_crop(event, x, y, flags, param):\n # grab references to the global variables\n global x_start, y_start, x_end, y_end, cropping\n \n # if the left mouse button was DOWN, start RECORDING\n # (x, y) coordinates and indicate that cropping is being\n if event == cv2.EVENT_LBUTTONDOWN:\n x_start, y_start, x_end, y_end = x, y, x, y\n cropping = True\n \n # Mouse is Moving\n elif event == cv2.EVENT_MOUSEMOVE:\n if cropping == True:\n x_end, y_end = x, y\n \n # if the left mouse button was released\n elif event == cv2.EVENT_LBUTTONUP:\n # record the ending (x, y) coordinates\n x_end, y_end = x, y\n cropping = False # cropping is finished\n \n refPoint = [(x_start, y_start), (x_end, y_end)]\n \n if len(refPoint) == 2: #when two points were found\n roi = oriImage[refPoint[0][1]:refPoint[1][1], refPoint[0][0]:refPoint[1][0]]\n cv2.imshow(\"Cropped\", roi)\n cv2.imwrite(\"cropped.jpg\", roi)\n check = True\n \ncv2.namedWindow(\"image\")\ncv2.setMouseCallback(\"image\", mouse_crop)\n \nwhile done == False:\n i = img.copy()\n if not cropping:\n cv2.imshow(\"image\", img)\n elif cropping:\n cv2.rectangle(i, (x_start, y_start), (x_end, y_end), (255, 0, 0), 2)\n cv2.imshow(\"image\", i)\n check = True\n k = cv2.waitKey(1)\n if k == esc_keycode:\n if check == False:\n cv2.imwrite(\"cropped.jpg\", oriImage)\n break\n\n#Image is transformed into a binary image\n#binary(\"cropped.jpg\")\nbinary1(\"cropped.jpg\")\n#Uses the Canny Edge Detection to find the edges of the cracking\ncanny(\"binary.jpg\")\n#Median Filtering is used to get rid of access 
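The eight copy-pasted neighbor checks in getColor above can be collapsed into one loop over offsets; a compact sketch under the same conventions (brightness > 150 test, [row, col] entries, leading empty list and trailing count), shown for comparison rather than as a verified drop-in:

import cv2

def get_color_compact(img, x, y, thresh=150):
    # Collect the 8-connected neighbors of (x, y) that belong to the
    # crack skeleton, mirroring the info layout used by getColor.
    image = cv2.imread(img)
    info = [[]]
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dy == 0 and dx == 0:
                continue
            b, g, r = image[y + dy, x + dx]
            if b > thresh and g > thresh and r > thresh:
                info.append([y + dy, x + dx])
    info.append(len(info) - 1)  # trailing neighbor count, as above
    return info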
points\nmedian(\"binary.jpg\")\n#Thinning is used to make the cracking one pixel wide\nthinning2(\"median.jpg\")\n#Complete fills in gaps in the cracking\ncomplete(\"gaps.jpg\")\n#Cracking RGB values are 255\nbandw(\"gaps_filled.jpg\")\n#Length is calculated using demensions and units given. Perpendicular slopes are returned\nslopes = getLength(\"bandw.jpg\",widthUnits, heightUnits, units)\n#Width is calculated\ngetWidth(\"canny.jpg\", slopes, widthUnits, heightUnits, units)\n# close all open windows\ncv2.destroyAllWindows()\n","sub_path":"jimmyFolder/pythonFiles/crackAnalysis.py","file_name":"crackAnalysis.py","file_ext":"py","file_size_in_byte":18585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"418666201","text":"\"\"\" es_runners for conformer\n\"\"\"\n\nimport numpy\nimport automol\nimport elstruct\nimport autofile\nfrom routines.es._routines import _util as util\nfrom routines.es import runner as es_runner\nfrom lib import filesys\nfrom lib.phydat import bnd\n\n\ndef conformer_sampling(zma, spc_info,\n mod_thy_info, thy_save_fs,\n cnf_run_fs, cnf_save_fs,\n script_str, overwrite,\n saddle=False, nsamp_par=(False, 3, 3, 1, 50, 50),\n tors_names='', dist_info=(),\n two_stage=False, rxn_class='', **kwargs):\n \"\"\" Find the minimum energy conformer by optimizing from nsamp random\n initial torsional states\n \"\"\"\n\n ich = spc_info[0]\n coo_names = []\n\n # Read the geometry and zma from the ini file system\n # if not saddle:\n # geo = thy_save_fs[-1].file.geometry.read(mod_ini_thy_info[1:4])\n # tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)\n # zma = automol.geom.zmatrix(geo)\n # else:\n # geo = thy_save_fs[0].file.geometry.read()\n # zma = thy_save_fs[0].file.zmatrix.read()\n # coo_names.append(tors_names)\n if saddle:\n coo_names.append(tors_names)\n\n tors_ranges = tuple((0, 2*numpy.pi) for tors in tors_names)\n tors_range_dct = dict(zip(tors_names, tors_ranges))\n if not saddle:\n gra = automol.inchi.graph(ich)\n ntaudof = len(\n automol.graph.rotational_bond_keys(gra, with_h_rotors=False))\n nsamp = util.nsamp_init(nsamp_par, ntaudof)\n else:\n ntaudof = len(tors_names)\n nsamp = util.nsamp_init(nsamp_par, ntaudof)\n\n print('\\nSaving any conformers in run filesys...')\n save_conformers(\n cnf_run_fs=cnf_run_fs,\n cnf_save_fs=cnf_save_fs,\n thy_info=mod_thy_info,\n saddle=saddle,\n dist_info=dist_info,\n rxn_class=rxn_class\n )\n\n print('\\nSampling for more conformers if needed...')\n run_conformers(\n zma=zma,\n spc_info=spc_info,\n thy_info=mod_thy_info,\n nsamp=nsamp,\n tors_range_dct=tors_range_dct,\n cnf_run_fs=cnf_run_fs,\n cnf_save_fs=cnf_save_fs,\n script_str=script_str,\n overwrite=overwrite,\n saddle=saddle,\n two_stage=two_stage,\n **kwargs,\n )\n\n print('\\nSaving any newly found conformers in run filesys...')\n save_conformers(\n cnf_run_fs=cnf_run_fs,\n cnf_save_fs=cnf_save_fs,\n thy_info=mod_thy_info,\n saddle=saddle,\n dist_info=dist_info,\n rxn_class=rxn_class\n )\n\n # Save information about the minimum energy conformer in top directory\n min_cnf_locs = filesys.mincnf.min_energy_conformer_locators(cnf_save_fs)\n if min_cnf_locs:\n geo = cnf_save_fs[-1].file.geometry.read(min_cnf_locs)\n zma = cnf_save_fs[-1].file.zmatrix.read(min_cnf_locs)\n if not saddle:\n assert automol.zmatrix.almost_equal(zma, automol.geom.zmatrix(geo))\n thy_save_fs[-1].file.geometry.write(geo, mod_thy_info[1:4])\n thy_save_fs[-1].file.zmatrix.write(zma, mod_thy_info[1:4])\n\n else:\n 
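One caveat in the crackAnalysis.py main section above: per the Win32 API, GetSystemMetrics(0) is the screen width (SM_CXSCREEN) and GetSystemMetrics(1) the height (SM_CYSCREEN), so the h/w assignments there appear swapped; the shrink loop still terminates, but it compares the image against the wrong screen dimension. A corrected sketch of the clamp:

import ctypes

def clamp_to_screen(width, height):
    # Shrink by 10% per step until the image fits on the screen.
    user32 = ctypes.windll.user32
    screen_w = user32.GetSystemMetrics(0)  # SM_CXSCREEN
    screen_h = user32.GetSystemMetrics(1)  # SM_CYSCREEN
    while height > screen_h or width > screen_w:
        height *= 9 / 10
        width *= 9 / 10
    return int(width), int(height)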
thy_save_fs[0].file.geometry.write(geo)\n thy_save_fs[0].file.zmatrix.write(zma)\n\n\ndef single_conformer(zma, spc_info, thy_info,\n thy_save_fs, cnf_run_fs, cnf_save_fs,\n overwrite, saddle=False, dist_info=()):\n \"\"\" generate single optimized geometry for\n randomly sampled initial torsional angles\n \"\"\"\n opt_script_str, _, kwargs, _ = es_runner.par.run_qchem_par(*thy_info[0:2])\n conformer_sampling(\n zma=zma,\n spc_info=spc_info,\n mod_thy_info=thy_info,\n thy_save_fs=thy_save_fs,\n cnf_run_fs=cnf_run_fs,\n cnf_save_fs=cnf_save_fs,\n script_str=opt_script_str,\n overwrite=overwrite,\n nsamp_par=[False, 0, 0, 0, 0, 1],\n saddle=saddle,\n dist_info=dist_info,\n two_stage=saddle,\n **kwargs,\n )\n\n\ndef run_conformers(\n zma, spc_info, thy_info, nsamp, tors_range_dct,\n cnf_run_fs, cnf_save_fs, script_str, overwrite, saddle, two_stage,\n **kwargs):\n \"\"\" run sampling algorithm to find conformers\n \"\"\"\n if not tors_range_dct:\n print(\" - No torsional coordinates. Setting nsamp to 1.\")\n nsamp = 1\n\n cnf_save_fs[0].create()\n vma = automol.zmatrix.var_(zma)\n if cnf_save_fs[0].file.vmatrix.exists():\n existing_vma = cnf_save_fs[0].file.vmatrix.read()\n assert vma == existing_vma\n cnf_save_fs[0].file.vmatrix.write(vma)\n nsamp0 = nsamp\n inf_obj = autofile.system.info.conformer_trunk(0, tors_range_dct)\n if cnf_save_fs[0].file.info.exists():\n inf_obj_s = cnf_save_fs[0].file.info.read()\n nsampd = inf_obj_s.nsamp\n elif cnf_run_fs[0].file.info.exists():\n inf_obj_r = cnf_run_fs[0].file.info.read()\n nsampd = inf_obj_r.nsamp\n else:\n nsampd = 0\n\n tot_samp = nsamp - nsampd\n print(' - Number of samples that have been currently run:', nsampd)\n print(' - Number of samples requested:', nsamp)\n\n if nsamp-nsampd > 0:\n print('\\nRunning {} samples...'.format(nsamp-nsampd))\n samp_idx = 1\n while True:\n nsamp = nsamp0 - nsampd\n # Break the while loop if enough sampls completed\n if nsamp <= 0:\n print('Requested number of samples have been completed. 
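In run_conformers above (conformer.py), the completed-sample counter is read with the same three-way fallback before the sampling loop and again after every optimization; a small helper capturing that chain (hypothetical name, using only the autofile accessors already present in the file):

def read_nsampd(cnf_save_fs, cnf_run_fs):
    # Prefer the save layer's counter, fall back to the run layer,
    # and start from zero on a fresh conformer search.
    if cnf_save_fs[0].file.info.exists():
        return cnf_save_fs[0].file.info.read().nsamp
    if cnf_run_fs[0].file.info.exists():
        return cnf_run_fs[0].file.info.read().nsamp
    return 0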
'\n 'Conformer search complete.')\n break\n\n # Run the conformer sampling\n if nsampd > 0:\n samp_zma, = automol.zmatrix.samples(zma, 1, tors_range_dct)\n else:\n samp_zma = zma\n\n cid = autofile.system.generate_new_conformer_id()\n locs = [cid]\n\n cnf_run_fs[-1].create(locs)\n cnf_run_path = cnf_run_fs[-1].path(locs)\n run_fs = autofile.fs.run(cnf_run_path)\n\n print(\"Run {}/{}\".format(samp_idx, tot_samp))\n tors_names = list(tors_range_dct.keys())\n if two_stage and tors_names:\n print('Stage one beginning, holding the coordinates constant',\n tors_names)\n es_runner.run_job(\n job=elstruct.Job.OPTIMIZATION,\n script_str=script_str,\n run_fs=run_fs,\n geom=samp_zma,\n spc_info=spc_info,\n thy_info=thy_info,\n overwrite=overwrite,\n frozen_coordinates=[tors_names],\n saddle=saddle,\n **kwargs\n )\n print('Stage one success, reading for stage 2')\n ret = es_runner.read_job(\n job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)\n if ret:\n sinf_obj, _, out_str = ret\n prog = sinf_obj.prog\n samp_zma = elstruct.reader.opt_zmatrix(prog, out_str)\n print('Stage one success beginning stage two on', samp_zma)\n es_runner.run_job(\n job=elstruct.Job.OPTIMIZATION,\n script_str=script_str,\n run_fs=run_fs,\n geom=samp_zma,\n spc_info=spc_info,\n thy_info=thy_info,\n overwrite=overwrite,\n saddle=saddle,\n **kwargs\n )\n else:\n es_runner.run_job(\n job=elstruct.Job.OPTIMIZATION,\n script_str=script_str,\n run_fs=run_fs,\n geom=samp_zma,\n spc_info=spc_info,\n thy_info=thy_info,\n overwrite=overwrite,\n saddle=saddle,\n **kwargs\n )\n\n if cnf_save_fs[0].file.info.exists():\n inf_obj_s = cnf_save_fs[0].file.info.read()\n nsampd = inf_obj_s.nsamp\n elif cnf_run_fs[0].file.info.exists():\n inf_obj_r = cnf_run_fs[0].file.info.read()\n nsampd = inf_obj_r.nsamp\n nsampd += 1\n samp_idx += 1\n inf_obj.nsamp = nsampd\n cnf_save_fs[0].file.info.write(inf_obj)\n cnf_run_fs[0].file.info.write(inf_obj)\n\n\ndef save_conformers(cnf_run_fs, cnf_save_fs, thy_info, saddle=False,\n dist_info=(), rxn_class=''):\n \"\"\" save the conformers that have been found so far\n \"\"\"\n\n locs_lst = cnf_save_fs[-1].existing()\n seen_geos = [cnf_save_fs[-1].file.geometry.read(locs)\n for locs in locs_lst]\n seen_enes = [cnf_save_fs[-1].file.energy.read(locs)\n for locs in locs_lst]\n\n if not cnf_run_fs[0].exists():\n print(\" - No conformers in run filesys to save.\")\n else:\n print(\" - Found conformers in run filesys to save.\\n\")\n for locs in cnf_run_fs[-1].existing():\n # # Only go through save procedure if conf not in save\n # # may need to get geo, ene, etc; maybe make function\n # if cnf_save_fs[-1].exists(locs):\n # continue\n # else:\n # print('New conformer to save...')\n cnf_run_path = cnf_run_fs[-1].path(locs)\n run_fs = autofile.fs.run(cnf_run_path)\n print(\"Reading from conformer run at {}\".format(cnf_run_path))\n\n ret = es_runner.read_job(\n job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)\n if ret:\n inf_obj, inp_str, out_str = ret\n prog = inf_obj.prog\n method = inf_obj.method\n ene = elstruct.reader.energy(prog, method, out_str)\n geo = elstruct.reader.opt_geometry(prog, out_str)\n if not saddle:\n gra = automol.geom.graph(geo)\n conns = automol.graph.connected_components(gra)\n lconns = len(conns)\n else:\n lconns = 1\n if lconns > 1:\n print(\" - Geometry is disconnected.\",\n \"Conformer will not be saved.\")\n else:\n if saddle:\n # ts_class, ts_original_zma, ts_tors_names,\n # ts_dist_info\n # geo, zma, final_dist = check_filesys_for_ts(\n # ts_dct, ts_zma, cnf_save_fs, overwrite,\n # typ, 
dist_info, dist_name, bkp_ts_class_data)\n # zma = cnf_save_fs[-1].file.zmatrix.read(\n # cnf_save_locs)\n\n # # Add an angle check which is added\n # to spc dct for TS (crap code...)\n # vals = automol.zmatrix.values(zma)\n # final_dist = vals[dist_name]\n # dist_info[1] = final_dist\n # angle = ts.chk.check_angle(\n # ts_dct['zma'],\n # ts_dct['dist_info'],\n # ts_dct['class'])\n # ts_dct['dist_info'][1] = final_dist\n # ts_dct['dist_info'].append(angle)\n zma = elstruct.reader.opt_zmatrix(prog, out_str)\n dist_name = dist_info[0]\n dist_len = dist_info[1]\n ts_bnd = automol.zmatrix.bond_idxs(zma, dist_name)\n ts_bnd1 = min(ts_bnd)\n ts_bnd2 = max(ts_bnd)\n conf_dist_len = automol.zmatrix.values(zma)[dist_name]\n brk_name = dist_info[3]\n cent_atm = None\n ldist = len(dist_info)\n if dist_name and brk_name and ldist > 4:\n angle = dist_info[4]\n brk_bnd = automol.zmatrix.bond_idxs(zma, brk_name)\n ang_atms = [0, 0, 0]\n cent_atm = list(set(brk_bnd) & set(ts_bnd))\n if cent_atm:\n ang_atms[1] = cent_atm[0]\n for idx in brk_bnd:\n if idx != ang_atms[1]:\n ang_atms[0] = idx\n for idx in ts_bnd:\n if idx != ang_atms[1]:\n ang_atms[2] = idx\n geom = automol.zmatrix.geometry(zma)\n conf_ang = automol.geom.central_angle(\n geom, *ang_atms)\n max_disp = 0.6\n if 'addition' in rxn_class:\n max_disp = 0.8\n if 'abstraction' in rxn_class:\n max_disp = 1.4\n\n # check forming bond angle similar to ini config\n if cent_atm and 'elimination' not in rxn_class:\n # print('angle test in conformer selection:',\n # angle, conf_ang)\n if abs(conf_ang - angle) > .44:\n print(\" - Transition State conformer has\",\n \"diverged from original structure of\",\n \"angle {:.3f} with angle {:.3f}\".format(\n angle, conf_ang))\n continue\n # check if radical atom is closer to some atom\n # other than the bonding atom\n if 'add' in rxn_class or 'abst' in rxn_class:\n print('it is an addition or an abstraction:')\n cls = is_atom_closest_to_bond_atom(\n zma, ts_bnd2, conf_dist_len)\n if not cls:\n print(\" - Transition State conformer has\",\n \"diverged from original structure of\",\n \"dist {:.3f} with dist {:.3f}\".format(\n dist_len, conf_dist_len))\n print('Radical atom now has a new',\n 'nearest neighbor')\n continue\n if abs(conf_dist_len - dist_len) > max_disp:\n print(\" - Transition State conformer has\",\n \"diverged from original structure of\",\n \"dist {:.3f} with dist {:.3f}\".format(\n dist_len, conf_dist_len))\n continue\n symbols = automol.zmatrix.symbols(zma)\n\n # Set standard equivalent bond len for rxn coord\n symbols = automol.zmatrix.symbols(zma)\n symb1, symb2 = symbols[ts_bnd1], symbols[ts_bnd2]\n if (symb1, symb2) in bnd.LEN_DCT:\n equi_bnd = bnd.LEN_DCT[(symb1, symb2)]\n elif (symb2, symb1) in bnd.LEN_DCT:\n equi_bnd = bnd.LEN_DCT[(symb2, symb1)]\n else:\n equi_bnd = 0.0\n displace_from_equi = conf_dist_len - equi_bnd\n dchk1 = abs(conf_dist_len - dist_len) > 0.2\n dchk2 = displace_from_equi < 0.2\n if dchk1 and dchk2:\n print(\" - Transition State conformer has\",\n \"converged to an\",\n \"equilibrium structure with dist\",\n \" {:.3f} comp with equil {:.3f}\".format(\n conf_dist_len, equi_bnd))\n continue\n else:\n if abs(conf_dist_len - dist_len) > 0.4:\n print(\" - Transition State conformer has\",\n \"diverged from original structure of\",\n \"dist {:.3f} with dist {:.3f}\".format(\n dist_len, conf_dist_len))\n continue\n else:\n zma = automol.geom.zmatrix(geo)\n unique = is_unique_tors_dist_mat_energy(\n geo, ene, seen_geos, seen_enes, saddle)\n\n if not unique:\n print(\" - Geometry 
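The displacement threshold applied in save_conformers above depends on the reaction class; the same branching as a small helper (hypothetical name) that makes the abstraction-over-addition precedence of the original assignments explicit:

def max_displacement(rxn_class):
    # Looser limits for transition states with softer forming bonds;
    # 'abstraction' wins when both substrings occur, matching the
    # assignment order in save_conformers.
    if 'abstraction' in rxn_class:
        return 1.4
    if 'addition' in rxn_class:
        return 0.8
    return 0.6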
is not unique.\"\n \"Conformer will not be saved.\")\n else:\n vma = automol.zmatrix.var_(zma)\n if cnf_save_fs[0].file.vmatrix.exists():\n exist_vma = cnf_save_fs[0].file.vmatrix.read()\n if vma != exist_vma:\n print(\" - Isomer is not the same as starting\",\n \"isomer. Skipping...\")\n else:\n save_path = cnf_save_fs[-1].path(locs)\n print(\" - Geometry is unique. Saving...\")\n print(\" - Save path: {}\".format(save_path))\n\n cnf_save_fs[-1].create(locs)\n cnf_save_fs[-1].file.geometry_info.write(\n inf_obj, locs)\n cnf_save_fs[-1].file.geometry_input.write(\n inp_str, locs)\n cnf_save_fs[-1].file.energy.write(ene, locs)\n cnf_save_fs[-1].file.geometry.write(geo, locs)\n cnf_save_fs[-1].file.zmatrix.write(zma, locs)\n\n # Saving the energy to am SP filesys\n print(\" - Saving energy...\")\n sp_save_fs = autofile.fs.single_point(\n save_path)\n sp_save_fs[-1].create(thy_info[1:4])\n sp_save_fs[-1].file.input.write(\n inp_str, thy_info[1:4])\n sp_save_fs[-1].file.info.write(\n inf_obj, thy_info[1:4])\n sp_save_fs[-1].file.energy.write(\n ene, thy_info[1:4])\n\n seen_geos.append(geo)\n seen_enes.append(ene)\n\n # Update the conformer trajectory file\n print('')\n filesys.mincnf.traj_sort(cnf_save_fs)\n\n\ndef is_atom_closest_to_bond_atom(zma, idx_rad, bond_dist):\n \"\"\" Check to see whether the radical atom is still closest to the bond\n formation site.\n \"\"\"\n geo = automol.zmatrix.geometry(zma)\n atom_closest = True\n for idx, _ in enumerate(geo):\n if idx < idx_rad:\n distance = automol.geom.distance(geo, idx, idx_rad)\n if distance < bond_dist-0.01:\n atom_closest = False\n print('idx test:', idx, distance, bond_dist)\n return atom_closest\n\n\ndef check_angle(ts_zma, dist_info, rxn_class):\n \"\"\" Check the angle to amend the dct\n \"\"\"\n angle = None\n dist_name = dist_info[0]\n if 'abstraction' in rxn_class or 'addition' in rxn_class:\n brk_name = dist_info[3]\n if dist_name and brk_name:\n ts_bnd = automol.zmatrix.bond_idxs(\n ts_zma, dist_name)\n brk_bnd = automol.zmatrix.bond_idxs(\n ts_zma, brk_name)\n ang_atms = [0, 0, 0]\n cent_atm = list(set(brk_bnd) & set(ts_bnd))\n if cent_atm:\n ang_atms[1] = cent_atm[0]\n for idx in brk_bnd:\n if idx != ang_atms[1]:\n ang_atms[0] = idx\n for idx in ts_bnd:\n if idx != ang_atms[1]:\n ang_atms[2] = idx\n\n geom = automol.zmatrix.geometry(ts_zma)\n angle = automol.geom.central_angle(\n geom, *ang_atms)\n\n return angle\n\n\ndef is_unique_coulomb_energy(geo, ene, geo_list, ene_list):\n \"\"\" compare given geo with list of geos all to see if any have the same\n coulomb spectrum and energy\n \"\"\"\n unique = True\n for idx, geoi in enumerate(geo_list):\n enei = ene_list[idx]\n etol = 2.e-5\n if abs(ene-enei) < etol:\n if automol.geom.almost_equal_coulomb_spectrum(\n geo, geoi, rtol=1e-2):\n unique = False\n return unique\n\n\ndef is_unique_dist_mat_energy(geo, ene, geo_list, ene_list):\n \"\"\" compare given geo with list of geos all to see if any have the same\n distance matrix and energy\n \"\"\"\n unique = True\n for idx, geoi in enumerate(geo_list):\n enei = ene_list[idx]\n etol = 2.e-5\n if abs(ene-enei) < etol:\n if automol.geom.almost_equal_dist_mat(\n geo, geoi, thresh=1e-1):\n unique = False\n return unique\n\n\ndef int_sym_num_from_sampling(\n geo, ene, cnf_save_fs, saddle=False, frm_bnd_key=(),\n brk_bnd_key=(), form_coords=(), tors_names=()):\n \"\"\" Determine the symmetry number for a given conformer geometry.\n (1) Explore the saved conformers to find the list of similar conformers -\n i.e. 
those with a coulomb matrix and energy that are equivalent\n to those for the reference geometry.\n (2) Expand each of those similar conformers by applying\n rotational permutations to each of the terminal groups.\n (3) Count how many distinct distance matrices there are in\n the fully expanded conformer list.\n \"\"\"\n\n # Note: ignoring for saddle points the possibility that two configurations\n # differ only in their torsional values.\n # As a result, the symmetry factor is a lower bound of the true value\n if automol.geom.is_atom(geo):\n int_sym_num = 1.\n else:\n if not saddle:\n tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)\n if tors_names is None:\n int_sym_num = 1.\n else:\n ethrsh = 1.e-5\n locs_lst = cnf_save_fs[-1].existing()\n int_sym_num = 1.\n if locs_lst:\n enes = [cnf_save_fs[-1].file.energy.read(locs)\n for locs in locs_lst]\n geos = [cnf_save_fs[-1].file.geometry.read(locs)\n for locs in locs_lst]\n geo_sim = []\n geo_sim2 = []\n ene_sim = []\n for geoi, enei in zip(geos, enes):\n if enei - enes[0] < ethrsh:\n geo_lst = [geoi]\n ene_lst = [enei]\n unique = is_unique_coulomb_energy(\n geo, ene, geo_lst, ene_lst)\n if not unique:\n geo_sim.append(geoi)\n ene_sim.append(enei)\n\n int_sym_num = 0\n for geo_sim_i in geo_sim:\n new_geos = automol.geom.rot_permutated_geoms(\n geo_sim_i, saddle,\n frm_bnd_key, brk_bnd_key, form_coords)\n for new_geo in new_geos:\n new_geom = True\n for geo_sim_j in geo_sim2:\n if automol.geom.almost_equal_dist_mat(\n new_geo, geo_sim_j, thresh=3e-1):\n if saddle:\n new_geom = False\n break\n if are_torsions_same(new_geo, geo_sim_j):\n new_geom = False\n break\n if new_geom:\n geo_sim2.append(new_geo)\n int_sym_num += 1\n return int_sym_num\n\n\ndef symmetry_factor(\n geo, ene, cnf_save_fs, saddle=False, frm_bnd_key=(), brk_bnd_key=(),\n form_coords=(), tors_names=()):\n \"\"\" obtain overall symmetry factor for a geometry as a product\n of the external symmetry factor and the internal symmetry number\n \"\"\"\n # Note: ignoring for saddle points the possibility that two configurations\n # differ only in their torsional values.\n # As a result, the symmetry factor is a lower bound of the true value\n ext_sym = automol.geom.external_symmetry_factor(geo)\n if not saddle:\n tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)\n if tors_names:\n int_sym = int_sym_num_from_sampling(\n geo, ene, cnf_save_fs, saddle,\n frm_bnd_key, brk_bnd_key,\n form_coords, tors_names)\n else:\n int_sym = 1\n sym_fac = ext_sym * int_sym\n return sym_fac\n\n\ndef is_unique_stereo_dist_mat_energy(geo, ene, geo_list, ene_list):\n \"\"\" compare given geo with list of geos all to see if any have the same\n distance matrix and energy and stereo specific inchi\n \"\"\"\n unique = True\n ich = automol.convert.geom.inchi(geo)\n for idx, geoi in enumerate(geo_list):\n enei = ene_list[idx]\n etol = 2.e-5\n ichi = automol.convert.geom.inchi(geoi)\n # check energy\n if abs(ene-enei) < etol:\n # check distance matrix\n if automol.geom.almost_equal_dist_mat(\n geo, geoi, thresh=1e-1):\n # check stereo by generates stero label\n ichi = automol.convert.geom.inchi(geoi)\n if ich == ichi:\n unique = False\n return unique\n\n\ndef are_torsions_same(geo, geoi):\n \"\"\" compare all torsional angle values\n \"\"\"\n dtol = 0.09\n same_dihed = True\n zma = automol.geom.zmatrix(geo)\n tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)\n zmai = automol.geom.zmatrix(geoi)\n tors_namesi = automol.geom.zmatrix_torsion_coordinate_names(geoi)\n for idx, 
tors_name in enumerate(tors_names):\n val = automol.zmatrix.values(zma)[tors_name]\n vali = automol.zmatrix.values(zmai)[tors_namesi[idx]]\n valip = vali+2.*numpy.pi\n valim = vali-2.*numpy.pi\n vchk1 = abs(val - vali)\n vchk2 = abs(val - valip)\n vchk3 = abs(val - valim)\n if vchk1 > dtol and vchk2 > dtol and vchk3 > dtol:\n same_dihed = False\n return same_dihed\n\n\ndef is_unique_tors_dist_mat_energy(geo, ene, geo_list, ene_list, saddle):\n \"\"\" compare given geo with list of geos all to see if any have the same\n coulomb spectrum and energy and stereo specific inchi\n \"\"\"\n unique = True\n etol = 2.e-5\n for idx, geoi in enumerate(geo_list):\n enei = ene_list[idx]\n # check energy\n if abs(ene-enei) < etol:\n # check distance matrix\n if automol.geom.almost_equal_dist_mat(\n geo, geoi, thresh=3e-1):\n # check dihedrals\n if saddle:\n unique = False\n elif are_torsions_same(geo, geoi):\n unique = False\n return unique\n","sub_path":"routines/es/_routines/conformer.py","file_name":"conformer.py","file_ext":"py","file_size_in_byte":27587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"541429446","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Pool:\n def __init__(self, summerTemp=23, winterTemp=36, tempThreshold=20, insulationThickness=0):\n self.summerTemp = summerTemp # celsius\n self.winterTemp = winterTemp # celsius\n self.summerDays = []\n self.summerTempThreshold = tempThreshold # celsius\n self.insulationThickness = insulationThickness # meters\n self.insulationPrice = 0\n self.surfaceArea = 200 # m^2\n self.sideArea = 90 # m^2\n self.windVelocity = 3 # m/s\n self.relativeHumidity = 0.5\n\n self.summerOff = True\n self.summerAirTemp = True\n\n self.nightCover = True\n self.nightCoverCost = 0\n self.coverReduction = 0.5 * 8/24\n self.standAlone = False\n\n self.hAirTop = 6.61 # W/(m^2 K)\n self.hAirWalls = 6.75 # W/(m^2 K)\n self.hmAir = 6.4133 # m/s\n self.vaporEnthalpy = 2418 # kJ/kg\n\n self.fig = None\n self.tempPlot = None\n self.heatPlot = None\n self.timeVector = np.linspace(0, 365, 365)\n self.heatLoss = np.zeros(365) # W\n self.airTemp = None\n self.poolTemp = None\n self.evapMassRate = 0\n\n self.QEvap = None # W ...\n self.QWaterInput = None\n self.QTop = None\n self.QWalls = None\n self.QRadiation = None\n\n def getTotalLoss(self):\n self.setAirTemp()\n self.setPoolTemp()\n\n self.getQEvap()\n self.getQWaterInput()\n self.getQSurfaces()\n self.getQRadiation()\n\n self.getCost()\n if self.standAlone:\n self.printStats()\n self.setPlot()\n\n def getLocalLoss(self):\n self.heatLoss = 0\n\n self.getQEvap()\n self.getQWaterInput()\n self.getQSurfaces()\n self.getQRadiation()\n\n return self.heatLoss\n\n def setAirTemp(self):\n self.airTemp = 6.4 + 29.5 * np.sin(1.5 * np.pi + 2 * np.pi * self.timeVector / 365)\n\n def setPoolTemp(self):\n self.summerDays = np.where(self.airTemp > self.summerTempThreshold)[0]\n if self.standAlone:\n print(\"Summer Pool for {} days a year.\".format(len(self.summerDays)))\n\n self.poolTemp = np.ones(365) * self.winterTemp\n\n if self.summerAirTemp:\n self.poolTemp[self.summerDays] = self.airTemp[self.summerDays]\n else:\n self.poolTemp[self.summerDays] = self.summerTemp\n\n def getQEvap(self):\n surfaceSatVapor = self.getSatVaporAt(self.poolTemp)\n airSatVapor = self.getSatVaporAt(self.airTemp)\n airVapor = self.relativeHumidity * airSatVapor\n\n self.evapMassRate = self.hmAir * self.surfaceArea * (surfaceSatVapor - airVapor) # kg/s\n\n if self.nightCover:\n 
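are_torsions_same above compares each dihedral against vali, vali + 2*pi and vali - 2*pi; for angles within one period this is the usual wrap-around distance, which can be written in two lines (an equivalent sketch, not the module's code):

import numpy

def dihedral_close(val, vali, tol=0.09):
    # Wrap the difference into [0, 2*pi) and take the shorter arc;
    # matches the three explicit checks for in-range dihedrals.
    diff = abs(val - vali) % (2.0 * numpy.pi)
    return min(diff, 2.0 * numpy.pi - diff) <= tol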
self.evapMassRate *= 1 - self.coverReduction\n\n self.QEvap = self.evapMassRate * self.vaporEnthalpy\n self.heatLoss += self.QEvap\n\n def getSatVaporAt(self, temp):\n return 1.9747*10**(-5)*temp**2 + 1.3257*10**(-4)*temp + 3.9866*10**(-3)\n\n def getQWaterInput(self):\n coldWaterCp = 4.198 # kJ/(kg K) at 7 celsius\n self.QWaterInput = self.evapMassRate * coldWaterCp * (self.poolTemp - 7)\n self.heatLoss += self.QWaterInput\n\n def getQSurfaces(self):\n kGlass = 0.8 # W/mK\n kInsulation = 0.05 # W/mK\n\n resTop = 1 / (self.hAirTop * self.surfaceArea)\n if self.nightCover:\n resTop *= 1 + self.coverReduction\n\n resWalls = 0.015/kGlass + 1/self.hAirWalls\n if self.insulationThickness != 0:\n resWalls += self.insulationThickness/kInsulation\n\n resWalls /= self.sideArea\n resSum = resWalls + resTop\n\n resSurfaces = (1/resTop + 1/resWalls)**(-1)\n qTotal = (self.poolTemp - self.airTemp) / resSurfaces\n\n self.QTop = qTotal * (1 - resTop/resSum)\n self.QWalls = qTotal * (1 - resWalls/resSum)\n self.heatLoss += qTotal\n\n def getQRadiation(self):\n self.QRadiation = np.ones(365)\n self.QRadiation *= -8500\n\n def printStats(self):\n totalAnnualLoss = np.round(np.sum(self.heatLoss)*24/1000, 1)\n print(\"Total Heat Loss per year = {} kWh\\n\".format(totalAnnualLoss))\n\n for QLoss, label in zip([self.QEvap, self.QWaterInput, self.QTop, self.QWalls, self.QRadiation], [\"Evap\", \"WaterInput\", \"Top surface\", \"Walls\", \"Radiation\"]):\n annualLoss = np.round(np.sum(QLoss)*24/1000, 1)\n print(\"Q{} per year = {} kWh ({}%)\".format(label, annualLoss, np.round(annualLoss/totalAnnualLoss*100, 1)))\n\n def setPlot(self):\n self.fig, [self.tempPlot, self.heatPlot] = plt.subplots(2)\n self.tempPlot.datas, self.tempPlot.labels = [self.airTemp, self.poolTemp], [\"Air\", \"Piscine\"]\n self.heatPlot.datas, self.heatPlot.labels = [self.heatLoss, self.QEvap, self.QWaterInput, self.QTop, self.QWalls, self.QRadiation], [\"Total\", \"Évaporation\", \"Remplissage\", \"Surface\", \"Murs\", \"Rayonnement\"]\n\n for i, graph in enumerate([self.tempPlot, self.heatPlot]):\n for data, label in zip(graph.datas, graph.labels):\n if i == 0:\n graph.set_title(\"Températures au cours de l'année\")\n graph.plot(self.timeVector, data, label=label)\n graph.set_ylabel(\"T [$\\degree$C]\")\n else:\n graph.set_title(\"Bilan des pertes thermiques annuelles\")\n graph.plot(self.timeVector, data/1000, label=\"{} ({}%)\".format(label, np.round(100*np.sum(data)/np.sum(self.heatLoss), 1)))\n graph.set_ylabel(\"$q$ [kW]\")\n\n graph.set_xlabel(\"Jours\")\n graph.legend(loc=\"best\")\n graph.set_xlim(0, 365)\n\n def getCost(self):\n insulationVolume = self.sideArea*self.insulationThickness # m^3\n self.insulationPrice = np.round(insulationVolume*100, 2)\n self.nightCoverCost = self.nightCover*50\n\n if self.standAlone:\n print(\"\\nInsulation : {} m^3 => {} $\".format(insulationVolume, self.insulationPrice))\n\n def showPlot(self):\n plt.tight_layout()\n plt.show()\n","sub_path":"Transferts thermique [GMC-3005]/TP-piscine-exterieure/heatLoss.py","file_name":"heatLoss.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"56372764","text":"from django import forms\r\nfrom django.utils.translation import ugettext_lazy as _\r\n\r\nfrom carpool.models import Trip\r\nfrom carpool.fields.geometry import DirectionsFormField\r\nfrom carpool.fields.datetime import DateTimeSelectWidget, DateSelectWidget, TimeSelectWidget\r\n\r\nfrom uni_form.helpers 
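A quick sanity check of the parallel heat path built in getQSurfaces above, using the Pool class constants (standalone, illustrative numbers; the full model also adjusts the top resistance for the night cover and the wall resistance for insulation):

# Top film and wall branches conduct in parallel from pool to air.
h_top, area_top = 6.61, 200.0                  # W/(m^2 K), m^2
res_top = 1.0 / (h_top * area_top)             # K/W
res_walls = (0.015 / 0.8 + 1.0 / 6.75) / 90.0  # 15 mm glass + air film, K/W
res_parallel = 1.0 / (1.0 / res_top + 1.0 / res_walls)
q = (36.0 - 6.4) / res_parallel                # W at the winter set point
print(round(q / 1000.0, 1), "kW")              # about 55 kW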
import FormHelper, Submit, Reset\r\nfrom uni_form.helpers import Layout, Fieldset, Row, Column, HTML\r\n\r\nTripFormHelper = FormHelper()\r\n\r\nlayout = Layout(\r\n Fieldset(_('Trip details'),\r\n Column('email', 'when'), css_class='span-5'),\r\n \r\n Fieldset(_('Route'),\r\n 'travel_path', css_class='span-15')\r\n)\r\n\r\nTripFormHelper.add_layout(layout)\r\n\r\nsubmit = Submit('add', _('Add this contact'))\r\n\r\nTripFormHelper.add_input(submit)\r\n\r\nclass TripForm(forms.ModelForm):\r\n \r\n class Meta:\r\n model = Trip\r\n widgets = {'when': DateTimeSelectWidget}\r\n \r\n travel_path = DirectionsFormField(label=_('Route'))\r\n\r\n helper = TripFormHelper\r\n\r\nSearchTripFormHelper = FormHelper()\r\n\r\nlayout = Layout(\r\n Fieldset(_('Trip details'),\r\n Column('day', 'earliest', 'latest'), css_class='span-5'),\r\n \r\n Fieldset(_('Route'),\r\n 'travel_path', css_class='span-15')\r\n)\r\n\r\nSearchTripFormHelper.add_layout(layout)\r\n\r\nsubmit = Submit('add', _('Add this contact'))\r\n\r\nSearchTripFormHelper.add_input(submit)\r\n\r\nclass SearchTripForm(forms.Form):\r\n \r\n travel_path = DirectionsFormField(label=_('Route'))\r\n day = forms.DateField(label=_('Day'), widget=DateSelectWidget)\r\n earliest = forms.TimeField(label=_('Earliest'), widget=TimeSelectWidget)\r\n latest = forms.TimeField(label=_('Latest'), widget=TimeSelectWidget)\r\n \r\n helper = SearchTripFormHelper","sub_path":"apps/django-carpool/carpool/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"531772623","text":"import optparse\nimport sys\n\n\ndef get_parse(version):\n\n parse = optparse.OptionParser(description=' Multiple flash system FW for OCP TiogaPass.',\n epilog=\" Wiwynn - Kimi Haung.\")\n\n parse.add_option(\"-t\", \"--type\", dest=\"type\", action=\"store\", type=\"string\",\n help=\"FW type selection. [OPTIONS]: BIOS, BMC, VR, CPLD\")\n parse.add_option(\"-m\", \"--mode\", dest=\"mode\", action=\"store\", type=\"string\",\n help=\"Updating mode. 
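A minimal sketch of wiring the TripForm above into a view (assuming stock Django conventions and a hypothetical template path; not part of the original carpool app):

from django.shortcuts import render
from carpool.forms import TripForm

def add_trip(request):
    # Bind POST data when present so validation errors re-render inline.
    form = TripForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
    return render(request, 'carpool/trip_form.html', {'form': form})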
[OPTIONS]: remote, local, kcs\")\n parse.add_option(\"-f\", \"--filename\", dest=\"filename\", action=\"store\", type=\"string\",\n help=\"The filename of FW ROM.\")\n parse.add_option(\"-l\", \"--system_list\", dest=\"list\", action=\"store\", type=\"string\",\n help=\"The filename of system IP list table.\")\n parse.add_option(\"-e\", \"--example\", action=\"store_true\", help=\"Show few examples of the tool.\")\n parse.add_option(\"-v\", \"--version\", action=\"store_true\", help=\"Show the version of Multiple Flash tool.\")\n\n (option, args) = parse.parse_args()\n\n if option.version:\n print(\" Version: %s\" %version)\n sys.exit(0)\n\n if option.example:\n print(\"\\033[96m\")\n print(\"EXAMPLES:\\n\")\n print(\"# Local flash BIOS FW:\")\n print(\" --- multiple_flash.py -t BIOS -m local -f 5566.bin\\n\")\n print(\"# Remote flash BMC FW with an IP list:\")\n print(\" --- multiple_flash.py -t BMC -m remote -f GGYY.bin -l ip_list\\n\")\n print(\"# Local flash CPLD FW via kcs:\")\n print(\" --- multiple_flash.py -t CPLD -m kcs -f GGininder.bin\\n\")\n print(\"\\033[00m\")\n sys.exit(0)\n\n if option.type is None:\n parse.error(\"\\033[91m\" + \"Missing FW type selection!\" + \"\\033[00m\")\n sys.exit(1)\n\n if option.type != \"BIOS\" and option.type != \"BMC\" and option.type != \"VR\" and option.type != \"CPLD\":\n parse.error(\"\\033[91m\" + \"Wrong FW type (%s) selection!\" % option.type + \"\\033[00m\")\n sys.exit(1)\n\n if option.mode is None:\n parse.error(\"\\033[91m\" + \"Missing updating mode!\" + \"\\033[00m\")\n sys.exit(1)\n\n if option.mode != \"local\" and option.mode != \"remote\" and option.mode != \"kcs\":\n parse.error(\"\\033[91m\" + \"Wrong updating mode selected!\" + \"\\033[00m\")\n sys.exit(1)\n\n if option.filename is None:\n parse.error(\"\\033[91m\" + \"Missing FW ROM filename!\" + \"\\033[00m\")\n sys.exit(1)\n\n # Raise error when there is no hostname and system list for remote updating.\n if option.mode == \"remote\" and option.list is None:\n parse.error(\"\\033[91m\" + \"Need a system IP list table for remote updating!\" + \"\\033[00m\")\n sys.exit(1)\n\n if option.mode == \"local\" or option.mode == \"kcs\":\n if option.type == \"VR\":\n print(\"\\033[91m\" + \"It is impossible to flash VR firmware under OS!\" + \"\\033[00m\")\n print(\"\\033[91m\" + \"Please flash VR firmware via remote updating mode.\" + \"\\033[00m\")\n sys.exit(1)\n\n if option.mode == \"remote\":\n return option.type, option.mode, option.filename, option.list\n else:\n return option.type, option.mode, option.filename\n","sub_path":"opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"376137153","text":"\nimport scipy.io as io\nimport numpy as np\nimport gzip\nimport time\nimport pdb\n\n\nSOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'\nWORK_DIRECTORY = 'data'\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nPIXEL_DEPTH = 255\nNUM_LABELS = 10\nVALIDATION_SIZE = 5000 # Size of the validation set.\n\n\n\n\n\n\ndef extract_data(filename, num_images):\n \"\"\"Extract the images into a 4D tensor [image index, y, x, channels].\n\n Values are rescaled from [0, 255] down to [-0.5, 0.5].\n \"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = 
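optparse, used by get_parse above, has been deprecated since Python 2.7; argparse's choices= and required= subsume the manual type/mode checks. A sketch of the equivalent flag definitions (same options, not the author's code):

import argparse

parser = argparse.ArgumentParser(
    description='Multiple flash system FW for OCP TiogaPass.')
parser.add_argument('-t', '--type', required=True,
                    choices=['BIOS', 'BMC', 'VR', 'CPLD'])
parser.add_argument('-m', '--mode', required=True,
                    choices=['remote', 'local', 'kcs'])
parser.add_argument('-f', '--filename', required=True)
parser.add_argument('-l', '--system_list', dest='list')
args = parser.parse_args()  # exits with a usage message on bad input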
data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data\n\n\ndef extract_labels(filename, num_images):\n \"\"\"Extract the labels into a vector of int64 label IDs.\"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels\n\n\n\n\n# ---------------------------------------------------------------------------\ndef softmax(Z):\n e_Z = np.exp(Z)\n A = e_Z / e_Z.sum(axis = 0)\n return A\n# ---------------------------------------------------------------------------\ndef softmax_stable(Z):\n e_Z = np.exp(Z - np.max(Z, axis = 0, keepdims = True))\n A = e_Z / e_Z.sum(axis = 0)\n return A\n# ---------------------------------------------------------------------------\n## One-hot coding\nfrom scipy import sparse \ndef convert_labels(y, C = NUM_LABELS):\n \"\"\"\n convert 1d label to a matrix label: each column of this \n matrix coresponding to 1 element in y. In i-th column of Y, \n only one non-zeros element located in the y[i]-th position, \n and = 1 ex: y = [0, 2, 1, 0], and 3 classes then return\n\n [[1, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 0]]\n \"\"\"\n Y = sparse.coo_matrix((np.ones_like(y), \n (y, np.arange(len(y)))), shape = (C, len(y))).toarray()\n return Y.T\n# ---------------------------------------------------------------------------\ndef softmax_regression(W,b,image,labels):\n\n y_hat = image.dot(W)\n pdb.set_trace()\n y_hat = softmax(y_hat)\n # y_hat = softmax_stable(y_hat)\n\n \n\n groundTruth = convert_labels(labels)\n groundTruth = groundTruth[... ,:NUM_LABELS-1]\n \n # print(\"groundTruth Shape: \", groundTruth.shape)\n # print(groundTruth[1,:])\n \n cost = -np.sum(groundTruth * np.log(y_hat))\n\n gradient = image.T.dot( (y_hat-groundTruth) )\n\n\n # return cost,gradient.flatten()\n return cost,gradient\n# ---------------------------------------------------------------------------\ndef minFuncSGD(W,b,images,labels,funObj,*args):\n epochs = 10\n alpha = 1e-6\n minibatch = 256\n numberSample = labels.shape[0] # training set size\n # Setup for momentum\n mom = 0.5\n momIncrease = 20\n velocity = np.zeros_like(W)\n\n\n funObj(*args)\n\n it = 0\n for e in range(epochs):\n randomper =np.random.permutation(range(numberSample))\n for s in range(0,numberSample,minibatch):\n # print(\"s: \", s)\n # increase momentum after momIncrease iterations\n it = it + 1\n if it == momIncrease:\n mom = 0.9\n \n # get next randomly selected minibatch\n mb_images = images[randomper[s:s+minibatch],:]\n mb_labels = labels[randomper[s:s+minibatch]]\n cost, grad = funObj(W,b,mb_images,mb_labels)\n print(\"Epoch: %d, MiniBatch: %d -- Cost:\" % (e,s), cost)\n\n velocity = mom * velocity + alpha * grad;\n W = W - velocity; \n alpha = alpha/2.0\n\n return W\n\n\n\n\n\n\n\n# ---------------------------------------------------------------------------\ndef predict(W,images):\n # print(W.shape)\n # print(images.shape)\n result = softmax(images.dot(W))\n # print(result.shape)\n return np.argmax(result, axis = 1)\n# ---------------------------------------------------------------------------\ndef main():\n\n # Extract it into numpy arrays.\n train_data = extract_data('data/train-images-idx3-ubyte.gz', 60000)\n train_labels = extract_labels('data/train-labels-idx1-ubyte.gz', 60000)\n test_data = extract_data('data/t10k-images-idx3-ubyte.gz', 10000)\n test_labels = extract_labels('data/t10k-labels-idx1-ubyte.gz', 10000)\n\n # Generate a 
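Why Softmax.py defines both softmax and softmax_stable: plain exponentiation overflows for large logits, while shifting by the column max is mathematically identical and safe. A two-line demonstration:

import numpy as np

Z = np.array([[1000.0], [1001.0]])           # one column of large logits
# np.exp(Z) overflows to inf here, so the plain softmax yields nan;
# subtracting the column max first gives the exact answer:
A = np.exp(Z - np.max(Z, axis=0, keepdims=True))
print(A / A.sum(axis=0))                     # [[0.26894142], [0.73105858]]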
validation set.\n    # validation_data = train_data[:VALIDATION_SIZE, ...]\n    # validation_labels = train_labels[:VALIDATION_SIZE]\n    # train_data = train_data[VALIDATION_SIZE:, ...]\n    # train_labels = train_labels[VALIDATION_SIZE:]\n\n    numberSample = train_data.shape[0]\n    dataDimension = IMAGE_SIZE * IMAGE_SIZE;\n    numberClassTrain = NUM_LABELS - 1;\n\n    W = np.random.randn(dataDimension,NUM_LABELS-1)*0.001;\n    b = np.random.randn(NUM_LABELS-1)*0.001;\n    # -----------------------------------------------------------\n    # io.savemat('Theta.mat',{'theta': W})\n    temp = io.loadmat('Theta.mat')\n    W = temp['theta']\n    \n\n\n    train_data = train_data.reshape(numberSample,dataDimension)\n    test_data = test_data.reshape(test_labels.shape[0],dataDimension)\n\n    # -----------------------------------------------------------\n    # io.savemat('MNIST.mat',{'train_data': train_data, 'train_labels': train_labels, 'test_data': test_data, 'test_labels': test_labels }) \n    # -----------------------------------------------------------\n\n\n    tic = time.time()\n    pdb.set_trace()\n    W = minFuncSGD(W,b,train_data,train_labels,softmax_regression,W,b,train_data,train_labels)\n    toc = time.time()\n    print(\"Time training: \",toc - tic)\n\n\n    labels_predict = predict(W,train_data)\n\n    # Compute and print the fraction of correctly predicted examples\n    num_correct = np.sum(train_labels == labels_predict)\n    accuracy = float(num_correct) / numberSample\n    print(\"Got %d / %d correct => accuracy: %f\" % (num_correct, numberSample, accuracy))\n\n\n    labels_predict = predict(W,test_data)\n\n    # Compute and print the fraction of correctly predicted examples\n    num_correct = np.sum(test_labels == labels_predict)\n    accuracy = float(num_correct) / test_labels.shape[0]\n    print(\"Got %d / %d correct => accuracy: %f\" % (num_correct, test_labels.shape[0], accuracy))\n\n\n\n\n\n\nif __name__ == '__main__':\n    main()\n\n\n\n\n\n\n","sub_path":"DL_SVM/Softmax.py","file_name":"Softmax.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"26710555","text":"#!/usr/bin/env python\nfrom __future__ import division\n\nimport txros\nimport tf\nimport tf.transformations as trns\nimport numpy as np\nimport navigator_tools\nfrom navigator_tools import fprint\nfrom navigator_singleton import pose_editor\nfrom twisted.internet import defer\n\nWEST = trns.quaternion_matrix(pose_editor.WEST)\nEAST = trns.quaternion_matrix(pose_editor.EAST)\nNORTH = trns.quaternion_matrix(pose_editor.NORTH)\nSOUTH = trns.quaternion_matrix(pose_editor.SOUTH)\n\n@txros.util.cancellableInlineCallbacks\ndef main(navigator):\n    result = navigator.fetch_result()\n\n    #middle_point = np.array([-10, -70, 0]) \n    est_coral_survey = yield navigator.database_query(\"Coral_Survey\")\n    \n    yield navigator.move.set_position(est_coral_survey.objects[0]).go()\n\n    totem = yield navigator.database_query(\"totem\")\n    \n    # Get the closest totem object to the boat\n    totem_np = map(lambda obj: navigator_tools.point_to_numpy(obj), totem.objects)\n    dist = map(lambda t_np: np.linalg.norm(t_np - navigator_tools.point_to_numpy(est_coral_survey.objects[0])), totem_np)\n\n    middle_point = navigator_tools.point_to_numpy(totem.objects[0].position)\n    quads_to_search = [1, 2, 3, 4]\n    if (yield navigator.nh.has_param(\"/mission/coral_survey/quadrants\")):\n        quads_to_search = yield navigator.nh.get_param(\"/mission/coral_survey/quadrants\")\n\n    waypoint_from_center = np.array([10 * np.sqrt(2)])\n\n    # Construct waypoint list along 
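For the closest-totem computation in coral_survey's main above, min with a key function avoids building the parallel distance list; a sketch using the same helpers:

import numpy as np

def closest_object(objects, reference_np, to_np):
    # Return the database object whose converted position is nearest
    # the reference point; to_np maps an object to a numpy point.
    return min(objects,
               key=lambda obj: np.linalg.norm(to_np(obj) - reference_np))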
NSEW directions then rotate 45 degrees to get a good spot to go to.\n directions = [EAST, NORTH, WEST, SOUTH]\n waypoints = []\n for quad in quads_to_search:\n mid = navigator.move.set_position(middle_point).set_orientation(directions[quad - 1])\n waypoints.append(mid.yaw_left(45, \"deg\").forward(waypoint_from_center).set_orientation(NORTH))\n\n # Get into the coral survey area\n yield waypoints[0].go()\n\n # Publish ogrid with boundaries to stay inside\n ogrid = OgridFactory(middle_point, draw_borders=True)\n msg = ogrid.get_message()\n latched = navigator.latching_publisher(\"/mission_ogrid\", OccupancyGrid, msg)\n\n searcher = navigator.search(\"coral_survey\", waypoints)\n yield searcher.start_search(move_type='skid', spotings_req=1)\n\n fprint(\"Centering over the thing!\", title=\"CORAL_SURVEY\")\n\n # TODO: Center over the thing.\n\n boat_position = (yield navigator.tx_pose)[0]\n\n # TODO: make this easier\n quad = np.argmin(np.linalg.norm(boat_position - [[w.pose[0][0][0], w.pose[0][1][0],w.pose[0][2][0]] for w in waypoints], axis=1))\n quad = quads_to_search[quad]\n fprint(\"Estimated quadrant: {}\".format(quad), title=\"CORAL_SURVEY\", msg_color='green')\n\n yield navigator.nh.sleep(5)\n defer.returnValue(result)\n\n\n# This really shouldn't be here - it should be somewhere behind the scenes\nfrom nav_msgs.msg import OccupancyGrid\nimport cv2\n\nclass OgridFactory():\n def __init__(self, center, draw_borders=False):\n self.resolution = .3\n self.height, self.width = 60, 60 # meters\n self.center = center\n\n self.wall_length = 20 # meters\n self.walls = None\n\n # Sets x,y upper and lower bounds and the left and right wall bounds\n self.make_ogrid_transform()\n self.make_walls()\n self.draw_lines(self.walls, -2)\n\n if draw_borders:\n self.draw_borders()\n\n def draw_borders(self):\n borders = ((-1, -1), (self.grid.shape))\n cv2.rectangle(self.grid, borders[0], borders[1], 100, 3)\n\n def make_ogrid_transform(self):\n origin_x = self.center[0] - 30\n origin_y = self.center[1] - 30\n self.origin = navigator_tools.numpy_quat_pair_to_pose([origin_x, origin_y, 0],\n [0, 0, 0, 1])\n\n # The grid needs to have it's axes swaped since its row major\n self.grid = np.zeros((self.height / self.resolution, self.width / self.resolution)) - 1\n\n # Transforms points from ENU to ogrid frame coordinates\n self.t = np.array([[1 / self.resolution, 0, -origin_x / self.resolution],\n [0, 1 / self.resolution, -origin_y / self.resolution],\n [0, 0, 1]])\n\n self.transform = lambda point: self.t.dot(np.append(point[:2], 1))[:2]\n\n def make_walls(self):\n vect = np.array([self.wall_length, 0, 0])\n\n # Dotting with the negatives seemed to produce cleaner ogrids\n west_point = EAST[:3, :3].dot(-vect) + self.center\n east_point = EAST[:3, :3].dot(vect) + self.center\n north_point = NORTH[:3, :3].dot(vect) + self.center\n south_point = NORTH[:3, :3].dot(-vect) + self.center\n\n self.walls = [self.center, west_point,\n self.center, east_point,\n self.center, north_point,\n self.center, south_point]\n\n def draw_lines(self, points, value):\n last_wall = None\n for wall in points:\n if last_wall is None:\n last_wall = tuple(self.transform(wall).astype(np.int32))\n continue\n\n this_wall = tuple(self.transform(wall).astype(np.int32))\n cv2.line(self.grid, this_wall, last_wall, value, 1)\n last_wall = this_wall\n\n def get_message(self):\n ogrid = OccupancyGrid()\n ogrid.header = navigator_tools.make_header(frame=\"enu\")\n ogrid.info.resolution = self.resolution\n ogrid.info.height, ogrid.info.width = 
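The 3x3 matrix assembled in make_ogrid_transform above is a scale-then-translate affine map from ENU metres to grid cells; a round-trip check with illustrative numbers:

import numpy as np

res, origin_x, origin_y = 0.3, -40.0, -100.0
t = np.array([[1 / res, 0, -origin_x / res],
              [0, 1 / res, -origin_y / res],
              [0, 0, 1]])
enu_point = np.array([-10.0, -70.0])
cell = t.dot(np.append(enu_point, 1))[:2]
print(cell)  # [100. 100.]: thirty metres from the origin at 0.3 m per cell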
self.grid.shape\n ogrid.info.origin = self.origin\n grid = np.copy(self.grid)\n ogrid.data = np.clip(grid.flatten(), -100, 100).astype(np.int8)\n\n return ogrid\n","sub_path":"mission_control/navigator_missions/nav_missions/coral_survey.py","file_name":"coral_survey.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"479431768","text":"import pygame, sys\nimport time\nfrom pygame import *\nfrom mainGame import *\nimport pandas as pd\nfrom pandas import *\n\nimport numpy as np\n\npygame.init()\n'''anything go with rect use the form (left, top, width, height)'''\n\n#define the set image\nset0 = '../image/set0.png'\nset1 = '../image/set1.png'\nset2 = '../image/set2.png'\nset3 = '../image/set3.png'\nset4 = '../image/set4.png'\nset5 = '../image/set5.png'\n\n#images\nhelp = pygame.image.load('../image/help.png')\ndonate = pygame.image.load('../image/donateRaiseRacingGame.png')\nchangeSet = pygame.image.load('../image/changeSet.png')\nloginScreen = pygame.image.load(\"../image/loginscreen.png\")\nsaveButton = pygame.image.load(\"../image/save.png\")\n\n#from this is the define for game statistics\nFPS = 60\nfpsClock = pygame.time.Clock()\nnumberKey = [ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'),\n ord('7'), ord('8'), ord('9'), ord('0')]\ncharacterKey = [ord('A'), ord('B'), ord('C'), ord('D'), ord('E'),\n ord('F'), ord('G'), ord('H'), ord('I'), ord('J'),\n ord('K'), ord('L'), ord('M'), ord('N'), ord('O'),\n ord('P'), ord('Q'), ord('R'), ord('S'), ord('T'),\n ord('U'), ord('V'), ord('W'), ord('X'), ord('Y'),\n ord('a'), ord('b'), ord('c'), ord('d'), ord('e'),\n ord('f'), ord('g'), ord('h'), ord('i'), ord('j'),\n ord('k'), ord('l'), ord('m'), ord('n'), ord('o'),\n ord('p'), ord('q'), ord('r'), ord('s'), ord('t'),\n ord('u'), ord('v'), ord('w'), ord('x'), ord('y')]\n\nsetIndex = [set0, set1, set2, set3, set4, set5]\ncharacterSet = 0\n\n#access to database\ndatabase = '../database.csv'\ndata = pd.read_csv(database)\nquantity = int(data.iloc[0,8]) #number of account of this time\nsite = None\n#windows statics\nWINDOWSIZE = (1280,720) #window size\npygame.display.set_caption('Racing bet 888') #set Caption for title bar\nDISPLAYSURFACE = pygame.display.set_mode(WINDOWSIZE) #create surface for mainmenu\n\n#sound and music\nmenuSound = pygame.mixer.Sound('../soundFX/menu.wav') #open sound\ngameSound = pygame.mixer.Sound('../soundFX/Diviners -Stockholm Lights.mp3')\nloginSound = pygame.mixer.Sound(\"../soundFX/loginsound.wav\")\n\n#fonts\nfont = pygame.font.SysFont(None, 20, bold=True, italic=False) #set font for drawing\nuserNameFont = pygame.font.SysFont(None, 25, bold= False, italic=True)\nmediumfont = pygame.font.SysFont(None, 30, bold = False, italic = False)\nbigfont = pygame.font.SysFont(None, 40, bold = False, italic = False)\n\n#end define the game statistics\n#------------------------------------------------------------------------------------------------#\n\ndef checkExistAccount(username, password):\n global quantity\n exist = -1\n cSite = None\n for i in range(0, quantity):\n if data.iloc[i, 0] != None and data.iloc[i, 1] != None:\n if username == data.iloc[i, 0]:\n exist = 0\n if password == data.iloc[i, 1]:\n exist = 1\n cSite = i\n else:\n exist = 0\n return exist, cSite\n\n\ndef loadGame():\n username = data.iloc[site, 0]\n password = data.iloc[site, 1]\n money = int(data.iloc[site, 2])\n return username, password, money\n\n\ndef signUpAndLoad(username, password):\n global 
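A side note on the key tables above: the hand-enumerated characterKey list stops at 'Y'/'y', so usernames and passwords cannot contain the letter Z. string.ascii_letters builds the complete table in one line each:

import string

characterKey = [ord(c) for c in string.ascii_letters]  # a-z and A-Z, 52 keys
numberKey = [ord(c) for c in string.digits]            # 0-9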
quantity\n    data.iloc[quantity, 0] = username\n    data.iloc[quantity, 1] = password\n    money = int(data.iloc[quantity, 2])\n    quantity += 1\n    data.iloc[0,8] = quantity\n    data.to_csv(database, index = False)\n    return username, password, money\n\n\ndef draw_text(text, font, color, surface, x, y):\n    textobj = font.render(text, 1, color)\n    textrect = textobj.get_rect()\n    textrect.topleft = (x,y)\n    surface.blit(textobj, textrect)\n    return 1\n\n\ndef loginscreen():\n    global site\n    running = True\n    clicked = False\n    loginSound.play(-1)\n    show = True\n    inputUserName = \"\"\n    inputPassword = \"\"\n    censoredPassword = \"\"\n    typingUserName = False\n    typingPassword = False\n    pushLoginButtn = False\n    money = None\n    while running:\n        DISPLAYSURFACE = pygame.display.set_mode(WINDOWSIZE)\n        DISPLAYSURFACE.blit(loginScreen, (0, 0))\n        DISPLAYSURFACE.blit(donate, (1080, 0))\n        draw_text('DONATE TO HELP THE DEVELOPMENT', font, (255,255,255), DISPLAYSURFACE, 1000, 200)\n        userNameArea = pygame.Rect(40, 320, 375, 37)\n        passwordArea = pygame.Rect(40, 397, 374, 40)\n        loginButton = pygame.Rect(312, 460, 99, 32)\n\n        dx, dy = pygame.mouse.get_pos()\n\n        if show:\n            draw_text('Now Playing: NIVIRO - Demons (No Copyright Sound)', font, (255,255,255), DISPLAYSURFACE, 1, 705)\n            show = not show\n\n        checkExist, site = checkExistAccount(inputUserName, inputPassword)\n\n        if userNameArea.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), userNameArea, 3)\n            if clicked:\n                typingUserName = True\n                typingPassword = False\n                print(inputUserName)\n        if passwordArea.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), passwordArea, 3)\n            if clicked:\n                typingPassword = True\n                typingUserName = False\n                print(inputPassword)\n        if loginButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), loginButton, 3)\n            if clicked:\n                typingPassword = False\n                typingUserName = False\n                pushLoginButtn = True\n\n\n        clicked = False\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n            if event.type == MOUSEBUTTONDOWN:\n                if event.button == 1:\n                    clicked = True\n            if event.type == KEYDOWN:\n                if event.key == K_RETURN and checkExist != 0:\n                    if inputUserName == \"\" or inputPassword == \"\":\n                        pushLoginButtn = False\n                    else:\n                        running = False\n                        username = inputUserName\n                        password = inputPassword\n                else:\n                    if typingUserName and not typingPassword:\n                        if event.key == K_BACKSPACE:\n                            inputUserName = inputUserName[0:-1]\n                        else:\n                            if (event.key in characterKey) or (event.key in numberKey):\n                                if len(inputUserName) < 20:\n                                    inputUserName += event.unicode\n                    elif typingPassword and not typingUserName:\n                        if event.key == K_BACKSPACE:\n                            inputPassword = inputPassword[0:-1]\n                            censoredPassword = censoredPassword[0:-1]\n                        else:\n                            if event.key in characterKey or event.key in numberKey:\n                                if len(inputPassword) < 20:\n                                    inputPassword += event.unicode\n                                    censoredPassword += '*'\n        if pushLoginButtn:\n            if inputUserName == \"\" or inputPassword == \"\":\n                pushLoginButtn = False\n            else:\n                if checkExist != 0:\n                    running = False\n        draw_text(inputUserName, font, (0,0,0), DISPLAYSURFACE, 45, 330)\n        draw_text(censoredPassword, font, (0,0,0), DISPLAYSURFACE, 45, 407)\n\n        if checkExist == 1:\n            draw_text('LOGIN', font, (0,0,0), DISPLAYSURFACE, 335, 468)\n        elif checkExist == -1:\n            draw_text('SIGNUP', font, (0,0,0), DISPLAYSURFACE, 332, 468)\n        elif checkExist == 0:\n            draw_text('?????', font, (0,0,0), DISPLAYSURFACE, 335, 468)\n        fpsClock.tick(FPS)\n        pygame.display.update()\n    loginSound.stop()\n    if checkExist == 1:\n        
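checkExistAccount (defined just before signUpAndLoad above) keeps scanning after a match, so a later row with empty fields can reset exist back to 0 through its else branch; returning as soon as the username is found makes the three states explicit. A sketch of the intended lookup:

def find_account(data, quantity, username, password):
    # 1 = credentials match, 0 = username taken (wrong password),
    # -1 = unknown username (free to sign up).
    for i in range(quantity):
        if data.iloc[i, 0] == username:
            if data.iloc[i, 1] == password:
                return 1, i
            return 0, None
    return -1, None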
return loadGame()\n    elif checkExist == -1:\n        return signUpAndLoad(inputUserName, inputPassword)\n\n\ndef mainMenu(money, characterSet, username):\n    Running = True #check if running\n    clicked = False #get clicked\n    show = True #music description info\n    menuSound.play(-1) #playing background music\n    betCar = 1\n    betYet = False\n    bet = 500\n    logOut = False\n    while Running:\n        #define the display\n        MAINMENUSCREEN = pygame.image.load(setIndex[characterSet])\n        MAINMENUSCREEN = pygame.transform.scale(MAINMENUSCREEN, WINDOWSIZE)\n        DISPLAYSURFACE.blit(MAINMENUSCREEN, (0,0)) #draw background\n        #DISPLAYSURFACE.blit(MAINMENUSCREEN, ())\n        displayUserNameArea = (250, 87, 190, 43)\n        moneyArea = (600, 605, 250, 62)\n        pygame.draw.rect(DISPLAYSURFACE, (255,255,255), moneyArea)\n        pygame.draw.rect(DISPLAYSURFACE, (255, 0, 0), moneyArea, 3)\n        pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), displayUserNameArea)\n        draw_text(username, userNameFont, (0, 0, 0), DISPLAYSURFACE, 260, 100)\n        draw_text(str(money), mediumfont, (255,0,0), DISPLAYSURFACE, 700, 630)\n\n        #define the bet button\n        bet1Button = pygame.Rect(5, 250, 200, 153)\n        bet2Button = pygame.Rect(230, 250, 200, 153)\n        bet3Button = pygame.Rect(440, 250, 200, 153)\n        bet4Button = pygame.Rect(640, 250, 200, 153)\n        bet5Button = pygame.Rect(850, 250, 200, 153)\n        bet6Button = pygame.Rect(1060, 250, 200, 153)\n        #show the music description\n        if show:\n            draw_text('Now Playing: Linko - Goodbye (No Copyright Sound)', font, (255,255,0), DISPLAYSURFACE, 500, 705)\n            show = not show\n\n        #define the Buttons used in main menu\n        exitButton = pygame.Rect(58, 42, 82, 67)\n        helpButton = pygame.Rect(55, 580, 110, 100)\n        miniGameButton = pygame.Rect(212, 575, 100, 100)\n        changeSetButton = pygame.Rect(360, 580, 110, 100)\n        shopButton = pygame.Rect(888, 582, 93, 95)\n        gameButton = pygame.Rect(1050, 580, 210, 100)\n        changeNameButton = pygame.Rect(1075, 515, 120, 40)\n        logOutButton = pygame.Rect(1213, 5, 68, 68)\n\n        #GET MOUSE CLICK\n        dx, dy = pygame.mouse.get_pos() #get clicked\n\n        if logOut:\n            menuSound.play(-1)\n            logOut = False\n        #if mouse click execute\n        if characterSet == 2:\n            frame = (0,0,0)\n        else:\n            frame = (255,255,255)\n        if exitButton.collidepoint(dx, dy):\n            if clicked:\n                exitConfirmScreen()\n        if helpButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, frame, helpButton, 3)\n            if clicked:\n                helpScreen()\n        if miniGameButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, frame, miniGameButton, 3)\n            if money >= 1000:\n                clicked = False\n                pygame.draw.rect(DISPLAYSURFACE, frame, (175, 535, 215, 20))\n                draw_text(\"YOU CAN'T PLAY MINIGAME\", font, (255, 0, 0), DISPLAYSURFACE, 180, 540)\n            if money < 1000:\n                if clicked:\n                    money = miniGameScreen(money)\n        if changeSetButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, frame, changeSetButton, 3)\n            if clicked:\n                characterSet = changeSetScreen(characterSet)\n        if shopButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, frame, shopButton, 3)\n            if clicked:\n                money = shopScreen(money)\n        if gameButton.collidepoint(dx, dy):\n            pygame.draw.rect(DISPLAYSURFACE, frame, gameButton, 3)\n            if clicked:\n                if characterSet == 0:\n                    characterSet = 1\n                money = runGame(username, betCar, characterSet, money, bet)\n                gameSound.stop()\n                menuSound.play(-1)\n        if logOutButton.collidepoint(dx, dy):\n            if clicked:\n                menuSound.stop()\n                username, password, money = loginscreen()\n                logOut = True\n                Running = False\n\n        #choose bet car\n        if characterSet != 0:\n            if bet1Button.collidepoint(dx, dy):\n                pygame.draw.rect(DISPLAYSURFACE, 
(0, 255, 0), bet1Button, 3)\n if clicked:\n betCar = 1\n betYet = True\n if bet2Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), bet2Button, 3)\n if clicked:\n betCar = 2\n betYet = True\n if bet3Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), bet3Button, 3)\n if clicked:\n betCar = 3\n betYet = True\n if bet4Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), bet4Button, 3)\n if clicked:\n betCar = 4\n betYet = True\n if bet5Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), bet5Button, 3)\n if clicked:\n betCar = 5\n betYet = True\n if bet6Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0, 255, 0), bet6Button, 3)\n if clicked:\n betCar = 6\n betYet = True\n\n clicked = False\n\n #checking exit game or input mouse click\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n clicked = True\n if betYet:\n bet = betPopUps(bet, money)\n betYet = False\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), (595, 550, 250, 50))\n pygame.draw.rect(DISPLAYSURFACE, (0,0,255), (595, 550, 250, 50), 3)\n draw_text('Your current car choose: ' + str(betCar), font, (255,255,255), DISPLAYSURFACE, 605, 560)\n draw_text('You bet amount of money: ' + str(bet), font, (255, 255, 255), DISPLAYSURFACE, 605, 580)\n #update screen every frame of loop\n fpsClock.tick(FPS)\n pygame.display.update() #update screen every execution\n if logOut:\n mainMenu(money, characterSet, username)\n else:\n return Running #return the running status to main\n\n\ndef betPopUps(bet, money):\n running = True\n inputBet = \"\"\n betArea = (500, 300, 200, 50)\n betTypingArea = (540, 315, 150, 20)\n while running:\n pygame.draw.rect(DISPLAYSURFACE, (5, 5, 255), betArea)\n pygame.draw.rect(DISPLAYSURFACE, (0, 0, 0), betTypingArea, 3)\n draw_text('Bet:', font, (0,0,0), DISPLAYSURFACE, 508, 320)\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n bet = 0\n if event.key == K_RETURN:\n if inputBet == \"\":\n bet = 500\n else: bet = int(inputBet)\n running = False\n if event.key == K_BACKSPACE:\n inputBet = inputBet[0:-1]\n if event.key in numberKey:\n inputBet += event.unicode\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n draw_text(inputBet, font, (0,0,0), DISPLAYSURFACE, 545, 320)\n fpsClock.tick(FPS)\n pygame.display.update()\n if bet > money:\n bet = money\n return bet\n\n\ndef exitConfirmScreen():\n running = True\n clicked = False\n while running:\n DISPLAYSURFACE.fill((0,0,0))\n draw_text('Confirm Exit?', bigfont, (255,255,255), DISPLAYSURFACE, 500, 200)\n dx, dy = pygame.mouse.get_pos()\n\n #define and draw yes/no buttons\n yesButton = pygame.Rect(480, 300, 50, 50)\n noButton = pygame.Rect(680, 300, 50, 50)\n pygame.draw.rect(DISPLAYSURFACE, (255,255,255), yesButton)\n draw_text('Yes', font, (0,0,0), DISPLAYSURFACE, 490, 320)\n pygame.draw.rect(DISPLAYSURFACE, (255,255,255), noButton)\n draw_text('No', font, (0,0,0), DISPLAYSURFACE, 695, 320)\n\n if yesButton.collidepoint(dx,dy):\n if clicked:\n pygame.quit()\n sys.exit()\n elif noButton.collidepoint(dx,dy):\n if clicked:\n running = False\n\n clicked = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n clicked = True\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = 
False\n fpsClock.tick(FPS)\n pygame.display.update()\n return running\n\n\ndef helpScreen():\n running = True\n while running:\n DISPLAYSURFACE.blit(help, (0,0))\n #check event\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n fpsClock.tick(FPS)\n pygame.display.update()\n\n\ndef baucua(cuoc):\n dem = 0\n x = 'YOU WIN : '\n a = [\"BAU\", \"CUA\", \"TOM\", \"CA\", \"GA\", \"NAI\"]\n b = random.choice(a)\n if cuoc == b:\n dem = dem + 1\n c = random.choice(a)\n if cuoc == c:\n dem = dem + 1\n d = random.choice(a)\n if cuoc == d:\n dem = dem + 1\n draw_text('RESULT :', bigfont, (255, 255, 255), DISPLAYSURFACE, 480, 460)\n draw_text(b, mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 490)\n draw_text(c, mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 510)\n draw_text(d, mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 530)\n x = x + str(dem) + cuoc\n if dem == 0:\n if cuoc == \"ss\":\n draw_text(\"YOU HAVE NOT TO CHOOSE\", mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 580)\n else:\n draw_text(\"YOU LOSE, PLEASE TO PLAY AGAIN\", mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 580)\n else:\n draw_text(x, mediumfont, (255, 255, 255), DISPLAYSURFACE, 480, 580)\n return 300 * dem\n\n\ndef miniGameScreen(money):\n running = True\n kt_dat = False\n kt = False\n tien = money\n cuoc = \"ss\"\n clicked = False\n DISPLAYSURFACE.fill((82, 139, 139))\n ship = pygame.image.load('../image/BCC.png')\n DISPLAYSURFACE.blit(ship, (470, 200))\n draw_text('$ MONEY:', mediumfont, (0, 255, 0), DISPLAYSURFACE, 550, 50)\n draw_text(str(money), mediumfont, (0, 255, 0), DISPLAYSURFACE, 700, 50)\n while running:\n\n # datButton = pygame.Rect(200, 260, 60, 20)\n xocButton = pygame.Rect(355, 279, 90, 40)\n bauButton = pygame.Rect(595, 279, 70, 20)\n tomButton = pygame.Rect(470, 380, 70, 20)\n cuaButton = pygame.Rect(720, 380, 70, 20)\n caButton = pygame.Rect(595, 380, 60, 20)\n gaButton = pygame.Rect(720, 279, 60, 20)\n naiButton = pygame.Rect(470, 279, 60, 20)\n # GET MOUSE CLICK\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), xocButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), bauButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), cuaButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), tomButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), caButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), naiButton)\n pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), gaButton)\n dx, dy = pygame.mouse.get_pos() # get clicked\n draw_text('If you have more than 1000, the system will return Menu Screen', mediumfont, (0, 0, 0), DISPLAYSURFACE, 350, 650)\n draw_text('PLAY GAME', font, (0, 0, 0), DISPLAYSURFACE, 358, 295)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 470, 280)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 470, 380)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 595, 280)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 595, 380)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 720, 280)\n draw_text('SELECT', font, (0, 0, 0), DISPLAYSURFACE, 720, 380)\n draw_text('MINIGAME BAU CUA', bigfont, (255, 255, 255), DISPLAYSURFACE, 480, 100)\n draw_text('$ MONEY:', mediumfont, (0, 255, 0), DISPLAYSURFACE, 550, 50)\n draw_text(str(money), mediumfont, (0, 255, 0), DISPLAYSURFACE, 700, 50)\n # if mouse click execute\n # dat\n if cuaButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), cuaButton, 3)\n if clicked:\n if 
kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(720, 360, 60, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 726, 361)\n kt_dat = True\n cuoc = \"CUA\"\n if gaButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), gaButton, 3)\n if clicked:\n if kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(720, 260, 60, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 726, 261)\n kt_dat = True\n cuoc = \"GA\"\n if caButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), caButton, 3)\n if clicked:\n if kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(595, 360, 60, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 600, 361)\n kt_dat = True\n cuoc = \"CA\"\n if bauButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), bauButton, 3)\n if clicked:\n if kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(595, 260, 60, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 600, 261)\n kt_dat = True\n cuoc = \"BAU\"\n if naiButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), naiButton, 3)\n if clicked:\n if kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(470, 260, 60, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 475, 261)\n kt_dat = True\n cuoc = \"NAI\"\n if tomButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), tomButton, 3)\n if clicked:\n if kt_dat == True: pygame.draw.rect(DISPLAYSURFACE, (255, 255, 255), datButton)\n datButton = pygame.Rect(470, 360, 70, 20)\n pygame.draw.rect(DISPLAYSURFACE, (225, 225, 0), datButton)\n draw_text('300', font, (0, 0, 0), DISPLAYSURFACE, 475, 361)\n kt_dat = True\n cuoc = \"TOM\"\n\n if xocButton.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (10, 10, 10), xocButton, 3)\n if clicked:\n if kt == True:\n kt = False\n\n DISPLAYSURFACE.fill((82, 139, 139))\n ship = pygame.image.load('../image/BCC.png')\n DISPLAYSURFACE.blit(ship, (470, 200))\n\n money = money + baucua(cuoc)\n cuoc = \"ss\"\n\n kt = True\n if money > 1000:\n running = False\n\n clicked = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n clicked = True\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n fpsClock.tick(FPS)\n pygame.display.update()\n return money\n\n\ndef miniGameEvent(money):\n running = True\n while running:\n DISPLAYSURFACE.fill((0,0,0))\n drawHelp()\n #check event\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n fpsClock.tick(FPS)\n pygame.display.update()\n\n\ndef changeSetScreen(selectedSet):\n running = True\n clicked = False\n while running:\n DISPLAYSURFACE.blit(changeSet, (0,0))\n\n if clicked:\n running = False\n\n set1Button = pygame.Rect(9, 298, 200, 153)\n set2Button = pygame.Rect(242, 298, 200, 
153)\n set3Button = pygame.Rect(484, 298, 200, 153)\n set4Button = pygame.Rect(720, 270, 210, 173)\n set5Button = pygame.Rect(960, 298, 305, 153)\n\n dx, dy = pygame.mouse.get_pos()\n\n if set1Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), set1Button, 3)\n if clicked:\n selectedSet = 1\n if set2Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), set2Button, 3)\n if clicked:\n selectedSet = 2\n if set3Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), set3Button, 3)\n if clicked:\n selectedSet = 3\n if set4Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), set4Button, 3)\n if clicked:\n selectedSet = 4\n if set5Button.collidepoint(dx, dy):\n pygame.draw.rect(DISPLAYSURFACE, (0,0,0), set5Button, 3)\n if clicked:\n selectedSet = 5\n\n clicked = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == ord('1'):\n selectedSet = 1\n if event.key == ord('2'):\n selectedSet = 2\n if event.key == ord('3'):\n selectedSet = 3\n if event.key == ord('4'):\n selectedSet = 4\n if event.key == ord('5'):\n selectedSet = 5\n if event.key == K_ESCAPE:\n running = False\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n clicked = True\n fpsClock.tick(FPS)\n pygame.display.update()\n return selectedSet\n\n\ndef shopScreen(money):\n running = True\n dontHavemoney = 'YOU DON\\'T HAVE ENOUGH MONEY'\n while running:\n DISPLAYSURFACE.fill((0, 0, 0))\n draw_text('Nothing at this time', bigfont, (255, 255, 255), DISPLAYSURFACE, 470, 300)\n draw_text('Money at this time is: ' + str(money), mediumfont, (255, 255, 255), DISPLAYSURFACE, 490, 350)\n draw_text('Press ESC Key to return Main Menu', font, (255,255,255), DISPLAYSURFACE, 490, 200)\n draw_text('Press 1 to 5 to buy', font, (255, 255, 255), DISPLAYSURFACE, 550, 400)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == ord('1'):\n if money < 100:\n draw_text(dontHavemoney, bigfont, (255, 255, 255), DISPLAYSURFACE, 400, 500)\n else:\n money -= 100\n if event.key == ord('2'):\n if money < 200:\n draw_text(dontHavemoney, bigfont, (255, 255, 255), DISPLAYSURFACE, 400, 500)\n else:\n money -= 200\n if event.key == ord('3'):\n if money < 300:\n draw_text(dontHavemoney, bigfont, (255, 255, 255), DISPLAYSURFACE, 400, 500)\n else:\n money -= 300\n if event.key == ord('4'):\n if money < 400:\n draw_text(dontHavemoney, bigfont, (255, 255, 255), DISPLAYSURFACE, 400, 500)\n else:\n money -= 400\n if event.key == ord('5'):\n if money < 500:\n draw_text(dontHavemoney, bigfont, (255, 255, 255), DISPLAYSURFACE, 400, 500)\n else:\n money -= 500\n if event.key == K_ESCAPE:\n running = False\n fpsClock.tick(FPS)\n pygame.display.update()\n return money\n\n\ndef main():\n username, password, money = loginscreen()\n mainMenu(money, characterSet, username)\n\n\nif __name__ == \"__main__\":\n main()\n\n# end of file\n","sub_path":"SourceCode/mainMenu.py","file_name":"mainMenu.py","file_ext":"py","file_size_in_byte":29830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"514057371","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 22:35:53 2014\n\n@author: yuri\n\"\"\"\n\nimport os\n#import sys\nimport shutil\nimport re\n#import random\n\ng_word_sil = \"\"\ng_phone_sil = \"SIL\"\ng_utils_dir = \"utils\"\n\nclass 
StrList:\n lines = None\n\n def __init__(self):\n self.lines = []\n \n def Clear(self):\n self.lines = []\n \n def Append(self, _line):\n self.lines.append(_line)\n \n def Save(self, _path):\n self.lines.sort()\n with open(_path, \"w\", encoding=\"utf-8\") as file_txt:\n for line in self.lines:\n file_txt.write(line + \"\\n\")\n file_txt.close()\n\nclass WordInfo:\n text = None\n tran = None\n word = None\n \n def __init__(self, _text, _tran, _word):\n self.text = _text.lower()\n self.tran = _tran\n self.word = int(_word)\n\ndef SortWordsById(_word):\n return _word.word\n\nclass Vocabulary:\n words = None\n \n def __init__(self, _lines):\n self.words = []\n for line in _lines:\n match = re.search(\"\\\\[Common\\\\]\\\\s*$\", line)\n if match:\n break\n match = re.search(\"^(.+)=(.+)#(\\\\d+)\\\\s*$\", line)\n if match:\n self.words.append(WordInfo(match.groups()[0], match.groups()[1], match.groups()[2]))\n self.words.sort(key=SortWordsById)\n \n def Text(self):\n text = None\n word_id = 0\n for word in self.words:\n if word.word == word_id:\n if text == None:\n text = word.text\n else:\n text = text + \" \" + word.text\n word_id = word_id + 1\n return text\n\nclass FileInfo:\n root = None\n name = None\n path = None\n spkr = None\n sent = None\n vocb = None\n text = None\n \n def __init__(self, _root, _name, _path, _spkr, _sent, _vocb):\n self.root = _root\n self.name = _name\n self.path = _path\n self.spkr = _spkr\n self.sent = _sent\n self.vocb = _vocb\n self.text = _vocb.Text()\n\nclass FileList(list):\n def __init__(self):\n list.__init__(self, [])\n \n def Load(self, _path_sdv, _path_wav):\n list.__init__(self, [])\n for root, dirs, files in os.walk(_path_sdv):\n for name in files:\n match = re.search(\"(\\\\d{3})_(\\\\d{5})\\\\.sdv$\", name.lower())\n if not match:\n continue\n path_wav = os.path.join(_path_wav, os.path.basename(root), os.path.splitext(name)[0] + \".wav\")\n if not os.path.exists(path_wav):\n continue\n path_sdv = os.path.join(root, name)\n lines = []\n with open(path_sdv, \"r\", encoding=\"CP1251\") as file_sdv:\n lines = file_sdv.readlines()\n file_sdv.close()\n spkr_id = match.groups()[0]\n sent_id = spkr_id + \"_\" + match.groups()[1]\n self.append(FileInfo(root, name, path_wav, spkr_id, sent_id, Vocabulary(lines)))\n\ndef CreateWavList(_path, _info_lst):\n lines = StrList()\n for info in _info_lst:\n lines.Append(info.path)\n lines.Save(_path)\n\ndef CreateWavScp(_path, _info_lst):\n lines = StrList()\n for info in _info_lst:\n lines.Append(info.sent + \" \" + info.path)\n lines.Save(_path)\n\ndef CreateWavTxt(_path, _info_lst):\n lines = StrList()\n for info in _info_lst:\n lines.Append(info.sent + \" \" + info.text)\n lines.Save(_path)\n\ndef CreateUtt2Spk(_path, _info_lst):\n lines = StrList()\n for info in _info_lst:\n lines.Append(info.sent + \" \" + info.spkr)\n lines.Save(_path)\n\ndef CreateLexicon(_path_lexicon, _path_lex_words, _info_lst):\n lexicon = set([])\n for info in _info_lst:\n for word in info.vocb.words:\n lexicon.add(word.text + \" \" + word.tran)\n lines = StrList()\n for line in lexicon:\n lines.Append(line)\n lines.Save(_path_lex_words)\n lines.Append(g_word_sil + \" \" + g_phone_sil)\n lines.Save(_path_lexicon)\n\ndef CreatePhoneTable(_path, _info_lst):\n phset = set([])\n for info in _info_lst:\n for word in info.vocb.words:\n phones = word.tran.split(\" \")\n for phone in phones:\n phset.add(phone)\n phones = StrList()\n for phone in phset:\n phones.Append(phone)\n phones.Save(_path)\n\ndef CreateSilTables(_path_sil, _path_opt_sil):\n silences 
= StrList()\n silences.Append(g_phone_sil)\n silences.Save(_path_sil)\n silences.Save(_path_opt_sil)\n\ndef CreateLangModel(_path, _info_lst):\n wdset = set([])\n for info in _info_lst:\n for word in info.vocb.words:\n wdset.add(word.text)\n words = list(wdset)\n words.sort()\n with open(_path, \"w\", encoding=\"utf-8\") as file_txt:\n file_txt.write(\"\\\\data\\\\\\n\")\n file_txt.write(\"ngram 1=\" + str(len(words) + 2) + \"\\n\")\n file_txt.write(\"\\\\1-grams:\\n\")\n file_txt.write(\"-99\\t<s>\\n\")\n file_txt.write(\"-1\\t</s>\\n\")\n for word in words:\n file_txt.write(\"-1\\t\" + word + \"\\n\")\n file_txt.write(\"\\\\end\\\\\\n\")\n file_txt.close()\n\nprint(\"======= BEGIN =======\")\n\nprint(\"Рабочая директория: \" + os.path.abspath(os.curdir))\ng_utils_dir = os.path.abspath(g_utils_dir)\nif not os.path.exists(g_utils_dir):\n print(\"Директория не существует: \" + g_utils_dir)\n exit()\ng_wav_dir = \"wav\"\nif \"wav_dir\" in os.environ:\n g_wav_dir = os.environ[\"wav_dir\"]\ng_wav_dir = os.path.abspath(g_wav_dir)\nif not os.path.exists(g_wav_dir):\n print(\"Директория не существует: \" + g_wav_dir)\n exit()\ng_sent_dir = \"sent\"\nif \"sent_dir\" in os.environ:\n g_sent_dir = os.environ[\"sent_dir\"]\ng_sent_dir = os.path.abspath(g_sent_dir)\nif not os.path.exists(g_sent_dir):\n print(\"Директория не существует: \" + g_sent_dir)\n exit()\ng_data_dir = \"data\"\nif \"data_dir\" in os.environ:\n g_data_dir = os.environ[\"data_dir\"]\ng_data_dir = os.path.abspath(g_data_dir)\npath_tmp = \"exp\"\nif os.path.exists(path_tmp):\n print(\"Удаление директории: \" + path_tmp)\n shutil.rmtree(path_tmp, True)\npath_tmp = \"mfcc\"\nif os.path.exists(path_tmp):\n print(\"Удаление директории: \" + path_tmp)\n shutil.rmtree(path_tmp, True)\nif os.path.exists(g_data_dir):\n print(\"Удаление директории: \" + g_data_dir)\n shutil.rmtree(g_data_dir, True)\nprint(\"Создание директории: \" + g_data_dir)\nos.mkdir(g_data_dir)\ng_local_dir = os.path.join(g_data_dir, \"local\")\nprint(\"Создание директории: \" + g_local_dir)\nos.mkdir(g_local_dir)\ng_dict_dir = os.path.join(g_local_dir, \"dict\")\nprint(\"Создание директории: \" + g_dict_dir)\nos.mkdir(g_dict_dir)\n\nprint(\"Загрузка sdv-файлов (train)...\")\ng_train_lst = FileList()\ng_train_lst.Load(g_sent_dir, os.path.join(g_wav_dir, \"train\"))\nprint(\" Обучающая выборка: \" + str(len(g_train_lst)) + \" файлов.\")\nprint(\"Загрузка sdv-файлов (test)...\")\ng_test_lst = FileList()\ng_test_lst.Load(g_sent_dir, os.path.join(g_wav_dir, \"test\"))\nprint(\" Тестовая выборка: \" + str(len(g_test_lst)) + \" файлов.\")\ng_file_lst = g_train_lst + g_test_lst\nprint(\"Всего загружено \" + str(len(g_file_lst)) + \" sdv-файлов.\")\n\nprint(\"Создание файла waves_all.list...\")\nCreateWavList(os.path.join(g_local_dir, \"waves_all.list\"), g_file_lst)\nprint(\"Создание файла waves.test...\")\nCreateWavList(os.path.join(g_local_dir, \"waves.test\"), g_test_lst)\nprint(\"Создание файла waves.train...\")\nCreateWavList(os.path.join(g_local_dir, \"waves.train\"), g_train_lst)\n\nprint(\"Создание файла test_wav.scp...\")\nCreateWavScp(os.path.join(g_local_dir, \"test_wav.scp\"), g_test_lst)\nprint(\"Создание файла train_wav.scp...\")\nCreateWavScp(os.path.join(g_local_dir, \"train_wav.scp\"), g_train_lst)\n\nprint(\"Создание файла test.txt...\")\nCreateWavTxt(os.path.join(g_local_dir, \"test.txt\"), g_test_lst)\nprint(\"Создание файла train.txt...\")\nCreateWavTxt(os.path.join(g_local_dir, \"train.txt\"), g_train_lst)\n\nfor data_set in [[\"test\", g_test_lst], 
[\"train\", g_train_lst]]:\n dir_dst = os.path.join(g_data_dir, data_set[0])\n print(\"Создание директории: \" + dir_dst)\n os.mkdir(dir_dst)\n print(\"Копирование файлов text и wav.scp...\")\n shutil.copyfile(os.path.join(g_local_dir, data_set[0] + \".txt\"), os.path.join(dir_dst, \"text\"))\n shutil.copyfile(os.path.join(g_local_dir, data_set[0] + \"_wav.scp\"), os.path.join(dir_dst, \"wav.scp\"))\n print(\"Создание файла utt2spk...\")\n path_src = os.path.join(dir_dst, \"utt2spk\")\n CreateUtt2Spk(path_src, data_set[1])\n print(\"Создание файла spk2utt...\")\n path_dst = os.path.join(dir_dst, \"spk2utt\")\n cmd_line = os.path.join(g_utils_dir, \"utt2spk_to_spk2utt.pl\") + \" < \" + path_src + \" > \" + path_dst\n proc = os.popen(cmd_line)\n for line in proc.readlines():\n print(line)\n\nprint(\"Создание файлов task.arpa и lm_test.arpa...\")\nCreateLangModel(os.path.join(g_local_dir, \"task.arpa\"), g_test_lst)\nshutil.copyfile(os.path.join(g_local_dir, \"task.arpa\"), os.path.join(g_local_dir, \"lm_test.arpa\"))\n\nprint(\"Создание файлов lexicon.txt и lexicon_words.txt...\")\nCreateLexicon(os.path.join(g_dict_dir, \"lexicon.txt\"), os.path.join(g_dict_dir, \"lexicon_words.txt\"), g_file_lst)\nprint(\"Создание файла nonsilence_phones.txt...\")\nCreatePhoneTable(os.path.join(g_dict_dir, \"nonsilence_phones.txt\"), g_file_lst)\nprint(\"Создание файлов silence_phones.txt и optional_silence.txt...\")\nCreateSilTables(os.path.join(g_dict_dir, \"silence_phones.txt\"), os.path.join(g_dict_dir, \"optional_silence.txt\"))\n\nprint(\"======= END =======\")\n","sub_path":"local/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"123098161","text":"from arvore import Arvore\n\n\ndef main():\n\n livraria = Arvore(\"Livros\")\n livraria.raiz.inserir_filho(\"Gastronomia\")\n livraria.raiz.inserir_filho(\"Informática\")\n livraria.raiz.inserir_filho(\"Engenharia\")\n livraria.imprimir()\n\n encontrado = livraria.localizar_nodo(\"Livros\")\n print(\"Encontrado: {}\".format(encontrado))\n encontrado = livraria.localizar_nodo(\"Informática\")\n print(\"Encontrado: {}\".format(encontrado))\n encontrado = livraria.localizar_nodo(\"Letras\")\n print(\"Encontrado: {}\".format(encontrado))\n\n livraria.inserir_nodo(\"Informática\",\"Linguagens\")\n livraria.inserir_nodo(\"Linguagens\", \"Python\")\n livraria.inserir_nodo(\"Gastronomia\", \"Culinária\")\n livraria.inserir_nodo(\"Gastronomia\", \"Bebidas\")\n livraria.imprimir()\n\n removido = livraria.remover_nodo(\"Bebidas\")\n print(\"Removido: \", removido)\n removido = livraria.remover_nodo(\"Informática\")\n print(\"Removido: \", removido)\n livraria.imprimir()\n\nmain()","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368267853","text":"## declare a list\n\nlst = [1,2,3,4,5]\n\nlst[0] = 'Hello'\n## [-1] last element in list\nlst[-1] = 20\n\nprint(lst[4])\n\n## tuple in ()\n\ntpl = (1,2,3,4,5)\n## not possible, tuples is RO not support assignement\ntry:\n\ttpl[0] = 0\nexcept:\n\tprint(\"can't assign tuples\")\n\t\nprint(5 in tpl) ## => True\n\n","sub_path":"Python/Day1/lists_tupples.py","file_name":"lists_tupples.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"367578824","text":"from 
__future__ import print_function\nfrom __future__ import division\n\nimport math\nimport time\nimport json\nfrom multiprocessing import Pool\n\nimport numpy as np\nimport tensorflow as tf\n\nimport ase\nimport ase.io\nimport ase.calculators.interface\nimport ase.neighborlist\n\nfrom Potential_Field_NN import PFNN\nimport G_func\nimport dG_func\n\n\n\nclass NN_calculator(ase.calculators.interface.Calculator):\n\n def __init__(self, input_layer=None, hidden_layer=None, output_layer=None, atom_types=None, \n max_nums_atom=None, r_cutoff=None, \n normalization_params_json=None, normalization_mode=0,\n params_G2=None, params_G4=None, trained_model=0, \n nproc_epot=None, nproc_force=None,\n turn_nn_on = True): \n \n # turn_nn_on = Flase: G calculation without NN calculation\n # turn_nn_on = True: epot and force calculation with NN calculation\n # normalization_mode = 0: (G-G_mean)/(G_max-G_min)\n # normalization_mode = 1: (G-G_mean)/G_std\n \n self.nproc_epot = nproc_epot\n self.nproc_force = nproc_force\n\n self.atom_types = atom_types\n\n # sym_func parameters\n self.r_cutoff = r_cutoff\n\n # G function parameters\n self.params_G2 = params_G2\n self.params_G4 = params_G4\n\n if turn_nn_on:\n # read normalization parameters\n self.norm_mode = normalization_mode\n self.norm_params = json.loads(open(normalization_params_json).read())\n for atom_type in self.atom_types:\n if self.norm_mode == 0:\n for k in ['G_mean', 'G_min', 'G_max']:\n self.norm_params[k][atom_type] = np.array(self.norm_params[k][atom_type])\n elif self.norm_mode == 1:\n for k in ['G_mean', 'G_std']:\n self.norm_params[k][atom_type] = np.array(self.norm_params[k][atom_type])\n\n # Potential energy & force prediction network\n self.nn = PFNN(input_layer, hidden_layer, output_layer, self.atom_types)\n self.nn.set_training_NN(max_nums_atom=max_nums_atom)\n self.nn.set_prediction_NN(trained_model=trained_model)\n \n self.grad = {}\n for atom_type in self.atom_types:\n self.grad[atom_type] = tf.gradients(self.nn.prediction_total_energy, self.nn.prediction_input[atom_type])\n\n\n # data cache\n self.cached_positions_hash = None\n\n\n def update(self, atoms):\n \n # whether this structure has been calculated\n positions_hash = hash( np.array2string(atoms.positions, threshold=int(1.0e12)) )\n\n if positions_hash != self.cached_positions_hash:\n\n coor = [[atoms.get_chemical_symbols()[i],atoms.get_positions()[i,:]] for i in range(len(atoms))]\n\n r_mat, fc_mat = G_func.calculate_dis_fc(atoms.positions, self.r_cutoff, len(atoms))\n\n G = {}\n for atom_type in self.atom_types:\n params = { 'nproc' : self.nproc_epot,\n 'center_atom_type' : atom_type,\n 'atom_types' : self.atom_types,\n 'r_mat' : r_mat, \n 'fc_mat' : fc_mat, \n 'coor' : coor, \n 'positions' : atoms.positions,\n 'r_cutoff' : self.r_cutoff,\n 'params_G2' : self.params_G2, \n 'params_G4' : self.params_G4 }\n G[atom_type] = G_func.calculate_sym_func(params)\n\n \n # update data cache\n self.G = G\n self.r_mat = r_mat\n self.fc_mat = fc_mat \n self.coor = coor\n self.cached_positions_hash = positions_hash\n\n \n\n def get_potential_energy(self, atoms=None, force_consistent=False):\n \n self.update(atoms)\n \n # normalization\n G_norm = {}\n for atom_type in self.atom_types:\n if self.norm_mode == 0:\n G_norm[atom_type] = (self.G[atom_type]-self.norm_params['G_mean'][atom_type]) / \\\n (self.norm_params['G_max'][atom_type]-self.norm_params['G_min'][atom_type])\n elif self.norm_mode == 1:\n G_norm[atom_type] = (self.G[atom_type]-self.norm_params['G_mean'][atom_type]) / 
self.norm_params['G_std'][atom_type]\n\n # tf\n with tf.Session() as sess:\n prediction_energy_feed_dict = {}\n for atom_type in self.atom_types:\n prediction_energy_feed_dict[self.nn.prediction_input[atom_type]] = G_norm[atom_type]\n potential_energy = sess.run(self.nn.prediction_total_energy, feed_dict=prediction_energy_feed_dict)\n\n return float(potential_energy)\n\n \n def get_forces(self, atoms):\n \n self.update(atoms)\n\n # normalization\n G_norm = {}\n for atom_type in self.atom_types:\n if self.norm_mode == 0:\n G_norm[atom_type] = (self.G[atom_type]-self.norm_params['G_mean'][atom_type]) / \\\n (self.norm_params['G_max'][atom_type]-self.norm_params['G_min'][atom_type])\n elif self.norm_mode == 1:\n G_norm[atom_type] = (self.G[atom_type]-self.norm_params['G_mean'][atom_type]) / self.norm_params['G_std'][atom_type]\n\n # gradient value: potential energy ~ G\n grad_value = {}\n with tf.Session() as sess:\n grad_feed_dict = {}\n for atom_type in self.atom_types:\n grad_feed_dict[self.nn.prediction_input[atom_type]] = G_norm[atom_type]\n \n for atom_type in self.atom_types:\n grad_value[atom_type] = sess.run(self.grad[atom_type], feed_dict=grad_feed_dict)[0]\n\n # single process\n if self.nproc_force == 1:\n\n forces = []\n\n for atom_idx in range(len(self.coor)):\n\n force_calculation_input = {}\n force_calculation_input['atom_types'] = self.atom_types\n force_calculation_input['atom_idx'] = atom_idx\n force_calculation_input['r_mat'] = self.r_mat\n force_calculation_input['fc_mat'] = self.fc_mat\n force_calculation_input['coor'] = self.coor\n force_calculation_input['positions'] = atoms.positions\n force_calculation_input['params_G2'] = self.params_G2\n force_calculation_input['params_G4'] = self.params_G4\n force_calculation_input['r_cutoff'] = self.r_cutoff\n force_calculation_input['norm_params'] = self.norm_params\n force_calculation_input['norm_mode'] = self.norm_mode\n force_calculation_input['grad_value'] = grad_value\n\n\n force = dG_func.calculate_force( force_calculation_input )\n forces.append(force)\n\n # multiple process\n if self.nproc_force > 1:\n\n pool = Pool(self.nproc_force)\n\n force_calculate_inputs = [ {'atom_types' : self.atom_types,\n 'atom_idx' : atom_idx,\n 'r_mat' : self.r_mat,\n 'fc_mat' : self.fc_mat,\n 'coor' : self.coor,\n 'positions' : atoms.positions,\n 'params_G2' : self.params_G2,\n 'params_G4' : self.params_G4,\n 'r_cutoff' : self.r_cutoff,\n 'norm_params' : self.norm_params,\n 'norm_mode' : self.norm_mode,\n 'grad_value' : grad_value } for atom_idx in range(len(self.coor)) ]\n\n forces = pool.map(dG_func.calculate_force, force_calculate_inputs)\n pool.close()\n pool.join()\n\n return np.array(forces)\n \n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"net_field/NN_calculator.py","file_name":"NN_calculator.py","file_ext":"py","file_size_in_byte":8396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"82914000","text":"#!/Users/rlaney/.virtualenvs/het-ansible/bin/python\n\n'''\nThis is the main engine for working with inventory.py\nDynamically collects all devices and attributes from Opmantek's Mongo database\nUpdates/creates/deletes Netbox's devices database with the collected data\nUpdates/creates/deletes Ansible's hosts database with the collected data\nUpdates/creates/deletes Ansible's groups database with the collected data\nLogs everything to ansible_inventorylog in the current directory\n\nReturns:\nNothing\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\ntry:\n import 
simplejson as json\nexcept ImportError:\n import json\n\nimport csv\nfrom scripts.mytools import byteify\n#import os\n#import pprint\nimport re\n#import sys\nimport signal\nfrom pymongo.mongo_client import MongoClient\nfrom pymongo.collection import Collection, ReturnDocument\nfrom pymongo.database import Database\nfrom collections import OrderedDict\nfrom pprint import pprint\n#import json\nimport logging\n\nFORMAT1='%(asctime)s: %(levelname)s: %(lineno)d: %(message)s'\nFORMAT2='%(asctime)s: %(levelname)s: %(lineno)d: %(message)s \\n \\\n \\t%(process)d: %(processName)s: %(thread)d: %(threadName)s: %(funcName)s'\n\nlogging.basicConfig(filename='ansible_inventory.log',\n format=FORMAT2,\n level=logging.INFO)\n\n'''\nDefine the database connections globally\n'''\nlogging.info('Loading mongo connections')\nlocal_uri = 'mongodb://localhost/'\nlocal_con = MongoClient(local_uri)\n# Local Mongo DB and collection nodes for testing\nlocal_nmis = local_con.get_database('nmis')\nlocal_nodes = local_nmis.get_collection('nodes')\n# Local Mongo DB and new collection devices\nlocal_netbox = local_con.get_database('netbox')\nlocal_devices = local_netbox.get_collection('devices')\n# Local Mongo DB and new collection devices\nlocal_ansible = local_con.get_database('ansible')\nlocal_hosts = local_ansible.get_collection('hosts')\nlocal_groups = local_ansible.get_collection('groups')\n\n\n'''\nThis is the base class for working with all devices/hosts\nConverts the attributes from Opmantek to a readable format\n'''\nclass BaseHost:\n def __init__(self, name, host, customer=None, group=None, location=None,\n sysLocation=None, _id=None, sysName=None, netType=None,\n roleType=None, sysObjectName=None, nodeType=None,\n deviceType=None, nodeVendor=None, nodeModel=None,\n serialNum=None, uuid=None, active=None, sysDescr=None,\n os_info=None, *args, **kwargs):\n\n if name and host:\n self.name = name.split()[0].lower().replace('.', '_')\n self.primary_ip4 = host.split()[0]\n else:\n print('You must have a name and host')\n logging.warning('You must have a name and host')\n\n if customer:\n self.tenant_group = customer\n else:\n logging.debug('The field tenant_group is UNKNOWN!')\n self.tenant_group = 'Unknown'\n\n # Classify device by BU\n if group:\n self.tenant = group\n self.bu = group.split()[0].lower()\n else:\n logging.debug('The fields tenant and bu is UNKNOWN!')\n self.tenant = 'Unknown'\n self.bu = 'Unknown'\n\n # Classify device by site\n if not location:\n if not sysLocation:\n logging.debug('The fields sys_location and site is UNKNOWN!')\n self.sys_location = 'Unknown'\n self.site = 'Unknown'\n else:\n self.sys_location = sysLocation\n self.site = sysLocation.lower().replace(' ', '-')\n else:\n self.sys_location = sysLocation\n self.site = location.lower().replace(' ', '-')\n\n if _id:\n self.device_id = _id\n else:\n logging.debug('The field device_id is UNKNOWN!')\n self.device_id = 'Unknown'\n\n if sysName:\n self.sys_name = sysName\n else:\n logging.debug('The field sys_name is UNKNOWN!')\n self.sys_name = 'Unknown'\n\n # Classify device by network type\n if netType:\n self.net_type = netType.lower()\n else:\n logging.debug('The field net_type is UNKNOWN!')\n self.net_type = 'Unknown'\n\n # Classify device by network role\n if roleType:\n self.role_type = roleType.lower()\n else:\n logging.debug('The field role_type is UNKNOWN!')\n self.role_type = 'Unknown'\n\n if sysObjectName:\n self.device_type = sysObjectName\n else:\n logging.debug('The field device_type is UNKNOWN!')\n self.device_type = 
'Unknown'\n\n if nodeType:\n self.node_type = nodeType\n else:\n logging.debug('The field node_type is UNKNOWN!')\n self.node_type = 'Unknown'\n\n # Classify device by role\n if deviceType:\n self.device_role = deviceType.lower().replace(' ', '_')\n else:\n logging.debug('The field device_role is UNKNOWN!')\n self.device_role = 'Unknown'\n\n if nodeVendor:\n self.vendor = nodeVendor\n else:\n logging.debug('The field vendor is UNKNOWN!')\n self.vendor = 'Unknown'\n\n # Classify device by type\n if nodeModel:\n self.model = nodeModel.lower()\n else:\n logging.debug('The field model is UNKNOWN!')\n self.model = 'Unknown'\n\n if serialNum:\n self.serial = serialNum\n else:\n logging.debug('The field serial is UNKNOWN!')\n self.serial = 'Unknown'\n\n if uuid:\n self.asset_tag = uuid\n else:\n logging.debug('The field uuid is UNKNOWN!')\n self.asset_tag = 'Unknown'\n\n if active == 'true':\n self.status = 'Production'\n else:\n logging.debug('The field status is NON-PRODUCTION?')\n self.status = 'Non-Production'\n\n if sysDescr:\n self.custom_field_values = sysDescr\n else:\n logging.debug('The field custom_field_values is UNKNOWN!')\n self.custom_field_values = 'Unknown'\n\n # Classify device by platform\n if os_info is None:\n self.platform = 'Unknown'\n self.series = 'Unknown'\n self.version = 'Unknown'\n logging.debug('The field os_info is UNKNOWN!')\n else:\n try:\n if os_info['platform']:\n self.series = os_info['platform']\n else:\n self.series = 'Unknown'\n except KeyError as e:\n self.series = 'Unknown'\n logging.debug('The field series is UNKNOWN!')\n\n try:\n if os_info['os']:\n self.platform = os_info['os']\n else:\n self.platform = 'Unknown'\n except KeyError as e:\n self.platform = 'Unknown'\n logging.debug('The field platform is UNKNOWN!')\n\n try:\n if os_info['version']:\n self.version = os_info['version']\n else:\n self.version = 'Unknown'\n except KeyError as e:\n self.version = 'Unknown'\n logging.debug('The field version is UNKNOWN!')\n\n def fix_bad_keys(self):\n # Checks keys that contain a '.' 
and converts to '_' so Mongo accepts them as field names\n for key in list(vars(self)):\n if '.' in key:\n vars(self)[key.replace('.', '_')] = vars(self).pop(key)\n logging.info('Bad key found for {} at {}'.format(self.name, key))\n\n\n def to_netbox(self):\n '''\n Performs CRUD operations on Netbox devices collection\n '''\n device = {\n 'name' : str(self.name),\n 'primary_ip4' : str(self.primary_ip4),\n 'tenant_group' : str(self.tenant_group),\n 'tenant' : str(self.tenant),\n 'site' : str(self.site),\n 'net_type' : str(self.net_type),\n 'role_type' : str(self.role_type),\n 'device_type' : str(self.device_type),\n 'node_type' : str(self.node_type),\n 'device_role' : str(self.device_role),\n 'vendor' : str(self.vendor),\n 'model' : str(self.model)\n }\n\n try:\n local_devices.insert_one(device)\n #print('Successfully inserted {}'.format(device['name']))\n logging.info('Successfully inserted {}'.format(device['name']))\n except Exception as e:\n #print('Unable to insert the device {} {}'.format(device['name'], e))\n #print('Trying to update the device {}'.format(device['name']))\n logging.info('Unable to insert the device {} {}'.format(device['name'], e))\n logging.info('Trying to update the device {}'.format(device['name']))\n try:\n dev_filter = {'name': self.name}\n for k, v in device.items():\n dev_update = dict({'$addToSet': {k : v}})\n local_devices.update(dev_filter, dev_update, upsert=True)\n #print('Successfully updated {}'.format(device['name']))\n logging.info('Successfully updated {}'.format(device['name']))\n except Exception as e:\n #print('Unable to update the device {} {}'.format(host, e))\n logging.info('Unable to update the device {} {}'.format(device['name'], e))\n\n\n def to_ansible(self):\n '''\n Performs CRUD operations on Ansible hosts collection\n\n #### The json object structure on Mongo is:\n #### Host:\n {\n \"hostname\" : \"switch1\",\n \"vars\" : {\n \"inventory_hostname\" : \"10.22.4.10\",\n \"group_names\" : [\n \"core\",\n \"site-def\",\n \"NXOS\"\n ]\n },\n \"hostname\" : \"router1\",\n \"vars\" : {\n \"inventory_hostname\" : \"69.42.15.8\",\n \"group_names\" : [\n \"edge\",\n \"site-abc\",\n \"ISR\"\n ]\n }\n }\n '''\n device = {\n 'hostname' : str(self.name),\n 'vars': {\n 'inventory_hostname' : str(self.primary_ip4),\n 'group_names' : [\n str(self.bu),\n str(self.site),\n str(self.net_type),\n str(self.role_type),\n str(self.platform),\n str(self.series),\n str(self.model)\n ]\n }\n }\n\n try:\n local_hosts.insert_one(device)\n #print('Successfully inserted {}'.format(device['hostname']))\n logging.info('Successfully inserted {}'.format(device['hostname']))\n except Exception as e:\n #print('Unable to insert the device {} {}'.format(device['name'], e))\n #print('Trying to update the device {}'.format(device['hostname']))\n logging.info('Unable to insert the device {} {}'.format(device['hostname'], e))\n logging.info('Trying to update the device {}'.format(device['hostname']))\n try:\n dev_filter = {'hostname': self.name}\n for k, v in device.items():\n dev_update = dict({'$addToSet': {k : v}})\n local_hosts.update(dev_filter, dev_update, upsert=True)\n #print('Successfully updated {}'.format(device['hostname']))\n logging.info('Successfully updated {}'.format(device['hostname']))\n except Exception as e:\n #print('Unable to update the device {} {}'.format(device['hostname'], e))\n logging.info('Unable to update the device {} {}'.format(device['hostname'], e))\n\n\ndef create_ansible_groups():\n '''\n Performs CRUD operations on Ansible groups collection\n\n #### The json object structure on Mongo is:\n #### Group:\n {\n \"name\" : 
\"core\",\n \"vars\" : {\n \"ansible_username\" : \"example\",\n \"ansible_password\" : \"secret\"\n }\n }\n '''\n clean_groups = set()\n for device in local_hosts.find({}, { '_id': 0, 'vars.group_names': 1 }):\n for g in device['vars']['group_names']:\n clean_groups.add(g)\n\n for group in clean_groups:\n results = {\n 'name': group,\n 'vars': {},\n }\n\n\n try:\n local_groups.insert_one(results)\n #print('Successfully inserted:\\n{}'.format(results))\n logging.info('Successfully inserted:\\n{}'.format(results))\n except Exception as e:\n #print('Unable to insert the group {} {}'.format(results, e))\n #print('Trying to update the group {}'.format(results))\n logging.info('Unable to insert the group {} {}'.format(group, e))\n logging.info('Trying to update the group {}'.format(group))\n try:\n g_update = dict({'$push': {results['hosts'], results['hostvars']}})\n local_groups.update(results, g_update, upsert=True)\n #print('Successfully updated:\\n{}'.format(results))\n logging.info('Successfully updated:\\n{}'.format(group))\n except Exception as e:\n #print('Unable to update the group {} {}'.format(results, e))\n logging.info('Unable to update the group {} {}'.format(group, e))\n\n\ndef add_indexes():\n '''\n Creates the proper database indexes to improve lookup time\n '''\n try:\n local_devices.create_index([( 'name', pymongo.ASCENDING)], unique=True)\n logging.info('Created the \"NAME\" index on Netbox devices collection')\n except Exception as e:\n #print('Netbox MongoDB devices collection already has the \"NAME\" index')\n logging.info('Netbox MongoDB devices collection already has the \"NAME\" index')\n\n try:\n local_hosts.create_index([( 'name', pymongo.ASCENDING)], unique=True)\n logging.info('Created the \"NAME\" index on Ansible hosts collection')\n except Exception as e:\n #print('Ansible MongoDB hosts collection already has the \"NAME\" index')\n logging.info('Ansible MongoDB hosts collection already has the \"NAME\" index')\n\n try:\n local_groups.create_index([( 'hosts', pymongo.ASCENDING)], unique=True)\n logging.info('Created the \"HOSTS\" index on Ansible groups collection')\n except Exception as e:\n #print('Ansible MongoDB hosts collection already has the \"HOSTS\" index')\n logging.info('Ansible MongoDB hosts collection already has the \"HOSTS\" index')\n\n try:\n local_groups.create_index([( 'groups', pymongo.ASCENDING)], unique=True)\n logging.info('Created the \"GROUPS\" index on Ansible groups collection')\n except Exception as e:\n #print('Ansible MongoDB hosts collection already has the \"GROUPS\" index')\n logging.info('Ansible MongoDB hosts collection already has the \"GROUPS\" index')\n\n\nif __name__ == \"__main__\":\n for device in local_nodes.find():\n #device = byteify(device)\n device = BaseHost(**device)\n device.to_netbox()\n device.to_ansible()\n\n create_ansible_groups()\n add_indexes()\n\n","sub_path":"scripts/inventory/1st_inventory.py","file_name":"1st_inventory.py","file_ext":"py","file_size_in_byte":15537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"532933911","text":"import sys\nimport pymysql\nfrom functions.funtionLibrary import *\nfrom classes.database_access import DB_Connect\nfrom classes.clsBook import *\n\n\nmy_db = DB_Connect('root', '', 'python_projects')\n \n\nprint(\"Welcome To The Library Database!\\n\")\n\n\nhasCompleted = False\nwhile not hasCompleted:\n actionType = getAction()\n print(actionType)\n\n if actionType == \"Show All Books\":\n\n \"\"\"Show All Book is 
selected\"\"\"\n\n sqlStrSelect = selectAllSQLStr()\n results = my_db.executeSelectQuery(sqlStrSelect)\n count = len(results)\n if count > 0:\n for book in results:\n print(\"--------------------------------\")\n for item in book:\n print(str(item).upper() + \" : \"+ str(book[item]))\n \n else:\n print(\"No books are in the database.\")\n continue\n \n if actionType == \"Add a book\":\n\n \"\"\"Add all Book is selected\"\"\"\n \n isValidBookTitle = False\n while not isValidBookTitle:\n bookTitle = input(\"Please enter the name of the book title: \")\n if is_null(bookTitle) == False:\n if checkInvalidChars(bookTitle) == True:\n bookTitle = capTitles(bookTitle)\n break\n else:\n print(\"Invalid Book Title\")\n else:\n print(\"Please try again and dont leave Book Title blank.\")\n \n isValidBookAuthor = False\n while not isValidBookAuthor:\n author = input(\"Please enter the name of the book author: \")\n if is_null(author) == False:\n if hasNumbers(author) == False and checkInvalidChars(author) == True and hasNumbers(author) == False:\n bookAuthor = capTitles(author)\n break\n else:\n print(\"Invalid Book Author\")\n else:\n print(\"Please try again and dont leave Book Name blank.\")\n\n isValidBookISBN = False\n while not isValidBookISBN:\n ISBN = input(\"Please enter the name of the book ISBN: \")\n if hasNumbers(ISBN) == True and len(str(ISBN)) == 13:\n print(len(ISBN))\n ISBN = format_isbn(str(ISBN))\n break\n else:\n print(\"Invalid Book ISBN\\n\")\n \n isValidNumPurchased = False\n while not isValidNumPurchased:\n numPurchased = input(\"Please enter the number of the books purchased: \")\n if hasNumbers(numPurchased) == True:\n break\n else:\n print(\"Invalid Number Purchased\")\n \n my_book = Book(bookAuthor, ISBN, numPurchased, bookTitle, numCheckedOut = 0, retailPrice= 0)\n strSQLAddBook = buildInsertBookSQL(bookAuthor, str(ISBN), bookTitle, 0, numPurchased)\n my_db.executeQuery(strSQLAddBook)\n print(\"Book Added Successfully\")\n\n if actionType == \"Edit a book\":\n \n \"\"\"Edit a books value in the database\"\"\"\n isDone = False\n while not isDone:\n bookNameSTR = \"\"\n bookNameSTR = input(\"What is the name of the book that you would like to edit? \")\n if is_null(bookNameSTR) == False:\n strSql = selectBookSQLStr(bookNameSTR)\n bookVal = \"\"\n bookVal = my_db.executeSelectQuery(strSql)\n if len(bookVal) == 1:\n for item in bookVal:\n ID = item['bookID']\n bookTitle = item['bookTitle']\n bookAuthor = item['bookAuthor']\n bookISBN = item['ISBN']\n numberCheckedOut = item['numCheckedOut']\n numberPurchased = item['numPurchased']\n bookPrice = item['bookPrice']\n else:\n print(\"Sorry but the book you are searching for is not in our database\")\n continue\n \n actionBook = Book(bookAuthor,bookISBN, numberPurchased, bookTitle, numberCheckedOut, bookPrice)\n print(\"The book was found.\")\n showColumnOptions()\n selAction = input(\"Please select the column to change by the number associated with the action: \") \n if is_null(selAction) == False and hasLetters(selAction) == False:\n columnToChange = getColumnSelection(int(selAction))\n if not columnToChange == False:\n newValueColumn = input(\"Please enter the new value for the column: \")\n if is_null(newValueColumn) == False:\n \n if columnToChange == \"bookAuthor\" and selAction == 2:\n \"\"\" book author\"\"\"\n newValueColumn = capTitles(newValueColumn)\n else:\n print(\"Invalid Author value! 
Please try this step again.\")\n continue\n \n if columnToChange == \"ISBN\" and hasLetters(newValueColumn) == False and selAction == 3:\n \"\"\" ISBN and formatting\"\"\"\n newValueColumn = format_isbn(str(newValueColumn))\n else:\n print(\"Invalid ISBN value! Please try this step again.\")\n continue\n \n if columnToChange == \"numPurchased\" and hasLetters(newValueColumn)== False and selAction == 4:\n \"\"\" Number Purchased\"\"\"\n newValueColumn = int(newValueColumn)\n else:\n print(\"Invalid number purchased value! Please try this step again.\")\n continue\n \n if columnToChange == \"numCheckedOut\" and hasLetters(newValueColumn) == False and selAction == 5:\n \"\"\"Number Checked Out\"\"\"\n newValueColumn = int(newValueColumn)\n else:\n print(\"Invalid Number checked out value! Please try this step again.\")\n continue\n \n if columnToChange == \"bookTitle\" and checkInvalidChars(newValueColumn) == False and selAction == 6:\n \"\"\"Book Title\"\"\"\n newValueColumn = NoQuotes(newValueColumn)\n continue\n else:\n print(\"Invalid book title value! Please try this step again.\")\n \n if columnToChange == \"bookPrice\" and selAction == 7:\n \"\"\"Book Price\"\"\"\n newValueColumn = float(newValueColumn)\n else:\n print(\"Invalid Number purchased value! Please try this step again.\")\n continue\n \n editSQL = editSQLStr(columnToChange,newValueColumn,ID)\n my_db.executeQuery(editSQL)\n print(\"The change was successful.\")\n break\n else:\n print(\"Invalid entry. We cannot change the value to NULL. Please try this step again.\")\n continue\n else:\n print(\"The column you selected is invalid.Please try this step again\")\n continue\n else:\n print(\"Invalid selection! Please try this step again.\")\n continue\n else:\n print(\"Invalid Book Name. Please verify your selected and try again.\")\n continue \n \n if actionType == \"Remove a book\":\n \n \"\"\"Delete a book from the database\"\"\"\n \n bookNameSTR = \"\"\n bookNameSTR = input(\"What is the name of the book that you would like to delete? \")\n if is_null(bookNameSTR) == False:\n strSql = selectBookSQLStr(bookNameSTR)\n bookVal = \"\"\n bookVal = my_db.executeSelectQuery(strSql)\n if len(bookVal) == 1:\n for item in bookVal:\n ID = item['bookID']\n delSQLStr = removeBookSQLStr(ID)\n confirmation = input(\"Are you sure you want to remove this book? 
\\n\"+\n \"Please enter Y for Yes and N for No: \")\n if confirmation.lower() == \"y\":\n my_db.executeQuery(delSQLStr)\n print(\"The book was removed.\")\n continue\n else:\n print(\"We aborted the deletion as requested.\")\n continue\n else:\n print(\"We could not find the specified book in the system.\")\n continue\n else:\n print(\"The value you entered is invalid.\")\n continue\n \n if actionType == \"Exit Program\":\n exit()\n ","sub_path":"IT - 412/databaseAssignment/database_main.py","file_name":"database_main.py","file_ext":"py","file_size_in_byte":9612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"434507237","text":"# Copyright 2011-2012 Eucalyptus Systems, Inc.\n#\n# Redistribution and use of this software in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\n\ndef _walk_recursive(paths, fn, *params):\n symlinks = []\n for path in paths:\n fn(path, *params)\n for dirpath, dirs, files in os.walk(path):\n for d in dirs:\n fullpath = os.path.join(dirpath, d)\n if os.path.islink(fullpath):\n symlinks.append(fullpath)\n else:\n fn(fullpath, *params)\n for f in files:\n fn(os.path.join(dirpath, f), *params)\n return symlinks\n\ndef chown_recursive(path, uid, gid):\n path = [path]\n while path:\n path = _walk_recursive(path, os.chown, uid, gid)\n\ndef chmod_recursive(path, mode):\n path = [path]\n while path:\n path = _walk_recursive(path, os.chmod, mode)\n\ndef get_accesskey_secretkey(url):\n # self.warn('No credentials found; attempting local authentication')\n from eucadmin.getcredentials import GetCredentials\n obj = GetCredentials(euca_home='/',\n account='eucalyptus',\n user='admin',\n zipfile='notused')\n s = obj.get_accesskey_secretkey()\n return s.split('\\t')\n\n","sub_path":"eucadmin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"603782358","text":"import tempfile\nimport unittest\n\nfrom slivka.scheduler.command import CommandOption, PathWrapper\nfrom slivka.scheduler.exc import QueueUnavailableError, QueueBrokenError, \\\n JobNotFoundError\nfrom slivka.scheduler.executors import Executor, Job, GridEngineExec, \\\n GridEngineJob\n\ntry:\n import unittest.mock as mock\nexcept 
ImportError:\n import mock\n\nimport slivka.config\n\nmock.patch.object = mock.patch.object\n\nsettings_mock = mock.create_autospec(slivka.config.Settings)\ntmp_dir = tempfile.TemporaryDirectory()\nsettings_mock.WORK_DIR = tmp_dir.name\n\n\nclass TestExecutorBase(unittest.TestCase):\n\n def test_bin(self):\n exe = Executor(bin=\"python /var/slivka/manage.py\")\n self.assertListEqual(exe.bin, ['python', '/var/slivka/manage.py'])\n\n def test_empty_bin(self):\n exe = Executor()\n self.assertListEqual(exe.bin, [])\n\n @mock.patch('slivka.scheduler.executors.shlex', autospec=True)\n def test_options(self, mock_shlex):\n mock_shlex.split.return_value = [mock.sentinel.token]\n option = mock.create_autospec(CommandOption)\n option.name = mock.sentinel.option_name\n option.get_cmd_option.return_value = mock.sentinel.cmd_option\n\n exe = Executor(options=[option])\n options_cmd = exe.get_options({\n mock.sentinel.option_name: mock.sentinel.option_val\n })\n\n option.get_cmd_option.assert_called_with(mock.sentinel.option_val)\n mock_shlex.split.assert_called_with(mock.sentinel.cmd_option)\n self.assertListEqual(options_cmd, [mock.sentinel.token])\n\n def test_qargs(self):\n exe = Executor(qargs=mock.sentinel.qargs)\n self.assertEqual(exe.qargs, mock.sentinel.qargs)\n\n def test_empty_qargs(self):\n exe = Executor()\n self.assertListEqual(exe.qargs, [])\n\n def test_env(self):\n exe = Executor(env=mock.sentinel.env)\n self.assertEqual(exe.env, mock.sentinel.env)\n\n def test_empty_env(self):\n exe = Executor()\n self.assertDictEqual(exe.env, {})\n\n\nclass TestExecutorOptions(unittest.TestCase):\n\n def test_single_option(self):\n exe = Executor(options=[CommandOption('alpha', '${value}')])\n cmd = exe.get_options({'alpha': 'foo'})\n self.assertListEqual(['foo'], cmd)\n\n def test_option_with_space(self):\n exe = Executor(options=[CommandOption('alpha', '${value}')])\n cmd = exe.get_options({'alpha': 'foo bar'})\n self.assertListEqual(['foo bar'], cmd)\n\n def test_equal_separated_option(self):\n exe = Executor(options=[CommandOption('alpha', '-a=${value}')])\n cmd = exe.get_options({'alpha': 'foo'})\n self.assertListEqual(['-a=foo'], cmd)\n\n def test_equal_separated_option_with_space(self):\n exe = Executor(options=[CommandOption('alpha', '-a=${value}')])\n cmd = exe.get_options({'alpha': 'foo bar'})\n self.assertListEqual(['-a=foo bar'], cmd)\n\n def test_multiple_arguments(self):\n exe = Executor(\n options=[\n CommandOption('alpha', 'foo', default=True),\n CommandOption('beta', 'boo', default=True),\n CommandOption('gamma', '${value}'),\n CommandOption('delta', '${value}')\n ]\n )\n cmd = exe.get_options({'gamma': 'goo', 'delta': 'doo doom'})\n self.assertListEqual(['foo', 'boo', 'goo', 'doo doom'], cmd)\n\n def test_split_flag(self):\n exe = Executor(\n options=[CommandOption('alpha', 'foo bar', default=True)]\n )\n cmd = exe.get_options({})\n self.assertListEqual(['foo', 'bar'], cmd)\n\n def test_skip_empty_arguments(self):\n exe = Executor(options=[CommandOption('alpha', '')])\n cmd = exe.get_options({})\n self.assertListEqual([], cmd)\n\n\n# noinspection PyUnusedLocal\n@mock.patch('slivka.scheduler.executors.Executor.submit')\n@mock.patch('slivka.scheduler.executors.Executor.get_job_cls')\n@mock.patch('slivka.scheduler.executors.slivka.settings', new=settings_mock)\nclass TestExecutorSubmit(unittest.TestCase):\n\n def test_submit_called(self, mock_get_job, mock_submit):\n exe = Executor()\n exe(mock.sentinel.values)\n mock_submit.assert_called_once_with(mock.sentinel.values, mock.ANY)\n\n 
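# Illustrative sketch, not part of the original suite: the class-level\n # patches feed every test_* method, so call_args_list can inspect repeated\n # submissions. That each call's cwd lands under tmp_dir.name is an\n # assumption carried over from test_submit_cwd below, not verified further.\n def test_submit_cwd_each_call(self, mock_get_job, mock_submit):\n exe = Executor()\n exe(mock.sentinel.values)\n exe(mock.sentinel.values)\n cwds = [args[1] for (args, kwargs) in mock_submit.call_args_list]\n self.assertEqual(len(cwds), 2)\n for cwd in cwds:\n self.assertTrue(cwd.startswith(tmp_dir.name))\n\n 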
def test_submit_cwd(self, mock_get_job, mock_submit):\n exe = Executor()\n exe(mock.sentinel.values)\n ((val, cwd), kwargs) = mock_submit.call_args\n self.assertTrue(cwd.startswith(tmp_dir.name))\n\n def test_job_created(self, mock_get_job, mock_submit):\n exe = Executor()\n job = exe(mock.sentinel.values)\n mock_job = mock_get_job.return_value.return_value\n self.assertEqual(job, mock_job)\n\n def test_job_args(self, mock_get_job, mock_submit):\n mock_submit.return_value = mock.sentinel.job_id\n exe = Executor()\n exe(mock.sentinel.values)\n mock_job = mock_get_job.return_value\n mock_job.assert_called_once_with(mock.sentinel.job_id, mock.ANY, exe)\n\n def test_queue_unavailable(self, mock_get_job, mock_submit):\n mock_submit.side_effect = QueueUnavailableError(mock.sentinel.msg)\n exe = Executor()\n with self.assertRaises(QueueUnavailableError) as cm:\n exe(mock.sentinel.values)\n self.assertTupleEqual(cm.exception.args, (mock.sentinel.msg,))\n\n def test_queue_broken(self, mock_get_job, mock_submit):\n mock_submit.side_effect = QueueBrokenError(mock.sentinel.msg)\n exe = Executor()\n with self.assertRaises(QueueBrokenError) as cm:\n exe(mock.sentinel.values)\n self.assertTupleEqual(cm.exception.args, (mock.sentinel.msg,))\n\n def test_job_not_found(self, mock_get_job, mock_submit):\n mock_submit.side_effect = JobNotFoundError(mock.sentinel.msg)\n exe = Executor()\n with self.assertRaises(JobNotFoundError) as cm:\n exe(mock.sentinel.values)\n self.assertTupleEqual(cm.exception.args, (mock.sentinel.msg,))\n\n def test_unexpected_queue_error(self, mock_get_job, mock_submit):\n mock_submit.side_effect = Exception\n exe = Executor()\n self.assertRaises(QueueBrokenError, exe, mock.sentinel.values)\n\n\nclass TestJob(unittest.TestCase):\n\n # noinspection PyUnresolvedReferences\n def setUp(self):\n self.mock_exe = mock.create_autospec(Executor)\n\n def test_status_property(self):\n job = Job(mock.sentinel.id, None, self.mock_exe)\n with mock.patch.object(job, 'get_status') as mock_get_status:\n mock_get_status.return_value = mock.sentinel.status\n self.assertEqual(job.status, mock.sentinel.status)\n mock_get_status.assert_called_once_with(mock.sentinel.id)\n\n def test_result_property(self):\n job = Job(mock.sentinel.id, None, self.mock_exe)\n with mock.patch.object(job, 'get_result') as mock_get_result, \\\n mock.patch.object(job, 'get_status',\n return_value=Job.STATUS_COMPLETED):\n mock_get_result.return_value = mock.sentinel.result\n self.assertEqual(job.return_code, mock.sentinel.result)\n mock_get_result.assert_called_once_with(mock.sentinel.id)\n\n def test_file_results(self):\n mock_file_result1 = mock.create_autospec(PathWrapper)\n mock_file_result1.get_paths.return_value = ['/foo', '/bar']\n mock_file_result2 = mock.create_autospec(PathWrapper)\n mock_file_result2.get_paths.return_value = ['/qux']\n self.mock_exe.result_paths = [mock_file_result1, mock_file_result2]\n\n job = Job(None, mock.sentinel.cwd, self.mock_exe)\n self.assertListEqual(job.result_paths, ['/foo', '/bar', '/qux'])\n mock_file_result1.get_paths.assert_called_once_with(mock.sentinel.cwd)\n mock_file_result2.get_paths.assert_called_once_with(mock.sentinel.cwd)\n\n\nclass TestGridEngineExec(unittest.TestCase):\n\n def setUp(self):\n self.subprocess_path = mock.patch(\n 'slivka.scheduler.executors.subprocess'\n )\n self.mock_subprocess = self.subprocess_path.start()\n self.mock_popen = self.mock_subprocess.Popen.return_value\n self.mock_popen.communicate.return_value = (\n 'Your job 0 (command) has been submitted', 
''\n )\n self.exe = GridEngineExec()\n\n def tearDown(self):\n self.subprocess_path.stop()\n\n @mock.patch('slivka.scheduler.executors.Executor.qargs',\n new_callable=mock.PropertyMock)\n def test_qsub(self, mock_qargs):\n mock_qargs.return_value = [mock.sentinel.qarg1, mock.sentinel.qarg2]\n self.exe.submit({}, '')\n expected_arg = ([\n 'qsub', '-cwd', '-e', 'stderr.txt', '-o', 'stdout.txt',\n mock.sentinel.qarg1, mock.sentinel.qarg2\n ],)\n (args, kwargs) = self.mock_subprocess.Popen.call_args\n self.assertTupleEqual(args, expected_arg)\n\n @mock.patch('slivka.scheduler.executors.Executor.bin',\n new_callable=mock.PropertyMock)\n def test_command(self, mock_bin):\n mock_bin.return_value = ['mockpython', 'mockscript.py']\n self.exe.submit({}, '')\n self.mock_popen.communicate.assert_called_once_with(\n \"echo > started;\\n\"\n \"mockpython mockscript.py;\\n\"\n \"echo > finished;\"\n )\n\n def test_job_id(self):\n self.mock_popen.communicate.return_value = (\n 'Your job 4365 (command) has been submitted', ''\n )\n job_id = self.exe.submit({}, '')\n self.assertEqual(job_id, \"4365\")\n\n\nclass TestGridEngineJob(unittest.TestCase):\n\n qstat_output = (\n \"1771701 1.00500 jp_4NIaEBc mockuser r \"\n \"08/13/2016 17:57:21 c6100.q@c6100-1-4.cluster.life 4\\n\"\n \"1778095 1.00500 jp_D21Nm6a mockuser r \"\n \"08/15/2016 22:53:29 c6100.q@c6100-1-4.cluster.life 4\\n\"\n \"1791672 0.01993 R mockuser Eqw \"\n \"08/17/2016 17:41:34 1\\n\"\n \"1776414 0.00588 fic_Sample mockuser qw \"\n \"08/15/2016 11:44:23 10\\n\"\n \"1776413 0.00589 fic_Sample mockuser d \"\n \"08/15/2016 11:44:23 10\\n\"\n )\n\n def setUp(self):\n self.getuser_patch = mock.patch(\n 'slivka.scheduler.executors.getpass.getuser',\n return_value='mockuser'\n )\n self.getuser_patch.start()\n self.subprocess_patch = mock.patch(\n 'slivka.scheduler.executors.subprocess',\n autospec=True\n )\n self.mock_subprocess = self.subprocess_patch.start()\n mock_popen = self.mock_subprocess.Popen.return_value\n mock_popen.communicate.return_value = (self.qstat_output, '')\n mock_exec = mock.create_autospec(Executor)\n mock_exec.result_paths = []\n self.job = GridEngineJob('', '', mock_exec)\n\n def tearDown(self):\n self.getuser_patch.stop()\n self.subprocess_patch.stop()\n\n @mock.patch('slivka.scheduler.executors.os.path', autospec=True)\n def test_qstat_call(self, mock_path):\n mock_path.getmtime.return_value = 0\n self.job.get_status('abc')\n (args, kwargs) = self.mock_subprocess.Popen.call_args\n expected_args = ('qstat -u \\'mockuser\\'',)\n self.assertTupleEqual(args, expected_args)\n self.assertTrue(kwargs['shell'])\n\n def test_job_queued(self):\n status = self.job.get_status('1776414')\n self.assertEqual(status, Job.STATUS_QUEUED)\n\n def test_job_running(self):\n status = self.job.get_status('1778095')\n self.assertEqual(status, Job.STATUS_RUNNING)\n\n @mock.patch('slivka.scheduler.executors.os.path', autospec=True)\n def test_job_completed_not_synced(self, mock_path):\n mock_path.getmtime.side_effect = (10, FileNotFoundError)\n status = self.job.get_status('1772453')\n self.assertEqual(status, Job.STATUS_RUNNING)\n\n @mock.patch('slivka.scheduler.executors.os.path', autospec=True)\n def test_job_running_restarted(self, mock_path):\n mock_path.getmtime.side_effect = (20, 19)\n status = self.job.get_status('1772453')\n self.assertEqual(status, Job.STATUS_RUNNING)\n\n @mock.patch('slivka.scheduler.executors.os.path', autospec=True)\n def test_job_completed(self, mock_path):\n mock_path.getmtime.side_effect = (19, 20)\n status = 
self.job.get_status('1772453')\n self.assertEqual(status, Job.STATUS_COMPLETED)\n\n def test_job_deleted(self):\n status = self.job.get_status('1776413')\n self.assertEqual(status, Job.STATUS_DELETED)\n","sub_path":"tests/executors/test_executor.py","file_name":"test_executor.py","file_ext":"py","file_size_in_byte":12653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230392618","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 6 19:05:51 2018\r\n\r\n@author: nac2313\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport copy\r\nimport itertools\r\nimport spinops as so\r\nimport spintensor as st\r\nexp = np.exp\r\n\r\ndef ExOp(index1,index2,state):\r\n \r\n \r\n State = copy.deepcopy(state)\r\n '''\r\n Python only passes parameters within functions: NEVER copies. \r\n I.e state->State where 'State' is NOT a copy of 'state', it is a new name for state and any changes\r\n to 'State' are reflected in 'state'. You are basically refering to the same object by a different name.\r\n References to either of the object's names will change the object: two names, one object. \r\n \r\n deepcopy(object) will create a legitamate copy of an object, and anything done to the copy will not be \r\n reflected in the original.\r\n \r\n Exchange coupling operator between two spin-1/2 particles; gives coupling coefficients and coupled states.\r\n \r\n Operator is of the form ExOp = Sz1*Sz2 + 0.5(S1p*S2m + S1m*S2p)\r\n \r\n where S1p(S1m) is the raising(lowering) operator for spin 1 <-particle index.\r\n \r\n '''\r\n #spinflip coefficient\r\n if State[index1] == State[index2]:\r\n \r\n coeff = 0.25 \r\n State = State\r\n \r\n return([(coeff,state)])\r\n \r\n if State[index1] == 0 and State[index2] == 1:\r\n \r\n \r\n coeff1 = -0.25\r\n coeff2 = 0.5\r\n \r\n State[index1] = 1\r\n State[index2] = 0\r\n \r\n return([(coeff1,state),(coeff2,State)])\r\n \r\n if State[index1] == 1 and State[index2] == 0:\r\n \r\n coeff1 = -0.25\r\n coeff2 = 0.5\r\n State[index1] = 0\r\n State[index2] = 1\r\n \r\n return([(coeff1,state),(coeff2,State)])\r\n \r\n\r\ndef Intpairs(N,nlegs,modex='open',modey='open'):\r\n \r\n '''\r\n Forms a list of tuples which give information on which sites interact, and in\r\n which direction, for nearest neighbor coupling and are of the form:\r\n (exchange direction, site index 1, site index 2)\r\n \r\n '''\r\n pairs = []#list of tuples of the form (exchange direction, site index 1, site index 2)\r\n Nint = int(N/nlegs)\r\n \r\n for k in range(0,N):\r\n \r\n #handles x direction\r\n if (k+1)%Nint == 0 and k > 0:\r\n \r\n if modex == 'periodic':\r\n \r\n pair = (1,int(k),int(k-Nint+1))\r\n pairs.append(pair)\r\n \r\n elif modex == 'open':\r\n \r\n pairs = pairs #i.e. nothing happens\r\n\r\n else:\r\n \r\n pair = (1,int(k),int(k+1))\r\n pairs.append(pair)\r\n \r\n #handles y direction\r\n if N-k <= Nint: #covers second to last row\r\n \r\n if modey == 'periodic':\r\n \r\n pair = (2,int(k),int(k-Nint*(nlegs-1)))\r\n pairs.append(pair)\r\n \r\n elif modey == 'open':\r\n \r\n pairs = pairs #again nothing happens\r\n \r\n else:\r\n \r\n pair = (2,int(k),int(k+Nint))\r\n pairs.append(pair)\r\n \r\n return(pairs)\r\n\r\n\r\n\r\ndef Statelist(N,S,Jcurr=0,full='False'):\r\n '''\r\n Generates a list of all total Sz = Jcurr multi-states for spin 1/2 systems\r\n e.g. 
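with sum(state) - N*S = Jcurr; for N=4, S=1/2 a state needs sum(state) = Jcurr + 2, so\r\n    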
Jcurr = 0 then generates -> [1,0,1,0],[1,1,0,0]...\r\n \r\n '''\r\n basiskey = list(itertools.product([0, 1], repeat=N))\r\n states = []\r\n for i,basis in enumerate(basiskey):\r\n \r\n basis_state = [basis[i] for i in range(len(basis))]\r\n states.append((basis_state,i))\r\n \r\n #Generates all states\r\n if full == 'True':\r\n statelist = [state[0] for state in states]\r\n \r\n #Filters states with total Sz = Jcurr and generates a list of tuples whos first element is the binary representation of the total spin state, and second element is the state index\r\n elif full == 'False':\r\n index = []\r\n \r\n for state in states:\r\n if sum(state[0])-N*S == Jcurr:\r\n index.append(state[1])\r\n else:\r\n index = index\r\n \r\n slist = np.take(states,index,axis=0)\r\n statelist = [state[0] for state in slist]\r\n \r\n return(list(reversed(statelist)))\r\n\r\ndef Statedict(listofstates):\r\n '''\r\n Generates a dictionary given a list of states(arrays) who keys are the elements of the\r\n state(array) as a string e.g. [1,0,1,0]->'1010', and values are the index of the state\r\n in the list of states e.g. [[1,0,1,0],[1,0,1,1]] -> [1,0,1,1] -> key=1011 : value=1\r\n \r\n '''\r\n \r\n OrderedStates = []\r\n for i,state in enumerate(listofstates):\r\n key = ''.join(map(str,state))\r\n A = (key,i)\r\n OrderedStates.append(A)\r\n \r\n statedict = dict(OrderedStates)\r\n \r\n return(statedict)\r\n\r\ndef expandState(binarystate):\r\n \r\n vec = []\r\n up = [1,0]\r\n down = [0,1]\r\n \r\n for spin in binarystate:\r\n \r\n if spin == 0:\r\n vec.append(down)\r\n elif spin ==1:\r\n vec.append(up)\r\n exstate = vec[0]\r\n for i in range(len(vec)-1):\r\n D = np.kron(exstate,vec[i+1])\r\n exstate = D\r\n \r\n return(exstate)\r\n\r\ndef exval(state1,state2,Op):\r\n #Performs the operation of bra*operator*ket\r\n \r\n A = state1@Op@state2.T\r\n #A = np.asscalar(a)\r\n \r\n return(A)\r\n\r\ndef timeEv(t,Eneig1,Eneig2,Coeff):\r\n \r\n #multiplies each static coefficient by the appropriate time dependent one\r\n \r\n A = exp(-1j*t*(Eneig2-Eneig1))*Coeff\r\n \r\n return(A)\r\n\r\n\r\ndef Expand(Statevec,S,N,Jcurr):\r\n \r\n #expands a state from a subspace with total Sz=Jcurr to full hilbert space of system for spin-1/2 particles\r\n \r\n basiskey = list(itertools.product([0, 1], repeat=N))\r\n states = []\r\n index = []\r\n \r\n #Generates binary basis representation tuples states = (state,index)\r\n for i,basis in enumerate(basiskey):\r\n \r\n basis_state = [basis[i] for i in range(len(basis))]\r\n states.append((basis_state,i))\r\n \r\n #state[0] gives the ith state in states\r\n for state in states:\r\n if sum(state[0])-N*S == Jcurr: #summing up all the ones and zeros in a state and then subtracting the total system spin (N*S) will give Jcurr for that state\r\n index.append(state[1])#keeps track of which states are Jcurr states\r\n else:\r\n index = index\r\n \r\n #here the statevec for a give Jcurr is expanded based on the index e.g. 
[A B C] ->[0 A 0 0 B 0 C]\r\n    A = Statevec\r\n    State = np.zeros(int((2*S+1)**N))\r\n    j=0\r\n    for value in index:\r\n        \r\n        State[int(value)] = A[j]\r\n        j = j+1\r\n        \r\n    #print(State)\r\n    return(np.asmatrix(State))","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"203857999","text":"import shutil\nfrom urllib.request import quote\nimport scrapy\nfrom scrapy.http import HtmlResponse\nimport re\nfrom tutorial.items import *\nfrom urllib import parse\nfrom w3lib.html import *\nfrom w3lib.html import remove_tags\nclass DmozSpider4(scrapy.Spider):  # inherits from scrapy.Spider\n\n    print(\"entered spider demo 5\")\n    import os\n    if os.path.exists('output'):\n        shutil.rmtree('output')\n    yuming='中国青年'\n    lang='英语'\n\n    '''\n    All hyperparameters are edited here; only the two attributes below matter.\n    'name' can be anything and is what you reference when starting the spider from main;\n    'html' is the site to crawl.\n    '''\n    name = \"dmoz7\"  # unique spider id, must not repeat; used when launching the crawl\n    # html='http://www.171english.cn/news/'\n    # html='http://www.171english.cn/news/2018'\n    # html='http://www.171english.cn/news/2019'\n    html='http://ru.tingroom.com/yuedu/zedzyd/'\n\n    from bs4 import BeautifulSoup\n    # the start page goes here\n\n    baseUrl=html\n\n    import requests\n    # a=requests.get(html).content\n\n    # bs = BeautifulSoup(a, \"html.parser\")  # parsed HTML tree\n    # print(bs)\n    # next, collect every page to crawl from bs\n    # print(bs.find_all(\"a\"))  # get all <a> tags, i.e. the hyperlinks\n    from selenium import webdriver\n    import sys\n\n    # browser = webdriver.Firefox()  # Get local session of firefox\n    # aaa=browser.get(\"http://news.sina.com.cn/c/2013-07-11/175827642839.shtml \")  # Load page\n    # print(aaa)\n    saveall=[html]\n    print(777777777777777777777777777777,baseUrl)\n    if 0:  # debug only; normally not run this way. Only pages rendered by dynamic js need it. Crawling is much slower, but it avoids unnecessary js bugs\n        while 1:\n            tmpurl=saveall[-1]\n            from selenium import webdriver\n            from selenium.webdriver.chrome.options import Options\n\n            chrome_options = Options()\n            chrome_options.add_argument(\"--headless\")\n            from .utilsme import driver\n\n            base_url = tmpurl\n            driver.get(base_url)  # note: the result is written straight into the driver\n            # print(driver.page_source)\n            a=driver.page_source\n\n            bs = BeautifulSoup(a, \"html.parser\")  # parsed HTML tree\n            # print(bs)\n            # next, collect every page to crawl from bs\n            # print(bs.find_all(\"a\"))\n            import re\n            # tmp=bs.find_all(text=re.compile(\"Next[ ]*\"))\n            # print(tmp)\n            now=None\n\n            for s in bs('a'):\n                # print(s.text,444444444444444444444444444444444444444444444444)\n                if s.text==\" 下一页» \":  # the site's \"next page\" link text\n                    now=s.extract()\n            if now is None:\n                break  # no next-page link found\n            print(\"look\",now)\n            # 'now' needs URL-encoding (the href may contain Chinese);\n            # note the gbk encoding these older sites use\n            # now=parse.quote(now.get('href'))\n            now = parse.quote(now.get('href'), safe=\";/?:@&=+$, \", encoding=\"gbk\")\n            # now=baseUrl+now\n            if now in saveall:  # guard against loops\n                break\n            else:\n                saveall.append(now)\n        print(saveall,'all index pages collected')\n\n# below: collect all index pages by a direct request and match instead.\n    if 0:  # debug only\n        while 1:\n\n            tmpurl=saveall[-1]\n\n            import urllib\n            from bs4 import BeautifulSoup\n\n            url = tmpurl\n            # print(url,8989898998)\n            page = requests.get(url)\n            page.encoding = 'utf-8'\n            # soup = BeautifulSoup(page,\"html.parser\")\n            print(page,3434343434343)\n            bs = BeautifulSoup(page.text, \"html.parser\")  # parsed HTML tree\n            print(bs,999999999999999999999999999999999999)\n            # print(bs)\n            # next, collect every page to crawl from bs\n            # print(bs.find_all(\"a\"))\n            import re\n            # tmp=bs.find_all(text=re.compile(\"Next[ ]*\"))\n            # print(tmp)\n            now=None\n            # print(url,bs('a'),777777777777777777)\n            for s in bs('a'):\n                # print(s.text)\n                if s.text==\"下一页\":  # \"next page\" link text on this site\n                    now=s.extract()\n                    print(now,12345)\n            # 'now' needs URL-encoding (the href may contain Chinese)\n            # 
now=parse.quote(now.get('href'))\n            if now is None:\n                break\n            print(\"look\",now)\n            # note the gbk encoding these older sites use\n            now = 'https:'+parse.quote(now.get('href'), safe=\";/?:@&=+$, \", encoding=\"gbk\")\n            # print(now,556565656565)\n            saveall.append(now)\n\n    # successfully crawled up to here.\n    # print(\"entered demo3\")\n    # preprocessing: find all first-level index pages and assign them to saveall\n    # print(saveall,\"main pages\")\n    # # extract every href\n    # tmpurl = saveall[-1]\n    # from selenium import webdriver\n    # from selenium.webdriver.chrome.options import Options\n    #\n    # chrome_options = Options()\n    # chrome_options.add_argument(\"--headless\")\n    # driver = webdriver.Chrome(options=chrome_options)\n    #\n    # base_url = tmpurl\n    # driver.get(base_url)  # note: the result is written straight into the driver\n    # # print(driver.page_source)\n    # a = driver.page_source\n    # bs = BeautifulSoup(a, \"html.parser\")  # parsed HTML tree\n    # # print(bs('a'),888888888888)\n    # tmp=bs.find_all(class_='pagelist')\n    # out=[]\n    # for i in tmp:\n    #     out+=[k['href'] for k in i.find_all('a')]\n    #\n    # base_url=html[:html.rindex(r'/')+1]\n    # print(base_url,77777777777777)\n    # aaa=base_url\n    # baseUrl=base_url\n    # for i in range(len(out)):\n    #     out[i]=aaa+out[i]\n    # # out=[aaa+i for i in out ]\n    # # print(out,6767676767)\n    # # filtering, see https://www.jianshu.com/p/5f207f7309ec\n    # saveall=out\n    #\n    # print(saveall)\n    #\n    # [url1,...........urln]\n    #\n    # start_urls = saveall  # links to start crawling from\n\n    # edit this list directly\n\n    saveall=[\n\n        #'http://www.171english.cn/news/2018/june/',\n        'http://ru.tingroom.com/yuedu/zedzyd/',\n\n    ]\n    start_urls = saveall  # links to start crawling from; scrapy requires the name start_urls.\n\n    def parse(self, response):  # first-level crawl\n        # xpath tutorials: https://blog.csdn.net/qq_27283619/article/details/88704479\n        # https://www.cnblogs.com/wt7018/p/11749778.html\n        # @ selects an attribute\n        # scrapy is hard to step-debug, so prints are used everywhere\n        # help(response.url)\n        print(response.url,77777777777777777777777777777777777777777777777777)\n        print(response,'********************** url currently being crawled')\n        div_list = response.xpath('//ul[@class=\"e2\"]/li/a/@href')\n        div_list=[i.extract() for i in div_list]\n        # div_list = response.xpath('//div[@class=\"newslist solid\"]')\n        # print(90909090,div_list)\n\n        # print(div_list)\n        # print(div_list[0])\n        # print(div_list[-1])\n        print(\"entered the first-level crawler\")\n        print(div_list,99999999999999999999999999999999999999)\n        for i in div_list:\n            # print(self.baseUrl+i.extract())  # got the link, hand it to the second-level crawler\n            item = en_youth()\n            item['link'] = i\n            # print(item['link'],\"link\")\n            # every page found at level one triggers one second-level crawl.\n            yield scrapy.Request(item['link'], callback=self.parse_detail\n                                 ,meta={'item':item},encoding='raw_unicode_escape')\n\n        # https://blog.csdn.net/Light__1024/article/details/88763541  how to crawl second-level pages\n\n    def parse_detail(self, response):  # second-level crawl\n        infomation=response.meta['item']['link']\n        # print(infomation,988776754456435345435345435)\n        # print(infomation,\"second-level url\")\n        item = response.body\n        # print(item,9090909090909090909090909090)\n        # print(item,444444444444444444444444444444444444)\n        # print(item)\n        # print(response.body,\"???????????????\")\n        # print(\"******** second-level crawl result\")  # [@class=\"TRS_Editor\"]\n        item=en_youth()\n\n        # pre-filter: tried rewriting body, but it did not take effect??\n        #\n        # # response.body=\"dfadsf\"\n        #\n        # tmp=re.sub(r'<script>.*?</script>','',str(response.body))\n        # print(tmp,6666666666666666666666666666666666666666)\n        # response._set_body(tmp.encode(response.encoding))\n        # 
print(response.body,777777777777777777777777777777777777777777777)\n        # print(response.body,88888888888888888888888888888888888)\n        # HtmlResponse.replace()\n        # HtmlResponse.replace('body',remove_tags_with_content(response.body, 'script'))\n\n        # tmp2=response.xpath('//td[@class=\"e14\"]//text()').extract()\n        # multiple xpath fallbacks are needed because the page formats are not uniform.\n        # the first one covers pages that only use a div, with no <p> tags inside.\n        item['neirong']= response.xpath('//div[@class=\"content\"]').extract()\n        item['neirong']+= response.xpath('//div[@id=\"fontzoom\"]//p').extract()\n        item['neirong']+= response.xpath('//td[@class=\"e14\"]').extract()\n        # print(item['neirong'],22222222222222222222222)\n\n        # keep only fragments that do not still carry script tags\n        item['neirong']=[i for i in item['neirong'] if '<script' not in i]\n\n        # print(item['neirong'], '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n        # print(item['neirong'], 8888888888888888888)\n\n        save2='\\r\\n'.join(item['neirong'])\n        print(save2,9999999999999999999999999999999999999)\n        item['neirong']=save2\n        item['title']=infomation\n        yield item\n        # next, learn pipelines for file reading/writing.\n        # enable the pipeline in settings so results are written to a file\n        # https://www.cnblogs.com/python2687806834/p/9836935.html\n        pass\n\n#\n# if __name__==\"__main__\":\n#     DmozSpider()","sub_path":"tutorial/spiders/demo7.py","file_name":"demo7.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"448579851","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[31]:\n\n\nimport os\nimport logging\nimport sys\nimport unittest\nimport time\nimport wtowers.wtowers as wtowers\n\nimport numpy\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 10, 10\n\nfrom rascil.data_models.polarisation import PolarisationFrame\nfrom rascil.processing_components.image.operations import qa_image, show_image, export_image_to_fits, smooth_image, copy_image\nfrom rascil.processing_components.imaging.base import predict_skycomponent_visibility\nfrom rascil.processing_components.imaging import dft_skycomponent_visibility\nfrom rascil.processing_components.simulation import create_named_configuration\nfrom rascil.processing_components.simulation import ingest_unittest_visibility, create_unittest_model, create_unittest_components\nfrom rascil.processing_components.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, insert_skycomponent\nfrom rascil.processing_components.visibility.coalesce import convert_blockvisibility_to_visibility\nfrom rascil.processing_components.imaging.base import invert_2d, predict_2d, shift_vis_to_image, normalize_sumwt\nfrom rascil.processing_components.visibility.base import copy_visibility\nfrom rascil.processing_components.imaging.ng import predict_ng, invert_ng\nfrom rascil.processing_components.griddata.kernels import create_awterm_convolutionfunction\n\n\n# In[3]:\n\n\nrdir = './'\nverbosity = True\ndopol = False\ndospectral = True\nzerow = False\nblock = True\npersist = True\nnpixel = 1024\nlow = create_named_configuration('LOWBD2', rmax=750.0)\nfreqwin = 21\nblockvis = list()\nntimes = 5\ntimes = numpy.linspace(-3.0, +3.0, ntimes) * numpy.pi / 12.0\n \nif freqwin > 1:\n    frequency = numpy.linspace(0.99e8, 1.01e8, freqwin)\n    channelwidth = numpy.array(freqwin * [frequency[1] - frequency[0]])\nelse:\n    frequency = numpy.array([1e8])\n    channelwidth = numpy.array([1e6])\n \nif dopol:\n    blockvis_pol = 
PolarisationFrame('linear')\n image_pol = PolarisationFrame('stokesIQUV')\n f = numpy.array([100.0, 20.0, -10.0, 1.0])\nelse:\n blockvis_pol = PolarisationFrame('stokesI')\n image_pol = PolarisationFrame('stokesI')\n f = numpy.array([100.0])\n \nif dospectral:\n flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in frequency])\nelse:\n flux = numpy.array([f])\n \nphasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')\nblockvis = ingest_unittest_visibility(low,\n frequency,\n channelwidth,\n times,\n blockvis_pol,\n phasecentre,\n block=block,\n zerow=zerow)\n \nvis = convert_blockvisibility_to_visibility(blockvis)\n\nmodel = create_unittest_model(vis, image_pol, npixel=npixel, nchan=freqwin)\n \ncomponents = create_unittest_components(model, flux)\nmodel = insert_skycomponent(model, components)\n \nblockvis = predict_skycomponent_visibility(blockvis, components)\n#blockvis = dft_skycomponent_visibility(blockvis, components)\n\nblockvis1 = copy_visibility(blockvis)\nvis1 = convert_blockvisibility_to_visibility(blockvis1)\n\n# Calculate the model convolved with a Gaussian.\n \ncmodel = smooth_image(model)\nif persist: export_image_to_fits(model, '%s/test_imaging_2d_model.fits' % rdir)\nif persist: export_image_to_fits(cmodel, '%s/test_imaging_2d_cmodel.fits' % rdir)\n\n\n# In[4]:\n\n\nprint(qa_image(model))\n\n\n# In[5]:\n\n\nplt.rcParams['figure.figsize'] = 10, 10\nshow_image(cmodel)\nplt.savefig(\"cmodel.png\")\n\n\n# In[6]:\n\n\n# Find nw based on w_min, w_max\nw_min = numpy.amin(vis.data['uvw'][:,2])\nw_max = numpy.amax(vis.data['uvw'][:,2])\n\nw_range = 2*numpy.amax((numpy.abs(w_min), numpy.abs(w_max)))\nwstep = 3.0\nnw = numpy.floor(w_range/wstep)\nnw = int(1.1*nw)\nif nw%2 == 0:\n nw = nw+1\nprint(w_min, w_max,w_range, wstep, nw) \n \n\n\n# In[7]:\n\n\n#%timeit dirty_ng,_ = invert_ng(blockvis, model, normalize=True)\n\n\n# In[10]:\n\n\n# Make Rascil kernel\nstart = time.time()\ngcfcf_2d = create_awterm_convolutionfunction(model, make_pb=None, nw=nw, wstep=wstep, oversampling=8,\n support=32, use_aaf=False, maxsupport=512)\nelapsed = time.time() - start\nprint(\"Elapsed time = \", elapsed, \"sec\")\n\n#start = time.time()\n#gcfcf_wt = create_awterm_convolutionfunction(model, make_pb=None, nw=nw, wstep=wstep, oversampling=8,\n# support=32, use_aaf=False, maxsupport=512, wtowers=True)\n#wtkern_invert = gcf2wkern2(gcfcf_wt)\n#elapsed = time.time() - start\n#print(\"Elapsed time = \", elapsed, \"sec\")\n\n#wtkern_predict = gcf2wkern2(gcfcf, conjugate=True)\n\n\n# In[9]:\n\n\n# W-proj invert_wt results\n\n# In[10]:\n\n# In[17]:\n\n\n# W-proj invert_2d results\nstart = time.time()\n#dirty_wt,_ = invert_wt(blockvis, model, normalize=True, wtkern=wtkern_invert)\ndirty_2d,_ = invert_2d(blockvis, model, dopsf=False, normalize=True)\nelapsed = time.time() - start\nprint(\"Elapsed time = \", elapsed, \"sec\")\n\n\nplt.rcParams['figure.figsize'] = 10, 10\nshow_image(dirty_2d, chan=1)\nplt.savefig(\"dirty_invert_2d.png\")\n\n","sub_path":"examples/scripts/test_imaging_invert_comparison.py","file_name":"test_imaging_invert_comparison.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"654490404","text":"from flask_restful import Resource,reqparse\nfrom models.store import StoreModel\n\nclass Store(Resource): \n def get(self,name): \n name = StoreModel.find_by_name(name)\n if name:\n return(name.json(),200) \n return({\"message\":\"Store not found\"},404) 
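\n\n    # Hedged wiring sketch (assumes a flask_restful Api instance created elsewhere in the app):\n    #     api.add_resource(Store, '/store/<string:name>')\n    #     api.add_resource(StoreList, '/stores')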
\n\n def post(self,name):\n if StoreModel.find_by_name(name):\n return({\"message\":\"Store with name {} already exists.\".format(name)},400) \n store = StoreModel(name) \n try: \n store.save_to_db() \n except:\n return({\"message\":\"An error occured while creating the store\"},500)\n return(store.json(),201)\n\n def delete(self,name):\n store = StoreModel.find_by_name(name)\n if store:\n store.delete_from_db()\n return({\"message\":\"Store Deleted\"})\n\nclass StoreList(Resource):\n def get(self):\n return({\"stores\":list(map(lambda x:x.json(),StoreModel.get_stores_from_db()))},200) ","sub_path":"resources/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"557789812","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nimport pickle\nfrom collections import OrderedDict\nimport os\n\n\n# Returns words from a text\ndef get_vector(text):\n ret = \"\"\n stp=[\"!\", \"@\", \"#\", \"|\", \"%\", \"(\", \")\", \"।\", \"—\", \".\", \"-\", \"\", \",\", \"’\", \"•\", \"‘\", \":\", \"*\", \"?\",\n \"০\", \"১\", \"২\", \"৩\", \"৪\", \"৫\", \"৬\", \"৭\", \"৮\", \"৯\"]\n for x in text:\n if x in stp:\n ret = ret + \" \"\n else:\n ret = ret + x\n ret = ret.replace(\" \", \" \")\n ret = ret.replace(\" \", \" \")\n ret = ret.split()\n return ret\n\n\ndef classify(text):\n file = open(\"classes/Blog/Classify/Fruits.obj\", 'rb')\n logisticRegr=pickle.load(file)\n vector = get_vector(text)\n\n features = OrderedDict()\n features_file = open('classes/Blog/Classify/feature.txt', 'r', encoding=\"utf8\")\n for line in features_file:\n features[line.split(',')[0]] = 0\n\n mark = 0\n for v in vector:\n if v in features:\n features[v] = features[v] + 1\n mark = 1\n\n if mark == 0:\n return \"No bangla text found\"\n\n input = []\n for f in features:\n input.append(features[f])\n prediction = logisticRegr.predict([input])\n\n if prediction[0] == 1:\n return 'Bangladesh'\n elif prediction[0] == 2:\n return 'Economy'\n elif prediction[0] == 3:\n return 'Entertainment'\n elif prediction[0] == 4:\n return 'International'\n elif prediction[0] == 5:\n return 'Sports'\n\n","sub_path":"CS_Classroom/classes/Blog/Classify/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"274604922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 22 15:15:37 2017\n\n@author: ckoenigs\n\"\"\"\n\nfrom py2neo import Graph, authenticate\nimport datetime\nimport MySQLdb as mdb\nimport sys\n\n# sys.path.append('../Aeolus/')\nsys.path.append('../aeolus/')\n\n# from synonyms_cuis import search_for_synonyms_cuis\nimport get_drugbank_information\n\nimport xml.dom.minidom as dom\n\n\nclass DrugHetionet:\n \"\"\"\n identifier: string (Drugbank ID)\n name: string\n resource: list\n \"\"\"\n\n def __init__(self, identifier, name, resource):\n self.identifier = identifier\n self.name = name\n self.resource = resource\n\n\nclass DrugNDF_RT():\n \"\"\"\n code: string\n name: string\n properties: list (like synonyms)\n umls_cuis: list\n drugbank_ids: list\n uniis: list\n association:string\n how_mapped: string\n \"\"\"\n\n def __init__(self, code, properties, umls_cuis, name, rxnorm_cuis, association, nui):\n self.code = code\n self.properties = properties\n self.umls_cuis = umls_cuis\n self.drugbank_ids = []\n self.name = name\n 
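# rxnorm_cuis comes from the node properties; uniis and how_mapped are filled in later during mapping\n        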
self.rxnorm_cuis = rxnorm_cuis\n        self.uniis = []\n        self.association = association\n        self.nui = nui\n\n    def set_drugbank_ids(self, drugbank_ids):\n        self.drugbank_ids = drugbank_ids\n\n    def set_how_mapped(self, how_mapped):\n        self.how_mapped = how_mapped\n\n\n# dictionary with compound_id as key and class DrugHetionet as value\ndict_drug_hetionet = {}\n\n# dictionary with code as key and value is class DrugNDF_RT\ndict_drug_NDF_RT = {}\n\n# dictionary with rxcui as key and value is list of codes\ndict_drug_NDF_RT_rxcui_to_code = {}\n\n# dictionary with code as key and value is class DrugNDF_RT\ndict_drug_NDF_RT_without_rxcui = {}\n\n'''\ncreate connection to neo4j and mysql\n'''\n\n\ndef create_connection_with_neo4j_mysql():\n    authenticate(\"localhost:7474\", \"neo4j\", \"test\")\n    global g\n    g = Graph(\"http://localhost:7474/db/data/\")\n\n    # create connection with mysql database\n    global con\n    con = mdb.connect('localhost', 'root', 'Za8p7Tf', 'umls')\n\n    # generate connection to mysql to RxNorm database\n    global conRxNorm\n    conRxNorm = mdb.connect('localhost', 'root', 'Za8p7Tf', 'RxNorm')\n\n\n'''\nload all compounds from hetionet into a dictionary\n'''\n\n\ndef load_hetionet_drug_in():\n    query = '''MATCH (n:Compound) RETURN n.identifier,n.name, n.resource'''\n    results = g.run(query)\n\n    for identifier, name, resource, in results:\n        resource = resource if resource is not None else []\n        drug = DrugHetionet(identifier, name, resource)\n        dict_drug_hetionet[identifier] = drug\n    print('length of compound in hetionet:' + str(len(dict_drug_hetionet)))\n\n\n'''\nload all drugs from ndf-rt into a dictionary and gather the umls cuis and rxcuis\n'''\n\n\ndef load_ndf_rt_drug_in():\n    query = '''MATCH (n:NDF_RT_drug) RETURN n'''\n    results = g.run(query)\n    count = 0\n    i = 0\n    count_name_map = 0\n\n    for result, in results:\n        count += 1\n        code = result['code']\n        properties = result['properties']\n        name = result['name']\n        properties = properties.split(',')\n        association = result['association'] if result['association'] != '' else ''\n        umls_cuis = []\n        rxnorm_cuis = []\n        nui = ''\n        for prop in properties:\n            if prop[0:8] == 'UMLS_CUI':\n                cui = prop\n                umls_cuis.append(cui.split(':')[1])\n            elif prop[0:10] == 'RxNorm_CUI':\n                cui = prop\n                rxnorm_cuis.append(cui.split(':')[1])\n            elif prop[0:4] == 'NUI':\n                nui = prop.split(':')[1]\n        drug = DrugNDF_RT(code, properties, umls_cuis, name, rxnorm_cuis, association, nui)\n        dict_drug_NDF_RT[code] = drug\n        # generate dictionary with rxnorm cui as key and value list of codes\n        if len(rxnorm_cuis) == 1:\n            if not rxnorm_cuis[0] in dict_drug_NDF_RT_rxcui_to_code:\n                dict_drug_NDF_RT_rxcui_to_code[rxnorm_cuis[0]] = [code]\n            else:\n                dict_drug_NDF_RT_rxcui_to_code[rxnorm_cuis[0]].append(code)\n            i += 1\n        elif len(rxnorm_cuis) == 0:\n            cur = conRxNorm.cursor()\n            # search for rxcui with name\n            query = (\"Select Distinct RXCUI From RXNCONSO Where lower(STR) = '%s' ;\")\n            query = query % (name.lower())\n            # print(query)\n            rows_counter = cur.execute(query)\n            if rows_counter > 0:\n                count_name_map += 1\n                rxnorm_cuis = []\n                for cui, in cur:\n                    rxnorm_cuis.append(cui)\n                drug = DrugNDF_RT(code, properties, umls_cuis, name, rxnorm_cuis, association, nui)\n                if len(rxnorm_cuis) == 1:\n                    if not rxnorm_cuis[0] in dict_drug_NDF_RT_rxcui_to_code:\n                        dict_drug_NDF_RT_rxcui_to_code[rxnorm_cuis[0]] = [code]\n                    else:\n                        dict_drug_NDF_RT_rxcui_to_code[rxnorm_cuis[0]].append(code)\n                elif len(rxnorm_cuis) == 0:\n                    dict_drug_NDF_RT_without_rxcui[code] = drug\n                else:\n                    print('multiple rxnorms')\n            else:\n                
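# the name lookup found no rxcui either; record the drug as still unmapped\n                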
dict_drug_NDF_RT_without_rxcui[code] = drug\n\n print('number of all drugs from ndf-rt:' + str(count))\n print('length of compound in ndf-rt with rxcui:' + str(len(dict_drug_NDF_RT_rxcui_to_code)))\n a = True if count != len(dict_drug_NDF_RT_rxcui_to_code) else False\n print('is multiple mapping:' + str(a))\n print('length of compound in ndf-rt without rxcui:' + str(len(dict_drug_NDF_RT_without_rxcui)))\n print('number of name mapped rxcuis:' + str(count_name_map))\n\n\n# list of all rxcuis which are mapped to drugbankids\nlist_rxcuis_with_drugbank_ids = []\n\n# list of cuis which has no drugbank id\nlist_rxcuis_without_drugbank_ids = []\n# list_rxcuis_without_drugbank_ids=['1741407']\n\n\n# list of code which are map to a drugbank id\nlist_codes_with_drugbank_ids = []\n\n'''\nmap rxnorm to drugbank with use of the RxNorm database\n'''\n\n\ndef map_rxnorm_to_drugbank_use_rxnorm_database():\n i = 0\n number_of_mapped = 0\n for rxnorm_cui in dict_drug_NDF_RT_rxcui_to_code.keys():\n i += 1\n cur = conRxNorm.cursor()\n query = (\"Select RXCUI,LAT,CODE,SAB From RXNCONSO Where SAB = 'DRUGBANK' and RXCUI= %s ;\")\n rows_counter = cur.execute(query, (rxnorm_cui,))\n if rows_counter > 0:\n drugbank_ids = []\n for (rxcui, lat, code, sab,) in cur:\n drugbank_ids.append(code)\n drugbank_ids = list(set(drugbank_ids))\n codes = dict_drug_NDF_RT_rxcui_to_code[rxnorm_cui]\n for code in codes:\n dict_drug_NDF_RT[code].set_drugbank_ids(drugbank_ids)\n dict_drug_NDF_RT[code].set_how_mapped('use rxcui to drugbank ids with rxnorm')\n if not rxnorm_cui in list_rxcuis_with_drugbank_ids:\n list_rxcuis_with_drugbank_ids.append(rxnorm_cui)\n if not code in list_codes_with_drugbank_ids:\n number_of_mapped += 1\n list_codes_with_drugbank_ids.append(code)\n else:\n if not rxnorm_cui in list_rxcuis_without_drugbank_ids:\n list_rxcuis_without_drugbank_ids.append(rxnorm_cui)\n\n print('new mapped:' + str(number_of_mapped))\n print('length of list of rxcuis with all drugbank ids from rxnorm:' + str(len(list_rxcuis_with_drugbank_ids)))\n print('length of list of rxcuis without drugbank ids from rxnorm:' + str(len(list_rxcuis_without_drugbank_ids)))\n print('length of list of codes with all drugbank ids from rxnorm:' + str(len(list_codes_with_drugbank_ids)))\n\n\n'''\nload map rxnorm id to drugbank _id from dhimmel inchikey and use this to map the rest\nproperties:\n 0:rxcui\n 1:drugbank ids seperated with |\n'''\n\n\ndef map_use_dhimmel_rxnorm_drugbank_map_unii_inchikey():\n f = open('../RxNorm_to_DrugBank/results/map_rxnorm_to_drugbank_with_use_of_unii_and_inchikey_4.tsv', 'r')\n next(f)\n number_of_mapped = 0\n # list of all rxcuis which are mapped to drugbank id in this step\n delete_list = []\n for line in f:\n splitted = line.split('\\t')\n rxnorm_cui = splitted[0]\n drugbank_ids = splitted[1].split('\\r')[0].split('|')\n if rxnorm_cui in list_rxcuis_without_drugbank_ids:\n codes = dict_drug_NDF_RT_rxcui_to_code[rxnorm_cui]\n for code in codes:\n dict_drug_NDF_RT[code].set_drugbank_ids(drugbank_ids)\n dict_drug_NDF_RT[code].set_how_mapped('use rxcui to drugbank ids with unii and inchikey to drugbank')\n if not rxnorm_cui in list_rxcuis_with_drugbank_ids:\n list_rxcuis_with_drugbank_ids.append(rxnorm_cui)\n delete_list.append(list_rxcuis_without_drugbank_ids.index(rxnorm_cui))\n if not code in list_codes_with_drugbank_ids:\n number_of_mapped += 1\n list_codes_with_drugbank_ids.append(code)\n\n # remove all new mapped rxcuis from not mapped list\n delete_list = list(set(delete_list))\n delete_list.sort()\n delete_list = 
list(reversed(delete_list))\n for index in delete_list:\n list_rxcuis_without_drugbank_ids.pop(index)\n\n print('new mapped:' + str(number_of_mapped))\n print('length of list of rxcuis with all drugbank ids from rxnorm:' + str(len(list_rxcuis_with_drugbank_ids)))\n print('length of list of rxcuis without drugbank ids from rxnorm:' + str(len(list_rxcuis_without_drugbank_ids)))\n print('length of list of codes with all drugbank ids from rxnorm:' + str(len(list_codes_with_drugbank_ids)))\n\n\n# dictionary for unii to code\ndict_unii_to_code = {}\n\n# list of codes without a unii\nlist_codes_without_unii = []\n\n'''\nmake a dictionary for all not mapped codes a with unii as key \n'''\n\n\ndef generate_dict_unii_to_code():\n count_code_unii = 0\n for rxcui in list_rxcuis_without_drugbank_ids:\n codes = dict_drug_NDF_RT_rxcui_to_code[rxcui]\n for code in codes:\n properties = dict_drug_NDF_RT[code].properties\n properties = properties\n has_unii = False\n for prop in properties:\n if prop[0:8] == 'FDA_UNII':\n\n has_unii = True\n unii = prop.split(':')[1]\n dict_drug_NDF_RT[code].uniis.append(unii)\n if not unii in dict_unii_to_code:\n dict_unii_to_code[unii] = [code]\n else:\n dict_unii_to_code[unii].append(code)\n if not has_unii:\n list_codes_without_unii.append(code)\n else:\n count_code_unii += 1\n\n print('number of codes which has a unii:' + str(count_code_unii))\n print('numner of unii:' + str(len(dict_unii_to_code)))\n print('number of codes which has no unii:' + str(len(list_codes_without_unii)))\n\n\n'''\nfind drugbank map with use of unii, map is from drugbank (generate_unii_drugbank_table_with_drugbank.py)\nproperties:\n 0:unii \t \n 1:drugbank_id \n'''\n\n\ndef map_with_unii_to_drugbank():\n f = open('../drugbank/data/map_unii_to_drugbank_id.tsv', 'r')\n next(f)\n # list of all rxcuis which are mapped to drugbank id in this step\n delete_list = []\n number_of_mapped = 0\n for line in f:\n splitted = line.split('\\t')\n unii = splitted[0]\n drugbank_id = splitted[1].split('\\r')[0]\n if unii in dict_unii_to_code:\n codes = dict_unii_to_code[unii]\n for code in codes:\n dict_drug_NDF_RT[code].set_drugbank_ids([drugbank_id])\n dict_drug_NDF_RT[code].set_how_mapped('use unii of the ndf-rt code and map to drugbank id')\n rxnorm_cuis = dict_drug_NDF_RT[code].rxnorm_cuis\n for rxnorm_cui in rxnorm_cuis:\n if not rxnorm_cui in list_rxcuis_with_drugbank_ids:\n list_rxcuis_with_drugbank_ids.append(rxnorm_cui)\n delete_list.append(list_rxcuis_without_drugbank_ids.index(rxnorm_cui))\n if not code in list_codes_with_drugbank_ids:\n number_of_mapped += 1\n list_codes_with_drugbank_ids.append(code)\n\n # delete all new mapped rxcuis from not mapped list\n delete_list = list(set(delete_list))\n delete_list.sort()\n delete_list = list(reversed(delete_list))\n for index in delete_list:\n list_rxcuis_without_drugbank_ids.pop(index)\n\n print('number of new mapped:' + str(number_of_mapped))\n print('length of list of rxcuis with all drugbank ids from rxnorm:' + str(len(list_rxcuis_with_drugbank_ids)))\n print('length of list of rxcuis without drugbank ids from rxnorm:' + str(len(list_rxcuis_without_drugbank_ids)))\n print('length of list of codes with all drugbank ids from rxnorm:' + str(len(list_codes_with_drugbank_ids)))\n\n\n'''\nload map rxnorm id to drugbank _id from drugbank name mapped to rxnorm \nproperties:\n 0:drugbank_id\n 1:rxcui\n'''\n\n\ndef map_use_name_mapped_rxnorm_drugbank():\n f = open('../RxNorm_to_DrugBank/results/name_map_drugbank_to_rxnorm_2.tsv', 'r')\n next(f)\n # list of all 
rxcuis which are mapped to drugbank id in this step\n delete_list = []\n number_of_mapped = 0\n for line in f:\n splitted = line.split('\\t')\n rxnorm_cui = splitted[1].split('\\n')[0]\n if not len(splitted) > 1:\n continue\n drugbank_id = splitted[0]\n if rxnorm_cui in list_rxcuis_without_drugbank_ids:\n codes = dict_drug_NDF_RT_rxcui_to_code[rxnorm_cui]\n for code in codes:\n if len(dict_drug_NDF_RT[code].drugbank_ids) == 0:\n dict_drug_NDF_RT[code].set_drugbank_ids([drugbank_id])\n dict_drug_NDF_RT[code].set_how_mapped('use rxcui to drugbank ids with name mapping')\n if not rxnorm_cui in list_rxcuis_with_drugbank_ids:\n list_rxcuis_with_drugbank_ids.append(rxnorm_cui)\n delete_list.append(list_rxcuis_without_drugbank_ids.index(rxnorm_cui))\n if not code in list_codes_with_drugbank_ids:\n number_of_mapped += 1\n list_codes_with_drugbank_ids.append(code)\n else:\n dict_drug_NDF_RT[code].drugbank_ids.append(drugbank_id)\n\n # remove all new mapped rxcuis from not mapped list\n delete_list = list(set(delete_list))\n delete_list.sort()\n delete_list = list(reversed(delete_list))\n for index in delete_list:\n list_rxcuis_without_drugbank_ids.pop(index)\n\n print('number of new mapped:' + str(number_of_mapped))\n print('length of list of rxcuis with all drugbank ids from rxnorm:' + str(len(list_rxcuis_with_drugbank_ids)))\n print('length of list of rxcuis without drugbank ids from rxnorm:' + str(len(list_rxcuis_without_drugbank_ids)))\n print('length of list of codes with all drugbank ids from rxnorm:' + str(len(list_codes_with_drugbank_ids)))\n\n\n'''\nfind drugbank id by using the ingredient from of drug_kind\nthis is define in the association with name:Product_Component\nThis can be used because drugbank is not so specific with the drugs.\n'''\n\n\ndef map_to_drubank_id_with_ingredient_from():\n # write all drugs which are mapped with this technical in a file\n g = open('ingredients_with_no_drugbank_id_or_not_in_hetionet.tsv', 'w')\n g.write('code \\t name \\t associated code \\t name of associated code \\t why \\n')\n number_of_mapped = 0\n for rxcui in list_rxcuis_without_drugbank_ids:\n codes = dict_drug_NDF_RT_rxcui_to_code[rxcui]\n # list of all codes which are mapped to drugbank id in this step\n delete_mapped_codes = []\n index = 0\n for code in codes:\n index += 1\n associations = dict_drug_NDF_RT[code].association.split(',')\n for association in associations:\n if association[0:17] == 'Product_Component':\n associatied_code = association.split(':')[1]\n if associatied_code in dict_drug_NDF_RT:\n drugbank_ids = dict_drug_NDF_RT[associatied_code].drugbank_ids\n if len(drugbank_ids) > 0:\n dict_drug_NDF_RT[code].set_drugbank_ids(drugbank_ids)\n dict_drug_NDF_RT[code].set_how_mapped('use association to the ingredient from')\n if not code in list_codes_with_drugbank_ids:\n number_of_mapped += 1\n list_codes_with_drugbank_ids.append(code)\n delete_mapped_codes.append(index - 1)\n else:\n g.write(code + '\\t' + dict_drug_NDF_RT[code].name + '\\t' + associatied_code + '\\t' +\n dict_drug_NDF_RT[\n associatied_code].name + ' \\t ingredient also not mapped to drugbank id')\n else:\n g.write(code + '\\t' + dict_drug_NDF_RT[\n code].name + '\\t' + associatied_code + '\\t \\t ingredient not in hetionet')\n # remove all codes from the not mapped list of the rxcui\n delete_mapped_codes = list(set(delete_mapped_codes))\n delete_mapped_codes.sort()\n delete_mapped_codes = list(reversed(delete_mapped_codes))\n for index in delete_mapped_codes:\n dict_drug_NDF_RT_rxcui_to_code[rxcui].pop(index)\n\n 
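# hedged illustration of the association format parsed above: an entry such as\n    # 'Product_Component:<ndf-rt code>' lets a product inherit the drugbank ids of its ingredient\n    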
print('number of new mapped:' + str(number_of_mapped))\n\n print('length of list of codes with all drugbank ids from rxnorm:' + str(len(list_codes_with_drugbank_ids)))\n\n\n# dictionary with all cuis that are not mapped\ndict_cui_to_codes = {}\n\n# list of rxnorms without a cui\nlist_rxnorm_without_cui = []\n\n\n# dictionary umls cuis that are mapped to hetionet, as key umls cui and value is a list of drugbank ids\ndict_map_cui_to_hetionet_drugbank_ids = {}\n\n# list of cuis that are not mapped\nlist_not_map_to_hetionet_with_drugbank_ids = []\n\n# files for the different how_mapped typs\nmap_rxcui = open('drug/ndf_rt_drugs_map_with_rxcui.tsv', 'w')\nmap_rxcui.write('ndf-rt code \\t drugbank_ids with | as seperator \\t name\\n')\n\nmap_with_name = open('drug/ndf_rt_drugs_map_with_name_table.tsv', 'w')\nmap_with_name.write('ndf-rt code \\t drugbank_ids with | as seperator \\t name\\n')\n\nmap_with_unii_inchikey = open('drug/ndf_rt_drugs_map_with_unii_inchikey_table.tsv', 'w')\nmap_with_unii_inchikey.write('ndf-rt code \\t drugbank_ids with | as seperator \\t name\\n')\n\nmap_with_unii = open('drug/ndf_rt_drugs_map_with_unii_table.tsv', 'w')\nmap_with_unii.write('ndf-rt code \\t drugbank_ids with | as seperator \\t name\\n')\n\nmap_with_association_to_ingredient = open('drug/ndf_rt_drugs_map_with_association_to_ingredient.tsv', 'w')\nmap_with_association_to_ingredient.write('ndf-rt code \\t drugbank_ids with | as seperator \\t name\\n')\n\n# dictionary of how_mapped with file as value\ndict_how_mapped_file = {\n 'use rxcui to drugbank ids with rxnorm': map_rxcui,\n 'use rxcui to drugbank ids with name mapping': map_with_name,\n 'use rxcui to drugbank ids with unii and inchikey to drugbank': map_with_unii_inchikey,\n 'use unii of the ndf-rt code and map to drugbank id': map_with_unii,\n 'use association to the ingredient from': map_with_association_to_ingredient}\n\n# generate file with rxnom and a list of drugbank ids and where there are from\nmultiple_drugbankids = open('ndf_rt_multiple_drugbank_ids.tsv', 'w')\nmultiple_drugbankids.write('ndf-rt code \\t drugbank_ids with | as seperator \\t where are it from \\t name\\n')\n\n'''\nmap ndf-rt drug to hetionet drug by using drugbank id\n'''\n\n\ndef map_drug_to_hetionet():\n for code in list_codes_with_drugbank_ids:\n drugbank_ids = dict_drug_NDF_RT[code].drugbank_ids\n one_has_mapped = False\n mapped_drugbanks = []\n name = dict_drug_NDF_RT[code].name\n string_drugbank_ids = \"|\".join(drugbank_ids)\n how_mapped = dict_drug_NDF_RT[code].how_mapped\n\n dict_how_mapped_file[how_mapped].write(code + '\\t' + string_drugbank_ids + '\\t' + name + '\\n')\n\n if len(drugbank_ids) > 1:\n multiple_drugbankids.write(code + '\\t' + string_drugbank_ids + '\\t' + how_mapped + '\\t' + name + '\\n')\n\n for drugbank_id in drugbank_ids:\n if drugbank_id in dict_drug_hetionet:\n one_has_mapped = True\n mapped_drugbanks.append(drugbank_id)\n if one_has_mapped:\n dict_map_cui_to_hetionet_drugbank_ids[code] = mapped_drugbanks\n else:\n list_not_map_to_hetionet_with_drugbank_ids.append(code)\n\n print('number of map to hetionet:' + str(len(dict_map_cui_to_hetionet_drugbank_ids)))\n print('number with drugbank but not mapped to hetionet:' + str(len(list_not_map_to_hetionet_with_drugbank_ids)))\n\n # generate a file with all not mapped ndf-rt drugs\n g = open('drug/drugs_that_did_not_get_a_drugbank_id.tsv', 'w')\n g.write('ndf-rt code \\t rxcuis \\t uniis \\t name\\n')\n for code, drug in dict_drug_NDF_RT.items():\n if not code in list_codes_with_drugbank_ids:\n 
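# gather the identifiers so unmapped NDF-RT drugs can still be inspected by hand\n            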
rxcuis = dict_drug_NDF_RT[code].rxnorm_cuis\n string_rxcui = '|'.join(rxcuis)\n uniis = dict_drug_NDF_RT[code].uniis\n string_uniis = '|'.join(uniis)\n g.write(code + '\\t' + string_rxcui + '\\t' + string_uniis + '\\t' + drug.name + '\\n')\n g.close()\n\n\n# dictionary count of delete of drugbank id from different mapping methods\ndict_how_mapped_delete_counter = {}\n\n'''\nintegrate the ndf-rt drugs into hetionet for the drugs which are map to drugbank and generate a cypher file \na connection between compounds in hetionet and ndf-rt drug.\n'''\n\n\ndef integration_of_ndf_rt_drugs_into_hetionet():\n get_drugbank_information.load_all_drugbank_ids_in_dictionary()\n # count all possible mapped ndf-rt codes\n counter = 0\n # count all ndf-rt codes which has illegal drugbank ids\n counter_illegal_drugbank = 0\n # number of all connection\n counter_drugbank_connection = 0\n # list wiwth all codes which are mapped to only illegal drugbank ids\n delete_code = []\n for code in list_codes_with_drugbank_ids:\n counter += 1\n drugbank_ids = dict_drug_NDF_RT[code].drugbank_ids\n string_drugbank_ids = \"','\".join(drugbank_ids)\n how_mapped = dict_drug_NDF_RT[code].how_mapped\n\n query = '''MATCH (n:NDF_RT_drug{code:'%s'}) \n Set n.drugbank_ids=['%s'], n.how_mapped='%s' '''\n query = query % (code, string_drugbank_ids, dict_drug_NDF_RT[code].how_mapped)\n g.run(query)\n\n index = 0\n delete_index = []\n\n for drugbank_id in drugbank_ids:\n index += 1\n query = '''MATCH (n:Compound{identifier:'%s'}) RETURN n '''\n query = query % (drugbank_id)\n results = g.run(query)\n\n first_result = results.evaluate()\n if first_result == None:\n [name, inchi, inchikey] = get_drugbank_information.get_drugbank_information(drugbank_id)\n if name == '':\n delete_index.append(index - 1)\n continue\n\n query = '''Match (c:Compound) Where lower(c.name)=\"%s\" Return c'''\n query = query % (name.lower())\n results = g.run(query)\n first_entry = results.evaluate()\n if first_entry != None:\n delete_index.append(index - 1)\n continue\n resource = 'NDF-RT'\n url = 'http://www.drugbank.ca/drugs/' + drugbank_id\n query = '''MATCH (n:NDF_RT_drug{code:'%s'}) \n Create (c:Compound{identifier:'%s',ndf_rt:'yes',resource:['%s'], pubChem:\"\", hetionet:'no',sider:'no', license:'CC BY-NC 4.0?', inchikey:\"%s\", inchi:\"%s\", name:\"%s\", source:'DrugBank via NDF-RT' ,url:'%s' })\n Create (c)-[:equal_to_drug_ndf_rt]->(n)\n '''\n query = query % (code, drugbank_id, resource, inchikey, inchi, name, url)\n else:\n resource = first_result['resource'] if 'resource' in first_result else []\n resource.append('NDF-RT')\n resource = list(set(resource))\n resource = \"','\".join(resource)\n query = '''MATCH (n:NDF_RT_drug{code:'%s'}), (c:Compound{identifier:'%s'}) \n Set c.ndf_rt='yes', c.resource=['%s'] \n Create (c)-[:equal_to_drug_ndf_rt]->(n)\n '''\n query = query % (code, drugbank_id, resource)\n counter_drugbank_connection += 1\n g.run(query)\n\n # delete all illegal drugbank ids\n delete_index = list(set(delete_index))\n if len(delete_index) == len(drugbank_ids):\n counter_illegal_drugbank += 1\n delete_code.append(list_codes_with_drugbank_ids.index(code))\n # counte the strategy\n if how_mapped in dict_how_mapped_delete_counter:\n dict_how_mapped_delete_counter[how_mapped] += 1\n else:\n dict_how_mapped_delete_counter[how_mapped] = 1\n for index in delete_index:\n dict_drug_NDF_RT[code].drugbank_ids.pop(index)\n\n # all not mapped compound get as property ndf-rt='no'\n query = ''' Match (c:Compound) Where not exists(c.ndf_rt) \n Set 
c.ndf_rt=\"no\" '''\n g.run(query)\n\n # remove all codes which are only mapped to illegal drugbank ids\n delete_code.sort()\n delete_code = list(reversed(delete_code))\n for index in delete_code:\n list_codes_with_drugbank_ids.pop(index)\n print('number of illegal:' + str(counter_illegal_drugbank))\n print('all mapped drug to drugbank where so of the has not existing drugbank ids:' + str(counter))\n print('number of connection:' + str(counter_drugbank_connection))\n print(dict_how_mapped_delete_counter)\n\n\n# list contra indicates pairs\nlist_contra_indicates_pairs = []\n\n# list induces pairs\nlist_induces_pairs = []\n\n'''\nintegrate the connection: contra_indication and induces into hetionet\nget the information over the path in neo4j (c:Compound)-[:equal_to_drug_ndf_rt]->()-[:ContraIndicates]-(n:NDF_RT_disease)\nor (c:Compound)-[:equal_to_drug_ndf_rt]->()-[:Induces]-(n:NDF_RT_disease)\nthe NDF_RT_disease contains the DO_IDs which are use to connect them.\nAll this information goes into a cypher file.\n'''\n\n\ndef integrate_connection_into_hetionet():\n # count of integrated contra-indication relationship\n count_contra_indicate = 0\n # count of integrated induces relationships\n count_induces = 0\n # count all mapped codes\n count_code = 0\n # count of integrated contra-indication from ndf-rt\n number_of_contra_indication_connection_used = 0\n # count of integrated induces from ndf-rt\n number_of_induces_connection_used = 0\n\n #file counter\n i = 1\n h = open('map_connection_of_ndf_rt_in_hetionet_' + str(i) + '.cypher', 'w')\n h.write('begin \\n')\n i += 1\n\n counter_connection = 0\n\n constrain_number = 20000\n creation_max = 1000000\n\n counter_contraindication_double = 0\n counter_induces_double = 0\n\n for code in list_codes_with_drugbank_ids:\n count_code += 1\n drugbank_ids = dict_drug_NDF_RT[code].drugbank_ids\n nui = dict_drug_NDF_RT[code].nui\n umls_cuis = dict_drug_NDF_RT[code].umls_cuis\n umls_cuis = ','.join(umls_cuis)\n\n # get the do id from the contra indication connection\n query = '''Match (n:NDF_RT_drug{code:'%s'})-[:ContraIndicates]-(b:NDF_RT_disease) Return b.DO_IDs'''\n query = query % (code)\n connections_exist = g.run(query)\n do_ids_list_contra_indication = []\n if connections_exist:\n for do_ids, in connections_exist:\n number_of_contra_indication_connection_used += 1\n for do_id in do_ids:\n do_ids_list_contra_indication.append(do_id)\n\n # get do ids from induces connection\n query = '''Match (n:NDF_RT_drug{code:'%s'})-[:Induces]-(b:NDF_RT_disease) Return b.DO_IDs'''\n query = query % (code)\n connections_exist = g.run(query)\n do_ids_list_induce = []\n if connections_exist:\n for do_ids, in connections_exist:\n number_of_induces_connection_used += 1\n for do_id in do_ids:\n do_ids_list_induce.append(do_id)\n\n url = 'purl.bioontology.org/ontology/NDFRT/' + nui\n\n # go through all mapped frugbank ids and add the connection for this drug into the cypher file\n for drugbank_id in drugbank_ids:\n\n do_ids_list_contra_indication = list(set(do_ids_list_contra_indication))\n for do_id in do_ids_list_contra_indication:\n if not (drugbank_id, do_id) in list_contra_indicates_pairs:\n count_contra_indicate += 1\n query = ''' Match (c:Compound{identifier:'%s'}), (n:Disease{identifier:'%s'})\n Create (c)-[:CONTRA_INDICATES_CcD{source:'NDF-RT',code:\"%s\", ndf_rt:'yes' ,licence:'UMLS',how_often:1,url:\"%s\",umls_cuis:\"%s\"}]->(n);\n '''\n query = query % (drugbank_id, do_id, code, url, umls_cuis)\n list_contra_indicates_pairs.append((drugbank_id, do_id))\n else:\n 
counter_contraindication_double += 1\n continue\n counter_connection += 1\n h.write(query)\n if counter_connection % constrain_number == 0:\n h.write('commit \\n')\n if counter_connection % creation_max == 0:\n h.close()\n h = open('map_connection_of_ndf_rt_in_hetionet_' + str(i) + '.cypher', 'w')\n h.write('begin \\n')\n i += 1\n else:\n h.write('begin \\n')\n\n do_ids_list_induce = list(set(do_ids_list_induce))\n for do_id in do_ids_list_induce:\n if not (drugbank_id, do_id) in list_induces_pairs:\n count_induces += 1\n query = ''' Match (c:Compound{identifier:'%s'}), (n:Disease{identifier:'%s'})\n Create (c)-[:INDUCES_CiD{source:'NDF-RT',code:\"%s\", ndf_rt:'yes', licence:'UMLS',how_often:1,url:\"%s\",umls_cuis:\"%s\"}]->(n);\n '''\n query = query % (drugbank_id, do_id, code, url, umls_cuis)\n counter_connection += 1\n list_induces_pairs.append((drugbank_id, do_id))\n else:\n counter_induces_double += 1\n continue\n h.write(query)\n if counter_connection % constrain_number == 0:\n h.write('commit \\n')\n if counter_connection % creation_max == 0:\n h.close()\n h = open('map_connection_of_ndf_rt_in_hetionet_' + str(i) + '.cypher', 'w')\n h.write('begin \\n')\n i += 1\n else:\n h.write('begin \\n')\n\n h.write('commit')\n h.close()\n print(count_code)\n print('number of contra indications connections:' + str(count_contra_indicate))\n print('number of induces connections:' + str(count_induces))\n print('double of contra indicates connection:' + str(counter_contraindication_double))\n print('double of induces connection:' + str(counter_induces_double))\n\n\ndef main():\n print (datetime.datetime.utcnow())\n print('Generate connection with neo4j and mysql')\n\n create_connection_with_neo4j_mysql()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('Load in diseases from hetionet')\n\n load_hetionet_drug_in()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('Load in diseases from ndf-rt')\n\n load_ndf_rt_drug_in()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('map rxcui to drugbank ids with use of rxnorm')\n\n map_rxnorm_to_drugbank_use_rxnorm_database()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('map rxcui to drugbank ids with use of rxnorm-drugbank table with unii and inchikey from dhimmel')\n\n map_use_dhimmel_rxnorm_drugbank_map_unii_inchikey()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('generate dictionary unii to codes')\n\n generate_dict_unii_to_code()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('map unii to drugbank ids with use of unii-drugbank table')\n\n map_with_unii_to_drugbank()\n\n print(\n '###########################################################################################################################')\n\n print 
(datetime.datetime.utcnow())\n print('map rxcui to drugbank ids with use of rxnorm-drugbank table with name mapping')\n\n map_use_name_mapped_rxnorm_drugbank()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('map with use of the ingredient')\n\n map_to_drubank_id_with_ingredient_from()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('map to hetionet with use of drugbank id')\n\n map_drug_to_hetionet()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('integrate ndf-rt drugs into hetionet')\n\n integration_of_ndf_rt_drugs_into_hetionet()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n print('integrate ndf-rt connection into hetionet')\n\n integrate_connection_into_hetionet()\n\n print(\n '###########################################################################################################################')\n\n print (datetime.datetime.utcnow())\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"mapping_and_merging_into_hetionet/ndf-rt/map_NDF-RT_drug_final.py","file_name":"map_NDF-RT_drug_final.py","file_ext":"py","file_size_in_byte":35284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277078894","text":"import os\nfrom datetime import datetime\n\nimport motor.motor_asyncio\nimport pytest\nfrom bson.objectid import ObjectId\nfrom dotenv import load_dotenv\n\nfrom .dummy_types import loop\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef check_environment():\n load_dotenv()\n if 'MONGO_TEST_DB_URI' not in os.environ:\n raise Exception(\"\\n\\nMONGO_TEST_DB_URI \"\n \"environment variable must be defined\\n\"\n \"eg: MONGO_TEST_DB_URI=\"\n \"mongodb://user:password@localhost:27017/my_test_db\\n\")\n\n\n@pytest.fixture(scope=\"session\")\ndef event_loop():\n local_loop = loop\n yield local_loop\n local_loop.close()\n\n\n@pytest.fixture(scope=\"session\")\ndef mongo_test_db(event_loop):\n client = motor.motor_asyncio.AsyncIOMotorClient(\n os.environ['MONGO_TEST_DB_URI'], io_loop=event_loop\n )\n return client.get_default_database()\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\nasync def inject_test_data(check_environment, mongo_test_db):\n print(\"-------------------\")\n print(\"Injecting test data\")\n print(\"-------------------\")\n await mongo_test_db.drop_collection(\"profiles\")\n await mongo_test_db.drop_collection(\"friends\")\n await mongo_test_db.create_collection(\"profiles\", capped=False, size=100)\n profiles = [\n {\n '_id': ObjectId(\"60f0153c49c512366274ec{:02x}\".format(i)),\n 'first_name': f'user-{i}',\n 'email': f'user-{i}@graphene-is-awesome.com'\n } for i in range(100)\n ]\n await mongo_test_db[\"profiles\"].insert_many(profiles)\n\n await mongo_test_db.create_collection(\"friends\", capped=False, size=30)\n friends = [\n {\n '_id': ObjectId(\"60f01a0b49c512366274ec{:02x}\".format(i)),\n 'user_id': ObjectId(\"60f0153c49c512366274ec00\"),\n 'friend_id': ObjectId(\"60f0153c49c512366274ec{:02x}\".format(i)),\n 
'friend_since': datetime(year=2021, month=3, day=i, hour=0, minute=0, second=0, microsecond=0)\n } for i in range(1, 30)\n ]\n await mongo_test_db[\"friends\"].insert_many(friends)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"576960181","text":"def fact(num):\n factors=[]\n for i in range(2,num):\n if (num % i)==0 :\n num=num//i\n factors.append(i)\n return factors \n\nnum=int(input('input a integer to find its factors\\n'))\nprint(fact(num))\n","sub_path":"math/factors.py","file_name":"factors.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"203611051","text":"\nimport sys\nimport os\nimport subprocess\n#from datetime import datetime\n\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom config import *\n\ndefault_mount_point = '/tmp/tracktb'\n\n\n\n#\n# Functions that are pretty specific to testbeam setup\n#\n\ndef db_file_name(basename, job, status, insert=False, remove=False):\n pathname = os.path.join(dbdir, JOBS.prefix[job], STATUS.prefix[status])\n if not os.path.isdir(pathname):\n os.makedirs(pathname)\n # f = os.path.join(dbdir, basename + '.' + JOBS.prefix[job] + '.' + STATUS.prefix[status])\n filename = basename + '.' + JOBS.prefix[job] + '.' + STATUS.prefix[status]\n f = os.path.join(pathname, filename)\n if insert:\n open(f, 'a').close()\n if remove:\n if os.path.exists(f):\n os.remove(f)\n return\n return f\n\ndef get_job_status(job, filename):\n\tstatus = STATUS.unknown\n\tfor st in range(STATUS.nStatus):\n\t\tfullname = db_file_name(filename, job, st)\n\t\tif os.path.isfile(fullname):\n\t\t status = st\n\treturn status\n\n\ndef get_runs(eos_mounted=True):\n runs = []\n cmd = 'ls -1 %s' % daqdir\n if not eos_mounted:\n cmd = '%s ls %s' % (eos, daqdir)\n output = proc_cmd(cmd)\n for line in output.split():\n if len(line) > 6: # skip non-valid run and files. 
\n continue\n\n run = line.zfill(6)\n if not run.isdigit():\n continue\n\n run = int(run) \n\n if run not in runs:\n runs.append(run)\n\n return runs\n\ndef get_board(dat): \n board = None \n name = dat.split('_')[0]\n if 'PixelTestBoard' in name: \n board = name \n return board \n\ndef parse_datfilename(fname):\n run = 0\n board = 'unknown'\n #Get run and board from filename\n name = fname.rpartition('/')[2] #get part of fname after last '/', this should be the actual file\n splitname = name.split('_spill_')\n if splitname[0].startswith('PixelTestBoard'):\n board = splitname[0]\n run = (splitname[1].split('_'))[0]\n\n return run, board\n\n\ndef mount_eos(mount_point=default_mount_point):\n # if not os.path.exists(mount_point+\"/eos\"):\n # cmd = 'mkdir -p %s/eos' % mount_point\n # proc_cmd(cmd)\n\n cmd = '%s -b fuse mount %s/eos' % (eos, mount_point)\n output = proc_cmd(cmd)\n log.info('%s', output)\n #sys.stdout.write('%s' % output)\n\n global daqdir\n daqdir = os.path.join(mount_point, eosdir)\n\ndef umount_eos(mount_point=default_mount_point):\n global daqdir\n daqdir = '/'+eosdir\n\n if not os.path.exists(mount_point+\"/eos\"):\n log.warning('Cannot find a point at %s to unmount', mount_point)\n #sys.stdout.write('WARNING: Cannot find a point at %s to unmount\\n' % mount_point)\n return\n\n cmd = '%s -b fuse umount %s/eos' % (eos, mount_point)\n output = proc_cmd(cmd)\n #cmd = 'rmdir %s/eos' % mount_point\n #proc_cmd(cmd)\n\ndef get_datfile_names(run, eos_mounted=False):\n log.debug('Getting dat files for run %s', str(run))\n # sys.stdout.write('getting dat files\\n')\n # sys.stdout.flush()\n\n datfiles = []\n datsize = {}\n maxsize = {'PixelTestBoard1':0, 'PixelTestBoard2':0}\n cmd = 'ls -1 %s/%s' % (daqdir, run)\n if not eos_mounted:\n cmd = '%s ls -1 %s/%s' % (eos, daqdir, run)\n output = proc_cmd(cmd)\n #sys.stdout.write('getting datfiles. 
output: %s' % output)\n #sys.stdout.flush()\n \n keyword = '.dat'\n for line in output.split():\n if keyword in line:\n #check if transfer marker exists\n t = os.path.join(daqdir, str(run), tprefix+line)\n if eos_mounted:\n if not os.path.exists(t):\n continue\n else:\n cmd = '%s ls -a %s' % (eos, t)\n stdout, rc = proc_cmd(cmd, get_returncode=True)\n if rc != 0:\n continue\n #check file before adding i\n f = os.path.join(daqdir, str(run), line)\n filesize = get_filesize(f,eos_mounted) \n if filesize > 10: \n # sys.stdout.write('%s : %d\\n' % (line, filesize))\n # sys.stdout.flush()\n\n datsize[line] = filesize\n for s in maxsize:\n if line.startswith(s):\n if filesize > maxsize[s]:\n maxsize[s] = filesize\n #datfiles.append(line)\n\n for key in datsize:\n for s in maxsize:\n if key.startswith(s):\n if datsize[key] == maxsize[s]:\n datfiles.append(key)\n\n # sys.stdout.write('datfiles: %s \\n' % datfiles)\n # sys.stdout.flush()\n\n\n return datfiles\n\ndef cp_dat(dat, copyto_dir):\n if not os.path.exists(copyto_dir):\n os.makedirs(copyto_dir)\n # cmd = \"mkdir -p %s\" % copyto_dir\n # proc_cmd(cmd)\n # srcfile = os.path.join(daqdir, str(run), dat)\n \n #cmd = '%s cp %s %s' %(eos, dat, copyto_dir)\n cmd = 'xrdcp -f root://eoscms//%s %s/' % (dat, copyto_dir)\n log.debug('%s', cmd)\n # sys.stdout.write('%s\\n' % cmd)\n # sys.stdout.flush()\n output, rc = proc_cmd(cmd, get_returncode=True)\n log.debug('%s', output)\n # sys.stdout.write('%s\\n' % output)\n # sys.stdout.flush()\n return rc\n\n\n#\n# Functions that should be fairly generic\n#\n\ndef proc_cmd(cmd, test=False, verbose=1, procdir=None, env=os.environ, get_returncode=False):\n if test:\n sys.stdout.write(cmd+'\\n')\n return \n\n cwd = os.getcwd()\n if procdir != None:\n os.chdir(procdir)\n\n process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, env=env)\n process.wait()\n stdout = process.communicate()[0]\n rc = process.returncode\n if 'error' in stdout:\n log.error(stdout)\n #sys.stdout.write(stdout)\n if procdir != None:\n os.chdir(cwd)\n if get_returncode:\n return stdout, rc\n return stdout\n\ndef get_filesize(f, eos_mounted=True):\n cmd = 'ls -l %s' % f\n if not eos_mounted:\n cmd = '%s ls -l %s' % (eos, f)\n output = proc_cmd(cmd)\n items = output.split()\n try:\n size = items[4]\n except IndexError:\n log.error('no size information: cmd was %s\\n\\toutput was %s\\n\\titems: %s', cmd, output, items)\n #sys.stdout.write('ERROR: no size information: cmd was %s\\n\\toutput was %s\\n\\titems: %s\\n' % (cmd, output, items))\n return 0\n if not size.isdigit(): \n log.warning('not able to get file size')\n #sys.stdout.write('WARNING: not able to get file size \\n')\n raise NameError(output)\n size = int(size)\n return size \n\ndef source_bash(f):\n pipe = subprocess.Popen(\". 
%s; env\" % f, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = {}\n for line in output.splitlines():\n items = line.split(\"=\", 1)\n if len(items) < 2:\n continue\n\n #this is a kluge to fix a problem I'm seeing\n if items[0] == 'module':\n items[1] += '\\n}'\n\n env[items[0]]= items[1]\n return env\n","sub_path":"python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"503789915","text":"from functools import partial\nimport matplotlib.pyplot as plt\nfrom mne.viz.topo import _iter_topography\nimport numpy as np\nfrom scipy.spatial import distance\n\n\ndef plot_dsms(dsms, names=None, items=None, n_rows=1, cmap='viridis',\n title=None):\n \"\"\"Plot one or more DSMs\n\n Parameters\n ----------\n dsms : ndarray | list of ndarray\n The DSM or list of DSMs to plot. The DSMs can either be two-dimensional\n (n_items x n_items) matrices or be in condensed form.\n names : str | list of str | None\n For each given DSM, a name to show above it. Defaults to no names.\n items : list of str | None\n The each item (row/col) in the DSM, a string description. This will be\n displayed along the axes. Defaults to None which means the items will\n be numbered.\n n_rows : int\n Number of rows to use when plotting multiple DSMs at once. Defaults to\n 1.\n cmap : str\n Matplotlib colormap to use. See\n https://matplotlib.org/gallery/color/colormap_reference.html\n for all possibilities. Defaults to 'viridis'.\n title : str | None\n Title for the entire figure. Defaults to no title.\n\n Returns\n -------\n fig : matplotlib figure\n The figure produced by matplotlib\n \"\"\"\n if not isinstance(dsms, list):\n dsms = [dsms]\n\n if isinstance(names, str):\n names = [names]\n if names is not None and len(names) != len(dsms):\n raise ValueError(f'Number of given names ({len(names)}) does not '\n f'match the number of DSMs ({len(dsms)})')\n\n n_cols = int(np.ceil(len(dsms) / n_rows))\n fig = plt.figure(figsize=(2 * n_cols, 2 * n_rows))\n\n ax = fig.subplots(n_rows, n_cols, sharex=True, sharey=True, squeeze=False)\n for row in range(n_rows):\n for col in range(n_cols):\n i = row * n_cols + col % n_cols\n if i < len(dsms):\n dsm = dsms[i]\n if dsm.ndim == 1:\n dsm = distance.squareform(dsm)\n elif dsm.ndim > 2:\n raise ValueError(f'Invalid shape {dsm.shape} for DSM')\n im = ax[row, col].imshow(dsm, cmap=cmap)\n\n if names is not None:\n name = names[i]\n ax[row, col].set_title(name)\n else:\n ax[row, col].set_visible(False)\n\n plt.colorbar(im, ax=ax)\n if title is not None:\n plt.suptitle(title)\n return fig\n\n\ndef _click_func(ax, ch_idx, dsms, cmap):\n \"\"\" Function used to plot a single DSM interactively.\n\n Parameters\n ----------\n ax: matplotlib.Axes.axes\n Axes.axes object on which a new single DSM is plotted.\n ch_idx: int\n Index of a channel.\n dsms: ndarray, shape (n_sensors, n_dsm_datapoint)\n DSMs of MEG recordings; there's one DSM for each sensor.\n cmap: str\n Colormap used for plotting DSMs.\n Check matplotlib.pyplot.imshow for details.\n \"\"\"\n dsm = dsms[ch_idx]\n dsm = distance.squareform(dsm)\n ax.imshow(dsm, cmap=cmap)\n\n\ndef _plot_dsms_topo_timepoint(dsms, info, layout=None, fig=None, title=None,\n axis_facecolor='w', axis_spinecolor='w',\n fig_facecolor='w', figsize=(6.4, 4.8),\n cmap='viridis', show=False):\n \"\"\"Plot DSMs on 2D MEG topography.\n\n Parameters\n ----------\n dsms: ndarray, shape (n_sensors, n_dsm_datapoints)\n DSMs 
of MEG recordings; one DSM for each sensor.\n info: mne.io.meas_info.Info\n Info object that contains meta data of MEG recordings.\n layout: mne.channels.layout.Layout | None\n Layout objects containing sensor layout info.\n The default (``None``) will figure out layout based on info.\n fig: matplotlib.pyplot.Figure | None\n Figure object on which DSMs on 2D MEG topography are plotted.\n The default (``None``) creates a new Figure object.\n title: str | None\n Title of the plot, used only when ``fig=None``.\n The default (``None``) puts no title in the figure.\n axis_facecolor: str\n Face color of the each DSM. Defaults to 'w', white.\n axis_spinecolor: str\n Spine color of each DSM. Defaults to 'w', white.\n fig_facecolor: str\n Face color of the entire topography. Defaults to 'w', white.\n figsize: tuple of float\n Figure size. The first element specify width and the second height.\n Defaults to (6.4, 4.8).\n cmap: str\n Colormap used for plotting DSMs. Defaults to 'viridis'.\n Check :func:`matplotlib.pyplot.imshow` for details.\n show: bool\n Whether to display the generated figure. Defaults to False.\n\n Returns\n -------\n fig: matplotlib.pyplot.Figure\n Figure object in which DSMs are plotted on 2D MEG topography.\n \"\"\"\n on_pick = partial(_click_func, dsms=dsms, cmap=cmap)\n\n if fig is None:\n fig = plt.figure(figsize=figsize)\n if title is not None:\n fig.suptitle(title, x=0.98, horizontalalignment='right')\n else:\n fig = plt.figure(fig.number)\n\n my_topo_plot = _iter_topography(info=info, layout=layout, on_pick=on_pick,\n fig=fig, axis_facecolor=axis_facecolor,\n axis_spinecolor=axis_spinecolor,\n fig_facecolor=fig_facecolor,\n unified=False)\n\n for i, (ax, _) in enumerate(my_topo_plot):\n dsms_i = dsms[i]\n dsms_i = distance.squareform(dsms_i)\n ax.imshow(dsms_i, cmap=cmap)\n\n if show:\n fig.show()\n\n return fig\n\n\ndef plot_dsms_topo(dsms, info, time=None, layout=None, fig=None,\n axis_facecolor='w', axis_spinecolor='w', fig_facecolor='w',\n figsize=(6.4, 4.8), cmap='viridis', show=True):\n \"\"\" Plot DSMs on 2D sensor topography\n\n Parameters\n ----------\n dsms: ndarray | numpy.memmap, shape (n_sensors,[ n_times,] n_dsm_datapts)\n DSMs of MEG/EEG recordings; one DSM for each sensor and time point.\n info: mne.io.meas_info.Info\n Info object that contains meta data of MEG/EEG recordings.\n time: int | [int, int] | None\n A time point (int) or time window ([int, int]) for which DSMs are\n plotted. When a time window is given, averge DSMs for the window are\n plotted. The default (``None``) plots the average DSMs of all the time\n points. Start of the time window is inclusive, while the end is\n exclusive.\n layout: mne.channels.layout.Layout, optional\n Layout objects containing sensor layout info.\n The default, ``layout=None``, will figure out layout based on info.\n fig: matplotlib.pyplot.Figure | None, optional\n Figure object on which DSMs on 2D sensor topography are plotted.\n The default (``None``) creates a new Figure object\n with a title based on time parameter.\n axis_facecolor: str, optional\n Face color of the each DSM. Defaults to 'w', white.\n axis_spinecolor: str, optional\n Spine color of each DSM. Defaults to 'w', white.\n fig_facecolor: str, optional\n Face color of the entire topography. Defaults to 'w', white.\n figsize: tuple of float, optional\n Figure size. The first element specify width and the second height.\n Defaults to (6.4, 4.8).\n cmap: str, optional\n Colormap used for plotting DSMs. 
Defaults to 'viridis'.\n Check :func:`matplotlib.pyplot.imshow` for details.\n show: bool, optional\n Whether to display the generated figure. Defaults to ``True``.\n\n Returns\n -------\n fig: matplotlib.pyplot.Figure\n Figure object in which DSMs are plotted on 2D sensor topography.\n \"\"\"\n if dsms.ndim != 2 and dsms.ndim != 3:\n raise ValueError('dsms have to be a 2D or 3D ndarray or numpy.memmap, '\n '[n_sensors,[ n_times,] n_dsm_datapoints]')\n if len(dsms.shape) == 2:\n dsms = dsms[:, np.newaxis, :]\n if time is None:\n time = [0, dsms.shape[1]]\n if isinstance(time, int):\n time = [time, time + 1]\n if not isinstance(time, list):\n raise TypeError('time has to be int, list of [int, int] or None.')\n if (not all(isinstance(i, int) for i in time)) or (len(time) != 2):\n raise TypeError('time has to be int, list of [int, int] or None.')\n if time[0] >= time[1]:\n raise ValueError('The start of the time window has to be smaller '\n 'than the end of the time window.')\n if time[0] < 0 or time[1] > dsms.shape[1]:\n raise ValueError('The time window is out of range. The minimum is 0 '\n f'and the maximum is {dsms.shape[1]}')\n if (fig is not None) and (not isinstance(fig, plt.Figure)):\n raise TypeError('fig has to be matplotlib.pyplot.Figure or None.')\n\n dsms_cropped = dsms[:, time[0]:time[1], :]\n dsms_avg = dsms_cropped.mean(axis=1)\n # set title to time window\n if time[0] + 1 != time[1]:\n title = f'From {time[0]} (inclusive) to {time[1]} (exclusive)'\n else:\n title = f'Time point: {time[0]}'\n\n fig = _plot_dsms_topo_timepoint(dsms_avg, info, fig=fig, layout=layout,\n title=title,\n axis_facecolor=axis_facecolor,\n axis_spinecolor=axis_spinecolor,\n fig_facecolor=fig_facecolor,\n figsize=figsize, cmap=cmap, show=show)\n return fig\n","sub_path":"mne_rsa/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"598184488","text":"#6/8/2020\r\nimport discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport time\r\n\r\nfrom discord.ext.commands import Bot\r\n\r\nbingo_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]\r\n\r\n\r\nasync def display_board_to_dms(ctx, whole_board, player_no, players):\r\n\r\n list = check(whole_board, player_no)\r\n\r\n for i in range(player_no):\r\n style_1 = \"\"\r\n if list[i] == 0:\r\n await players[i].send(embed=discord.Embed(title=\"B I N G O\"))\r\n elif list[i] == 1:\r\n await players[i].send(embed=discord.Embed(title=\"X I N G O\"))\r\n elif list[i] == 2:\r\n await players[i].send(embed=discord.Embed(title=\"X X N G O\"))\r\n elif list[i] == 3:\r\n await players[i].send(embed=discord.Embed(title=\"X X X G O\"))\r\n elif list[i] == 4:\r\n await players[i].send(embed=discord.Embed(title=\"X X X X O\"))\r\n elif list[i] == 5:\r\n await players[i].send(embed=discord.Embed(title=\"X X X X X\"))\r\n\r\n for row in range(0, 5):\r\n\r\n printing = []\r\n for coloumn in range(0, 5):\r\n if whole_board[i][row][coloumn][1] == 1:\r\n printing.append(\"X \")\r\n\r\n else:\r\n a = whole_board[i][row][coloumn][0]\r\n if len(str(a)) == 1:\r\n printing.append(str(a) + \" \")\r\n else:\r\n printing.append(a)\r\n full_print = \"\"\r\n for x in printing:\r\n full_print = full_print + str(x)\r\n full_print = full_print + \" \"\r\n style_1 = style_1 + full_print + \"\\n\"\r\n #print(style_1)\r\n await players[i].send(style_1)\r\n\r\n\r\ndef horizontal_check(board):\r\n finish = 0\r\n 
for row in range(0, 5):\r\n count = 0\r\n for coloumn in range(0, 5):\r\n\r\n if board[row][coloumn][1] == 1:\r\n count = count + 1\r\n if count == 5:\r\n finish = finish + 1\r\n return int(finish)\r\n\r\n\r\ndef vertical_check(board):\r\n finish = 0\r\n for coloumn in range(0, 5):\r\n count = 0\r\n for row in range(0, 5):\r\n if board[row][coloumn][1] == 1:\r\n count = count + 1\r\n if count == 5:\r\n finish = finish + 1\r\n return int(finish)\r\n\r\n\r\ndef diagonal_check(board):\r\n finish = 0\r\n count = 0\r\n\r\n for r_c in range(0, 5):\r\n if board[r_c][r_c][1] == 1:\r\n count = count + 1\r\n if count == 5:\r\n finish = finish + 1\r\n\r\n count = 0\r\n coloumn = 0\r\n for row in range(4, -1, -1):\r\n if board[row][coloumn][1] == 1:\r\n count = count + 1\r\n coloumn = coloumn + 1\r\n if count == 5:\r\n finish = finish + 1\r\n\r\n return finish\r\n\r\n\r\ndef check(all_board, player_no):\r\n total_finishes = []\r\n for i in range(player_no):\r\n total_finish = vertical_check(all_board[i]) + horizontal_check(all_board[i]) + diagonal_check(all_board[i])\r\n total_finishes.append(total_finish)\r\n print(total_finishes)\r\n\r\n return total_finishes\r\n\r\n\r\ndef bingo_check(all_board, player_no):\r\n\r\n bingo_no = 10\r\n list = check(all_board, player_no)\r\n no = 0\r\n for i in list:\r\n if int(i) == 5:\r\n bingo_no = no\r\n break\r\n\r\n no = no + 1\r\n return bingo_no\r\n\r\n\r\ndef board_gen():\r\n board = [[[[], []], [[], []], [[], []], [[], []], [[], []]], [[[], []], [[], []], [[], []], [[], []], [[], []]],\r\n [[[], []], [[], []], [[], []], [[], []], [[], []]], [[[], []], [[], []], [[], []], [[], []], [[], []]],\r\n [[[], []], [[], []], [[], []], [[], []], [[], []]]]\r\n for row in range(0, 5):\r\n for coloumn in range(0, 5):\r\n # noinspection PyTypeChecker\r\n board[row][coloumn][1] = 0\r\n no = 0\r\n order = 0\r\n random.shuffle(bingo_numbers)\r\n for row in range(0, 5):\r\n for coloumn in range(0, 5):\r\n board[row][coloumn][no] = bingo_numbers[order]\r\n order = order + 1\r\n #print(board)\r\n return board\r\n\r\n\r\ndef cross_no(number, number_of_boards, boards):\r\n for row in range(0, 5):\r\n for coloumn in range(0, 5):\r\n for board_no in range(0, number_of_boards):\r\n if boards[board_no][row][coloumn][0] == number:\r\n boards[board_no][row][coloumn][1] = 1\r\n\r\n\r\ndef print_board(board):\r\n for row in board:\r\n for coloumn in row:\r\n pass\r\n #print(coloumn[0], end=' ')\r\n #print()\r\n\r\n\r\nasync def dm_users(ctx, users, whole_board):\r\n for i in range(len(whole_board)):\r\n await users[i].send(whole_board[i])\r\n\r\n\r\nclass bingo(commands.Cog):\r\n def __init__(self, bot: commands.Bot):\r\n self.bot = bot\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n print('LET THE GAMES BEGIN!!')\r\n\r\n @commands.command()\r\n async def start_bingo(self, ctx):\r\n\r\n whole_board = []\r\n\r\n def check1(m):\r\n return m.channel == ctx.channel and m.author != Bot.user\r\n\r\n game = True\r\n run = True\r\n a = 0\r\n players = []\r\n\r\n await ctx.send(f\"type JOIN to join the game\")\r\n await ctx.send(\"type play to start the game after everyone typed JOIN\")\r\n\r\n while run:\r\n\r\n msg = await self.bot.wait_for('message', check=check1)\r\n \r\n time.sleep(1)\r\n a = a + 1\r\n if msg.content in ['play', 'PLAY', 'start', \"START\"]:\r\n\r\n run = False\r\n\r\n else:\r\n if msg.content == 'JOIN':\r\n await ctx.send(f\"{msg.author.mention} has joined \")\r\n players.append(msg.author)\r\n await ctx.send(\"The participating users are:\")\r\n\r\n 
player_no = len(players)\r\n        for i in range(player_no):\r\n            sub_board = board_gen()\r\n            whole_board.append(sub_board)\r\n\r\n        for i in range(len(players)):\r\n            print(players[i].mention )\r\n            await ctx.send(f\"{players[i].mention}\")\r\n\r\n        chance = 0\r\n        round_no = 0\r\n\r\n        await ctx.send(\"LET THE GAME BEGIN\")\r\n        while game: # main game loop\r\n\r\n            if round_no == 0:\r\n                await display_board_to_dms(ctx, whole_board, player_no, players)\r\n            round_no = round_no + 1\r\n\r\n            await ctx.send(f\"{players[chance].mention} \\'s turn\")\r\n            no_chosen = await self.bot.wait_for('message', check=lambda message: message.author == players[chance])\r\n\r\n            if no_chosen.content not in [\"end\"] and (not no_chosen.content.isdigit() or int(no_chosen.content) not in bingo_numbers):\r\n                await ctx.send(\"pls enter a valid number\")\r\n\r\n            else:\r\n                if chance == (int(player_no) - 1):\r\n                    chance = 0\r\n                else:\r\n                    chance = chance + 1\r\n\r\n                if no_chosen.content == 'end':\r\n                    await ctx.send(f\"The game was ended by {players[chance].mention}\")\r\n                    break\r\n\r\n                cross_no(int(no_chosen.content), player_no, whole_board)\r\n                await display_board_to_dms(ctx, whole_board, player_no, players)\r\n                done = bingo_check(whole_board, player_no)\r\n\r\n                if done != 10:\r\n                    game = False\r\n                    winner = done\r\n                    await ctx.send(f\"GGs THE WINNER IS {players[winner].mention}\")\r\n                    await ctx.send(\"ty for playing :)\")\r\n                    await players[winner].send(\"YOU WON!!!\") \r\n\r\n\r\ndef setup(Bot):\r\n    Bot.add_cog(bingo(Bot))\r\n","sub_path":"cogs/bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":7666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"622029763","text":"from collections import deque\n\n\ndef solution(priorities, location):\n    answer = 0\n    q = deque([(idx, p) for idx, p in enumerate(priorities)])\n\n    while q:\n        idx, p = q.popleft()\n        put_flag = False\n        \n        for p2 in q:\n            if p2[1] > p:\n                put_flag = True\n                break\n        \n        if put_flag:\n            q.append((idx, p))\n        else:\n            answer += 1\n            if location == idx:\n                break\n\n    return answer\n","sub_path":"stack-queue/42587.py","file_name":"42587.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"176263128","text":"## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\n\ndef build(bld):\n    module = bld.create_ns3_module('processing', ['core', 'cc2420'])\n    module.includes = '.'\n    module.source = [\n        'model/program.cc',\n        'model/taskscheduler.cc',\n        'model/thread.cc',\n        'model/hwmodel.cc',\n        'model/peu.cc',\n        'model/sharedresource.cc',\n        'model/membus.cc',\n        'model/interrupt-controller.cc',\n        'model/execenv.cc',\n        'model/execenv-helper.cc',\n        'model/condition.cc',\n        'model/sem.cc',\n        'model/local-state-variable.cc',\n        'model/local-state-variable-queue.cc',\n\n        # OYSTEDAL\n        'model/rrscheduler.cc',\n        'model/sync.cc',\n        # 'model/apic.cc',\n        # 'model/rrpriorityscheduler.cc',\n\n        'model/telosb.cc',\n    ]\n\n    headers = bld(features='ns3header')\n    headers.module = 'processing'\n    headers.source = [\n        'model/program.h',\n        'model/taskscheduler.h',\n        'model/thread.h',\n        'model/hwmodel.h',\n        'model/peu.h',\n        'model/sharedresource.h',\n        'model/membus.h',\n        'model/interrupt-controller.h',\n        'model/execenv.h',\n        'model/execenv-helper.h',\n        'model/condition.h',\n        'model/sem.h',\n        'model/local-state-variable.h',\n        'model/local-state-variable-queue.h',\n\n        # OYSTEDAL\n        'model/rrscheduler.h',\n        'model/sync.h',\n        # 'model/apic.h',\n        # 
'model/rrpriorityscheduler.h',\n\n 'model/telosb.h',\n ]\n\n if bld.env['ENABLE_EXAMPLES']:\n bld.recurse('examples')\n\n bld.ns3_python_bindings()\n","sub_path":"ns-3.19/src/processing/wscript","file_name":"wscript","file_ext":"19/src/processing/wscript","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"473035107","text":"import numpy as np \nfrom sklearn.cluster import KMeans\n\n\ndef loadData(filePath):\n fr = open(filePath, 'r', 1, 'gbk')\n lines = fr.readlines()\n retData = []\n retCityName = []\n for line in lines:\n items = line.strip().split(',')\n retCityName.append(items[0])\n retData.append([float(items[i]) for i in range(1, len(items))])\n return retData, retCityName\n\n\n\ndata_path = '../课程数据/聚类/31省市居民家庭消费水平-city.txt'\ndata, cityName = loadData(data_path)\nn_clusters = int(input('请输入需要聚类数量:'))\nkm = KMeans(n_clusters)\n\nlabel = km.fit_predict(data)\nexpenses = np.sum(km.cluster_centers_, axis=1)\n\nCityCluster = ()\nfor i in range(n_clusters):\n CityCluster += ([],)\n\n\nfor i in range(len(cityName)):\n CityCluster[label[i]].append(cityName[i])\n\nfor i in range(len(CityCluster)):\n print('Expenses: %.2f'%expenses[i])\n print(CityCluster[i])\n","sub_path":"KMeans_city.py","file_name":"KMeans_city.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"462378772","text":"import pandas as pd\nimport numpy as np\nimport bokeh\nfrom bokeh import palettes\nfrom bokeh.models import (\n ColumnDataSource,\n LinearColorMapper,\n CategoricalColorMapper,\n Patches,\n HoverTool,\n Range1d,\n formatters,\n LinearAxis,\n CategoricalAxis,\n Legend,\n )\nfrom bokeh.models.widgets import (\n DataTable,\n NumberFormatter,\n TableColumn,\n Select,\n MultiSelect,\n )\nfrom bokeh.plotting import figure\n\n### Plot Functions Definitions\ndef select_meta(metas_list):\n select = Select(title=\"Meta\", value=\"meta_1\", options=metas_list)\n return select\n\ndef select_indicador(indicadores_list):\n select = Select(title=\"Indicador\", value=\"indicador_1_1\", options=indicadores_list)\n return select\n\ndef mapa(source,color_mapper,plot_width,plot_height):\n mapa_fig = figure(\n tools=\"hover\",\n plot_width=plot_width,\n plot_height=plot_height,\n )\n mapa_fig.grid.grid_line_color = None\n mapa_fig.axis[0].visible=False\n mapa_fig.axis[1].visible=False\n mapa_renderer=mapa_fig.patches(\n 'x', 'y', source=source,\n fill_color={'field': 'indicador_selecionado', 'transform': color_mapper},\n fill_alpha=1.0,\n line_color=\"gray\",\n line_width=0.5,\n )\n mapa_renderer.nonselection_glyph=Patches(\n fill_color={'field': 'indicador_selecionado', 'transform': color_mapper},\n fill_alpha=1.0,\n line_color=\"gray\",\n line_width=0.5,\n )\n mapa_renderer.selection_glyph=Patches(\n fill_color={'field': 'indicador_selecionado', 'transform': color_mapper},\n fill_alpha=1.0,\n line_color=\"gray\",\n line_width=2.0,\n line_alpha=0.8,\n )\n hover_mapa = mapa_fig.select_one(HoverTool)\n hover_mapa.point_policy = \"follow_mouse\"\n hover_mapa.tooltips = [\n (\"Centro Rede CEDES:\", \"@nome_universidade\"),\n ('Indicador','@indicador_selecionado{0.00}'),\n ]\n mapa_fig.toolbar.logo=None\n mapa_fig.axis[0].visible=False\n mapa_fig.toolbar_location=None\n return mapa_fig\n\ndef hbar_indicadores(source,color_mapper,plot_width,plot_height):\n fig=figure(\n y_range=bokeh.models.ranges.FactorRange(factors=source.data['UF'][::-1]),\n 
plot_height=plot_height,\n plot_width=plot_width,\n tools='hover',\n x_range=Range1d(0.0,1.0),\n # title='Indicador'\n x_axis_location='above'\n )\n\n fig.hbar(\n y='UF',\n right='indicador_selecionado',\n height=0.5,\n source=source,\n fill_color={'field': 'indicador_selecionado', 'transform': color_mapper},\n line_color=None,\n )\n\n hover_barras =fig.select_one(HoverTool)\n hover_barras.point_policy = \"follow_mouse\"\n hover_barras.tooltips = [\n (\"UF\", \"@UF\"),\n ('Indicador','@indicador_selecionado{0.00}'),\n ]\n\n fig.toolbar.logo=None\n # fig.toolbar_location='above'\n # fig.toolbar_sticky=False\n fig.grid.grid_line_color = None\n fig.toolbar_location=None\n # fig.x_axis_location='above'\n # fig.axis[1].visible=False\n\n return fig\n\ndef pizza_plot(source,color_mapper,title,plot_width,plot_height):\n fig=figure(\n # x_range=bokeh.models.ranges.FactorRange(factors=source.data['fatores']),\n plot_width=plot_width,\n plot_height=plot_height,\n tools='',\n # y_range=Range1d(0.0,1.0),\n title=title\n )\n\n fig.wedge(\n # x='fatores',\n # top='valores',\n # width=0.5,\n # inner_radius=0,\n # outer_radius=1,\n x=0,\n y=0,\n radius=0.4,\n start_angle='start_angle',\n end_angle='end_angle',\n source=source,\n fill_color={'field': 'fatores', 'transform': color_mapper},\n line_color='grey',\n legend='fatores',\n )\n # \n # hover =fig.select_one(HoverTool)\n # hover.point_policy = \"follow_mouse\"\n # hover.tooltips = [\n # (\"Classificação\", \"@fatores\"),\n # ('Porcentagem','@valores{0.00}'),\n # ]\n\n fig.toolbar.logo=None\n fig.toolbar_location='above'\n fig.toolbar_sticky=True\n fig.grid.grid_line_color = None\n fig.axis[0].visible=False\n fig.axis[1].visible=False\n\n return fig\n\ndef vbar(source,color_mapper,title,plot_width,plot_height):\n fig=figure(\n x_range=bokeh.models.ranges.FactorRange(factors=source.data['fatores']),\n plot_width=plot_width,\n plot_height=plot_height,\n tools='xpan, xwheel_zoom,hover',\n y_range=Range1d(0.0,1.0),\n title=title\n )\n\n fig.vbar(\n x='fatores',\n top='valores',\n width=0.5,\n source=source,\n fill_color={'field': 'fatores', 'transform': color_mapper},\n line_color=None,\n )\n\n hover_barras =fig.select_one(HoverTool)\n hover_barras.point_policy = \"follow_mouse\"\n hover_barras.tooltips = [\n (\"Classificação\", \"@fatores\"),\n ('Porcentagem','@valores{0.00}'),\n ]\n\n fig.toolbar.logo=None\n fig.toolbar_location='above'\n fig.toolbar_sticky=True\n return fig\n\n\ndef table_select(source,plot_width,plot_height):\n columns = TableColumn(field='nome_UF',title='Centro Rede CEDES')\n\n tabela=DataTable(\n source=source,\n columns=[columns],\n row_headers=False,\n fit_columns=True,\n selectable=True,\n width=plot_width,\n height=plot_height,\n )\n return tabela\n\n\ndef vbar_detail(source,color_mapper,plot_width,plot_height):\n fig=figure(\n x_range=bokeh.models.ranges.FactorRange(factors=source.data['fatores']),\n plot_height=plot_height,\n plot_width=plot_width,\n tools='xpan, xwheel_zoom,hover',\n y_range=Range1d(0.0,1.0),\n title='Nenhum Centro Rede-CEDES Selecionado'\n )\n\n fig.vbar(\n x='fatores',\n top='valores',\n width=0.5,\n source=source,\n fill_color={'field': 'valores', 'transform': color_mapper},\n line_color=None,\n )\n fig.xaxis.major_label_orientation = np.pi/4\n hover_barras =fig.select_one(HoverTool)\n hover_barras.point_policy = \"follow_mouse\"\n hover_barras.tooltips = [\n (\"Índice\", \"@fatores\"),\n ('Valor','@valores{0.00}'),\n ]\n\n fig.toolbar.logo=None\n fig.toolbar_location='above'\n fig.toolbar_sticky=True\n 
return fig\n","sub_path":"bokeh_apps/cedes_plots/my_plots.py","file_name":"my_plots.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"406447771","text":"from numpy import linspace, empty, arange, vstack, ones\n\ndef elt_matrices(elt, kappa, w, y):\n hp = elt[1]- elt[0]\n Ap = empty ((2,2))\n Ap[0] = [1, -1]\n Ap[1] = [-1, 1]\n l = 0\n sum = 0\n while(l < len(w)):\n plp = elt[0] + (y[l]*hp)\n sum += w[l] * kappa(plp)* (plp**2)\n l+=1\n Ap = (Ap/hp)*(sum)\n\n Mp = empty ((2,2))\n j = 0\n while(j < 2):\n k = 0\n while(k < 2):\n l = 0\n sum = 0\n while (l < len(w)):\n plp = elt[0] + y[l]*hp\n sum += w[l]*psi(hp, elt, plp, k)* psi(hp, elt, plp, j)*(plp**2)\n l += 1\n Mp[j,k] = sum\n k += 1\n j += 1\n return Ap, Mp*hp\n\ndef psi(hp,elt, plp, flag):\n if (flag == 0):\n return (elt[1]-plp)/hp\n if (flag == 1):\n return (plp-elt[0])/hp\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"247697534","text":"#!python3\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup as parse\n\nfrom pyopenmensa.feed import LazyBuilder, extractDate\n\n\ndef parse_week(url, canteen, type):\n document = parse(urlopen(url).read())\n for day_table in document.find_all('table', 'swbs_speiseplan'):\n caption = day_table.find('th', 'swbs_speiseplan_head').text\n if type not in caption:\n continue\n date = extractDate(caption)\n for meal_tr in day_table.find_all('tr'):\n if not meal_tr.find('td'): # z.B Headline\n continue\n tds = meal_tr.find_all('td')\n category = tds[0].text.strip()\n name = tds[1].text\n if tds[1].find('a', href='http://www.stw-on.de/mensavital'):\n notes = ['MensaVital']\n else:\n notes = []\n prices = {\n 'student': tds[2].text,\n 'employee': tds[3].text,\n 'other': tds[4].text\n }\n canteen.addMeal(date, category, name, notes, prices)\n\n\ndef parse_url(url, today=False, canteentype='Mittagsmensa', this_week='', next_week=True, legend_url=None):\n canteen = LazyBuilder()\n canteen.legendKeyFunc = lambda v: v.lower()\n if not legend_url:\n legend_url = url[:url.find('essen/') + 6] + 'lebensmittelkennzeichnung'\n legend_doc = parse(urlopen(legend_url))\n canteen.setLegendData(\n text=legend_doc.find(id='artikel').text,\n regex=r'(?P(\\d+|[A-Z]+))\\s+=\\s+(?P\\w+( |\\t|\\w)*)'\n )\n parse_week(url + this_week, canteen, canteentype)\n if not today and next_week is True:\n parse_week(url + '-kommende-woche', canteen, canteentype)\n if not today and type(next_week) is str:\n parse_week(url + next_week, canteen, canteentype)\n return canteen.toXMLFeed()\n\n\ndef register_canteens(providers):\n def city(name, prefix='menus/mensa-', legend_url=None, next_week=None, **canteens):\n city_definition = {\n 'handler': parse_url,\n 'prefix': 'http://www.stw-on.de/{}/essen/'.format(name) + prefix,\n 'canteens': {k.replace('_', '-'): v for k, v in canteens.items()}\n }\n if legend_url:\n city_definition['options'] = {'legend_url': legend_url}\n if next_week is not None:\n city_definition.setdefault('options', {})\n city_definition['options']['next_week'] = next_week\n providers[name] = city_definition\n\n city('braunschweig', prefix='menus/',\n mensa1_mittag=('mensa-1', 'Mittagsmensa'),\n mensa1_abend=('mensa-1', 'Abendmensa'),\n mensa360=('360', 'Pizza', '-2', '-nachste-woche'),\n mensa2='mensa-2',\n hbk='mensa-hbk',\n 
legend_url='http://www.stw-on.de/braunschweig/essen/wissenswertes/lebensmittelkennzeichnung')\n    city('clausthal', clausthal='clausthal', next_week='-kommend-woche')\n    city('hildesheim', prefix='menus/',\n         uni='mensa-uni',\n         hohnsen='mensa-hohnsen',\n         luebecker_strasse=('luebecker-strasse', 'Mittagsausgabe'))\n    city('holzminden', hawk='hawk', next_week=False)\n    city('lueneburg', prefix='speiseplaene/',\n         campus='mensa-campus',\n         rotes_feld='rotes-feld')\n    city('suderburg', suderburg='suderburg')\n    city('wolfenbuettel', ostfalia='ostfalia')\n","sub_path":"ostniedersachsen.py","file_name":"ostniedersachsen.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"93402739","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# **********************************************************\n# * Author : Weibin Meng\n# * Email : m_weibin@163.com\n# * Create time : 2018-10-08 02:24\n# * Last modified : 2019-01-24 15:20\n# * Filename : detect_vector_onehot.py\n# * Description :\n'''\n\n'''\n# **********************************************************\n# Load Larger LSTM network and generate text\nimport sys\nfrom keras.layers import BatchNormalization\nimport math\nfrom sklearn.metrics import precision_recall_fscore_support\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Merge\nfrom keras.layers import LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import np_utils\nfrom getWindowsTime import getRawTime\nimport argparse\nimport os\nfrom template2vec import Template2Vec\n\ndef findnewestfile(dir_path):\n    filenames = os.listdir(dir_path)\n    name_ = []\n    time_ = []\n    for filename in filenames:\n        if 'DS' not in filename and 'hdf5' in filename: # the file suffix can be specified here\n            # print filename\n            c_time = os.path.getctime(dir_path+filename)\n\n            # print type(mtime)\n            name_.append(dir_path+filename)\n            time_.append(c_time)\n            # print filename,mtime\n    newest_file = name_[time_.index(max(time_))]\n    #print(name_)\n    #print(time_)\n    # print('new file:',newest_file)\n    return newest_file\n\n\ndef detect_by_vector(para):\n    import time\n    t1=time.time()\n\n    filename = para['test_file']\n    seq_length = para['seq_length'] # l1,l2...l10 -> l_next\n    n_candidates = para['n_candidates']#top n probability of the next tag\n    windows_size = para['windows_size']#hours\n    step_size = para['step_size']# sliding step size of the time window, hours\n    onehot = para['onehot'] # 1 means use onehot, 0 means use template2vec\n    model_filename = para['model_filename']# trained model parameters\n    model_dir = para['model_dir'] # number of templates, must be consistent with training\n    template_index_map_path = para['template_index_map_path']# stores the mapping between template ids and the values in the vector\n    result_file = para['result_file']\n    template_num = para['template_num']\n    label_file = para['label_file'] # label file; in this example the labels are extracted from the log file\n    template2Vec_file = para['template2Vec_file']\n    tempalte_file = para['template_file']\n    count_matrix_flag = para['count_matrix']\n    temp2Vec = Template2Vec(template2Vec_file, tempalte_file)\n    # prediction_file = 'detection_result' # stores the top1 log key anomaly and time window anomaly results\n\n    # if no model_filename is given, pick the newest generated file from the weight/ folder\n    if model_filename == '':\n        model_filename = findnewestfile(model_dir)\n        print('cur_model_filename',model_filename)\n\n\n    template_to_int = {}\n    int_to_template = {}\n    if template_num == 0:\n        # if template_num is 0, create mapping of unique chars to integers from the template sequence file\n        with open(template_index_map_path) as IN:\n            for line in IN:\n                l = line.strip().split()\n                c = l[0]\n                i = 
int(l[1])\n                template_to_int[c] = i\n                int_to_template[i] = c\n    else:\n        # if template_num is not 0, build the mapping from it; ints start at 0, chars start at 1\n        template_to_int = dict((str(i+1), i) for i in range(template_num))\n        int_to_template = dict((i, str(i+1)) for i in range(template_num))\n\n    raw_text = []\n    raw_time_list = []\n    raw_label_list = []\n    with open(filename) as line_IN:\n        with open(label_file) as label_IN:\n            for line, label_line in zip(line_IN, label_IN):\n                l=line.strip().split()\n                if l[1] != '-1' and l[1] !='0' and l[1] in template_to_int:\n                    raw_text.append(l[1])\n                    raw_label_list.append(int(label_line.strip()))\n\n                #raw_text.append(l[1])\n                #raw_time_list.append(int(l[0]))\n\n    # create mapping of unique chars to integers, and a reverse mapping\n    chars = sorted(list(set(raw_text)))\n\n\n\n    # summarize the loaded data\n    n_chars = len(raw_text)\n    n_templates = len(template_to_int)\n    print (\"length of log sequence: \", n_chars)\n    print (\"# of templates: \", n_templates)\n    # prepare the dataset of input to output pairs encoded as integers\n    #dataX = []\n    #dataY = []\n    #timeY = []\n    charX = []\n    label_list = []\n    vectorX = []\n    vectorY = []\n    for i in range(0, n_chars - seq_length, 1):\n        seq_in = raw_text[i:i + seq_length]\n        seq_out = raw_text[i + seq_length]\n        #time_out = raw_time_list[i + seq_length]\n        label_out = raw_label_list[i + seq_length]\n        #dataX.append([template_to_int[char] for char in seq_in])\n        charX.append(seq_in)\n        #dataY.append(template_to_int[seq_out])\n        #timeY.append(time_out)\n        temp_list = []\n        for seq in seq_in:\n            if count_matrix_flag == 0:\n                # no concatenation, use the template vector directly\n                temp_list.append(list(temp2Vec.model[seq]))\n            else:\n                # concatenate the template vector and the count vector\n                cur_count_vector = [0 for i in range(n_templates)]\n                for t in seq_in:\n                    cur_index = template_to_int[t]\n                    cur_count_vector[cur_index]+=1\n                # extend has no return value, but adds the new list's contents to the existing list\n                l =list(temp2Vec.model[seq])\n                l.extend(cur_count_vector)\n                temp_list.append(l)\n        vectorX.append(temp_list)\n        vectorY.append(temp2Vec.model[seq_out])\n        label_list.append(label_out)\n    n_patterns = len(vectorX)\n    print (\"# of patterns: \", n_patterns)\n    #split time into windows\n\n    # reshape X to be [samples, time steps, features]\n    if count_matrix_flag == 0:\n        X = numpy.reshape(vectorX, ( -1, seq_length, temp2Vec.dimension)) #\n    else:\n        X = numpy.reshape(vectorX, ( -1, seq_length, temp2Vec.dimension + n_templates))\n    y = numpy.reshape(vectorY,(-1,temp2Vec.dimension))\n\n\n    # define the LSTM model\n    model_vec = Sequential()\n    model_vec.add(LSTM(128, input_shape=(X.shape[1], temp2Vec.dimension), return_sequences=False))\n    model_vec.add(Dropout(0.2))\n    \n    \n    model_count = Sequential()\n    model_count.add(LSTM(128, input_shape=(X.shape[1], n_templates), return_sequences=False))\n    model_count.add(Dropout(0.2))\n    \n    model = Sequential() \n    model.add(Merge([model_vec,model_count], mode='concat')) \n    \n    if onehot == 0:\n        model.add(Dense(temp2Vec.dimension, activation='softmax'))\n    else:\n        model.add(Dense(n_templates, activation='softmax'))\n    # load the network weights\n    model.load_weights(model_filename)\n    model.compile(loss='mse', optimizer='adam')\n    if onehot ==1:\n        model.compile(loss='categorical_crossentropy', optimizer='adam')\n\n    # detect by tag\n    total=0\n    anomaly_count_dir = {}\n    for i in range(n_candidates):\n        anomaly_count_dir[i+1] = []\n    test1_time = time.time()\n    for x_char,x,aim_y_vector in zip(charX, X, y):\n        total+=1\n        if total%1000 ==0:\n            test2_time = time.time()\n            print(str(total)+'/'+str(len(X)),str( round(100*total/len(X),3) ),'% time:',(test2_time - 
test1_time)/60)\n            test1_time = time.time()\n        aim_y_char = temp2Vec.vector_to_most_similar(aim_y_vector, topn = 1)[0][0]\n        if count_matrix_flag == 0:\n            x = numpy.reshape(x, (1, seq_length, temp2Vec.dimension))\n        else:\n            x = numpy.reshape(x, (1, seq_length, temp2Vec.dimension + n_templates))\n        prediction = model.predict([x[:,:,:temp2Vec.dimension],x[:,:,temp2Vec.dimension:]], verbose=0)[0] # outputs a vector of length len(tags); columns with higher values correspond to the most probable classes\n\n        # get the topn most similar to prediction\n        if onehot == 1: # the dense layer's y is in onehot format\n            for i in range(n_candidates):\n                i += 1\n                top_n_index = prediction.argsort()[-i:]#[-i:]\n                top_n_tag=[int_to_template[index] for index in top_n_index]\n                ## store the 0/1 results in anomaly_count_dir\n                if aim_y_char not in top_n_tag:\n                    anomaly_count_dir[i].append(1)\n                else:\n                    anomaly_count_dir[i].append(0)\n\n        else:# the dense layer's y is in temp2Vec format\n            top_n_tuple = temp2Vec.vector_to_most_similar(prediction, topn=n_candidates)\n            for i in range(n_candidates):\n                i += 1\n                top_n =[t[0] for t in top_n_tuple[:i]] #[-i:]\n                #print(top_n)\n                #top_n_tag=[int_to_template[index] for index in top_n_index]\n                ## store the 0/1 results in anomaly_count_dir\n                if aim_y_char not in top_n:\n                    anomaly_count_dir[i].append(1)\n                else:\n                    anomaly_count_dir[i].append(0)\n\n\n    '''\n    #count by windows\n    window_count_dir = {}\n    time_start=timeY[0]\n    time_end=timeY[-1]\n    windows_num = max(0, int(((time_end - time_start) - windows_size * 3600) / step_size / 3600)) + 1\n    windows_start_time = [time_start+i*step_size*3600 for i in range(windows_num)]\n\n\n    raw_windows_label_list = numpy.zeros(windows_num)\n    for i in range(n_candidates):\n        i += 1\n        window_count_dir[i] = numpy.zeros(windows_num)\n        for cur_time,cur_flag,label in zip(timeY,anomaly_count_dir[i],label_list):\n            cur_index = int(max(0,cur_time - time_start - windows_size * 3600) / step_size / 3600)\n            window_count_dir[i][cur_index] += cur_flag\n            raw_windows_label_list[cur_index] += label\n    windows_label_list = [ 1 if n >=1 else 0 for n in raw_windows_label_list]\n    '''\n\n    '''\n    precision, recall, f1_score, _ = np.array(list(precision_recall_fscore_support(testing_labels, prediction)))[:, 1]\n    print('=' * 20, 'RESULT', '=' * 20)\n    print(\"Precision: %.6f, Recall: %.6f, F1_score: %.6f\" % (precision, recall, f1_score))\n    '''\n\n\n    f = open(result_file,'w')\n\n    print('\\nanomaly detection result:')\n    for i in range(n_candidates):\n        i += 1\n        print('next tag is not in top'+str(i)+' candidates:')\n        # print('# of anomalous/total logs:',str(sum(anomaly_count_dir[i]))+'/'+str(len(anomaly_count_dir[i])))\n\n        precision, recall, f1_score, _ = numpy.array(list(precision_recall_fscore_support(label_list, anomaly_count_dir[i])))[:, 1]\n        print('=' * 20, 'RESULT', '=' * 20)\n\n        tp = 0\n        fp = 0\n        tn = 0\n        fn = 0\n        for a,b, in zip(label_list, anomaly_count_dir[i]):\n            if b == 1 and a == 1:\n                tp += 1\n            if a == 1 and b ==0:\n                fn += 1\n            if a ==0 and b == 0:\n                tn += 1\n            if a==0 and b == 1:\n                fp += 1\n        print(\"Precision: %.6f, Recall: %.6f, F1_score: %.6f\" % (precision, recall, f1_score))\n        print('tp:',tp, 'fn:',fn,'tn:',tn,'fp:',fp,'total:',tp+tn+fp+fn)\n        print('=' * 20, 'RESULT', '=' * 20)\n        f.writelines(str(precision)+' '+str(recall)+'\\n')\n        '''\n        windows_results = [ 1 if n >=1 else 0 for n in window_count_dir[i]]\n        print('# of anomalous/total windows:',str(sum(windows_results))+'/'+str(len(windows_results)))\n        precision, recall, f1_score, _ = numpy.array(list(precision_recall_fscore_support(windows_label_list, windows_results)))[:, 1]\n        print(\"Precision: %.6f, Recall: %.6f, F1_score: %.6f\" % (precision, recall, f1_score))\n        print('')\n        '''\n    f.close()\n    
t2 = time.time()\n    print('testing time:',(t2-t1)/60,'mins')\n    print (\"\\nDone.\")\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-test_file', help='test_file.', type=str, default='../../middle/bgl_log_20w.seq')\n    parser.add_argument('-seq_length', help='seq_length.', type=int, default=10)\n    parser.add_argument('-n_candidates', help='n_candidates.', type=int, default=15)\n    parser.add_argument('-windows_size', help='windows_size.', type=int, default=3)\n    parser.add_argument('-step_size', help='step_size.', type=int, default=1)\n    parser.add_argument('-model_filename', help='you can give a model file.', type=str, default='')\n    parser.add_argument('-model_dir', help='model_dir.', type=str, default='../weights/vector_deeplog/')\n    parser.add_argument('-template_index_map_path', help='template_index_map_path.', type=str, default='./bgl_log_20w_template_to_int.txt')\n    parser.add_argument('-onehot', help='Default 1. 1 means use onehot, 0 means use template2vec',type = int, default = 1)\n    parser.add_argument('-result_file', help='result_file.', type=str, default='../results/bgl_log_20w_log_pr.txt')\n    parser.add_argument('-template_num', help='If 0, it is counted from the input file; otherwise it is set from this input. Default 0', type=int, default=0)\n    parser.add_argument('-label_file', help='label_file.', type=str, default='../../data/bgl2_label_20w')\n    parser.add_argument('-count_matrix', help='Default 0. 1 means use the count matrix, 0 means do not',type = int, default = 0)\n    parser.add_argument('-template2Vec_file', help='template2Vec_file', type=str, default='../../model/bgl_log_20w.template_vector')\n    parser.add_argument('-template_file', help='template_file', type=str, default='../../middle/bgl_log_20w.template')\n\n    args = parser.parse_args()\n\n    para_detect = {\n        'test_file': args.test_file,\n        'seq_length':args.seq_length,\n        'n_candidates': args.n_candidates,\n        'windows_size': args.windows_size,\n        'step_size':args.step_size,\n        'model_dir': args.model_dir,\n        'model_filename': args.model_filename,\n        'template_index_map_path':args.template_index_map_path,\n        'template_num' : args.template_num,\n        'result_file':args.result_file,\n        'label_file':args.label_file,\n        'template2Vec_file': args.template2Vec_file,\n        'template_file': args.template_file,\n        'count_matrix': args.count_matrix,\n        'onehot': args.onehot\n    }\n\n    detect_by_vector(para_detect)\n\n    print('detection finished')\n\n    from keras import backend as K\n    K.clear_session()\n\n\n\n\n\n","sub_path":"LogAnomaly/LogAnomal_BGL/detect_vector_2LSTM_similarity.py","file_name":"detect_vector_2LSTM_similarity.py","file_ext":"py","file_size_in_byte":14415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"622677043","text":"\n# Connects with iana.org/domains/reserved and gets links in a column\n\nimport requests\nfrom bs4 import BeautifulSoup\n\npage_url = 'https://www.iana.org/domains/reserved'\n\n# Download page (is this only html?)\nprint('Getting page...')\npage = requests.get(page_url)\n\nif(page.status_code != 200):\n    print('[' + str(page.status_code) + '] Failed when getting the page')\n    exit(1)\n\n\nsoup = BeautifulSoup(page.content, 'html.parser')\ntable = soup.find_all('tbody')\ndomains = soup.find_all('span', class_='domain label')\nlinks = []\n\nfor domain in domains:\n    if domain.find('a'):\n        links.append(domain.find('a')['href'])\n\nprint(links)\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"293503743","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimport sys, os\nimport glob\nsys.path.append(os.path.abspath(os.path.join('..', 'dataset_iterator')))\nimport dataset_iterator\n\n\n# Python3 program change RGB Color\n# Model to HSV Color Model\ndef rgb_to_hsv(r, g, b):\n # R, G, B values are divided by 255\n # to change the range from 0..255 to 0..1:\n r, g, b = r / 255.0, g / 255.0, b / 255.0\n\n # h, s, v = hue, saturation, value\n cmax = max(r, g, b) # maximum of r, g, b\n cmin = min(r, g, b) # minimum of r, g, b\n diff = cmax - cmin # diff of cmax and cmin.\n\n # if cmax and cmax are equal then h = 0\n if cmax == cmin:\n h = 0\n\n # if cmax equal r then compute h\n elif cmax == r:\n h = (60 * ((g - b) / diff) + 360) % 360\n\n # if cmax equal g then compute h\n elif cmax == g:\n h = (60 * ((b - r) / diff) + 120) % 360\n\n # if cmax equal b then compute h\n elif cmax == b:\n h = (60 * ((r - g) / diff) + 240) % 360\n\n # if cmax equal zero\n if cmax == 0:\n s = 0\n else:\n s = (diff / cmax) * 255\n\n # compute v\n v = cmax * 255\n print(h/2,s,v,type(h))\n return np.array([h/2, s, v])\n\n\nimg = cv.imread('test2.jpg')\n\n# Preprocessing pipeline\n\n# resize image\nimgRe = dataset_iterator.resize(img,10)\n\n# Grayscaling\ngray = cv.cvtColor(imgRe,cv.COLOR_BGR2GRAY)\n\n# Equalize\n# create a CLAHE object (Arguments are optional).\nclahe = cv.createCLAHE(clipLimit=1, tileGridSize=(8,8))\nequ = clahe.apply(gray)\n#equ = cv.equalizeHist(gray)\ncv.imshow('Equalized',equ)\n\n# blur\nblur = cv.GaussianBlur(equ,(7,7),0)\n\n#ret, thresh = cv.threshold(blur,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)\nthresh = cv.adaptiveThreshold(blur,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,cv.THRESH_BINARY_INV,5,2) # o penultimo parametro\n # igual 5 ficou fixe\ncv.imshow('Thresholded',thresh)\n\n# noise removal\nkernel = np.ones((3,3),np.uint8)\n\n\nopening = cv.morphologyEx(thresh,cv.MORPH_OPEN,np.ones((2,3),np.uint8), iterations = 1)\ncv.imshow('opening',opening)\n\nthresh=cv.dilate(opening,kernel,iterations=1)\ncv.imshow('Dilate',thresh)\n\nclosing = cv.morphologyEx(thresh,cv.MORPH_CLOSE,kernel, iterations = 10)\ncv.imshow('Close/Open',closing)\n\n# sure background area\nsure_bg = cv.dilate(closing,kernel,iterations=1)\ncv.imshow('Background',sure_bg)\n\n# Finding sure foreground area\ndist_transform = cv.distanceTransform(closing,cv.DIST_L2,5) # Calcula a distância ao border mais perto\nret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0) # Garante-se aqui que o que está presnete na imagem é foreground, visto\n # que fizemos um distanceTransform para ter uma noção das distâncias a\n # borda e de seguida usamos essa nova imagem com diferenças de intensidades\n #, diretamente relacionadas o quão perto se esta da borda, para remover a\n # parte que se garante que é do forground\ncv.imshow('Foreground',sure_fg)\n\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv.subtract(sure_bg,sure_fg)\ncv.imshow('Subtraction',unknown) # Border em que não se tem a certeza se é mesmo ou não\n\n# Se objetivo fossse, simplesmente, segmentar o Foreground, bastaria usar erosion, que ajuda a separar objetos numa imageman\n# que estejam muito perto um do outro.\n\n# Marker labelling\nret, markers = cv.connectedComponents(sure_fg)\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n# Now, mark the region of unknown with zero\nmarkers[unknown==255] = 0 # Por a border a 0, de forma a que o algoritmo a entenda como 
um area em que não se tem a certeza/desconhecida\n\nprint(markers)\n\nmarkers = cv.watershed(imgRe,markers)\n\n\n#print(imgRe[markers>1])\n# print(imgRe)\nimgRe2 = imgRe.copy()\npositions = np.add(markers > 1, markers == -1) # posicaos das peças completas, incluindo as bordas\nprint(markers[positions])\nimgRe2[markers > 1] = [0,255,0] # As bordas são representadas por -1 e o foregorund é maior que 1, e o backgorund é 1\nimgReNew = imgRe.copy()[markers > 1]\n\ncv.imshow(\"Random\", imgRe)\ncv.imshow(\"Final\", imgRe2)\n\n# Get max color\nmaxColor = np.array([imgReNew.max(0)[0], imgReNew.max(0)[1], imgReNew.max(0)[2]]) # In RGB\n# print(\"\\n\\n\\n\\n\\nMax BGR:\"+str(maxColor), end='\\n\\n\\n\\n', )\n# maxColorHSV = cv.cvtColor(np.uint16([[maxColor]]),cv.COLOR_BGR2HSV)\nmaxColorHSV = rgb_to_hsv(imgReNew.max(0)[2], imgReNew.max(0)[1], imgReNew.max(0)[0])\nfloatVecv = np.vectorize(int)\nprint(\"Max HSV:\"+str(maxColorHSV), end='\\n\\n\\n\\n')\n\n# Get min color\nminColor = np.array([imgReNew.min(0)[0], imgReNew.min(0)[1], imgReNew.min(0)[2]]) # In RGB\n# print(\"Min (BGR):\"+str(minColor), end='\\n\\n\\n\\n')\n# minColorHSV = cv.cvtColor(np.uint16([[minColor]]),cv.COLOR_BGR2HSV)\nminColorHSV = rgb_to_hsv(imgReNew.min(0)[2], imgReNew.min(0)[1], imgReNew.min(0)[0])\nprint(\"Min (HSV):\"+str(minColorHSV), end='\\n\\n\\n\\n')\n\n\nmatrix1 = np.array([minColorHSV,maxColorHSV])\nprint(matrix1)\nprint(matrix1.min(0)[0])\nlower = np.array([matrix1.min(0)[0], matrix1.min(0)[1], matrix1.min(0)[2]])\nhigher = np.array([matrix1.max(0)[0], matrix1.max(0)[1], matrix1.max(0)[2]])\nprint(\"Min (HSV):\"+str(floatVecv(lower)), end='\\n\\n\\n\\n')\nprint(\"Max HSV:\"+str(floatVecv(higher)), end='\\n\\n\\n\\n')\n\n# Get image\nimgRe = cv.cvtColor(imgRe, cv.COLOR_BGR2HSV)\nlast = cv.inRange(imgRe,lower,higher)\ncv.imshow(\"inRange\", last)\n\nk = cv.waitKey(0) # Wait for a key/ esperar por um tecla\nif k == ord(\"g\"):\n cv.imwrite(\"savedImage.png\", imgRe)\nelif k == ord(\"q\"):\n pass\n\n\n","sub_path":"Testing/Ratio/watershed_detect_color.py","file_name":"watershed_detect_color.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"190671373","text":"# Script to simulate reading analog input from the ADS1x15 and save\n# it to a csv file\n# Author: Caleb Vatral\n\nimport csv\nimport time\nimport datetime\nfrom random import randint\n\nwhile True:\n # Read all the ADC channel values in a list.\n valuesa0 = [0]*2 # Initialize 1x2 array of zeroes\n valuesa1 = [0]*2\n valuesa2 = [0]*2\n valuesa3 = [0]*2\n\n # Get the current datetime in a readable string: YYYY-MM-DD HH:mm:SS:ssssss\n valuesa0[0] = str(datetime.datetime.now())\n # Read the specified ADC channel using the previously set gain value\n valuesa0[1] = str(randint(0, 32767))\n\n valuesa1[0] = str(datetime.datetime.now())\n valuesa1[1] = str(randint(0, 32767))\n\n valuesa2[0] = str(datetime.datetime.now())\n valuesa2[1] = str(randint(0, 32767))\n\n valuesa3[0] = str(datetime.datetime.now())\n valuesa3[1] = str(randint(0, 32767))\n\n with open(\"data/dataLog0.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n writer.writerow(valuesa0)\n with open(\"data/dataLog1.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n writer.writerow(valuesa1)\n with open(\"data/dataLog2.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n 
writer.writerow(valuesa2)\n    with open(\"data/dataLog3.csv\", \"a\") as csv_file:\n        writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\n        writer.writerow(valuesa3)\n\n    # Pause for a second.\n    time.sleep(1)\n","sub_path":"src/deprecated_files/readData_test.py","file_name":"readData_test.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"105425340","text":"import requests\n\n\nclass Weather:\n\n    def __init__(self):\n        # Get weather data\n\n        try:\n            request = requests.get('https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20\\\n            where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22New%20Port%20Richey%2C%20\\\n            FL%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=', None)\n            forecast = request.json()['query']['results']['channel']\n            self.location = forecast['location']['city']\n            self.current_temp = forecast['item']['condition']['temp']\n            self.current_condition = forecast['item']['condition']['text']\n            self.today_high = forecast['item']['forecast'][0]['high']\n            self.today_low = forecast['item']['forecast'][0]['low']\n            self.forecasts = forecast['item']['forecast'][1:6]\n            self.menu = 'Outside: ' + self.current_temp + '°'\n            self.body = self.forecasts\n\n        except (requests.exceptions.ConnectionError, ValueError, TypeError):\n            self.body = 'Cannot reach Weather Server'\n            self.current_temp = 'No Connection'\n            self.menu = 'Outside: ' + self.current_temp\n\n        finally:\n            self.id = 'weather'\n            self.url = 'weather'\n\n\n# Keys of interest:\n# Location: [query][results][location][city]\n# Current temp & condition: ['item']['condition']['temp'], ['text']\n# Forecasts: ['item']['forecast']['0-9']['high'], ['low'], ['text']\n","sub_path":"app/services/weather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"601318777","text":"from event import Event\nimport random \n\nclass Told:\n    def __init__(self, events=None, printer_handle=None, bot=None, say=None):\n        self.events = events\n        self.printer = printer_handle\n        self.interests = ['__.told__']\n        self.bot = bot\n        self.say = say\n\n        told = Event(\"__.told__\")\n        told.define(msg_definition=\"^\\\\.told\")\n        told.subscribe(self)\n        self.bot.register_event(told, self)\n\n        self.help = \".told \" \n\n    def get_told_status(self, target):\n        \"\"\"Randomly selects and returns a string with a \"told\" status.\"\"\"\n        status = [\"FUCKING TOLD\",\n                  \"CASH4TOLD.COM\",\n                  \"KNIGHTS OF THE TOLD REPUBLIC\",\n                  \"STONE TOLD STEVE AUSTIN\",\n                  \"CURE FOR THE COMMON TOLD\",\n                  \"BEN TOLDS FIVE\",\n                  \"THE 40 YEAR TOLD VIRGIN\",\n                  \"TOLDENEYE 007\",\n                  \"TEXAS TOLD'EM\",\n                  \"AUSTIN POWERS IN TOLDMEMBER\",\n                  \"PTERODACTOLD\",\n                  \"NO COUNTRY FOR TOLD MEN\",\n                  \"24 CARAT TOLD RING\",\n                  \"ONLY SHOOTING STARS BREAK THE TOLD\",\n                  \"GOING ONCE...GOING TWICE...TOLD!\",\n                  \"GARY TOLDMAN\",\n                  \"TOLD SPICE\",\n                  \"TOLD STONE CREAMERY\",\n                  \"BABY IT'S TOLD OUTSIDE\",\n                  \"POKEMON TOLD AND SILVER\",\n                  \"TOLD YELLER\",\n                  \"EL DORADO: THE LOST CITY OF TOLD\",\n                  \"TOLDPLAY\",\n                  \"BATMAN: THE BRAVE AND THE TOLD\",\n                  \"DANNY DEVITOLD\",\n                  \"FOR WHOM THE BELL TOLDS\",\n                  \"CAN'T TEACH A TOLD DOG NEW TRICKS\",\n                  \"I AIN'T SAYING SHE A TOLD DIGGER\",\n                  \"THE TOLDEN COMPASS\",\n                  \"TOLDIER OF FORTUNE\",\n                  \"TOLDING CHAIR\",\n                  \"TOLDEN AXE\",\n                  \"TOLD MACDONALD HAD A FARM\",\n                  \"TOLDEN TOLDIES: 
HITS FROM THE 50'S, 60'S, AND 70'S\",\n                  \"BATTLETOLDS\",\n                  \"YE TOLDE PUB\",\n                  \"TOLDEN CAULFIELD\",\n                  \"THE TOLD MAN AND THE SEA\",\n                  \"TOLD MEDAL WINNER IN THE WINTER OLYMPICS\",\n                  \"POT OF TOLD AT THE END OF THE RAINBOW\",\n                  \"J.R.R. TOLDKIEN\",\n                  \"CALIFORNIA TOLD RUSH\",\n                  \"THERE'S TOLD IN THEM THAR HILLS\"\n                  ]\n        exclamation = [\"Damn!\",\n                       \"Damn, son!\",\n                       \"Snap!\",\n                       \"Sheeeiiiiittttt.\",\n                       \"Ouch!\"\n                       ]\n        return random.choice(exclamation) + \" %s\\'s told status: [X] \" % target + random.choice(status)\n\n    def handle(self, event):\n        _z = event.msg.split(None, 1)\n        try:\n            self.say(event.channel, self.get_told_status(_z[1]))\n        except IndexError:\n            self.say(event.channel, \"You didn\\'t say who got told!\")\n            self.say(event.channel, self.get_told_status(event.user))\n","sub_path":"modules/told.py","file_name":"told.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"231911689","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nfrom gevent import monkey; monkey.patch_all()\nfrom bottle import route, run, static_file, request, response, get, post, redirect\nimport json, ip_num\nimport pymongo\n\ndef get_data():\n    client=pymongo.MongoClient()\n    db=client.ips_ports\n    dbc=db.ips\n    all_content={}\n    all_content['ips']=[]\n    all_content['ips'].extend(dbc.find(fields={'_id': False}))\n    return all_content\n\n@route('/alarm/<ports_tmp>')\ndef alarm(ports_tmp):\n    response.content_type = 'application/json'\n    all_data = get_data()\n    ports=[int(i) for i in ports_tmp.split('|')]\n    white_list={}\n    white_list['data']={}\n    for i in all_data['ips']:\n        content={}\n        content[i['ip']]={}\n        content[i['ip']]['status']=True\n        content[i['ip']]['value']=[]\n        for j in i['port']:\n            if j not in ports:\n                content[i['ip']]['value'].append(j)\n        if content[i['ip']]['value'] != []:\n            content[i['ip']]['status'] = False\n        white_list['data']=dict(white_list['data'], **content)\n    return json.dumps(white_list,\n                      sort_keys=True, indent=2, separators=(',', ': '))\n\n\n@route('/result')\ndef result():\n    response.content_type = 'application/json'\n    return json.dumps(get_data(),\n                      sort_keys=True, indent=2, separators=(',', ': '))\n\n@get('/filter')\ndef filter():\n    return '''\n
    \n
    \n IP \n port \n \n
    \n Usage:\n

    If only an IP is given, the open-port info for that IP is returned; the format may be a single IP or an IP range, e.g. \"192.168.1.1-192.168.1.255\"

    \n

    If only a port is given, the list of IPs with that port open is returned; the format may be a single port or a port range, e.g. \"22-53\"

    \n

    If both an IP and ports are given, the ports open on that IP range other than the given ones are returned; the IP may be a single IP or an IP range, and the ports may be a single port or several ports separated by a vertical bar \"|\", e.g. \"22|80\"

    \n
    \n '''\n@post('/filter')\ndef do_filter():\n    ip_tmp = request.forms.get('ips')\n    port_tmp = request.forms.get('ports')\n    if ip_tmp != '' and port_tmp == '':\n        ips = ip_tmp.split('-')\n        if len(ips) == 1:\n            if ip_num.isIP(ips[0]):\n                redirect(\"/filter_ip/%s/%s\" % (ips[0],ips[0]))\n            else:\n                return 'Wrong IP'\n        elif len(ips) == 2:\n            if ip_num.isIP(ips[0]) & ip_num.isIP(ips[1]):\n                redirect(\"/filter_ip/%s/%s\" % (ips[0],ips[1]))\n            else:\n                return \"Wrong IP\"\n        else:\n            return 'Wrong IP'\n    elif ip_tmp == '' and port_tmp != '':\n        ports = port_tmp.split('-')\n        if len(ports) ==1:\n            if ip_num.isPort(ports[0]):\n                redirect(\"/filter_port/%s/%s\" % (ports[0],ports[0]))\n            else:\n                return 'Wrong port'\n        elif len(ports) == 2:\n            if ip_num.isPort(ports[0]) and ip_num.isPort(ports[1]):\n                redirect(\"/filter_port/%s/%s\" % (ports[0],ports[1]))\n            else:\n                return 'Wrong port'\n        else:\n            return 'Wrong port'\n\n    elif ip_tmp != '' and port_tmp != '':\n        redirect('/white_list/%s/%s' % (ip_tmp, port_tmp))\n    else:\n        return 'Wrong input'\n\n@route('/filter_ip/<ip_from>/<ip_to>')\ndef do_filter_ip(ip_from, ip_to):\n    #ip_from = request.forms.get('ip_from')\n    #ip_to = request.forms.get('ip_to')\n    json_data=get_data()\n    response.content_type = 'application/json'\n\n    #filter ip_range and return json data\n    if ip_from != '' and ip_to != '':\n        if ip_num.ip2num(ip_from)>ip_num.ip2num(ip_to):\n            ip_from,ip_to=ip_to,ip_from\n        if ip_from == ip_to:\n            content={}\n            content[ip_from]={}\n            content[ip_from]['port']=[]\n            for i in json_data['ips']:\n                if i['ip']==ip_from:\n                    content[ip_from]['port'].extend(i['port'])\n            return json.dumps(content,\n                              sort_keys=True, indent=2, separators=(',', ': '))\n\n        ips_content={}\n        ips_content['data']={}\n        for ip in range(ip_num.ip2num(ip_from), ip_num.ip2num(ip_to)+1):\n            content={}\n            for i in json_data['ips']:\n                if i['ip']==ip_num.num2ip(ip):\n                    content[i['ip']]={}\n                    content[i['ip']]['port']=[]\n                    content[i['ip']]['port'].extend(i['port'])\n\n            ips_content['data']=dict(ips_content['data'], **content)\n\n        return json.dumps(ips_content,\n                          sort_keys=True, indent=2, separators=(',', ': '))\n    return \"Wrong Input\"\n\n@route('/filter_port/<port_from>/<port_to>')\ndef do_filter_port(port_from, port_to):\n    #port_from = request.forms.get('port_from')\n    #port_to = request.forms.get('port_to')\n    json_data=get_data()\n    response.content_type = 'application/json'\n\n    #filter port_range and return json data\n    if port_from != '' and port_to != '':\n        if int(port_from)>int(port_to):\n            port_from,port_to=port_to,port_from\n        if port_from == port_to:\n            content={}\n            content[int(port_from)]=[]\n            for i in json_data['ips']:\n                for j in i['port']:\n                    if j==int(port_from):\n                        content[int(port_from)].append(i['ip'])\n            return json.dumps(content,\n                              sort_keys=True, indent=2, separators=(',', ': '))\n\n        ports_content={}\n        ports_content['data']={}\n        for port in range(int(port_from), int(port_to)+1):\n            content={}\n            content[port]=[]\n            for i in json_data['ips']:\n                for j in i['port']:\n                    if j==int(port):\n                        content[j].append(i['ip'])\n            if content[port] == []:\n                continue\n            ports_content['data']=dict(ports_content['data'], **content)\n        return json.dumps(ports_content,\n                          sort_keys=True, indent=2, separators=(',', ': '))\n    return \"Wrong Input\"\n\n@route('/white_list/<ips_tmp>/<ports_tmp>')\ndef do_white_list(ips_tmp, ports_tmp):\n    #ips_tmp=request.forms.get('ips')\n    #ports_tmp=request.forms.get('ports')\n    json_data=get_data()\n    response.content_type = 'application/json'\n    white_list={}\n    white_list['data']={}\n    if ips_tmp!='' and ports_tmp!='':\n        ips=ips_tmp.split('-')\n        ports=[int(i) for i in 
ports_tmp.split('|')]\n if len(ips)==1:\n if ip_num.isIP(ips[0]) == False:\n return 'Wrong IP'\n content={}\n content['ip']=ips[0]\n content['port']=[]\n for i in json_data['ips']:\n if i['ip']==ips[0]:\n for j in i['port']:\n if j not in ports:\n content['port'].append(j)\n return json.dumps(content,\n sort_keys=True, indent=2, separators=(',', ': '))\n\n if False == (ip_num.isIP(ips[0]) and ip_num.isIP(ips[1])):\n return 'Wrong IP'\n if ip_num.ip2num(ips[0])>ip_num.ip2num(ips[1]):\n ips[0],ips[1]=ips[1],ips[0]\n for ip in range(ip_num.ip2num(ips[0]),ip_num.ip2num(ips[1]) + 1):\n for i in json_data['ips']:\n if i['ip']==ip_num.num2ip(ip):\n content={}\n content[i['ip']]={}\n content[i['ip']]['port']=[]\n for j in i['port']:\n if j not in ports:\n content[i['ip']]['port'].append(j)\n white_list['data']=dict(white_list['data'], **content)\n return json.dumps(white_list,\n sort_keys=True, indent=2, separators=(',', ': '))\n return \"Wrong Input\"\n\n\nif __name__ == '__main__':\n run(host='0.0.0.0', port=8080, server='gevent', debug=True, reloader=True)\n\n\n","sub_path":"port_scan/celery/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":7989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"445673182","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# _ _ _____ _______ _ \n# | | | | /\\ | __ \\ |__ __| | |\n# | |__| | / \\ | |__) |__ _ __ | | ___ ___ | |\n# | __ | / /\\ \\ | ___/ _ \\ '_ \\| |/ _ \\ / _ \\| |\n# | | | |/ ____ \\| | | __/ | | | | (_) | (_) | |\n# |_| |_/_/ \\_\\_| \\___|_| |_|_|\\___/ \\___/|_| \n#\n# Hacker Adana - Since 2012\n# HackerAdana@Protonmail.com\n# HackerAdanaBlog.Blogspot.com\n# Github: @HackerAdana\n# Twitter: @HackerAdana\n# BTC: 1ADANAWbkPVnn1cfW26HTq7z8f6Z7K2yw\nimport jack\nimport paket\nimport os\n\ndef asistan():\n\tjack.rootkontrol()\n\tos.system(\"clear\")\n\tjack.banner()\n\tprint (jack.yesil + \" Popüler Uygulamalar\t\t\t\tKategoriler\" + jack.temizle)\n\tprint (\" [1] Metasploit-framework\t\t[A] Bilgi Toplama Araçları\")\n\tprint (\" [2] Sqlmap \t\t\t\t[B] Zafiyet Tarama Araçları\")\n\tprint (\" [3] Hapf \t\t\t\t[C] Kablosuz Saldırı Araçları\")\n\tprint (\" [4] Nmap \t\t\t\t[D] Parola Saldırı Araçları\")\n\tprint (\" [5] Katana-Framework\")\n\tprint (\" [6] Uniscan\")\n\tprint (\" [7] Recon-ng\")\n\tprint (jack.kalin + \"\\n [H] Yardım\")\n\tprint (\" [Q] Çıkış\" + jack.temizle)\n\tgirisraw = raw_input('\\033[1;32m Hapentool> \\033[0m')\n\t\n\n##  Bilgi Toplama Araçları Baslangıcı\n\tif girisraw\t== 'a' or girisraw == 'A':\n\t\tos.system(\"clear\")\n\t\tjack.banner()\n\t\tprint (\" [1] Dnsrecon \t\t\t\t[32] TLSSLed\")\n\t\tprint (\" [2] Dmitry \t\t\t\t[33] Smtp-user-enum\")\n\t\tprint (\" [3] Dnsenum \t\t\t\t[34] Twofi\")\n\t\tprint (\" [4] TheHarvester \t\t\t[35] p0f\")\n\t\tprint (\" [5] Automater \t\t\t\t[36] cisco-torch\")\n\t\tprint (\" [6] WhatWeb \t\t\t\t[37] Thc-ipv6\")\n\t\tprint (\" [7] Wafw00f \t\t\t\t[38] WAScan\")\n\t\tprint (\" [8] URLCrazy\")\n\t\tprint (\" [9] Bing-ip2hosts\")\n\t\tprint (\" [10] Nmap & Zenmap\")\n\t\tprint (\" [11] Metagoofil\")\n\t\tprint (\" [12] DotDotPwn\")\n\t\tprint (\" [13] Fierce\")\n\t\tprint (\" [14] Masscan\")\n\t\tprint (\" [15] Hapf\")\n\t\tprint (\" [16] Recon-ng\")\n\t\tprint (\" [17] Copy-router-config\")\n\t\tprint (\" [18] Dnstracer\")\n\t\tprint (\" [19] Miranda\")\n\t\tprint (\" [20] Parsero\")\n\t\tprint (\" [21] Enum4linux\")\n\t\tprint (\" [22] Fierce\")\n\t\tprint (\" [23] Braa\")\n\t\tprint (\" [24] 
Fragrouter\")\n\t\tprint (\" [25] Goofile\")\n\t\tprint (\" [26] Enumiax\")\n\t\tprint (\" [27] Snmp-check\")\n\t\tprint (\" [28] Sslcaudit\")\n\t\tprint (\" [29] Sslsplit\")\n\t\tprint (\" [30] Sslstrip\")\n\t\tprint (\" [31] Sslyze\")\n\t\tprint (jack.kalin + \"\\n [A] Hepsi\")\n\t\tprint (\" [G] Geri\")\n\t\tprint (\" [Q] Çıkış\" + jack.temizle)\n\t\tgirisraw2 = raw_input('\\033[1;32m Hapentool/Bilgi Toplama Araçları> \\033[0m')\n\t\tif girisraw2 == '1':\n\t\t\tpaket.dnsrecon()\n\n\t\telif girisraw2 == '2':\n\t\t\tpaket.dmitry()\n\n\t\telif girisraw2 == '3':\n\t\t\tpaket.dnsenum()\n\n\t\telif girisraw2 == '4':\n\t\t\tpaket.theharvester()\n\n\t\telif girisraw2 == '5':\n\t\t\tpaket.automater()\n\n\t\telif girisraw2 == '6':\n\t\t\tpaket.whatweb()\n\n\t\telif girisraw2 == '7':\n\t\t\tpaket.wafw00f()\n\n\t\telif girisraw2 == '8':\n\t\t\tpaket.urlcrazy()\n\n\t\telif girisraw2 == '9':\n\t\t\tpaket.bing_ip2hosts()\n\n\t\telif girisraw2 == '10':\n\t\t\tpaket.nmap()\n\n\t\telif girisraw2 == '11':\n\t\t\tpaket.metagoofil()\n\n\t\telif girisraw2 == '12':\n\t\t\tpaket.dotdotpwn()\n\n\t\telif girisraw2 == '13':\n\t\t\tpaket.fierce()\n\n\t\telif girisraw2 == '14':\n\t\t\tpaket.masscan()\n\n\t\telif girisraw2 == '15':\n\t\t\tpaket.hapf()\n\n\t\telif girisraw2 == '16':\n\t\t\tpaket.recon_ng()\n\n\t\telif girisraw2 == '17':\n\t\t\tpaket.copy_router_config()\n\n\t\telif girisraw2 == '18':\n\t\t\tpaket.dnstracer()\n\n\t\telif girisraw2 == '19':\n\t\t\tpaket.miranda()\n\n\t\telif girisraw2 == '20':\n\t\t\tpaket.parsero()\n\n\t\telif girisraw2 == '21':\n\t\t\tpaket.enum4linux()\n\n\t\telif girisraw2 == '22':\n\t\t\tpaket.fierce()\n\n\t\telif girisraw2 == '23':\n\t\t\tpaket.braa()\n\n\t\telif girisraw2 == '24':\n\t\t\tpaket.fragrouter()\n\n\t\telif girisraw2 == '25':\n\t\t\tpaket.goofile()\n\n\t\telif girisraw2 == '26':\n\t\t\tpaket.enumiax()\n\n\t\telif girisraw2 == '27':\n\t\t\tpaket.snmp-check()\n\n\t\telif girisraw2 == '28':\n\t\t\tpaket.sslcaudit()\n\n\t\telif girisraw2 == '29':\n\t\t\tpaket.sslsplit()\n\n\t\telif girisraw2 == '30':\n\t\t\tpaket.sslstrip()\n\n\t\telif girisraw2 == '31':\n\t\t\tpaket.sslyze()\n\n\t\telif girisraw2 == '32':\n\t\t\tpaket.tlssled()\n\n\t\telif girisraw2 == '33':\n\t\t\tpaket.smtp_user_enum()\n\n\t\telif girisraw2 == '34':\n\t\t\tpaket.twofi()\n\n\t\telif girisraw2 == '35':\n\t\t\tpaket.p0f()\n\n\t\telif girisraw2 == '36':\n\t\t\tpaket.cisco_torch()\n\n\t\telif girisraw2 == '37':\n\t\t\tpaket.thc_ipv6()\n\n\t\telif girisraw2 == '38':\n\t\t\tpaket.wascan()\n\n\t\telif girisraw2 == 'a' or girisraw2 == 'A':\n\t\t\tpaket.bilgitoplamatumu() \n\n\t\telif girisraw2 == 'G' or girisraw2 == 'g':\n\t\t\tasistan()\n\n\t\telif girisraw2 == 'q' or girisraw2 == 'Q':\n\t\t\texit()\n## Bilgi Toplama Araçları SONU\n\n\t\t#####\n\n## Zafiyet Tarama Araçları Başlangıcı\n\tif girisraw == 'b' or girisraw == 'B':\n\t\tos.system(\"clear\")\n\t\tjack.banner()\n\t\tprint (\" [1] BBQSQL \t\t\t\t[21] Sfuzz\")\n\t\tprint (\" [2] Sqlmap \t\t\t\t[22] Sidguesser\")\n\t\tprint (\" [3] Nmap \t\t\t\t[23] SIPArmyKnife\")\n\t\tprint (\" [4] BED \t\t\t\t[24] Sqlninja\")\n\t\tprint (\" [5] Cisco-auditing-tool \t\t[25] Sqlsus\")\n\t\tprint (\" [6] Cisco-global-exploiter \t\t[26] Thc-ipv6\")\n\t\tprint (\" [7] Cisco-ocs \t\t\t\t[27] Tnscmd10g\")\n\t\tprint (\" [8] Cisco-torch \t\t\t[28] Unix-privesc-check\")\n\t\tprint (\" [9] Copy-router-config\")\n\t\tprint (\" [10] DBPwAudit\")\n\t\tprint (\" [11] Doona\")\n\t\tprint (\" [12] DotDotPwn\")\n\t\tprint (\" [13] HexorBase\")\n\t\tprint (\" [14] Inguma\")\n\t\tprint 
(\" [15] Jsql\")\n\t\tprint (\" [16] Lynis\")\n\t\tprint (\" [17] Nmap\")\n\t\tprint (\" [18] Ohrwurm\")\n\t\tprint (\" [19] Oscanner\")\n\t\tprint (\" [20] Powerfuzzer\")\n\t\tprint (jack.kalin + \"\\n [A] Hepsi\")\n\t\tprint (\" [G] Geri\")\n\t\tprint (\" [Q] Çıkış\" + jack.temizle)\n\t\tgirisraw3 = raw_input('\\033[1;32m Hapentool/Zafiyet Tarama Araçları> \\033[0m')\t\t\n\n\t\tif girisraw3 == '1':\n\t\t\tpaket.bbqsql()\n\n\t\telif girisraw3 == '2':\n\t\t\tpaket.sqlmap()\n\n\t\telif girisraw3 == '3':\n\t\t\tpaket.nmap()\n\n\t\telif girisraw3 == '4':\n\t\t\tpaket.bed()\n\n\t\telif girisraw3 == '5':\n\t\t\tpaket.cisco_auditing_tool()\n\n\t\telif girisraw3 == '6':\n\t\t\tpaket.cisco_global_exploiter()\n\n\t\telif girisraw3 == '7':\n\t\t\tpaket.cisco_ocs()\n\n\t\telif girisraw3 == '8':\n\t\t\tpaket.cisco_torch()\n\n\t\telif girisraw3 == '9':\n\t\t\tpaket.copy_router_config()\n\n\t\telif girisraw3 == '10':\n\t\t\tpaket.dbpwaudit()\n\n\t\telif girisraw3 == '11':\n\t\t\tpaket.doona()\n\n\t\telif girisraw3 == '12':\n\t\t\tpaket.dotdotpwn()\n\n\t\telif girisraw3 == '13':\n\t\t\tpaket.hexorbase()\n\n\t\telif girisraw3 == '14':\n\t\t\tpaket.inguma()\n\n\t\telif girisraw3 == '15':\n\t\t\tpaket.jsql()\n\n\t\telif girisraw3 == '16':\n\t\t\tpaket.lynis()\n\n\t\telif girisraw3 == '17':\n\t\t\tpaket.nmap()\n\n\t\telif girisraw3 == '18':\n\t\t\tpaket.ohrwurm()\n\n\t\telif girisraw3 == '19':\n\t\t\tpaket.oscanner()\n\n\t\telif girisraw3 == '20':\n\t\t\tpaket.powerfuzzer()\n\n\t\telif girisraw3 == '21':\n\t\t\tpaket.sfuzz()\n\n\t\telif girisraw3 == '22':\n\t\t\tpaket.sidguesser()\n\n\t\telif girisraw3 == '23':\n\t\t\tpaket.siparmyknife()\n\n\t\telif girisraw3 == '24':\n\t\t\tpaket.sqlninja()\n\n\t\telif girisraw3 == '25':\n\t\t\tpaket.sqlsus()\n\n\t\telif girisraw3 == '26':\n\t\t\tpaket.thc_ipv6()\n\n\t\telif girisraw3 == '27':\n\t\t\tpaket.tnscmd10g()\n\n\t\telif girisraw3 == '28':\n\t\t\tpaket.unix_privesc_check()\n\n\t\telif girisraw3 == 'a' or girisraw3 == 'A':\n\t\t\tpaket.ZafiyetT_AraclariTUMU() \n\n\t\telif girisraw3 == 'G' or girisraw3 == 'g':\n\t\t\tasistan()\n\n\t\telif girisraw3 == 'q' or girisraw3 == 'Q':\n\t\t\texit()\n## Zafiyet Tarama Araçları SONU\n\n\t\t#######\n\n## Kablosuz Saldırı Araçları BASLANGICI\n\tif girisraw\t== 'c' or girisraw == 'C':\n\t\tos.system(\"clear\")\n\t\tjack.banner()\n\t\tprint (\" [1] Aircrack-ng\")\n\t\tprint (\" [2] Airbase-ng\")\n\t\tprint (\" [3] Airdecap-ng\")\n\t\tprint (\" [4] Airdecloak-ng\")\n\t\tprint (\" [5] Aireplay-ng\")\n\t\tprint (\" [6] Airmon-ng\")\n\t\tprint (\" [7] Airodump-ng\")\n\t\tprint (\" [8] Airolib-ng\")\n\t\tprint (\" [9] Airserv-ng\")\n\t\tprint (\" [10] Airtun-ng\")\n\t\tprint (\" [11] Asleap\")\n\t\tprint (\" [12] Besside-ng\")\n\t\tprint (\" [13] Bluemaho\")\n\t\tprint (\" [14] Bluepot\")\n\t\tprint (\" [15] Blueranger\")\n\t\tprint (\" [16] Bully\")\n\t\tprint (\" [17] coWPAtty\")\n\t\tprint (\" [18] Crackle\")\n\t\tprint (\" [19] eapmd5pass\")\n\t\tprint (\" [20] Fern Wifi Cracker\")\n\t\tprint (\" [21] Ghost Phisher\")\n\t\tprint (\" [22] GISKismet\")\n\n\t\tprint (jack.kalin + \"\\n [A] Hepsi\")\n\t\tprint (\" [G] Geri\")\n\t\tprint (\" [Q] Çıkış\" + jack.temizle)\n\t\tgirisraw4 = raw_input('\\033[1;32m Hapentool/Kablosuz Saldırı Araçları> \\033[0m')\n\t\tif girisraw4 == '1' or girisraw4 == '2' or girisraw4 == '3' or girisraw4 == '4':\n\t\t\tpaket.aircrack_ng()\n\n\t\telif girisraw4 == '5' or girisraw4 == '6' or girisraw4 == '7' or girisraw4 == '8':\n\t\t\tpaket.aircrack_ng()\n\t\t\n\t\telif girisraw4 == '9' or girisraw4 == '10' or 
girisraw4 == '12':\n\t\t\tpaket.aircrack_ng()\n\n\t\telif girisraw4 == '11':\n\t\t\tpaket.asleap()\n\t\t\n\t\telif girisraw4 == '13':\n\t\t\tpaket.bluemaho()\n\n\t\telif girisraw4 == '14':\n\t\t\tpaket.bluepot()\n\n\t\telif girisraw4 == '15':\n\t\t\tpaket.blueranger()\n\n\t\telif girisraw4 == '16':\n\t\t\tpaket.bully()\n\n\t\telif girisraw4 == '17':\n\t\t\tpaket.cowpatty()\n\n\t\telif girisraw4 == '18':\n\t\t\tpaket.crackle()\n\n\t\telif girisraw4 == '19':\n\t\t\tpaket.eapmd5pass()\n\n\t\telif girisraw4 == '20':\n\t\t\tpaket.fern_wifi_cracker()\n\n\t\telif girisraw4 == '21':\n\t\t\tpaket.ghost_phisher()\n\n\t\telif girisraw4 == '22':\n\t\t\tpaket.giskismet()\n\n\t\telif girisraw4 == 'a' or girisraw4 == 'A':\n\t\t\tpaket.Kablosuz_Araclar_Tumu()\n\n\t\telif girisraw4 == 'G' or girisraw4 == 'g':\n\t\t\tasistan()\n\n\t\telif girisraw4 == 'q' or girisraw4 == 'Q':\n\t\t\texit()\n# Kablosuz Saldırı Araçları SONU\n\n\t\t#####\n\n# Parola Saldırı Araçları BASLANGICI\n\n\tif girisraw == 'd' or girisraw == 'D':\n\t\tos.system(\"clear\")\n\t\tjack.banner()\n\t\tprint (\" [1] Acccheck\")\n\t\tprint (\" [2] Brutespray\")\n\t\tprint (\" [3] CeWL\")\n\t\tprint (\" [4] Cisco-auditing-tool\")\n\t\tprint (\" [5] Creddump\")\n\t\tprint (\" [6] Crowbar\")\n\t\tprint (\" [7] Crunch\")\n\t\tprint (\" [8] DBPwAudit\")\n\t\tprint (\" [9] Findmyhash\")\n\t\tprint (\" [10] Gpp-decrypt\")\n\t\tprint (\" [11] Hash-identifier\")\n\t\tprint (\" [12] Hashcat\")\n\t\tprint (\" [13] John the Ripper\")\n\n\t\tprint (jack.kalin + \"\\n [A] Hepsi\")\n\t\tprint (\" [G] Geri\")\n\t\tprint (\" [Q] Çıkış\" + jack.temizle)\n\t\tgirisraw5 = raw_input('\\033[1;32m Hapentool/Parola Saldırıları Araçları> \\033[0m')\n\n\t\tif girisraw5 == '1':\n\t\t\tpaket.acccheck()\n\n\t\telif girisraw5 == '2':\n\t\t\tpaket.brutespray()\n\n\t\telif girisraw5 == '3':\n\t\t\tpaket.cewl()\n\n\t\telif girisraw5 == '4':\n\t\t\tpaket.cisco_auditing_tool()\n\n\t\telif girisraw5 == '5':\n\t\t\tpaket.creddump()\n\n\t\telif girisraw5 == '6':\n\t\t\tpaket.crowbar()\n\n\t\telif girisraw5 == '7':\n\t\t\tpaket.crunch()\n\n\t\telif girisraw5 == '8':\n\t\t\tpaket.dbpwaudit()\n\n\t\telif girisraw5 == '9':\n\t\t\tpaket.findmyhash()\n\n\t\telif girisraw5 == '10':\n\t\t\tpaket.Gpp_decrypt()\n\n\t\telif girisraw5 == '11':\n\t\t\tpaket.hash_identifier()\n\n\t\telif girisraw5 == '12':\n\t\t\tpaket.hashcat()\n\n\t\telif girisraw5 == '13':\n\t\t\tpaket.johnripper()\n\n\t\telif girisraw5 == 'a' or girisraw5 == 'A':\n\t\t\tpaket.parola_araclari_tumu()\n\n\t\telif girisraw5 == 'G' or girisraw5 == 'g':\n\t\t\tasistan()\n\n\t\telif girisraw5 == 'q' or girisraw5 == 'Q':\n\t\t\texit()\n\n\n## Parola Saldırı Araçları SONU\n\n\t\t######\n\n## Populer Uygulamalar BASLANGICI\n\n\tif girisraw == 'H' or girisraw == 'h':\n\t\tprint (\"Yardım metni henüz hazır değil :(\")\n\n\tif girisraw == '1':\n\t\tpaket.msfconsole()\n\n\telif girisraw == '2':\n\t\tpaket.sqlmap()\n\n\telif girisraw == '3':\n\t\tpaket.hapf()\n\n\telif girisraw == '4':\n\t\tpaket.nmap()\n\n\telif girisraw == '5':\n\t\tpaket.katanafrmwk()\n\n\telif girisraw == '6':\n\t\tpaket.uniscan()\n\n\telif girisraw == '7':\n\t\tpaket.recon_ng()\n\n## Populer Uygulamalar SONU\n","sub_path":"lib/asistan.py","file_name":"asistan.py","file_ext":"py","file_size_in_byte":11041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"12774991","text":"# -*- coding: utf-8 -*-\n\"\"\"Contains the main Application class which runs euporie.\"\"\"\nfrom __future__ import annotations\n\nimport 
logging\nimport sys\nfrom asyncio import AbstractEventLoop\nfrom pathlib import Path\nfrom typing import Any, Callable, Optional\n\nfrom prompt_toolkit.formatted_text import (\n AnyFormattedText,\n fragment_list_to_text,\n to_formatted_text,\n)\nfrom prompt_toolkit.layout import Float, HSplit, Layout, Window\nfrom prompt_toolkit.layout.containers import AnyContainer, FloatContainer, to_container\nfrom prompt_toolkit.layout.controls import FormattedTextControl\nfrom prompt_toolkit.layout.dimension import LayoutDimension as D\nfrom prompt_toolkit.widgets import Button, Dialog, Label, TextArea\n\nfrom euporie import __app_name__, __copyright__, __logo__, __strapline__, __version__\nfrom euporie.keys import KeyBindingsInfo\nfrom euporie.log import log_memory\nfrom euporie.text import ANSI, FormattedTextArea\n\nlog = logging.getLogger(__name__)\n\n\nclass DialogMixin:\n \"\"\"Provides dialogs for the main application.\"\"\"\n\n root_container: \"FloatContainer\"\n layout: \"Layout\"\n open_file: \"Callable\"\n\n def dialog(\n self,\n title: \"AnyFormattedText\",\n body: \"AnyContainer\",\n buttons: \"dict[str, Optional[Callable]]\",\n to_focus: \"Optional[AnyContainer]\" = None,\n ) -> None:\n \"\"\"Display a modal dialog above the application.\n\n Returns focus to the previously selected control when closed.\n\n Args:\n title: The title of the dialog. Can be formatted text.\n body: The container to use as the main body of the dialog.\n buttons: A dictionary mapping text to display as dialog buttons to\n callbacks to run when the button is clicked. If the callback is\n `None`, the dialog will be closed without running a callback.\n to_focus: The control to focus when the dialog is displayed.\n\n \"\"\"\n\n def _make_handler(cb: \"Optional[Callable]\") -> \"Callable\":\n def inner() -> \"None\":\n self.root_container.floats.remove(dialog)\n if focused in self.layout.find_all_controls():\n self.layout.focus(focused)\n if cb:\n cb()\n\n return inner\n\n focused = self.layout.current_control\n\n button_widgets = [\n Button(text, _make_handler(cb), left_symbol=\"[\", right_symbol=\"]\")\n for text, cb in buttons.items()\n ]\n\n dialog = Float(\n Dialog(\n title=title,\n body=body,\n buttons=button_widgets,\n modal=True,\n with_background=True,\n )\n )\n self.root_container.floats.append(\n dialog,\n )\n if to_focus is None:\n to_focus = button_widgets[0]\n self.layout.focus(to_focus)\n\n def ask_open_file(\n self,\n default: \"str\" = \"\",\n validate: \"bool\" = True,\n error: \"Optional[str]\" = None,\n ) -> None:\n \"\"\"Display a dialog asking for file name input.\n\n Args:\n default: The default filename to display in the text entry box\n validate: Whether to disallow files which do not exist\n error: An optional error message to display below the file name\n\n \"\"\"\n filepath = TextArea(text=default, multiline=False)\n\n def _open_cb() -> None:\n path = filepath.text\n if not validate or Path(path).expanduser().exists():\n self.open_file(filepath.text)\n else:\n self.ask_open_file(\n default=filepath.text, validate=validate, error=\"File not found\"\n )\n\n body_contents: \"list[AnyContainer]\" = [\n Label(\"Enter file name:\"),\n filepath,\n ]\n if error:\n body_contents.append(to_container(Label(error, style=\"red\")))\n self.dialog(\n title=\"Select file\",\n body=HSplit(body_contents),\n buttons={\n \"OK\": _open_cb,\n \"Cancel\": None,\n },\n to_focus=filepath,\n )\n\n def help_keys(self) -> None:\n \"\"\"Displays details of registered key-bindings in a dialog.\"\"\"\n key_details = 
KeyBindingsInfo.to_formatted_text()\n max_line_width = max(\n [len(line) for line in fragment_list_to_text(key_details).split(\"\\n\")]\n )\n body = FormattedTextArea(\n formatted_text=key_details,\n multiline=True,\n focusable=True,\n wrap_lines=False,\n width=D(preferred=max_line_width + 2),\n scrollbar=True,\n )\n\n self.dialog(\n title=\"Keyboard Shortcuts\",\n body=body,\n buttons={\"OK\": None},\n )\n\n def help_logs(self) -> None:\n \"\"\"Displays a dialog with logs.\"\"\"\n log_memory.seek(0)\n log_data = to_formatted_text(ANSI(log_memory.read()))\n\n body = FormattedTextArea(\n formatted_text=log_data,\n multiline=True,\n focusable=True,\n wrap_lines=False,\n width=D(preferred=120),\n scrollbar=True,\n )\n self.dialog(\n title=\"Logs\",\n body=body,\n buttons={\"OK\": None},\n )\n\n def help_about(self) -> None:\n \"\"\"Displays an about dialog.\"\"\"\n self.dialog(\n title=\"About\",\n body=Window(\n FormattedTextControl(\n [\n (\"class:logo\", __logo__),\n (\"\", \" \"),\n (\"bold\", __app_name__),\n (\"\", f\"Version {__version__}\\n\\n\".rjust(27, \" \")),\n (\"\", __strapline__),\n (\"\", \"\\n\"),\n (\"class:hr\", \"─\" * 34 + \"\\n\\n\"),\n (\"\", __copyright__),\n ]\n ),\n dont_extend_height=True,\n ),\n buttons={\"OK\": None},\n )\n\n def _handle_exception(\n self, loop: \"AbstractEventLoop\", context: \"dict[str, Any]\"\n ) -> None:\n from prompt_toolkit.formatted_text import to_formatted_text\n from rich.traceback import Traceback\n\n from euporie.render import RichRenderer\n\n # Log observed exceptions to the log\n log.error(\"An unhandled exception occured\", exc_info=sys.exc_info())\n\n # Also display a dialog to the user\n formatted_tb = to_formatted_text(\n ANSI(RichRenderer().render(Traceback(), width=80, height=999))\n )\n tb_control = FormattedTextArea(\n formatted_text=formatted_tb,\n multiline=True,\n focusable=True,\n wrap_lines=False,\n width=D(preferred=81),\n height=D(max=15),\n scrollbar=True,\n )\n self.dialog(\n title=\"Error\",\n body=HSplit(\n [\n Label(\"An error occured:\\n\", style=\"bold\"),\n tb_control,\n ]\n ),\n buttons={\"OK\": None},\n )\n","sub_path":"euporie/app/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"624587859","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n# TOMUSS: The Online Multi User Simple Spreadsheet\n# Copyright (C) 2015 Thierry EXCOFFIER, Universite Claude Bernard\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Contact: Thierry.EXCOFFIER@univ-lyon1.fr\n\nfrom ..column import TableAttr\n\nclass TableRounding(TableAttr):\n    name = 'rounding'\n    display_table = 1\n    default_value = 0 # Compatible with old TOMUSS version\n    gui_display = \"GUI_select\"\n    formatter = \"\"\"\nfunction(value)\n{\n  var e = document.getElementById('t_table_attr_rounding') ;\n  if ( e )\n     if ( value == 2 )\n        e.style.background = '#F88' ;\n     else\n        e.style.background = '' ;\n  for(var data_col in columns)\n    {\n      var column = columns[data_col] ;\n      column.need_update = true ;\n      column_attributes.rounding.check_and_set(column.rounding, column) ;\n    }\n  return value ;\n}\"\"\"\n    css = \"\"\"\n#menutop #t_table_attr_rounding { width: 50% }\n#menutop #t_table_attr_rounding OPTION { background: #DDD }\n\"\"\"\n\n    def encode(self, value):\n        return int(value)\n    def check(self, value):\n        try:\n            value = int(value)\n        except ValueError:\n            return self.check_error(value)\n        if 0 <= value <= 2:\n            return\n        return self.check_error(value)\n","sub_path":"ATTRIBUTES/tablerounding.py","file_name":"tablerounding.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"99506916","text":"import pygame\nfrom PIL import Image\n\nclass Ship():\n\n    def __init__(self, screen):\n        \"\"\"Initialize ship and set starting position.\"\"\"\n        self.screen = screen\n\n        # Load ship image and get its rect.\n        self.image = pygame.image.load('images/ship.bmp')\n        self.rect = self.image.get_rect()\n        self.screen_rect = self.screen.get_rect()\n\n        #Start each new ship at the bottom center of screen.\n        self.rect.centerx = self.screen_rect.centerx\n        self.rect.bottom = self.screen_rect.bottom\n\n    def blitme(self):\n        \"\"\"Draw the ship at its current location.\"\"\"\n        self.screen.blit(self.image, self.rect)\n","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"435932306","text":"from the_tyranny_of_the_rocket_equation import TyrannyOfTheRocket\n\nif __name__ == '__main__':\n    filename = \"input.txt\"\n    inputData = open(filename, \"r\")\n    data = inputData.readlines()\n\n    totr = TyrannyOfTheRocket()\n    fuelCost = totr.recursiveFuelCost(data) \n    print(\"Fuel requirements = %d\" % fuelCost)\n","sub_path":"1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"130048868","text":"# iterate from head to tail\n# use a dict to save each duplicate char's index \n# Initialize the start position to 0 and the longest non-repeating length to 0.\n# Iterate over the string, storing every character's index in a dict; when a character has appeared before and its recorded index is at or after the current start position, move the start to index+1.\n\nclass Solution:\n    def lengthOfLongestSubstring(self, s: str) -> int:\n        res = 0\n        dic = {}\n        start = 0\n        for i,ch in enumerate(s):\n            if ch in dic and dic[ch] >= start:\n                start = dic[ch] + 1\n            else:\n                res = max(res,i-start+1)\n            dic[ch] = i\n        return res\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n","sub_path":"algorithms/3. 
Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"451425211","text":"from pynput import keyboard\r\nimport time\r\nimport json\r\nimport os\r\n\r\nlast_time = time.time()\r\nhasReleased = True\r\nmorse_letter = ''\r\nmorse_message = ''\r\n\r\ndef on_press(key):\r\n global last_time\r\n global hasReleased\r\n global morse_message\r\n global morse_letter\r\n\r\n if key == keyboard.Key.enter:\r\n letter = morse_dictionary.get(morse_letter[1:])\r\n morse_letter = ''\r\n if letter is None:\r\n morse_message += ''\r\n else:\r\n morse_message += letter\r\n return True\r\n\r\n if key == keyboard.Key.tab:\r\n morse_message += ' '\r\n morse_letter = ''\r\n \r\n if hasReleased:\r\n last_time = time.time()\r\n hasReleased = False\r\n \r\n if key == keyboard.Key.esc:\r\n print()\r\n print('MESSAGE:')\r\n print(morse_message)\r\n time.sleep(2)\r\n os.system('cls')\r\n return False\r\n\r\ndef on_release(key):\r\n global last_time\r\n global hasReleased\r\n global morse_letter\r\n\r\n hasReleased = True\r\n if time.time() - last_time < 0.2:\r\n character = '.'\r\n else:\r\n character = '-'\r\n\r\n morse_letter += character\r\n\r\nwith open('morse.json') as f:\r\n morse_dictionary = json.load(f)\r\n\r\nos.system('cls')\r\nprint('''A .- B -...\r\nC -.-. D -..\r\nE . F ..-.\r\nG --. H ....\r\nI .. J .---\r\nK -.- L .-..\r\nM -- N -.\r\nO --- P .--.\r\nQ --.- R .-.\r\nS ... T -\r\nU ..- V ...-\r\nW .-- X -..-\r\nY -.-- Z --..\r\n0 ----- 1 .----\r\n2 ..--- 3 ...--\r\n4 ....- 5 .....\r\n6 -.... 7 --...\r\n8 ---.. 9 ----.''')\r\nprint(\"Listening.\")\r\nprint(\"Begin by pressing Enter.\")\r\nprint(\"Seperate letters using the Enter key.\")\r\nprint(\"Seperate words using the Tab key\")\r\nprint(\"When you are finished press Escape\")\r\n\r\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\r\n listener.join()\r\n","sub_path":"pymorse.py","file_name":"pymorse.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"302718740","text":"from model.project import Project\n\n\nclass ProjectHelper:\n\n def __init__(self, app):\n self.app = app\n\n def open_home_page(self):\n wd = self.app.wd\n if not (wd.current_url.endswith(\"/my_view_page.php\")):\n wd.find_element_by_link_text(\"My view\").click()\n\n def open_projects_to_manage(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"Manage\").click()\n wd.find_element_by_link_text(\"Manage Projects\").click()\n\n def filling_form(self, project):\n wd = self.app.wd\n self.change_field_value(\"name\", project.name)\n\n def change_field_value(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_name(field_name).clear()\n wd.find_element_by_name(field_name).send_keys(text)\n\n def creation(self, project):\n wd = self.app.wd\n self.open_projects_to_manage()\n wd.find_element_by_css_selector(\"input[value='Create New Project']\").click()\n self.filling_form(project)\n wd.find_element_by_css_selector(\"input[value='Add Project']\").click()\n wd.find_element_by_link_text(\"Proceed\").click()\n\n project_cache = None\n\n def get_project_list(self):\n if self.project_cache is None:\n wd = self.app.wd\n self.open_projects_to_manage()\n self.project_cache = []\n for row in wd.find_elements_by_css_selector(\"tr.row-1, tr.row-2\"):\n 
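# the first cell of each result row holds the project name\n                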
cells = row.find_elements_by_css_selector(\"td\")\n text_name = cells[0].text\n self.project_cache.append(Project(name=text_name))\n return list(self.project_cache)\n\n def delete_first_project(self):\n wd = self.app.wd\n self.open_projects_to_manage()\n wd.find_element_by_css_selector(\"tr.row-1 > td > a\").click()\n # submit delete\n wd.find_element_by_css_selector(\"input[value='Delete Project']\").click()\n wd.find_element_by_css_selector(\"input[value='Delete Project']\").click()\n self.project_cache = None","sub_path":"fixture/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"365505103","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\nimport os\nimport re\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"web_novel_analysis.settings\")\nimport django\ndjango.setup()\nfrom joara.models import TodayBest\n\nnow = time.localtime()\n\nhangul = re.compile('[^ ㄱ-ㅣ가-힣]+') \n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('headless')\ndriver = webdriver.Chrome('D:/chromedriver.exe', options=chrome_options)\n\ndriver.get('http://www.joara.com/best/today_best_list.html?page_no=1&sl_subcategory=all')\nhtml = driver.page_source\nsoup = BeautifulSoup(html, 'html.parser')\n\nbook_list = soup.find_all('td', class_='book_data_intro_form subject_long')\n\ndate = str(now.tm_year)+str(now.tm_mon).zfill(2)+str(now.tm_mday).zfill(2)\n\nfor book in book_list:\n try:\n genre = book.strong.text\n title = book.a.text.replace(genre, '').strip()\n intro = book.span.text\n except:\n genre = 'failed'\n title = ''\n intro = book\n TodayBest(date=date, genre=genre, title=title, intro=intro).save()\ndriver.close()","sub_path":"joara_todaybest.py","file_name":"joara_todaybest.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"131113519","text":"# Libraries that we need to import.\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom setup import cursor\n\n# Name of the Database\nDB_NAME = 'TRIBUCODE'\n\n# Init the tables empty\nTABLES = {}\n\n#Creation of the Tables with his features\nTABLES['Followers'] = (\n \"CREATE TABLE `Followers`(\" \n \" `id` int(15) NOT NULL AUTO_INCREMENT,\"\n \"`user` varchar(250) NOT NULL, \"\n \" `last_name` varchar(250) NOT NULL, \"\n \" `date` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,\" \n \" PRIMARY KEY (`id`)\"\n \") ENGINE=InnoDB\"\n)\n\n# Function which create the database schema\ndef create_database():\n cursor.execute(\"CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n print(\"Database {} created!\".format(DB_NAME))\n\n# Function which create the table inside the database schema\ndef create_tables():\n cursor.execute(\"USE {}\".format(DB_NAME))\n for table_name in TABLES:\n table_description = TABLES[table_name]\n try:\n print(\"Creating table ({}) \".format(table_name), end=\"\")\n cursor.execute(table_description)\n except mysql.connector.Error as error:\n if error.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already Exists\")\n else:\n print(error.msg)\n \n# Calling the functions to execute them\ncreate_database()\ncreate_tables()\n\n\n\n#Thanks for downloading. 
You can find all my videos on my YouTube channel, Tribucode.\n# TRIBUCODE #","sub_path":"creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"172673874","text":"import os.path\nimport sys\n\nimport pytest\n\ncollect_ignore = [os.path.join('appmap', 'test', 'data')]\npytest_plugins = ['pytester']\n\n\n@pytest.fixture\ndef data_dir(pytestconfig):\n    dir = str(os.path.join(str(pytestconfig.rootpath),\n                            'appmap', 'test', 'data'))\n    added = False\n    if dir not in sys.path:\n        sys.path.append(dir)\n        added = True\n\n    yield dir\n\n    if added:\n        sys.path.remove(dir)\n\n\n@pytest.fixture\ndef appmap_enabled(monkeypatch):\n    monkeypatch.setenv(\"APPMAP\", \"true\")\n    monkeypatch.setenv(\"APPMAP_LOG_LEVEL\", \"debug\")\n\n    yield monkeypatch\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"155087833","text":"# to test that the code is working\n# load basictest_arduinocode.ino onto arduino and plug into pc with usb\n\nimport serial\nimport time\nimport threading\n\n\ndef checkforresponse(serialconnection):\n    while True:\n        data = serialconnection.read()\n        print(data)\n\n\nif __name__ == '__main__':\n    print('connecting to arduino...')\n\n    arduinoserial = serial.Serial('com3', 9600)\n\n    print('connected!')\n\n    # wait for arduino to respond\n    time.sleep(1)\n    datacollectorthread = threading.Thread(name='datacollector', target=checkforresponse, args=(arduinoserial,))\n    datacollectorthread.start()\n\n    while True:\n        inp = input('LED to light up:').strip()\n        if inp.isdigit():\n            # send led index to arduino which will then display it as a different colour each time\n            arduinoserial.write(bytes([int(inp)]))\n","sub_path":"basic test.py","file_name":"basic test.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"253415646","text":"import socket\r\nimport random\r\n\r\n#public_key = [3, 17] # Testing\r\n\r\npublic_key = [53, 524287]\r\nlength = 6\r\nmethod = None\r\n\r\nglobal_host = 'localhost'\r\nglobal_port = 5555\r\n\r\n\r\ndef mix(a, gen):\r\n    out = gen**a % public_key[1]\r\n    return out\r\n\r\ndef gen_private(length):\r\n    start = '1'\r\n    end = ''\r\n    for i in range(length):\r\n        end += '9'\r\n    for i in range(length - 1):\r\n        start += '0'\r\n\r\n    start = int(start)\r\n    end = int(end)\r\n    \r\n    return random.randint(start, end)\r\n\r\n\r\n#Server/Client\r\n\r\ndef server():\r\n    print('Starting Socket...')\r\n\r\n    host = ''\r\n    port = global_port\r\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n    try:\r\n        s.bind((host, port))\r\n    except socket.error as e:\r\n        print(str(e))\r\n\r\n    print('Running on', host + ':' + str(port))\r\n\r\n    s.listen(5)\r\n    conn, addr = s.accept()\r\n\r\n    print('Connected to', str(addr[0]) + ':' + str(addr[1]))\r\n\r\n    return conn\r\n\r\ndef client():\r\n    host = global_host\r\n    port = global_port\r\n\r\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n    try:\r\n        s.connect((host, port))\r\n        print('Connected to Server')\r\n        return s\r\n    except:\r\n        print('Connection Failed')\r\n        return 'ERROR'\r\n\r\ndef get_socket():\r\n    global method\r\n    print('S => Server\\nC => Client')\r\n    method_input = input('Method > ').upper()\r\n    print()\r\n\r\n    if method_input == 'S':\r\n        s = server()\r\n        method = 's'\r\n        
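# hand the connected socket back to the caller\r\n        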
return s\r\n elif method_input == 'C':\r\n s = client()\r\n method = 'c'\r\n return s\r\n else:\r\n print('Invalid Input')\r\n return get_socket()\r\n\r\ndef server_action(s, private_key):\r\n data = s.recv(2048)\r\n data = data.decode('utf-8')\r\n reply = str(mix(private_key, public_key[0]))\r\n s.sendall(str.encode(reply))\r\n s.close()\r\n return data\r\n\r\ndef client_action(s, private_key):\r\n s.send(str(mix(private_key, public_key[0])).encode())\r\n data = s.recv(2048)\r\n reply = data.decode('utf-8')\r\n return reply\r\n\r\ndef set_host_port():\r\n print('Setting Host and Port')\r\n print('If you are going to run as a server,\\nPort is the only value that matters.')\r\n host = input('Host > ')\r\n port = int(input('Port > '))\r\n return (host, port)\r\n\r\n#Start Up\r\n\r\ndef init(defaults=False):\r\n global global_host, global_port\r\n \r\n if not defaults:\r\n global_host, global_port = set_host_port()\r\n print()\r\n \r\n s = get_socket()\r\n print()\r\n\r\n if method == 'c':\r\n input('Ready to Calculate Key ')\r\n\r\n #Messages\r\n\r\n private = gen_private(length)\r\n\r\n if method == 's':\r\n net_mix = int(server_action(s, private))\r\n elif method == 'c':\r\n net_mix = int(client_action(s, private))\r\n\r\n key = mix(private, net_mix)\r\n return key\r\n\r\n#INIT\r\n\r\ndef main():\r\n print('~~~Public Key~~~')\r\n print()\r\n\r\n key = init()\r\n print('Secure Key => ' + str(key))\r\n input()\r\n","sub_path":"public_key.py","file_name":"public_key.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"258898730","text":"import os\nimport json\nfrom typing import Dict\nfrom collections import Counter\nimport random\n# import argparse\nfrom config import *\nfrom exprsynth.data_loader import data_loader\n\n\nwith open(vocab_path, 'r') as f:\n vocab = json.load(f)\n\n\ndef collect_target_seq(root, productions):\n seq = []\n if str(root) not in productions:\n seq.append(root)\n return seq\n for subroot in productions[str(root)]:\n seq += collect_target_seq(subroot, productions)\n return seq\n\n\ndef preprocess_graph(raw_graph: Dict):\n # print(raw_graph.keys())\n # if 'ContextGraph' not in raw_graph:\n # print(raw_graph)\n context_graph = raw_graph['ContextGraph']\n cg_node_labels = context_graph['NodeLabels']\n tg_node_labels = raw_graph['SymbolLabels']\n root = min([int(subroot) for subroot in raw_graph['Productions']])\n target_seq = collect_target_seq(root, raw_graph['Productions'])\n\n # number of cg nodes\n num_cg_nodes = len(cg_node_labels)\n num_tg_nodes = len(tg_node_labels)\n # get target token ids\n tg_token_ids = [vocab['tg_label2idx']\n [tg_node_labels[str(i)]] for i in target_seq]\n assert ''.join(\n [tg_node_labels[str(i)] for i in target_seq]) == raw_graph['OriginalExpression'].replace(' ', '')\n assert ''.join([vocab['tg_idx2label'][id]\n for id in tg_token_ids]) == raw_graph['OriginalExpression'].replace(' ', ''), '{} != {}'.format(''.join([vocab['tg_idx2label'][id] for id in tg_token_ids]), raw_graph['OriginalExpression'])\n\n # get cg token ids\n cg_token_ids = [-1] * num_cg_nodes\n for idx in cg_node_labels:\n cg_token_ids[int(idx)] = vocab['cg_label2idx'][cg_node_labels[idx]]\n assert -1 not in cg_token_ids, 'bad id in cg_token_ids'\n\n edges = {vocab['edge_label2idx'][edge_type]: context_graph['Edges']\n [edge_type] for edge_type in context_graph['Edges']}\n\n # print(cg_token_ids[raw_graph['HoleNode']])\n graph = {'num_cg_nodes': num_cg_nodes,\n 'num_tg_nodes': 
num_tg_nodes,\n 'cg_token_ids': cg_token_ids,\n 'tg_token_ids': tg_token_ids,\n 'edges': edges,\n 'hole_node_index': raw_graph['HoleNode']}\n return graph\n\n\ndef load_graphs(graph_path):\n print('[Info] loading graphs from {}'.format(graph_path))\n graphs = []\n with open(graph_path, 'r') as f:\n for l in f:\n # if l.strip() == '':\n # continue\n graphs.append(json.loads(l))\n # print(graphs[-1].keys())\n return graphs\n\n\nif __name__ == '__main__':\n\n graphs = load_graphs(graph_path)\n\n # vocab = build_vocab(graphs)\n # assert False\n graphs = [preprocess_graph(g) for g in graphs]\n # data_batches = data_loader(graphs, batch_size)\n # graph = random.choice(graphs)\n # graph = preprocess_graph(graph)\n\n # for i, (cg_token_ids_batch, tg_token_ids_batch, adjacency_matrix_batch) in enumerate(data_batches):\n # print(i)\n # print(cg_token_ids_batch)\n # print(tg_token_ids_batch)\n # print(adjacency_matrix_batch)\n","sub_path":"Models/exprsynth/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"382119274","text":"# 29. Write a program to enter a character from a-z or A-Z and\n# convert it into lowercase and vice-versa.\n\nimport re\n\nc = input(\"Enter character.\")\n\nif re.findall('[a-z]', c):\n up = c.upper()\n print(up)\nelif re.findall('[A-Z]', c):\n lw = c.lower()\n print(lw)\n\n\n\n","sub_path":"py_practice/Problem29.py","file_name":"Problem29.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"186760066","text":"# Note: this script is executed with the proxy role Manager, because this script needs\n# to use checkbook_module.\n\n\ntransaction = state_change['object']\nline = transaction.get('movement')\nif line is not None and line.getPortalType() == 'Banking Operation Line':\n # This is a single currency operation, so it is not necessary to convert the price.\n line.setSourceCredit(transaction.getSourceTotalAssetPrice())\n\n#transaction.edit(aggregate = check.getRelativeUrl())\n","sub_path":"bt5/erp5_banking_cash/WorkflowTemplateItem/portal_workflow/internal_money_deposit_workflow/scripts/updateBankingOperation.py","file_name":"updateBankingOperation.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"456607112","text":"class Solution:\n def finalValueAfterOperations(self, operations: List[str]) -> int:\n x = 0\n \n for op in operations:\n if op == \"++X\":\n x += 1\n \n elif op == \"X++\":\n x += 1\n \n elif op == \"X--\":\n x -= 1\n \n elif op == \"--X\":\n x -= 1\n \n return x\n\n# completed 2022-11-28 (YYYY-MM-DD)\n# Runtime: 136 ms, faster than 6.39% of Python3 online submissions for Final Value of Variable After Performing Operations.\n# Memory Usage: 13.8 MB, less than 56.77% of Python3 online submissions for Final Value of Variable After Performing Operations.\n# notes: could probably have used a hashmap or avoided string comparisons for better speed\n","sub_path":"completed/leetcode/bad/2011-final-value-of-variable-after-performing-operations.py","file_name":"2011-final-value-of-variable-after-performing-operations.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"255478794","text":"from subprocess import Popen, PIPE, call\nimport re\n\n# read 
response.txt(format: \"word\\tresponse\") into response\nwith open(\"./response.txt\") as f:\n    response = dict(s.strip().split(\"\\t\") for s in f.readlines())\n\np = Popen([\"sh\", \"ex1.sh\"], stdout=PIPE, stderr=PIPE)\nwhile True:\n    input = p.stdout.readline().decode()\n    output = \"\"\n    if re.match(\"sentence1:(.*)\", input):\n        input = input[input.index(\":\")+1:].strip()\n        words = input.split()\n        for a_word in words:\n            if a_word in response:\n                output = response[a_word]\n        if output:\n            print(\"speak: {}\".format(output))\n            call([\"say\", output])\n    else:\n        print(\"result: {}\".format(input))\np.kill()\n","sub_path":"ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"158605824","text":"# -*- coding: utf-8 -*-\nfrom decimal import Decimal\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom basky.forms import BasketForm\nfrom basky.exceptions import BasketFormException\nfrom basky.models import TestSimpleProduct\nfrom basky import settings as basky_settings\n\n\nclass BasketFormTestCases(TestCase):\n\n    def setUp(self):\n        # create a simple product\n        item = TestSimpleProduct()\n        item.price = Decimal(\"9.99\")\n        item.title = 'Another Product'\n        item.save()\n        self.item = item\n        # get the ctype\n        self.ctype = ContentType.objects.get(\n            app_label=self.item._meta.app_label,\n            model=self.item._meta.object_name.lower())\n        # get the basket\n        self.client.get('/')\n        self.basket = self.client.session['basket']\n\n    def test_post_or_item_required(self):\n        \"\"\"With a basket form args[0] or kwargs['item'] is required and if\n        neither are present then a BasketFormException is raised.\"\"\"\n        self.assertRaises(BasketFormException, BasketForm)\n\n    def test_basketform_item(self):\n        \"\"\"Basket form behaves correctly and only requires item or post\"\"\"\n        BasketForm(instance=self.item)\n\n    def test_basketform_post(self):\n        \"\"\"Basket form behaves correctly and only requires item or post\"\"\"\n        fake_post = {\n            'contenttype_pk': self.ctype,\n            'object_id': self.item.pk,\n        }\n        BasketForm(fake_post)\n","sub_path":"basky/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"39501971","text":"import os\nimport tarfile\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom pandas.plotting import scatter_matrix\nhousing=pd.read_csv('https://raw.githubusercontent.com/ageron/handson-ml2/master/datasets/housing/housing.csv')\nhousing['income_cat']=pd.cut(housing['median_income'],\nbins=[0.,1.5,3.0,4.5,6.,np.inf],labels=[1,2,3,4,5])\n\n\n#use sklearn to do Stratified Sampling\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit= StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)\nfor train_index,test_index in split.split(housing,housing['income_cat']):\n    strat_train_set=housing.loc[train_index]\n    strat_test_set=housing.loc[test_index]\n\n\nhousing= strat_train_set.drop('median_house_value',axis=1)\nhousing_labels = strat_train_set['median_house_value'].copy()\n\n#DataCleaning\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(strategy='median')\n\n#remove all the attributes that have object category as median is 
number\nhousing_num=housing.drop('ocean_proximity',axis=1)\nhousing_cat=housing[['ocean_proximity']]\nimputer.fit(housing_num)\n\nhousing_tr=pd.DataFrame(imputer.transform(housing_num),columns=housing_num.columns)\n\n#Handling Text and Categorical Attributes\nfrom sklearn.preprocessing import OrdinalEncoder, OneHotEncoder\nordinal_encoder= OrdinalEncoder()\nhousing_cat_encoded= ordinal_encoder.fit_transform(housing_cat)\n#print(housing_cat_encoded[0:10],ordinal_encoder.categories_)\n\n#Custom Transformer\nfrom sklearn.base import BaseEstimator,TransformerMixin\nrooms_ix,bedrooms_ix,population_ix,households_ix=3,4,5,6\n\nclass CombinedAttributesAdder(BaseEstimator,TransformerMixin):\n    def __init__(self,add_bedrooms_per_room=True):\n        self.add_bedrooms_per_room=add_bedrooms_per_room\n    def fit(self,X,y=None):\n        return self\n    def transform(self,X,y=None):\n        rooms_per_household = X[:,rooms_ix]/X[:,households_ix]\n        population_per_household= X[:,population_ix]/X[:,households_ix]\n        if self.add_bedrooms_per_room:\n            bedrooms_per_room=X[:,bedrooms_ix]/X[:,rooms_ix]\n            return np.c_[X,rooms_per_household,population_per_household,bedrooms_per_room]\n        else:\n            return np.c_[X,rooms_per_household,population_per_household]\n\nattr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\nhousing_extra_attribs =attr_adder.transform(housing.values)\n\n#Transformation Pipelines\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nnum_pipeline = Pipeline([('imputer',SimpleImputer(strategy=\"median\")),('attribs_adder',\nCombinedAttributesAdder()),('std_scalar',StandardScaler())])\n\nhousing_num_tr = num_pipeline.fit_transform(housing_num)\n\nfrom sklearn.compose import ColumnTransformer\nnum_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]\n\n# apply the numeric pipeline to numeric columns and one-hot encode the categorical column\nfull_pipeline = ColumnTransformer([('num',num_pipeline,num_attribs),('cat',OneHotEncoder(),cat_attribs)])\nhousing_prepared = full_pipeline.fit_transform(housing)\n","sub_path":"Housing-Price-Prediction/housing_project_v2.py","file_name":"housing_project_v2.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"54390378","text":"from flask import make_response\n\nfrom app import create_app\n\napp = create_app()\n\n__author__ = 'linxinzhe'\n\n\n@app.route('/hello/')  # single-URL principle: the trailing / also matches requests without it\ndef hello():\n    headers = {\n        'content-type': 'text/plain'\n    }\n    response = make_response('', 404)\n    response.headers = headers\n    return response\n\n\n# app.add_url_rule('/hello/', view_func=hello)  # an alternative way to register the route\n\nif __name__ == '__main__':  # in production the app runs under nginx+uwsgi; this guard keeps the built-in server from starting there\n    app.run(host='0.0.0.0', debug=app.config['DEBUG'], port=81)\n","sub_path":"fisher.py","file_name":"fisher.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"104885385","text":"from data_loader.data_loaders import WaterMeterDataLoader, WaterMeterDataset\nimport os\nfrom PIL import Image\nimport glob\nimport numpy as np\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport ntpath\nimport pandas as pd\nfrom absl.flags import FLAGS\nfrom absl import app, flags\nfrom torchvision.datasets import ImageFolder\nfrom model.model import WaterMeterModel\nimport torch\nimport tqdm\nfrom model.decode import feature_to_y\n\nflags.DEFINE_string('input_folder', '/home/mde/python/Water-Meter/example-images/', 'path to folder containing jpgs for prediction')\nflags.DEFINE_string('model_weights', '/home/mde/python/Water-Meter/saved/models/WMN_FCSRN/final_model-77acc.pth', 'path to model 
weightsfile')\nflags.DEFINE_string('output_csv', './predictions/predictions.csv', 'csv with predictions')\nflags.DEFINE_string('output_folder', './predictions', 'folder with visualized predictions, if None')\nflags.DEFINE_string('default_image_folder', '../pytorch-rotation-decoupled-detector/images/fota_zip_test',\n 'folder with default images')\nflags.DEFINE_integer('batch_size', 128, 'batch size used for prediction')\nflags.DEFINE_integer('num_workers', 4, 'number of cores to use')\nflags.DEFINE_integer('n_gpu', 2, 'number of gpus to use')\nflags.DEFINE_integer('unknown_class', 11, 'class id for empty or not unrecognizable digit.(WaterMeterDataset.unknown_class)')\n\n\n\ndef load_model(checkpoint_path):\n print('loading model...')\n\n model = WaterMeterModel()\n checkpoint = torch.load(checkpoint_path)\n if FLAGS.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n state_dict = checkpoint['state_dict']\n model.load_state_dict(state_dict)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n return model, device\n\n\ndef replace_unknow_class(Y):\n Y = [ ''.join([str(y) if y != FLAGS.unknown_class else '-' for y in yarr]) for yarr in Y]\n return Y\n\n\ndef plot_predictions(img_and_predictions_and_img_name):\n img_path, img_output_path, predictions, color = img_and_predictions_and_img_name\n\n img = Image.open(img_path)\n width, height = img.size\n if width > 200:\n img = img.resize((int(width*0.3), int(height*0.3)))\n plt.imshow(img)\n title_obj = plt.title(predictions)\n plt.setp(title_obj, color=color)\n plt.savefig(img_output_path)\n\n\ndef predict(data_loader, model, device, output_csv):\n\n print('predicting...')\n\n predictions = dict()\n with torch.no_grad():\n for i, (imgs, img_names) in enumerate(tqdm.tqdm(data_loader)):\n imgs = imgs.to(device)\n\n model_outputs = model(imgs)\n predicted_classes, batch_probs = feature_to_y(model_outputs, return_probs=True)\n final_digits = replace_unknow_class(predicted_classes)\n\n default_image_names = []\n for name in img_names:\n extension = name.split('.')[-1]\n def_name = ''.join(name.split('__')[:-1]) + f'.{extension}'\n default_image_names.append(def_name)\n\n for img_name, default_img_name, digits, probs in zip(img_names, default_image_names, final_digits, batch_probs):\n if default_img_name not in predictions:\n predictions[default_img_name] = []\n predictions[default_img_name].append((img_name, digits, probs))\n\n return predictions\n\n\ndef postprocess_predictions(prediction_dict, output_csv):\n\n def get_single_prediction_for_img(prediction_dict):\n\n prediction_dict_selected_index = dict()\n\n for default_img_name in prediction_dict:\n img_predictions = prediction_dict[default_img_name]\n known_digit_count = []\n for idx, (img_name, digits, probs) in enumerate(img_predictions):\n # print(idx, img_name, digits, np.round(probs, 2), np.median(probs))\n count = len(digits) - digits.count('-')\n known_digit_count.append(count)\n\n max_known_digit_count_idx = np.argmax(known_digit_count)\n max_known_digit_count = known_digit_count[max_known_digit_count_idx]\n\n if known_digit_count.count(max_known_digit_count) > 1:\n max_mean_probs = -np.inf\n max_mean_probs_idx = None\n for idx, (img_name, _, probs) in enumerate(img_predictions):\n mean_probs = np.mean(probs)\n if mean_probs > max_mean_probs:\n max_mean_probs = mean_probs\n max_mean_probs_idx = idx\n final_idx = max_mean_probs_idx\n else:\n final_idx = max_known_digit_count_idx\n 
prediction_dict_selected_index[default_img_name] = final_idx\n\n        return prediction_dict_selected_index\n\n    prediction_dict_selected_index = get_single_prediction_for_img(prediction_dict)\n\n    df_final_predictions = pd.DataFrame(columns=['img_name', 'prediction'])\n\n    if FLAGS.output_folder is not None and FLAGS.default_image_folder:\n        os.makedirs(os.path.join(FLAGS.output_folder, 'imgs'), exist_ok=True)\n        os.makedirs(os.path.join(FLAGS.output_folder, 'bbs'), exist_ok=True)\n        df_images_to_export = pd.DataFrame(columns=[\n            'input_img_path',\n            'output_img_path',\n            'label',\n            'color'\n        ])\n        df_export_idx = 0\n\n    for i, img_name in enumerate(tqdm.tqdm(prediction_dict)):\n\n        selected_img_idx = prediction_dict_selected_index[img_name]\n\n        final_img_prediction = prediction_dict[img_name][selected_img_idx]\n        _, final_digits, _ = final_img_prediction\n\n        df_final_predictions.loc[i] =[img_name, final_digits]\n\n        \n        if FLAGS.output_folder is not None:\n            #final prediction visualized on default image\n            df_images_to_export.loc[df_export_idx] = [\n                os.path.join(FLAGS.default_image_folder, img_name),\n                os.path.join(FLAGS.output_folder,'imgs', img_name),\n                final_digits,\n                'green'\n            ]\n            df_export_idx += 1\n            \n            #predictions visualized on every cropped subimg of default image\n            for idx, (croped_img_name, cropped_img_prediction, _) in enumerate(prediction_dict[img_name]):\n                \n                df_images_to_export.loc[df_export_idx] = [\n                    os.path.join(FLAGS.input_folder, croped_img_name),\n                    os.path.join(FLAGS.output_folder,'bbs', croped_img_name),\n                    cropped_img_prediction,\n                    'green' if idx == selected_img_idx else 'red'\n                ]\n                df_export_idx += 1\n        \n\n    if FLAGS.output_folder is not None and FLAGS.default_image_folder:\n        print(f\"exporting imgs with predictions to {FLAGS.output_folder}\")\n        with Pool(processes=FLAGS.num_workers) as pool:\n            pool.map(plot_predictions, df_images_to_export.values.tolist())\n\n    print(f'predictions written to {output_csv}')\n\n    df_final_predictions.to_csv(output_csv, index=False, sep=';')\n\nimport pickle\ndef main(_args):\n\n    data_loader = WaterMeterDataLoader(\n        data_dir=FLAGS.input_folder,\n        batch_size=FLAGS.batch_size,\n        shuffle=False,\n        num_workers=FLAGS.num_workers,\n        validation_split=0,\n        mode='predict'\n    )\n    \n    model, device = load_model(FLAGS.model_weights)\n    \n    prediction_dict = predict(data_loader, model, device, FLAGS.output_csv)\n\n    postprocess_predictions(prediction_dict, FLAGS.output_csv)\n\nif __name__ == '__main__':\n    try:\n        app.run(main)\n    except SystemExit:\n        pass\n","sub_path":"predict_folder.py","file_name":"predict_folder.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"256682894","text":"# -- coding: gbk --\n#scrape trending hot-search entries\n\n#import Chrome and its options; selenium drives the browser like a human operator\nimport time\nimport pymysql\nimport traceback #trace exceptions\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\n\ndef get_baidu_hot():\n    \"\"\"\n    :return: the Baidu epidemic hot-search entries\n    \"\"\"\n    #the site to scrape\n    url = \"https://voice.baidu.com/act/virussearch/virussearch?from=osari_map&tab=0&infomore=1\" \n    options = Options()\n    options.add_argument('--headless')\n    options.add_argument('--no-sandbox')\n    options.add_argument('--disable-dev-shm-usage')\n    driver = webdriver.Chrome(executable_path=\"/usr/bin/chromedriver\",\n                              chrome_options=options) \n\n    driver.get(url)\n    #print(browser.page_source)\n\n\n    dl = driver.find_element_by_css_selector('#ptab-0 > div > div.VirusHot_1-5-4_32AY4F.VirusHot_1-5-4_2RnRvg > section > div')\n
    #click \"expand\" to reveal the remaining hot-search entries\n    dl.click()\n    time.sleep(1) #wait one second\n\n    #fetch the target nodes via XPath; use ctrl+f in devtools to verify the path\n    c = driver.find_elements_by_xpath('//*[@id=\"ptab-0\"]/div/div[2]/section/a/div/span[2]')\n    context = [i.text for i in c] #get the text inside each tag\n    # print(context) to inspect the output\n    return context\n\n#insert the scraped data into the database\n\n#helpers for opening and closing the DB connection\ndef get_conn():\n\n    conn = pymysql.connect(host=\"localhost\",user=\"root\",password=\"123456\",db=\"cov\")\n    cursor = conn.cursor()\n    return conn, cursor\n\n\ndef close_conn(conn, cursor):\n    if cursor:\n        cursor.close()\n    if conn:\n        conn.close()\n\ndef update_hotsearch():\n    \"\"\"\n    Insert the scraped epidemic hot-search data into the hotsearch table.\n    :return: None\n    \"\"\"\n    cursor = None\n    conn = None\n    \n    try:\n        context = get_baidu_hot()#call the scraper defined above\n        print(f\"{time.asctime()} start updating hot-search data\")\n        conn, cursor = get_conn()\n        sql1 = \"TRUNCATE table hotsearch;\"#delete all existing rows before updating\n        cursor.execute(sql1)\n        print(f\"{time.asctime()} table cleared\")\n        sql = \"insert into hotsearch(dt,content) values(%s,%s)\"\n        ts = time.strftime(\"%Y-%m-%d %X\") #format the current timestamp\n        for i in context:\n            cursor.execute(sql,(ts, i)) #insert one row (timestamp, content)\n        conn.commit()#commit the transaction\n        print(f\"{time.asctime()} data update finished\")\n    except:\n        traceback.print_exc()\n    finally:\n        close_conn(conn,cursor)\n\nupdate_hotsearch()\n\n\n\n    \n","sub_path":"python爬虫学习/resougoogle2.py","file_name":"resougoogle2.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"466771436","text":"'''\nCopyright (c) 2019 Modul 9/HiFiBerry\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport logging\nfrom typing import Dict\n\nfrom usagecollector.client import report_usage\n\nfrom ac2.plugins.control.controller import Controller\n\n\nclass Keyboard(Controller):\n\n def __init__(self, params: Dict[str, str]=None):\n super().__init__()\n \n self.name = \"keyboard\"\n\n if params is None or len(params) == 0:\n # Default code table that works with this remote control:\n #\n self.codetable = {\n # volume up\n 115: \"volume_up\",\n # volume down\n 114: \"volume_down\",\n # right\n 106: \"next\",\n # left\n 105: \"previous\",\n # enter\n 28: \"playpause\",\n # up\n 103: \"previous\",\n # down\n 108: \"next\"\n }\n else:\n self.codetable = {}\n for i in params:\n self.codetable[int(params[i])] = i\n\n import keyboard\n try:\n keyboard.on_press(self.keyboard_hook)\n logging.debug(\"keyboard listener started\")\n except:\n logging.error(\"could not start Keyboard listener, \"\n \"no keyboard detected or no permissions\")\n\n def keyboard_hook(self, e):\n import keyboard\n\n if e.event_type == keyboard.KEY_DOWN:\n try:\n command = self.codetable[e.scan_code]\n except:\n logging.error(\"%s unknown\", e.scan_code)\n return\n\n try:\n command_run = False\n if command == \"volume_up\":\n if self.volumecontrol is not None:\n self.volumecontrol.change_volume_percent(5)\n command_run =True\n else:\n logging.info(\"ignoring %s, no volume control\",\n command)\n\n elif command == \"volume_down\":\n if self.volumecontrol is not None:\n self.volumecontrol.change_volume_percent(-5)\n command_run =True\n else:\n logging.info(\"ignoring %s, no volume control\",\n command)\n\n elif command == \"previous\":\n if self.playercontrol is not None:\n self.playercontrol.previous()\n command_run =True\n else:\n logging.info(\"ignoring %s, no playback control\",\n command)\n\n elif command == \"next\":\n if self.playercontrol is not None:\n self.playercontrol.next()\n command_run =True\n else:\n logging.info(\"ignoring %s, no playback control\",\n command)\n\n elif command == \"playpause\":\n if self.playercontrol is not None:\n self.playercontrol.playpause()\n command_run =True\n else:\n logging.info(\"ignoring %s, no playback control\",\n command)\n\n if command_run:\n report_usage(\"audiocontrol_keyboard_key\", 1)\n\n logging.debug(\"processed %s\", command)\n\n except Exception as e:\n logging.warning(\"problem handling %s (%s)\", command, e)\n\n def run(self):\n import keyboard\n keyboard.wait()\n","sub_path":"ac2/plugins/control/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"65967916","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Generation of Raw Gaussian Pulse\ninput = np.arange(-5, 5, 0.1)\n\nmean = 0\nstd_deviation = 1\n\ngaussian_pulse = [0]*20\nfor index, std_deviation in enumerate(np.arange(1,10,0.5)):\n gaussian_pulse[index] = np.exp((-(input - mean)**2)/ 2*std_deviation**2)/(np.sqrt(2*np.pi)*std_deviation)\n\n\n plt.figure(1)\n plt.plot(input, gaussian_pulse[index], label = std_deviation)\n\nplt.grid()\nplt.legend(fontsize = 'small')\nplt.title('Gaussian 
Pulse')\nplt.show()","sub_path":"Chapter2/gaussianwithmeanandstddeviation.py","file_name":"gaussianwithmeanandstddeviation.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"540816374","text":"import pymysql\nimport pymssql\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\n'''\n 把mysql的数据更新进sqlserver中\n'''\n\n\nclass mysql_to_sqlserver:\n def __init__(self):\n mysql_ip = '192.168.2.125'\n mysql_database = 'IndexDB'\n mysql_password = '1'\n mysql_username = 'root'\n self.mysql_db = pymysql.connect(mysql_ip, mysql_username, mysql_password, mysql_database, use_unicode=True,\n charset='utf8')\n self.mysql_curs = self.mysql_db.cursor()\n\n sql_ip = '192.168.2.132'\n sql_database = 'IndexDB'\n sql_password = 'Ztxy123'\n sql_username = 'lyl'\n self.sql_db = pymssql.connect(sql_ip, sql_username, sql_password, sql_database)\n self.sql_curs = self.sql_db.cursor()\n\n def get_mysql_tables(self):\n sql = '''show tables'''\n self.mysql_curs.execute(sql)\n data = self.mysql_curs.fetchall()\n\n tables_list = []\n for i in data:\n tables_list.append(i[0])\n return tables_list\n\n def get_mysql_data(self, table_name):\n my_str = '''select * from %s''' % table_name\n self.mysql_curs.execute(my_str)\n data = self.mysql_curs.fetchall()\n datalist = []\n for i in data:\n datalist.append([i[0], i[1], i[2], i[3]])\n return datalist\n\n def create_table_sqlserver(self, table_name):\n sql = '''if not exists (select * from sysobjects where id=object_id('%s')) CREATE TABLE %s(\n COMMTYPE varchar(50),\n BIZDATE int,\n BIZMIN int,\n VALUE decimal(25,10))''' % (table_name, table_name)\n\n self.sql_curs.execute(sql)\n self.sql_db.commit()\n\n def truncate_sqlserver(self, table_name):\n sql = '''truncate table %s''' % table_name\n self.sql_curs.execute(sql)\n self.sql_db.commit()\n\n def writr_into_sqlserver(self, table_name, data_list):\n\n for i in data_list:\n sql = '''insert into %s values ('%s',%d,%d,%25.10f)''' % (table_name, i[0], i[1], i[2], i[3])\n\n self.sql_curs.execute(sql)\n\n self.sql_db.commit()\n print(table_name, '写入完成')\n\n def __del__(self):\n self.mysql_curs.close()\n self.mysql_db.close()\n self.sql_curs.close()\n self.sql_db.close()\n\n def test(self):\n sql = '''select * from sysobjects where xtype='U';'''\n self.sql_curs.execute(sql)\n data = self.sql_curs.fetchall()\n\n print(data, '\\n')\n\n def get_sqlserver_data(self, table_name):\n sql = '''select * from %s''' % table_name\n dataframe = pd.read_sql(sql, self.sql_db)\n list1 = np.array(dataframe).tolist()\n return list1\n\n def find_diff(self, data1, data2):\n '''data1是sql数据,data2是sqlserver数据'''\n update_list = []\n for i in data1:\n if i not in data2:\n update_list.append(i)\n return update_list, data2\n\n def update_data_in(self, data_list, small_ist):\n\n '''data_list :需要更新的所有数据'''\n '''small_ist : sqlserver的所有数据'''\n types = set([x[0] for x in data_list])\n update_list = [] # 更新旧数据\n insert_list = [] # 添加新数据\n for i in types:\n daylist1 = [x[1] for x in data_list if x[0] == i]\n daylist2 = [x[1] for x in small_ist if x[0] == i]\n for day in daylist1:\n\n if day in daylist2:\n update_list.append([x for x in data_list if x[0] == i and x[1] is day][0])\n for i in data_list:\n if i not in update_list:\n insert_list.append(i)\n return update_list, insert_list\n\n def write_data(self, table, update_list, insert_list):\n with open(f'../matlablog/{table}.log','a') as fb:\n if len(update_list)>0 or len(insert_list)>0:\n 
fb.write(f'{str(datetime.today())[:19]}----\\n\\nupdated existing rows:\\n{update_list}\\n\\ninserted new rows:\\n{insert_list}\\n\\n')\n            else:\n                fb.write(f'{str(datetime.today())[:19]}----\\n\\nno data to update\\n\\n')\n        return\n        # NOTE: the early return above leaves the SQL writes below unreachable\n        for i in update_list:\n            sql = f'''update {table} set VALUE = {i[3]} where COMMTYPE='{i[0]}' and BIZDATE ={i[1]}'''\n            self.sql_curs.execute(sql)\n\n        for j in insert_list:\n            sql2 = f'''insert into {table} values ('{j[0]}',{j[1]},{j[2]},{j[3]})'''\n            self.sql_curs.execute(sql2)\n        self.sql_db.commit()\n\n    def start(self):\n        tables = self.get_mysql_tables()\n        for table in tables:\n            # self.create_table_sqlserver(table)\n            mysql_data = self.get_mysql_data(table)\n            sqlserver_data = self.get_sqlserver_data(table)\n            data_list, small_ist = self.find_diff(mysql_data, sqlserver_data)\n            update_list, insert_list = self.update_data_in(data_list, small_ist)\n            self.write_data(table, update_list, insert_list)\n            #print(table, 'done')\n            #break\n\n\nif __name__ == '__main__':\n    fuck = mysql_to_sqlserver()\n    fuck.start()\n","sub_path":"mysql_to_sqlserver.py","file_name":"mysql_to_sqlserver.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"107955768","text":"import requests\nimport json\n\nclass runmain:\n    # def __init__(self,url,method,data=None):\n    #     self.res=self.run_main(url,method,data)#constructor; self refers to the current instance\n\n    def send_get(self,url,data,header=None):\n        res = None\n        if header != None:\n            res = requests.get(url=url, data=data, headers=header)\n            # return json.dumps(res,indent=2,sort_keys=True)#pretty-print the JSON data\n        else:\n            res = requests.get(url=url, data=data)\n        return res.json()\n\n#print(send_request(url,data))\n    def send_post(self,url,data,header=None):\n        res=None\n        if header!=None:\n            res=requests.post(url=url,data=data,headers=header)\n            #return json.dumps(res,indent=2,sort_keys=True)#pretty-print the JSON data\n        else:\n            res=requests.post(url=url,data=data)\n        return res.json()\n\n    def run_main(self,url,method,data=None,header=None):#parameters with defaults cannot come before required ones\n        res=None\n        if method=='GET':\n            res=self.send_get(url,data,header)\n        else:\n            res=self.send_post(url,data,header)\n        return json.dumps(res,ensure_ascii=False)\n#\n# if __name__ == '__main__':\n#     url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n#     data = {\n#         'cart': '11'\n#     }\n#     #run=runmain(url,'GET',data)\n#     run=runmain()\n#     print(run.run_main(url,'GET',data))","sub_path":"case/getAndPost_test.py","file_name":"getAndPost_test.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"212556317","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals  # noqa\nfrom typing import *  # noqa\nfrom abc import ABCMeta, abstractmethod  # noqa\nfrom mock import Mock, MagicMock, PropertyMock  # noqa\nfrom pytest_mock import MockFixture  # noqa\nfrom pykka import ThreadingActor, ActorProxy, ThreadingFuture  # noqa\n\nfrom transcode import (TranscoderAsyncInterface, TranscoderAsyncImplement,\n                       TranscoderAsyncFactory, TranscodeTemplateInterface,\n                       TranscodeTemplateImplement, TranscodeTemplateFactory)\n\n\nclass TestTranscoderAsyncFactory(object):\n    def test_make(self):\n        # type: () -> None\n        obj = TranscoderAsyncFactory.make()\n        assert isinstance(obj, ActorProxy)\n        assert obj.actor_ref.is_alive()\n        assert obj.actor_ref.actor_class == TranscoderAsyncImplement\n        obj.actor_ref.stop()\n\n\nclass TestTranscoderAsyncImplement(object):\n    def 
test_is_implement_of_interface(self):\n # type: () -> None\n assert issubclass(TranscoderAsyncImplement, TranscoderAsyncInterface)\n\n def create_submit_task_args_transcode_args(self):\n # type: () -> Dict[str, Any]\n transcode_args = {\n 'template_type': 1,\n 'file_id': 'transcode_args_file_id_xx',\n 'file_type': 'm3u8',\n 's': '1920x1080',\n 'r': 24,\n 'crf': 34,\n } # type: Dict[str, Any]\n return transcode_args\n\n def create_submit_task_args(\n self,\n mocker, # type: MockFixture\n transcode_args # type: Dict[str, Any]\n ):\n # type: (...) -> Tuple[str, str, str, str, str, str, Dict[str, Any], MagicMock, MagicMock] # noqa\n mock_stub_callback_ok = mocker.stub(name='callback_ok')\n mock_stub_callback_error = mocker.stub(name='callback_error')\n return ('task_id_xx', 'file_id_xx', 'm3u8', 'filename_xx',\n '/tmp/upload_path', '/tmp/vod/path', transcode_args,\n mock_stub_callback_ok, mock_stub_callback_error)\n\n def get_submit_task__transcode_func(self, mocker, obj, submit_task_args):\n # type: (MockFixture, TranscoderAsyncImplement, Tuple) -> Callable\n mock_Thread = mocker.patch(\n 'transcode.Thread',\n return_value=mocker.MagicMock()) # type: MagicMock\n obj.submit_task(*submit_task_args)\n return mock_Thread.call_args[1]['target']\n\n def test_submit_task(self, mocker):\n # type: (MockFixture) -> None\n obj = TranscoderAsyncImplement()\n mock_thread = mocker.MagicMock() # type: MagicMock\n mock_Thread = mocker.patch(\n 'transcode.Thread', return_value=mock_thread) # type: MagicMock\n obj.submit_task(*self.create_submit_task_args(\n mocker, self.create_submit_task_args_transcode_args()))\n mock_Thread.assert_called_once()\n mock_thread.setDaemon.assert_called_once_with(True)\n mock_thread.start.assert_called_once()\n\n def create_submit_task__transcode_mocks(self, mocker):\n # type: (MockFixture) -> Dict[str, MagicMock]\n mocks = {} # type: Dict[str, MagicMock]\n\n mocks['storage'] = mocker.patch('transcode.storage')\n mocks['tt'] = mocker.MagicMock()\n mocks['TranscodeTemplateFactory_make'] = mocker.patch(\n 'transcode.TranscodeTemplateFactory.make',\n return_value=mocks['tt'])\n mocks['os_path_exists'] = mocker.patch(\n 'os.path.exists', return_value=False)\n mocks['os_mkdir'] = mocker.patch('os.mkdir')\n mocks['ffmpeg_runner'] = mocker.MagicMock()\n mocks['FFmpegRunnerFactory_make'] = mocker.patch(\n 'transcode.FFmpegRunnerFactory.make',\n return_value=mocks['ffmpeg_runner'])\n mocks['media_info'] = mocker.MagicMock()\n mocks['RecordMediaInfoFactory_make'] = mocker.patch(\n 'transcode.RecordMediaInfoFactory.make',\n return_value=mocks['media_info'])\n\n return mocks\n\n def test_submit_task__transcode_callback_error_with_run_ffmpeg_cmd_raise_IOError( # noqa\n self, mocker):\n # type: (MockFixture) -> None\n mocks = self.create_submit_task__transcode_mocks(mocker)\n mocks['ffmpeg_runner'].run_cmd.side_effect = IOError\n\n submit_task_args = self.create_submit_task_args(\n mocker, self.create_submit_task_args_transcode_args())\n mock_stub_callback_error = submit_task_args[-1]\n self.get_submit_task__transcode_func(\n mocker, TranscoderAsyncImplement(), submit_task_args)()\n\n # 执行失败回调\n mock_stub_callback_error.assert_called_once()\n\n def test_submit_task__transcode_callback_ok(self, mocker):\n # type: (MockFixture) -> None\n mocks = self.create_submit_task__transcode_mocks(mocker)\n\n task_id, file_id, file_type, filename, upload_path, vod_path, \\\n transcode_args, mock_stub_callback_ok, mock_stub_callback_error = \\\n submit_task_args = \\\n self.create_submit_task_args(\n 
mocker, self.create_submit_task_args_transcode_args())\n transcode_func = self.get_submit_task__transcode_func(\n mocker, TranscoderAsyncImplement(), submit_task_args)\n transcode_func()\n\n src_path = '%s/%s' % (upload_path, filename)\n dst_path = '%s/%s' % (vod_path, transcode_args['file_id'])\n\n # 从文件存储中心取出到本地,超时 600 秒\n mocks['storage'].get_upload_file.assert_called_once_with(\n filename, upload_path)\n mocks['storage'].get_upload_file().get.assert_called_once_with(\n timeout=600)\n # 正确使用了模板生成了转码参数\n mocks['TranscodeTemplateFactory_make'].assert_called_once_with(\n str(transcode_args['template_type']))\n mocks['tt'].parse_from_json_to_cmd_args.assert_called_once_with(\n src_path, dst_path, transcode_args)\n # 如果目标目录不存在时则创建\n mocks['os_path_exists'].assert_called_once_with(dst_path)\n mocks['os_mkdir'].assert_called_once_with(dst_path)\n # 调用了 FFmpegRunnerInterface 执行\n mocks['FFmpegRunnerFactory_make'].assert_called_once()\n mocks['ffmpeg_runner'].run_cmd.assert_called_once()\n # 调用了 RecordMediaInfoInterface 刷新媒体信息\n mocks['RecordMediaInfoFactory_make'].assert_called_once_with(dst_path)\n mocks['media_info'].refresh_media_info.assert_called_once()\n mocks['media_info'].refresh_media_snapshot.assert_called_once_with(0)\n # 存入到文件存储中心,超时 600 秒\n mocks['storage'].copy_vod_dir.assert_called_once_with(dst_path)\n mocks['storage'].copy_vod_dir().get.assert_called_once_with(\n timeout=600)\n # 执行成功回调\n mock_stub_callback_ok.assert_called_once()\n\n\nclass TestTranscodeTemplateImplement(object):\n def test_is_implement_of_interface(self):\n # type: () -> None\n assert issubclass(TranscodeTemplateImplement,\n TranscodeTemplateInterface)\n\n def test_parse_from_json_to_cmd_args_with_body_s_eq_1920x1080(self):\n # type: () -> None\n obj = TranscodeTemplateImplement('1')\n src_path, dst_path = '/tmp/src_path', '/tmp/dst_path'\n body = {\n 'file_type': 'm3u8',\n 's': '1920x1080',\n 'r': 24,\n 'crf': 34,\n }\n result = obj.parse_from_json_to_cmd_args(src_path, dst_path, body)\n result_string = ' '.join(result)\n assert '-i %s' % src_path in result_string\n assert '-f hls %s' % dst_path in result_string\n assert '-threads 2' in result_string\n assert '-s 1920x1080' in result_string\n assert '-r 24' in result_string\n assert '-crf 34' in result_string\n\n\nclass TestTranscodeTemplateFactor(object):\n def test_make_with_template_type_1(self):\n # type: () -> None\n obj = TranscodeTemplateFactory.make('1')\n assert isinstance(obj, TranscodeTemplateImplement)\n","sub_path":"src/transcode_tests.py","file_name":"transcode_tests.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"3156081","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\n\ndef scrapping_page(url_site):\n url = url_site\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}\n req = Request(url, headers=headers)\n response = urlopen(req)\n html = response.read()\n html = html.decode('ISO-8859-1')\n def clear_dataset(input):\n return \" \".join(input.split()).replace('> <', '><')\n html = clear_dataset(html)\n soup_crawler = BeautifulSoup(html, 'html.parser')\n return soup_crawler\n","sub_path":"src/scripts/etl/function_scraping.py","file_name":"function_scraping.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"147883196","text":"import os\r\n\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtGui import QPixmap\r\n\r\nfrom PyQt.py.AddCar import Ui_AddCarWindow\r\n\r\n\r\nclass AddCarForm(QtWidgets.QMainWindow, Ui_AddCarWindow):\r\n def __init__(self, db, user):\r\n QtWidgets.QMainWindow.__init__(self)\r\n self.setupUi(self)\r\n self.db = db\r\n self.user = user\r\n self.add_profile_form = None\r\n self.img.setPixmap(QPixmap(os.path.abspath(\"ico/car.png\")))\r\n self.btn_add.clicked.connect(self.on_add)\r\n\r\n def on_add(self):\r\n self.db.insert_car(\r\n self.user.id,\r\n self.edit_model.text(),\r\n self.edit_number.text()\r\n )\r\n self.open_profile_form()\r\n\r\n def open_profile_form(self):\r\n from Forms.ProfileForm import ProfileForm\r\n self.add_profile_form = ProfileForm(self.db, self.db.get_user(self.user.login))\r\n self.add_profile_form.show()\r\n self.close()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"6term/SQL/sql_taxi_app/Forms/AddCarForm.py","file_name":"AddCarForm.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402263296","text":"import numpy as np\n\nclass ReplayBuffer():\n def __init__(self,max_size, input_shape, n_actions, alpha):\n self.mem_size = max_size\n self.mem_cntr = 0\n self.alpha = alpha\n\n self.state_memory = np.array([None] * max_size)\n self.new_state_memory = np.array([None] * max_size)\n self.action_memory = np.array([None] * max_size)\n self.reward_memory = np.array([None] * max_size)\n self.terminal_memory = np.array([None] * max_size)\n self.priorities = np.array([0] *max_size)\n \n\n def store_transition(self, state, action, reward, next_state, done):\n\n index = self.mem_cntr % self.mem_size\n self.state_memory[index] = state\n self.action_memory[index] = action\n self.reward_memory[index] = reward\n self.new_state_memory[index] = next_state\n self.terminal_memory[index] = done\n self.priorities[index] = max(self.priorities) if self.mem_cntr > 0 else 1.0\n self.mem_cntr += 1\n\n def update_priorities(self, indices, errors, offset):\n priorities = abs(errors) + offset\n #print(\"PRIORITIES\",priorities)\n self.priorities[indices] = priorities\n #print(\"SELF.PRIOc\",self.priorities)\n if np.all(self.priorities==self.priorities[0]):\n print(\"NOT EQUAL\",self.priorities)\n\n def sample_buffer(self,batch_size, beta):\n max_mem=min(self.mem_cntr, self.mem_size)\n priorities = self.priorities[:max_mem]\n #print(\"PRIORITIES\", priorities)\n probabilities = (priorities ** self.alpha) / ((priorities ** self.alpha).sum()) # Pr = Pi ^ a / P ^ a\n #print(\"PROB\",probabilities)\n batch=np.random.choice(max_mem, batch_size, p = probabilities, replace=False).astype(int) #max index of max_mem and shape batchsize. 
replace=False makes it so indexes aren't repeated\n \n importance = (max_mem * probabilities[batch]) ** (-beta)\n importance = importance / importance.max() #normalizing importance keeping it between 0 and 1\n \n states=[self.state_memory[i] for i in batch]\n actions=[self.action_memory[i] for i in batch]\n rewards=[self.reward_memory[i] for i in batch]\n next_states=[self.new_state_memory[i] for i in batch]\n dones=[self.terminal_memory[i] for i in batch]\n\n return states, actions, rewards, next_states, dones, importance","sub_path":"Sanjay/PriorityDuelingDoubleDQN/utils/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"90172396","text":"from datetime import datetime, timezone\nfrom nio import Block\nfrom nio.block.mixins import EnrichSignals\nfrom nio.block.mixins.enrich.enrich_signals import EnrichProperties\nfrom nio.properties import BoolProperty, ObjectProperty, PropertyHolder, \\\n StringProperty, VersionProperty\n\n\nclass CustomEnrichProperties(EnrichProperties):\n \"\"\" Overrides default enrichment to include existing fields.\"\"\"\n exclude_existing = BoolProperty(title='Exclude Existing?', default=False)\n\n\nclass Units(PropertyHolder):\n\n days = BoolProperty(title='Days', default=False, order=0)\n hours = BoolProperty(title='Hours', default=False, order=1)\n minutes = BoolProperty(title='Minutes', default=False, order=2)\n seconds = BoolProperty(title='Seconds', default=False, order=3)\n\n\nclass ElapsedTime(EnrichSignals, Block):\n\n timestamp_a = StringProperty(title='Timestamp A', order=0)\n timestamp_b = StringProperty(title='Timestamp B', order=1)\n\n output_attr = StringProperty(\n title='Outgoing Signal Attribute',\n default='timedelta',\n order=0,\n advanced=True)\n units = ObjectProperty(\n Units,\n title='Units',\n default=Units(),\n order=1,\n advanced=True)\n milliseconds = BoolProperty(\n title='Include Milliseconds',\n default=True,\n order=2,\n advanced=True)\n\n enrich = ObjectProperty(\n CustomEnrichProperties,\n title='Signal Enrichment',\n default=CustomEnrichProperties(), # use custom default\n order=100,\n advanced=True)\n version = VersionProperty('0.1.0')\n\n def process_signal(self, signal):\n delta = self._get_timedelta(signal)\n signal_dict = {\n self.output_attr(signal): delta,\n }\n output_signal = self.get_output_signal(signal_dict, signal)\n return output_signal\n\n def _get_timedelta(self, signal):\n \"\"\" Returns computed delta in terms of `units` using `signal`\"\"\"\n truncate = not self.milliseconds(signal)\n time_a = self._load_timestamp(\n self.timestamp_a(signal),\n truncate=truncate)\n time_b = self._load_timestamp(\n self.timestamp_b(signal),\n truncate=truncate)\n # subtract datetimes to get timedelta in seconds\n seconds = (time_b - time_a).total_seconds()\n if truncate and self.units().seconds(signal):\n # timedelta.total_seconds() returns a float\n seconds = int(seconds)\n # convert into more significant units\n minutes = seconds / 60\n hours = minutes / 60\n days = hours / 24\n # parse into selected units\n all_units_selected = (\n self.units().days(signal) and\n self.units().hours(signal) and\n self.units().minutes(signal) and\n self.units().seconds(signal))\n any_units_selected = (\n self.units().days(signal) or\n self.units().hours(signal) or\n self.units().minutes(signal) or\n self.units().seconds(signal))\n if not any_units_selected:\n # the default case\n delta = {\n 'days': 
days,\n 'hours': hours,\n 'minutes': minutes,\n 'seconds': seconds,\n }\n elif all_units_selected:\n _days = int(days)\n try:\n _hours = int(hours % (_days * 24))\n except ZeroDivisionError:\n # zero days\n _hours = int(hours)\n try:\n _minutes = int(minutes % (_hours * 60)) % 60\n except ZeroDivisionError:\n # zero hours\n _minutes = int(minutes)\n _seconds = seconds % 60\n delta = {\n 'days': _days,\n 'hours': _hours,\n 'minutes': _minutes,\n 'seconds': _seconds,\n }\n else:\n # some units selected\n delta = {}\n if self.units().seconds(signal):\n if not self._more_significant_selected('seconds', signal):\n # seconds only\n delta['seconds'] = seconds\n elif self.units().minutes(signal):\n if not self._more_significant_selected('minutes', signal):\n # seconds and minutes\n delta['minutes'] = int(minutes)\n delta['seconds'] = seconds % (delta['minutes'] * 60)\n elif self.units().hours(signal):\n if not self._more_significant_selected(\n 'hours', signal):\n # seconds and minutes and hours\n delta['hours'] = int(hours)\n delta['minutes'] = \\\n int(minutes % (delta['hours'] * 60))\n delta['seconds'] = seconds % 60\n else:\n # seconds and minutes and hours and days\n # aka all_units_selected, already covered\n pass\n elif self.units().days(signal):\n # seconds and minutes and days\n delta['days'] = int(days)\n delta['minutes'] = int(minutes % (60 * 24))\n delta['seconds'] = seconds % 60\n elif self.units().hours(signal):\n if not self._more_significant_selected('hours', signal):\n # seconds and hours\n delta['hours'] = int(hours)\n delta['seconds'] = seconds % (delta['hours'] * 60**2)\n else:\n # seconds and hours and days\n delta['days'] = int(days)\n delta['hours'] = int(hours % (delta['days'] * 24))\n delta['seconds'] = seconds % (delta['hours'] * 60**2)\n elif self.units().days(signal):\n # seconds and days\n delta['days'] = int(days)\n delta['seconds'] = seconds % (delta['days'] * 60**2 * 24)\n elif self.units().minutes(signal):\n if not self._more_significant_selected('minutes', signal):\n # minutes only\n delta['minutes'] = minutes\n elif self.units().hours(signal):\n if not self._more_significant_selected('hours', signal):\n # minutes and hours\n delta['hours'] = int(hours)\n delta['minutes'] = minutes % (delta['hours'] * 60)\n else:\n # minutes and hours and days\n delta['days'] = int(days)\n delta['hours'] = int(hours % (delta['days'] * 24))\n delta['minutes'] = minutes % (delta['hours'] * 60)\n elif self.units().days(signal):\n # minutes and days\n delta['days'] = int(days)\n delta['minutes'] = minutes % (delta['days'] * 60 * 24)\n elif self.units().hours(signal):\n if not self._more_significant_selected('hours', signal):\n # hours only\n delta['hours'] = hours\n else:\n # hours and days\n delta['days'] = int(days)\n delta['hours'] = hours % (delta['days'] * 24)\n elif self.units().days(signal):\n # days only\n delta['days'] = days\n return delta\n\n def _load_timestamp(self, timestamp, truncate=False):\n \"\"\" Returns a datetime object from an ISO 8601 string.\"\"\"\n if '.' 
in timestamp: # includes milliseconds\n if truncate:\n # remove millisecond component\n _timestamp = timestamp.split('.')\n timestamp = _timestamp[0] + _timestamp[1][3:]\n timestamp_format = '%Y-%m-%dT%H:%M:%S'\n else:\n timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'\n else:\n timestamp_format = '%Y-%m-%dT%H:%M:%S'\n if timestamp.endswith('Z'): # UTC timezone\n timestamp_format += 'Z'\n else:\n timestamp_format += '%z'\n # create datetime object from timestamp string\n time = datetime.strptime(timestamp, timestamp_format)\n if time.tzinfo is None: # if UTC the datetime will be offset-naive\n time = time.replace(tzinfo=timezone.utc)\n return time\n\n def _more_significant_selected(self, item, signal):\n \"\"\" Use a signal to evaluate if a unit more significant than item has\n been selected.\n \"\"\"\n units = ['seconds', 'minutes', 'hours', 'days']\n # get index to item in units\n for i, unit in enumerate(units):\n if unit == item:\n break\n # check more significant units and return True if selected\n i += 1\n for r in range(len(units) - i):\n if getattr(self.units(), units[r + i])(signal):\n return True\n return False\n","sub_path":"elapsed_time_block.py","file_name":"elapsed_time_block.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"200445386","text":"\"\"\"Defines the neural network, losss function and metrics\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass NeuralNet(nn.Module):\n def __init__(self):\n super(NeuralNet, self).__init__()\n self.fc1 = nn.Linear(24, 98)\n self.fc2 = nn.Linear(98, 98)\n self.fc3 = nn.Linear(98, 98//2)\n self.fc4 = nn.Linear(98// 2, 10) # , bias=False)\n\n self.dropout_rate = 0.25\n\n def forward(self, x):\n\n # first layer\n out_1_1 = F.relu(self.fc1(x))\n out_1_f = F.dropout(out_1_1, self.dropout_rate, training=self.training)\n\n # second layer\n out_2_1 = F.relu(self.fc2(out_1_f))\n out_2_f = F.dropout(out_2_1, self.dropout_rate, training=self.training)\n\n # third layer\n out_3_1 = F.relu(self.fc3(out_2_f))\n out_3_f = F.dropout(out_3_1, self.dropout_rate, training=self.training)\n\n out_4_l = self.fc4(out_3_f)\n # print(out)\n # out = F.relu(self.fc4(out))\n out_l_s = F.log_softmax(out_4_l, dim=1) # on purpose\n out_s = F.softmax(out_4_l, dim=1) # on purpose\n\n return out_1_f, out_2_f, out_3_f, out_4_l, out_l_s, out_s\n # return out\n\n\ndef convert_int_to_one_hot_vector(label, num_of_classes):\n\n if len(list(label.size())) < 3:\n label_shaped = label.view(-1, 1)\n\n one_hot_vector = torch.zeros([list(label.size())[0], num_of_classes], device=label.device)\n\n one_hot_vector.scatter_(1, label_shaped, 1)\n one_hot_vector = one_hot_vector.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n return one_hot_vector.cuda()\n return one_hot_vector\n\n else:\n # this is for 3d tensor .\n labels_shaped = label.view(label.size(0), label.size(1), -1)\n\n one_hot_matrix = torch.zeros([list(labels_shaped.size())[0], list(labels_shaped.size())[1], num_of_classes], device=label.device)\n one_hot_matrix.scatter_(2, labels_shaped, 1)\n # added to keep a 2d dimension of labels\n one_hot_matrix = one_hot_matrix.view(-1, list(labels_shaped.size())[1] * num_of_classes)\n one_hot_matrix = one_hot_matrix.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n return one_hot_matrix.cuda()\n return one_hot_matrix\n\n\ndef loss_fn(outputs, labels, num_of_classes):\n \"\"\"\n Compute the cross entropy loss given 
outputs and labels.\n\n Args:\n outputs: (Variable) dimension batch_size x 10 - output of the model\n labels: (Variable) dimension batch_size, where each element is a value in [0- 9]\n num_of_classes: (int) value describing number of different classes (10)\n\n Returns:\n loss (Variable): cross entropy loss for all images in the batch\n\n Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example\n demonstrates how you can easily define a custom loss function.\n \"\"\"\n\n kl_criterion = nn.KLDivLoss(reduction='batchmean')\n one_hot_vector = convert_int_to_one_hot_vector(labels, num_of_classes)\n\n return kl_criterion(outputs, one_hot_vector)\n\n\ndef accuracy(outputs, labels):\n \"\"\"\n Compute the accuracy, given the outputs and labels for all images.\n\n Args:\n outputs: (np.ndarray) dimension batch_size x 10 - log softmax output of the model\n labels: (np.ndarray) dimension batch_size, where each element is a value in [0-9]\n\n Returns: (float) accuracy in [0,1]\n \"\"\"\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs == labels)/float(labels.size)\n\n\ndef incorrect(images, outputs, labels, curr_min=0, curr_max=1, dest_min=0, dest_max=255):\n \"\"\"\n Keep all images for which the classification is wrong\n\n Args:\n images: (np.ndarray) dimension batch_size x 24- input to the model\n outputs: (np.ndarray) dimension batch_size x 10 - log softmax output of the model\n labels: (np.ndarray) dimension batch_size, where each element is a value in [0- 9]\n\n Returns: (list) of images for which the classification is wrong, the classification and the correct label\n \"\"\"\n mat_out = []\n outputs = np.argmax(outputs, axis=1)\n # find incorrect indexes\n current_incorrect_bin = (outputs != labels)\n current_incorrect_indexes = np.nonzero(current_incorrect_bin)\n\n # find compatible incorrect samples and save them in a list\n samples_numpy = images.cpu().numpy()\n\n # convert back to range [0, 255]\n samples_numpy = \\\n dest_min + (dest_max - dest_min) * (samples_numpy - curr_min) / (curr_max - curr_min)\n # find samples\n incorrect_samples = (samples_numpy[current_incorrect_indexes]).astype(int)\n\n # find classifier result\n labels_pred_numpy = outputs\n incorrect_labels = labels_pred_numpy[current_incorrect_indexes]\n\n # find true labels\n labels_actual_numpy = labels\n true_labels = labels_actual_numpy[current_incorrect_indexes]\n\n # organize data\n all_labels = np.column_stack((incorrect_labels, true_labels))\n numpy_mat_out = np.concatenate((incorrect_samples, all_labels), axis=1)\n length = len(numpy_mat_out.tolist())\n if length > 0:\n mat_out.extend(numpy_mat_out.tolist())\n\n return mat_out\n\n\n# maintain all metrics required in this dictionary- these are used in the training and evaluation loops\nmetrics = {\n 'accuracy': accuracy\n # could add more metrics such as accuracy for each token type\n}\n","sub_path":"pytorch/schi/to_shap/net_to_fviz.py","file_name":"net_to_fviz.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"473127942","text":"class Quaternion:\r\n def __init__(self, a, b, c, d):\r\n self.a = a\r\n self.b = b\r\n self.c = c\r\n self.d = d\r\n self.norm = (a**2 + b**2 + c**2 + d**2)**0.5\r\n self.norm_sqr = self.norm**2\r\n\r\n def __str__(self):\r\n q_str1 = f'{self.a} ' + '{0:+}i '.format(self.b)\r\n q_str2 = '{0:+}j '.format(self.c) + '{0:+}k '.format(self.d)\r\n return q_str1 + q_str2\r\n\r\n def 
__repr__(self):\r\n return f'({self.a}, {self.b}, {self.c}, {self.d})'\r\n\r\n def __add__(self, other):\r\n a = self.a + other.a\r\n b = self.b + other.b\r\n c = self.c + other.c\r\n d = self.d + other.d\r\n result = Quaternion(a, b, c, d)\r\n return result\r\n\r\n def __mul__(self, other):\r\n if isinstance(other, Quaternion):\r\n a = (other.a*self.a - other.b*self.b - other.c*self.c - other.d*self.d)\r\n b = (other.a*self.b + other.b*self.a - other.c*self.d + other.d*self.c)\r\n c = (other.a*self.c + other.b*self.d + other.c*self.a - other.d*self.b)\r\n d = (other.a*self.d - other.b*self.c + other.c*self.b + other.d*self.a)\r\n elif isinstance(other, (int, float)):\r\n a = self.a * other\r\n b = self.b * other\r\n c = self.c * other\r\n d = self.d * other\r\n else:\r\n return 'Invalid multiplier! Please enter a number or a quaternion.'\r\n result = Quaternion(a, b, c, d)\r\n return result\r\n\r\n def __truediv__(self, other):\r\n if isinstance(other, Quaternion):\r\n inv_a = self.a / self.norm_sqr\r\n inv_b = -self.b / self.norm_sqr\r\n inv_c = -self.c / self.norm_sqr\r\n inv_d = -self.d / self.norm_sqr\r\n\r\n a = (other.a*inv_a - other.b*inv_b - other.c*inv_c - other.d*inv_d)\r\n b = (other.a*inv_b + other.b*inv_a - other.c*inv_d + other.d*inv_c)\r\n c = (other.a*inv_c + other.b*inv_d + other.c*inv_a - other.d*inv_b)\r\n d = (other.a*inv_d - other.b*inv_c + other.c*inv_b + other.d*inv_a)\r\n elif isinstance(other, (int, float)):\r\n a = self.a / other\r\n b = self.b / other\r\n c = self.c / other\r\n d = self.d / other\r\n else:\r\n return 'Invalid divisor! Please enter a number or a quaternion.'\r\n result = Quaternion(a, b, c, d)\r\n return result\r\n\r\n def __mod__(self, irrelevant):\r\n \"\"\"\r\n The quaternion norm calculation doesn't require an additional parameter,\r\n but in order to provide the functionality using the '%' operator, the\r\n 'irrelevant' argument was added.\r\n \"\"\"\r\n return self.norm\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, Quaternion):\r\n q = self.a == other.a\r\n x = self.b == other.b\r\n y = self.c == other.c\r\n z = self.d == other.d\r\n return q and x and y and z\r\n else:\r\n return ('Incomparable values!')\r\n","sub_path":"06-advanced-python/hw/quaternions.py","file_name":"quaternions.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"624035774","text":"'''\nRegression.\n\nThe dataset describes 13 numerical properties of houses in Boston suburbs and\nis concerned with modeling the price of houses in those suburbs in thousands of\ndollars.\n\nReasonable performance for models evaluated using Mean Squared Error (MSE) are\naround 20 in squared thousands of dollars\n(or $4,500 if you take the square root).\n'''\nimport numpy\nimport pandas\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.model_selection import cross_val_score, KFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n\n# Create and compile model\ndef create_model():\n model = Sequential()\n model.add(Dense(20, input_dim=13, init='normal', activation='relu'))\n model.add(Dense(1, init='normal'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model\n\n\n# Load dataset\ndataframe = pandas.read_csv(\"csv/housing.csv\", delim_whitespace=True,\n header=None)\ndataset = dataframe.values\nX = dataset[:, 0:13]\nY = dataset[:, 
13]\n\n# Fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n\n# Evaluate model with standardized dataset\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasRegressor(build_fn=create_model, nb_epoch=100,\n batch_size=5, verbose=0)))\n\n# Executes one or more models within a pass of the cross validation procedure\npipeline = Pipeline(estimators)\n\n# 10-fold cross validation test\nkfold = KFold(n_splits=10, random_state=seed)\n\n# Evaluate a score by cross-validation\nresults = cross_val_score(pipeline, X, Y, cv=kfold)\nprint(\"Results: %.2f (%.2f) MSE\" % (results.mean(), results.std()))\n","sub_path":"keras/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"142576289","text":"# encoding: utf-8\n# network file -> build Search Space for Auto-DeepLab\n# @author: yanwei.li\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom config import config as cfg\nfrom retrain_cell import Cell\nimport logging\nimport math\n\nclass AutoDeepLab(nn.Module):\n \"\"\"\n Define Search Space as Auto-DeepLab with one-shot training.\n \"\"\"\n def __init__(self, init_channel=64, arch_choice=None, cell_choice=None, out_planes=None, input_size=None,\n criterion=None, imgnet_pretrain=False):\n super(AutoDeepLab, self).__init__()\n # whether to use affine in BN.\n if cfg.sync_bn:\n self.affine = True\n print('Using Sync BN......')\n else:\n self.affine = False\n\n # img input size, 224 if using ImgNet pre-train.\n self.imgnet_pretrain = imgnet_pretrain\n self.input_size = input_size\n fix_channel = 64\n # start with 2 stem layers down-sampling by 4.\n self.stem_1 = nn.Sequential(\n nn.Conv2d(3, fix_channel, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(fix_channel, affine=self.affine))\n self.stem_2 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(fix_channel, fix_channel, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(fix_channel, affine=self.affine))\n self.stem_3 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(fix_channel, 2*fix_channel, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(2*fix_channel, affine=self.affine))\n\n self.all_cell_list = nn.ModuleList()\n\n if not self.imgnet_pretrain:\n logging.info('Seg Mode...')\n self.criterion = criterion\n else:\n logging.info('ImageNet Pre-train Mode...')\n self.out_channel = 256\n self.cell_num_list = cfg.cell_num_list\n self.cell_num_list = self.cell_num_list[:cfg.search_layer]\n assert len(self.cell_num_list) == cfg.search_layer\n in_channel = 2*fix_channel\n out_channel = init_channel #* 2\n # using the initial layer\n self.arch_choice = arch_choice\n allow_keep = False\n allow_down = False\n if self.arch_choice[0]==0 or self.arch_choice[1]==0:\n allow_keep = True\n if self.arch_choice[0]==1 or self.arch_choice[1]==1:\n allow_down = True\n\n self.init_layer = Cell(C_in=in_channel, C_out=out_channel, cell_choice=cell_choice,\n allow_up=False, allow_keep=allow_keep, allow_down=allow_down, config=cfg)\n in_channel = out_channel\n # add cells in each layer\n for layer_index in range(len(self.arch_choice)):\n in_channel_cell = in_channel * pow(2, self.arch_choice[layer_index])\n if layer_index == 3:\n self.low_feat_channel = in_channel_cell\n # add res and dim switch to each cell\n allow_up = False\n allow_keep = False\n allow_down = False\n if layer_index < len(self.arch_choice)-2:\n 
next_choice = self.arch_choice[layer_index+1]\n next_next_choice = self.arch_choice[layer_index+2]\n # add res up and dim down by 2\n if next_choice-self.arch_choice[layer_index]==-1 or next_next_choice-self.arch_choice[layer_index]==-1:\n allow_up = True\n if next_choice-self.arch_choice[layer_index]==0 or next_next_choice-self.arch_choice[layer_index]==0:\n allow_keep = True\n # dim down and resolution up by 2\n if next_choice-self.arch_choice[layer_index]==1 or next_next_choice-self.arch_choice[layer_index]==1:\n allow_down = True\n elif layer_index == len(self.arch_choice)-2:\n next_choice = self.arch_choice[layer_index + 1]\n # prepare for the last output\n allow_keep = True\n if next_choice-self.arch_choice[layer_index]==-1:\n allow_up = True\n if next_choice-self.arch_choice[layer_index]==1:\n allow_down = True\n else:\n allow_keep = True\n\n self.all_cell_list.append(Cell(C_in=in_channel_cell, C_out=in_channel_cell, cell_choice=cell_choice,\n allow_up=allow_up, allow_keep=allow_keep, allow_down=allow_down, config=cfg))\n # build ASPP block and classifier\n channel_multi = pow(2, self.arch_choice[-1])\n in_channel_aspp = in_channel * channel_multi\n conv_1x1 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channel_aspp, in_channel_aspp, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(in_channel_aspp, affine=self.affine))\n conv_3x3 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channel_aspp, in_channel_aspp, kernel_size=3, stride=1, padding=96 // (channel_multi * 4),\n dilation=96 // (channel_multi * 4), bias=False),\n nn.BatchNorm2d(in_channel_aspp, affine=self.affine))\n global_1x1 = nn.Sequential(\n nn.ReLU(),\n nn.AvgPool2d(self.input_size // (channel_multi * 4)),\n nn.Conv2d(in_channel_aspp, in_channel_aspp, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(in_channel_aspp, affine=self.affine))\n fuse_1x1 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channel_aspp*3, in_channel_aspp, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(in_channel_aspp, affine=self.affine))\n # ASPP module list.\n self.layer_aspp = nn.ModuleDict({'conv_1x1':conv_1x1, 'conv_3x3':conv_3x3, 'global_1x1':global_1x1, 'fuse_1x1':fuse_1x1})\n if not self.imgnet_pretrain:\n if cfg.using_decoder:\n self.decoder_switch = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(self.low_feat_channel, 48, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(48, affine=self.affine))\n self.decoder_conv = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(48+in_channel_aspp, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256, affine=self.affine),\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256, affine=self.affine),\n nn.ReLU())\n in_channel_aspp = 256\n # the output layer\n self.layer_predict = nn.Conv2d(in_channels=in_channel_aspp, out_channels=out_planes,\n kernel_size=3, stride=1, padding=1)\n\n else:\n self.aspp_switch = nn.Conv2d(in_channels=in_channel_aspp, out_channels=self.out_channel,\n kernel_size=3, stride=1, padding=1)\n if self.imgnet_pretrain:\n # building classifier\n self.classifier = nn.Linear(self.out_channel, 1000)\n\n def forward(self, x, label=None, drop_prob=0.0, step_rate=0.0):\n \"\"\"\n :param x: input image\n :param label: label for ImgNet pre-train\n :return: loss or prediction\n \"\"\"\n h_l1 = self.stem_1(x)\n h_l1 = self.stem_2(h_l1)\n h_l1 = self.stem_3(h_l1)\n # the first layer\n layer_rate = 0.0\n h_l1_list = self.init_layer(h_l2=h_l1, h_l1=h_l1, drop_prob=drop_prob, 
layer_rate=layer_rate, step_rate=step_rate)\n        prev_out_list = h_l1_list\n        prev_prev_out_list = [[], [], [h_l1]]\n        # build forward outputs\n        prev_prev_index = -2\n        prev_index = 0\n        low_level_feat = None\n        for layer_index in range(len(self.cell_num_list)):\n            h_l1_in = prev_out_list[self.arch_choice[layer_index] - prev_index + 1][0]\n            if abs(self.arch_choice[layer_index] - prev_prev_index) <= 1:\n                h_l2_in = prev_prev_out_list[self.arch_choice[layer_index] - prev_prev_index + 1][0]\n            else:\n                h_l2_in = h_l1_in\n            layer_rate = (layer_index+1)/float(len(self.cell_num_list))\n            h_l_list = self.all_cell_list[layer_index](h_l2=h_l2_in, h_l1=h_l1_in, drop_prob=drop_prob,\n                                                       layer_rate=layer_rate, step_rate=step_rate)\n            prev_prev_out_list = prev_out_list\n            prev_out_list = h_l_list\n            prev_prev_index = prev_index\n            prev_index = self.arch_choice[layer_index]\n            # low level feature (Cell_2) for Decoder Module\n            if cfg.using_decoder and layer_index == 2:\n                low_level_feat = [h_l_list[i][0] for i in range(3) if len(h_l_list[i])>0][0]\n\n        # use ASPP in ImageNet and segmentation\n        aspp_stride = int(math.pow(2, self.arch_choice[-1])*4)\n        aspp_input = prev_out_list[1][0]\n        if not self.imgnet_pretrain:\n            up_stride = self.input_size // aspp_stride\n            conv_1x1 = self.layer_aspp['conv_1x1'](aspp_input)\n            conv_3x3 = self.layer_aspp['conv_3x3'](aspp_input)\n            global_1x1 = self.layer_aspp['global_1x1'](aspp_input)\n            global_1x1 = F.interpolate(input=global_1x1, scale_factor=up_stride, mode='bilinear')\n            fuse_cat = torch.cat([conv_1x1, conv_3x3, global_1x1], dim=1)\n            fuse_1x1 = self.layer_aspp['fuse_1x1'](fuse_cat)\n            if cfg.using_decoder:\n                low_level_feat = self.decoder_switch(low_level_feat)\n                low_up_stride = int(math.pow(2, self.arch_choice[3]))\n                high_up_stride = int(math.pow(2, self.arch_choice[-1]))\n                low_level_feat = F.interpolate(input=low_level_feat, scale_factor=low_up_stride, mode='bilinear')\n                high_level_feat = F.interpolate(input=fuse_1x1, scale_factor=high_up_stride, mode='bilinear')\n                decoder_feat = torch.cat([low_level_feat, high_level_feat], dim=1)\n                fuse_1x1 = self.decoder_conv(decoder_feat)\n                aspp_stride = 4\n            pred = self.layer_predict(fuse_1x1)\n        else:\n            pred = self.aspp_switch(aspp_input)\n        pred = F.interpolate(input=pred, scale_factor=aspp_stride, mode='bilinear')\n\n        if not self.imgnet_pretrain:\n            if label is not None:\n                loss = self.criterion(pred, label)\n                return loss\n            else:\n                pred = F.log_softmax(pred, dim=1)\n                return pred\n        else:\n            pred = nn.AdaptiveAvgPool2d(1)(pred)\n            pred = pred.view(pred.size(0), -1)  # flatten pooled features before the classifier\n            pred = self.classifier(pred)\n            return pred\n\n\nif __name__ == \"__main__\":\n    model = AutoDeepLab()\n    print(model)\n","sub_path":"seg retrain/retrain_network.py","file_name":"retrain_network.py","file_ext":"py","file_size_in_byte":10924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"456617983","text":"import matplotlib.pyplot as plt\nimport math as m\n\ndef sarmo(N,p): \n    S=[1]; a=0\n    n=1\n    while n<=N:\n        a=a+1/n\n        S.append(a-p*m.log(n))\n        n=n+1\n    return S\n\nSp=sarmo(20,0.9)\nprint(Sp)\nplt.figure()\nplt.plot(Sp)\nplt.title(\"S_n(p)\"); \nplt.show()\n","sub_path":"codes/clase_04/sarmo.py","file_name":"sarmo.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"553122467","text":"import evernote.edam.type.ttypes as Types #from evernote\nfrom evernote.api.client import EvernoteClient #from evernote\n\n\nclass evernote_connect(NebriOS):\n    listens_to = 
['save_to_evernote']\n\n    def check(self):\n        return self.save_to_evernote == True\n\n    def action(self):\n        self.save_to_evernote = \"RAN\"\n        dev_token = shared.EVERNOTE_DEV_TOKEN\n        client = EvernoteClient(token=dev_token, sandbox=False)\n        # you can get your personal DEV_TOKEN here:\n        # https://www.evernote.com/api/DeveloperToken.action\n\n        note_title = \"Test note title\"\n        note_body = \"Test note body\"\n\n        note_store = client.get_note_store()\n        note = Types.Note()\n        note.title = note_title\n        # note content must be valid ENML: the XML prologue plus an en-note wrapper\n        note.content = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">'\n        note.content += '<en-note>%s</en-note>' % note_body\n        note_store.createNote(note)\n","sub_path":"evernote_connect.py","file_name":"evernote_connect.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"308760395","text":"import collections\n\nimport numpy as np\n\n\ndef entropy(labels):\n    _, val_freq = np.unique(labels, return_counts=True)\n    val_prob = val_freq / len(labels)\n    return -val_prob.dot(np.log2(val_prob))\n\n\ndef info_gain(attr_col, labels):\n    e_labels = entropy(labels)\n    print(\"parent entropy: \", e_labels)\n    e_vals = 0.\n    vals, freqs = np.unique(attr_col, return_counts=True)\n    for i in range(len(vals)):\n        count = freqs[i]\n        val = vals[i]\n        e_vals += count * entropy(labels[attr_col == val])\n        print(\"val: \", val)\n        print(\"freq: %f\\tent:%f\" % (count / len(labels), entropy(labels[attr_col == val])))\n    e_vals = e_vals / len(labels)\n    return e_labels - e_vals\n\n\nclass DecisionNode:\n\n    def __init__(self, depth=0, attribute=-1, label=-1, parent=None):\n        self.children = {}\n        self.attribute = attribute\n        self.label = label\n        self.parent = parent\n        self.depth = depth\n        # print(\"node made!\", sep='\\t')\n        # self.print_node()\n\n    def add_child(self, value, node):\n        self.children[value] = node\n        node.parent = self\n        # print(\"child with value %s added to below node: \" % value)\n        # self.print_node()\n\n    def print_node(self):\n        # if self.parent is not None:\n        #     print(\"attr=%d \\tlabel=%d \\tparent=%d\" % (self.attribute, self.label, self.parent))\n        # else:\n        print(\"attr=%d \\tlabel=%d\" % (self.attribute, self.label))\n\n\ndef id3(examples, labels, attributes, target_values, max_depth, leaf_purity):  # add base conditions\n    \"\"\"\n    :param leaf_purity: if the percentage of the dominant tag exceeds this, the node becomes a leaf\n    :param max_depth:\n    :param examples: x\n    :param labels: y\n    :param attributes: left attributes to use as a node in this subtree\n    :param target_values: values that each attribute can get (for handling nodes with 0 number of examples)\n    :return: root node\n    \"\"\"\n\n    def find_best_attr(func=info_gain):\n        max_val = -float('inf')\n        final_attr = -1\n        for attr in attributes:\n            attr_col = examples[:, attr]\n            new_val = func(attr_col, labels)\n            print(\"------------------- attr: %d\\tIG: %f\" % (attr, new_val))\n            if new_val > max_val:\n                max_val = new_val\n                final_attr = attr\n        return final_attr\n\n    dominant_label = np.bincount(labels).argmax()\n    depth = len(target_values) - np.size(attributes)\n\n    tags, freqs = np.unique(labels, return_counts=True)\n    # if len(tags) == 1 or len(attributes) == 0 or depth == max_depth:\n    if (np.max(freqs) / len(labels)) >= leaf_purity \\\n            or len(attributes) == 0 or depth == max_depth:\n        return DecisionNode(label=dominant_label)\n\n    best_attr = find_best_attr()\n    # print(\"best attr: \", best_attr)\n    root = DecisionNode(attribute=best_attr)\n\n    # attr_values = np.unique(examples[:, best_attr])\n    attr_values = target_values[best_attr]\n    for val in attr_values:\n        ind = examples[:, 
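# Quick numeric check of the entropy/info_gain helpers defined above,
# assuming they are in scope. A binary attribute that splits two balanced
# classes perfectly has an information gain equal to the parent entropy
# (1 bit), since both children become pure.
import numpy as np

toy_labels = np.array([0, 0, 1, 1])
toy_attr = np.array(['a', 'a', 'b', 'b'])
assert abs(entropy(toy_labels) - 1.0) < 1e-9
assert abs(info_gain(toy_attr, toy_labels) - 1.0) < 1e-9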
best_attr] == val\n examples_val = examples[ind]\n labels_val = labels[ind]\n if np.sum(ind) == 0:\n child = DecisionNode(label=dominant_label)\n else:\n child = id3(examples_val, labels_val,\n np.delete(attributes, np.where(attributes == best_attr)),\n target_values, max_depth, leaf_purity)\n root.add_child(val, child)\n return root\n\n\nclass DecisionTree:\n\n def __init__(self, examples, labels, max_depth, purity=1, alg=id3):\n \"\"\"\n :param examples: training examples\n :param labels: labels of examples\n :param alg: learning algorithm\n \"\"\"\n self.attributes = np.array(range(np.size(examples, 1)))\n self.x = examples\n self.y = labels\n self.max_depth = max_depth\n self.purity = purity\n self.alg = alg\n self.root = None\n\n def fit(self):\n target_values = {}\n for attr in self.attributes:\n target_values[attr] = np.unique(self.x[:, attr])\n self.root = self.alg(self.x, self.y, self.attributes, target_values,\n max_depth=self.max_depth, leaf_purity=self.purity)\n\n def traverse(self):\n visited, queue = set(), collections.deque([self.root])\n visited.add(self.root)\n while queue:\n v: DecisionNode = queue.popleft()\n for val, u in v.children.items():\n if u.parent is not None:\n print(\"val=%s\\tattr=%d\\tlabel=%d\\tparent_attr=%d\" % (val, u.attribute, u.label, u.parent.attribute))\n visited.add(u)\n queue.append(u)\n\n def predict(self, x):\n n = np.size(x, axis=0)\n y_pred = []\n for i in range(n):\n # print(x[i, :])\n v: DecisionNode = self.root\n while v.label == -1:\n value = x[i, v.attribute]\n v = v.children[value]\n # v.print_node()\n # print(v.label)\n y_pred.append(v.label)\n return np.array(y_pred)\n\n def accuracy(self, x, y):\n y_pred = self.predict(x)\n return np.mean(y_pred == y)\n","sub_path":"dtree.py","file_name":"dtree.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"572626708","text":"from Panda import *\n\n# We can generalize this idea a little.\n# This time, write a pandaLine function that takes three arguments:\n# The number of pandas\n# The location of the next panda\n# The distance between pandas\ndef pandaLine(number, loc, dist):\n if number > 0:\n panda(position = loc)\n pandaLine(number-1, loc+dist, dist)\n# You need to make a panda at the right place and figure out how\n# number, loc, and dist will change in the recursion\n \n\n\n# Make a line of pandas starting at (0,0,0) moving one unit to the right\n\npandaLine(5, P3(0,0,0), P3(1,0,0))\n# Make another line starting at (0,0,0) moving 2 back\n\n# This time, use mouseControl to allow the camera to be moved around\nmouseControlCamera(camera)\n\n# Can you complete the square of pandas?\n# Can you use any model you want instead of just pandas?\nstart()\n","sub_path":"Handouts/src/3-1 Interpolation and Collections/02-morerecursion.py","file_name":"02-morerecursion.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"129683571","text":"#!/usr/local/bin/python3.5\nimport json\n\nfin = open(\"packets.json\", \"r\")\npackets = json.load(fin)\nfin.close()\n#print packets\n#numpkts = len(packets)\n#print \"Read\", numpkts, \"packets\"\ni = 0\nfor packet in packets :\n i = i + 1\n print (\"Packet\", i, \":\") \n header = packet['header']\n print (\" Header - Version:\", header['version'], \"Checksum:\", header['checksum'], \"PduType:\", header['pdutype'])\n print (\" Payload:\")\n j = 0\n payload = packet['payload'] \n 
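# pandaLine above generates positions by linear recursion; the same sequence
# can be produced iteratively, which sidesteps recursion limits for long
# lines. A hypothetical stand-in using plain 3-tuples instead of P3 vectors:
def line_positions(number, loc, dist):
    positions = []
    for _ in range(number):
        positions.append(loc)
        loc = tuple(a + b for a, b in zip(loc, dist))  # same update as the recursive call
    return positions

assert line_positions(3, (0, 0, 0), (1, 0, 0)) == [(0, 0, 0), (1, 0, 0), (2, 0, 0)]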
for tlv in payload['tlvs'] : \n j = j + 1\n print (\" TLV\", j, \"- Type:\", tlv['type'], \", Len:\", tlv['length'], \", Val:\", tlv['val'])\n\nprint (\"Done\")\n\n","sub_path":"json-reader.py","file_name":"json-reader.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"78069805","text":"from itertools import product\nfrom sys import stdout as out\nfrom mip import Model, xsum, minimize, BINARY\n\n# solve single TSP dengan MIP\n# credit to https://python-mip.readthedocs.io/en/latest/intro.html\ndef solveTSP(adjMatrixSubGraph, listPath, nodeKantor, listNode, mapIdxToNode, mapNodeToIdx):\n model = Model()\n listNode.insert(0,nodeKantor)\n n = len(listNode)\n\n # add variable\n x = [[model.add_var(var_type=BINARY) for j in range(n)] for i in range(n)]\n y = [model.add_var() for i in range(n)]\n\n # add objective function\n model.objective = minimize(xsum(adjMatrixSubGraph[mapNodeToIdx[listNode[i]]][mapNodeToIdx[listNode[j]]]*x[i][j] for i in range(n) for j in range(n)))\n\n V = set(range(n))\n # constraint : leave each city only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n\n # constraint : enter each city only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1\n\n\n # subtour elimination\n for (i, j) in product(V - {0}, V - {0}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n\n\n # optimizing\n model.optimize(max_seconds=30)\n\n\n res = []\n # checking if a solution was found\n if model.num_solutions:\n print(\"SOLUTION FOUND\")\n for i in range(n):\n for j in range(n):\n if (x[i][j].x==1):\n print(listNode[i],\" \",listNode[j],\" : \", listPath[mapNodeToIdx[listNode[i]]][mapNodeToIdx[listNode[j]]])\n res.append((listNode[i],listNode[j]))\n\n \n\n else:\n print(\"gak ketemu\")\n return res","sub_path":"src/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"178317578","text":"# Advanced Programming in Python -- Lesson 4 Exercise 1\n# Jason Virtue\n# Start Date 2/17/2020\n\n\"\"\"\nSimple iterator examples\n\"\"\"\n\nclass IterateMe_1:\n \"\"\"\n\n returns a sequence of numbers\n ( like range() )\n \"\"\"\n\n def __init__(self, start, increment, stop):\n self.start = start\n self.increment = increment\n self.stop = stop\n self.current = start\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.current += self.increment\n if self.current < self.stop:\n return self.current\n else:\n raise StopIteration\n\nif __name__ == \"__main__\":\n\n iter = IterateMe_1(5,2,17)\n for i in iter:\n if i >10: break\n print(i)\n # it's stateful\n for i in iter:\n print(i)\n\n # reinitialize \"loses state\"\n iter = IterateMe_1(5,2,17)\n for i in iter:\n print(i)","sub_path":"students/j_virtue/lesson04/exercise/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280892583","text":"\n\n#calss header\nclass _FAMILIAR():\n\tdef __init__(self,): \n\t\tself.name = \"FAMILIAR\"\n\t\tself.definitions = [u'easy to recognize because of being seen, met, heard, etc. 
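# IterateMe_1 above returns self from __iter__, so one instance is exhausted
# by the first loop -- that is the "it's stateful" behaviour the demo prints.
# A container that hands out a fresh iterator per loop avoids that; minimal
# sketch using only the standard library:
class Counter:
    def __init__(self, stop):
        self.stop = stop

    def __iter__(self):
        return iter(range(self.stop))  # new, independent iterator each time

c = Counter(3)
assert list(c) == [0, 1, 2]
assert list(c) == [0, 1, 2]  # unlike IterateMe_1, a second pass starts over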
before: ', u'to know something or someone well: ', u'informal and friendly, sometimes in a way that does not show respect to someone who is not a family member or close friend: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_familiar.py","file_name":"_familiar.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"17242905","text":"import pandas as pd\nfrom math import sqrt\nimport numpy as np\n\n\n\nmovies_df = pd.read_csv('movies.csv')\nratings_df = pd.read_csv('ratings.csv')\nmovies_df['year'] = movies_df.title.str.extract('(\\(\\d\\d\\d\\d\\))',expand=False)\nmovies_df['year'] = movies_df.year.str.extract('(\\d\\d\\d\\d)',expand=False)\nmovies_df['title'] = movies_df.title.str.replace('(\\(\\d\\d\\d\\d\\))', '')\nmovies_df['title'] = movies_df['title'].apply(lambda x: x.strip())\nmovies_df = movies_df.drop('genres', 1)\nratings_df = ratings_df.drop('timestamp', 1)\n\ndef recommend(userInput):\n inputMovies = pd.DataFrame(userInput)\n inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]\n inputMovies = pd.merge(inputId, inputMovies)\n inputMovies = inputMovies.drop('year', 1)\n userSubset = ratings_df[ratings_df['movieId'].isin(inputMovies['movieId'].tolist())]\n userSubsetGroup = userSubset.groupby(['userId'])\n userSubsetGroup = sorted(userSubsetGroup, key=lambda x: len(x[1]), reverse=True)\n userSubsetGroup = userSubsetGroup[0:100]\n\n #Store the Pearson Correlation in a dictionary, where the key is the user Id and the value is the coefficient\n pearsonCorrelationDict = {}\n\n #For every user group in our subset\n for name, group in userSubsetGroup:\n #Let's start by sorting the input and current user group so the values aren't mixed up later on\n group = group.sort_values(by='movieId')\n inputMovies = inputMovies.sort_values(by='movieId')\n nRatings = len(group)\n temp_df = inputMovies[inputMovies['movieId'].isin(group['movieId'].tolist())]\n tempRatingList = temp_df['rating'].tolist()\n tempGroupList = group['rating'].tolist()\n Sxx = sum([i**2 for i in tempRatingList]) - pow(sum(tempRatingList),2)/float(nRatings)\n Syy = sum([i**2 for i in tempGroupList]) - pow(sum(tempGroupList),2)/float(nRatings)\n Sxy = sum( i*j for i, j in zip(tempRatingList, tempGroupList)) - sum(tempRatingList)*sum(tempGroupList)/float(nRatings)\n \n if Sxx != 0 and Syy != 0:\n pearsonCorrelationDict[name] = Sxy/sqrt(Sxx*Syy)\n else:\n pearsonCorrelationDict[name] = 0\n\n pearsonCorrelationDict.items()\n\n pearsonDF = pd.DataFrame.from_dict(pearsonCorrelationDict, orient='index')\n pearsonDF.columns = ['similarityIndex']\n pearsonDF['userId'] = pearsonDF.index\n pearsonDF.index = range(len(pearsonDF))\n topUsers=pearsonDF.sort_values(by='similarityIndex', ascending=False)[0:50]\n topUsersRating=topUsers.merge(ratings_df, left_on='userId', right_on='userId', how='inner')\n topUsersRating['weightedRating'] = topUsersRating['similarityIndex']*topUsersRating['rating']\n tempTopUsersRating = topUsersRating.groupby('movieId').sum()[['similarityIndex','weightedRating']]\n tempTopUsersRating.columns = ['sum_similarityIndex','sum_weightedRating']\n recommendation_df = pd.DataFrame()\n recommendation_df['weighted average 
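# The Sxx/Syy/Sxy expressions above are the computational form of the Pearson
# correlation. A spot check against scipy.stats.pearsonr on toy ratings,
# assuming SciPy is available; the numbers are made up.
from math import sqrt
from scipy.stats import pearsonr

xs = [1.0, 2.0, 3.0, 4.0]
ys = [2.0, 4.0, 5.0, 9.0]
n = len(xs)
sxx = sum(i * i for i in xs) - sum(xs) ** 2 / n
syy = sum(i * i for i in ys) - sum(ys) ** 2 / n
sxy = sum(i * j for i, j in zip(xs, ys)) - sum(xs) * sum(ys) / n
assert abs(sxy / sqrt(sxx * syy) - pearsonr(xs, ys)[0]) < 1e-9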
recommendation score'] = tempTopUsersRating['sum_weightedRating']/tempTopUsersRating['sum_similarityIndex']\n recommendation_df['movieId'] = tempTopUsersRating.index\n recommendation_df = recommendation_df.sort_values(by='weighted average recommendation score', ascending=False)\n return list(movies_df.loc[movies_df['movieId'].isin(recommendation_df.head(10)['movieId'].tolist())].title)\n\n","sub_path":"FLASK/collaborative_model.py","file_name":"collaborative_model.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"457258549","text":"from django.contrib.auth.decorators import login_required\nfrom django.views.generic import View, TemplateView\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\n\nfrom apps.checkout.forms import CheckoutForm\nfrom apps.cart.models import Cart\nfrom apps.checkout.models import Order\nfrom django.http.response import HttpResponseRedirect\nfrom django.urls import reverse_lazy\n\n\n@login_required\ndef CheckoutView(request):\n try:\n # kiem tra xem use hien tai co order nao hay khong\n order_of_user = Order.objects.get(\n user_id=request.user, is_complete=False\n )\n # Neu nhu nguoi dung da co\n # Cap nhat la gia tien cua order\n cart_of_user = Cart.objects.filter(\n user_id=request.user, is_active=True\n )[0]\n order_of_user.order_total = float(cart_of_user.get_total())\n order_of_user.save()\n\n form = CheckoutForm()\n\n context = {\"form\": form, \"cart\": cart_of_user}\n return render(request, \"checkout/checkout.html\", context)\n\n except ObjectDoesNotExist:\n # neu nguoi dung chua co se tao moi 1 order\n cart_of_user = Cart.objects.filter(\n user_id=request.user, is_active=True\n )\n if cart_of_user.exists(): # kiem tra nguoi dung co gio hang nao khong\n cart = cart_of_user[0]\n new_order = Order.objects.create(\n user_id=request.user,\n cart_id=cart,\n order_total=int(cart.get_total()),\n )\n return redirect(\"checkout:checkout\")\n\n\n@login_required\ndef checkout(request):\n form = CheckoutForm(request.POST, request.FILES)\n order = Order.objects.get(user_id=request.user, is_complete=False)\n cart_of_user = Cart.objects.filter(\n user_id=request.user, is_active=True\n )[0]\n if form.is_valid():\n if order.cart_id == cart_of_user:\n fullname = form.cleaned_data.get(\"fullname\")\n shipping_address = form.cleaned_data.get(\"shipping_address\")\n phone_number = form.cleaned_data.get(\"phone_number\")\n order_description = form.cleaned_data.get(\"order_description\")\n\n # save form\n order.fullname = fullname\n order.shipping_address = shipping_address\n order.phone_number = phone_number\n order.order_description = order_description\n order.is_complete = True\n order.save()\n\n # change status cart and cart_movie\n cart_of_user.is_active = False\n cart_of_user.save()\n\n for cart_movie in cart_of_user.movies.all():\n cart_movie.is_active = False\n cart_movie.save()\n\n # send email success payment\n messages = render_to_string(\"checkout/checkout_email.html\", {\n 'name': order.user_id.username,\n })\n email = EmailMessage(\n \"Thank you for purchasing movies\",\n messages,\n settings.DEFAULT_FROM_EMAIL,\n [request.user.email],\n )\n email.fail_silently = False\n email.send()\n # payment success\n return 
HttpResponseRedirect(reverse_lazy('checkout:checkout-done'))\n else:\n context = {\"form\": form, \"cart\": cart_of_user}\n return render(request, \"checkout/checkout.html\", context)\n\n\nclass CheckoutDoneView(LoginRequiredMixin, TemplateView):\n template_name = \"checkout/checkout_done.html\"\n","sub_path":"apps/checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"313101628","text":"\"\"\"\nGiven an array where elements are sorted in ascending order, convert it to a height balanced BST.\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n #return self.bin_search_recursion(nums, 0, len(nums)-1)\n return self.iterative(nums)\n \n def bin_search_recursion(self, nums, begin,end):\n if begin > end:\n return None\n \n middle = begin + (end-begin)/2\n root = TreeNode(nums[middle])\n root.left = self.bin_search(nums,begin, middle-1)\n root.right = self.bin_search(nums,middle+1, end)\n \n return root\n \n #for fun\n def iterative(self, nums):\n length = len(nums)\n if length == 0:\n return None\n \n begin, end = 0, length -1\n stack = []\n root = TreeNode(nums[(end-begin)/2])\n stack.append( (root, begin, end) )\n while stack:\n (node, begin, end ) = stack.pop()\n middle = begin + (end-begin)/2\n \n #create the right node\n if middle+1 <= end:\n #print middle, end\n node.right = TreeNode(nums[middle+1+ (end- (middle+1))/2])\n stack.append( ( node.right, middle+1, end ) )\n \n #create the left node\n if begin<=middle-1:\n #print begin, middle\n node.left = TreeNode( nums[begin+ (middle -1 -begin)/2] )\n stack.append( (node.left, begin, middle-1) )\n return root\n \n","sub_path":"108_Convert_Sorted_Array_to_Binary_Search_Tree.py","file_name":"108_Convert_Sorted_Array_to_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"75918821","text":"import os\n\nfrom absl import app as absl_app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.wide_deep import uv_dataset\nfrom official.wide_deep import wide_deep_run_loop\n\n\ndef define_uv_flags():\n wide_deep_run_loop.define_wide_deep_flags()\n flags.adopt_module_key_flags(wide_deep_run_loop)\n flags_core.set_defaults(data_dir='uv_dataset',\n model_dir='uv_model',\n train_epochs=1,\n epochs_between_evals=1,\n inter_op_parallelism_threads=0,\n intra_op_parallelism_threads=0,\n batch_size=500)\n\n\ndef build_estimator(model_dir, model_type, model_column_fn, inter_op, intra_op):\n \"\"\"Build an estimator appropriate for the given model type.\"\"\"\n wide_columns, deep_columns = model_column_fn()\n hidden_units = [100, 50]\n\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig().replace(\n session_config=tf.ConfigProto(device_count={'GPU': 0},\n inter_op_parallelism_threads=inter_op,\n intra_op_parallelism_threads=intra_op))\n\n if model_type == 'wide':\n return tf.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=wide_columns,\n config=run_config)\n elif 
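# The recursive builder above is defined as bin_search_recursion but invoked
# as self.bin_search, so those names must agree for it to run. A
# self-contained sketch of the same idea, with an inorder check that the
# tree reproduces its sorted input:
class _Node(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def build_bst(nums, lo, hi):
    if lo > hi:
        return None
    mid = lo + (hi - lo) // 2
    root = _Node(nums[mid])
    root.left = build_bst(nums, lo, mid - 1)
    root.right = build_bst(nums, mid + 1, hi)
    return root

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

vals = [-10, -3, 0, 5, 9]
assert inorder(build_bst(vals, 0, len(vals) - 1)) == vals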
model_type == 'deep':\n return tf.estimator.DNNClassifier(\n model_dir=model_dir,\n feature_columns=deep_columns,\n hidden_units=hidden_units,\n config=run_config)\n else:\n return tf.estimator.DNNLinearCombinedClassifier(\n model_dir=model_dir,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units,\n config=run_config)\n\n\ndef run_uv(flags_obj):\n \"\"\"Construct all necessary functions and call run_loop.\n\n Args:\n flags_obj: Object containing user specified flags.\n \"\"\"\n\n train_file = os.path.join(flags_obj.data_dir, uv_dataset.TRAINING_FILE)\n test_file = os.path.join(flags_obj.data_dir, uv_dataset.EVAL_FILE)\n\n # Train and evaluate the model every `flags.epochs_between_evals` epochs.\n def train_input_fn():\n return uv_dataset.input_fn(\n train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)\n\n def eval_input_fn():\n return uv_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)\n\n tensors_to_log = {\n 'average_loss': '{loss_prefix}head/truediv',\n 'loss': '{loss_prefix}head/weighted_loss/Sum'\n }\n\n wide_deep_run_loop.run_loop(\n name=\"uv level experiments\", train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n model_column_fn=uv_dataset.build_model_columns,\n build_estimator_fn=build_estimator,\n flags_obj=flags_obj,\n tensors_to_log=tensors_to_log,\n early_stop=True)\n\n\ndef main(_):\n with logger.benchmark_context(flags.FLAGS):\n run_uv(flags.FLAGS)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n define_uv_flags()\n absl_app.run(main)\n","sub_path":"official/wide_deep/uv_main.py","file_name":"uv_main.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"138948682","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport argparse\nfrom scipy.stats import pearsonr\n\n\n####################\n#\n# expression method: TMM, qn, tpm, rpkm\n# trait: Height, BMI, PRS\n# tissue: Adipose_Tissue, Brain, Breast, Colon, Muscle, fibroblast\n#\n####################\n\nEXP_PATH = '~/DATA1/jaeho/Omnigenic_study/01.raw_data/01.GTEx_data/02.Expression_data/02.2017_data'\n\n\ndef import_expression(tissue, norm_method):\n # get expression file\n norm_method = norm_method.lower()\n if norm_method == 'tmm':\n filepath = '{0}/Expression_Data/{1}/GTEx_20160115_{1}_tmm.log2.txt'.format(EXP_PATH, tissue)\n elif norm_method == 'rpkm':\n filepath = '{0}/Expression_Data/{1}/GTEx_20160115_{1}_rpkm.geneID.log2.editID.txt'.format(EXP_PATH, tissue)\n elif norm_method == 'qn':\n filepath = '{0}/Expression_Data/{1}/GTEx_20160115_{1}_qn.log2.txt'.format(EXP_PATH, tissue)\n elif norm_method == 'tpm':\n filepath = '{0}/Expression_Data/{1}/GTEx_20160115_{1}_tpm.geneID.log2.editID.txt'.format(EXP_PATH, tissue)\n\n exp_df = pd.read_csv(filepath, sep='\\t', header=0, index_col=0)\n\n # import expressed index file\n expressed = pd.read_csv('{0}/Expression_Data/Expressed/GTEx_20160115_{1}_tpm.log2.expressed.txt'.format(EXP_PATH, tissue),\n squeeze=True, sep='\\t', header=None, index_col=0)\n # pick only expressed genes\n ix1 = np.intersect1d(exp_df.columns, expressed.values)\n exp_df[ix1]\n\n # sample id -> participant id\n new_index = []\n for i in exp_df.index.str.rsplit('_', 3):\n new_index.append(i[0])\n exp_df.index = new_index\n return exp_df\n\n\ndef import_trait(trait):\n if trait in ['Height', 'BMI']:\n trait_df = 
pd.read_csv('{0}/GTEx_sampleID_information.txt'.format(EXP_PATH),\n sep='\\t', header=0, index_col=0)\n if trait == 'Height':\n return trait_df['HGHT']\n elif trait == 'BMI':\n return trait_df['BMI']\n else:\n tokens = trait.split('_')\n trait_df = pd.read_csv('~/DATA1/jaeho/Omnigenic_study/01.raw_data/03.Polygenic_score/Output_PRS_score/{0}/{0}.PRSice.All_pval.sum.score.txt'.format(tokens[0]),\n sep='\\t', header=0, index_col=0)\n #print trait_df.head()\n return trait_df['{0:<08}'.format(float(tokens[-1]))]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--tissue\", type=str, required=True,\n choices=['Adipose_Tissue', 'Brain', 'Breast', 'Colon', 'Muscle', 'fibroblast'],\n help=\"Tissue type\")\n parser.add_argument(\"--trait\", type=str, required=True,\n choices=['Height', 'BMI', 'Height_prs_0.5', 'Height_prs_0.05', 'Height_prs_0.01',\n 'Height_prs_0.001', 'Height_prs_0.0001', 'BMI_prs_0.5', 'BMI_prs_0.05',\n 'BMI_prs_0.01', 'BMI_prs_0.001', 'BMI_prs_0.0001'],\n help='Trait type')\n parser.add_argument(\"--norm_method\", type=str, default='TMM',\n choices=['TMM', 'RPKM', 'TPM', 'QN'],\n help=\"Normalization method\")\n parser.add_argument(\"-o\", \"--outdir\", type=str, default='.',\n help=\"Output directory\")\n args = parser.parse_args()\n\n\n trait_series = import_trait(args.trait)\n exp_df = import_expression(args.tissue, args.norm_method)\n df = exp_df.copy()\n df[args.trait] = trait_series[exp_df.index]\n pca = PCA()\n X_pca = pca.fit_transform(exp_df.values)\n\n\n\n pca_corr_info = []\n for i in range(X_pca.shape[1]):\n r, p = pearsonr(df[args.trait].values, X_pca[:, i])\n if abs(r) > 0.2:\n # one-based pc number\n pca_corr_info.append([i + 1, pca.explained_variance_ratio_[i], r, p])\n\n pca_gene_corr_info = []\n for k in pca_corr_info:\n for i in range(5, df.shape[1]):\n exp_val = df.iloc[:, i]\n r, p = pearsonr(X_pca[:, k[0] - 1], exp_val)\n if abs(r) > 0.35:\n pca_gene_corr_info.append([k[0], df.columns[i], r, p])\n\n with open('{0}/{1}_{2}.{3}.pc_trait.correlation.txt'.format(args.outdir, args.tissue, args.trait, args.norm_method), 'w') as f:\n # header\n f.write('PC\\tExplained_Variance_Ratio\\tR\\tp-val\\n')\n for tokens in pca_corr_info:\n f.write('\\t'.join(str(i) for i in tokens) + '\\n')\n\n with open('{0}/{1}_{2}.{3}.bestPC_gene.correlation.txt'.format(args.outdir, args.tissue, args.trait, args.norm_method), 'w') as f:\n # header\n f.write('PC\\tgene_id\\tR\\tp-val\\n')\n for tokens in pca_gene_corr_info:\n f.write('\\t'.join(str(i) for i in tokens) + '\\n')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pca/pca_gene.py","file_name":"pca_gene.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"368906195","text":"from bs4 import BeautifulSoup\nimport re\nimport os\nimport pycurl\nimport requests\n\n# GLOBAL VARIABLES\nGLOBAL_validateURLs = True\n\nclass Discography:\n\t# A bandcamp discography has a Name, URL, and List of albums\n\t# Name is the artists name\n\t# URL to bandcamp\n\t# Albums a list of albums that make up the discography\n\tdef __init__(self, name, url ):\n\t\tself.name = name\n\t\tif ( self.validURL( url ) ):\n\t\t\tself.url = url\n\n\tdef updateName( self, name ):\n\t\tself.name = name\n\n\tdef getName( self ):\n\t\treturn str(self.name)\n\n\tdef updateURL( self, url ):\n\t\tself.url = url\n\n\tdef getURL( self ):\n\t\treturn str(self.url)\n\n\tdef validURL( self, url ):\n\t\t# only reason to turn off URL validations is 
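# The script above keeps principal components whose correlation with the
# trait exceeds 0.2 and reports explained_variance_ratio_ alongside. A tiny
# sketch of that part of the scikit-learn API on synthetic data:
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X_toy = rng.randn(100, 5)
pca_toy = PCA()
scores = pca_toy.fit_transform(X_toy)
assert scores.shape == (100, 5)
assert abs(pca_toy.explained_variance_ratio_.sum() - 1.0) < 1e-9  # full PCA: ratios sum to 1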
speed... or\n\t\t# if you're a sycophants, and product of incest\n\t\tif not GLOBAL_validateURLs:\n\t\t\tprint( 'WARNING:\\t GLOBAL_validateURLs is set to false, NOT validating URLs')\n\t\t\treturn True\n\t\turl = str( url.lower() )\n\t\t# make sure the URL is a bandcamp URL\n\t\tif url.find( 'bandcamp' ) != -1:\n\t\t\ttry:\n\t\t\t\tr = requests.get(url)\n\t\t\texcept requests.exceptions.RequestException as e:\n\t\t\t\tself.urlError = str(e)\n\t\t\t\treturn False\n\t\t\tif 200 == r.status_code:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef __str__(self):\n\t\treturnString = self.getName() + '\\t' + self.getURL()\n\t\tif hasattr( self, 'urlError' ):\n\t\t\treturnString += '\\nERROR:\\n' + str(self.urlError)\n\t\treturn str( returnString )\n\nclass Album( Discography ):\n\t# An album has a Title, URL, and track listing\n\t# Title of the album\n\t# URL of the album\n\t# tracks is a list of track objects\n\tdef __init__(self, name='', url='', tracks=[] ):\n\t\tself.name = name\n\t\tif ( self.validURL( url ) ):\n\t\t\tself.url = url\n\t\telse:\n\t\t\tself.url = ''\n\t\tself.tracks = tracks\n\n\tdef updateURL( self, tracks ):\n\t\tself.tracks = tracks\n\n\tdef getURL( self ):\n\t\treturn str(self.tracks)\n\n\tdef __str__(self):\n\t\treturnString = self.getName() + '\\t' + self.getURL()\n\t\treturn str( returnString )\n\nclass Track( Album ):\n\t# A track has a Title, URL, and a downloaded flag:\n\t# Title of the track\n\t# URL of the bandcamp mp3\n\t# downloaded is False untill the track is downloaded\n\tdef __init__(self, title='', url='' ):\n\t\tself.title = title\n\t\t# even though the URL is pulled from the HTML double check it's validity\n\t\tif ( self.validURL( url ) ):\n\t\t\tself.url = url\n\t\telse:\n\t\t\tself.url = ''\n\t\tself.downloaded = False\n\n\nnewDiscography = Discography( 'test', 'https://erinmckeown.bandcamp.com' )\nprint( newDiscography )","sub_path":"Song Downloader.py","file_name":"Song Downloader.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"611633047","text":"from __future__ import (absolute_import, division,\r\n print_function, unicode_literals)\r\nexec(open(\"ground.py\").read())\r\n#\r\n\r\n# mine\r\nimport hamiltonian\r\nimport diffeo\r\nimport sde\r\nfrom utility import *\r\n#\r\ndef run(dict,r=0):\r\n \"\"\"\r\n Qr=dict['landmarks'][0,:,:]\r\n Apply push-forward map to sample Qr.\r\n Find average using MAP4.\r\n Plot phase-plane with confidence balls and original data (from push-forward map).\r\n \"\"\"\r\n import os.path\r\n if 'fname' in dict:\r\n filename=dict['fname']\r\n else:\r\n print(\"No filename given\")\r\n exit(1)\r\n print(\"\\n\",filename,\" \",dict['ext'],\"============================================\",\"\\n\")\r\n plt.ion()\r\n G=hamiltonian.GaussGreen(dict['ell'],0)\r\n # create set of landmarks by push-forward\r\n SDE = sde.SDE(G)\r\n SDE.set_lam_beta(dict['lam'],dict['beta'],True)\r\n Qr=dict['landmarks'][r,:,:] # use first set as reference\r\n dict['landmarks_n']=SDE.add_sde_noise(Qr, dict['num'])\r\n # find landmark average\r\n SDE=sde.MAP4(G)\r\n SDE.set_data_var(dict['data_var'])\r\n SDE.set_lam_beta(dict['lam'],dict['beta'],False)\r\n SDE.set_landmarks(dict['landmarks_n'])\r\n SDE.set_no_steps(dict['no_steps'])\r\n SDE.solve()\r\n cov_q,cov_p=SDE.cov()\r\n #\r\n # plot landmarks (noisy source data)\r\n plt.figure(1)\r\n plot_setup()\r\n plt.axis('equal')\r\n plot_landmarks(dict['landmarks_n'],shadow=3,lw=0.2)\r\n 
plt.savefig(filename+dict['ext']+'_samps.pdf',bbox_inches='tight') # lam\r\n # plot landmarks with average and confidence ball\r\n plt.figure(1)\r\n plot_setup()\r\n plt.axis('equal')\r\n plot_average(SDE.Qh) #\r\n Qav=np.average(dict['landmarks_n'],axis=0)\r\n plot_average(Qav,2) # 2=color scheme\r\n add_sd_plot(SDE.Qh, cov_q)\r\n plt.savefig(filename+dict['ext']+'_av.pdf',bbox_inches='tight')\r\n print(\"...finished.\")\r\n####################################################################\r\ndef my_set(i,j,r): # i=ten or two samps, j = experiment, r= which image\r\n noise_var=0.0\r\n beta=25\r\n lam=0.1\r\n if j==5:\r\n #beta=50\r\n dict=exp5(noise_var)\r\n if j==4:\r\n #beta=40\r\n dict=exp4(noise_var)\r\n if j==1:\r\n dict=exp1(noise_var)\r\n if j==2:\r\n dict=exp2(noise_var)\r\n #beta=50\r\n r=1\r\n #\r\n dict['beta']=beta\r\n dict['lam']=lam\r\n\r\n if i==1:\r\n dict['ext']='two'\r\n dict['num']=2\r\n if i==2:\r\n dict['ext']='ten'\r\n dict['num']=10\r\n if i==3:\r\n dict['ext']='fifty'\r\n dict['num']=50\r\n dict['data_var']=noise_var+0.05\r\n run(dict,r)\r\n######################\r\nif __name__ == \"__main__\":\r\n # do this\r\n plt.ion()\r\n #\r\n jlist=[1,2,4,5]\r\n rlist=[0,1,0,0]\r\n for k in range(len(jlist)):\r\n print(k)\r\n for i in range(3,4):\r\n my_set(i,jlist[k], rlist[k])\r\n","sub_path":"run_supp_split2.py","file_name":"run_supp_split2.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"330786590","text":"class Gun:\n def __init__(self, model):\n self.model = model\n self.bullet_count = 0\n\n def __str__(self):\n return \"[%s]有%d颗子弹\" % (self.model, self.bullet_count)\n\n def add_bullet(self, count):\n self.bullet_count += count\n\n def shoot(self):\n if self.bullet_count is 0:\n print(\"[%s]没有子弹,不能发射\" % self.model)\n return\n\n print(\"[%s] 突突突...\" % self.model)\n self.bullet_count -= 1\n\n\nclass Solider:\n def __init__(self, name):\n self.name = name\n # 先定义属性,设置为None。对象创建完成后再设置值\n self.gun = None\n\n def rush(self):\n if self.gun is None:\n print(\"[%s]没有枪,不能冲锋\" % self.name)\n return\n\n self.gun.add_bullet(50)\n print(\"[%s] 发起冲锋\" % self.name)\n self.gun.shoot()\n\n\nak47 = Gun(\"ak47\")\n\nxusanduo = Solider(\"许三多\")\nxusanduo.gun = ak47\nxusanduo.rush()\nprint(ak47)\n","sub_path":"oop/s_02_士兵突击.py","file_name":"s_02_士兵突击.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"527695555","text":"#!/usr/bin/env python3\n\"\"\"\nTest for client\n\"\"\"\n\nfrom unittest import TestCase, mock\nfrom unittest.mock import patch, PropertyMock\nfrom parameterized import parameterized\nfrom client import GithubOrgClient\nfrom utils import requests\n\n\nclass TestGithubOrgClient(TestCase):\n \"\"\"\n Test for client.GithubOrgClient\n \"\"\"\n @parameterized.expand([\n (\"google\", {\"payload\": True}),\n (\"abc\", {\"payload\": False}),\n ])\n @patch('client.get_json')\n def test_org(self, name, payload, fn_get):\n \"\"\" test case for client.org \"\"\"\n goc = GithubOrgClient(name)\n fn_get.return_value = payload\n\n self.assertEqual(payload, goc.org)\n fn_get.assert_called_once()\n\n def test_public_repos_url(self):\n \"\"\"\n test for client._public_repos_url\n \"\"\"\n with patch(\"client.GithubOrgClient._public_repos_url\",\n new_callable=PropertyMock) as mock_obj:\n mock_obj.return_value = {\"url\": 'http://google.com'}\n\n r = 
GithubOrgClient(mock_obj.return_value)._public_repos_url\n\n self.assertEqual(r, mock_obj.return_value)\n mock_obj.assert_called_once()\n\n @parameterized.expand([\n ({'license': {'key': 'my_license'}}, 'my_license', True),\n ({'license': {'key': 'other_license'}}, 'my_license', False)\n ])\n def test_has_license(self, repo, license, expected):\n \"\"\"\n test for client._has_licence\n \"\"\"\n self.assertEqual(GithubOrgClient.has_license(repo, license), expected)\n\n @patch('client.get_json')\n def test_public_repos(self, mock1):\n \"\"\"\n test for client._public_repos\n \"\"\"\n a = {\"name\": \"a\", \"license\": {\"key\": \"k\"}}\n b = {\"name\": \"b\", \"license\": {\"key\": \"l\"}}\n c = {\"name\": \"c\"}\n method = 'client.GithubOrgClient._public_repos_url'\n mock1.return_value = [a, b, c]\n with patch(method, PropertyMock(return_value=\"www.k.com\")) as m2:\n goc = GithubOrgClient(\"my_goc\")\n self.assertEqual(goc.public_repos(), ['a', 'b', 'c'])\n self.assertEqual(goc.public_repos(\"k\"), ['a'])\n self.assertEqual(goc.public_repos(\"c\"), [])\n self.assertEqual(goc.public_repos(17), [])\n mock1.assert_called_once_with(\"www.k.com\")\n m2.assert_called_once_with()\n","sub_path":"0x09-Unittests_and_integration_tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"11649115","text":"from challenge2 import *\n\nfilepath = \"graph_data.txt\"\n\nprint('\\ntesting readGraph function.')\ndata = readGraph(filepath)\nassert data[0] == ['1', '2', '3', '4', '5']\nassert data[1] == [(1, 2), (2, 1), (1, 4), (4, 1), (2, 3), (3, 2), (2, 4), (4, 2), (2, 5), (5, 2), (3, 5), (5, 3)]\n\nprint('\\nreadGraph works. testing graph class initialization.')\nnew_graph = Graph(len(data[0]))\n\nassert new_graph.numberOfVertices == 5\nassert new_graph.vertices == [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n\nprint('\\ngraph initialization works. testing addEdges and getVertices.')\nnew_graph.addEdges(data[1])\nassert new_graph.getVertices() == [[0, 1, 0, 1, 0], [1, 0, 1, 1, 1], [0, 1, 0, 0, 1], [1, 1, 0, 0, 0], [0, 1, 1, 0, 0]]\n\nprint('\\naddEdges and getVertices work. testing breadth_first_search.')\nassert new_graph.breadth_first_search(filepath, 1, 3) == \"Vertices in shortest path: 1,2,4,3\\nNumber of edges in shortest path: 3\"\n\nprint('\\nbreadth_first_search works.\\n\\ntests complete. 
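# The tests above stub a property with patch(..., new_callable=PropertyMock).
# The same pattern in a minimal, self-contained form (standard library only);
# the Client class here is hypothetical:
from unittest import mock

class Client:
    @property
    def url(self):
        return "https://example.com"  # imagine this doing real work

with mock.patch.object(Client, "url", new_callable=mock.PropertyMock) as m:
    m.return_value = "stub"
    assert Client().url == "stub"
    m.assert_called_once_with()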
great job!')\n","sub_path":"challenges/challenge2/challenge2_test.py","file_name":"challenge2_test.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"182042702","text":"import docx\nimport os\nfrom PIL import Image\nimport datetime\n\n\n\n\n\n\ndef B27_img_extract(media_B27,ImageName,RPmtime_urltime,FiledocxPathName):\n doc = docx.Document(FiledocxPathName)\n ImgSavePathlist = []\n rIdlist = ['rId7']\n for i in rIdlist:\n imgSavePath = media_B27 + '\\\\' + ImageName + RPmtime_urltime + '_' + i + '.jpg'\n # imgSavePath.encode('utf-8')\n # print(imgSavePath)\n imgblob = doc.part.related_parts[i]\n img = open(imgSavePath, 'wb')\n img.write(imgblob.blob)\n img.close()\n box1 = (634, 462)\n im_m = Image.open(imgSavePath)\n out = im_m.resize(box1, Image.ANTIALIAS)\n out = out.convert('RGB')\n out.save(imgSavePath)\n ImgSavePathlist.append(imgSavePath)\n return ImgSavePathlist[0]\n\n","sub_path":"doc2mysql/img_extract.py","file_name":"img_extract.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229146758","text":"\"\"\"\ntraining.py\nZhiang Chen, April 2020\n\"\"\"\n\nimport torch\nimport torch.utils.data\nimport torchvision.datasets\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom utils import *\nimport torchvision.models as models\nfrom data import EurekaDataset\nimport os\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\ntorch.manual_seed(0)\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n\neureka_normalize = transforms.Normalize(mean=[0.44, 0.50, 0.43],\n std=[0.26, 0.25, 0.26])\n\neureka_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n eureka_normalize,])\n\n\ntrain_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,])\n\ntest_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n normalize,])\n\ndef neural_network(architecture, nm_classes, pretrained=True, change_last_layer=True):\n assert architecture in model_names\n print(\"=> creating model '{}'\".format(architecture))\n model = models.__dict__[architecture](pretrained=pretrained)\n if change_last_layer:\n if architecture.startswith('densenet'):\n in_features = model.classifier.in_features\n model.classifier = nn.Linear(in_features=in_features, out_features=nm_classes)\n else:\n in_features = model.fc.in_features\n model.fc = nn.Linear(in_features=in_features, out_features=nm_classes)\n\n return model\n\ndef cifar10(root='./datasets/cifar10/', val=True):\n train = torchvision.datasets.CIFAR10(root, train=True, download=True, transform=train_transform)\n test = torchvision.datasets.CIFAR10(root, train=False, download=True, transform=test_transform)\n \"\"\"\n if val:\n indices = torch.randperm(len(train)).tolist()\n train_set = torch.utils.data.Subset(train, indices[:-10000])\n val_set = torch.utils.data.Subset(train, indices[-10000:])\n return train_set, val_set, test\n \"\"\"\n return train, test\n\ndef eureka():\n\ttrain = EurekaDataset('./datasets/Eureka/images/','./datasets/Eureka/class.json', eureka_transform)\n\ttest = 
EurekaDataset('./datasets/Eureka/images_test/','./datasets/Eureka/class.json', eureka_transform)\n\ttest.addJson('./datasets/Eureka/label_102.json')\n\treturn train, test\n\nif __name__ == '__main__':\n cuda = 'cuda:0'\n device = torch.device(cuda)\n nm_classes = 3\n train_dataset, test_dataset = eureka()\n\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)\n\n model = neural_network('densenet161', nm_classes)\n\n #if you want to load weight\n\t#model.load_state_dict(torch.load(\"trained_param_eureka_cls/epoch_0002.param\"))\t\n\t#model.eval()\t\n\n model.to(device)\n\n criterion = nn.CrossEntropyLoss().to(device)\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.00001)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.65)\n\n #init_epoch = 0\n #num_epochs = 60\n #print_freq = 100\n\n #save_param = \"trained_param3_resnext101/epoch_{:04d}.param\".format(init_epoch)\n #torch.save(model.state_dict(), save_param)\n weight_path = \"trained_param_densenet161\"\n weights = [f for f in os.listdir(weight_path) if f.endswith(\".param\")]\n weights.sort()\n\n w = \"epoch_0009.param\"\n weight_name = os.path.join(weight_path, w)\n model.load_state_dict(torch.load(weight_name))\n validate(train_dataloader, model, criterion, device)\n\n \"\"\"\n for w in weights:\n weight_name = os.path.join(weight_path, w)\n #save_param = \"trained_param3_resnext101/epoch_{:04d}.param\".format(epoch)\n #train(train_dataloader, model, criterion, optimizer, epoch, device, print_freq)\n #lr_scheduler.step()\n print(weight_name)\n model.load_state_dict(torch.load(weight_name))\n validate(test_dataloader, model, criterion, device)\n #acc = test(model, test_dataset, device)\n #print(\"acc: %f\" % acc)\n #torch.save(model.state_dict(), save_param)\n \"\"\"\n","sub_path":"image_classification/valid_densenet161.py","file_name":"valid_densenet161.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213744118","text":"# This script processes all raw datasets into a more compact form, removing all unnecessary features.\r\n# Also contains functions to create new time-series datasets.\r\n\r\n# EPA data: https://aqs.epa.gov/aqsweb/airdata/download_files.html#AQI\r\n# Meswest data: https://mesowest.utah.edu/\r\n# NOAA data: https://www.ncdc.noaa.gov/cdo-web/search\r\n\r\n# EPA data is originally in the form [State, County, State Code, County Code, Date, AQI, Category, Defining Parameter, Defining Site, # of Sites]\r\n# We want to focus on Salt Lake county so we will remove all other data. 
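# The Eureka-specific mean/std passed to transforms.Normalize above are
# per-channel statistics of the training images. A sketch of how such values
# can be computed, assuming image tensors shaped (N, C, H, W) scaled to [0, 1]:
import torch

batch = torch.rand(16, 3, 8, 8)  # stand-in for a batch of training images
channel_mean = batch.mean(dim=(0, 2, 3))
channel_std = batch.std(dim=(0, 2, 3))
assert channel_mean.shape == (3,) and channel_std.shape == (3,)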
Also need to remove unneeded features.\r\n\r\n# Meso data units:\r\n# Temperature - C\r\n# Wind_X, Wind_Y - m/s\r\n# Humidity - %\r\n# Pressure - Pascals\r\n\r\nimport csv\r\nimport math\r\nimport dateutil.parser as parser\r\nfrom datetime import timedelta\r\nfrom os import listdir\r\nfrom operator import add\r\n\r\n# Checks if the input exists, if not returns 0, otherwise converts it to a float\r\ndef verify_input(data):\r\n\tif data == '':\r\n\t\treturn 0.0\r\n\telse:\r\n\t\treturn float(data)\r\n\r\n# Processes a AQI data file from the EPA\r\ndef process_epa_data(file_name):\r\n\tdata = {}\r\n\r\n\twith open(file_name) as csv_file:\r\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\r\n\r\n\t\t# Look for rows with Utah, Salt Lake\r\n\t\tfor row in csv_reader:\r\n\t\t\tif row[0] == 'Utah' and row[1] == 'Salt Lake':\r\n\t\t\t\tdata[row[4]] = [row[5], row[6], row[7]]\r\n\r\n\treturn data\r\n\r\n# Processes all files in raw_epa_data folder, creating a single epa_data csv file\r\ndef create_epa_dataset():\r\n\twith open('epa_data.csv', 'w', newline='') as csvfile:\r\n\t\twriter = csv.writer(csvfile, delimiter=',')\r\n\t\twriter.writerow(['Date', 'AQI', 'Category', 'Parameter']) # Adds a header\r\n\r\n\t\t# Iterate over all datasets\r\n\t\tfor f in listdir('raw_epa_data'):\r\n\t\t\tprocessed_data = process_epa_data('raw_epa_data/' + f)\r\n\t\t\tfor date, data in processed_data.items():\r\n\t\t\t\twriter.writerow([date] + data)\r\n\r\n# Processes weather data from MesoWest\r\n# Some of the data is useless, while some of it is recorded every hour\r\n# Thus we need to process the data into a more useful format, such as avg. temp for the day, avg. wind speed, etc.\r\n# The first index will be the date in the format 'YYYY-MM-DD' to match the EPA data\r\n# Current output is [Date] = [Temp (C), Wind (m/s), Humidity (%), Pressure (P)]\r\ndef process_meso_data(file_name):\r\n\tprocessed_data = {}\r\n\r\n\twith open(file_name) as csv_file:\r\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\r\n\r\n\t\t# Get the CSV headers and units\r\n\t\theader = next(csv_reader)\r\n\t\tunits = next(csv_reader)\r\n\r\n\t\t# Determine indices to access\r\n\t\ttemp_index = header.index('air_temp_set_1')\r\n\t\thumidity_index = header.index('relative_humidity_set_1')\r\n\t\tspeed_index = header.index('wind_speed_set_1')\r\n\t\tangle_index = header.index('wind_direction_set_1')\r\n\t\tpressure_index = header.index('pressure_set_1d')\r\n\r\n\t\t# Initialize stuff on the first line\r\n\t\t# We need to group data by the day, so keep a running count of all daily averages we would like to track\r\n\t\tfirst_line = next(csv_reader)\r\n\t\tcurrent_date = first_line[1].split('T', 2)[0]\r\n\t\tavg_temp = verify_input(first_line[temp_index])\r\n\t\tavg_humidity = verify_input(first_line[humidity_index])\r\n\t\tspeed = verify_input(first_line[speed_index])\r\n\t\tangle = verify_input(first_line[angle_index])\r\n\t\tavg_wind = [speed * math.cos(math.radians(angle)), speed * math.sin(math.radians(angle))]\r\n\t\tavg_pressure = verify_input(first_line[pressure_index])\r\n\t\tcount = 1.0\r\n\r\n\t\tfor row in csv_reader:\r\n\t\t\tdate, time = row[1].split('T', 2) # Get the date from the timestamp\r\n\r\n\t\t\t# End of sequence, store averages\r\n\t\t\tif date != current_date:\r\n\t\t\t\tprocessed_data[current_date] = [avg_temp/count, avg_wind[0]/count, avg_wind[1]/count, avg_humidity/count, avg_pressure/count]\r\n\t\t\t\tavg_temp = 0.0\r\n\t\t\t\tavg_wind = [0.0, 0.0]\r\n\t\t\t\tavg_humidity = 0.0\r\n\t\t\t\tavg_pressure = 
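# Wind is accumulated above as x/y components rather than raw angles, which
# avoids the wrap-around problem: averaging headings of 350 and 10 degrees
# should give 0, not 180. A small check, assuming speeds in m/s and
# angles in degrees:
import math

readings = [(2.0, 350.0), (2.0, 10.0)]  # (speed, direction)
wx = sum(s * math.cos(math.radians(a)) for s, a in readings) / len(readings)
wy = sum(s * math.sin(math.radians(a)) for s, a in readings) / len(readings)
assert abs(wy) < 1e-9                                        # opposing sine parts cancel
assert abs(wx - 2.0 * math.cos(math.radians(10.0))) < 1e-9   # mean heading is 0 degrees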
0.0\r\n\t\t\t\tcount = 0.0\r\n\t\t\t\tcurrent_date = date\r\n\r\n\t\t\t# Update averages\r\n\t\t\tavg_temp += verify_input(row[temp_index])\r\n\t\t\tavg_humidity += verify_input(row[humidity_index])\r\n\t\t\tspeed = verify_input(row[speed_index])\r\n\t\t\tangle = verify_input(row[angle_index])\r\n\t\t\tavg_wind[0] += speed * math.cos(math.radians(angle))\r\n\t\t\tavg_wind[1] += speed * math.sin(math.radians(angle))\r\n\t\t\tavg_pressure += verify_input(row[pressure_index])\r\n\t\t\tcount += 1\r\n\r\n\t\t# Dump the last value\r\n\t\tprocessed_data[current_date] = [avg_temp/count, avg_wind[0]/count, avg_wind[1]/count, avg_humidity/count, avg_pressure/count]\r\n\r\n\treturn processed_data\r\n\r\n# Processes all raw data from MesoWest into a single CSV file\r\n# Raw data that shares the same date will be merged and averaged\r\ndef create_meso_dataset():\r\n\tdatasets = []\r\n\tprocessed_data = {}\r\n\r\n\t# First collect all the datasets to be merged\r\n\tfor f in listdir('raw_meso_data'):\r\n\t\tdatasets.append(process_meso_data('raw_meso_data/' + f))\r\n\r\n\t# Now combine them\r\n\tmerge_count = {}\r\n\tfor dataset in datasets:\r\n\t\tfor date, data in dataset.items():\r\n\t\t\tif date in merge_count:\r\n\t\t\t\tprocessed_data[date] = list(map(add, processed_data[date], data))\r\n\t\t\t\tmerge_count[date] += 1\r\n\t\t\telse:\r\n\t\t\t\tmerge_count[date] = 1\r\n\t\t\t\tprocessed_data[date] = data\r\n\r\n\t# Divide each element by the mergecount to average them\r\n\tfor date, data in processed_data.items():\r\n\t\tprocessed_data[date] = [x / merge_count[date] for x in data]\r\n\r\n\t# Write the merged data to a csv file\r\n\twith open('meso_data.csv', 'w', newline='') as csvfile:\r\n\t\twriter = csv.writer(csvfile, delimiter=',')\r\n\t\twriter.writerow(['Date', 'Temperature', 'Wind_X', 'Wind_Y', 'Humidity', 'Pressure']) # Adds a header\r\n\r\n\t\tfor date, data in processed_data.items():\r\n\t\t\twriter.writerow([date] + data)\r\n\r\n\treturn processed_data\r\n\r\n# Loads a dataset, returning a list containing the headers and a dictionary containing the data\r\ndef load_dataset(file_name):\r\n\theader = []\r\n\tdata = {}\r\n\r\n\twith open(file_name) as csv_file:\r\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\r\n\t\theader = next(csv_reader)\r\n\t\theader.pop(0) # Gets read of the date part\r\n\r\n\t\tfor row in csv_reader:\r\n\t\t\tdata[row[0]] = row[1:]\r\n\r\n\treturn header, data\r\n\r\n# Creates a time series model from a dataset\r\n# Assumes last index of dataset is the label\r\ndef time_series(dataset):\r\n\tnew_dataset = []\r\n\r\n\t# For every element except the last, relabel the example using the next example's label\r\n\tfor i in range(len(dataset) - 1):\r\n\t\tnew_dataset.append(dataset[i] + [dataset[i+1][-1]])\r\n\r\n\treturn new_dataset\r\n\r\n# Converts a categorical label into a set of classes\r\ndef convert_class(dataset):\r\n\tconverted_dataset = list(dataset)\r\n\tclasses = {}\r\n\tclass_count = 0\r\n\r\n\tfor i in range(len(dataset)):\r\n\t\tclass_label = dataset[i][-1]\r\n\r\n\t\t# Found a new label\r\n\t\tif class_label not in classes:\r\n\t\t\tclass_count += 1\r\n\t\t\tclasses[class_label] = class_count\r\n\r\n\t\tconverted_dataset[i][-1] = classes[class_label]\r\n\r\n\treturn classes, converted_dataset\r\n","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"235733339","text":"\"\"\"\nTag 
is the element-tag object from the original XML or HTML document; instantiating BeautifulSoup essentially builds the collection of Tags for that HTML or XML\n\"\"\"\n\n# import the objects\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nif __name__ == '__main__':\n    # create the HTML instance\n    html = requests.get(\"https://www.wine.com/\")\n\n    # create the Beautiful Soup object\n    bs = BeautifulSoup(html.text, 'html5lib')\n    tag_h1 = bs.h1\n    print(type(tag_h1))\n    print(\"tag's name: \", tag_h1.name)  # same as bs.h1.name; note that name is the tag's own name (an h1 tag is named h1), use text to get the tag's content\n    print(\"tag's all attributes: \", tag_h1.attrs)  # a dict of attributes\n    print(\"tag's class attribute: \", tag_h1['class'])\n","sub_path":"Python-Library/BeautifulSoup/01a_Tag.py","file_name":"01a_Tag.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"327745872","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 29 19:48:28 2019\n\n@author: RAMAN\nNumber of upvotes - DT - Regressor\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport pydotplus\nimport math\nfrom sklearn.tree import export_graphviz\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nos.chdir(\"C:/Users/RAMAN/Documents/R/Analytics Vidya/Number of Upvotes\")\nos.getcwd()\n\n# Read the data\ntrain_data = pd.read_csv('train_NoU.csv')\ntest_data = pd.read_csv('test_NoU.csv')\n\ndescribe = train_data.describe(include = 'all')\n\ntrain_data.nunique()\n\ntrain_data.dtypes\n\n# Create dummy variables\ncateg_vars = train_data.loc[:, train_data.dtypes == object].columns\ndummy_df = pd.get_dummies(train_data[categ_vars], dtype = int)\ndummy_df.dtypes\n\nfull_data = pd.concat([train_data, dummy_df], axis=1)\nfull_data.shape\nfull_data.dtypes\nfulldata = full_data.drop(['Tag', 'ID', 'Username'], axis=1).copy()\nfulldata.info()\nfulldata.shape\n\n# Separating dependent and independent variables from dataset\nY = fulldata['Upvotes']\nX = fulldata.drop(['Upvotes'], axis = 1)\n\n# Confirm the changes\nX.shape\nY.shape\n\n# Random Sampling into Train_X, Test_X, Test_Y, Train_Y\nTrain_X, Test_X, Train_Y, Test_Y = train_test_split(X, Y, test_size = 0.3, random_state = 100)\n\n# Decision Tree Model\nM1 = DecisionTreeRegressor(criterion=\"mse\", random_state = 100)\nM1_Model = M1.fit(Train_X, Train_Y)\n\n\n# Scores from sklearn.metrics\nmodel_score_train = M1_Model.score(Train_X,Train_Y) # 0.9999999980615071\n\nTest_Pred = M1_Model.predict(Test_X)\n\nmodel_score_test = M1_Model.score(Test_X,Test_Y) # 0.842298226033708\n\nM1_Model.feature_importances_\n\nVar_Importance_Df = pd.concat([pd.DataFrame(Train_X.columns).rename(columns = {0:'Colnames'}), pd.DataFrame(M1_Model.feature_importances_)], axis = 1)\nVar_Importance_Df\n\n# Score\nr2_score(Test_Y, Test_Pred) # 0.842298226033708\nmath.sqrt(mean_squared_error(Test_Y, Test_Pred)) # 1376.878189283463\n\n################################## Test on actual test set ####################################\n# Create dummy variables\ncateg_vars_test = test_data.loc[:, train_data.dtypes == object].columns\ndummy_df_test = pd.get_dummies(test_data[categ_vars_test], dtype = int)\ndummy_df_test.dtypes\n\nfull_data_test = pd.concat([test_data, dummy_df_test], axis=1)\nfull_data_test.shape\nfull_data_test.dtypes\nfulldata_test = full_data_test.drop(['Tag', 'ID', 'Username'], axis=1).copy()\n\n# predict\n\nraw_Test_Pred = M1_Model.predict(fulldata_test)\n\nfrom pandas import DataFrame\n\nsubmission = DataFrame(raw_Test_Pred)\nsubmission.to_csv('submission.csv')\n\n# 
1476.2800989323948.\n\n# Plot\n# Create dot data\ndot_data = export_graphviz(M1_Model, out_file=None, feature_names = Train_X.columns, max_depth = 6, filled = True)\n\n# Draw Graph\ngraph = pydotplus.graph_from_dot_data(dot_data)\n\n# Show graph (on console) # takes a bot of time\n#Image(graph.create_png())\n\n# Write to a file\ngraph.write_pdf(\"DT_Plot_Number_Of_Upvotes.pdf\")\nos.getcwd()\n\nexport_graphviz(M1_Model, out_file ='tree.dot', \n feature_names =Train_X.columns) \n","sub_path":"Decision Tree Regressor/NumberOfUpvotes_DecisionTree.py","file_name":"NumberOfUpvotes_DecisionTree.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"424544202","text":"class Pizza:\n\n def __init__(self, name, price, ingredients, vegetarian=False):\n self.name = name\n self.price = price\n self.ingredients = ingredients\n self.vegetarian = vegetarian\n\n\n def display(self):\n veg_str = ''\n\n if self.vegetarian:\n veg_str = ' - VEGETARIAN'\n print(f'Pizza {self.name}: ${self.price}' + veg_str)\n print(', '.join(self.ingredients) + '\\n')\n\n\n\nclass CustomPizza(Pizza):\n BASE_PRICE = 7\n PRICE_PER_INGREDIENT = 1.2\n last_number = 0\n\n def __init__(self):\n CustomPizza.last_number += 1\n self.number = CustomPizza.last_number\n super().__init__('Custom ' + str(self.number), 0, [])\n self.ask_user_for_ingredient()\n self.compute_price()\n\n\n def ask_user_for_ingredient(self):\n # Ingredients for pizza number 1\n print(f'\\nIngredients for pizza number {self.number}')\n while True:\n ingredient = input('Add an ingredient (or press ENTER to finish): ')\n if ingredient == '':\n return\n self.ingredients.append(ingredient)\n print(f'You have {len(self.ingredients)} ingredient(s): {\", \".join(self.ingredients)}')\n\n\n def compute_price(self):\n self.price = self.BASE_PRICE + len(self.ingredients) * self.PRICE_PER_INGREDIENT\n\n\n\n\npizzas = [\n Pizza('4 cheeses', 8.99, ('blue cheese', 'brie', 'emmental', 'mozarella'), True),\n Pizza('Hawaii', 9.5, ('tomato', 'pineapple', 'oignon')),\n Pizza('4 seasons', 11, ('eggs', 'tomato', 'emmental', 'ham', 'olive')),\n Pizza('Vegetarian', 7.8, ('mushrooms', 'tomato', 'onions', 'bell peppers'), True),\n CustomPizza(),\n CustomPizza()\n]\n\n\n\ndef pizza_sort(e):\n return len(e.ingredients)\n\n# pizzas.sort(key=pizza_sort)\n\nfor i in pizzas:\n i.display()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4354892","text":"# -*- coding: utf8 -*-\n# Dioptas - GUI program for fast processing of 2D X-ray diffraction data\n# Copyright (C) 2015 Clemens Prescher (clemens.prescher@gmail.com)\n# Institute for Geology and Mineralogy, University of Cologne\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
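# One-hot encoding train and test separately, as above, yields mismatched
# columns whenever a category appears in only one split; reindexing the test
# dummies to the train columns is a common guard. Hypothetical toy data:
import pandas as pd

train_d = pd.get_dummies(pd.Series(['a', 'b', 'a']), dtype=int)
test_d = pd.get_dummies(pd.Series(['a', 'c']), dtype=int)
test_d = test_d.reindex(columns=train_d.columns, fill_value=0)
assert list(test_d.columns) == list(train_d.columns)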
If not, see .\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIntValidator, QDoubleValidator\n\n\nclass ImageManipGroupBox(QGroupBox):\n def __init__(self, *args, **kwargs):\n super(ImageManipGroupBox, self).__init__('Image Manipulation', *args, **kwargs)\n\n self._layout = QVBoxLayout(self)\n\n self._grid_layout = QGridLayout()\n self._grid_layout.setSpacing(6)\n\n\n self.rotate_p90_btn = FlatButton('Rotate +90')\n self.rotate_m90_btn = FlatButton('Rotate -90', self)\n self._grid_layout.addWidget(self.rotate_p90_btn, 0, 0)\n self._grid_layout.addWidget(self.rotate_m90_btn, 0, 1)\n\n self.flip_horizontal_btn = FlatButton('Flip horizontal', self)\n self.flip_vertical_btn = FlatButton('Flip vertical', self)\n self._grid_layout.addWidget(self.flip_horizontal_btn, 1, 0)\n self._grid_layout.addWidget(self.flip_vertical_btn, 1, 1)\n\n self.reset_transformations_btn = FlatButton('Reset transformations', self)\n self._grid_layout.addWidget(self.reset_transformations_btn, 2, 0, 1, 2)\n\n self._layout.addLayout(self._grid_layout)\n\n self.setLayout(self._layout)\n\n\nclass NumberTextField(QLineEdit):\n def __init__(self, *args, **kwargs):\n super(NumberTextField, self).__init__(*args, **kwargs)\n self.setValidator(QDoubleValidator())\n self.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)\n\n\nclass NumberIntField(QLineEdit):\n def __init__(self, *args, **kwargs):\n super(NumberIntField, self).__init__(*args, **kwargs)\n self.setValidator(QIntValidator())\n self.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)\n\n\nclass TextField(QLineEdit):\n def __init__(self, *args, **kwargs):\n super(TextField, self).__init__(*args, **kwargs)\n self.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)\n\n\nclass LabelAlignRight(QLabel):\n def __init__(self, *args, **kwargs):\n super(LabelAlignRight, self).__init__(*args, **kwargs)\n self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n\n\nclass LabelAlignCenter(QLabel):\n def __init__(self, *args, **kwargs):\n super(LabelAlignCenter, self).__init__(*args, **kwargs)\n self.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)\n\n\nclass CleanLooksComboBox(QComboBox):\n cleanlooks = QStyleFactory.create('motif')\n\n def __init__(self, *args, **kwargs):\n super(CleanLooksComboBox, self).__init__(*args, **kwargs)\n self.setStyle(CleanLooksComboBox.cleanlooks)\n\n\nclass FlatButton(QPushButton):\n def __init__(self, *args):\n super(FlatButton, self).__init__(*args)\n self.setFlat(True)\n\n\nclass CheckableFlatButton(QPushButton):\n def __init__(self, *args):\n super(CheckableFlatButton, self).__init__(*args)\n self.setFlat(True)\n self.setCheckable(True)\n\n\nclass ListTableWidget(QTableWidget):\n def __init__(self, columns=3):\n super(ListTableWidget, self).__init__()\n\n self.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.setSelectionMode(QAbstractItemView.NoSelection)\n self.setColumnCount(columns)\n self.horizontalHeader().setVisible(False)\n self.verticalHeader().setVisible(False)\n self.horizontalHeader().setStretchLastSection(True)\n self.setShowGrid(False)\n\n\ndef HorizontalSpacerItem(minimum_width=0):\n return QSpacerItem(minimum_width, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)\n\n\ndef VerticalSpacerItem():\n return QSpacerItem(0, 0, QSizePolicy.Minimum, QSizePolicy.Expanding)\n","sub_path":"rt/Widgets/CustomWidgets.py","file_name":"CustomWidgets.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"93176189","text":"__author__ = 'eka'\nimport numpy as np\n\n\nclass Game2048:\n def __init__(self):\n self.n=4\n\n def down(self,grid):\n #get vector per column\n copyGrid=np.zeros([self.n,self.n])\n\n for i in range(self.n):\n vect=np.copy(grid[:,i])\n vect=self.remove_trailing_zero(vect)\n rvect=self.to_down(vect[::-1],0)\n #write back to each column\n base=self.n-len(rvect)\n copyGrid[base:base+len(rvect),i]=rvect[::-1] #invert\n return copyGrid\n\n def right(self, grid):\n # get vector per column\n copyGrid = np.zeros([self.n, self.n])\n\n for i in range(self.n):\n vect = np.copy(grid[i, :])\n vect = self.remove_trailing_zero(vect)\n rvect = self.to_down(vect[::-1], 0)\n # write back to each column\n base = self.n - len(rvect)\n copyGrid[i,base:base + len(rvect)] = rvect[::-1] # invert\n return copyGrid\n\n def left(self, grid):\n # get vector per column\n copyGrid = np.zeros([self.n, self.n])\n\n for i in range(self.n):\n vect = np.copy(grid[i, :])\n vect = self.remove_trailing_zero(vect)\n rvect = self.to_down(vect, 0)\n # write back to each column\n base = self.n - len(rvect)\n copyGrid[i, :len(rvect)] = rvect # invert\n return copyGrid\n\n def up(self, grid):\n # get vector per column\n copyGrid = np.zeros([self.n, self.n])\n\n for i in range(self.n):\n vect = np.copy(grid[:, i])\n vect = self.remove_zero(vect)\n rvect = self.to_down(vect, 0)\n # write back to each column\n #base = self.n - len(rvect)\n copyGrid[:len(rvect), i] = rvect # invert\n return copyGrid\n\n #works for command, D, R\n def to_down(self, vect, pos):\n if(pos>=len(vect)):\n return []\n #base case\n if(pos==len(vect)-1):\n return [vect[pos]]\n else:\n #two situation breakdown\n #merge neighboring found\n if(vect[pos]==vect[pos+1]):\n return [vect[pos]+vect[pos+1]]+self.to_down(vect,pos+2)\n #no merge neighbor\n else:\n #recurse again\n return [vect[pos]]+self.to_down(vect,pos+1)\n\n def remove_trailing_zero(self, vect):\n #search non zero value\n i=self.get_first_non_zero_pos(vect)\n j=self.get_last_non_zero_pos(vect)\n return vect[i:j+1]\n\n def remove_zero(self, vect):\n rvect=[]\n for i in range(len(vect)):\n if (vect[i] != 0):\n rvect.append(vect[i])\n return rvect\n\n def get_first_non_zero_pos(self, vect):\n length = len(vect)\n # get first non zero post\n for i in range(length):\n if (vect[i] != 0):\n return i\n return 0\n\n def get_last_non_zero_pos(self, vect):\n length = len(vect)\n # get first non zero post\n for i in range(length-1,-1,-1):\n if (vect[i] != 0):\n return i\n return 0\n\n def count_tiles(self, vect):\n (X,F)=np.unique(vect,return_counts=True)\n #regenerate array exponent of two\n freq_index=range(1,10)\n freq_index=map(lambda x: 2**x, freq_index)\n freq_count=np.zeros(9)\n for i in range(len(freq_index)):\n pos=(X==freq_index[i])\n c=F[pos]\n if(F[pos]):\n freq_count[i]=F[pos]\n k=self.get_last_non_zero_pos(freq_count)\n return freq_count[:k+1]","sub_path":"Game2048.py","file_name":"Game2048.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"532824938","text":"\"\"\"\n__title__ = ''\n__author__ = 'Thompson'\n__mtime__ = 2019/3/16\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ☃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃ 神兽保佑 ┣┓\n ┃ 永无BUG! ┏┛\n ┗┓┓┏━┳┓┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\nfrom django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n url(r'add_shopcart/$', views.add_shopcart, name='add_shopcart'),\n url(r'del_shopcart/$', views.del_shopcart, name='del_shopcart'),\n url(r'shopcars/$', views.shopcars, name='shopcars'),\n]","sub_path":"AiYou/Shopcart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"269124546","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport uncertainties.unumpy as unp\nimport scipy.constants as con\nfrom scipy.optimize import curve_fit\nfrom scipy import stats\nfrom uncertainties import ufloat\n\n\n# unfortunately not (yet) possible in matplotlibrc\n# distance between screen and aperture\nl = 1 # m\na=633e-9 #m\n\n\n# sin(phi) = np.sin(x/l) = x/np.sqrt(x**2+l**2); sine is opposite over hypotenuse\n############################## Single slit 1 ###################################\n\n# intensity distribution of a single slit\ndef f(x,A_0,b):\n return A_0**2 * b**2 * (a/(np.pi*b*(np.sin(x/l))))**2 * np.sin((np.pi*b*(np.sin(x/l))/a))**2\n\nxx=np.linspace(-11e-3,11e-3,10000)\n\nx1,I1 = np.genfromtxt('data/e1.txt', unpack=True)\nI1=I1-0.15e-3 # subtract offset current; the power of ten is the difference between micro and nano\n#in SI\nx1=x1*1e-3 #m\nI1=I1*1e-6 # A, effectively unitless here\n\n\nparams1,cov1= curve_fit(f,x1,I1,p0=(5,0.4e-3))\nerr1 = np.sqrt(np.diag(cov1))\nprint('E1\\n')\nprint('A_0: ',params1[0],'\\pm',err1[0],'b/mm: ',params1[1]*1e3,'\\pm',err1[1])\n\n\n# Fourier\ndef furE(x,A_0,b):\n return (4 * A_0**2 * a**2)/(4*np.pi**2 *(x/l)**2)*np.sin((2 * np.pi * b *x / l)/(a))**2\n\nfparams1,fcov1=curve_fit(furE,x1,I1,p0=(5,0.4e-3))\nferr1=np.sqrt(np.diag(fcov1))\n#in SI\n#print('\\nFurier E1\\nA_0= ',fparams1[0],'\\pm',ferr1[0],'\\nb: ',fparams1[1],'\\pm',ferr1[1])\n\nplt.plot(xx,f(xx,*params1)*1e6, label='Regression')\n\n#plt.plot(xx,furE(xx,*fparams1),label='Furier')\nplt.plot(xx,f(xx,params1[0],0.4e-3)*1e6, '--', color='#e86143', label='Theorie')# theory curve b=0.4mm, A_0=params1[0]\n\nplt.plot(x1,I1*1e6, 'kx', markersize=5, label='Messwerte')\n\nplt.grid(True)\n#plt.axis([-0.011,0.011,-0.0000005,0.000006])\nplt.xlabel(r'$x \\:/\\: \\si{\\meter}$')\nplt.ylabel(r'$I \\:/\\: \\si{\\micro\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('plots/e1.pdf')\nplt.close()\n\n\n\n############################## Single slit 2 ###################################\nxxx=np.linspace(-26e-3,26e-3,10000)\nx2, I2 = np.genfromtxt('data/e2.txt', unpack=True)\nI2=I2-0.15 # subtract offset current; both in nano\n#in SI\nx2=x2*1e-3 #m\nI2=I2*1e-9 # A, effectively unitless here\n\n\n\nparams2,cov2= curve_fit(f,x2,I2,p0=(5,0.075e-3))\nerr2 = np.sqrt(np.diag(cov2))\nprint('E2\\n')\nprint('A_0: ',params2[0],'\\pm',err2[0],'b/mm: ',params2[1]*1e3,'\\pm',err2[1])\n\n# Fourier\n\nfparams2,fcov2=curve_fit(furE,x2,I2,p0=(5,0.075e-3))\nferr2=np.sqrt(np.diag(fcov2))\n#in SI\n#print('\\nFurier E2\\nA_0= ',fparams2[0],'\\pm',ferr2[0],'\\nb: ',fparams2[1],'\\pm',ferr2[1])\n\nplt.plot(xxx,f(xxx,*params2)*1e6, label='Regression')\n# theory curve b=0.075mm, A_0=params2[0]\n#plt.plot(xxx,furE(xxx,*fparams2),label='Furier')\nplt.plot(xxx,f(xxx,params2[0],0.075e-3)*1e6, '--', color='#e86143', label='Theorie')\n\n\nplt.plot(x2,I2*1e6, 'kx', markersize=5, label='Messwerte')\n\n\nplt.grid(True)\n#plt.axis([-0.026,0.026,-0.00000001,0.00000016])\nplt.xlabel(r'$x \\:/\\: \\si{\\meter}$')\nplt.ylabel(r'$I \\:/\\: 
\\si{\\micro\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('plots/e2.pdf')\nplt.close()\n\n\n# sin(phi) = x/l = x/np.sqrt(x**2+l**2); sine is opposite over hypotenuse\n############################## Double slit #####################################\n# intensity at the double slit\n\ndef g(x,A_0,b,s):\n return 4 * A_0 * np.cos(np.pi * s *(np.sin(x/l)) / a)**2 * (a/(np.pi * b * (np.sin(x/l))))**2 * np.sin(np.pi * b * (np.sin(x/l))/ a)**2\n\nxd, Id = np.genfromtxt('data/d.txt', unpack=True)\nId=Id-0.15e-3 # subtract offset current; the power of ten is the difference between micro and nano\n#in SI\nxd=xd*1e-3 #m\nId=Id*1e-6 # A, effectively unitless here\n\n\nparams3,cov3=curve_fit(g,xd,Id,p0=(18,0.15e-3,0.5e-3))\nerr3=np.sqrt(np.diag(cov3))\nprint('\\n\\nDS\\n')\nprint('A_0: ',params3[0],'\\pm',err3[0],'b/mm: ',params3[1]*1e3,'\\pm',err3[1],'\\ns/mm: ',params3[2]*1e3,'\\pm',err3[2])\n\nplt.plot(xxx,g(xxx,*params3)*1e6,label='Regressions')\nplt.plot(xxx,g(xxx,params3[0],0.15e-3,0.5e-3)*1e6, '--', color='#e86143', label='Theorie')\nplt.plot(xd, Id*1e6, 'kx', markersize=5, label='Messwerte')\nplt.grid(True)\n#plt.axis([-0.026,0.026,-0.0000001,0.0000022])\n\nplt.xlabel(r'$x \\:/\\: \\si{\\meter}$')\nplt.ylabel(r'$I \\:/\\: \\si{\\micro\\ampere}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('plots/d.pdf')\nplt.close()\n\n\n# Discussion\n\n# error in A_0\nA=np.array([params1[0],params2[0]])\nfA=np.array([fparams1[0],fparams2[0]])\ndA=(A-fA)/fA\nprint('\\n\\nFehler in A :',dA)\n\n# error in b\ntb=np.array([0.4e-3,0.075e-3,0.15e-3])\nb=np.array([params1[1],params2[1],params3[1]])\ndb=(tb-b)/tb\nds=(0.5e-3-params3[2])/0.5e-3\n\nprint('\\nFehler b_exp: ',db,'\\nds',ds)\n","sub_path":"SoSe/V406/plots/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"551100472","text":"# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nimport json\nfrom unittest.mock import patch\n\n\nfrom 臺灣言語平臺.使用者模型 import 使用者表\n\nclass 登入狀況試驗(TestCase):\n\tdef setUp(self):\n\t\tself.登入使用者編號patcher = patch('臺灣言語平臺.使用者模型.使用者表.判斷編號')\n\t\tself.登入使用者編號mock = self.登入使用者編號patcher.start()\n\tdef tearDown(self):\n\t\tself.登入使用者編號patcher.stop()\n\n\tdef test_無登入(self):\n\t\tself.登入使用者編號mock.return_value = None\n\t\t回應 = self.client.get('/使用者/看編號')\n\t\t\n\t\tself.assertEqual(回應.status_code, 200)\n\t\t回應資料 = json.loads(回應.content.decode(\"utf-8\"))\n\t\tself.assertEqual(回應資料['使用者編號'], '無登入',)\n\t\t\t\n\tdef test_一般使用者(self):\n\t\t阿媠 = 使用者表.加使用者('sui2@pigu.tw', {\"名\":'阿媠', '出世年':'1950', '出世地':'臺灣', },)\n\t\tself.登入使用者編號mock.return_value = 阿媠.編號()\n\t\t\n\t\t回應 = self.client.get('/使用者/看編號')\n\t\t\n\t\tself.assertEqual(回應.status_code, 200)\n\t\t回應資料 = json.loads(回應.content.decode(\"utf-8\"))\n\t\tself.assertEqual(回應資料['使用者編號'], str(阿媠.編號()),)\n\t\t\t","sub_path":"臺灣言語平臺/試驗/登出入/test登入狀況試驗.py","file_name":"test登入狀況試驗.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"467191167","text":"import unittest\nimport os\nfrom selenium.webdriver import DesiredCapabilities, Remote\nfrom pages.auth_page import AuthPage\nfrom pages.userinfo_page import UserinfoPage\n\n\nclass SuggestTownTest(unittest.TestCase):\n TOWN_PREFIX = 'Мос'\n SUGGEST_LIST = [\n 'Москва, Россия',\n 'Московский, Московская обл., Россия',\n 'Мосальск, Калужская обл., Россия'\n ]\n\n def 
setUp(self):\n browser = os.environ.get('BROWSER', 'CHROME')\n\n self.driver = Remote(\n command_executor='http://127.0.0.1:4444/wd/hub',\n desired_capabilities=getattr(DesiredCapabilities, browser).copy()\n )\n\n def tearDown(self):\n self.driver.quit()\n\n def test(self):\n auth_page = AuthPage(self.driver)\n auth_page.open()\n auth_page.authorize()\n\n userinfo_page = UserinfoPage(self.driver)\n userinfo_page.open()\n userinfo_form = userinfo_page.form\n userinfo_form.set_town(self.TOWN_PREFIX)\n userinfo_form.wait_for_last_suggest(self.SUGGEST_LIST[-1])\n self.assertEqual(self.SUGGEST_LIST, userinfo_form.get_suggests_for_town())","sub_path":"tests/userinfo/suggesttown_test.py","file_name":"suggesttown_test.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"488379031","text":"\"\"\"Loops: maximum: Write a function named minmax_in_list that takes a list of integers as\r\nan input and returns the minimum and maximum values in the list. Return (None, None)\r\nif the list is empty. Note that the maximum and minimum integers in Python are given\r\nby, respectively, the constants sys.maxsize and -sys.maxsize-1. You must use loops\r\nrather than any built-in function\"\"\"\r\n\r\ndef minmax_in_list(ls):\r\n if ls == []:\r\n return (None, None)\r\n maxi = ls[0]\r\n mini = ls[0]\r\n for item in ls:\r\n if item < mini:\r\n mini = item\r\n if item > maxi:\r\n maxi = item\r\n return (mini, maxi)","sub_path":"Min_max_in_list_loops.py","file_name":"Min_max_in_list_loops.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"129512109","text":"from flask.ext import restful\nfrom flask.ext.restful import fields, marshal_with, reqparse\n\nimport bson\n\nimport json\n\n# NOTE: get_user, create_org, get_org, delete_org and update_org are assumed to be\n# provided by the application's data layer; they are not defined in this module.\n\npost_parser = reqparse.RequestParser()\npost_parser.add_argument(\n 'name',type=str, \n location='form', required=True, \n help='The organization name',\n)\npost_parser.add_argument(\n 'description',type=str, \n location='form', required=True, \n help='The organization description',\n)\npost_parser.add_argument(\n 'short_description',type=str, \n location='form', \n help='A short description for organization',\n)\npost_parser.add_argument(\n\t'owner', type=str,\n\trequired=True, location='form',\n\thelp='The user that owns the organization')\n\n\nclass Organization(restful.Resource):\n\tdef post(self):\n\t\tmy_args = post_parser.parse_args()\n\t\tuser = get_user(my_args.owner)\n\t\tnew_id = str(bson.objectid.ObjectId())\n\t\torganization = create_org()\n\t\treturn organization\n\n\tdef get(self, org_id):\n\t\torganization = get_org(org_id)\n\t\treturn organization\n\n\tdef delete(self, org_id):\n\t\tdelete_org(org_id)\n\t\treturn 'organization {id} is all gone'.format(id=org_id)\n\n\tdef put(self):\n\t\tupdate_org()\n\t\treturn organization\n\n\n","sub_path":"resources/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"580888374","text":"import logging\nfrom typing import Optional\nfrom sys import stdout, stderr\nfrom utils.config import config\n\n\n_logger:Optional[logging.Logger] = None\n\n\ndef get_logger(name:str) -> logging.Logger:\n \"\"\"Return logger with given name.\"\"\"\n global _logger\n if not _logger:\n _logger = logging.getLogger()\n _logger.setLevel(0)\n _logger.addHandler(logging.NullHandler(0))\n 
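# config['logging']['loggers'] is expected to be a list of dicts with 'enabled', 'type' ('file' or 'stream'), 'output' and 'level' keys, exactly as read below\n 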
formatter = logging.Formatter('%(asctime)s|%(name)s|%(levelname)s|%(message)s')\n for info in config['logging']['loggers']:\n if info['enabled']:\n if info['type'] == 'file':\n handler:logging.Handler = logging.FileHandler(info['output'])\n handler.setLevel(info['level'])\n handler.setFormatter(formatter)\n _logger.addHandler(handler)\n elif info['type'] == 'stream':\n if info['output'] == 'out':\n handler = logging.StreamHandler(stdout)\n handler.setLevel(info['level'])\n handler.setFormatter(formatter)\n _logger.addHandler(handler)\n elif info['output'] == 'err':\n handler = logging.StreamHandler(stderr)\n handler.setLevel(info['level'])\n handler.setFormatter(formatter)\n _logger.addHandler(handler)\n return logging.getLogger(name)\n","sub_path":"aggregator/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"75162366","text":"import requests\n\nresponse = requests.get(url=\"http://zipcloud.ibsnet.co.jp/api/search?zipcode=0287111\")\n\ndata = {\n \"message\": None,\n \"results\": [\n {\n \"address1\": \"北海道\",\n \"address2\": \"江別市\",\n \"address3\": \"大麻南樹町\",\n \"kana1\": \"ホッカイドウ\",\n \"kana2\": \"エベツシ\",\n \"kana3\": \"オオアサミナキチョウ\",\n \"prefcode\": \"1\",\n \"zipcode\": \"0690865\"\n }\n ],\n \"status\": 200\n}\n\nprint(data[\"results\"][0][\"address1\"])\n","sub_path":"request-sample01.py","file_name":"request-sample01.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"363979794","text":"import discord\nimport yaml\nfrom discord.utils import get\n\nfrom src.eventsHandler.eventsHandler import EventsHandler\n\n# Get configuration\nwith open('run/config/config.yml', 'r') as file:\n config = yaml.safe_load(file)\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n await EventsHandler.on_ready(client)\n\n # with open('run/data/reactions.yml', 'r') as file:\n # reactions = yaml.safe_load(file)\n\n # for reaction in reactions:\n # message = await client. 
\\\n # fetch_message(reaction['message_id'])\n # for r in reaction['reactions']:\n # emoji = [\n # x for x in client.emojis\n # if str(x) == r['emoji_id']\n # ]\n # if emoji:\n # await message.add_reaction(emoji[0])\n\n\n@client.event\nasync def on_message(message: discord.Message):\n await EventsHandler.on_message(client, message)\n\n\n@client.event\nasync def on_raw_reaction_add(payload: discord.RawReactionActionEvent):\n with open('run/data/reactions.yml', 'r') as file:\n reactions = yaml.safe_load(file)\n\n reaction = [x for x in reactions\n if x['channel_id'] == payload.channel_id\n and x['message_id'] == payload.message_id]\n\n if not reaction:\n return\n\n emoji_role = [x for x in reaction[0]['reactions']\n if x['emoji_id'] == str(payload.emoji)]\n\n if not emoji_role:\n return\n\n guild: discord.Guild = client.get_guild(payload.guild_id)\n\n role: discord.Role = guild.get_role(emoji_role[0]['role_id'])\n await guild.get_member(payload.user_id).add_roles(role)\n print(role.name)\n\n\n@client.event\nasync def on_raw_reaction_remove(payload: discord.RawReactionActionEvent):\n with open('run/data/reactions.yml', 'r') as file:\n reactions = yaml.safe_load(file)\n\n reaction = [x for x in reactions\n if x['channel_id'] == payload.channel_id\n and x['message_id'] == payload.message_id]\n\n if not reaction:\n return\n\n emoji_role = [x for x in reaction[0]['reactions']\n if x['emoji_id'] == str(payload.emoji)]\n\n if not emoji_role:\n return\n\n guild: discord.Guild = client.get_guild(payload.guild_id)\n\n role: discord.Role = guild.get_role(emoji_role[0]['role_id'])\n await guild.get_member(payload.user_id).remove_roles(role)\n print(role.name)\n\n\nclient.run(config['token'])\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"165415617","text":"#!/usr/bin/env python3\n\n########################################################################################\n#\n# Name: PCFG_Cracker Cracking Management Code\n# Description: Manages a cracking session\n# Eventually I want to add other functionality into the pcfg_manager codebase\n# such as honeywords, so I am moving the actual running of a cracking\n# session into this chunk of code\n#\n#########################################################################################\n\nimport sys\nimport time\nimport threading ##--Used only for the \"check for user input\" threads\nfrom multiprocessing import Process, Queue\n\nfrom pcfg_manager.core_grammar import PcfgClass, print_grammar\nfrom pcfg_manager.priority_queue import PcfgQueue\nfrom pcfg_manager.ret_types import RetType\n\n\n###################################################################################################\n# Used to manage a password cracking session\n###################################################################################################\nclass CrackingSession:\n \n ############################################################################\n # Basic initialization function\n ############################################################################\n def __init__(self, pcfg = None, p_queue = None):\n self.p_queue = p_queue\n self.pcfg = pcfg\n \n ##--Debugging and Performance Monitoring Variables--##\n self.num_parse_trees = 0 #-Total number of parse_trees processed so far\n self.num_guesses = 0 #-Total number of guesses made so far\n self.p_queue_start_time = 0 #-Start time of running the Next algorithm on the 
priority queue\n self.p_queue_stop_time = 0 #-Stop time of running the Next algorithm\n self.guess_start_time = 0 #-Start time of generating the actual guesses\n self.guess_stop_time = 0 #-Stop time of generating the actual guesses\n self.running_queue_time = 0 #-Total time running the \"Next\" algorithm to get a new pre-terminal\n self.running_guess_time = 0 #-Total time spent generating guesses from pre-terminals and printing them out\n\n ##############################################################################\n # Starts the cracking session and starts generating guesses\n ##############################################################################\n def run(self, print_queue_info = False):\n ##--Setup the check to see if a user is pressing a button--#\n user_input = [None]\n user_thread = threading.Thread(target=keypress, args=(user_input,))\n user_thread.daemon = True # thread dies when main thread (only non-daemon thread) exits.\n user_thread.start()\n \n #-Start the clock\n self.total_time_start = time.perf_counter()\n \n #-Generate the first parse tree to process\n self.p_queue_start_time = time.perf_counter()\n queue_item_list = []\n #-This is the function that does all the work\n ret_value = self.p_queue.next_function(self.pcfg, queue_item_list)\n \n #-Currently there is only one item returned at a time, this may change in the future\n if len(queue_item_list) > 0:\n queue_item = queue_item_list[0]\n else:\n return ret_value\n \n self.p_queue_stop_time = time.perf_counter() - self.p_queue_start_time\n self.running_queue_time = self.running_queue_time + self.p_queue_stop_time\n \n ##--Keep running while the p_queue.next_function still has items in it\n while ret_value == RetType.STATUS_OK:\n \n ##--Expand the guesses from the parse tree\n self.guess_start_time = time.perf_counter()\n current_guesses = self.pcfg.list_terminals(queue_item.parse_tree) \n self.guess_stop_time = time.perf_counter() - self.guess_start_time\n self.running_guess_time = self.running_guess_time + self.guess_stop_time\n \n self.num_parse_trees = self.num_parse_trees +1\n self.num_guesses = self.num_guesses + len(current_guesses) \n \n ##--Print_Queue_Info says if we are running this session for debugging and performance improvements vs actually cracking passwords\n if print_queue_info == True: \n \n if self.num_parse_trees % 10000 == 0:\n print (\"PQueue:\" + str(len(self.p_queue.p_queue)),file=sys.stderr)\n print (\"Backup storage list:\" + str(len(self.p_queue.storage_list)),file=sys.stderr)\n print (\"Total number of Parse Trees: \" + str (self.num_parse_trees),file=sys.stderr)\n print (\"PQueueTime \" + str(self.running_queue_time),file=sys.stderr)\n print (\"Guesses:\" + str(self.num_guesses),file=sys.stderr)\n print (\"GuessTime \" + str(self.running_guess_time),file=sys.stderr)\n print (\"Average num of guesses per parse-tree: \" + str(self.num_guesses // self.num_parse_trees),file=sys.stderr)\n print (\"Total Time \" + str(time.perf_counter() - self.total_time_start),file=sys.stderr)\n print (\"Number of guesses a second: \" + str(self.num_guesses // (time.perf_counter() - self.total_time_start)),file=sys.stderr)\n print (\"Current probability: \" + str(self.p_queue.max_probability),file=sys.stderr)\n print ()\n\n ##--This is if you are actually trying to generate guesses\n else:\n for guess in current_guesses:\n try:\n print(guess)\n ##--While I could silently replace/ignore the Unicode character for now I want to know if this is happening\n except UnicodeEncodeError:\n 
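##--Keep the session alive and just skip this one guess\n 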
print(\"UNICODE_ERROR\",file=sys.stderr) \n except IOError:\n print(\"Consumer, (probably the password cracker), stopped accepting input.\",file=sys.stderr)\n print(\"Halting guess generation and exiting\",file=sys.stderr)\n return RetType.BROKEN_PIPE\n \n ##--Check for user requested status output--##\n if user_input[0] is not None: \n self.display_status(guess_list = current_guesses)\n user_input[0] = None\n ##--Kick off again the thread to check if user_input was entered\n if not user_thread.is_alive():\n user_thread = threading.Thread(target=keypress, args=(user_input,))\n user_thread.daemon = True # thread dies when main thread (only non-daemon thread) exits.\n user_thread.start()\n \n ##--Generate more parse trees from the priority queue\n self.p_queue_start_time = time.perf_counter()\n queue_item_list = [] \n ret_value = self.p_queue.next_function(self.pcfg, queue_item_list)\n if len(queue_item_list) > 0:\n queue_item = queue_item_list[0]\n self.p_queue_stop_time = time.perf_counter() - self.p_queue_start_time\n self.running_queue_time = self.running_queue_time + self.p_queue_stop_time \n \n return RetType.STATUS_OK\n \n\n ######################################################################################################\n # Displays status of cracking session\n ######################################################################################################\n def display_status(self, guess_list = []):\n print (\"Status Report:\",file=sys.stderr)\n if len(guess_list) != 0:\n print (\"Currently generating guesses from \" + str(guess_list[0]) + \" to \" + str(guess_list[-1]),file=sys.stderr)\n print(\"\",file=sys.stderr)\n return RetType.STATUS_OK\n \n\n###########################################################################################\n# Used to check to see if a key was pressed to output program status\n# *Hopefully* should work on multiple OSs\n# --Simply check user_input_char to see if it is not none\n###########################################################################################\ndef keypress(user_input_ref):\n user_input_ref[0] = input() ","sub_path":"python_pcfg_cracker/pcfg_manager/cracking_session.py","file_name":"cracking_session.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"430707295","text":"# Detector is based on Caffee framework\n# Files needed: .prototxt file: defining the model architecture\n# .caffemodel: containing the weights of the actual layers\n\nimport numpy as np \nimport argparse\nimport cv2\n\nargs = argparse.ArgumentParser()\nargs.add_argument('-i','--image', help='The Input image path')\nargs.add_argument('-p','--prototxt', help='The path to the Caffe deploy prototxt file')\nargs.add_argument('-m','--model',help='Path to the pretrained model')\nargs.add_argument('-c','--confidence', help='Minimum probability to filter work detection')\n\nargs = vars(args.parse_args)\n\nnet = cv2.dnn.readNetFromCaffe(args['prototxt'], args['model'])\n\nimage = cv2.imread(args['image'])\n(h,w) = image.shape[:2]\n\nblob = cv2.dnn.blobFromImage(cv2.resize(image,(300,300)), 1.0, (300,300), (104.0,177.0,123.0))\n\nprint('Starting detection')\n\nnet.setInput(blob)\ndetections = net.forward()\n\nfor i in range(0, detections.shape[2]):\n confidence = detections[0,0,i,2]\n\n if confidence > args['confidence']:\n box = detections[0,0,i,3:7] * np.aarray([w,h,w,h])\n ","sub_path":"Face 
Detection/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"216540726","text":"import json, unittest\n\nfrom . make import make\n\n\nPROJECT_FAILURE1 = \"\"\"\n{\n \"driver\": {\n \"typename\": \"test.bibliopixel.failure.Failure\",\n \"num\": 12\n },\n\n \"layout\": {\n \"typename\": \"bibliopixel.layout.strip.Strip\"\n },\n\n \"animation\": {\n \"typename\": \"bibliopixel.animation.tests.StripChannelTest\"\n }\n}\n\"\"\"\n\n\nPROJECT_FAILURE2 = \"\"\"\n{\n \"driver\": {\n \"typename\": \"test.bibliopixel.failure2.NON_EXISTENT\",\n \"num\": 12\n },\n\n \"layout\": {\n \"typename\": \"bibliopixel.layout.strip.Strip\"\n },\n\n \"animation\": {\n \"typename\": \"bibliopixel.animation.tests.StripChannelTest\"\n }\n}\n\"\"\"\n\n\nPROJECT_FAILURE3 = \"\"\"\n{\n \"driver\": {\n \"typename\": \"test.NON_EXISTENT.Failure\",\n \"num\": 12\n },\n\n \"layout\": {\n \"typename\": \"bibliopixel.layout.strip.Strip\"\n },\n\n \"animation\": {\n \"typename\": \"bibliopixel.animation.tests.StripChannelTest\"\n }\n}\n\"\"\"\n\n\nclass ImportFailureTest(unittest.TestCase):\n def test_bad_json(self):\n with self.assertRaises(ValueError):\n make('{]')\n\n def test_failure1(self):\n with self.assertRaises(ImportError) as e:\n make(PROJECT_FAILURE1)\n self.assertEquals(e.exception.name, 'test.bibliopixel.failure.Failure')\n\n def test_failure2(self):\n with self.assertRaises(ImportError) as e:\n make(PROJECT_FAILURE2)\n self.assertEquals(e.exception.name,\n 'test.bibliopixel.failure2.NON_EXISTENT')\n\n def test_failure3(self):\n with self.assertRaises(ImportError) as e:\n make(PROJECT_FAILURE3)\n self.assertEquals(e.exception.name, 'test.NON_EXISTENT.Failure')\n","sub_path":"test/bibliopixel/project/import_failure_test.py","file_name":"import_failure_test.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"527560483","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': 'Simple NEF format parser',\n 'author': 'TJ Ragan',\n 'url': 'http://github.com/tragan',\n 'download_url': 'http://github.com/tragan.',\n 'author_email': 'tjr22@le.ac.uk',\n 'version': '0.1',\n 'install_requires': ['nose', 'numpy', 'pandas'],\n 'packages': ['NEFreader'],\n 'scripts': [],\n 'name': 'NEFreader'\n}\n\nsetup(**config)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"471146316","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('files', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='imagefile',\n name='image_type',\n field=models.CharField(default=b'marker content', max_length=255, blank=True, choices=[(b'marker content', b'Image for Marker Content'), (b'custom thumbnail', b'Image for custom video thumbnail'), (b'custom logo', b'Image for custom logo on player chrome'), (b'profile avatar', b'Profile avatar')]),\n ),\n 
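# each choices entry pairs the stored byte-string value with its human-readable label\n 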
]\n","sub_path":"videopath/apps/files/migrations/0002_auto_20200426_2245.py","file_name":"0002_auto_20200426_2245.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192684015","text":"from Dot import Dot\nimport random\nimport pygame\nimport pickle\nimport os\nimport math\n\nDIR = [\n (0,0),\n (1, 0),\n (-1, 0),\n (0, 1),\n (0, -1)\n ]\n\n# Population class for group of dots\nclass Population(object):\n def __init__(self, size, startx, starty, goalx, goaly, brainsize, path, train):\n self.dots = []\n # Create array of dots\n for i in range(size):\n self.dots.append(Dot(startx,starty, brainsize))\n\n # Set initial values\n self.goalx = goalx\n self.goaly = goaly\n self.gen = 1\n self.bestDot = 0\n # If in training mode, start with 20 steps and increment\n if train:\n self.maxStep = 20\n # Otherwise, run as normal\n else:\n self.maxStep = brainsize\n self.brainsize = brainsize\n self.fitnessSum = 0\n self.path = path\n\n # Draw every dot on screen\n def show(self, screen):\n for dot in self.dots:\n dot.show(screen)\n # Show best dot in blue\n self.dots[0].show(screen)\n\n # Update every dot \n def update(self, walls, obstacles):\n # Kill dot if it exceeds the min step required\n for dot in self.dots:\n if dot.brain.step > self.maxStep:\n dot.dead = True\n\n # Update otherwise\n else:\n dot.update(self.goalx, self.goaly, walls, obstacles)\n\n # Calculate fitness of every dot in population\n def calculateFitness(self):\n for dot in self.dots:\n dot.calculateFitness(self.goalx, self.goaly, self.path)\n\n # Check if every dot is dead\n def allDotsDead(self):\n for dot in self.dots:\n # Return false if even one is alive\n if not dot.dead and not dot.reachedGoal:\n return False\n return True\n\n # Natural selection for population\n def naturalSelection(self):\n # Make array of new dots\n newDots = []\n # Find best dot and fitness sum\n self.setBestDot()\n self.calculateFitnessSum()\n\n # Best dot in population is carried over\n newDots.append(self.dots[self.bestDot].makeBaby())\n newDots[0].isBest = True\n\n # Find random parent for every subsequent dot\n # Make baby for each\n for i in range(len(self.dots) - 1):\n parent = self.selectParent()\n newDots.append(parent.makeBaby())\n\n # Set new population\n for i in range(len(self.dots)):\n self.dots[i] = newDots[i]\n\n # Increase generation\n self.gen += 1\n\n if self.gen % 30 == 0 and self.maxStep < self.brainsize:\n for dot in self.dots:\n for i in range(self.maxStep, self.brainsize):\n r = math.floor(random.random() * 5) \n dot.brain.directions[i] = DIR[r]\n\n self.maxStep += 20\n\n pygame.time.wait(50)\n\n # Save directions from best dot\n def save(self, level):\n data = self.dots[0].brain.directions\n pickle.dump(data, open(\"data_\" + str(level) + \".dat\", \"wb\"))\n\n # Upload data into best dot given a level\n def upload(self, level):\n # loads saved data\n self.dots[0].brain.directions = pickle.load(open(\"trained_data/data_\" + str(level) + \".dat\", \"rb\"))\n\n # Accessor for generation value\n def generation(self):\n gen = self.gen\n return gen\n\n # Sum fitness for entire population\n def calculateFitnessSum(self):\n self.fitnessSum = 0\n for dot in self.dots:\n self.fitnessSum += dot.fitness\n # Print for testing\n print(\"Generation \" + str(self.gen) + \": \" + str(self.fitnessSum))\n\n # Select parent for a baby\n def selectParent(self):\n # Create random number from 0 to fitnessSum\n rand = random.random() * self.fitnessSum\n 
runningSum = 0\n\n # Iterate through dots, higher fitness dots have higher chance to reproduce\n for i in range(len(self.dots)):\n runningSum += self.dots[i].fitness\n if runningSum > rand:\n return self.dots[i]\n\n return self.dots[self.bestDot]\n\n # Mutate every baby in dots except first one\n def mutateBabies(self):\n for i in range(1, len(self.dots)):\n self.dots[i].brain.mutate()\n\n # Find best dot\n def setBestDot(self):\n # Linear search for index of dot with highest fitness\n max = 0\n maxIndex = 0\n for i in range(len(self.dots)):\n if self.dots[i].fitness > max:\n max = self.dots[i].fitness\n maxIndex = i\n # Set value\n self.bestDot = maxIndex\n\n # Set new limit on steps if goal is reached\n if self.dots[self.bestDot].reachedGoal:\n self.maxStep = self.dots[self.bestDot].brain.step\n print(\"step: \" + str(self.maxStep))\n","sub_path":"Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"607882228","text":"import json\nimport time\nimport hashlib\n\nimport requests\n\nfrom api_keys import API_KEY, API_SEC, HOST\n\n\ndef get_api_sig():\n timestamp = repr(int(time.time()))\n all = str.encode(API_KEY + API_SEC + timestamp)\n signature = hashlib.sha256(all).hexdigest()\n return signature\n\n\ndef get_api_call(url):\n r = requests.get(url)\n json_data = json.loads(r.text)\n return json_data\n\n\nif __name__ == '__main__':\n\n API_SIG = get_api_sig()\n url = '{}arge/matches/1943404?pbp=true&date=2018-09-04&api_key={}&sig={}'.format(\n HOST, API_KEY, API_SIG\n )\n\n test = get_api_call(url)['apiResults']\n\n\n import pdb; pdb.set_trace() # noqa # yapf: disable\n","sub_path":"get_data/python/api_calls.py","file_name":"api_calls.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"436809334","text":"#!/usr/bin/env python3\n\n# defines a function textmyself() that sends the text message passed to it as a string\n\n# preset values:\n\naccountSID = 'ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\nauthToken = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\nmyNumber = '+******'\ntwilioNumber = '+******'\n\nfrom twilio.rest import Client\n\ndef textmyself(message):\n twilioCli = Client(accountSID, authToken)\n twilioCli.messages.create(body=message, from_=twilioNumber, to=myNumber)\n\n","sub_path":"062_textMyself.py","file_name":"062_textMyself.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"204711314","text":"\n\ndef selamVer() :\n print(\"Selam millet!\")\n\nselamVer()\n\ndef kullanıcıbilgi(isim,soyisim,şehir):\n print(\"isminiz: \"+isim+\"\\n soyisminiz: \"+soyisim+\"\\n şehir: \" +şehir+\"\" )\n\nkullanıcıbilgi(\"yasemin\",\"turhan\",\"istanbul\")\n\n#\n#\n\n\nilksayi = 5\nikincisayi = 8\n\n\ndef hangisiBuyuk(ilksayi, ikincisayi):\n if ilksayi > ikincisayi:\n print(ilksayi)\n\n elif ikincisayi > ilksayi:\n print(ikincisayi)\n\n\nhangisiBuyuk(5, 8)\n\n\n\nliste = [3, 4, 7, 2, 5, 6]\n\n\ndef toplam(liste):\n a = 0\n for i in liste:\n a = a + i\n\n print(a)\n\n\ntoplam(liste)\n\n\n\n\n\n\n","sub_path":"fonksiyonlaraGiris.py","file_name":"fonksiyonlaraGiris.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"188182095","text":"class Point:\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def 
__add__(self, other):\n p = Point()\n if isinstance(other, Point):\n p.x = self.x + other.x\n p.y = self.y + other.y\n\n elif isinstance(other, tuple):\n p.x = self.x + other[0]\n p.y = self.y + other[1]\n return p\n\n def __str__(self):\n return 'The sum is (%d,%d)' % (self.x, self.y)\n\n\nif __name__ == '__main__':\n p1 = Point(3, 4)\n p2 = Point(5, 6)\n p3 = p1 + p2\n print(p3)\n\n p4 = Point(3, 4)\n p5 = (7, 8)\n p6 = p4 + p5\n print(p6)\n","sub_path":"lab5_task6.py","file_name":"lab5_task6.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"63837810","text":"import numpy as np\nfrom .tensor_data import (\n count,\n index_to_position,\n broadcast_index,\n shape_broadcast,\n MAX_DIMS,\n)\nfrom numba import njit, prange\n\ncount = njit()(count)\nindex_to_position = njit()(index_to_position)\nbroadcast_index = njit()(broadcast_index)\n\n\ndef tensor_map(fn):\n \"\"\"\n Higher-order tensor map function.\n\n Args:\n fn: function mappings floats-to-floats to apply.\n out (array): storage for out tensor.\n out_shape (array): shape for out tensor.\n out_strides (array): strides for out tensor.\n in_storage (array): storage for in tensor.\n in_shape (array): shape for in tensor.\n in_strides (array): strides for in tensor.\n \"\"\"\n\n def _map(out, out_shape, out_strides, in_storage, in_shape, in_strides):\n if (\n len(out_strides) != len(in_strides)\n or (out_strides != in_strides).any()\n or (out_shape != in_shape).any()\n ):\n for i in prange(len(out)):\n out_index = np.zeros(MAX_DIMS, np.int32)\n in_index = np.zeros(MAX_DIMS, np.int32)\n count(i, out_shape, out_index)\n broadcast_index(out_index, out_shape, in_shape, in_index)\n o = index_to_position(out_index, out_strides)\n j = index_to_position(in_index, in_strides)\n out[o] = fn(in_storage[j])\n else:\n for i in prange(len(out)):\n out[i] = fn(in_storage[i])\n\n\n return njit(parallel=True)(_map)\n\n\ndef map(fn):\n f = tensor_map(njit()(fn))\n\n def ret(a, out=None):\n if out is None:\n out = a.zeros(a.shape)\n f(*out.tuple(), *a.tuple())\n return out\n\n return ret\n\n\ndef tensor_zip(fn):\n \"\"\"\n Higher-order tensor zipWith (or map2) function.\n\n Args:\n fn: function mappings two floats to float to apply.\n out (array): storage for `out` tensor.\n out_shape (array): shape for `out` tensor.\n out_strides (array): strides for `out` tensor.\n a_storage (array): storage for `a` tensor.\n a_shape (array): shape for `a` tensor.\n a_strides (array): strides for `a` tensor.\n b_storage (array): storage for `b` tensor.\n b_shape (array): shape for `b` tensor.\n b_strides (array): strides for `b` tensor.\n \"\"\"\n\n def _zip(out, out_shape, out_strides, a, a_shape, a_strides, b, b_shape, b_strides):\n if (\n len(out_strides) != len(a_strides)\n or (out_strides != a_strides).any()\n or (out_shape != a_shape).any()\n or len(out_strides) != len(b_strides)\n or (out_strides != b_strides).any()\n or (out_shape != b_shape).any()\n ):\n for i in prange(len(out)):\n out_index = np.zeros(MAX_DIMS, np.int32)\n a_index = np.zeros(MAX_DIMS, np.int32)\n b_index = np.zeros(MAX_DIMS, np.int32)\n count(i, out_shape, out_index)\n o = index_to_position(out_index, out_strides)\n broadcast_index(out_index, out_shape, a_shape, a_index)\n j = index_to_position(a_index, a_strides)\n broadcast_index(out_index, out_shape, b_shape, b_index)\n k = index_to_position(b_index, b_strides)\n out[o] = fn(a[j], b[k])\n else:\n for i in prange(len(out)):\n out[i] = fn(a[i], 
b[i])\n\n return njit(parallel=True)(_zip)\n\n\ndef zip(fn):\n\n f = tensor_zip(njit()(fn))\n\n def ret(a, b):\n c_shape = shape_broadcast(a.shape, b.shape)\n out = a.zeros(c_shape)\n f(*out.tuple(), *a.tuple(), *b.tuple())\n return out\n\n return ret\n\n\ndef tensor_reduce(fn):\n \"\"\"\n Higher-order tensor reduce function.\n\n Args:\n fn: reduction function mapping two floats to float.\n out (array): storage for `out` tensor.\n out_shape (array): shape for `out` tensor.\n out_strides (array): strides for `out` tensor.\n a_storage (array): storage for `a` tensor.\n a_shape (array): shape for `a` tensor.\n a_strides (array): strides for `a` tensor.\n reduce_shape (array): shape of reduction (1 for dimension kept, shape value for dimensions summed out)\n reduce_size (int): size of reduce shape\n \"\"\"\n\n def _reduce(\n out, out_shape, out_strides, a, a_shape, a_strides, reduce_shape, reduce_size\n ):\n for i in prange(len(out)):\n out_index = np.zeros(MAX_DIMS, np.int32)\n a_index = np.zeros(MAX_DIMS, np.int32)\n\n count(i, out_shape, out_index)\n o = index_to_position(out_index, out_strides)\n\n for s in range(reduce_size):\n count(s, reduce_shape, a_index)\n for k in range(len(reduce_shape)):\n if reduce_shape[k] != 1:\n out_index[k] = a_index[k]\n j = index_to_position(out_index, a_strides)\n out[o] = fn(out[o], a[j])\n\n return njit(parallel=True)(_reduce)\n\n\ndef reduce(fn, start=0.0):\n f = tensor_reduce(njit()(fn))\n\n def ret(a, dims=None, out=None):\n if out is None:\n out_shape = list(a.shape)\n for d in dims:\n out_shape[d] = 1\n # Other values when not sum.\n out = a.zeros(tuple(out_shape))\n out._tensor._storage[:] = start\n\n diff = len(a.shape) - len(out.shape)\n\n reduce_shape = []\n reduce_size = 1\n for i, s in enumerate(a.shape):\n if i < diff or out.shape[i - diff] == 1:\n reduce_shape.append(s)\n reduce_size *= s\n else:\n reduce_shape.append(1)\n # assert len(out.shape) == len(a.shape)\n f(*out.tuple(), *a.tuple(), np.array(reduce_shape), reduce_size)\n return out\n\n return ret\n\n\nclass FastOps:\n map = map\n zip = zip\n reduce = reduce\n","sub_path":"jovsatools/minitorch/coursework/Module-3/minitorch/fast_ops.py","file_name":"fast_ops.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"452366910","text":"# encoding: utf-8\n\"\"\" WatchEventProcessor.py \"\"\"\n\nfrom logging import getLogger\n\n\nclass WatchEventProcessor(object):\n \"\"\" WatchEvent processor. 
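Extracts the 'actor' and 'url' fields from a raw GitHub WatchEvent dict.\n 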
\"\"\"\n\n def __init__(self):\n self.logger = getLogger('LOGGER')\n\n def get_data_from_event(self, event):\n \"\"\" Gets necessary data from GitHub WatchEvent\n :param event: GitHub event\n :return: data for database\n \"\"\"\n data = {\"actor\": None, \"url\": None}\n for key in data.keys():\n try:\n data[key] = event[key]\n except ValueError:\n self.logger.warning(__name__ + \": \" + \"Missing key %s in event %s\" % (key, event))\n data[\"data_type\"] = \"NewData\"\n return data","sub_path":"WatchEventProcessor.py","file_name":"WatchEventProcessor.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"57798065","text":"import json\n\n\ndef get_code(code):\n if len(code) > 2: return(get_cities(code))\n\n with open('./Codes.json') as f:\n data = json.load(f)\n\n for x in data:\n if(x['Code'] == code): return(get_cities(x['Name']))\n\n return None\n\ndef get_cities(country):\n with open('./Cities.json') as f:\n data = json.load(f)\n response = \"The cities within \"+country+\" are: \\n\"\n found = False\n for x in data:\n if x['country'] == country:\n found = True\n response += x['name'] + '\\n'\n\n return response if found else \"Invalid Country Name or Code\"\n\n\ndef get_code_from_country(country):\n with open('./Codes.json') as f:\n data = json.load(f)\n\n for x in data:\n if x['Name'] == country: return x['Code']\n\n return None","sub_path":"venv/JsonParse.py","file_name":"JsonParse.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"558592079","text":"\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport random\nimport contextlib\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom nncf import NNCFConfig\nfrom nncf.tensorflow import create_compression_callbacks\nfrom nncf.tensorflow import register_default_init_args\nfrom tests.tensorflow.helpers import create_compressed_model_and_algo_for_test\nfrom tests.tensorflow.quantization.utils import get_basic_quantization_config\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n\n\ndef get_simple_conv_regression_model(img_size=10):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=1, kernel_size=(3, 3),\n activation='relu', input_shape=(img_size, img_size, 1)),\n tf.keras.layers.AveragePooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1)\n ])\n return model\n\n\ndef get_basic_magnitude_sparsity_config(input_sample_size=None):\n if input_sample_size is None:\n input_sample_size = [1, 4, 4, 1]\n config = NNCFConfig({\n \"model\": \"basic_sparse_conv\",\n \"input_info\":\n {\n \"sample_size\": input_sample_size,\n },\n \"compression\":\n {\n \"algorithm\": \"magnitude_sparsity\",\n \"sparsity_init\": 0.3,\n \"params\": {}\n }\n })\n return config\n\n\ndef get_const_target_mock_regression_dataset(num_samples=20, 
img_size=10, target_value=20.0):\n class SingleBatchGenerator:\n def __init__(self, X):\n self.X = X\n\n def __call__(self):\n for i, _ in enumerate(self.X):\n xi = np.expand_dims(self.X[i], axis=0)\n yield xi, [target_value, ]\n\n X = [np.random.uniform(0, 255, size=(img_size, img_size, 1)).astype(np.uint8)\n for _ in range(num_samples)]\n gen = SingleBatchGenerator(X)\n dataset = tf.data.Dataset.from_generator(\n generator=gen,\n output_types=(tf.float64, tf.float64),\n output_shapes=((1, img_size, img_size, 1), (1,)),\n )\n return dataset\n\n\n@pytest.mark.parametrize(\n ('max_accuracy_degradation',\n 'final_compression_rate',\n 'reference_final_metric',\n 'should_raise_runtime_error'),\n (\n ({'maximal_relative_accuracy_degradation': 30.0}, 0.846153, 0.141971, False),\n ({'maximal_relative_accuracy_degradation': 1.0}, 0.0, 0.0, True),\n ({'maximal_absolute_accuracy_degradation': 0.10}, 0.846153, 0.141971, False),\n )\n)\ndef test_adaptive_compression_training_loop(max_accuracy_degradation, final_compression_rate,\n reference_final_metric, should_raise_runtime_error,\n initial_training_phase_epochs=5, patience_epochs=3,\n uncompressed_model_accuracy=0.2, steps_per_epoch=20,\n img_size=10):\n set_random_seed(42)\n model = get_simple_conv_regression_model(img_size)\n dataset = get_const_target_mock_regression_dataset(img_size=img_size,\n num_samples=steps_per_epoch)\n config = get_basic_magnitude_sparsity_config(input_sample_size=[1, img_size, img_size, 1])\n\n params = {\n \"initial_training_phase_epochs\": initial_training_phase_epochs,\n \"patience_epochs\": patience_epochs,\n }\n params.update(max_accuracy_degradation)\n accuracy_aware_config = {\n \"accuracy_aware_training\": {\n \"mode\": \"adaptive_compression_level\",\n \"params\": params\n }\n }\n\n config.update(accuracy_aware_config)\n\n compress_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n compression_callbacks = create_compression_callbacks(compression_ctrl, log_tensorboard=False)\n compress_model.add_loss(compression_ctrl.loss)\n\n def inverse_loss(y_true, y_pred):\n return 1 / (1 + (y_true - y_pred) ** 2)\n\n compress_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),\n loss=tf.keras.losses.MeanSquaredError(),\n metrics=inverse_loss)\n\n result_dict_to_val_metric_fn = lambda results: results['inverse_loss']\n\n exec_ctx = pytest.raises(RuntimeError) if should_raise_runtime_error \\\n else contextlib.suppress()\n with exec_ctx as execinfo:\n compress_model.accuracy_aware_fit(dataset,\n compression_ctrl,\n nncf_config=config,\n callbacks=compression_callbacks,\n initial_epoch=0,\n steps_per_epoch=steps_per_epoch,\n uncompressed_model_accuracy=uncompressed_model_accuracy,\n result_dict_to_val_metric_fn=result_dict_to_val_metric_fn)\n validation_metrics = compress_model.evaluate(dataset, return_dict=True)\n\n assert result_dict_to_val_metric_fn(validation_metrics) == pytest.approx(reference_final_metric, 1e-4)\n assert compression_ctrl.compression_rate == pytest.approx(final_compression_rate, 1e-3)\n\n if should_raise_runtime_error:\n assert str(execinfo.value) == 'Cannot produce a compressed model with a ' \\\n 'specified minimal tolerable accuracy'\n\n\n@pytest.mark.parametrize(\n 'max_accuracy_degradation',\n (({'maximal_relative_accuracy_degradation': 30.0}),\n ({'maximal_relative_accuracy_degradation': 1.0}),\n ({'maximal_absolute_accuracy_degradation': 0.1})\n )\n)\ndef test_early_exit_compression_training_loop(max_accuracy_degradation,\n maximal_total_epochs=100, 
uncompressed_model_accuracy=0.2,\n steps_per_epoch=20, img_size=10):\n set_random_seed(42)\n model = get_simple_conv_regression_model(img_size)\n dataset = get_const_target_mock_regression_dataset(img_size=img_size,\n num_samples=steps_per_epoch)\n\n config = get_basic_quantization_config(img_size)\n params = {\n \"maximal_total_epochs\": maximal_total_epochs,\n }\n params.update(max_accuracy_degradation)\n accuracy_aware_config = {\n \"accuracy_aware_training\": {\n \"mode\": \"early_exit\",\n \"params\": params\n }\n }\n config.update(accuracy_aware_config)\n config = register_default_init_args(config, dataset, batch_size=1)\n compress_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n compression_callbacks = create_compression_callbacks(compression_ctrl, log_tensorboard=False)\n compress_model.add_loss(compression_ctrl.loss)\n\n def inverse_loss(y_true, y_pred):\n return 1 / (1 + (y_true - y_pred) ** 2)\n\n compress_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),\n loss=tf.keras.losses.MeanSquaredError(),\n metrics=inverse_loss)\n\n result_dict_to_val_metric_fn = lambda results: results['inverse_loss']\n\n compress_model.accuracy_aware_fit(dataset,\n compression_ctrl,\n nncf_config=config,\n callbacks=compression_callbacks,\n initial_epoch=0,\n steps_per_epoch=steps_per_epoch,\n uncompressed_model_accuracy=uncompressed_model_accuracy,\n result_dict_to_val_metric_fn=result_dict_to_val_metric_fn)\n original_model_accuracy = compress_model.original_model_accuracy\n compressed_model_accuracy = result_dict_to_val_metric_fn(compress_model.evaluate(dataset, return_dict=True))\n\n if \"maximal_absolute_accuracy_degradation\" in max_accuracy_degradation:\n assert (original_model_accuracy - compressed_model_accuracy) <= \\\n max_accuracy_degradation[\"maximal_absolute_accuracy_degradation\"]\n else:\n assert (original_model_accuracy - compressed_model_accuracy) / original_model_accuracy * 100 <= \\\n max_accuracy_degradation[\"maximal_relative_accuracy_degradation\"]\n","sub_path":"tests/tensorflow/accuracy_aware_training/test_keras_api.py","file_name":"test_keras_api.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"350782290","text":"#!/usb/bin/env python3\nimport os\nimport sys\nimport re\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\nT1 = float(os.environ.get(\"T1\", 1.0))\nROOT = bool(os.environ.get(\"ROOT\", False))\n\nSTYLE=os.environ.get(\"STYLE\", \"tableau-colorblind10\")\n\n\ndef stable0(df: pd.DataFrame) -> pd.DataFrame:\n return df\n\n\ndef stable1(df: pd.DataFrame) -> pd.DataFrame:\n return df.loc[df.vanilla1 >= T1]\n\n\ndef stable2(df: pd.DataFrame) -> pd.DataFrame:\n return df.loc[(df.vanilla1 >= T1) & (df.fullblock3p1 < T1) & (df.fullblock3p2 < T1)]\n\n\nSTABLES = {\n 'raw': stable0,\n #'vanilla-self': stable1,\n 'high-confidence': stable2,\n}\n\n\ndef save_ax_pdf(ax, filename: str, no_xticks: bool = True):\n fig = ax.get_figure()\n if no_xticks:\n ax.set_xticklabels([])\n else:\n fig.autofmt_xdate(rotation=45)\n fig.tight_layout()\n fig.savefig(filename)\n plt.close(fig)\n\n\ndef main(argv):\n try:\n csvfile = argv[1]\n except IndexError:\n print(f\"usage: {argv[0]} CSV_FILE\")\n return\n \n csv_stem = os.path.splitext(csvfile)[0]\n df = pd.read_csv(csvfile)\n\n if 'is_root' in df.columns:\n if ROOT:\n df = df[df.is_root == True].drop(['is_root', 'is_ad'], 
axis=1)\n csv_stem += \"_root\"\n subset = \"Root/1p Frames\"\n else:\n df = df[(df.is_root == False) & (df.is_ad == False)].drop(['is_root', 'is_ad'], axis=1)\n csv_stem += \"_3pnoad\"\n subset = \"Non-Ad 3p Frames\"\n else:\n subset = \"All Frames\"\n \n if 'session' in df.columns:\n groupers = ['session', 'site_tag', 'frame_url', 'p2', 'p1']\n else:\n groupers = ['site_tag', 'frame_url', 'p2', 'p1']\n\n YRANGE = (-0.1, 1.1)\n\n plt.style.use(STYLE)\n COLORS = plt.rcParams['axes.prop_cycle'].by_key()['color']\n baseline_info = ('Permissive-1', COLORS[0])\n policy_info = {\n 'prototype': ('Page-length', COLORS[1]),\n 'splitkey': ('Site-keyed', COLORS[2]),\n 'fullblock3p': ('Blocking', COLORS[3]),\n }\n variant_style = {\n '1': '.',\n '2': 'x',\n }\n STRIDE = 500\n\n for stability_algo, stability_func in STABLES.items():\n raw_by_node = df.groupby(groupers).node_jaccard.sum().unstack().dropna()\n by_node = stability_func(raw_by_node)\n node_ratio = len(by_node) / len(raw_by_node)\n raw_by_edge = df.groupby(groupers).edge_jaccard.sum().unstack().dropna()\n by_edge = stability_func(raw_by_edge)\n edge_ratio = len(by_edge) / len(raw_by_edge)\n \n #ax = (by_node.cumsum() / len(by_node)).plot(title=f\"Cumulative Node-Bag Similarity [0.0-1.0] Scores\\n({subset}; {stability_algo}[{T1:.2f}]: {node_ratio:.2%})\", ylim=YRANGE)\n #save_ax_pdf(ax, f\"{csv_stem}_nodes_sum_{stability_algo}.pdf\")\n #ax = by_node.plot.box(title=f\"Node-Bag Similarity [0.0-1.0] Score Distributions\\n({subset}; {stability_algo}[{T1:.2f}]: {node_ratio:.2%})\", ylim=YRANGE)\n #save_ax_pdf(ax, f\"{csv_stem}_nodes_box_{stability_algo}.pdf\", no_xticks=False)\n #ax = (by_edge.cumsum() / len(by_edge)).plot(title=f\"Cumulative Edge-Bag Similarity [0.0-1.0] Scores\\n({subset}; {stability_algo}[{T1:.2f}]: {edge_ratio:.2%})\", ylim=YRANGE)\n #save_ax_pdf(ax, f\"{csv_stem}_edges_sum_{stability_algo}.pdf\")\n #ax = by_edge.plot.box(title=f\"Edge-Bag Similarity [0.0-1.0] Score Distributions\\n({subset}; {stability_algo}[{T1:.2f}]: {edge_ratio:.2%})\", ylim=YRANGE)\n #save_ax_pdf(ax, f\"{csv_stem}_edges_box_{stability_algo}.pdf\", no_xticks=False)\n\n ncs = by_edge.cumsum() / len(by_edge)\n series_map = dict(ncs.items())\n ax = series_map['vanilla1'].plot(label=baseline_info[0], color=baseline_info[1], linewidth=3, linestyle=\":\")\n for policy, (name, color) in policy_info.items():\n for N in \"12\":\n series = series_map[policy + N]\n series.plot(ax=ax, label=f\"{name}-{N}\", color=color, marker=variant_style[N], markevery=STRIDE)\n ax.legend()\n ax.set_xticks([i for i in range(0, len(ncs) + 1, STRIDE)], minor=False)\n ax.set_xticklabels([str(i) for i in range(0, len(ncs) + 1, STRIDE)], minor=False)\n ax.set_xlabel(\"distinct frame instances loaded across all profiles\\n(third-party, non-ad frames only)\")\n ax.set_ylabel(\"normalized cumulative similarity to Permissive-2\\n(0 = disjoint, 1 = equal)\")\n fig = ax.get_figure()\n fig.tight_layout()\n fig.savefig(f\"{csv_stem}_edges_sum_{stability_algo}.pdf\")\n plt.close(fig)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)","sub_path":"analysis/plot_compat.py","file_name":"plot_compat.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650058469","text":"import logging\nimport argparse\nimport sys\nimport psycopg2\n\n#Set the log output file, and the log level\nlogging.basicConfig(filename=\"snippets.log\", level = logging.DEBUG)\n\nlogging.debug(\"Connecting to PostgreSQL\")\nconnection 
= psycopg2.connect(\"dbname='snippets' user='ubuntu' password='P@ssword1!' host='localhost'\")\nlogging.debug(\"Database connection established\")\n\ndef put(name, snippet):\n \"\"\"Store a snippet with an associated name.\"\"\"\n logging.info(\"Storing snippet {!r}: {!r}\".format(name, snippet))\n cursor = connection.cursor()\n command = \"insert into snippets values (%s, %s)\"\n cursor.execute(command, (name, snippet))\n connection.commit()\n logging.debug(\"Snippet stored successfully.\")\n return name, snippet\n \n \ndef get(name):\n \"\"\"Retrieve the snippet with a given name.\n If there is no such snippet...\n Returns the snippet.\n \"\"\"\n \n logging.error(\"FIXME: Unimplemented - get({!r})\".format(name))\n return \"\"\n \ndef main():\n \"\"\"Main function.\"\"\"\n logging.info(\"Constructing parser\")\n parser = argparse.ArgumentParser(description=\"Store and retrieve snippets of text\")\n arguments = parser.parse_args(sys.argv[1:])\n \nif __name__ == \"__main__\":\n main()\n \n\n ","sub_path":"snippets.py","file_name":"snippets.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"185438334","text":"from rest_framework import generics\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import permissions\nfrom django.http import HttpResponseRedirect\nfrom communication.models import Message\nfrom communication.serializers import MessageSerializer\nfrom profiles.custompermission import IfPermissionForCommunicate\n\n\nclass MessageList(generics.ListCreateAPIView):\n\n # Return only the messages exchanged between the two users participating in the conversation.\n def get_queryset(self):\n queryset = list(Message.objects.all())\n user = self.request.user\n second_users_pk = self.kwargs['pk']\n second_users_nickname = get_user_model().objects.get(pk=second_users_pk)\n result_queryset = []\n for i in queryset:\n if (i.message_sender == user and i.message_receiver == second_users_nickname) \\\n or (i.message_sender == second_users_nickname and i.message_receiver == user):\n result_queryset.append(i)\n return result_queryset\n\n # Method is overridden to redirect the user to the message list after creating a new message. 
And there will be\n # auto fill of the fields 'message sender' and 'message receiver'.\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n sender_id = request.user.id\n receiver_id = self.kwargs['pk']\n serializer.validated_data['message_sender'] = get_user_model().objects.get(pk=sender_id)\n serializer.validated_data['message_receiver'] = get_user_model().objects.get(pk=receiver_id)\n self.perform_create(serializer)\n second_users_pk = self.kwargs['pk']\n second_users_conversation_url = get_user_model().objects.get(pk=second_users_pk).conversation\n return HttpResponseRedirect(redirect_to=second_users_conversation_url)\n\n serializer_class = MessageSerializer\n name = 'massage-list'\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n IfPermissionForCommunicate,\n )\n","sub_path":"social/communication/views/messages_view.py","file_name":"messages_view.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"96061008","text":"from bs4 import BeautifulSoup #as Soup\r\nimport os\r\n\r\n##import soupselect_bs4; soupselect_bs4.monkeypatch()\r\nimport urllib.request\r\n\r\n###soup = BeautifulSoup(urllib.request.urlopen('http://slashdot.org/')) #Soup()\r\n##select(soup, 'div#firehoselist > article > h2.story > span.story-title a')\r\n##soup.findSelect('h2.story span a')\r\n\r\nfor i in range(5):\r\n file_name = 'sample'+str(18757665+i)+'.htm'\r\n try: \r\n file_size_bytes = os.stat(file_name).st_size\r\n except FileNotFoundError as e: \r\n #print(str(e))\r\n print(\"파일 없음\")\r\n else:\r\n if file_size_bytes < 1030 :\r\n continue\r\n with open(file_name) as f:\r\n indent_init = 0\r\n indent_true = False\r\n\r\n soup = BeautifulSoup(f.read()) #Soup()\r\n\r\n yes_wrap = soup.select('div#yesWrap')[0]\r\n wrapper_content = yes_wrap.select('div#wrapperContent')[0]\r\n \r\n title_div = wrapper_content.select('div#title')[0] #1.제목든div##\r\n \r\n media_kind = title_div.select('span.rkeyL')[0] \r\n book_or_not = media_kind.contents[0]\r\n\r\n author_anchor = title_div.select('p > a')[0]\r\n author_name = author_anchor.contents[0]\r\n translator_anchor = title_div.select('p > a')[1]\r\n translator_name = translator_anchor.contents[0]\r\n press_anchor = title_div.select('p > a')[2]\r\n press_name = press_anchor.contents[0]\r\n originalB_anchor = title_div.select('p > a')[3]\r\n originalB__name = originalB_anchor.contents[0]\r\n\r\n book_salsInfo_div = wrapper_content.select('div#salsInfo')[0]\r\n book_timespace_dd = book_salsInfo_div.select('dd.pdDate')\r\n #print(book_timespace_dd[0])\r\n #print(book_timespace_dd[0].contents)\r\n book_birthdate = book_timespace_dd[0].select('p')[0].contents[0]\r\n # print(book_birthdate) \r\n book_space_dim = book_timespace_dd[0].find_all('p')[1].contents[0].strip()\r\n # print(book_space_dim)\r\n \r\n print(book_or_not)\r\n\r\n if book_or_not == '도서':\r\n interim_div = wrapper_content.select('div#contents')[0] #a1.책소개든div##\r\n\r\n goods_book_detail_div = wrapper_content.select('div.detailGoodsBasic')[0]\r\n goods_book_prices_info = goods_book_detail_div.select('td')\r\n print()\r\n print(goods_book_prices_info)\r\n print()\r\n #intro_p0 = interim_div.select('div.communityHide') \r\n #print(intro_p0)\r\n intro_p0 = interim_div.select('td')[0].contents #old: .find_all\r\n\r\n ##new_chosen_div = wrapper_content.select('div#contents > div')[1]\r\n ##last_chosen_paragraphs = 
new_chosen_div('p')\r\n #print(last_chosen_paragraphs)\r\n ##for p_elt in last_chosen_paragraphs:\r\n ## print(p_elt.contents[0].strip())\r\n\r\n print(\"---- BOOK INFO SUMMARY BEGIN ----\")\r\n #print(\"---Beginning of *.----\")\r\n #print(next_interim_div)\r\n #print(\"---End of Book *.----\")\r\n \r\n #intro_p0 = next_interim_div.select('td')[0]\r\n \r\n title_h1 = title_div.select('h1 > a')[0] # 2. elements inside the title div\r\n\r\n authors_transl_press_a_s = title_div.select('p > a')\r\n print(authors_transl_press_a_s)\r\n authors = authors_transl_press_a_s[0].contents[0]\r\n transl = authors_transl_press_a_s[1].contents[0]\r\n press = authors_transl_press_a_s[2].contents[0]\r\n # a2. elements inside the book-intro div\r\n \r\n print(\"file name : \" + file_name)\r\n print(\"book title : \" + str(title_h1.contents[0])) # 3. extract the title text\r\n print(\"publication date : \" + book_birthdate)\r\n \r\n print(\"author : \" + authors)\r\n print(\"translator : \" + transl)\r\n print(\"publisher : \" + press)\r\n print()\r\n # print(\"---Beginning of Book Intro.----\")\r\n print(\"[ Book Introduction ]\")\r\n #print(intro_p0)\r\n #if ( len(intro_p) > 0 )\r\n # for raw_p in intro_p\r\n # p_elt = str(raw_p)\r\n # print(p_elt)\r\n \r\n\r\n ###print(intro_p0)\r\n tmp_str = \"\"\r\n for raw_elt in intro_p0:\r\n elt = str(raw_elt)\r\n if elt.startswith('